import logging
import time
from typing import Any, Dict, AsyncIterable

from vllm.sampling_params import SamplingParams

from backends_base import ChatBackend, ImagesBackend
from state import vllm_engine

logger = logging.getLogger(__name__)


class VLLMChatBackend(ChatBackend):
    """
    Streams completions from a local vLLM engine.

    Produces OpenAI-compatible ChatCompletionChunk dicts.
    """
|
|
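    # Illustrative consumption sketch (hypothetical caller, e.g. an SSE route;
    # not part of this module): the chunks yielded below map directly onto
    # OpenAI-style streaming lines.
    #
    #     async for chunk in VLLMChatBackend().stream(body):
    #         yield f"data: {json.dumps(chunk)}\n\n"
    #     yield "data: [DONE]\n\n"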
    async def stream(self, request: Dict[str, Any]) -> AsyncIterable[Dict[str, Any]]:
        if vllm_engine is None:
            raise RuntimeError("vLLM engine not initialized")

        # Naive prompt handling: only the latest message's content is sent to the engine.
        messages = request.get("messages", [])
        prompt = messages[-1]["content"] if messages else "(empty)"

        # SamplingParams has no `stream` flag; streaming comes from iterating
        # the async generator returned by vllm_engine.generate().
        params = SamplingParams(
            temperature=float(request.get("temperature", 0.7)),
            max_tokens=int(request.get("max_tokens", 512)),
        )

        rid = f"chatcmpl-local-{int(time.time())}"
        now = int(time.time())
        model_name = request.get("model", "local-vllm")
        try:
            # Each RequestOutput from vLLM carries the cumulative generated text,
            # so track what has already been sent and emit only the new suffix.
            emitted = ""
            async for output in vllm_engine.generate(prompt, params, request_id=rid):
                text = output.outputs[0].text
                delta, emitted = text[len(emitted):], text
                if not delta:
                    continue
                yield {
                    "id": rid,
                    "object": "chat.completion.chunk",
                    "created": now,
                    "model": model_name,
                    "choices": [
                        {"index": 0, "delta": {"content": delta}, "finish_reason": None}
                    ],
                }
        except Exception:
            logger.exception("vLLM generation failed")
            raise
        # Terminal chunk: empty delta with finish_reason "stop", per the OpenAI
        # streaming format.
        yield {
            "id": rid,
            "object": "chat.completion.chunk",
            "created": now,
            "model": model_name,
            "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
        }


class StubImagesBackend(ImagesBackend):
    """
    vLLM does not support image generation.

    For now, return a transparent PNG placeholder.
    """
|
|
    async def generate_b64(self, request: Dict[str, Any]) -> str:
        logger.warning("Image generation not supported in local vLLM backend.")

        # Tiny transparent PNG placeholder, base64-encoded.
        return "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP4BwQACfsD/etCJH0AAAAASUVORK5CYII="
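

# Minimal smoke test for the stub image backend (an illustrative sketch, not
# part of the backend API): decode the placeholder and check the PNG signature.
if __name__ == "__main__":
    import asyncio
    import base64

    async def _demo() -> None:
        stub = StubImagesBackend()
        b64 = await stub.generate_b64({"prompt": "ignored"})  # request content is unused
        png = base64.b64decode(b64)
        header_ok = png[:4] == b"\x89PNG"
        print(f"placeholder PNG: {len(png)} bytes, signature ok: {header_ok}")

    asyncio.run(_demo())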