# vllm_backend.py
import logging
import time
from typing import Any, Dict, AsyncIterable
from vllm.sampling_params import SamplingParams
from backends_base import ChatBackend, ImagesBackend
import state  # ✅ state.vllm_engine is the single source of truth; resolve it at call time, not import time, so late initialization is visible
logger = logging.getLogger(__name__)


class VLLMChatBackend(ChatBackend):
"""
Streams completions from a local vLLM engine.
Produces OpenAI-compatible ChatCompletionChunk dicts.
"""
    async def stream(self, request: Dict[str, Any]) -> AsyncIterable[Dict[str, Any]]:
        if state.vllm_engine is None:
            raise RuntimeError("vLLM engine not initialized")
        # For now: use only the last message's content as the prompt; earlier
        # turns and any system prompt are ignored until chat templating is added.
        messages = request.get("messages", [])
        prompt = messages[-1]["content"] if messages else "(empty)"
        # Note: SamplingParams takes no `stream` flag; streaming falls out of
        # iterating the engine's async generator below.
        params = SamplingParams(
            temperature=float(request.get("temperature", 0.7)),
            max_tokens=int(request.get("max_tokens", 512)),
        )
rid = f"chatcmpl-local-{int(time.time())}"
now = int(time.time())
model_name = request.get("model", "local-vllm")
        sent = 0  # outputs[0].text is cumulative, so track what we've already emitted
        try:
            async for output in state.vllm_engine.generate(prompt, params, request_id=rid):
                full_text = output.outputs[0].text
                text_piece = full_text[sent:]  # new suffix only, per OpenAI delta semantics
                sent = len(full_text)
                if not text_piece:
                    continue
yield {
"id": rid,
"object": "chat.completion.chunk",
"created": now,
"model": model_name,
"choices": [
{"index": 0, "delta": {"content": text_piece}, "finish_reason": None}
],
}
except Exception:
logger.exception("vLLM generation failed")
raise
# Final stop signal
yield {
"id": rid,
"object": "chat.completion.chunk",
"created": now,
"model": model_name,
"choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
}
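

# How a caller might frame these chunks for an OpenAI-compatible SSE endpoint.
# This is only a sketch; the real route lives elsewhere in the app, and the
# `request` payload here is assumed to follow the OpenAI chat.completions shape:
#
#     async for chunk in VLLMChatBackend().stream(request):
#         yield f"data: {json.dumps(chunk)}\n\n"
#     yield "data: [DONE]\n\n"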


class StubImagesBackend(ImagesBackend):
"""
vLLM does not support image generation.
For now, return a transparent PNG placeholder.
"""
async def generate_b64(self, request: Dict[str, Any]) -> str:
logger.warning("Image generation not supported in local vLLM backend.")
# 1x1 transparent PNG
return "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP4BwQACfsD/etCJH0AAAAASUVORK5CYII="