# app.py
import asyncio
import gradio as gr
from fastapi import FastAPI
from config import settings
from rabbit_base import RabbitBase
from listener import RabbitListenerBase
from rabbit_repo import RabbitRepo
from service import LLMService
from runners.base import ILLMRunner
# --- Runner factory (stub) ---
class EchoRunner(ILLMRunner):
    """Minimal no-op runner used to smoke-test the RabbitMQ plumbing."""
    Type = "EchoRunner"

    async def StartProcess(self, llmServiceObj: dict): pass
    async def RemoveProcess(self, sessionId: str): pass
    async def StopRequest(self, sessionId: str): pass
    async def SendInputAndGetResponse(self, llmServiceObj: dict): pass

async def runner_factory(llmServiceObj: dict) -> ILLMRunner:
    return EchoRunner()
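# EchoRunner is only a stand-in; a real deployment would return a model-backed
# runner here. A hedged sketch, assuming a hypothetical LlamaRunner class and a
# "runnerType" field in llmServiceObj (neither exists in this file):
#
#     async def runner_factory(llmServiceObj: dict) -> ILLMRunner:
#         if (llmServiceObj or {}).get("runnerType") == "llama":
#             return LlamaRunner()
#         return EchoRunner()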
# --- Publisher and Service ---
publisher = RabbitRepo(external_source="https://space.external")
service = LLMService(publisher, runner_factory)
# --- Handlers mapping .NET FuncName -> service method ---
async def h_start(data): await service.StartProcess(data or {})
async def h_user(data): await service.UserInput(data or {})
async def h_remove(data): await service.RemoveSession(data or {})
async def h_stop(data): await service.StopRequest(data or {})
async def h_qir(data): await service.QueryIndexResult(data or {})
async def h_getreg(_): await service.GetFunctionRegistry(False)
async def h_getreg_f(_): await service.GetFunctionRegistry(True)
handlers = {
    "llmStartSession": h_start,
    "llmUserInput": h_user,
    "llmRemoveSession": h_remove,
    "llmStopRequest": h_stop,
    "queryIndexResult": h_qir,
    "getFunctionRegistry": h_getreg,
    "getFunctionRegistryFiltered": h_getreg_f,
}
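# The FuncName carried on each message is presumably what RabbitListenerBase uses
# to look up the coroutine above, so adding an RPC means registering a handler
# here and appending a matching declaration to DECLS below.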
# --- Listener wiring (fix: needs base + instance_name) ---
base = RabbitBase()
listener = RabbitListenerBase(
    base,
    instance_name=settings.RABBIT_INSTANCE_NAME,  # <- queue prefix, like your .NET instance
    handlers=handlers,
)
# Declarations mirror your C# InitRabbitMQObjs()
DECLS = [
    {"ExchangeName": f"llmStartSession{settings.SERVICE_ID}", "FuncName": "llmStartSession",
     "MessageTimeout": 600_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
    {"ExchangeName": f"llmUserInput{settings.SERVICE_ID}", "FuncName": "llmUserInput",
     "MessageTimeout": 600_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
    {"ExchangeName": f"llmRemoveSession{settings.SERVICE_ID}", "FuncName": "llmRemoveSession",
     "MessageTimeout": 60_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
    {"ExchangeName": f"llmStopRequest{settings.SERVICE_ID}", "FuncName": "llmStopRequest",
     "MessageTimeout": 60_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
    {"ExchangeName": f"queryIndexResult{settings.SERVICE_ID}", "FuncName": "queryIndexResult",
     "MessageTimeout": 60_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
    {"ExchangeName": f"getFunctionRegistry{settings.SERVICE_ID}", "FuncName": "getFunctionRegistry",
     "MessageTimeout": 60_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
    {"ExchangeName": f"getFunctionRegistryFiltered{settings.SERVICE_ID}", "FuncName": "getFunctionRegistryFiltered",
     "MessageTimeout": 60_000, "RoutingKeys": [settings.RABBIT_ROUTING_KEY]},
]
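# MessageTimeout appears to be milliseconds, mirroring the .NET side: 600_000
# (~10 min) for the long-running session calls, 60_000 (1 min) for the rest.
# Suffixing exchange names with SERVICE_ID presumably keeps several runner
# instances on one broker from clashing.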
# --- Gradio UI (for smoke test) ---
async def ping():
    return "ok"

with gr.Blocks() as demo:
    gr.Markdown("### LLM Runner (Python) listening on RabbitMQ")
    btn = gr.Button("Ping")
    out = gr.Textbox()
    btn.click(ping, inputs=None, outputs=out)
# --- FastAPI mount + lifecycle ---
app = FastAPI()
app = gr.mount_gradio_app(app, demo, path="/")
@app.get("/health")
async def health():
    return {"status": "ok"}
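# Quick check, e.g. with the app running locally on the port configured below:
#     curl http://localhost:7860/health   ->   {"status": "ok"}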
@app.on_event("startup")
async def on_start():
    # Connect the publisher before the listener starts consuming, so handlers
    # can publish replies as soon as messages arrive.
    await publisher.connect()
    await service.init()
    await listener.start(DECLS)
@app.on_event("shutdown")
async def on_stop():
    # Optionally close connections/channels if you track them
    pass
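    # A fuller shutdown would likely stop the listener and close the AMQP
    # connection here; a hedged sketch, assuming the Rabbit classes expose
    # async stop()/close() helpers (not confirmed in this file):
    #     await listener.stop()
    #     await base.close()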
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)