from openboxing_api import get_all_champions, find_champion_by_name, get_all_bouts, get_bouts_for_champion
import gradio as gr
from huggingface_hub import InferenceClient

# -------------------------------------------------------
# SYSTEM PROMPT (Upgraded for DeepSeek Reasoning)
# -------------------------------------------------------
system_prompt = """
You are BOXTRON-AI, an elite boxing analyst and professional fight judge.
Your expertise includes stylistic breakdowns, matchup analysis, round-by-round simulations,
and probability-based fight predictions.

Your responsibilities:
- Score rounds using the official 10-point must system.
- Provide deep, technical, and objective fight analysis.
- Break down styles, strengths, weaknesses, pace, footwork, combinations, IQ, and strategy.
- Analyze attributes such as height, reach, stance, age, KO %, defense, accuracy, and habits.
- Predict multiple realistic scenarios, assign probabilities, and justify them.
- For simulations, describe round-by-round action and scoring.
- Clearly explain uncertainty when present.
- NEVER invent fake fight records or medical information.
- Base predictions on logic, style interaction, known tendencies, and probability.

Your tone:
Professional, analytical, neutral, expert-level, similar to a mix of a ringside commentator
and a veteran boxing judge.

Whenever the user mentions two fighters, assume they want:
1. A matchup breakdown
2. A stylistic analysis
3. A round-by-round prediction (if asked)
4. Probabilities for each plausible outcome

Always think step-by-step and use reasoning before concluding.
"""
# -------------------------------------------------------
# CHAT RESPONSE FUNCTION
# -------------------------------------------------------
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
    # Gradio passes None when the user is not logged in, so guard before
    # touching hf_token.token.
    if hf_token is None or not getattr(hf_token, "token", None):
        yield "Please log in with your Hugging Face account to use the model."
        return

    # Example prompt: predict shakur stevenson vs teofimo lopez
    if "predict" in message.lower() and "vs" in message.lower():
        message = message.replace("predict ", "")
        print(message)
        fighters = message.split(" vs ")
        print(fighters)

        if len(fighters) == 2:
            all_champs = get_all_champions()
            all_bouts = get_all_bouts()

            fighter1_name = fighters[0].strip()
            fighter2_name = fighters[1].strip()

            fighter1 = find_champion_by_name(all_champs, fighter1_name)
            fighter2 = find_champion_by_name(all_champs, fighter2_name)

            if fighter1 and fighter2:
                history1 = get_bouts_for_champion(all_bouts, fighter1["championId"])
                history2 = get_bouts_for_champion(all_bouts, fighter2["championId"])

                # Replace the raw user message with a structured prompt for the LLM
                message = f"""
Fighter 1: {fighter1['name']['first']} {fighter1['name']['last']}
Fighter 2: {fighter2['name']['first']} {fighter2['name']['last']}
Fighter 1 bouts: {len(history1)}
Fighter 2 bouts: {len(history2)}
Predict this fight round by round.
"""

    # Initialize model client (DeepSeek 70B)
    client = InferenceClient(
        token=hf_token.token,
        model="deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
    )

    # ---------------------------------------------------
    # Normalize chat history for the LLM chat format.
    # With type="messages", Gradio supplies a list of
    # {"role": ..., "content": ...} dicts, so keep valid
    # role/content pairs and drop entries without text.
    # ---------------------------------------------------
    fixed_history = []
    for h in history:
        role = h.get("role")
        content = h.get("content")
        if role in ("user", "assistant") and content:
            fixed_history.append({"role": role, "content": content})

    # ---------------------------------------------------
    # Build entire message list
    # ---------------------------------------------------
    messages = [{"role": "system", "content": system_message}]
    messages.extend(fixed_history)
    messages.append({"role": "user", "content": message})

    # ---------------------------------------------------
    # Stream model response token-by-token
    # ---------------------------------------------------
    response_text = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = ""
        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
            token = chunk.choices[0].delta.content
        response_text += token
        yield response_text
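# -------------------------------------------------------
# Optional local smoke test (not part of the original app): a minimal
# sketch that drives respond() outside the Gradio UI, assuming an
# HF_TOKEN environment variable. SimpleNamespace is a hypothetical
# stand-in for the gr.OAuthToken object Gradio normally injects.
# -------------------------------------------------------
def _smoke_test(prompt: str = "predict shakur stevenson vs teofimo lopez"):
    import os
    from types import SimpleNamespace

    fake_token = SimpleNamespace(token=os.environ["HF_TOKEN"])  # duck-typed stand-in
    final_text = ""
    # respond() is a generator that yields the accumulated reply after each chunk
    for partial in respond(prompt, [], system_prompt, 800, 0.7, 0.95, fake_token):
        final_text = partial
    print(final_text)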
# -------------------------------------------------------
# GRADIO CHAT UI
# -------------------------------------------------------
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value=system_prompt, label="System Prompt"),
        gr.Slider(1, 4096, value=800, step=1, label="Max Tokens"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
    ],
    title="🥊 BOXTRON-AI — Advanced Boxing Prediction Engine",
    description="DeepSeek-powered boxing analyst that predicts fights, simulates rounds, scores bouts, and breaks down styles.",
)
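# Note: additional_inputs are passed to respond() positionally after
# (message, history), i.e. system_message, max_tokens, temperature, top_p.
# The hf_token parameter is not listed here because Gradio fills any
# parameter annotated with gr.OAuthToken from the logged-in user's session.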
# -------------------------------------------------------
# LAUNCH APP
# -------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.Markdown("### Login Required")
        gr.LoginButton()
    chatbot.render()

if __name__ == "__main__":
    demo.launch()
# -------------------------------------------------------
# STANDALONE USAGE EXAMPLE (separate snippet, not executed by the app
# above; uses the same call signatures as the app)
# -------------------------------------------------------
from openboxing_api import get_all_champions, find_champion_by_name, get_all_bouts, get_bouts_for_champion

all_champs = get_all_champions()
all_bouts = get_all_bouts()

fighter1 = find_champion_by_name(all_champs, "Isaac Cruz")
fighter2 = find_champion_by_name(all_champs, "Lamont Roach")

if fighter1 and fighter2:
    history1 = get_bouts_for_champion(all_bouts, fighter1["championId"])
    history2 = get_bouts_for_champion(all_bouts, fighter2["championId"])

    # Build your prompt for the LLM
    prompt = f"""
Fighter 1: {fighter1['name']['first']} {fighter1['name']['last']}
Fighter 2: {fighter2['name']['first']} {fighter2['name']['last']}
Fighter 1 bouts: {len(history1)}
Fighter 2 bouts: {len(history2)}
Predict this fight round by round.
"""