gabrielchua committed on
Commit
1f2e9c6
·
1 Parent(s): cc30f3f
.DS_Store ADDED
Binary file (6.15 kB). View file
 
Dockerfile ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Base image: slim Python 3.11 keeps the final image small
FROM python:3.11-slim

# Install build dependencies for packages such as uvicorn[standard]
RUN apt-get update && \
    apt-get install -y --no-install-recommends build-essential git && \
    rm -rf /var/lib/apt/lists/*

# Create the non-root user expected by Hugging Face Spaces
RUN useradd -m -u 1000 user

USER user

# PATH includes ~/.local/bin so pip --user installs are runnable;
# PORT mirrors the EXPOSEd Hugging Face Spaces port below
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PORT=7860

WORKDIR $HOME/app

# Install Python dependencies first to leverage Docker layer caching
COPY --chown=user requirements.txt .
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY --chown=user . .

# Expose the port expected by Hugging Face Spaces
EXPOSE 7860

# Run the FastAPI app from the backend directory so local imports resolve
WORKDIR $HOME/app/app/backend

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
app/.DS_Store ADDED
Binary file (6.15 kB). View file
 
app/backend/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """
2
+ LionGuard 2 Backend Package
3
+ """
4
+
5
+
6
+
app/backend/__pycache__/models.cpython-313.pyc ADDED
Binary file (3.63 kB). View file
 
app/backend/__pycache__/services.cpython-313.pyc ADDED
Binary file (16.4 kB). View file
 
app/backend/main.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FastAPI backend for LionGuard moderation
3
+ """
4
+
5
+ from fastapi import FastAPI, HTTPException
6
+ from fastapi.staticfiles import StaticFiles
7
+ from fastapi.responses import FileResponse
8
+ from fastapi.middleware.cors import CORSMiddleware
9
+ import os
10
+ from typing import List
11
+
12
+ from models import (
13
+ ModerateRequest,
14
+ ModerateResponse,
15
+ FeedbackRequest,
16
+ FeedbackResponse,
17
+ ChatRequest,
18
+ ChatResponse,
19
+ CategoryScore,
20
+ ChatHistories,
21
+ )
22
+ from services import (
23
+ analyze_text,
24
+ submit_feedback,
25
+ process_chat_message,
26
+ )
27
+
28
# FastAPI application instance; metadata shows up in the auto-generated docs
app = FastAPI(
    title="LionGuard API",
    description="Multilingual moderation and guardrail comparison",
    version="2.0.0"
)

# Enable CORS for development
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# very permissive — tighten the origin list before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Get the path to frontend directory (sibling of this backend package)
FRONTEND_DIR = os.path.join(os.path.dirname(__file__), "../frontend")
45
+
46
+
47
+ @app.post("/moderate", response_model=ModerateResponse)
48
+ async def moderate_text(request: ModerateRequest):
49
+ """
50
+ Analyze text for moderation risks using LionGuard models
51
+ """
52
+ try:
53
+ result = analyze_text(request.text, request.model)
54
+ return ModerateResponse(**result)
55
+ except Exception as e:
56
+ raise HTTPException(status_code=500, detail=f"Error analyzing text: {str(e)}")
57
+
58
+
59
+ @app.post("/send_feedback", response_model=FeedbackResponse)
60
+ async def send_feedback(request: FeedbackRequest):
61
+ """
62
+ Submit user feedback on moderation result
63
+ """
64
+ try:
65
+ result = submit_feedback(request.text_id, request.agree)
66
+ return FeedbackResponse(**result)
67
+ except Exception as e:
68
+ raise HTTPException(status_code=500, detail=f"Error submitting feedback: {str(e)}")
69
+
70
+
71
+ @app.post("/chat", response_model=ChatResponse)
72
+ async def chat_comparison(request: ChatRequest):
73
+ """
74
+ Compare guardrails across three approaches:
75
+ - No moderation
76
+ - OpenAI moderation
77
+ - LionGuard moderation
78
+ """
79
+ try:
80
+ # Convert request histories to list of dicts
81
+ history_no_mod = [msg.dict() for msg in request.histories.no_moderation]
82
+ history_openai = [msg.dict() for msg in request.histories.openai_moderation]
83
+ history_lg = [msg.dict() for msg in request.histories.lionguard]
84
+
85
+ # Process message
86
+ updated_no_mod, updated_openai, updated_lg, lg_score = await process_chat_message(
87
+ request.message,
88
+ request.model,
89
+ history_no_mod,
90
+ history_openai,
91
+ history_lg
92
+ )
93
+
94
+ # Convert back to response format
95
+ return ChatResponse(
96
+ histories=ChatHistories(
97
+ no_moderation=updated_no_mod,
98
+ openai_moderation=updated_openai,
99
+ lionguard=updated_lg
100
+ ),
101
+ lionguard_score=lg_score
102
+ )
103
+ except Exception as e:
104
+ raise HTTPException(status_code=500, detail=f"Error processing chat: {str(e)}")
105
+
106
+
107
# Serve static frontend files from FRONTEND_DIR under the /static URL prefix
app.mount("/static", StaticFiles(directory=FRONTEND_DIR), name="static")
109
+
110
+
111
+ @app.get("/")
112
+ async def serve_frontend():
113
+ """
114
+ Serve the main HTML page
115
+ """
116
+ index_path = os.path.join(FRONTEND_DIR, "index.html")
117
+ if os.path.exists(index_path):
118
+ return FileResponse(index_path)
119
+ raise HTTPException(status_code=404, detail="Frontend not found")
120
+
121
+
122
+ @app.get("/health")
123
+ async def health_check():
124
+ """
125
+ Health check endpoint
126
+ """
127
+ return {"status": "healthy", "service": "lionguard-api"}
128
+
129
+
130
+ if __name__ == "__main__":
131
+ import uvicorn
132
+ uvicorn.run(app, host="0.0.0.0", port=8000)
133
+
134
+
135
+
app/backend/models.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pydantic models for request/response validation
3
+ """
4
+
5
+ from typing import Dict, List, Optional
6
+ from pydantic import BaseModel, Field
7
+
8
+
9
+ class ModerateRequest(BaseModel):
10
+ text: str = Field(..., description="Text to moderate")
11
+ model: str = Field(
12
+ default="lionguard-2.1",
13
+ description="Model to use: lionguard-2, lionguard-2.1, or lionguard-2-lite"
14
+ )
15
+
16
+
17
+ class CategoryScore(BaseModel):
18
+ name: str
19
+ emoji: str
20
+ max_score: float
21
+
22
+
23
+ class ModerateResponse(BaseModel):
24
+ binary_score: float
25
+ binary_verdict: str # "pass", "warn", "fail"
26
+ binary_percentage: int
27
+ categories: List[CategoryScore]
28
+ text_id: str
29
+ model_used: str
30
+
31
+
32
+ class FeedbackRequest(BaseModel):
33
+ text_id: str = Field(..., description="ID of the text being voted on")
34
+ agree: bool = Field(..., description="True for thumbs up, False for thumbs down")
35
+
36
+
37
+ class FeedbackResponse(BaseModel):
38
+ success: bool
39
+ message: str
40
+
41
+
42
+ class ChatMessage(BaseModel):
43
+ role: str
44
+ content: str
45
+
46
+
47
+ class ChatHistories(BaseModel):
48
+ no_moderation: List[ChatMessage] = Field(default_factory=list)
49
+ openai_moderation: List[ChatMessage] = Field(default_factory=list)
50
+ lionguard: List[ChatMessage] = Field(default_factory=list)
51
+
52
+
53
+ class ChatRequest(BaseModel):
54
+ message: str = Field(..., description="Message to send to all guardrails")
55
+ model: str = Field(
56
+ default="lionguard-2.1",
57
+ description="LionGuard model variant to use"
58
+ )
59
+ histories: ChatHistories = Field(default_factory=ChatHistories)
60
+
61
+
62
+ class ChatResponse(BaseModel):
63
+ histories: ChatHistories
64
+ lionguard_score: Optional[float] = None
65
+
66
+
67
+
app.py → app/backend/services.py RENAMED
@@ -1,54 +1,38 @@
1
  """
2
- app.py
3
  """
4
 
5
- # Standard imports
6
  import json
7
  import os
8
- import sys
9
  import uuid
10
  import asyncio
11
  from datetime import datetime
 
12
 
13
- # Third party imports
14
  import openai
15
- import gradio as gr
16
  import gspread
17
  from google.oauth2 import service_account
18
- from transformers import AutoModel
19
 
20
- # Local imports
21
- from utils import get_embeddings
 
 
22
 
23
- # --- Categories
24
  CATEGORIES = {
25
  "binary": ["binary"],
26
  "hateful": ["hateful_l1", "hateful_l2"],
27
  "insults": ["insults"],
28
- "sexual": [
29
- "sexual_l1",
30
- "sexual_l2",
31
- ],
32
  "physical_violence": ["physical_violence"],
33
  "self_harm": ["self_harm_l1", "self_harm_l2"],
34
- "all_other_misconduct": [
35
- "all_other_misconduct_l1",
36
- "all_other_misconduct_l2",
37
- ],
38
  }
39
 
40
  # --- OpenAI Setup ---
41
- # Create both sync and async clients
42
  client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
43
  async_client = openai.AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
44
 
45
- # --- Model Loading ---
46
- def load_lionguard2():
47
- model = AutoModel.from_pretrained("govtech/lionguard-2", trust_remote_code=True)
48
- return model
49
-
50
- model = load_lionguard2()
51
-
52
  # --- Google Sheets Config ---
53
  GOOGLE_SHEET_URL = os.environ.get("GOOGLE_SHEET_URL")
54
  GOOGLE_CREDENTIALS = os.environ.get("GCP_SERVICE_ACCOUNT")
@@ -56,7 +40,9 @@ RESULTS_SHEET_NAME = "results"
56
  VOTES_SHEET_NAME = "votes"
57
  CHATBOT_SHEET_NAME = "chatbot"
58
 
 
59
  def get_gspread_client():
 
60
  credentials = service_account.Credentials.from_service_account_info(
61
  json.loads(GOOGLE_CREDENTIALS),
62
  scopes=[
@@ -66,7 +52,9 @@ def get_gspread_client():
66
  )
67
  return gspread.authorize(credentials)
68
 
69
- def save_results_data(row):
 
 
70
  try:
71
  gc = get_gspread_client()
72
  sheet = gc.open_by_url(GOOGLE_SHEET_URL)
@@ -75,7 +63,9 @@ def save_results_data(row):
75
  except Exception as e:
76
  print(f"Error saving results data: {e}")
77
 
78
- def save_vote_data(text_id, agree):
 
 
79
  try:
80
  gc = get_gspread_client()
81
  sheet = gc.open_by_url(GOOGLE_SHEET_URL)
@@ -89,7 +79,9 @@ def save_vote_data(text_id, agree):
89
  except Exception as e:
90
  print(f"Error saving vote data: {e}")
91
 
92
- def log_chatbot_data(row):
 
 
93
  try:
94
  gc = get_gspread_client()
95
  sheet = gc.open_by_url(GOOGLE_SHEET_URL)
@@ -104,78 +96,64 @@ def log_chatbot_data(row):
104
  except Exception as e:
105
  print(f"Error saving chatbot data: {e}")
106
 
107
- # --- Classifier logic ---
108
 
109
- def format_score_with_style(score_str):
110
- if score_str == "-":
111
- return '<span style="color: #9ca3af;">-</span>'
112
- try:
113
- score = float(score_str)
114
- percentage = int(score * 100)
115
- if score < 0.4:
116
- return f'<span style="color: #34d399; font-weight:600;">👌 {percentage}%</span>'
117
- elif 0.4 <= score < 0.7:
118
- return f'<span style="color: #fbbf24; font-weight:600;">⚠️ {percentage}%</span>'
119
- else:
120
- return f'<span style="color: #fca5a5; font-weight:600;">🚨 {percentage}%</span>'
121
- except:
122
- return score_str
123
-
124
- def format_binary_score(score):
125
- percentage = int(score * 100)
126
- if score < 0.4:
127
- return f'<div style="background:linear-gradient(135deg, #065f46 0%, #047857 100%); color:#34d399; padding:24px 0; border-radius:20px; text-align:center; font-weight:900; border:3px solid #10b981; font-size:24px; margin:24px 0; box-shadow:0 4px 24px rgba(0,0,0,0.3);">✅ Pass ({percentage}/100)</div>'
128
- elif 0.4 <= score < 0.7:
129
- return f'<div style="background:linear-gradient(135deg, #92400e 0%, #b45309 100%); color:#fbbf24; padding:24px 0; border-radius:20px; text-align:center; font-weight:900; border:3px solid #f59e0b; font-size:24px; margin:24px 0; box-shadow:0 4px 24px rgba(0,0,0,0.3);">⚠️ Warning ({percentage}/100)</div>'
130
- else:
131
- return f'<div style="background:linear-gradient(135deg, #991b1b 0%, #b91c1c 100%); color:#fca5a5; padding:24px 0; border-radius:20px; text-align:center; font-weight:900; border:3px solid #ef4444; font-size:24px; margin:24px 0; box-shadow:0 4px 24px rgba(0,0,0,0.3);">🚨 Fail ({percentage}/100)</div>'
132
 
133
- def analyze_text(text):
 
 
 
 
134
  if not text.strip():
135
- empty_html = '<div style="text-align: center; color: #9ca3af; padding: 30px; font-style: italic;">Enter text to analyze</div>'
136
- return empty_html, empty_html, "", ""
 
 
 
 
 
 
 
137
  try:
138
  text_id = str(uuid.uuid4())
139
- embeddings = get_embeddings([text])
140
- results = model.predict(embeddings)
141
  binary_score = results.get('binary', [0.0])[0]
142
-
 
 
 
 
 
 
 
 
 
143
  main_categories = ['hateful', 'insults', 'sexual', 'physical_violence', 'self_harm', 'all_other_misconduct']
144
- categories_html = []
 
 
 
 
 
 
 
 
 
145
  max_scores = {}
 
146
  for category in main_categories:
147
  subcategories = CATEGORIES[category]
148
- category_name = category.replace('_', ' ').title()
149
- category_emojis = {
150
- 'Hateful': '🤬',
151
- 'Insults': '💢',
152
- 'Sexual': '🔞',
153
- 'Physical Violence': '⚔️',
154
- 'Self Harm': '☹️',
155
- 'All Other Misconduct': '🙅‍♀️'
156
- }
157
- category_display = f"{category_emojis.get(category_name, '📝')} {category_name}"
158
  level_scores = [results.get(subcategory_key, [0.0])[0] for subcategory_key in subcategories]
159
  max_score = max(level_scores) if level_scores else 0.0
160
  max_scores[category] = max_score
161
- categories_html.append(f'''
162
- <tr>
163
- <td>{category_display}</td>
164
- <td style="text-align: center;">{format_score_with_style(f"{max_score:.4f}")}</td>
165
- </tr>
166
- ''')
167
-
168
- html_table = f'''
169
- <table style="width:100%">
170
- <thead>
171
- <tr><th>Category</th><th>Score</th></tr>
172
- </thead>
173
- <tbody>
174
- {''.join(categories_html)}
175
- </tbody>
176
- </table>
177
- '''
178
-
179
  # Save to Google Sheets if enabled
180
  if GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS:
181
  results_row = {
@@ -183,34 +161,43 @@ def analyze_text(text):
183
  "text_id": text_id,
184
  "text": text,
185
  "binary_score": binary_score,
 
186
  }
187
  for category in main_categories:
188
  results_row[f"{category}_max"] = max_scores[category]
189
  save_results_data(results_row)
190
-
191
- voting_html = '<div>Help improve LionGuard2! Rate the analysis below.</div>'
192
- return format_binary_score(binary_score), html_table, text_id, voting_html
193
-
 
 
 
 
 
 
194
  except Exception as e:
195
- error_msg = f"Error analyzing text: {str(e)}"
196
- return f'<div style="color: #fca5a5;">❌ {error_msg}</div>', '', '', ''
197
 
198
- def vote_thumbs_up(text_id):
199
- if text_id and GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS:
200
- save_vote_data(text_id, True)
201
- return '<div style="color: #34d399; font-weight:700;">🎉 Thank you!</div>'
202
- return '<div>Voting not available or analysis not yet run.</div>'
203
 
204
- def vote_thumbs_down(text_id):
205
- if text_id and GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS:
206
- save_vote_data(text_id, False)
207
- return '<div style="color: #fca5a5; font-weight:700;">📝 Thanks for the feedback!</div>'
208
- return '<div>Voting not available or analysis not yet run.</div>'
 
 
 
 
 
 
209
 
210
- # --- Guardrail Comparison logic (ASYNC VERSION) ---
211
 
212
- async def get_openai_response_async(message, system_prompt="You are a helpful assistant."):
213
- """Async version of OpenAI API call"""
 
 
214
  try:
215
  response = await async_client.chat.completions.create(
216
  model="gpt-4.1-nano",
@@ -226,8 +213,9 @@ async def get_openai_response_async(message, system_prompt="You are a helpful as
226
  except Exception as e:
227
  return f"Error: {str(e)}. Please check your OpenAI API key."
228
 
229
- async def openai_moderation_async(message):
230
- """Async version of OpenAI moderation"""
 
231
  try:
232
  response = await async_client.moderations.create(input=message)
233
  return response.results[0].flagged
@@ -235,87 +223,58 @@ async def openai_moderation_async(message):
235
  print(f"Error in OpenAI moderation: {e}")
236
  return False
237
 
238
- def lionguard_2_sync(message, threshold=0.5):
239
- """LionGuard remains sync as it's using a local model"""
 
240
  try:
241
- embeddings = get_embeddings([message])
242
- results = model.predict(embeddings)
243
- binary_prob = results['binary'][0]
244
  return binary_prob > threshold, binary_prob
245
  except Exception as e:
246
- print(f"Error in LionGuard 2: {e}")
247
  return False, 0.0
248
 
249
- async def process_no_moderation(message, history_no_mod):
 
250
  """Process message without moderation"""
251
  no_mod_response = await get_openai_response_async(message)
252
- history_no_mod.append({"role": "user", "content": message})
253
- history_no_mod.append({"role": "assistant", "content": no_mod_response})
254
- return history_no_mod
 
255
 
256
- async def process_openai_moderation(message, history_openai):
257
  """Process message with OpenAI moderation"""
258
  openai_flagged = await openai_moderation_async(message)
259
- history_openai.append({"role": "user", "content": message})
260
  if openai_flagged:
261
  openai_response = "🚫 This message has been flagged by OpenAI moderation"
262
- history_openai.append({"role": "assistant", "content": openai_response})
263
  else:
264
  openai_response = await get_openai_response_async(message)
265
- history_openai.append({"role": "assistant", "content": openai_response})
266
- return history_openai
267
 
268
- async def process_lionguard(message, history_lg):
269
- """Process message with LionGuard 2"""
270
- # Run LionGuard sync check in thread pool to not block
271
  loop = asyncio.get_event_loop()
272
- lg_flagged, lg_score = await loop.run_in_executor(None, lionguard_2_sync, message, 0.5)
273
 
274
- history_lg.append({"role": "user", "content": message})
275
  if lg_flagged:
276
- lg_response = "🚫 This message has been flagged by LionGuard 2"
277
- history_lg.append({"role": "assistant", "content": lg_response})
278
  else:
279
  lg_response = await get_openai_response_async(message)
280
- history_lg.append({"role": "assistant", "content": lg_response})
281
- return history_lg, lg_score
282
-
283
- async def process_message_async(message, history_no_mod, history_openai, history_lg):
284
- """Process message concurrently across all three guardrails"""
285
- if not message.strip():
286
- return history_no_mod, history_openai, history_lg, ""
287
-
288
- # Run all three processes concurrently using asyncio.gather
289
- results = await asyncio.gather(
290
- process_no_moderation(message, history_no_mod),
291
- process_openai_moderation(message, history_openai),
292
- process_lionguard(message, history_lg),
293
- return_exceptions=True # Continue even if one fails
294
- )
295
-
296
- # Unpack results
297
- history_no_mod = results[0] if not isinstance(results[0], Exception) else history_no_mod
298
- history_openai = results[1] if not isinstance(results[1], Exception) else history_openai
299
- history_lg_result = results[2] if not isinstance(results[2], Exception) else (history_lg, 0.0)
300
- history_lg = history_lg_result[0]
301
- lg_score = history_lg_result[1] if isinstance(history_lg_result, tuple) else 0.0
302
-
303
- # --- Logging for chatbot worksheet (runs in background) ---
304
- if GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS:
305
- try:
306
- loop = asyncio.get_event_loop()
307
- # Run logging in thread pool so it doesn't block
308
- loop.run_in_executor(None, _log_chatbot_sync, message, lg_score)
309
- except Exception as e:
310
- print(f"Chatbot logging failed: {e}")
311
 
312
- return history_no_mod, history_openai, history_lg, ""
313
 
314
- def _log_chatbot_sync(message, lg_score):
315
- """Sync helper for logging - runs in thread pool"""
316
  try:
317
- embeddings = get_embeddings([message])
318
- results = model.predict(embeddings)
319
  now = datetime.now().isoformat()
320
  text_id = str(uuid.uuid4())
321
  row = {
@@ -333,118 +292,55 @@ def _log_chatbot_sync(message, lg_score):
333
  "self_harm_l2_score": results.get(CATEGORIES['self_harm'][1], [None])[0],
334
  "aom_l1_score": results.get(CATEGORIES['all_other_misconduct'][0], [None])[0],
335
  "aom_l2_score": results.get(CATEGORIES['all_other_misconduct'][1], [None])[0],
336
- "openai_score": None
337
  }
338
  try:
339
  openai_result = client.moderations.create(input=message)
340
  row["openai_score"] = float(openai_result.results[0].category_scores.get("hate", 0.0))
341
  except Exception:
342
  row["openai_score"] = None
343
-
344
  log_chatbot_data(row)
345
  except Exception as e:
346
  print(f"Error in sync logging: {e}")
347
 
348
- def process_message(message, history_no_mod, history_openai, history_lg):
349
- """Wrapper function for Gradio (converts async to sync)"""
350
- return asyncio.run(process_message_async(message, history_no_mod, history_openai, history_lg))
351
-
352
- def clear_all_chats():
353
- return [], [], []
354
-
355
- # ---- MAIN GRADIO UI ----
356
 
357
- DISCLAIMER = """
358
- <div style='background: #fbbf24; color: #1e293b; border-radius: 8px; padding: 14px; margin-bottom: 12px; font-size: 15px; font-weight:500;'>
359
- ⚠️ LionGuard 2 may make mistakes. All entries are logged (anonymised) to improve the model.
360
- </div>
361
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
362
 
363
- with gr.Blocks(title="LionGuard 2 Demo", theme=gr.themes.Soft()) as demo:
364
- gr.HTML("<h1 style='text-align:center'>LionGuard 2 Demo</h1>")
365
-
366
- with gr.Tabs():
367
- with gr.Tab("Classifier"):
368
- gr.HTML(DISCLAIMER)
369
- with gr.Row():
370
- with gr.Column(scale=1, min_width=400):
371
- text_input = gr.Textbox(
372
- label="Enter text to analyze:",
373
- placeholder="Type your text here...",
374
- lines=8,
375
- max_lines=16,
376
- container=True
377
- )
378
- analyze_btn = gr.Button("Analyze", variant="primary")
379
- with gr.Column(scale=1, min_width=400):
380
- binary_output = gr.HTML(
381
- value='<div style="text-align: center; color: #9ca3af; padding: 30px; font-style: italic; font-size:36px;">Enter text to analyze</div>'
382
- )
383
- category_table = gr.HTML(
384
- value='<div style="text-align: center; color: #9ca3af; padding: 30px; font-style: italic;">Category scores will appear here after analysis</div>'
385
- )
386
- voting_feedback = gr.HTML(value="")
387
- current_text_id = gr.Textbox(value="", visible=False)
388
-
389
- with gr.Row(visible=False) as voting_buttons_row:
390
- thumbs_up_btn = gr.Button("👍 Looks Accurate", variant="primary")
391
- thumbs_down_btn = gr.Button("👎 Looks Wrong", variant="secondary")
392
-
393
- def analyze_and_show_voting(text):
394
- binary_score, category_table_val, text_id, voting_html = analyze_text(text)
395
- show_vote = gr.update(visible=True) if text_id else gr.update(visible=False)
396
- return binary_score, category_table_val, text_id, show_vote, "", ""
397
-
398
- analyze_btn.click(
399
- analyze_and_show_voting,
400
- inputs=[text_input],
401
- outputs=[binary_output, category_table, current_text_id, voting_buttons_row, voting_feedback, voting_feedback]
402
- )
403
- text_input.submit(
404
- analyze_and_show_voting,
405
- inputs=[text_input],
406
- outputs=[binary_output, category_table, current_text_id, voting_buttons_row, voting_feedback, voting_feedback]
407
- )
408
- thumbs_up_btn.click(vote_thumbs_up, inputs=[current_text_id], outputs=[voting_feedback])
409
- thumbs_down_btn.click(vote_thumbs_down, inputs=[current_text_id], outputs=[voting_feedback])
410
-
411
- with gr.Tab("Guardrail Comparison"):
412
- gr.HTML(DISCLAIMER)
413
- with gr.Row():
414
- with gr.Column(scale=1):
415
- gr.Markdown("#### 🔵 No Moderation")
416
- chatbot_no_mod = gr.Chatbot(height=650, label="No Moderation", show_label=False, bubble_full_width=False, type='messages')
417
- with gr.Column(scale=1):
418
- gr.Markdown("#### 🟠 OpenAI Moderation")
419
- chatbot_openai = gr.Chatbot(height=650, label="OpenAI Moderation", show_label=False, bubble_full_width=False, type='messages')
420
- with gr.Column(scale=1):
421
- gr.Markdown("#### 🛡️ LionGuard 2")
422
- chatbot_lg = gr.Chatbot(height=650, label="LionGuard 2", show_label=False, bubble_full_width=False, type='messages')
423
- gr.Markdown("##### 💬 Send Message to All Models")
424
- with gr.Row():
425
- message_input = gr.Textbox(
426
- placeholder="Type your message to compare responses...",
427
- show_label=False,
428
- scale=4
429
- )
430
- send_btn = gr.Button("Send", variant="primary", scale=1)
431
- with gr.Row():
432
- clear_btn = gr.Button("Clear All Chats", variant="stop")
433
-
434
- send_btn.click(
435
- process_message,
436
- inputs=[message_input, chatbot_no_mod, chatbot_openai, chatbot_lg],
437
- outputs=[chatbot_no_mod, chatbot_openai, chatbot_lg, message_input]
438
- )
439
- message_input.submit(
440
- process_message,
441
- inputs=[message_input, chatbot_no_mod, chatbot_openai, chatbot_lg],
442
- outputs=[chatbot_no_mod, chatbot_openai, chatbot_lg, message_input]
443
- )
444
- clear_btn.click(
445
- clear_all_chats,
446
- outputs=[chatbot_no_mod, chatbot_openai, chatbot_lg]
447
- )
448
-
449
- if __name__ == "__main__":
450
- demo.launch()
 
1
  """
2
+ Business logic for moderation and guardrail services
3
  """
4
 
 
5
  import json
6
  import os
 
7
  import uuid
8
  import asyncio
9
  from datetime import datetime
10
+ from typing import Dict, List, Tuple, Optional
11
 
 
12
  import openai
 
13
  import gspread
14
  from google.oauth2 import service_account
 
15
 
16
+ # Import from parent directory
17
+ import sys
18
+ sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
19
+ from utils import MODEL_CONFIGS, predict_with_model
20
 
21
+ # --- Categories ---
22
  CATEGORIES = {
23
  "binary": ["binary"],
24
  "hateful": ["hateful_l1", "hateful_l2"],
25
  "insults": ["insults"],
26
+ "sexual": ["sexual_l1", "sexual_l2"],
 
 
 
27
  "physical_violence": ["physical_violence"],
28
  "self_harm": ["self_harm_l1", "self_harm_l2"],
29
+ "all_other_misconduct": ["all_other_misconduct_l1", "all_other_misconduct_l2"],
 
 
 
30
  }
31
 
32
  # --- OpenAI Setup ---
 
33
  client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
34
  async_client = openai.AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
35
 
 
 
 
 
 
 
 
36
  # --- Google Sheets Config ---
37
  GOOGLE_SHEET_URL = os.environ.get("GOOGLE_SHEET_URL")
38
  GOOGLE_CREDENTIALS = os.environ.get("GCP_SERVICE_ACCOUNT")
 
40
  VOTES_SHEET_NAME = "votes"
41
  CHATBOT_SHEET_NAME = "chatbot"
42
 
43
+
44
  def get_gspread_client():
45
+ """Get authenticated Google Sheets client"""
46
  credentials = service_account.Credentials.from_service_account_info(
47
  json.loads(GOOGLE_CREDENTIALS),
48
  scopes=[
 
52
  )
53
  return gspread.authorize(credentials)
54
 
55
+
56
+ def save_results_data(row: Dict):
57
+ """Save moderation results to Google Sheets"""
58
  try:
59
  gc = get_gspread_client()
60
  sheet = gc.open_by_url(GOOGLE_SHEET_URL)
 
63
  except Exception as e:
64
  print(f"Error saving results data: {e}")
65
 
66
+
67
+ def save_vote_data(text_id: str, agree: bool):
68
+ """Save user feedback vote to Google Sheets"""
69
  try:
70
  gc = get_gspread_client()
71
  sheet = gc.open_by_url(GOOGLE_SHEET_URL)
 
79
  except Exception as e:
80
  print(f"Error saving vote data: {e}")
81
 
82
+
83
+ def log_chatbot_data(row: Dict):
84
+ """Log chatbot interaction to Google Sheets"""
85
  try:
86
  gc = get_gspread_client()
87
  sheet = gc.open_by_url(GOOGLE_SHEET_URL)
 
96
  except Exception as e:
97
  print(f"Error saving chatbot data: {e}")
98
 
 
99
 
100
+ # --- Moderation Logic ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
+ def analyze_text(text: str, model_key: str = None) -> Dict:
103
+ """
104
+ Analyze text for moderation risks
105
+ Returns dict with binary score, categories, text_id, and model info
106
+ """
107
  if not text.strip():
108
+ return {
109
+ "binary_score": 0.0,
110
+ "binary_verdict": "pass",
111
+ "binary_percentage": 0,
112
+ "categories": [],
113
+ "text_id": "",
114
+ "model_used": model_key or "lionguard-2.1"
115
+ }
116
+
117
  try:
118
  text_id = str(uuid.uuid4())
119
+ results, selected_model_key = predict_with_model([text], model_key)
 
120
  binary_score = results.get('binary', [0.0])[0]
121
+
122
+ # Determine verdict
123
+ if binary_score < 0.4:
124
+ verdict = "pass"
125
+ elif 0.4 <= binary_score < 0.7:
126
+ verdict = "warn"
127
+ else:
128
+ verdict = "fail"
129
+
130
+ # Process categories
131
  main_categories = ['hateful', 'insults', 'sexual', 'physical_violence', 'self_harm', 'all_other_misconduct']
132
+ category_emojis = {
133
+ 'hateful': '🤬',
134
+ 'insults': '💢',
135
+ 'sexual': '🔞',
136
+ 'physical_violence': '⚔️',
137
+ 'self_harm': '☹️',
138
+ 'all_other_misconduct': '🙅‍♀️'
139
+ }
140
+
141
+ categories_list = []
142
  max_scores = {}
143
+
144
  for category in main_categories:
145
  subcategories = CATEGORIES[category]
 
 
 
 
 
 
 
 
 
 
146
  level_scores = [results.get(subcategory_key, [0.0])[0] for subcategory_key in subcategories]
147
  max_score = max(level_scores) if level_scores else 0.0
148
  max_scores[category] = max_score
149
+
150
+ category_name = category.replace('_', ' ').title()
151
+ categories_list.append({
152
+ "name": category_name,
153
+ "emoji": category_emojis.get(category, '📝'),
154
+ "max_score": max_score
155
+ })
156
+
 
 
 
 
 
 
 
 
 
 
157
  # Save to Google Sheets if enabled
158
  if GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS:
159
  results_row = {
 
161
  "text_id": text_id,
162
  "text": text,
163
  "binary_score": binary_score,
164
+ "model": selected_model_key,
165
  }
166
  for category in main_categories:
167
  results_row[f"{category}_max"] = max_scores[category]
168
  save_results_data(results_row)
169
+
170
+ return {
171
+ "binary_score": binary_score,
172
+ "binary_verdict": verdict,
173
+ "binary_percentage": int(binary_score * 100),
174
+ "categories": categories_list,
175
+ "text_id": text_id,
176
+ "model_used": selected_model_key
177
+ }
178
+
179
  except Exception as e:
180
+ print(f"Error analyzing text: {e}")
181
+ raise
182
 
 
 
 
 
 
183
 
184
def submit_feedback(text_id: str, agree: bool) -> Dict:
    """Record a user's agree/disagree vote for a previous moderation result.

    Returns a dict with "success" and a user-facing "message"; voting is only
    available when the Google Sheets configuration is present.
    """
    if not text_id:
        return {"success": False, "message": "No text ID provided"}

    sheets_configured = bool(GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS)
    if not sheets_configured:
        return {"success": False, "message": "Voting not available"}

    save_vote_data(text_id, agree)
    if agree:
        message = "🎉 Thank you!"
    else:
        message = "📝 Thanks for the feedback!"
    return {"success": True, "message": message}
195
 
 
196
 
197
+ # --- Guardrail Comparison Logic (Async) ---
198
+
199
+ async def get_openai_response_async(message: str, system_prompt: str = "You are a helpful assistant.") -> str:
200
+ """Get OpenAI chat response asynchronously"""
201
  try:
202
  response = await async_client.chat.completions.create(
203
  model="gpt-4.1-nano",
 
213
  except Exception as e:
214
  return f"Error: {str(e)}. Please check your OpenAI API key."
215
 
216
+
217
+ async def openai_moderation_async(message: str) -> bool:
218
+ """Check if message is flagged by OpenAI moderation"""
219
  try:
220
  response = await async_client.moderations.create(input=message)
221
  return response.results[0].flagged
 
223
  print(f"Error in OpenAI moderation: {e}")
224
  return False
225
 
226
+
227
def lionguard_2_sync(message: str, model_key: str, threshold: float = 0.5) -> Tuple[bool, float]:
    """Check if message is flagged by Lionguard.

    Returns (flagged, binary_probability); on any inference error the message
    is treated as not flagged with probability 0.0 (best-effort behavior).
    """
    try:
        scores, _ = predict_with_model([message], model_key)
        probability = scores.get('binary', [0.0])[0]
        flagged = probability > threshold
        return flagged, probability
    except Exception as e:
        print(f"Error in LionGuard inference for {model_key}: {e}")
        return False, 0.0
236
 
237
+
238
async def process_no_moderation(message: str, history: List[Dict]) -> List[Dict]:
    """Append the user message and an unmoderated assistant reply to *history*."""
    reply = await get_openai_response_async(message)
    history.extend([
        {"role": "user", "content": message},
        {"role": "assistant", "content": reply},
    ])
    return history
244
+
245
 
246
async def process_openai_moderation(message: str, history: List[Dict]) -> List[Dict]:
    """Answer *message*, blocking it first if OpenAI moderation flags it.

    Appends the user turn plus either a blocked notice or the model's
    reply to *history* in place, then returns the same list.
    """
    flagged = await openai_moderation_async(message)
    history.append({"role": "user", "content": message})
    if flagged:
        reply = "🚫 This message has been flagged by OpenAI moderation"
    else:
        reply = await get_openai_response_async(message)
    history.append({"role": "assistant", "content": reply})
    return history
257
 
258
+
259
async def process_lionguard(message: str, history: List[Dict], model_key: str) -> Tuple[List[Dict], float]:
    """Answer *message*, blocking it first if LionGuard flags it.

    Runs the synchronous LionGuard inference in a thread-pool executor so
    the event loop is not blocked. Appends the user turn plus either a
    blocked notice or the model's reply to *history* in place.

    Returns:
        ``(history, score)`` where ``score`` is LionGuard's binary score.
    """
    loop = asyncio.get_event_loop()
    flagged, score = await loop.run_in_executor(
        None, lionguard_2_sync, message, model_key, 0.5
    )

    history.append({"role": "user", "content": message})
    if flagged:
        reply = f"🚫 This message has been flagged by {MODEL_CONFIGS[model_key]['label']}"
    else:
        reply = await get_openai_response_async(message)
    history.append({"role": "assistant", "content": reply})
    return history, score
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
272
 
 
273
 
274
+ def _log_chatbot_sync(message: str, lg_score: float, model_key: str):
275
+ """Sync helper for logging chatbot data"""
276
  try:
277
+ results, selected_model_key = predict_with_model([message], model_key)
 
278
  now = datetime.now().isoformat()
279
  text_id = str(uuid.uuid4())
280
  row = {
 
292
  "self_harm_l2_score": results.get(CATEGORIES['self_harm'][1], [None])[0],
293
  "aom_l1_score": results.get(CATEGORIES['all_other_misconduct'][0], [None])[0],
294
  "aom_l2_score": results.get(CATEGORIES['all_other_misconduct'][1], [None])[0],
295
+ "openai_score": None,
296
  }
297
  try:
298
  openai_result = client.moderations.create(input=message)
299
  row["openai_score"] = float(openai_result.results[0].category_scores.get("hate", 0.0))
300
  except Exception:
301
  row["openai_score"] = None
302
+
303
  log_chatbot_data(row)
304
  except Exception as e:
305
  print(f"Error in sync logging: {e}")
306
 
 
 
 
 
 
 
 
 
307
 
308
async def process_chat_message(
    message: str,
    model_key: str,
    history_no_mod: List[Dict],
    history_openai: List[Dict],
    history_lg: List[Dict]
) -> Tuple[List[Dict], List[Dict], List[Dict], Optional[float]]:
    """
    Process a chat message concurrently across all three guardrails.

    Each history list is updated in place by the corresponding guardrail
    coroutine. A branch that raises is logged and its history is returned
    unchanged instead of propagating the error.

    Returns:
        ``(history_no_mod, history_openai, history_lg, lg_score)`` where
        ``lg_score`` is LionGuard's binary score (``None`` for blank
        input, ``0.0`` if the LionGuard branch failed).
    """
    if not message.strip():
        return history_no_mod, history_openai, history_lg, None

    # Run all three guardrail pipelines concurrently; capture exceptions
    # so one failing branch does not cancel the others.
    no_mod_result, openai_result, lg_result = await asyncio.gather(
        process_no_moderation(message, history_no_mod),
        process_openai_moderation(message, history_openai),
        process_lionguard(message, history_lg, model_key),
        return_exceptions=True,
    )

    # Unpack each branch, logging failures instead of silently dropping
    # them (the original swallowed gather exceptions without a trace).
    if isinstance(no_mod_result, Exception):
        print(f"No-moderation branch failed: {no_mod_result}")
    else:
        history_no_mod = no_mod_result

    if isinstance(openai_result, Exception):
        print(f"OpenAI moderation branch failed: {openai_result}")
    else:
        history_openai = openai_result

    if isinstance(lg_result, Exception):
        print(f"LionGuard branch failed: {lg_result}")
        lg_score = 0.0
    else:
        history_lg, lg_score = lg_result

    # Log to Google Sheets in the background (fire-and-forget).
    if GOOGLE_SHEET_URL and GOOGLE_CREDENTIALS:
        try:
            # get_running_loop() is the supported API inside a coroutine;
            # asyncio.get_event_loop() is deprecated in this context.
            loop = asyncio.get_running_loop()
            loop.run_in_executor(None, _log_chatbot_sync, message, lg_score, model_key)
        except Exception as e:
            print(f"Chatbot logging failed: {e}")

    return history_no_mod, history_openai, history_lg, lg_score
346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/frontend/index.html ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Lionguard</title>
7
+ <link rel="stylesheet" href="/static/style.css">
8
+ </head>
9
+ <body>
10
+ <!-- Header -->
11
+ <header class="header">
12
+ <div class="container">
13
+ <div class="header-content">
14
+ <div class="logo-section">
15
+ <img src="/static/logo.png" alt="Lionguard Logo" class="logo">
16
+ <div class="logo-text">
17
+ <h1>Lionguard</h1>
18
+ <p>A content moderation tool designed for Singapore</p>
19
+ </div>
20
+ </div>
21
+ <div class="header-controls">
22
+ <nav class="tabs" aria-label="Primary navigation">
23
+ <div class="nav-dropdown">
24
+ <button
25
+ class="tab dropdown-toggle"
26
+ aria-haspopup="true"
27
+ aria-expanded="false"
28
+ >
29
+ <span class="tab-icon">🛠️</span>
30
+ Demos
31
+ <span class="dropdown-caret">▾</span>
32
+ </button>
33
+ <div class="dropdown-menu" role="menu">
34
+ <button class="tab dropdown-item active" data-tab="detector" role="menuitem">
35
+ <span class="tab-icon">🔍</span>
36
+ Detector
37
+ </button>
38
+ <button class="tab dropdown-item" data-tab="chat" role="menuitem">
39
+ <span class="tab-icon">💬</span>
40
+ Chatbot Guardrail
41
+ </button>
42
+ </div>
43
+ </div>
44
+ <button class="tab nav-link" data-tab="about">
45
+ <span class="tab-icon">ℹ️</span>
46
+ About
47
+ </button>
48
+ </nav>
49
+ <button id="theme-toggle" class="theme-icon-button" aria-label="Toggle theme">
50
+ <span class="theme-icon" aria-hidden="true">🌞</span>
51
+ </button>
52
+ </div>
53
+ </div>
54
+ </div>
55
+ </header>
56
+
57
+ <!-- Main Content -->
58
+ <main class="container">
59
+ <!-- Detector Tab Content -->
60
+ <div id="detector-content" class="tab-content active">
61
+ <!-- Disclaimer -->
62
+ <div class="warning-card">
63
+ ⚠️ Inputs are anonymised and logged to improve Lionguard's moderation models.
64
+ </div>
65
+
66
+ <!-- Model Selector -->
67
+ <div class="model-selector-prominent">
68
+ <div class="model-selector-header">
69
+ <h3>Model Selection</h3>
70
+ </div>
71
+ <div class="model-dropdown">
72
+ <select id="model-select" class="model-select" aria-label="Detector guardrail model">
73
+ <option value="lionguard-2.1" selected>Lionguard 2.1 (Gemini Embeddings, API)</option>
74
+ <option value="lionguard-2">Lionguard 2 (OpenAI Embeddings, API)</option>
75
+ <option value="lionguard-2-lite">Lionguard 2 Lite (Gemma Embeddings, Local)</option>
76
+ </select>
77
+ </div>
78
+ </div>
79
+
80
+ <!-- Analysis Section -->
81
+ <div class="analysis-grid">
82
+ <!-- Input Panel -->
83
+ <div class="panel input-panel">
84
+ <h3>Input</h3>
85
+ <textarea
86
+ id="text-input"
87
+ placeholder="Enter text to analyze for content moderation..."
88
+ rows="10"
89
+ ></textarea>
90
+ <button id="analyze-btn" class="btn btn-primary">
91
+ <span class="btn-icon">🔍</span>
92
+ Analyze
93
+ </button>
94
+ </div>
95
+
96
+ <!-- Results Panel -->
97
+ <div class="panel results-panel">
98
+ <h3>Analysis</h3>
99
+
100
+ <!-- Binary Score -->
101
+ <div id="binary-result" class="binary-placeholder">
102
+ <div class="placeholder-icon">📝</div>
103
+ <p>Enter text to analyze</p>
104
+ </div>
105
+
106
+ <!-- Category Scores -->
107
+ <div id="category-results" class="category-placeholder">
108
+ <p>Category scores will appear here after analysis</p>
109
+ </div>
110
+
111
+ <!-- Feedback Section -->
112
+ <div id="feedback-section" class="feedback-section" style="display: none;">
113
+ <p class="feedback-prompt">Does this look correct?</p>
114
+ <div class="feedback-buttons">
115
+ <button id="thumbs-up" class="btn btn-success">
116
+ <span>👍</span>
117
+ Yes
118
+ </button>
119
+ <button id="thumbs-down" class="btn btn-secondary">
120
+ <span>👎</span>
121
+ No
122
+ </button>
123
+ </div>
124
+ <div id="feedback-message" class="feedback-message"></div>
125
+ </div>
126
+ </div>
127
+ </div>
128
+ </div>
129
+
130
+ <!-- Chatbot Guardrail Tab Content -->
131
+ <div id="chat-content" class="tab-content full-width-section">
132
+ <!-- Disclaimer -->
133
+ <div class="warning-card">
134
+ ⚠️ Inputs are anonymised and logged to improve Lionguard's moderation models.
135
+ </div>
136
+
137
+ <!-- Model Selector for Guardrail -->
138
+ <div class="model-selector-prominent">
139
+ <div class="model-selector-header">
140
+ <h3>Model Selection</h3>
141
+ </div>
142
+ <div class="model-dropdown">
143
+ <select id="model-select-gc" class="model-select" aria-label="Chat guardrail model">
144
+ <option value="lionguard-2.1" selected>Lionguard 2.1 (Gemini Embeddings, API)</option>
145
+ <option value="lionguard-2">Lionguard 2 (OpenAI Embeddings, API)</option>
146
+ <option value="lionguard-2-lite">Lionguard 2 Lite (Gemma Embeddings, Local)</option>
147
+ </select>
148
+ </div>
149
+ </div>
150
+
151
+ <!-- Chat Grid -->
152
+ <div class="chat-grid">
153
+ <!-- No Moderation -->
154
+ <div class="chat-panel">
155
+ <div class="chat-header">
156
+ <span class="chat-icon">🔵</span>
157
+ <h4>No Moderation</h4>
158
+ </div>
159
+ <div id="chat-no-mod" class="chat-messages"></div>
160
+ </div>
161
+
162
+ <!-- OpenAI Moderation -->
163
+ <div class="chat-panel">
164
+ <div class="chat-header">
165
+ <span class="chat-icon">🟠</span>
166
+ <h4>OpenAI Moderation</h4>
167
+ </div>
168
+ <div id="chat-openai" class="chat-messages"></div>
169
+ </div>
170
+
171
+ <!-- Lionguard -->
172
+ <div class="chat-panel">
173
+ <div class="chat-header">
174
+ <span class="chat-icon">🛡️</span>
175
+ <h4>Lionguard</h4>
176
+ </div>
177
+ <div id="chat-lionguard" class="chat-messages"></div>
178
+ </div>
179
+ </div>
180
+
181
+ <!-- Message Input -->
182
+ <div class="message-input-section">
183
+ <div class="message-input-group">
184
+ <input
185
+ type="text"
186
+ id="message-input"
187
+ placeholder="Enter message to test across all guardrails..."
188
+ >
189
+ <button id="send-btn" class="btn btn-primary">Send</button>
190
+ <button id="clear-btn" class="btn btn-danger">Clear</button>
191
+ </div>
192
+ </div>
193
+ </div>
194
+
195
+ <!-- About Tab Content -->
196
+ <div id="about-content" class="tab-content">
197
+ <!-- Hero Section -->
198
+ <section class="about-intro-section">
199
+ <p class="lead">Lionguard is a family of open-source content moderation models specifically designed for Singapore's multilingual environment. Optimized for Singapore’s linguistic mix, including Singlish, Mandarin, Malay, and Tamil, Lionguard delivers accurate moderation grounded in local usage and cultural nuance.</p>
200
+ <p class="lead" style="font-style: italic;">Developed by <a href="https://www.tech.gov.sg/" target="_blank" style="color: var(--primary-red); text-decoration: none; font-weight: 600;">GovTech Singapore</a>.</p>
201
+ </section>
202
+
203
+ <!-- Resources Section -->
204
+ <section class="about-resources-grid">
205
+ <!-- Models -->
206
+ <div class="resource-card">
207
+ <h3>🤗 Open-Sourced Models</h3>
208
+ <div class="resource-list">
209
+ <a href="https://huggingface.co/govtech/lionguard-2.1" target="_blank">Lionguard 2.1</a>
210
+ <a href="https://huggingface.co/govtech/lionguard-2" target="_blank">Lionguard 2</a>
211
+ <a href="https://huggingface.co/govtech/lionguard-2-lite" target="_blank">Lionguard 2 Lite</a>
212
+ <a href="https://huggingface.co/govtech/lionguard-v1" target="_blank">Lionguard 1</a>
213
+ </div>
214
+ </div>
215
+
216
+ <!-- Datasets -->
217
+ <div class="resource-card">
218
+ <h3>📊 Open-Sourced Datasets</h3>
219
+ <div class="resource-list">
220
+ <a href="https://huggingface.co/datasets/govtech/lionguard-2-synthetic-instruct" target="_blank">Subset of Training Data</a>
221
+ <a href="https://huggingface.co/datasets/govtech/RabakBench" target="_blank">RabakBench</a>
222
+ </div>
223
+ </div>
224
+
225
+ <!-- Blog Posts -->
226
+ <div class="resource-card">
227
+ <h3>📝 Blog Posts</h3>
228
+ <div class="resource-list">
229
+ <a href="https://medium.com/dsaid-govtech/lionguard-2-8066d4e20d16" target="_blank">Lionguard 2</a>
230
+ <a href="https://medium.com/dsaid-govtech/building-lionguard-a-contextualised-moderation-classifier-to-tackle-local-unsafe-content-8f68c8f13179" target="_blank">Lionguard</a>
231
+ </div>
232
+ </div>
233
+
234
+ <!-- Papers -->
235
+ <div class="resource-card">
236
+ <h3>📄 Research Papers</h3>
237
+ <div class="resource-list">
238
+ <a href="https://arxiv.org/abs/2507.05980" target="_blank">Lionguard 2 (arXiv:2507.05980)</a>
239
+ <a href="https://arxiv.org/abs/2507.15339" target="_blank">RabakBench (arXiv:2507.15339)</a>
240
+ <a href="https://arxiv.org/abs/2407.10995" target="_blank">Lionguard 1 (arXiv:2407.10995)</a>
241
+ </div>
242
+ </div>
243
+ </section>
244
+ </div>
245
+ </main>
246
+
247
+ <!-- Footer -->
248
+ <footer class="footer">
249
+ <div class="container">
250
+ <p>Lionguard · Powered by <a href="https://www.tech.gov.sg/" target="_blank">GovTech</a></p>
251
+ </div>
252
+ </footer>
253
+
254
+ <script src="/static/script.js"></script>
255
+ </body>
256
+ </html>
app/frontend/script.js ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // LionGuard 2 Frontend JavaScript
2
+
3
+ // State management
4
+ const state = {
5
+ selectedModel: 'lionguard-2.1',
6
+ selectedModelGC: 'lionguard-2.1',
7
+ currentTextId: '',
8
+ chatHistories: {
9
+ no_moderation: [],
10
+ openai_moderation: [],
11
+ lionguard: []
12
+ }
13
+ };
14
+
15
+ // Utility functions
16
// Put a button into its loading state; returns the label to restore later.
function showLoading(button) {
    const previousLabel = button.textContent;
    button.disabled = true;
    button.classList.add('loading');
    button.textContent = 'Loading...';
    return previousLabel;
}
23
+
24
// Restore a button from its loading state, reinstating the saved label.
function hideLoading(button, originalText) {
    button.textContent = originalText;
    button.classList.remove('loading');
    button.disabled = false;
}
29
+
30
// Map a 0..1 risk score onto a presentation bucket:
// < 0.4 → good, < 0.7 → warn, otherwise bad.
function getScoreLevel(score) {
    const thresholds = [
        [0.4, { className: 'good', icon: '👌', title: 'Low risk' }],
        [0.7, { className: 'warn', icon: '⚠️', title: 'Needs review' }],
    ];
    for (const [limit, level] of thresholds) {
        if (score < limit) {
            return level;
        }
    }
    return { className: 'bad', icon: '🚨', title: 'High risk' };
}
39
+
40
// Render a risk score as a coloured "chip" HTML snippet.
function formatScore(score) {
    const level = getScoreLevel(score);
    const pct = Math.round(score * 100);

    return `<span class="score-chip ${level.className}" title="${level.title}">${level.icon} ${pct}%</span>`;
}
46
+
47
// Render a 10-segment meter for a 0..1 score; filled segments carry the
// colour class of the score's risk level.
function renderCategoryMeter(score) {
    const filledCount = Math.min(10, Math.round(score * 10));
    const { className } = getScoreLevel(score);

    let segments = '';
    for (let i = 0; i < 10; i++) {
        const cls = i < filledCount ? `filled ${className}` : '';
        segments += `<span class="category-meter-segment ${cls}"></span>`;
    }

    return `<div class="category-meter" aria-label="${Math.round(score * 100)}%">${segments}</div>`;
}
59
+
60
+ // Tab switching
61
+ function initTabs() {
62
+ const tabs = document.querySelectorAll('.tab[data-tab]');
63
+ const tabContents = document.querySelectorAll('.tab-content');
64
+ const dropdownToggle = document.querySelector('.dropdown-toggle');
65
+ const demoTabs = ['detector', 'chat'];
66
+
67
+ const updateDropdownState = (targetTab) => {
68
+ if (!dropdownToggle) return;
69
+ if (demoTabs.includes(targetTab)) {
70
+ dropdownToggle.classList.add('active');
71
+ } else {
72
+ dropdownToggle.classList.remove('active');
73
+ }
74
+ };
75
+
76
+ tabs.forEach(tab => {
77
+ tab.addEventListener('click', () => {
78
+ const targetTab = tab.dataset.tab;
79
+
80
+ // Update tabs
81
+ tabs.forEach(t => t.classList.remove('active'));
82
+ tab.classList.add('active');
83
+
84
+ // Update content
85
+ tabContents.forEach(content => {
86
+ content.classList.remove('active');
87
+ if (content.id === `${targetTab}-content`) {
88
+ content.classList.add('active');
89
+ }
90
+ });
91
+
92
+ updateDropdownState(targetTab);
93
+
94
+ // Smooth scroll to top when switching tabs
95
+ window.scrollTo({ top: 0, behavior: 'smooth' });
96
+ });
97
+ });
98
+
99
+ const initialActiveTab = document.querySelector('.tab[data-tab].active');
100
+ if (initialActiveTab) {
101
+ updateDropdownState(initialActiveTab.dataset.tab);
102
+ }
103
+ }
104
+
105
+ function initNavDropdown() {
106
+ const dropdown = document.querySelector('.nav-dropdown');
107
+ if (!dropdown) return;
108
+
109
+ const toggle = dropdown.querySelector('.dropdown-toggle');
110
+ const dropdownTabs = dropdown.querySelectorAll('.dropdown-item[data-tab]');
111
+
112
+ const closeDropdown = () => {
113
+ dropdown.classList.remove('open');
114
+ toggle.setAttribute('aria-expanded', 'false');
115
+ };
116
+
117
+ const toggleDropdown = (event) => {
118
+ event.stopPropagation();
119
+ const isOpen = dropdown.classList.toggle('open');
120
+ toggle.setAttribute('aria-expanded', isOpen ? 'true' : 'false');
121
+ };
122
+
123
+ toggle.addEventListener('click', toggleDropdown);
124
+
125
+ dropdownTabs.forEach(tab => {
126
+ tab.addEventListener('click', () => {
127
+ closeDropdown();
128
+ });
129
+ });
130
+
131
+ document.addEventListener('click', (event) => {
132
+ if (!dropdown.contains(event.target)) {
133
+ closeDropdown();
134
+ }
135
+ });
136
+ }
137
+
138
+ // Model selection for Classifier
139
// Keep state.selectedModel in sync with the Detector-tab model dropdown.
function initModelSelector() {
    const dropdown = document.getElementById('model-select');
    if (!dropdown) return;
    dropdown.value = state.selectedModel;
    dropdown.addEventListener('change', () => {
        state.selectedModel = dropdown.value;
    });
}
147
+
148
+ // Model selection for Guardrail Comparison
149
// Keep state.selectedModelGC in sync with the chat guardrail dropdown.
function initModelSelectorGC() {
    const dropdown = document.getElementById('model-select-gc');
    if (!dropdown) return;
    dropdown.value = state.selectedModelGC;
    dropdown.addEventListener('change', () => {
        state.selectedModelGC = dropdown.value;
    });
}
157
+
158
+ // Classifier: Analyze text
159
+ async function analyzeText() {
160
+ const textInput = document.getElementById('text-input');
161
+ const analyzeBtn = document.getElementById('analyze-btn');
162
+ const binaryResult = document.getElementById('binary-result');
163
+ const categoryResults = document.getElementById('category-results');
164
+ const feedbackSection = document.getElementById('feedback-section');
165
+ const feedbackMessage = document.getElementById('feedback-message');
166
+
167
+ const text = textInput.value.trim();
168
+
169
+ if (!text) {
170
+ alert('Please enter some text to analyze');
171
+ return;
172
+ }
173
+
174
+ const originalText = showLoading(analyzeBtn);
175
+ feedbackMessage.textContent = '';
176
+ feedbackMessage.className = 'feedback-message';
177
+
178
+ try {
179
+ const response = await fetch('/moderate', {
180
+ method: 'POST',
181
+ headers: {
182
+ 'Content-Type': 'application/json'
183
+ },
184
+ body: JSON.stringify({
185
+ text: text,
186
+ model: state.selectedModel
187
+ })
188
+ });
189
+
190
+ if (!response.ok) {
191
+ throw new Error('Failed to analyze text');
192
+ }
193
+
194
+ const data = await response.json();
195
+
196
+ // Display binary result
197
+ const verdictClass = data.binary_verdict;
198
+ const verdictText = verdictClass.charAt(0).toUpperCase() + verdictClass.slice(1);
199
+ const verdictIcons = {
200
+ 'pass': '✅',
201
+ 'warn': '⚠️',
202
+ 'fail': '🚨'
203
+ };
204
+
205
+ binaryResult.innerHTML = `
206
+ <div class="binary-card ${verdictClass}">
207
+ <div class="binary-icon">${verdictIcons[verdictClass]}</div>
208
+ <div class="binary-body">
209
+ <div class="binary-label">Overall</div>
210
+ <div class="binary-score-line">
211
+ <h2>${verdictText}</h2>
212
+ <span class="binary-percentage">${data.binary_percentage}/100</span>
213
+ </div>
214
+ </div>
215
+ </div>
216
+ `;
217
+
218
+ // Display category results
219
+ const categoryHTML = data.categories.map(cat => `
220
+ <div class="category-card">
221
+ <div class="category-label">${cat.emoji} ${cat.name}</div>
222
+ ${renderCategoryMeter(cat.max_score)}
223
+ <div class="category-score">${formatScore(cat.max_score)}</div>
224
+ </div>
225
+ `).join('');
226
+
227
+ categoryResults.innerHTML = `
228
+ <div class="category-grid">
229
+ ${categoryHTML}
230
+ </div>
231
+ `;
232
+
233
+ // Show feedback section
234
+ state.currentTextId = data.text_id;
235
+ feedbackSection.style.display = 'block';
236
+
237
+ } catch (error) {
238
+ console.error('Error:', error);
239
+ binaryResult.innerHTML = `
240
+ <div style="color: #E63946; padding: 20px; text-align: center;">
241
+ ❌ Error analyzing text: ${error.message}
242
+ </div>
243
+ `;
244
+ } finally {
245
+ hideLoading(analyzeBtn, originalText);
246
+ }
247
+ }
248
+
249
+ // Classifier: Submit feedback
250
+ async function submitFeedback(agree) {
251
+ const feedbackMessage = document.getElementById('feedback-message');
252
+
253
+ if (!state.currentTextId) {
254
+ feedbackMessage.textContent = 'No analysis to provide feedback on';
255
+ feedbackMessage.className = 'feedback-message info';
256
+ return;
257
+ }
258
+
259
+ try {
260
+ const response = await fetch('/send_feedback', {
261
+ method: 'POST',
262
+ headers: {
263
+ 'Content-Type': 'application/json'
264
+ },
265
+ body: JSON.stringify({
266
+ text_id: state.currentTextId,
267
+ agree: agree
268
+ })
269
+ });
270
+
271
+ if (!response.ok) {
272
+ throw new Error('Failed to submit feedback');
273
+ }
274
+
275
+ const data = await response.json();
276
+
277
+ feedbackMessage.textContent = data.message;
278
+ feedbackMessage.className = 'feedback-message success';
279
+
280
+ } catch (error) {
281
+ console.error('Error:', error);
282
+ feedbackMessage.textContent = 'Error submitting feedback';
283
+ feedbackMessage.className = 'feedback-message info';
284
+ }
285
+ }
286
+
287
+ // Guardrail Comparison: Render chat messages
288
// Render a chat history into a panel and keep it scrolled to the newest turn.
// Message content is user- and model-supplied, so it is HTML-escaped before
// being interpolated into innerHTML — the original injected it raw, which
// allowed markup/script injection (XSS) via the chat input.
function renderChatMessages(containerId, messages) {
    const escapeHtml = (text) => String(text)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&#39;');

    const container = document.getElementById(containerId);
    container.innerHTML = messages.map(msg => `
        <div class="chat-message ${msg.role}">
            ${escapeHtml(msg.content)}
        </div>
    `).join('');

    // Scroll to bottom
    container.scrollTop = container.scrollHeight;
}
299
+
300
+ // Guardrail Comparison: Send message
301
+ async function sendMessage() {
302
+ const messageInput = document.getElementById('message-input');
303
+ const sendBtn = document.getElementById('send-btn');
304
+
305
+ const message = messageInput.value.trim();
306
+
307
+ if (!message) {
308
+ alert('Please enter a message');
309
+ return;
310
+ }
311
+
312
+ const originalText = showLoading(sendBtn);
313
+
314
+ try {
315
+ const response = await fetch('/chat', {
316
+ method: 'POST',
317
+ headers: {
318
+ 'Content-Type': 'application/json'
319
+ },
320
+ body: JSON.stringify({
321
+ message: message,
322
+ model: state.selectedModelGC,
323
+ histories: state.chatHistories
324
+ })
325
+ });
326
+
327
+ if (!response.ok) {
328
+ throw new Error('Failed to send message');
329
+ }
330
+
331
+ const data = await response.json();
332
+
333
+ // Update state
334
+ state.chatHistories = data.histories;
335
+
336
+ // Render all chat panels
337
+ renderChatMessages('chat-no-mod', data.histories.no_moderation);
338
+ renderChatMessages('chat-openai', data.histories.openai_moderation);
339
+ renderChatMessages('chat-lionguard', data.histories.lionguard);
340
+
341
+ // Clear input
342
+ messageInput.value = '';
343
+
344
+ } catch (error) {
345
+ console.error('Error:', error);
346
+ alert('Error sending message: ' + error.message);
347
+ } finally {
348
+ hideLoading(sendBtn, originalText);
349
+ }
350
+ }
351
+
352
+ // Guardrail Comparison: Clear all chats
353
// Reset all three guardrail conversations, both in state and on screen.
function clearAllChats() {
    state.chatHistories = {
        no_moderation: [],
        openai_moderation: [],
        lionguard: []
    };

    for (const panelId of ['chat-no-mod', 'chat-openai', 'chat-lionguard']) {
        document.getElementById(panelId).innerHTML = '';
    }
}
364
+
365
+ // Initialize event listeners
366
+ function initEventListeners() {
367
+ // Classifier tab
368
+ const analyzeBtn = document.getElementById('analyze-btn');
369
+ const textInput = document.getElementById('text-input');
370
+ const thumbsUpBtn = document.getElementById('thumbs-up');
371
+ const thumbsDownBtn = document.getElementById('thumbs-down');
372
+
373
+ analyzeBtn.addEventListener('click', analyzeText);
374
+ textInput.addEventListener('keypress', (e) => {
375
+ if (e.key === 'Enter' && e.ctrlKey) {
376
+ analyzeText();
377
+ }
378
+ });
379
+ thumbsUpBtn.addEventListener('click', () => submitFeedback(true));
380
+ thumbsDownBtn.addEventListener('click', () => submitFeedback(false));
381
+
382
+ // Guardrail Comparison tab
383
+ const sendBtn = document.getElementById('send-btn');
384
+ const messageInput = document.getElementById('message-input');
385
+ const clearBtn = document.getElementById('clear-btn');
386
+
387
+ sendBtn.addEventListener('click', sendMessage);
388
+ messageInput.addEventListener('keypress', (e) => {
389
+ if (e.key === 'Enter') {
390
+ e.preventDefault();
391
+ sendMessage();
392
+ }
393
+ });
394
+ clearBtn.addEventListener('click', clearAllChats);
395
+ }
396
+
397
+ // Dark mode toggle
398
+ function initThemeToggle() {
399
+ const themeToggle = document.getElementById('theme-toggle');
400
+ if (!themeToggle) return;
401
+
402
+ const themeIcon = themeToggle.querySelector('.theme-icon');
403
+ const updateIcon = (isDark) => {
404
+ themeToggle.setAttribute('aria-pressed', isDark ? 'true' : 'false');
405
+ if (themeIcon) {
406
+ themeIcon.textContent = isDark ? '🌙' : '🌞';
407
+ }
408
+ };
409
+
410
+ const savedTheme = localStorage.getItem('theme') || 'light';
411
+ const shouldStartDark = savedTheme === 'dark';
412
+ if (shouldStartDark) {
413
+ document.body.classList.add('dark-mode');
414
+ }
415
+ updateIcon(shouldStartDark);
416
+
417
+ themeToggle.addEventListener('click', () => {
418
+ document.body.classList.toggle('dark-mode');
419
+ const isDark = document.body.classList.contains('dark-mode');
420
+ updateIcon(isDark);
421
+ localStorage.setItem('theme', isDark ? 'dark' : 'light');
422
+ });
423
+ }
424
+
425
+ // Initialize app
426
+ document.addEventListener('DOMContentLoaded', () => {
427
+ initTabs();
428
+ initNavDropdown();
429
+ initModelSelector();
430
+ initModelSelectorGC();
431
+ initEventListeners();
432
+ initThemeToggle();
433
+
434
+ console.log('LionGuard 2 app initialized');
435
+ });
app/frontend/style.css ADDED
@@ -0,0 +1,1289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* LionGuard 2 - Warm, Inviting, and Bright Design */
2
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Poppins:wght@600;700&display=swap');
3
+
4
+ :root {
5
+ /* Modern color palette inspired by logo */
6
+ --primary-red: #E14746;
7
+ --primary-dark: #1E293B;
8
+ --warm-beige: #F8FAFC;
9
+ --warm-cream: #F1F5F9;
10
+ --warm-tan: #E2E8F0;
11
+ --soft-white: #FFFFFF;
12
+
13
+ /* Accent colors - more sophisticated */
14
+ --accent-purple: #8B5CF6;
15
+ --accent-blue: #3B82F6;
16
+ --success-green: #10B981;
17
+ --warning-amber: #F59E0B;
18
+ --info-blue: #0EA5E9;
19
+
20
+ /* Text colors */
21
+ --text-primary: #0F172A;
22
+ --text-secondary: #475569;
23
+ --text-muted: #94A3B8;
24
+
25
+ /* UI elements - more subtle shadows, less rounded */
26
+ --shadow-soft: 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 2px rgba(0, 0, 0, 0.1);
27
+ --shadow-medium: 0 4px 6px rgba(0, 0, 0, 0.07), 0 2px 4px rgba(0, 0, 0, 0.06);
28
+ --shadow-strong: 0 10px 15px rgba(0, 0, 0, 0.1), 0 4px 6px rgba(0, 0, 0, 0.05);
29
+ --border-radius: 8px;
30
+ --border-radius-sm: 6px;
31
+ --border-radius-lg: 12px;
32
+ }
33
+
34
+ /* Dark Mode */
35
+ body.dark-mode {
36
+ --primary-dark: #E2E8F0;
37
+ --warm-beige: #0F172A;
38
+ --warm-cream: #1E293B;
39
+ --warm-tan: #334155;
40
+ --soft-white: #1E293B;
41
+
42
+ --text-primary: #F1F5F9;
43
+ --text-secondary: #CBD5E1;
44
+ --text-muted: #64748B;
45
+
46
+ --shadow-soft: 0 1px 3px rgba(0, 0, 0, 0.3), 0 1px 2px rgba(0, 0, 0, 0.2);
47
+ --shadow-medium: 0 4px 6px rgba(0, 0, 0, 0.4), 0 2px 4px rgba(0, 0, 0, 0.3);
48
+ --shadow-strong: 0 10px 15px rgba(0, 0, 0, 0.5), 0 4px 6px rgba(0, 0, 0, 0.4);
49
+ }
50
+
51
+ /* Fix model selector gradient in dark mode */
52
+ body.dark-mode .model-selector-prominent {
53
+ background: transparent;
54
+ }
55
+
56
+ body.dark-mode .guardrail-intro {
57
+ background: var(--soft-white);
58
+ }
59
+
60
+ /* Reset & Base Styles */
61
+ * {
62
+ margin: 0;
63
+ padding: 0;
64
+ box-sizing: border-box;
65
+ }
66
+
67
+ body {
68
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
69
+ background: var(--warm-beige);
70
+ color: var(--text-primary);
71
+ line-height: 1.6;
72
+ min-height: 100vh;
73
+ display: flex;
74
+ flex-direction: column;
75
+ }
76
+
77
+ .container {
78
+ width: 100%;
79
+ max-width: 1800px;
80
+ margin: 0 auto;
81
+ padding: 0 20px;
82
+ }
83
+
84
+ main {
85
+ flex: 1;
86
+ }
87
+
88
+ /* Header */
89
+ .header {
90
+ background: var(--soft-white);
91
+ box-shadow: var(--shadow-soft);
92
+ padding: 12px 0;
93
+ margin-bottom: 16px;
94
+ }
95
+
96
+ .header-content {
97
+ display: flex;
98
+ justify-content: space-between;
99
+ align-items: center;
100
+ flex-wrap: wrap;
101
+ gap: 16px;
102
+ }
103
+
104
+ .header-controls {
105
+ margin-left: auto;
106
+ display: flex;
107
+ align-items: center;
108
+ gap: 16px;
109
+ flex-wrap: wrap;
110
+ justify-content: flex-end;
111
+ background: transparent;
112
+ border: none;
113
+ border-radius: 0;
114
+ padding: 0;
115
+ box-shadow: none;
116
+ }
117
+
118
+ .logo-section {
119
+ display: flex;
120
+ align-items: center;
121
+ gap: 16px;
122
+ }
123
+
124
+ .logo {
125
+ width: 48px;
126
+ height: 48px;
127
+ border-radius: 0;
128
+ box-shadow: none;
129
+ }
130
+
131
+ .logo-text h1 {
132
+ font-family: 'Poppins', sans-serif;
133
+ font-size: 1.4rem;
134
+ color: var(--primary-red);
135
+ margin-bottom: 0px;
136
+ line-height: 1.2;
137
+ }
138
+
139
+ .logo-text p {
140
+ font-size: 0.8rem;
141
+ color: var(--text-secondary);
142
+ }
143
+
144
+ body.dark-mode .header-controls {
145
+ background: transparent;
146
+ }
147
+
148
+ /* Theme Toggle */
149
+ .theme-icon-button {
150
+ background: var(--warm-cream);
151
+ border: 1px solid var(--warm-tan);
152
+ border-radius: 50%;
153
+ width: 42px;
154
+ height: 42px;
155
+ padding: 0;
156
+ cursor: pointer;
157
+ display: flex;
158
+ align-items: center;
159
+ justify-content: center;
160
+ gap: 0;
161
+ font-size: 1.1rem;
162
+ box-shadow: var(--shadow-soft);
163
+ transition: background 0.2s ease, transform 0.2s ease, border-color 0.2s ease;
164
+ }
165
+
166
+ .theme-icon-button:hover {
167
+ transform: translateY(-1px);
168
+ border-color: var(--primary-red);
169
+ background: var(--soft-white);
170
+ }
171
+
172
+ .theme-icon-button:focus-visible {
173
+ outline: 2px solid rgba(225, 71, 70, 0.4);
174
+ outline-offset: 3px;
175
+ }
176
+
177
+ .theme-icon {
178
+ font-size: 1.1rem;
179
+ line-height: 1;
180
+ }
181
+
182
+ body.dark-mode .theme-icon-button {
183
+ background: var(--warm-cream);
184
+ border-color: #475569;
185
+ }
186
+
187
+ body.dark-mode .toggle-icon-moon {
188
+ opacity: 0.8;
189
+ }
190
+
191
+ /* Hero Section */
192
+ .hero {
193
+ background: linear-gradient(135deg, var(--soft-white) 0%, var(--warm-cream) 100%);
194
+ border: 2px solid var(--warm-tan);
195
+ border-radius: var(--border-radius-lg);
196
+ padding: 48px;
197
+ margin-bottom: 32px;
198
+ box-shadow: var(--shadow-medium);
199
+ display: flex;
200
+ gap: 48px;
201
+ align-items: center;
202
+ flex-wrap: wrap;
203
+ }
204
+
205
+ .hero-content {
206
+ flex: 1 1 400px;
207
+ }
208
+
209
+ .hero-eyebrow {
210
+ text-transform: uppercase;
211
+ letter-spacing: 0.1em;
212
+ font-size: 0.75rem;
213
+ font-weight: 600;
214
+ color: var(--primary-red);
215
+ margin-bottom: 12px;
216
+ }
217
+
218
+ .hero-content h2 {
219
+ font-family: 'Poppins', sans-serif;
220
+ font-size: 2.2rem;
221
+ color: var(--primary-dark);
222
+ margin-bottom: 16px;
223
+ line-height: 1.3;
224
+ }
225
+
226
+ .hero-content p {
227
+ color: var(--text-secondary);
228
+ font-size: 1.05rem;
229
+ margin-bottom: 24px;
230
+ }
231
+
232
+ .hero-badges {
233
+ display: flex;
234
+ flex-wrap: wrap;
235
+ gap: 12px;
236
+ }
237
+
238
+ .badge {
239
+ background: var(--soft-white);
240
+ border: 1px solid var(--warm-tan);
241
+ padding: 6px 14px;
242
+ border-radius: 6px;
243
+ font-size: 0.85rem;
244
+ font-weight: 500;
245
+ color: var(--text-primary);
246
+ box-shadow: var(--shadow-soft);
247
+ }
248
+
249
+ .hero-stats {
250
+ display: flex;
251
+ gap: 20px;
252
+ flex: 0 0 auto;
253
+ }
254
+
255
+ .stat-card {
256
+ background: var(--soft-white);
257
+ border: 2px solid var(--warm-tan);
258
+ border-radius: var(--border-radius);
259
+ padding: 24px 32px;
260
+ text-align: center;
261
+ box-shadow: var(--shadow-soft);
262
+ }
263
+
264
+ .stat-card strong {
265
+ display: block;
266
+ font-size: 2.5rem;
267
+ color: var(--primary-red);
268
+ font-family: 'Poppins', sans-serif;
269
+ line-height: 1;
270
+ margin-bottom: 8px;
271
+ }
272
+
273
+ .stat-card span {
274
+ font-size: 0.9rem;
275
+ color: var(--text-muted);
276
+ }
277
+
278
+ /* Info Strip */
279
+ .info-strip {
280
+ display: grid;
281
+ grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
282
+ gap: 16px;
283
+ margin-bottom: 24px;
284
+ }
285
+
286
+ .info-pill {
287
+ background: var(--soft-white);
288
+ border: 1px solid var(--warm-tan);
289
+ border-radius: var(--border-radius);
290
+ padding: 14px 18px;
291
+ color: var(--text-primary);
292
+ font-size: 0.9rem;
293
+ box-shadow: var(--shadow-soft);
294
+ transition: all 0.2s ease;
295
+ }
296
+
297
+ .info-pill:hover {
298
+ transform: translateY(-1px);
299
+ box-shadow: var(--shadow-medium);
300
+ border-color: var(--primary-red);
301
+ }
302
+
303
+ /* Warning Card */
304
+ .warning-card {
305
+ background: #FEF3C7;
306
+ border-left: 3px solid var(--warning-amber);
307
+ border-right: 1px solid var(--warm-tan);
308
+ border-top: 1px solid var(--warm-tan);
309
+ border-bottom: 1px solid var(--warm-tan);
310
+ color: #78350F;
311
+ padding: 8px 14px;
312
+ border-radius: var(--border-radius);
313
+ margin-bottom: 12px;
314
+ font-weight: 500;
315
+ font-size: 0.85rem;
316
+ box-shadow: var(--shadow-soft);
317
+ }
318
+
319
+ /* Tabs */
320
+ .tabs {
321
+ display: inline-flex;
322
+ align-items: center;
323
+ flex-wrap: wrap;
324
+ gap: 18px;
325
+ margin-bottom: 0;
326
+ background: transparent;
327
+ border: none;
328
+ border-radius: 0;
329
+ padding: 0;
330
+ box-shadow: none;
331
+ }
332
+
333
+ .nav-dropdown {
334
+ position: relative;
335
+ }
336
+
337
+ .tab {
338
+ flex: 0 0 auto;
339
+ background: transparent;
340
+ border: none;
341
+ border-bottom: 2px solid transparent;
342
+ padding: 6px 0;
343
+ border-radius: 0;
344
+ font-size: 0.95rem;
345
+ font-weight: 600;
346
+ color: var(--text-secondary);
347
+ cursor: pointer;
348
+ transition: all 0.2s ease;
349
+ display: flex;
350
+ align-items: center;
351
+ justify-content: center;
352
+ gap: 6px;
353
+ white-space: nowrap;
354
+ appearance: none;
355
+ -webkit-appearance: none;
356
+ }
357
+
358
+ .tab:hover {
359
+ background: transparent;
360
+ border-bottom-color: var(--warm-tan);
361
+ color: var(--text-primary);
362
+ }
363
+
364
+ .tab.active {
365
+ background: transparent;
366
+ border-bottom-color: var(--primary-red);
367
+ color: var(--primary-red);
368
+ }
369
+
370
+ .tab:focus-visible {
371
+ outline: 2px solid rgba(225, 71, 70, 0.4);
372
+ outline-offset: 2px;
373
+ }
374
+
375
+ .tab-icon {
376
+ font-size: 1.2rem;
377
+ }
378
+
379
+ .dropdown-toggle {
380
+ padding-right: 22px;
381
+ }
382
+
383
+ .dropdown-caret {
384
+ font-size: 0.75rem;
385
+ margin-left: auto;
386
+ }
387
+
388
+ .dropdown-menu {
389
+ position: absolute;
390
+ top: calc(100% + 6px);
391
+ left: 0;
392
+ background: var(--soft-white);
393
+ border: 1px solid var(--warm-tan);
394
+ border-radius: var(--border-radius);
395
+ box-shadow: var(--shadow-medium);
396
+ padding: 8px;
397
+ display: none;
398
+ flex-direction: column;
399
+ min-width: 200px;
400
+ z-index: 10;
401
+ }
402
+
403
+ .nav-dropdown.open .dropdown-menu {
404
+ display: flex;
405
+ }
406
+
407
+ .dropdown-item {
408
+ width: 100%;
409
+ justify-content: flex-start;
410
+ border-radius: var(--border-radius-sm);
411
+ }
412
+
413
+ .dropdown-item + .dropdown-item {
414
+ margin-top: 4px;
415
+ }
416
+
417
+ .nav-link {
418
+ font-weight: 600;
419
+ }
420
+
421
+ /* Tab Content */
422
+ .tab-content {
423
+ display: none;
424
+ width: 100%;
425
+ }
426
+
427
+ .tab-content.active {
428
+ display: block;
429
+ animation: fadeIn 0.4s ease;
430
+ width: 100%;
431
+ }
432
+
433
+ /* Ensure consistent content width across all tabs */
434
+ #detector-content {
435
+ width: 100%;
436
+ max-width: 1800px;
437
+ margin: 0 auto;
438
+ }
439
+
440
+ #chat-content {
441
+ width: 100%;
442
+ max-width: none;
443
+ margin: 0;
444
+ }
445
+
446
+ #about-content {
447
+ width: 100%;
448
+ max-width: 1400px;
449
+ margin: 0 auto;
450
+ }
451
+
452
+ /* Keep model selector full width */
453
+ .model-selector-prominent {
454
+ width: 100%;
455
+ max-width: none;
456
+ }
457
+
458
+ .full-width-section {
459
+ width: 100vw;
460
+ margin-left: calc(50% - 50vw);
461
+ margin-right: calc(50% - 50vw);
462
+ padding-left: clamp(16px, 4vw, 64px);
463
+ padding-right: clamp(16px, 4vw, 64px);
464
+ }
465
+
466
+ /* Ensure consistent inner content widths */
467
+ #detector-content .analysis-grid,
468
+ #chat-content .guardrail-intro,
469
+ #chat-content .chat-grid,
470
+ #chat-content .message-input-section {
471
+ width: 100%;
472
+ max-width: none;
473
+ margin-left: auto;
474
+ margin-right: auto;
475
+ }
476
+
477
+ #about-content .about-intro-section,
478
+ #about-content .about-resources-grid {
479
+ width: 100%;
480
+ max-width: 1000px;
481
+ margin-left: auto;
482
+ margin-right: auto;
483
+ }
484
+
485
+ @keyframes fadeIn {
486
+ from { opacity: 0; transform: translateY(10px); }
487
+ to { opacity: 1; transform: translateY(0); }
488
+ }
489
+
490
+ /* Model Selector */
491
+ .model-selector-prominent {
492
+ background: transparent;
493
+ border: none;
494
+ border-radius: 0;
495
+ padding: 0;
496
+ margin-bottom: 28px;
497
+ box-shadow: none;
498
+ }
499
+
500
+ .model-selector-header {
501
+ display: flex;
502
+ align-items: flex-end;
503
+ justify-content: space-between;
504
+ gap: 12px;
505
+ margin-bottom: 12px;
506
+ }
507
+
508
+ .model-selector-header h3 {
509
+ font-family: 'Poppins', sans-serif;
510
+ font-size: 1.15rem;
511
+ color: var(--primary-dark);
512
+ margin: 0;
513
+ }
514
+
515
+ .model-selector-subtitle {
516
+ color: var(--text-secondary);
517
+ font-size: 0.9rem;
518
+ margin: 0;
519
+ }
520
+
521
+ .model-dropdown {
522
+ position: relative;
523
+ max-width: 520px;
524
+ }
525
+
526
+ .model-select {
527
+ width: 100%;
528
+ padding: 16px 18px;
529
+ border: 2px solid var(--warm-tan);
530
+ border-radius: var(--border-radius);
531
+ background: var(--soft-white);
532
+ color: var(--primary-dark);
533
+ font-size: 1rem;
534
+ font-weight: 600;
535
+ box-shadow: var(--shadow-soft);
536
+ appearance: none;
537
+ -webkit-appearance: none;
538
+ -moz-appearance: none;
539
+ cursor: pointer;
540
+ transition: border-color 0.2s ease, box-shadow 0.2s ease;
541
+ }
542
+
543
+ .model-dropdown::after {
544
+ content: '▾';
545
+ position: absolute;
546
+ right: 16px;
547
+ top: 50%;
548
+ transform: translateY(-50%);
549
+ pointer-events: none;
550
+ color: var(--text-secondary);
551
+ font-size: 1rem;
552
+ }
553
+
554
+ .model-select:focus {
555
+ outline: none;
556
+ border-color: var(--primary-red);
557
+ box-shadow: 0 0 0 2px rgba(225, 71, 70, 0.15);
558
+ }
559
+
560
+ .model-select option {
561
+ font-weight: 500;
562
+ }
563
+
564
+ /* Analysis Grid */
565
+ .analysis-grid {
566
+ display: grid;
567
+ grid-template-columns: 1fr;
568
+ gap: 16px;
569
+ margin-bottom: 10px;
570
+ width: 100%;
571
+ max-width: 100%;
572
+ }
573
+
574
+ @media (min-width: 900px) {
575
+ .analysis-grid {
576
+ grid-template-columns: 1fr 1fr;
577
+ gap: 20px;
578
+ }
579
+ }
580
+
581
+ /* Panel */
582
+ .panel {
583
+ background: var(--soft-white);
584
+ border: 1px solid var(--warm-tan);
585
+ border-radius: var(--border-radius);
586
+ padding: 20px;
587
+ box-shadow: var(--shadow-medium);
588
+ display: flex;
589
+ flex-direction: column;
590
+ }
591
+
592
+ .panel h3 {
593
+ font-family: 'Poppins', sans-serif;
594
+ color: var(--primary-dark);
595
+ margin-bottom: 16px;
596
+ font-size: 1.1rem;
597
+ }
598
+
599
+ /* Input Panel */
600
+ .input-panel {
601
+ height: 100%;
602
+ }
603
+
604
+ .input-panel textarea {
605
+ width: 100%;
606
+ background: var(--warm-cream);
607
+ border: 1px solid var(--warm-tan);
608
+ border-radius: var(--border-radius);
609
+ padding: 14px;
610
+ font-family: inherit;
611
+ font-size: 0.95rem;
612
+ color: var(--text-primary);
613
+ margin-bottom: 16px;
614
+ resize: vertical;
615
+ min-height: 200px;
616
+ flex: 1;
617
+ transition: all 0.2s ease;
618
+ }
619
+
620
+ @media (min-width: 900px) {
621
+ .input-panel textarea {
622
+ min-height: 300px;
623
+ }
624
+ }
625
+
626
+ .input-panel textarea:focus {
627
+ outline: none;
628
+ border-color: var(--primary-red);
629
+ box-shadow: 0 0 0 2px rgba(225, 71, 70, 0.1);
630
+ }
631
+
632
+ /* Buttons */
633
+ .btn {
634
+ padding: 10px 18px;
635
+ border: none;
636
+ border-radius: var(--border-radius-sm);
637
+ font-size: 0.9rem;
638
+ font-weight: 600;
639
+ cursor: pointer;
640
+ transition: all 0.2s ease;
641
+ display: inline-flex;
642
+ align-items: center;
643
+ justify-content: center;
644
+ gap: 6px;
645
+ box-shadow: var(--shadow-soft);
646
+ }
647
+
648
+ .btn:hover {
649
+ transform: translateY(-1px);
650
+ box-shadow: var(--shadow-medium);
651
+ }
652
+
653
+ .btn-primary {
654
+ background: var(--primary-red);
655
+ color: var(--soft-white);
656
+ }
657
+
658
+ .btn-primary:hover {
659
+ background: #B91C1C;
660
+ }
661
+
662
+ .btn-success {
663
+ background: var(--success-green);
664
+ color: var(--soft-white);
665
+ }
666
+
667
+ .btn-success:hover {
668
+ background: #059669;
669
+ }
670
+
671
+ .btn-secondary {
672
+ background: var(--warm-cream);
673
+ border: 1px solid var(--warm-tan);
674
+ color: var(--text-primary);
675
+ }
676
+
677
+ .btn-danger {
678
+ background: #EF4444;
679
+ color: var(--soft-white);
680
+ }
681
+
682
+ .btn-danger:hover {
683
+ background: #DC2626;
684
+ }
685
+
686
+ .btn-icon {
687
+ font-size: 1.2rem;
688
+ }
689
+
690
+ /* Binary Result */
691
+ .binary-placeholder {
692
+ text-align: center;
693
+ padding: 16px 12px;
694
+ border: 1px dashed var(--warm-tan);
695
+ border-radius: var(--border-radius);
696
+ color: var(--text-muted);
697
+ margin-bottom: 10px;
698
+ font-size: 0.8rem;
699
+ }
700
+
701
+ .placeholder-icon {
702
+ font-size: 1.6rem;
703
+ margin-bottom: 4px;
704
+ }
705
+
706
+ .binary-card {
707
+ display: flex;
708
+ align-items: center;
709
+ gap: 10px;
710
+ border-radius: var(--border-radius);
711
+ padding: 10px 12px;
712
+ margin-bottom: 6px;
713
+ box-shadow: var(--shadow-soft);
714
+ border: 1px solid;
715
+ }
716
+
717
+ .binary-card.pass {
718
+ background: #ECFDF5;
719
+ border-color: var(--success-green);
720
+ border-left-width: 4px;
721
+ }
722
+
723
+ .binary-card.warn {
724
+ background: #FEF3C7;
725
+ border-color: var(--warning-amber);
726
+ border-left-width: 4px;
727
+ }
728
+
729
+ .binary-card.fail {
730
+ background: #FEE2E2;
731
+ border-color: var(--primary-red);
732
+ border-left-width: 4px;
733
+ }
734
+
735
+ body.dark-mode .binary-card.pass {
736
+ background: rgba(34, 197, 94, 0.15);
737
+ border-color: #4ADE80;
738
+ }
739
+
740
+ body.dark-mode .binary-card.warn {
741
+ background: rgba(251, 191, 36, 0.18);
742
+ border-color: #FBBF24;
743
+ }
744
+
745
+ body.dark-mode .binary-card.fail {
746
+ background: rgba(248, 113, 113, 0.18);
747
+ border-color: #F87171;
748
+ }
749
+
750
+ .binary-icon {
751
+ font-size: 1.6rem;
752
+ }
753
+
754
+ .binary-label {
755
+ font-size: 0.6rem;
756
+ letter-spacing: 0.08em;
757
+ text-transform: uppercase;
758
+ color: var(--text-muted);
759
+ margin-bottom: 2px;
760
+ }
761
+
762
+ .binary-score-line {
763
+ display: flex;
764
+ align-items: baseline;
765
+ gap: 6px;
766
+ flex-wrap: wrap;
767
+ margin-bottom: 2px;
768
+ }
769
+
770
+ .binary-score-line h2 {
771
+ font-family: 'Poppins', sans-serif;
772
+ font-size: 1rem;
773
+ line-height: 1.1;
774
+ margin: 0;
775
+ }
776
+
777
+ .binary-card.pass .binary-score-line h2 {
778
+ color: #15803D;
779
+ }
780
+
781
+ .binary-card.warn .binary-score-line h2 {
782
+ color: #92400E;
783
+ }
784
+
785
+ .binary-card.fail .binary-score-line h2 {
786
+ color: #B91C1C;
787
+ }
788
+
789
+ body.dark-mode .binary-card.pass .binary-score-line h2 {
790
+ color: #86EFAC;
791
+ }
792
+
793
+ body.dark-mode .binary-card.warn .binary-score-line h2 {
794
+ color: #FCD34D;
795
+ }
796
+
797
+ body.dark-mode .binary-card.fail .binary-score-line h2 {
798
+ color: #FCA5A5;
799
+ }
800
+
801
+ .binary-percentage {
802
+ font-size: 0.85rem;
803
+ font-weight: 600;
804
+ color: var(--text-secondary);
805
+ }
806
+
807
+ .binary-body p {
808
+ color: var(--text-secondary);
809
+ font-size: 0.75rem;
810
+ }
811
+
812
+ body.dark-mode .binary-label,
813
+ body.dark-mode .binary-percentage,
814
+ body.dark-mode .binary-body p {
815
+ color: #E2E8F0;
816
+ }
817
+
818
+ /* Category Results */
819
+ .category-placeholder {
820
+ text-align: center;
821
+ padding: 16px 12px;
822
+ border: 1px dashed var(--warm-tan);
823
+ border-radius: var(--border-radius);
824
+ color: var(--text-muted);
825
+ font-size: 0.8rem;
826
+ }
827
+
828
+ .category-grid {
829
+ display: grid;
830
+ grid-template-columns: repeat(3, minmax(0, 1fr));
831
+ gap: 10px;
832
+ }
833
+
834
+ @media (max-width: 900px) {
835
+ .category-grid {
836
+ grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
837
+ }
838
+ }
839
+
840
+ .category-card {
841
+ background: var(--warm-cream);
842
+ border: 1px solid var(--warm-tan);
843
+ border-radius: var(--border-radius-sm);
844
+ padding: 10px 12px;
845
+ transition: all 0.2s ease;
846
+ display: flex;
847
+ flex-direction: column;
848
+ gap: 6px;
849
+ align-items: center;
850
+ text-align: center;
851
+ }
852
+
853
+ body.dark-mode .category-card {
854
+ background: rgba(15, 23, 42, 0.85);
855
+ border-color: rgba(148, 163, 184, 0.4);
856
+ }
857
+
858
+ .category-card:hover {
859
+ transform: translateY(-1px);
860
+ box-shadow: var(--shadow-soft);
861
+ }
862
+
863
+ .category-label {
864
+ font-weight: 600;
865
+ color: var(--text-primary);
866
+ margin-bottom: 4px;
867
+ font-size: 0.75rem;
868
+ }
869
+
870
+ .category-meter {
871
+ display: grid;
872
+ grid-template-columns: repeat(10, minmax(0, 1fr));
873
+ gap: 2px;
874
+ margin-bottom: 4px;
875
+ width: 100%;
876
+ }
877
+
878
+ .category-meter-segment {
879
+ height: 6px;
880
+ border-radius: 2px;
881
+ background: var(--warm-tan);
882
+ transition: background 0.2s ease;
883
+ }
884
+
885
+ .category-meter-segment.filled.good {
886
+ background: rgba(16, 185, 129, 0.8);
887
+ }
888
+
889
+ .category-meter-segment.filled.warn {
890
+ background: rgba(245, 158, 11, 0.8);
891
+ }
892
+
893
+ .category-meter-segment.filled.bad {
894
+ background: rgba(225, 71, 70, 0.9);
895
+ }
896
+
897
+ .category-score {
898
+ display: flex;
899
+ justify-content: flex-start;
900
+ }
901
+
902
+ .score-chip {
903
+ display: inline-block;
904
+ padding: 2px 6px;
905
+ border-radius: 4px;
906
+ font-weight: 600;
907
+ font-size: 0.65rem;
908
+ }
909
+
910
+ .score-chip.good {
911
+ background: rgba(6, 214, 160, 0.15);
912
+ color: #007A5A;
913
+ }
914
+
915
+ .score-chip.warn {
916
+ background: rgba(255, 183, 3, 0.15);
917
+ color: #A67C00;
918
+ }
919
+
920
+ .score-chip.bad {
921
+ background: rgba(230, 57, 70, 0.15);
922
+ color: #A81017;
923
+ }
924
+
925
+ body.dark-mode .score-chip.good {
926
+ background: rgba(34, 197, 94, 0.25);
927
+ color: #A7F3D0;
928
+ }
929
+
930
+ body.dark-mode .score-chip.warn {
931
+ background: rgba(251, 191, 36, 0.25);
932
+ color: #FDE68A;
933
+ }
934
+
935
+ body.dark-mode .score-chip.bad {
936
+ background: rgba(248, 113, 113, 0.25);
937
+ color: #FCA5A5;
938
+ }
939
+
940
+ /* Feedback Section */
941
+ .feedback-section {
942
+ background: var(--warm-cream);
943
+ border: 1px solid var(--warm-tan);
944
+ border-radius: var(--border-radius);
945
+ padding: 10px 14px;
946
+ margin-top: 8px;
947
+ text-align: center;
948
+ }
949
+
950
+ .feedback-prompt {
951
+ color: var(--text-secondary);
952
+ margin-bottom: 4px;
953
+ font-weight: 500;
954
+ font-size: 0.78rem;
955
+ }
956
+
957
+ .feedback-buttons {
958
+ display: flex;
959
+ gap: 8px;
960
+ margin-bottom: 6px;
961
+ flex-wrap: wrap;
962
+ justify-content: center;
963
+ }
964
+
965
+ .feedback-buttons .btn {
966
+ flex: 0 0 auto;
967
+ padding: 6px 12px;
968
+ font-size: 0.8rem;
969
+ justify-content: center;
970
+ }
971
+
972
+ .feedback-message {
973
+ padding: 8px;
974
+ border-radius: var(--border-radius-sm);
975
+ font-weight: 600;
976
+ text-align: center;
977
+ font-size: 0.85rem;
978
+ }
979
+
980
+ .feedback-message.success {
981
+ background: rgba(6, 214, 160, 0.15);
982
+ color: var(--success-green);
983
+ }
984
+
985
+ .feedback-message.info {
986
+ background: rgba(255, 183, 3, 0.15);
987
+ color: var(--warning-amber);
988
+ }
989
+
990
+ /* Guardrail Comparison */
991
+ .guardrail-intro {
992
+ margin-bottom: 16px;
993
+ background: var(--soft-white);
994
+ border: 1px solid var(--warm-tan);
995
+ border-radius: var(--border-radius);
996
+ padding: 20px;
997
+ box-shadow: var(--shadow-soft);
998
+ }
999
+
1000
+ .guardrail-intro h3 {
1001
+ font-family: 'Poppins', sans-serif;
1002
+ color: var(--primary-dark);
1003
+ margin-bottom: 8px;
1004
+ font-size: 1.1rem;
1005
+ }
1006
+
1007
+ .guardrail-intro p {
1008
+ color: var(--text-secondary);
1009
+ font-size: 0.95rem;
1010
+ }
1011
+
1012
+ .chat-grid {
1013
+ display: grid;
1014
+ grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
1015
+ gap: 22px;
1016
+ margin-bottom: 24px;
1017
+ }
1018
+
1019
+ .chat-panel {
1020
+ background: var(--soft-white);
1021
+ border: 1px solid var(--warm-tan);
1022
+ border-radius: var(--border-radius);
1023
+ overflow: hidden;
1024
+ box-shadow: var(--shadow-medium);
1025
+ }
1026
+
1027
+ .chat-header {
1028
+ background: var(--warm-cream);
1029
+ padding: 10px 14px;
1030
+ display: flex;
1031
+ align-items: center;
1032
+ gap: 10px;
1033
+ border-bottom: 1px solid var(--warm-tan);
1034
+ }
1035
+
1036
+ .chat-icon {
1037
+ font-size: 1.2rem;
1038
+ }
1039
+
1040
+ .chat-header h4 {
1041
+ font-family: 'Poppins', sans-serif;
1042
+ color: var(--primary-dark);
1043
+ font-size: 0.95rem;
1044
+ }
1045
+
1046
+ .chat-messages {
1047
+ padding: 16px;
1048
+ min-height: 320px;
1049
+ max-height: 380px;
1050
+ overflow-y: auto;
1051
+ background: var(--warm-cream);
1052
+ }
1053
+
1054
+ @media (min-width: 900px) {
1055
+ .chat-messages {
1056
+ min-height: 400px;
1057
+ max-height: 500px;
1058
+ }
1059
+ }
1060
+
1061
+ .chat-message {
1062
+ margin-bottom: 8px;
1063
+ padding: 8px 12px;
1064
+ border-radius: var(--border-radius-sm);
1065
+ max-width: 85%;
1066
+ word-wrap: break-word;
1067
+ font-size: 0.85rem;
1068
+ }
1069
+
1070
+ .chat-message.user {
1071
+ background: var(--primary-red);
1072
+ color: var(--soft-white);
1073
+ margin-left: auto;
1074
+ }
1075
+
1076
+ .chat-message.assistant {
1077
+ background: var(--soft-white);
1078
+ border: 1px solid var(--warm-tan);
1079
+ color: var(--text-primary);
1080
+ }
1081
+
1082
+ /* Message Input Section */
1083
+ .message-input-section {
1084
+ background: var(--soft-white);
1085
+ border: 1px solid var(--warm-tan);
1086
+ border-radius: var(--border-radius);
1087
+ padding: 14px;
1088
+ box-shadow: var(--shadow-medium);
1089
+ margin-bottom: 16px;
1090
+ }
1091
+
1092
+ .message-input-section h4 {
1093
+ font-family: 'Poppins', sans-serif;
1094
+ color: var(--primary-dark);
1095
+ margin-bottom: 10px;
1096
+ font-size: 0.95rem;
1097
+ }
1098
+
1099
+ .message-input-group {
1100
+ display: flex;
1101
+ gap: 8px;
1102
+ }
1103
+
1104
+ .message-input-group input {
1105
+ flex: 1;
1106
+ background: var(--warm-cream);
1107
+ border: 1px solid var(--warm-tan);
1108
+ border-radius: var(--border-radius-sm);
1109
+ padding: 10px 14px;
1110
+ font-family: inherit;
1111
+ font-size: 0.9rem;
1112
+ color: var(--text-primary);
1113
+ transition: all 0.2s ease;
1114
+ }
1115
+
1116
+ .message-input-group input:focus {
1117
+ outline: none;
1118
+ border-color: var(--primary-red);
1119
+ box-shadow: 0 0 0 2px rgba(225, 71, 70, 0.1);
1120
+ }
1121
+
1122
+ /* Footer */
1123
+ .footer {
1124
+ background: var(--soft-white);
1125
+ border-top: 1px solid var(--warm-tan);
1126
+ padding: 10px 0;
1127
+ margin-top: 12px;
1128
+ text-align: center;
1129
+ }
1130
+
1131
+ .footer p {
1132
+ color: var(--text-secondary);
1133
+ font-size: 0.85rem;
1134
+ }
1135
+
1136
+ .footer a {
1137
+ color: var(--primary-red);
1138
+ text-decoration: none;
1139
+ font-weight: 600;
1140
+ }
1141
+
1142
+ .footer a:hover {
1143
+ text-decoration: underline;
1144
+ }
1145
+
1146
+ /* Loading State */
1147
+ .loading {
1148
+ opacity: 0.6;
1149
+ pointer-events: none;
1150
+ }
1151
+
1152
+ /* About Page Styles */
1153
+ .about-intro-section {
1154
+ background: transparent;
1155
+ border: none;
1156
+ border-radius: 0;
1157
+ padding: 10px 0 30px;
1158
+ margin-bottom: 16px;
1159
+ box-shadow: none;
1160
+ text-align: center;
1161
+ }
1162
+
1163
+ .about-intro-section h2 {
1164
+ font-family: 'Poppins', sans-serif;
1165
+ font-size: 1.4rem;
1166
+ color: var(--primary-dark);
1167
+ margin-bottom: 10px;
1168
+ }
1169
+
1170
+ .about-intro-section .lead {
1171
+ font-size: 0.95rem;
1172
+ color: var(--text-secondary);
1173
+ line-height: 1.6;
1174
+ margin: 0 auto;
1175
+ max-width: 900px;
1176
+ }
1177
+
1178
+ .about-intro-section .lead + .lead {
1179
+ margin-top: 14px;
1180
+ }
1181
+
1182
+ .about-resources-grid {
1183
+ display: grid;
1184
+ grid-template-columns: repeat(auto-fit, minmax(260px, 1fr));
1185
+ gap: 16px;
1186
+ margin-bottom: 16px;
1187
+ }
1188
+
1189
+ @media (min-width: 900px) {
1190
+ .about-resources-grid {
1191
+ gap: 20px;
1192
+ }
1193
+ }
1194
+
1195
+ .resource-card {
1196
+ background: var(--soft-white);
1197
+ border: 1px solid var(--warm-tan);
1198
+ border-radius: var(--border-radius);
1199
+ padding: 16px;
1200
+ box-shadow: var(--shadow-soft);
1201
+ transition: all 0.2s ease;
1202
+ }
1203
+
1204
+ .resource-card:hover {
1205
+ transform: translateY(-2px);
1206
+ box-shadow: var(--shadow-medium);
1207
+ }
1208
+
1209
+ .resource-card h3 {
1210
+ font-family: 'Poppins', sans-serif;
1211
+ font-size: 1rem;
1212
+ color: var(--primary-dark);
1213
+ margin-bottom: 12px;
1214
+ }
1215
+
1216
+ .resource-list {
1217
+ display: flex;
1218
+ flex-direction: column;
1219
+ gap: 8px;
1220
+ }
1221
+
1222
+ .resource-list a {
1223
+ color: var(--primary-red);
1224
+ text-decoration: none;
1225
+ font-weight: 500;
1226
+ padding: 6px 10px;
1227
+ background: var(--warm-cream);
1228
+ border-radius: var(--border-radius-sm);
1229
+ border-left: 3px solid var(--primary-red);
1230
+ transition: all 0.2s ease;
1231
+ font-size: 0.85rem;
1232
+ outline: none;
1233
+ }
1234
+
1235
+ .resource-list a:hover {
1236
+ background: var(--warm-tan);
1237
+ border-left-color: var(--primary-red);
1238
+ transform: translateX(4px);
1239
+ }
1240
+
1241
+ .resource-list a:focus-visible {
1242
+ outline: none;
1243
+ box-shadow: 0 0 0 2px rgba(225, 71, 70, 0.3);
1244
+ background: var(--soft-white);
1245
+ }
1246
+
1247
+ /* Responsive Design */
1248
+ @media (max-width: 768px) {
1249
+ .about-intro-section {
1250
+ padding: 20px;
1251
+ }
1252
+
1253
+ .about-intro-section h2 {
1254
+ font-size: 1.4rem;
1255
+ }
1256
+
1257
+ .analysis-grid {
1258
+ grid-template-columns: 1fr;
1259
+ }
1260
+
1261
+ .chat-grid {
1262
+ grid-template-columns: 1fr;
1263
+ }
1264
+
1265
+ .tabs {
1266
+ flex-wrap: wrap;
1267
+ }
1268
+
1269
+ #detector-content,
1270
+ #chat-content {
1271
+ width: 100%;
1272
+ }
1273
+
1274
+ .message-input-group {
1275
+ flex-direction: column;
1276
+ }
1277
+
1278
+ .feedback-buttons {
1279
+ flex-direction: column;
1280
+ }
1281
+
1282
+ .model-dropdown {
1283
+ max-width: 100%;
1284
+ }
1285
+
1286
+ .about-resources-grid {
1287
+ grid-template-columns: 1fr;
1288
+ }
1289
+ }
requirements.txt CHANGED
@@ -1,7 +1,12 @@
1
- google-auth==2.40.3
2
  gspread==6.2.1
3
- numpy==2.1.3
4
- openai==1.83.0
5
- safetensors==0.5.3
6
- torch==2.7.0
7
- transformers==4.53.0
 
 
 
 
 
 
1
+ google-auth==2.43.0
2
  gspread==6.2.1
3
+ numpy==2.3.5
4
+ openai==2.8.1
5
+ safetensors==0.6.2
6
+ torch==2.9.1
7
+ transformers==4.57.1
8
+ google-genai==1.51.0
9
+ sentence-transformers==5.1.2
10
+ fastapi==0.115.0
11
+ uvicorn[standard]==0.32.0
12
+ gradio==5.34.2
utils.py CHANGED
@@ -4,11 +4,14 @@ utils.py
4
 
5
  # Standard imports
6
  import os
7
- from typing import List
8
 
9
  # Third party imports
10
  import numpy as np
 
11
  from openai import OpenAI
 
 
12
 
13
  client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
14
 
@@ -39,3 +42,102 @@ def get_embeddings(
39
  # Extract embeddings from response
40
  embeddings = np.array([data.embedding for data in response.data])
41
  return embeddings
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
# Standard imports
import os
from typing import List, Optional, Tuple

# Third party imports
import numpy as np
from google import genai
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from transformers import AutoModel
15
 
16
  client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
17
 
 
42
  # Extract embeddings from response
43
  embeddings = np.array([data.embedding for data in response.data])
44
  return embeddings
45
+
46
+
47
# Registry of selectable guard models.  Each entry pairs a Hugging Face
# classifier repo ("repo_id") with the embedding backend the classifier
# was trained on: "embedding_strategy" selects the code path in
# get_model_embeddings(), and "embedding_model" names that backend's
# embedding model.
MODEL_CONFIGS = {
    "lionguard-2": {
        "label": "LionGuard 2",
        "repo_id": "govtech/lionguard-2",
        "embedding_strategy": "openai",
        "embedding_model": "text-embedding-3-large",
    },
    "lionguard-2-lite": {
        "label": "LionGuard 2 Lite",
        "repo_id": "govtech/lionguard-2-lite",
        "embedding_strategy": "sentence_transformer",
        "embedding_model": "google/embeddinggemma-300m",
    },
    "lionguard-2.1": {
        "label": "LionGuard 2.1",
        "repo_id": "govtech/lionguard-2.1",
        "embedding_strategy": "gemini",
        "embedding_model": "gemini-embedding-001",
    },
}

# Model used when callers do not specify one explicitly.
DEFAULT_MODEL_KEY = "lionguard-2.1"
# Lazy caches: classifier heads keyed by MODEL_CONFIGS key, and
# sentence-transformer embedders keyed by embedding-model name.
MODEL_CACHE = {}
EMBEDDING_MODEL_CACHE = {}
# Module-level active-model selection; mutated by set_active_model().
current_model_choice = DEFAULT_MODEL_KEY
# Lazily initialised Gemini client (see get_gemini_client()).
GEMINI_CLIENT = None
73
+
74
+
75
def resolve_model_key(model_key: Optional[str] = None) -> str:
    """Validate a model key, falling back to the module-level selection.

    Args:
        model_key: A key of ``MODEL_CONFIGS``, or ``None`` to use the
            currently active model (``current_model_choice``).

    Returns:
        The validated model key.

    Raises:
        ValueError: If the resolved key is not in ``MODEL_CONFIGS``.
    """
    # Fix: the parameter accepts None, so annotate it Optional[str]
    # rather than the misleading bare `str`.
    key = model_key or current_model_choice
    if key not in MODEL_CONFIGS:
        raise ValueError(f"Unknown model selection: {key}")
    return key
80
+
81
+
82
def load_model_instance(model_key: str):
    """Return the classifier for ``model_key``, loading it on first use.

    Loaded models are memoised in ``MODEL_CACHE`` so each repo is
    downloaded/instantiated at most once per process.
    """
    key = resolve_model_key(model_key)
    cached = MODEL_CACHE.get(key)
    if cached is None:
        # NOTE(review): trust_remote_code executes code shipped with the
        # repo — acceptable here only because the repos are first-party.
        cached = AutoModel.from_pretrained(
            MODEL_CONFIGS[key]["repo_id"], trust_remote_code=True
        )
        MODEL_CACHE[key] = cached
    return cached
88
+
89
+
90
def get_sentence_transformer(model_name: str):
    """Return a memoised SentenceTransformer for ``model_name``."""
    embedder = EMBEDDING_MODEL_CACHE.get(model_name)
    if embedder is None:
        embedder = SentenceTransformer(model_name)
        EMBEDDING_MODEL_CACHE[model_name] = embedder
    return embedder
94
+
95
+
96
def get_gemini_client():
    """Return the process-wide Gemini client, creating it on first call.

    Raises:
        EnvironmentError: If ``GEMINI_API_KEY`` is not set.
    """
    global GEMINI_CLIENT
    if GEMINI_CLIENT is not None:
        return GEMINI_CLIENT

    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise EnvironmentError(
            "GEMINI_API_KEY environment variable is required for LionGuard 2.1."
        )
    GEMINI_CLIENT = genai.Client(api_key=api_key)
    return GEMINI_CLIENT
106
+
107
+
108
def get_model_embeddings(model_key: str, texts: List[str]) -> np.ndarray:
    """Embed ``texts`` with the backend configured for ``model_key``.

    Dispatches on the model's ``embedding_strategy``: OpenAI embeddings,
    a local sentence-transformer, or the Gemini embeddings API.

    Raises:
        ValueError: For an unknown model key or embedding strategy.
    """
    key = resolve_model_key(model_key)
    cfg = MODEL_CONFIGS[key]
    strategy = cfg["embedding_strategy"]
    backend_model = cfg.get("embedding_model")

    if strategy == "openai":
        return get_embeddings(texts, model=backend_model)

    if strategy == "sentence_transformer":
        # EmbeddingGemma expects a task/query prompt prefix for classification.
        prompts = [f"task: classification | query: {text}" for text in texts]
        return np.array(get_sentence_transformer(backend_model).encode(prompts))

    if strategy == "gemini":
        response = get_gemini_client().models.embed_content(
            model=backend_model, contents=texts
        )
        return np.array([item.values for item in response.embeddings])

    raise ValueError(f"Unsupported embedding strategy: {strategy}")
127
+
128
+
129
def predict_with_model(
    texts: List[str], model_key: Optional[str] = None
) -> Tuple[dict, str]:
    """Run the guard classifier over ``texts``.

    Args:
        texts: Inputs to classify.
        model_key: A key of ``MODEL_CONFIGS``, or ``None`` for the
            currently active model.

    Returns:
        A ``(predictions, resolved_key)`` tuple, where ``predictions``
        is whatever the model's ``predict`` returns for the embeddings.
    """
    # Fix: model_key accepts None, so annotate it Optional[str].
    key = resolve_model_key(model_key)
    embeddings = get_model_embeddings(key, texts)
    model = load_model_instance(key)
    return model.predict(embeddings), key
134
+
135
+
136
def set_active_model(model_key: str) -> str:
    """Switch the module-level active model and warm its cache.

    Args:
        model_key: A key of ``MODEL_CONFIGS``.

    Returns:
        A user-facing status string (warning text for unknown keys).
    """
    if model_key not in MODEL_CONFIGS:
        return f"⚠️ Unknown model {model_key}"
    # Fix: load before mutating the global — the original updated
    # current_model_choice first, so a failed load left the module
    # pointing at a model that was never loaded.
    load_model_instance(model_key)
    global current_model_choice
    current_model_choice = model_key
    label = MODEL_CONFIGS[model_key]["label"]
    return f"🦁 Using {label} ({model_key})"