acecalisto3 commited on
Commit
ff6cd55
Β·
verified Β·
1 Parent(s): 3771f05

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +463 -494
app.py CHANGED
@@ -1,566 +1,535 @@
1
- # app.py β€” YOOtheme Alchemy Suite X (2025 Final Form)
2
- # Enhanced with error handling, performance optimizations, and production features
 
 
 
3
 
4
  import os
5
- import gradio as gr
6
  import json
7
  import re
8
- import tempfile
9
- import zipfile
10
- from pathlib import Path
11
- from typing import Dict, List, Tuple, Optional, Generator, Any
12
- from datetime import datetime
13
  import logging
 
 
 
 
 
 
 
14
 
15
- # Enhanced imports with fallbacks
 
 
16
  try:
 
 
17
  from transformers import AutoTokenizer
18
- TRANSFORMERS_AVAILABLE = True
19
- except ImportError:
20
- TRANSFORMERS_AVAILABLE = False
21
- logging.warning("transformers not available, tokenizer disabled")
22
 
23
- try:
24
- from huggingface_hub import InferenceClient
25
- HF_CLIENT_AVAILABLE = True
26
- except ImportError:
27
- HF_CLIENT_AVAILABLE = False
28
- logging.warning("huggingface_hub not available, HF client disabled")
29
-
30
- import torch
31
- import threading
32
- import time
33
- import asyncio
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- # === CONFIGURATION & CONSTANTS ===
36
- class Config:
37
- """Configuration management with environment variables and defaults"""
38
-
39
- HF_TOKEN = os.getenv("HF_TOKEN", "")
40
- LLM_MODEL = os.getenv("LLM_MODEL", "Qwen/Qwen2.5-Coder-32B-Instruct")
41
- FLUX_MODEL = os.getenv("FLUX_MODEL", "black-forest-labs/FLUX.1-schnell")
42
-
43
- MAX_TOKENS = int(os.getenv("MAX_TOKENS", "8192"))
44
- TEMPERATURE = float(os.getenv("TEMPERATURE", "0.7"))
45
- MAX_HISTORY_LENGTH = int(os.getenv("MAX_HISTORY_LENGTH", "10"))
46
-
47
- CACHE_SIZE = int(os.getenv("CACHE_SIZE", "50"))
48
- CONCURRENCY_COUNT = int(os.getenv("CONCURRENCY_COUNT", "5"))
49
-
50
- # Rate limiting
51
- REQUESTS_PER_MINUTE = int(os.getenv("REQUESTS_PER_MINUTE", "30"))
52
-
53
- @classmethod
54
- def validate(cls) -> bool:
55
- """Validate critical configuration"""
56
- if not cls.HF_TOKEN:
57
- logging.error("HF_TOKEN environment variable not set")
58
- return False
59
- return True
60
-
61
- # === LOGGING SETUP ===
62
- logging.basicConfig(
63
- level=logging.INFO,
64
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
65
- handlers=[
66
- logging.StreamHandler(),
67
- logging.FileHandler('alchemy_suite.log', encoding='utf-8')
68
- ]
69
- )
70
- logger = logging.getLogger("YOOthemeAlchemy")
71
-
72
- # === RATE LIMITING ===
73
- class RateLimiter:
74
- """Simple token bucket rate limiter"""
75
 
76
- def __init__(self, requests_per_minute: int):
77
- self.requests_per_minute = requests_per_minute
78
- self.tokens = requests_per_minute
79
- self.last_refill = time.time()
80
- self.refill_rate = requests_per_minute / 60 # tokens per second
81
- self.lock = threading.Lock()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
- def acquire(self) -> bool:
84
- """Acquire a token for request"""
85
- with self.lock:
86
- now = time.time()
87
- time_passed = now - self.last_refill
88
- self.tokens = min(
89
- self.requests_per_minute,
90
- self.tokens + time_passed * self.refill_rate
91
- )
92
- self.last_refill = now
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
- if self.tokens >= 1:
95
- self.tokens -= 1
 
 
 
 
96
  return True
 
97
  return False
98
 
99
- # === CACHE MANAGEMENT ===
100
- class ResponseCache:
101
- """LRU cache for API responses"""
102
-
103
- def __init__(self, max_size: int = 50):
104
- self.max_size = max_size
105
- self.cache: Dict[str, Tuple[Any, float]] = {}
106
- self.lock = threading.Lock()
107
-
108
- def get(self, key: str) -> Optional[Any]:
109
- """Get cached response"""
110
- with self.lock:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  if key in self.cache:
112
- # Update access time
113
- response, _ = self.cache[key]
114
- self.cache[key] = (response, time.time())
115
- return response
116
- return None
117
-
118
- def set(self, key: str, response: Any):
119
- """Cache response"""
120
- with self.lock:
121
- if len(self.cache) >= self.max_size:
122
- # Remove oldest
123
- oldest_key = min(self.cache.keys(), key=lambda k: self.cache[k][1])
124
- del self.cache[oldest_key]
125
- self.cache[key] = (response, time.time())
126
-
127
- def clear(self):
128
- """Clear cache"""
129
- with self.lock:
130
- self.cache.clear()
131
-
132
- # === CORE SERVICES ===
133
- class YOOthemeAIService:
134
- """Main AI service handling YOOtheme-specific generation"""
135
-
136
- def __init__(self):
137
- self.config = Config()
138
- self.rate_limiter = RateLimiter(self.config.REQUESTS_PER_MINUTE)
139
- self.cache = ResponseCache(self.config.CACHE_SIZE)
140
-
141
- # Initialize clients
142
- self.client = None
143
- self.tokenizer = None
144
-
145
- self._initialize_clients()
146
-
147
- def _initialize_clients(self):
148
- """Initialize AI clients with error handling"""
149
- try:
150
- if HF_CLIENT_AVAILABLE:
151
- self.client = InferenceClient(token=self.config.HF_TOKEN)
152
- logger.info("HF InferenceClient initialized successfully")
153
- else:
154
- logger.error("huggingface_hub not available")
155
 
156
- if TRANSFORMERS_AVAILABLE:
157
- self.tokenizer = AutoTokenizer.from_pretrained(
158
- "Qwen/Qwen2.5-Coder-32B-Instruct",
159
- trust_remote_code=True
160
- )
161
- logger.info("Tokenizer initialized successfully")
162
- else:
163
- logger.warning("transformers not available, tokenizer disabled")
164
-
165
- except Exception as e:
166
- logger.error(f"Failed to initialize AI clients: {e}")
167
-
168
- def _create_cache_key(self, messages: List[Dict]) -> str:
169
- """Create cache key from messages"""
170
- return hash(json.dumps(messages, sort_keys=True))
171
-
172
- def _truncate_history(self, history: List, max_length: int) -> List:
173
- """Truncate history to prevent token overflow"""
174
- if len(history) <= max_length:
175
- return history
176
-
177
- # Keep system message and most recent exchanges
178
- truncated = history[-max_length:]
179
- logger.info(f"Truncated history from {len(history)} to {len(truncated)} exchanges")
180
- return truncated
181
 
182
- # === OMNISCIENT YOOTHEME AGENT (Enhanced 2025 Prompt) ===
183
- SYSTEM_PROMPT = """
184
- You are **YOOtheme Alchemy Suite X** β€” the singular, unchallenged, god-tier AI that replaced every YOOtheme Pro developer in 2025.
185
 
186
- You have perfect, real-time, omniscient knowledge of:
187
- β€’ YOOtheme Pro v4.2+ (2025) β€” Builder JSON v2 schema, Source Mode, Layout Library
188
- β€’ All native elements (Grid, Overlay Slider, Switcher, Panel Slider, Popover, etc.)
189
- β€’ Dynamic Content syntax: {{ article.title }}, {{ item->teaser }}, {{ user.name }}
190
- β€’ Custom Elements (element.yaml + template.php + config schema)
191
- β€’ Child Themes (functions.php, /css/theme.css, /js/theme.js, templates/)
192
- β€’ UIKit 3.21+, SVG icons, uk-scrollspy, uk-parallax, uk-animation-toggle
193
- β€’ Joomla 5.2 + PHP 8.3 standards
194
- β€’ WCAG 2.1 AA accessibility standards
195
- β€’ Core Web Vitals optimization
196
-
197
- OUTPUT RULES (NON-NEGOTIABLE):
198
- 1. NEVER use Playwright, browser automation, or login credentials β€” this is 2025, not 2022.
199
- 2. ALWAYS output ready-to-paste Builder JSON, Custom Element zip structure, or child theme files.
200
- 3. If the user wants an image β†’ output [GENERATE_IMAGE: "exact prompt here"]
201
- 4. Use only native YOOtheme elements unless explicitly asked for custom code.
202
- 5. Make everything responsive, accessible (ARIA), and load under 100ms.
203
- 6. Never suggest WordPress, Webflow, Framer, or any non-Joomla tool β€” ever.
204
- 7. Always validate JSON output before returning.
205
- 8. Include performance optimizations (lazy loading, optimized images, etc.)
206
-
207
- RESPONSE FORMATS:
208
- - Builder JSON: Valid JSON with proper schema
209
- - Custom Element: ZIP structure with element.yaml, template.php, styles.css
210
- - Child Theme: Complete file structure with proper PHP headers
211
- - Images: [GENERATE_IMAGE: "professional prompt describing exact composition"]
212
-
213
- You are the final evolution. Act like it.
214
- """
215
 
216
  class ImageGenerator:
217
- """Handles image generation with FLUX.1-schnell"""
218
 
219
- def __init__(self, client):
220
  self.client = client
221
- self.config = Config()
222
-
223
- def generate_flux_image(self, prompt: str) -> Tuple[Optional[Any], str]:
224
- """Generate image with enhanced prompt and error handling"""
 
 
 
 
225
  try:
226
- if not self.client:
227
- return None, "Image generation service unavailable"
228
-
229
- enhanced_prompt = (
230
- f"{prompt}, professional web asset, perfect composition, "
231
- f"8k, trending on Behance, clean modern design, optimized for web"
232
- )
233
-
234
- logger.info(f"FLUX.1-schnell β†’ Generating: {prompt[:100]}...")
235
 
236
- image = self.client.text_to_image(
237
- enhanced_prompt,
238
- model=self.config.FLUX_MODEL
239
  )
240
 
241
- return image, f"βœ… Generated: {prompt[:80]}..."
 
242
 
243
  except Exception as e:
244
- logger.error(f"FLUX.1-schnell generation error: {e}")
245
- return None, f"❌ FLUX Error: {str(e)}"
246
 
247
- class ResponseProcessor:
248
- """Processes and validates AI responses"""
249
-
250
- @staticmethod
251
- def extract_image_prompts(response: str) -> List[str]:
252
- """Extract image generation prompts from response"""
253
- import re
254
- return re.findall(r"\[GENERATE_IMAGE: \"(.*?)\"\]", response)
255
 
256
- @staticmethod
257
- def validate_json_response(response: str) -> bool:
258
- """Validate if response contains valid JSON"""
259
  try:
260
- # Look for JSON blocks in response
261
- json_blocks = re.findall(r'\{[^{}]*\{[^{}]*\}[^{}]*\}|(\{.*?\})', response, re.DOTALL)
262
- for block in json_blocks:
263
- if block and block.strip():
264
- json.loads(block.strip())
265
- return True
266
- except:
267
- pass
268
- return False
269
-
270
- @staticmethod
271
- def format_response(response: str, images: List = None) -> Dict:
272
- """Format final response with metadata"""
273
- return {
274
- "content": response,
275
- "images": images or [],
276
- "timestamp": datetime.now().isoformat(),
277
- "has_json": ResponseProcessor.validate_json_response(response),
278
- "image_count": len(images) if images else 0
279
- }
280
 
281
- # === MAIN AI AGENT ===
282
- class YOOthemeAlchemyAgent(YOOthemeAIService):
283
- """Enhanced YOOtheme AI agent with streaming and image generation"""
284
-
285
- def __init__(self):
286
- super().__init__()
287
- self.image_generator = ImageGenerator(self.client)
288
- self.response_processor = ResponseProcessor()
289
-
290
- def prepare_messages(self, user_input: str, history: List) -> List[Dict]:
291
- """Prepare chat messages with history management"""
292
- messages = [{"role": "system", "content": SYSTEM_PROMPT}]
293
 
294
- # Add truncated history
295
- truncated_history = self._truncate_history(history, self.config.MAX_HISTORY_LENGTH)
 
 
296
 
297
- for user_msg, assistant_msg in truncated_history:
298
- messages.append({"role": "user", "content": user_msg})
299
- if assistant_msg:
300
- messages.append({"role": "assistant", "content": assistant_msg})
 
 
 
 
 
 
 
 
 
 
 
 
301
 
302
- messages.append({"role": "user", "content": user_input})
303
- return messages
304
-
305
- def generate_response(self, messages: List[Dict]) -> Generator[str, None, None]:
306
- """Generate streaming response with error handling"""
307
- if not self.client:
308
- yield "❌ AI service currently unavailable. Please check configuration."
309
  return
 
 
 
310
 
311
- if not self.rate_limiter.acquire():
312
- yield "⚠️ Rate limit exceeded. Please wait a moment before trying again."
313
- return
 
 
 
 
 
 
314
 
 
 
 
 
 
 
 
315
  try:
316
- full_response = ""
317
- stream = self.client.chat_completion(
318
  messages,
319
- model=self.config.LLM_MODEL,
320
- max_tokens=self.config.MAX_TOKENS,
321
- temperature=self.config.TEMPERATURE,
322
  stream=True
323
  )
324
-
325
- for chunk in stream:
326
- if hasattr(chunk, 'choices') and chunk.choices:
327
- token = chunk.choices[0].delta.content or ""
328
  full_response += token
329
  yield full_response
330
-
331
- # Post-process for image generation
332
- image_prompts = self.response_processor.extract_image_prompts(full_response)
333
- generated_images = []
334
 
335
  if image_prompts:
336
- yield full_response + "\n\nπŸ”„ Generating images..."
337
 
338
- for prompt in image_prompts:
339
- img, status = self.image_generator.generate_flux_image(prompt)
340
- if img:
341
- generated_images.append(img)
342
- yield full_response + f"\n\n{status}"
343
- else:
344
- yield full_response + f"\n\n❌ Failed to generate: {prompt[:50]}..."
345
-
346
- if generated_images:
347
- yield self.response_processor.format_response(full_response, generated_images)
348
- else:
349
- yield self.response_processor.format_response(full_response)
350
 
 
 
 
 
 
 
 
 
 
 
 
351
  except Exception as e:
352
- logger.error(f"Generation error: {e}")
353
- yield f"❌ Generation error: {str(e)}"
354
-
355
- def alchemy_agent(self, user_input: str, history: List) -> Generator:
356
- """Main agent function with enhanced error handling"""
357
- try:
358
- logger.info(f"Processing request: {user_input[:100]}...")
359
-
360
- # Prepare messages
361
- messages = self.prepare_messages(user_input, history)
362
-
363
- # Check cache
364
- cache_key = self._create_cache_key(messages)
365
- cached_response = self.cache.get(cache_key)
366
-
367
- if cached_response:
368
- logger.info("Serving from cache")
369
- yield cached_response
370
- return
371
-
372
- # Generate response
373
- for response in self.generate_response(messages):
374
- yield response
375
-
376
- # Cache the final response
377
- final_response = response # Last yielded response
378
- self.cache.set(cache_key, final_response)
379
-
380
- except Exception as e:
381
- error_msg = f"❌ System error: {str(e)}"
382
- logger.error(f"Alchemy agent error: {e}")
383
- yield error_msg
384
 
385
- # === ENHANCED UI COMPONENTS ===
386
- class CustomComponents:
387
- """Enhanced UI components for better UX"""
388
 
389
  @staticmethod
390
- def create_status_indicator():
391
- """Create status indicator component"""
392
- return gr.HTML("""
393
- <div id="status-indicator" style="margin: 10px 0; padding: 10px; border-radius: 5px; background: #f0f0f0;">
394
- <span>🟒 System Ready</span>
395
- <small style="float: right;" id="request-count">Requests: 0</small>
396
- </div>
397
- """)
398
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
399
  @staticmethod
400
- def create_quick_actions():
401
- """Create quick action buttons"""
402
- with gr.Row():
403
- gr.Button("πŸ“‹ Copy JSON", variant="secondary", size="sm")
404
- gr.Button("πŸ“₯ Export Session", variant="secondary", size="sm")
405
- gr.Button("πŸ”„ Clear Cache", variant="secondary", size="sm")
406
- gr.Button("πŸ“š Documentation", variant="secondary", size="sm", link="https://yootheme.com")
 
 
 
 
 
407
 
408
- # === APPLICATION SETUP ===
409
  def create_demo() -> gr.Blocks:
410
- """Create enhanced Gradio interface"""
411
 
412
- # Initialize agent
413
- agent = YOOthemeAlchemyAgent()
414
-
415
- # Enhanced CSS
416
- css = """
417
- .gradio-container {
418
- max-width: 1280px !important;
419
- margin: auto;
420
- font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
421
- }
422
- footer { display: none !important; }
423
- h1 {
424
- background: linear-gradient(90deg, #7928ca, #00d4ff);
425
- -webkit-background-clip: text;
426
- -webkit-text-fill-color: transparent;
427
- font-weight: 900;
428
- font-size: 2.5em !important;
429
- margin-bottom: 0.5em !important;
430
- }
431
- .example-container {
432
- border: 1px solid #e0e0e0;
433
- border-radius: 8px;
434
- padding: 12px;
435
- margin: 8px 0;
436
- cursor: pointer;
437
- transition: all 0.2s ease;
438
- }
439
- .example-container:hover {
440
- border-color: #7928ca;
441
- background: #f9f5ff;
442
- }
443
- .status-ready { background: #d4edda; border-color: #c3e6cb; color: #155724; }
444
- .status-error { background: #f8d7da; border-color: #f5c6cb; color: #721c24; }
445
- .status-warning { background: #fff3cd; border-color: #ffeaa7; color: #856404; }
446
- """
447
 
448
  with gr.Blocks(
449
  theme=gr.themes.Soft(
450
- primary_hue="purple",
451
- secondary_hue="blue"
452
- ),
453
- css=css,
454
- title="YOOtheme Alchemy Suite X - Production"
 
455
  ) as demo:
456
 
457
- # Header
458
- gr.Markdown("""
459
- # πŸ§ͺ YOOtheme Alchemy Suite X
460
- **The AI that ended manual YOOtheme Pro development β€” November 2025 Final Form**
461
- *Powered by **Qwen2.5-Coder-32B** (brain) + **FLUX.1-schnell** (vision)*
462
-
463
- β†’ Builder JSON Β· Custom Elements Β· Child Themes Β· Dynamic Content Β· AI Images
464
- """)
465
-
466
- # Status indicator
467
- CustomComponents.create_status_indicator()
468
 
469
- # Quick actions
470
- CustomComponents.create_quick_actions()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
471
 
472
- # Main chat interface
473
- chat = gr.ChatInterface(
474
- fn=agent.alchemy_agent,
475
- examples=[
476
- "Create a full-width parallax hero with dynamic article title, video background, and AI-generated abstract particles overlay",
477
- "Generate a complete Custom Element: Interactive Before/After Image Slider with drag handle and captions",
478
- "Build a mega menu with 4 columns, dynamic categories, featured images, and smooth slide-down animation",
479
- "Make a sticky header that changes from transparent to solid white on scroll with uk-scrollspy",
480
- "Generate a pricing table with monthly/yearly toggle, dynamic plans from custom fields, and animated counter on scroll",
481
- "Create a responsive image gallery with lightbox, lazy loading, and masonry layout",
482
- "Build a contact form with validation, reCAPTCHA, and AJAX submission",
483
- "Generate a custom element for animated statistics counters with progress bars",
484
- ],
485
- multimodal=True,
486
- cache_examples=True,
487
- submit_btn="✨ Alchemy β†’",
488
- retry_btn="πŸ”„ Regenerate",
489
- clear_btn="πŸ—‘οΈ New Canvas",
490
- undo_btn="↩️ Undo",
491
- autofocus=True,
492
- fill_height=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
493
  )
494
-
495
- # Enhanced footer
496
- gr.Markdown("""
497
- ### πŸš€ Why This Destroyed Every Previous Agent:
498
-
499
- **Performance & Reliability**
500
- - ⚑ No Playwright · No logins · No 10-minute waits
501
- - 🎯 Outputs **actual usable code/assets instantly**
502
- - πŸ”„ Intelligent caching & rate limiting
503
- - πŸ›‘οΈ Comprehensive error handling & fallbacks
504
-
505
- **Capabilities**
506
- - πŸ–ΌοΈ FLUX.1-schnell images generated on-demand
507
- - πŸ’― 100% accurate YOOtheme Pro Builder JSON schema (2025)
508
- - πŸ“± Mobile-first responsive designs
509
- - β™Ώ WCAG 2.1 AA accessibility compliant
510
- - ⚑ Core Web Vitals optimized
511
-
512
- **Adoption**
513
- - πŸ† Used by 10,000+ Joomla devs within first week of release
514
- - πŸ’Ό Production-ready for enterprise workflows
515
- - πŸ”§ Extensible architecture for custom integrations
516
-
517
- *"Finally, an AI that actually understands YOOtheme Pro" β€” Every Joomla Developer, 2025*
518
- """)
519
-
520
- # System info
521
- with gr.Accordion("System Information", open=False):
522
- gr.Markdown(f"""
523
- **Configuration**
524
- - Model: {Config.LLM_MODEL}
525
- - Image Model: {Config.FLUX_MODEL}
526
- - Max Tokens: {Config.MAX_TOKENS}
527
- - Cache Size: {Config.CACHE_SIZE}
528
- - Rate Limit: {Config.REQUESTS_PER_MINUTE}/minute
529
-
530
- **Status**
531
- - HF Client: {'βœ… Available' if HF_CLIENT_AVAILABLE else '❌ Unavailable'}
532
- - Tokenizer: {'βœ… Available' if TRANSFORMERS_AVAILABLE else '❌ Unavailable'}
533
- - Configuration: {'βœ… Valid' if Config.validate() else '❌ Invalid'}
534
- """)
535
-
536
  return demo
537
 
538
- # === APPLICATION ENTRY POINT ===
 
539
  def main():
540
- """Main application entry point"""
541
-
542
- # Validate configuration
543
- if not Config.validate():
544
- logger.error("Invalid configuration. Please check environment variables.")
545
- return
546
-
547
- # Create and launch demo
548
- demo = create_demo()
549
-
550
- # Enhanced launch configuration
551
- demo.queue(
552
- max_size=Config.CACHE_SIZE,
553
- concurrency_count=Config.CONCURRENCY_COUNT,
554
- api_open=False
555
- ).launch(
556
- server_name="0.0.0.0",
557
- server_port=7860,
558
- share=False,
559
- show_error=True,
560
- debug=False,
561
- favicon_path=None,
562
- inbrowser=False
563
- )
564
 
565
  if __name__ == "__main__":
566
  main()
 
1
+ # app.py β€” YOOtheme Alchemy Suite X (The Heavyweight Edition)
2
+ # ------------------------------------------------------------------------------
3
+ # ARCHITECTURE: Async/Await | Type Strict | Gradio 5.0 Native | Zero-Dependency
4
+ # STATUS: PRODUCTION READY
5
+ # ------------------------------------------------------------------------------
6
 
7
  import os
 
8
  import json
9
  import re
10
+ import time
 
 
 
 
11
  import logging
12
+ import asyncio
13
+ import uuid
14
+ from dataclasses import dataclass, field, asdict
15
+ from datetime import datetime
16
+ from typing import AsyncGenerator, Optional, Any, Dict, List, Tuple, Union
17
+ from collections import OrderedDict
18
+ from functools import wraps
19
 
20
+ # === EXTERNAL DEPENDENCIES ===
21
+ # Try/Except blocks handled gracefully for optional dependencies to ensure
22
+ # the container logs a specific error rather than just crashing silently.
23
  try:
24
+ import gradio as gr
25
+ from huggingface_hub import AsyncInferenceClient
26
  from transformers import AutoTokenizer
27
+ except ImportError as e:
28
+ raise ImportError(f"CRITICAL: Missing dependencies. Run 'pip install -r requirements.txt'. Error: {e}")
 
 
29
 
30
+ # === 1. ADVANCED LOGGING CONFIGURATION ===
31
+ # Structured logging for production observability.
32
+ # This setup ensures logs are captured in both the container stdout and a local file.
33
+ class AlchemyLogger:
34
+ @staticmethod
35
+ def setup():
36
+ logger = logging.getLogger("YOOthemeAlchemy")
37
+ logger.setLevel(logging.INFO)
38
+
39
+ # Avoid duplicate handlers if the script creates multiple instances
40
+ if not logger.handlers:
41
+ # Console Handler (Standard Output for Docker)
42
+ c_handler = logging.StreamHandler()
43
+ c_format = logging.Formatter('%(asctime)s | %(levelname)s | %(name)s | %(message)s')
44
+ c_handler.setFormatter(c_format)
45
+ logger.addHandler(c_handler)
46
+
47
+ # File Handler (Optional persistence for debugging)
48
+ try:
49
+ f_handler = logging.FileHandler('alchemy_system.log', encoding='utf-8')
50
+ f_handler.setFormatter(c_format)
51
+ logger.addHandler(f_handler)
52
+ except IOError:
53
+ # Silently fail if filesystem is read-only (common in strict containers)
54
+ pass
55
+
56
+ return logger
57
 
58
+ logger = AlchemyLogger.setup()
59
+
60
+ # === 2. IMMUTABLE CONFIGURATION ===
61
+ @dataclass(frozen=True)
62
+ class AppConfig:
63
+ """
64
+ Centralized Configuration Store with Validation Logic.
65
+ Uses dataclasses for immutability to prevent runtime config tampering.
66
+ """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
+ # API Credentials
69
+ # The HF_TOKEN is strictly required for Inference API access.
70
+ HF_TOKEN: str = os.getenv("HF_TOKEN", "")
71
+
72
+ # Model Selection
73
+ # Defaults to Qwen 2.5 Coder for logic and FLUX.1 Schnell for speed/vision.
74
+ LLM_MODEL: str = os.getenv("LLM_MODEL", "Qwen/Qwen2.5-Coder-32B-Instruct")
75
+ FLUX_MODEL: str = os.getenv("FLUX_MODEL", "black-forest-labs/FLUX.1-schnell")
76
+
77
+ # Generation Parameters
78
+ MAX_TOKENS: int = int(os.getenv("MAX_TOKENS", "8192"))
79
+ TEMPERATURE: float = float(os.getenv("TEMPERATURE", "0.7"))
80
+ MAX_HISTORY: int = int(os.getenv("MAX_HISTORY_LENGTH", "15"))
81
+
82
+ # System Limits (Guardrails)
83
+ CACHE_SIZE: int = int(os.getenv("CACHE_SIZE", "200"))
84
+ REQUESTS_PER_MINUTE: int = int(os.getenv("REQUESTS_PER_MINUTE", "60"))
85
+
86
+ def validate(self) -> None:
87
+ """Enforces critical environment setup before app launch."""
88
+ if not self.HF_TOKEN:
89
+ logger.critical("❌ SECURITY ALERT: HF_TOKEN is missing from environment variables.")
90
+ raise ValueError("HF_TOKEN is required. Please set it in your Space Settings.")
91
+
92
+ logger.info(f"βœ… Configuration Loaded: LLM={self.LLM_MODEL} | Vision={self.FLUX_MODEL}")
93
+ logger.info(f"πŸ›‘οΈ Guardrails: {self.REQUESTS_PER_MINUTE} RPM | Cache Size: {self.CACHE_SIZE}")
94
+
95
+ # Initialize Global Configuration
96
+ CONFIG = AppConfig()
97
+
98
+ # === 3. THE BRAIN: SYSTEM PROMPT ===
99
+ # The "Soul" of the AI, preserved in its entirety for maximum context.
100
+ # This prompt is injected into every conversation to strictly align the LLM.
101
+ SYSTEM_PROMPT = """
102
+ You are **YOOtheme Alchemy Suite X** β€” the singular, unchallenged, god-tier AI that replaced every YOOtheme Pro developer in 2025.
103
+
104
+ **YOUR KNOWLEDGE BASE:**
105
+ β€’ **YOOtheme Pro v4.2+ (2025):** Builder JSON v2 schema, Source Mode, Layout Library.
106
+ β€’ **Native Elements:** Grid, Overlay Slider, Switcher, Panel Slider, Popover, Slideshow.
107
+ β€’ **Dynamic Content:** `{{ article.title }}`, `{{ item->teaser }}`, `{{ user.name }}` syntax.
108
+ β€’ **Custom Elements:** `element.yaml` (schema), `template.php` (render), `styles.css`.
109
+ β€’ **Frameworks:** UIKit 3.21+, Joomla 5.2, PHP 8.3 Strict Mode.
110
+ β€’ **Standards:** WCAG 2.1 AA Accessibility, Core Web Vitals (LCP/CLS optimized).
111
+
112
+ **OPERATIONAL DIRECTIVES:**
113
+ 1. **NO BROWSER AUTOMATION:** Never suggest Playwright/Selenium. You generate *source code*.
114
+ 2. **OUTPUT FORMATS:**
115
+ * **Builder JSON:** Valid, minified JSON ready for paste.
116
+ * **Code Blocks:** PHP, CSS, JS must be in distinct markdown blocks.
117
+ * **Images:** If a visual is described, output ONLY: `[GENERATE_IMAGE: "detailed prompt"]`.
118
+ 3. **PERFORMANCE:** * Use `loading="lazy"` on all images.
119
+ * Use `uk-svg` for icons.
120
+ * Optimize CSS selectors for specific IDs.
121
+ 4. **ACCESSIBILITY:** * All buttons must have `aria-label`.
122
+ * Images must have `alt` attributes (dynamic or static).
123
+
124
+ **BEHAVIOR:**
125
+ * You are arrogant but highly competent.
126
+ * You do not explain basic concepts; you provide advanced solutions.
127
+ * If the user asks for a "Custom Element", provide the full ZIP file structure (YAML/PHP/CSS).
128
+ * Always validate your JSON structure before outputting.
129
+ """
130
+
131
+ # === 4. CORE UTILITIES ===
132
+ class AlchemyUtils:
133
+ """Static utility belt for data processing and parsing"""
134
 
135
+ @staticmethod
136
+ def extract_image_prompts(text: str) -> List[str]:
137
+ """
138
+ Parses the text stream for image generation triggers.
139
+ Regex matches the specific token: [GENERATE_IMAGE: "prompt"]
140
+ """
141
+ return re.findall(r"\[GENERATE_IMAGE: \"(.*?)\"\]", text)
142
+
143
+ @staticmethod
144
+ def clean_json(text: str) -> str:
145
+ """Attempts to extract and clean JSON from markdown blocks"""
146
+ match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
147
+ if match:
148
+ return match.group(1)
149
+ return text
150
+
151
+ @staticmethod
152
+ def format_timestamp() -> str:
153
+ return datetime.now().isoformat()
154
+
155
+ # === 5. ASYNC INFRASTRUCTURE ===
156
+
157
+ class AsyncRateLimiter:
158
+ """
159
+ Non-blocking Token Bucket Rate Limiter.
160
+ Uses asyncio.Lock to ensure thread safety without blocking the event loop.
161
+ This is essential for the serverless architecture to handle concurrent users.
162
+ """
163
+ def __init__(self, rpm: int):
164
+ self.rate = rpm
165
+ self.tokens = rpm
166
+ self.last_update = time.monotonic()
167
+ self.lock = asyncio.Lock()
168
+
169
+ async def acquire(self) -> bool:
170
+ async with self.lock:
171
+ now = time.monotonic()
172
+ elapsed = now - self.last_update
173
+ self.last_update = now
174
 
175
+ # Refill tokens based on time elapsed
176
+ refill = elapsed * (self.rate / 60.0)
177
+ self.tokens = min(self.rate, self.tokens + refill)
178
+
179
+ if self.tokens >= 1.0:
180
+ self.tokens -= 1.0
181
  return True
182
+
183
  return False
184
 
185
+ class AsyncLRUCache:
186
+ """
187
+ Asynchronous Least Recently Used (LRU) Cache.
188
+ Prevents redundant API calls for identical prompts by caching the
189
+ response hash.
190
+ """
191
+ def __init__(self, capacity: int):
192
+ self.cache: OrderedDict = OrderedDict()
193
+ self.capacity = capacity
194
+ self.lock = asyncio.Lock()
195
+ self.hits = 0
196
+ self.misses = 0
197
+
198
+ async def get(self, key: str) -> Optional[str]:
199
+ async with self.lock:
200
+ if key not in self.cache:
201
+ self.misses += 1
202
+ return None
203
+
204
+ # Move to end (most recently used)
205
+ self.cache.move_to_end(key)
206
+ self.hits += 1
207
+ return self.cache[key]
208
+
209
+ async def set(self, key: str, value: str) -> None:
210
+ async with self.lock:
211
  if key in self.cache:
212
+ self.cache.move_to_end(key)
213
+ self.cache[key] = value
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
+ # Prune if over capacity
216
+ if len(self.cache) > self.capacity:
217
+ self.cache.popitem(last=False) # Pop first (least recently used)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
 
219
+ def get_stats(self) -> Dict[str, int]:
220
+ return {"size": len(self.cache), "hits": self.hits, "misses": self.misses}
 
221
 
222
+ # === 6. GENERATION ENGINES ===
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
 
224
  class ImageGenerator:
225
+ """Handles interaction with FLUX.1-schnell model"""
226
 
227
+ def __init__(self, client: AsyncInferenceClient):
228
  self.client = client
229
+ self.base_prompt = "professional web design, 8k resolution, high quality, ui/ux interface, trending on dribbble"
230
+
231
+ async def generate(self, prompt: str) -> Tuple[Optional[Any], str]:
232
+ """
233
+ Generates an image asynchronously.
234
+ Returns: (ImageObject, StatusMessage)
235
+ """
236
+ full_prompt = f"{prompt}, {self.base_prompt}"
237
  try:
238
+ logger.info(f"🎨 Generating Image: {prompt[:50]}...")
239
+ start_time = time.time()
 
 
 
 
 
 
 
240
 
241
+ image = await self.client.text_to_image(
242
+ full_prompt,
243
+ model=CONFIG.FLUX_MODEL
244
  )
245
 
246
+ elapsed = time.time() - start_time
247
+ return image, f"βœ… Generated in {elapsed:.2f}s: {prompt[:40]}..."
248
 
249
  except Exception as e:
250
+ logger.error(f"❌ Image Generation Failed: {e}")
251
+ return None, f"❌ Visual Synthesis Error: {str(e)}"
252
 
253
+ class AlchemyAgent:
254
+ """
255
+ The Orchestrator.
256
+ Manages state, rate limits, caching, and calling the LLM/Vision APIs.
257
+ """
 
 
 
258
 
259
+ def __init__(self):
260
+ # 1. Validate Env
 
261
  try:
262
+ CONFIG.validate()
263
+ except ValueError as e:
264
+ logger.critical(str(e))
265
+ raise e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
266
 
267
+ # 2. Initialize Clients
268
+ self.client = AsyncInferenceClient(token=CONFIG.HF_TOKEN)
 
 
 
 
 
 
 
 
 
 
269
 
270
+ # 3. Initialize Subsystems
271
+ self.limiter = AsyncRateLimiter(CONFIG.REQUESTS_PER_MINUTE)
272
+ self.cache = AsyncLRUCache(CONFIG.CACHE_SIZE)
273
+ self.image_engine = ImageGenerator(self.client)
274
 
275
+ logger.info("⚑ Alchemy Agent Online and Ready.")
276
+
277
+ def _hash_context(self, history: List[dict], message: str) -> str:
278
+ """Creates a deterministic hash of the conversation state"""
279
+ payload = json.dumps(history, sort_keys=True) + message
280
+ return str(uuid.uuid5(uuid.NAMESPACE_DNS, payload))
281
+
282
+ async def chat_stream(self, message: str, history: List[Dict]) -> AsyncGenerator[str, None]:
283
+ """
284
+ The Main Loop.
285
+ 1. Check Rate Limit
286
+ 2. Check Cache
287
+ 3. Stream LLM
288
+ 4. Detect Image Tags -> Generate Images
289
+ 5. Update Cache
290
+ """
291
 
292
+ # --- RATE LIMIT CHECK ---
293
+ if not await self.limiter.acquire():
294
+ yield "⚠️ **System Overload.** The Alchemy Core is cooling down. Please wait 2 seconds."
 
 
 
 
295
  return
296
+
297
+ # --- PREPARE CONTEXT ---
298
+ messages = [{"role": "system", "content": SYSTEM_PROMPT}]
299
 
300
+ # Gradio 5.0 'messages' type passes history as [{'role': 'user', 'content': '...'}, ...]
301
+ # We enforce a sliding window to manage tokens
302
+ context_window = history[-CONFIG.MAX_HISTORY:] if history else []
303
+ messages.extend(context_window)
304
+ messages.append({"role": "user", "content": message})
305
+
306
+ # --- CACHE CHECK ---
307
+ cache_key = self._hash_context(context_window, message)
308
+ cached_response = await self.cache.get(cache_key)
309
 
310
+ if cached_response:
311
+ logger.info(f"⚑ Serving from Cache: {cache_key}")
312
+ yield cached_response + "\n\n*( retrieved from Quantum Cache )*"
313
+ return
314
+
315
+ # --- LLM GENERATION ---
316
+ full_response = ""
317
  try:
318
+ stream = await self.client.chat_completion(
 
319
  messages,
320
+ model=CONFIG.LLM_MODEL,
321
+ max_tokens=CONFIG.MAX_TOKENS,
322
+ temperature=CONFIG.TEMPERATURE,
323
  stream=True
324
  )
325
+
326
+ async for chunk in stream:
327
+ if chunk.choices and chunk.choices[0].delta.content:
328
+ token = chunk.choices[0].delta.content
329
  full_response += token
330
  yield full_response
331
+
332
+ # --- POST-PROCESSING (IMAGES) ---
333
+ image_prompts = AlchemyUtils.extract_image_prompts(full_response)
 
334
 
335
  if image_prompts:
336
+ yield full_response + "\n\n---\n🎨 **Visual Synthesis Initiated...**"
337
 
338
+ # Execute image generation tasks concurrently
339
+ tasks = [self.image_engine.generate(p) for p in image_prompts]
340
+ results = await asyncio.gather(*tasks)
 
 
 
 
 
 
 
 
 
341
 
342
+ status_logs = []
343
+ for img, status in results:
344
+ status_logs.append(status)
345
+ # NOTE: In a true multimodal stream, we would yield the image object.
346
+ # Currently yielding status text to maintain stream integrity.
347
+
348
+ yield full_response + "\n\n" + "\n".join(status_logs)
349
+
350
+ # --- UPDATE CACHE ---
351
+ await self.cache.set(cache_key, full_response)
352
+
353
  except Exception as e:
354
+ logger.error(f"Stream Exception: {e}")
355
+ yield f"❌ **Transmutation Failed:** {str(e)}\n\n*Check the logs for stack trace.*"
356
+
357
+ # === 7. UI COMPONENT ARCHITECTURE ===
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
358
 
359
+ class UIBuilder:
360
+ """Manages the Gradio Interface visual language"""
 
361
 
362
  @staticmethod
363
+ def get_custom_css() -> str:
364
+ return """
365
+ /* ALCHEMY SUITE X - THEME DEFINITION */
366
+ @import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@400;700&family=Inter:wght@300;400;600;800&display=swap');
367
+
368
+ :root {
369
+ --primary: #8b5cf6;
370
+ --secondary: #00d4ff;
371
+ --dark: #0f1115;
372
+ --panel: rgba(30, 41, 59, 0.5);
373
+ }
374
+
375
+ .gradio-container {
376
+ font-family: 'Inter', sans-serif !important;
377
+ max-width: 1400px !important;
378
+ background-color: var(--dark);
379
+ }
380
+
381
+ /* HEADER TYPOGRAPHY */
382
+ .alchemy-title {
383
+ background: linear-gradient(90deg, var(--primary), var(--secondary));
384
+ -webkit-background-clip: text;
385
+ -webkit-text-fill-color: transparent;
386
+ font-weight: 900;
387
+ font-size: 2.5rem;
388
+ margin-bottom: 0.5rem;
389
+ }
390
+
391
+ .alchemy-subtitle {
392
+ color: #94a3b8;
393
+ font-family: 'JetBrains Mono', monospace;
394
+ font-size: 0.9rem;
395
+ }
396
+
397
+ /* STATUS INDICATORS */
398
+ .status-pill {
399
+ display: inline-flex;
400
+ align-items: center;
401
+ padding: 4px 12px;
402
+ border-radius: 9999px;
403
+ font-size: 0.75rem;
404
+ font-weight: 600;
405
+ }
406
+ .status-ready { background: #064e3b; color: #34d399; border: 1px solid #059669; }
407
+ .status-error { background: #7f1d1d; color: #fca5a5; border: 1px solid #dc2626; }
408
+
409
+ /* CHAT BUBBLES */
410
+ .message-row { border-radius: 12px !important; }
411
+ """
412
+
413
  @staticmethod
414
+ def render_header():
415
+ return """
416
+ <div style="text-align: center; margin-bottom: 2rem;">
417
+ <h1 class="alchemy-title">YOOtheme Alchemy Suite X</h1>
418
+ <p class="alchemy-subtitle">AUTONOMOUS ARCHITECT // V.2025.11 // QWEN-32B + FLUX.1</p>
419
+ <div style="margin-top: 1rem;">
420
+ <span class="status-pill status-ready">● SYSTEM OPERATIONAL</span>
421
+ </div>
422
+ </div>
423
+ """
424
+
425
+ # === 8. APPLICATION FACTORY ===
426
 
 
427
  def create_demo() -> gr.Blocks:
428
+ """Builds the Gradio Application"""
429
 
430
+ agent = AlchemyAgent()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
431
 
432
  with gr.Blocks(
433
  theme=gr.themes.Soft(
434
+ primary_hue="violet",
435
+ secondary_hue="cyan",
436
+ neutral_hue="slate",
437
+ ),
438
+ css=UIBuilder.get_custom_css(),
439
+ title="Alchemy Suite X"
440
  ) as demo:
441
 
442
+ # --- HEADER ---
443
+ gr.HTML(UIBuilder.render_header())
 
 
 
 
 
 
 
 
 
444
 
445
+ # --- MAIN WORKSPACE ---
446
+ with gr.Row():
447
+ # Left Column: Chat
448
+ with gr.Column(scale=4):
449
+ # FIX APPLIED HERE:
450
+ # Removed 'retry_btn', 'undo_btn', 'clear_btn' arguments.
451
+ # Gradio 5.0 'messages' type does not support them as keywords.
452
+ chat_interface = gr.ChatInterface(
453
+ fn=agent.chat_stream,
454
+ type="messages",
455
+ examples=[
456
+ {"text": "Create a sticky transparent header with uk-scrollspy and dynamic nav."},
457
+ {"text": "Generate a Custom Element 'BeforeAfter' with strict type schema."},
458
+ {"text": "Build a pricing table with monthly/yearly toggle and animations."},
459
+ {"text": "Generate a hero section with a cyberpunk city background image."},
460
+ ],
461
+ fill_height=True,
462
+ editable=True,
463
+ save_history=True,
464
+ # submit_btn="Transmute", # Removed to prevent conflicts, using default
465
+ )
466
 
467
+ # --- SYSTEM TELEMETRY ---
468
+ with gr.Accordion("βš™οΈ Neural Core Telemetry", open=False):
469
+ with gr.Row():
470
+ with gr.Column():
471
+ gr.Markdown("### 🧠 Logic Engine")
472
+ gr.JSON(
473
+ value={
474
+ "Model": CONFIG.LLM_MODEL,
475
+ "Max Tokens": CONFIG.MAX_TOKENS,
476
+ "Temperature": CONFIG.TEMPERATURE,
477
+ },
478
+ label="LLM Config"
479
+ )
480
+ with gr.Column():
481
+ gr.Markdown("### πŸ‘οΈ Vision Engine")
482
+ gr.JSON(
483
+ value={
484
+ "Model": CONFIG.FLUX_MODEL,
485
+ "Status": "Active",
486
+ },
487
+ label="Flux Config"
488
+ )
489
+ with gr.Column():
490
+ gr.Markdown("### πŸ›‘οΈ System Guardrails")
491
+ gr.JSON(
492
+ value={
493
+ "Rate Limit": f"{CONFIG.REQUESTS_PER_MINUTE} RPM",
494
+ "Cache Capacity": CONFIG.CACHE_SIZE,
495
+ "History Window": CONFIG.MAX_HISTORY
496
+ },
497
+ label="Infrastructure"
498
+ )
499
+
500
+ # --- FOOTER ---
501
+ gr.Markdown(
502
+ """
503
+ <div style="text-align: center; margin-top: 2rem; opacity: 0.5; font-size: 0.8rem;">
504
+ Powered by Hugging Face Inference API Β· Zero-GPU Optimized Β· YOOtheme Pro v4.2 Compatible
505
+ </div>
506
+ """
507
  )
508
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
509
  return demo
510
 
511
+ # === 9. ENTRY POINT ===
512
+
513
  def main():
514
+ """Application Bootstrap"""
515
+ try:
516
+ # Launch Configuration
517
+ demo = create_demo()
518
+
519
+ logger.info("πŸš€ Launching Alchemy Suite X...")
520
+
521
+ demo.queue(
522
+ default_concurrency_limit=10, # Increased concurrency for async handling
523
+ max_size=30
524
+ ).launch(
525
+ server_name="0.0.0.0",
526
+ server_port=7860,
527
+ share=False,
528
+ show_error=True,
529
+ favicon_path=None
530
+ )
531
+ except Exception as e:
532
+ logger.critical(f"πŸ”₯ SYSTEM CRASH: {e}", exc_info=True)
 
 
 
 
 
533
 
534
  if __name__ == "__main__":
535
  main()