diwash-barla committed
Commit 965b251 · verified · 1 Parent(s): 3d5a737

Update app.py

Files changed (1):
  1. app.py +51 -44
app.py CHANGED
@@ -5,31 +5,25 @@ import threading
  import requests
  import subprocess
  import base64
- from flask import Flask, render_template_string, request, jsonify, send_from_directory, abort
  import queue
  from groq import Groq
- import itertools # /// ADDED IMPORT /// - to use the keys in rotation

- # ---------- CONFIG (MODIFIED FOR YOUR KEY NAMES) ----------
- # Set these in Hugging Face Spaces under Settings -> Secrets
-
- # /// MODIFIED SECTION ///
- # This now looks for keys named btz1, btz2, and so on.
  API_KEYS = [os.environ.get(f"btz{i}") for i in range(1, 100) if os.environ.get(f"btz{i}")]
  if not API_KEYS: raise KeyError("No 'btzN' environment variables found (e.g., btz1, btz2). Please set them in Hugging Face secrets.")

- GROQ_API_KEY = os.environ.get("GROQ_KEY") # it is assumed you have a single key named GROQ_KEY
  if not GROQ_API_KEY: raise KeyError("GROQ_KEY environment variable not found.")

- # /// MODIFIED SECTION ///
- # This now looks for gmni1, gmni2, etc. and uses them in rotation.
  GEMINI_API_KEYS = [os.environ.get(f"gmni{i}") for i in range(1, 100) if os.environ.get(f"gmni{i}")]
  if not GEMINI_API_KEYS: raise KeyError("No 'gmniN' environment variables found (e.g., gmni1, gmni2). Please set them in Hugging Face secrets.")
- gemini_key_cycler = itertools.cycle(GEMINI_API_KEYS) # to pick keys in rotation

  DEFAULT_BYTEZ_MODEL = "ali-vilab/text-to-video-ms-1.7b"

- # ---------- MODEL HUNTER FUNCTIONS (No changes here) ----------
  def find_best_groq_model(api_key):
  try:
  print("🤖 Hunting for the best Groq model...")
@@ -59,9 +53,8 @@ def find_best_gemini_vision_model(api_key):
  raise ValueError("No usable Gemini Vision models found.")
  except Exception as e: print(f"🛑 Gemini hunt failed: {e}. Using hardcoded fallback."); return "gemini-1.5-flash-latest"

- # ---------- INITIALIZATION (Modified) ----------
  GROQ_MODEL = find_best_groq_model(GROQ_API_KEY)
- # /// MODIFIED SECTION /// - We now check with the first Gemini key
  GEMINI_VISION_MODEL = find_best_gemini_vision_model(GEMINI_API_KEYS[0])
  print(f"✅ Loaded {len(API_KEYS)} Bytez keys and {len(GEMINI_API_KEYS)} Gemini keys. Using Groq: {GROQ_MODEL} and Gemini: {GEMINI_VISION_MODEL}")

@@ -69,14 +62,14 @@ OUTPUT_FOLDER = "output"
  os.makedirs(OUTPUT_FOLDER, exist_ok=True)
  os.makedirs("static", exist_ok=True)

- # ---------- APP & STATE (No changes here) ----------
  app = Flask(__name__)
  progress = { "active": False, "step": 0, "total": 0, "status": "idle", "message": "", "error": None, "video_relpath": None, "live_log": [] }
  job_queue = queue.Queue()
  generated_clips_dict = {}
  clips_lock = threading.Lock()

- # ---------- HELPER FUNCTIONS (No changes in these functions themselves) ----------
  def set_progress(log_message=None, **kwargs):
  global progress
  with threading.Lock():
@@ -120,27 +113,46 @@ def generate_visual_blueprint_with_groq(user_prompt, api_key, model_name):
  return visual_blueprint, None
  except Exception as e: return None, f"Groq API Error: {e}"

  def generate_clip(prompt, idx, api_key, bytez_model):
  from bytez import Bytez
  sdk = Bytez(api_key)
  model = sdk.model(bytez_model)
-
- # --- MODIFIED SECTION TO FIX THE CRASH ---
  try:
- # On success, we expect model.run() to return just one output.
  out = model.run(prompt)
- err = None # If this line runs, there was no error.
  except Exception as e:
- # Any error raised during model.run() is caught here.
  print(f"🛑 Error during model.run() with key ...{api_key[-4:]}: {e}")
  out = None
- err = str(e) # Save the error message.
- # --- END OF MODIFIED SECTION ---
-
  if err:
- # If there was an error, return from here.
  return None, f"Model Error (Key ...{api_key[-4:]}): {err}"
-
  filename = f"clip_{idx}_{uuid.uuid4().hex}.mp4"
  filepath = os.path.join(OUTPUT_FOLDER, filename)
  try:
@@ -154,7 +166,6 @@ def generate_clip(prompt, idx, api_key, bytez_model):
  return None, f"Unexpected output type from model: {type(out)}"
  except Exception as e:
  return None, f"Failed to save or download the generated clip: {e}"
-
  return filepath, None

  def process_and_merge_clips(clip_files):
@@ -217,18 +228,11 @@ def generate_video_job(prompt, num_clips, bytez_model):
  for clip in temp_clip_paths:
  if os.path.exists(clip): os.remove(clip)

- # ---------- UI HTML (No changes here) ----------
- HTML_TEMPLATE = """
- <!doctype html><html lang="en" data-bs-theme="dark"><head><meta charset="utf-8"><title>AI Video Generator</title><meta name="viewport" content="width=device-width, initial-scale=1"><meta name="theme-color" content="#111111"><link rel="manifest" href="/manifest.json"><link rel="apple-touch-icon" href="/static/icons/icon-512x512.png"><link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet"><style>:root { --bs-body-bg: #111; --bs-body-color: #eee; } .card { background: #1a1a1a; border: 1px solid #333; border-radius: 1rem; } .btn-primary { background-color: #0d6efd; border-color: #0d6efd; } .form-control, .form-select { background-color: #222; border-color: #444; color: #eee; } .form-control:focus, .form-select:focus { background-color: #222; border-color: #0d6efd; box-shadow: 0 0 0 .25rem rgba(13, 110, 253, .25); color: #eee; } video { border-radius: 0.75rem; background-color: #000; } .container { max-width: 720px; } .page { display: none; } .page.active { display: block; } .form-label h5 { margin-bottom: 0.5rem; } #liveLog { background-color: #000; border: 1px solid #333; border-radius: 0.5rem; height: 150px; overflow-y: auto; font-family: monospace; font-size: 0.9em; padding: 10px; white-space: pre-wrap; margin-top: 1rem; text-align: left; } #liveLog p { margin: 0; padding: 2px 0; } </style></head><body><div class="container py-4"><div id="inputPage" class="page active"><h2 class="text-center mb-4">🎬 AI Video Generator</h2><div class="card p-4"><div class="mb-3"><label for="prompt" class="form-label"><h5>1. Describe your video (or use an image)</h5></label><textarea id="prompt" class="form-control" rows="3" placeholder="e.g., A robot walking through a neon-lit city in the rain"></textarea></div><div class="mb-3"><label for="imageUpload" class="form-label"><h5>2. Upload Image (Optional)</h5></label><input type="file" id="imageUpload" class="form-control" accept="image/png, image/jpeg"></div><div class="row g-3"><div class="col-md-6 mb-3"><label for="num_clips" class="form-label"><h5>3. Number of clips</h5></label><input id="num_clips" type="number" class="form-control" min="1" max="20" value="3"></div><div class="col-md-6 mb-3"><label for="style" class="form-label"><h5>4. Style</h5></label><select id="style" class="form-select"><option value="none" selected>Default</option><option value="cinematic">Cinematic</option><option value="cartoon">Cartoon</option><option value="realistic">Realistic</option><option value="minimalist">Minimalist</option></select></div></div><div class="mb-3"><label for="bytezModelCustom" class="form-label"><h5>5. 
Custom AI Model (Optional)</h5></label><input type="text" id="bytezModelCustom" class="form-control" placeholder="Default: ali-vilab/text-to-video-ms-1.7b"></div><div class="d-grid mt-2"><button id="startBtn" class="btn btn-primary btn-lg">🚀 Generate Video</button></div></div><div id="inputError" class="text-danger mt-3 text-center d-none"></div></div><div id="progressPage" class="page"><div class="card p-4 text-center"><div id="progressBox"><h3 class="mb-3">Generating your video...</h3><div class="d-flex justify-content-center align-items-center mb-3"><div class="spinner-border me-3" role="status"></div><div class="mono" id="statusText" style="font-size: 1.1rem;">Initializing...</div></div><div class="progress" role="progressbar" style="height: 10px"><div id="bar" class="progress-bar progress-bar-striped progress-bar-animated" style="width: 2%"></div></div><div id="liveLogContainer"><h6 class="text-secondary mt-3">Mission Control Log:</h6><div id="liveLog"></div></div></div><div id="errorBox" class="d-none"><h3 class="text-danger">An Error Occurred</h3><p id="errorMessage" class="mono bg-dark p-3 rounded"></p><button class="btn btn-secondary" onclick="goHomeAndReset()">Try Again</button></div><div id="resultBox" class="d-none"><h3 class="mb-3">✅ Your video is ready!</h3><video id="player" width="100%" controls playsinline></video><div class="d-grid gap-2 mt-3"><a id="dl" class="btn btn-success btn-lg" download>⬇️ Download Video</a><button class="btn btn-outline-secondary" onclick="goHomeAndReset()">Create Another Video</button></div></div></div></div></div>
- <script>
- const bytezModelCustomEl = document.getElementById("bytezModelCustom"),startBtn=document.getElementById("startBtn"),promptEl=document.getElementById("prompt"),clipsEl=document.getElementById("num_clips"),styleEl=document.getElementById("style"),imageUploadEl=document.getElementById("imageUpload"),liveLogEl=document.getElementById("liveLog"),inputPage=document.getElementById("inputPage"),progressPage=document.getElementById("progressPage"),progressBox=document.getElementById("progressBox"),statusText=document.getElementById("statusText"),bar=document.getElementById("bar"),errorBox=document.getElementById("errorBox"),errorMessage=document.getElementById("errorMessage"),inputError=document.getElementById("inputError"),resultBox=document.getElementById("resultBox"),player=document.getElementById("player"),dl=document.getElementById("dl");let pollTimer=null;function showPage(e){document.querySelectorAll(".page").forEach(e=>e.classList.remove("active")),document.getElementById(e).classList.add("active")}function resetUI(){inputError.classList.add("d-none"),errorBox.classList.add("d-none"),resultBox.classList.add("d-none"),progressBox.style.display="block",bar.style.width="2%",startBtn.disabled=!1,imageUploadEl.value=null,promptEl.value="",liveLogEl.innerHTML="",bytezModelCustomEl.value=""}function goHomeAndReset(){resetUI(),showPage("inputPage")}async function startJob(){inputError.classList.add("d-none"),liveLogEl.innerHTML="";const e=promptEl.value.trim(),t=imageUploadEl.files[0];if(!e&&!t)return inputError.textContent="Please provide a text prompt or upload an image.",void inputError.classList.remove("d-none");startBtn.disabled=!0,showPage("progressPage");try{const a=new FormData;a.append("prompt",e),a.append("num_clips",parseInt(clipsEl.value,10)),a.append("style",styleEl.value),t&&a.append("image",t);const o=bytezModelCustomEl.value.trim();a.append("bytez_model",o);const n=await fetch("/start",{method:"POST",body:a}),s=await n.json();if(!n.ok)throw new Error(s.error||"Failed to start job.");pollTimer=setInterval(pollProgress,1500)}catch(l){errorMessage.textContent=l.message,progressBox.style.display="none",errorBox.classList.remove("d-none"),startBtn.disabled=!1}}async function pollProgress(){try{const e=await fetch("/progress"),t=await e.json(),a=Math.max(1,t.total),o=Math.min(100,Math.floor(t.step/a*100));if(bar.style.width=o+"%",statusText.textContent=t.message,t.live_log&&(liveLogEl.innerHTML=t.live_log.map(e=>`<p>${e}</p>`).join(""),liveLogEl.scrollTop=liveLogEl.scrollHeight),t.status==="done"&&t.video_relpath){clearInterval(pollTimer);const n=`/${t.video_relpath.replace(/\\\\/g,"/")}`;player.src=n,dl.href=n,dl.download=`ai_video_${Date.now()}.mp4`,progressBox.style.display="none",resultBox.classList.remove("d-none")}else if(t.status==="error"){clearInterval(pollTimer),errorMessage.textContent=t.error||"An unknown error occurred.",progressBox.style.display="none",errorBox.classList.remove("d-none")}}catch(r){clearInterval(pollTimer),errorMessage.textContent="Connection to server lost. Please try again.",progressBox.style.display="none",errorBox.classList.remove("d-none")}}startBtn.addEventListener("click",startJob);if('serviceWorker' in navigator){window.addEventListener('load',()=>{navigator.serviceWorker.register('/service-worker.js').then(reg=>console.log('Service worker registered.')).catch(err=>console.log('Service worker registration failed: ',err));});}
- </script>
- </body></html>"""
-
- # ---------- FLASK ROUTES (Modified start route) ----------
  @app.route("/", methods=["GET"])
  def home():
- return render_template_string(HTML_TEMPLATE)

  @app.route("/start", methods=["POST"])
  def start():
@@ -250,26 +254,29 @@ def start():
  image_data = image_file.read()
  mime_type = image_file.mimetype
  set_progress(status="running", message="Initializing...", error=None, active=True, step=0, log_message="🧠 Director (Gemini) analyzing image...")
-
- # /// MODIFIED SECTION ///
- # Select the next available Gemini key
  selected_gemini_key = next(gemini_key_cycler)
  gemini_prompt, err = get_prompt_from_gemini(image_data, user_prompt, mime_type, selected_gemini_key)
-
  if err:
  set_progress(status="error", error=err, active=False, log_message=f"🛑 Gemini Failure: {err}"); return jsonify({"error": err}), 500
  initial_prompt = gemini_prompt
  set_progress(log_message=f"🎬 Gemini's Idea: \"{initial_prompt[:80]}...\"")
  except Exception as e:
  err_msg = f"Failed to process image: {e}"; set_progress(status="error", error=err_msg, active=False, log_message=f"🛑 Image Error: {e}"); return jsonify({"error": err_msg}), 500
  current_step = 1 if is_image_job else 0
  set_progress(status="running", message="Creating blueprint...", active=True, step=current_step, log_message="🎨 Quality Enhancer (Groq) creating blueprint...")
  visual_blueprint, err = generate_visual_blueprint_with_groq(initial_prompt, GROQ_API_KEY, GROQ_MODEL)
  if err:
- set_progress(status="error", error=err, active=False, log_message=f"🛑 Groq Failure: {err}"); return jsonify({"error": err}), 500
- set_progress(log_message=f"✨ Final Blueprint: \"{visual_blueprint[:80]}...\"")
  negative_keywords = "blurry, deformed, ugly, bad anatomy, watermark, noise, grain, low quality, distortion, glitch, pixelated, artifacts"
- final_prompt = f"{visual_blueprint}, {negative_keywords}"
  print(f"🚀 Final Prompt for Bytez (Model: {bytez_model}): {final_prompt}")
  job_thread = threading.Thread(target=generate_video_job, args=(final_prompt, num_clips, bytez_model), daemon=True)
  job_thread.start()
 
  import requests
  import subprocess
  import base64
+ from flask import Flask, render_template, request, jsonify, send_from_directory, abort # 'render_template' added and 'render_template_string' removed
  import queue
  from groq import Groq
+ import itertools

+ # ---------- CONFIG ----------
  API_KEYS = [os.environ.get(f"btz{i}") for i in range(1, 100) if os.environ.get(f"btz{i}")]
  if not API_KEYS: raise KeyError("No 'btzN' environment variables found (e.g., btz1, btz2). Please set them in Hugging Face secrets.")

+ GROQ_API_KEY = os.environ.get("GROQ_KEY")
  if not GROQ_API_KEY: raise KeyError("GROQ_KEY environment variable not found.")

  GEMINI_API_KEYS = [os.environ.get(f"gmni{i}") for i in range(1, 100) if os.environ.get(f"gmni{i}")]
  if not GEMINI_API_KEYS: raise KeyError("No 'gmniN' environment variables found (e.g., gmni1, gmni2). Please set them in Hugging Face secrets.")
+ gemini_key_cycler = itertools.cycle(GEMINI_API_KEYS)

  DEFAULT_BYTEZ_MODEL = "ali-vilab/text-to-video-ms-1.7b"
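A minimal sketch (not part of the commit) of how the numbered gmniN secrets and itertools.cycle behave; the key values below are placeholders:

import itertools
import os

# Hypothetical secrets, standing in for Hugging Face Space secrets gmni1, gmni2, ...
os.environ.setdefault("gmni1", "key-one")
os.environ.setdefault("gmni2", "key-two")

# Same discovery pattern as the commit: collect gmni1..gmni99 that are actually set.
keys = [os.environ.get(f"gmni{i}") for i in range(1, 100) if os.environ.get(f"gmni{i}")]
cycler = itertools.cycle(keys)

# Each request grabs the next key, wrapping around when the list is exhausted.
print(next(cycler))  # key-one
print(next(cycler))  # key-two
print(next(cycler))  # key-one again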
 
+ # ---------- MODEL HUNTER FUNCTIONS ----------
  def find_best_groq_model(api_key):
  try:
  print("🤖 Hunting for the best Groq model...")

  raise ValueError("No usable Gemini Vision models found.")
  except Exception as e: print(f"🛑 Gemini hunt failed: {e}. Using hardcoded fallback."); return "gemini-1.5-flash-latest"

+ # ---------- INITIALIZATION ----------
  GROQ_MODEL = find_best_groq_model(GROQ_API_KEY)
  GEMINI_VISION_MODEL = find_best_gemini_vision_model(GEMINI_API_KEYS[0])
  print(f"✅ Loaded {len(API_KEYS)} Bytez keys and {len(GEMINI_API_KEYS)} Gemini keys. Using Groq: {GROQ_MODEL} and Gemini: {GEMINI_VISION_MODEL}")

  os.makedirs(OUTPUT_FOLDER, exist_ok=True)
  os.makedirs("static", exist_ok=True)

+ # ---------- APP & STATE ----------
  app = Flask(__name__)
  progress = { "active": False, "step": 0, "total": 0, "status": "idle", "message": "", "error": None, "video_relpath": None, "live_log": [] }
  job_queue = queue.Queue()
  generated_clips_dict = {}
  clips_lock = threading.Lock()

+ # ---------- HELPER FUNCTIONS ----------
  def set_progress(log_message=None, **kwargs):
  global progress
  with threading.Lock():
 
  return visual_blueprint, None
  except Exception as e: return None, f"Groq API Error: {e}"

+ def distill_prompt_for_short_video(enhanced_prompt, api_key, model_name):
+ print("🧪 Contacting Groq (Prompt Distiller)...")
+ try:
+ client = Groq(api_key=api_key)
+ system_prompt = """You are an expert video editor who specializes in ultra-short clips (3-4 seconds).
+ Your task is to take a long, hyper-detailed prompt and distill it into a concise, powerful version perfect for a brief video.
+ RULES:
+ 1. Identify the single most important subject and a very brief, clear action.
+ 2. Focus the entire prompt on ONE dynamic moment or a powerful, static scene. Do not describe multiple complex actions.
+ 3. Condense descriptive keywords. Instead of 'photorealistic, 4k, ultra realistic, hyper-detailed', choose the best two, like 'cinematic, hyper-detailed'.
+ 4. The final prompt must be significantly shorter but retain the core artistic vision.
+ Output ONLY the final, short, distilled prompt.
+ """
+ response = client.chat.completions.create(
+ model=model_name,
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": enhanced_prompt}
+ ]
+ )
+ distilled_prompt = response.choices[0].message.content.strip()
+ print(f"✨ Distiller's final prompt: {distilled_prompt}")
+ return distilled_prompt, None
+ except Exception as e:
+ print(f"⚠️ Groq Distiller failed: {e}. Falling back to original enhanced prompt.")
+ return enhanced_prompt, None
+
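An illustrative call to the new distiller outside Flask, assuming the GROQ_API_KEY and GROQ_MODEL globals defined earlier in app.py; the blueprint text is made up:

# Illustrative only: exercising distill_prompt_for_short_video() directly.
long_blueprint = (
    "A photorealistic, 4k, ultra realistic, hyper-detailed robot walking through "
    "a neon-lit city at night while rain falls and crowds move past storefronts"
)
short_prompt, err = distill_prompt_for_short_video(long_blueprint, GROQ_API_KEY, GROQ_MODEL)
# On any Groq error the function logs a warning and returns the original
# enhanced prompt, so err is always None and the pipeline never hard-fails here.
print(short_prompt)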
  def generate_clip(prompt, idx, api_key, bytez_model):
  from bytez import Bytez
  sdk = Bytez(api_key)
  model = sdk.model(bytez_model)
  try:
  out = model.run(prompt)
+ err = None
  except Exception as e:
  print(f"🛑 Error during model.run() with key ...{api_key[-4:]}: {e}")
  out = None
+ err = str(e)
  if err:
  return None, f"Model Error (Key ...{api_key[-4:]}): {err}"
  filename = f"clip_{idx}_{uuid.uuid4().hex}.mp4"
  filepath = os.path.join(OUTPUT_FOLDER, filename)
  try:

  return None, f"Unexpected output type from model: {type(out)}"
  except Exception as e:
  return None, f"Failed to save or download the generated clip: {e}"
  return filepath, None
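A hypothetical caller-side helper (not in this commit) showing how the (filepath, error) return of generate_clip() could be used to fall back to the next btzN key instead of crashing:

# Hypothetical helper: try each loaded Bytez key in turn until one clip succeeds.
# After this commit, generate_clip() no longer raises on model failures; it
# returns (None, error_message), which is what makes this loop possible.
def generate_clip_with_fallback(prompt, idx, bytez_model):
    last_err = "no Bytez keys available"
    for key in API_KEYS:  # API_KEYS is the btzN list built at startup
        filepath, err = generate_clip(prompt, idx, key, bytez_model)
        if filepath:
            return filepath, None
        last_err = err
        print(f"⚠️ Key ...{key[-4:]} failed, trying next key: {err}")
    return None, last_err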
 
  def process_and_merge_clips(clip_files):

  for clip in temp_clip_paths:
  if os.path.exists(clip): os.remove(clip)

+ # ---------- FLASK ROUTES ----------
  @app.route("/", methods=["GET"])
  def home():
+ # This now renders the 'index.html' file from the 'templates' folder.
+ return render_template("index.html")
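Since home() now uses render_template, the markup that previously lived in HTML_TEMPLATE needs to exist as templates/index.html (Flask's default template folder). A hedged fail-fast check, not part of the commit, that surfaces a missing file at startup:

# Hypothetical check: render_template("index.html") raises TemplateNotFound at
# request time if the file was never created, so failing early is clearer.
import os

if not os.path.exists(os.path.join("templates", "index.html")):
    raise FileNotFoundError(
        "templates/index.html is missing; move the old HTML_TEMPLATE markup there "
        "so the '/' route can render it."
    )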
 
  @app.route("/start", methods=["POST"])
  def start():
 
  image_data = image_file.read()
  mime_type = image_file.mimetype
  set_progress(status="running", message="Initializing...", error=None, active=True, step=0, log_message="🧠 Director (Gemini) analyzing image...")
  selected_gemini_key = next(gemini_key_cycler)
  gemini_prompt, err = get_prompt_from_gemini(image_data, user_prompt, mime_type, selected_gemini_key)
  if err:
  set_progress(status="error", error=err, active=False, log_message=f"🛑 Gemini Failure: {err}"); return jsonify({"error": err}), 500
  initial_prompt = gemini_prompt
  set_progress(log_message=f"🎬 Gemini's Idea: \"{initial_prompt[:80]}...\"")
  except Exception as e:
  err_msg = f"Failed to process image: {e}"; set_progress(status="error", error=err_msg, active=False, log_message=f"🛑 Image Error: {e}"); return jsonify({"error": err_msg}), 500
+
  current_step = 1 if is_image_job else 0
  set_progress(status="running", message="Creating blueprint...", active=True, step=current_step, log_message="🎨 Quality Enhancer (Groq) creating blueprint...")
  visual_blueprint, err = generate_visual_blueprint_with_groq(initial_prompt, GROQ_API_KEY, GROQ_MODEL)
  if err:
+ set_progress(status="error", error=err, active=False, log_message=f"🛑 Groq Enhancer Failure: {err}"); return jsonify({"error": err}), 500
+ set_progress(log_message=f"✨ Enhanced Blueprint: \"{visual_blueprint[:80]}...\"")
+
+ set_progress(log_message="🧪 Distilling prompt for short video clip...")
+ distilled_prompt, err = distill_prompt_for_short_video(visual_blueprint, GROQ_API_KEY, GROQ_MODEL)
+ set_progress(log_message=f"🎯 Final Distilled Prompt: \"{distilled_prompt[:80]}...\"")
+
  negative_keywords = "blurry, deformed, ugly, bad anatomy, watermark, noise, grain, low quality, distortion, glitch, pixelated, artifacts"
+ final_prompt = f"{distilled_prompt}, {negative_keywords}"
+
  print(f"🚀 Final Prompt for Bytez (Model: {bytez_model}): {final_prompt}")
  job_thread = threading.Thread(target=generate_video_job, args=(final_prompt, num_clips, bytez_model), daemon=True)
  job_thread.start()
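For local testing, a hedged smoke test of the modified /start flow; the form field names come from the FormData built by the front-end script, while the port and base URL are assumptions:

# Hedged smoke test: post a text-only job and poll progress once.
# Port 7860 is an assumption (typical for a Hugging Face Space run locally).
import requests

resp = requests.post(
    "http://localhost:7860/start",
    data={
        "prompt": "A robot walking through a neon-lit city in the rain",
        "num_clips": "3",
        "style": "cinematic",
        "bytez_model": "ali-vilab/text-to-video-ms-1.7b",  # the default model id
    },
)
print(resp.status_code, resp.json())
print(requests.get("http://localhost:7860/progress").json())  # same endpoint the UI polls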