prithivMLmods committed (verified)
Commit ee4b2e1 · 1 Parent(s): bbc3deb

Update app.py

Files changed (1):
  1. app.py +31 -10
app.py CHANGED
@@ -9,6 +9,7 @@ from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
+# --- Theme Configuration ---
 colors.orange_red = colors.Color(
     name="orange_red",
     c50="#FFF0E5",
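Note: the added line in this hunk is only a section label. For context, the block it labels registers a custom palette on Gradio's color registry, which the Soft subclass later consumes as a hue; a minimal sketch of that pattern follows. Only c50 comes from the diff, every other hex value is a placeholder, and the __init__ shown is a simplification of the real OrangeRedTheme.

from gradio.themes import Soft
from gradio.themes.utils import colors

# Register the custom palette so it can be used like a built-in hue.
# Only c50 is from the commit; the remaining shades are placeholders.
colors.orange_red = colors.Color(
    name="orange_red",
    c50="#FFF0E5", c100="#FFE0CC", c200="#FFC299", c300="#FFA366",
    c400="#FF8533", c500="#FF6600", c600="#CC5200", c700="#993D00",
    c800="#662900", c900="#331400", c950="#1A0A00",
)

class OrangeRedTheme(Soft):
    # Simplified: the real theme presumably overrides more attributes.
    def __init__(self, *, primary_hue=colors.orange_red, **kwargs):
        super().__init__(primary_hue=primary_hue, **kwargs)

orange_red_theme = OrangeRedTheme()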
@@ -77,6 +78,7 @@ class OrangeRedTheme(Soft):
 
 orange_red_theme = OrangeRedTheme()
 
+# --- Device Setup ---
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 dtype = torch.bfloat16
 
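Note: dtype is pinned to torch.bfloat16 even on the CPU fallback path, which quietly assumes a CUDA host. The next hunk labels the model-loading block but cuts the from_pretrained() call off at the hunk boundary; below is a hedged sketch of the conventional loading pattern, assuming the repo's custom QwenImageEditPlusPipeline keeps the standard diffusers from_pretrained()/.to() interface. The torch_dtype kwarg is an assumption, not a line from this commit.

import torch
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.bfloat16

# Assumed diffusers-style loading; torch_dtype and .to(device) are
# DiffusionPipeline conventions, not lines shown in this commit.
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    torch_dtype=dtype,
).to(device)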
@@ -85,6 +87,7 @@ from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
+# --- Model Loading ---
 print("Loading Qwen Image Edit Pipeline...")
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
@@ -214,16 +217,30 @@ def infer(
 
     width, height = update_dimensions_on_upload(img1_pil)
 
-    result = pipe(
-        image=[img1_pil, img2_pil],
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        height=height,
-        width=width,
-        num_inference_steps=steps,
-        generator=generator,
-        true_cfg_scale=guidance_scale,
-    ).images[0]
+    # --- Fix: Explicit Memory Management ---
+    # Clear cache before starting the heavy inference process
+    torch.cuda.empty_cache()
+
+    try:
+        # Use no_grad to prevent gradient calculation and save memory
+        with torch.no_grad():
+            result = pipe(
+                image=[img1_pil, img2_pil],
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                height=height,
+                width=width,
+                num_inference_steps=steps,
+                generator=generator,
+                true_cfg_scale=guidance_scale,
+            ).images[0]
+    except Exception as e:
+        # If an error occurs, ensure we still clear cache before raising
+        torch.cuda.empty_cache()
+        raise e
+
+    # Clear cache after inference is done
+    torch.cuda.empty_cache()
 
     return result, seed
 
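Note: this hunk is the substance of the commit. The pipeline call is wrapped in torch.no_grad() so no autograd graph is retained, and torch.cuda.empty_cache() runs before inference, after inference, and on the error path. Since the except branch exists only to repeat the cleanup before re-raising, the same guarantee is conventionally written with try/finally. A hypothetical refactor (not part of this commit; cuda_cache_guard is an invented name) could factor the whole guard into a context manager:

import torch
from contextlib import contextmanager

@contextmanager
def cuda_cache_guard():
    """Clear the CUDA cache before and after the wrapped block,
    including when the block raises."""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    try:
        yield
    finally:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

# Usage mirroring the hunk:
#   with cuda_cache_guard(), torch.no_grad():
#       result = pipe(...).images[0]

On PyTorch 1.9+, torch.inference_mode() is a slightly stricter and faster drop-in for torch.no_grad() in code that never needs gradients.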
 
@@ -231,6 +248,10 @@ def infer(
 def infer_example(image_1, image_2, prompt, lora_adapter):
     if image_1 is None or image_2 is None:
         return None, 0
+
+    # Optional: Clear cache before example inference as well
+    torch.cuda.empty_cache()
+
     result, seed = infer(
         image_1.convert("RGB"),
         image_2.convert("RGB"),
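Note: the cache clear in infer_example is labeled "Optional" in the commit itself, presumably because infer() now clears the cache on entry anyway; the early (None, 0) return keeps the handler's two outputs (image, seed) well-formed when either example image is missing.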
 