Elea Zhong committed on
Commit
912c174
·
1 Parent(s): 1201e03

update app

Browse files
Files changed (1) hide show
  1. app.py +58 -19
app.py CHANGED
@@ -13,25 +13,64 @@ import spaces
13
 
14
  import subprocess
15
  GIT_TOKEN = os.environ.get("GIT_TOKEN")
16
- subprocess.run(f"pip install git+https://eleazhong:{GIT_TOKEN}@github.com/wand-ai/wand-ml", shell=True)
17
 
18
- from qwenimage.datamodels import QwenConfig
19
- from qwenimage.debug import ctimed, ftimed
20
- from qwenimage.experiments.experiments_qwen import ExperimentRegistry
21
- from qwenimage.finetuner import QwenLoraFinetuner
22
- from qwenimage.foundation import QwenImageFoundation
23
- from qwenimage.prompt import build_camera_prompt
24
 
25
- # --- Model Loading ---
 
 
 
 
 
 
 
 
 
26
 
27
- foundation = QwenImageFoundation(QwenConfig(
28
- vae_image_size=1024 * 1024,
29
- regression_base_pipe_steps=4,
30
- ))
31
- finetuner = QwenLoraFinetuner(foundation, foundation.config)
32
- finetuner.load("checkpoints/reg-mse-pixel-lpips_005000", lora_rank=32)
33
 
 
 
 
 
 
 
 
34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
  MAX_SEED = np.iinfo(np.int32).max
37
 
@@ -70,16 +109,16 @@ def run_pipe(
70
 
71
  print(f"{len(pil_images)=}")
72
 
73
- finetuner.enable()
74
- foundation.scheduler.config["base_shift"] = shift
75
- foundation.scheduler.config["max_shift"] = shift
76
 
77
- result = foundation.base_pipe(foundation.INPUT_MODEL(
78
  image=pil_images,
79
  prompt=prompt,
80
  num_inference_steps=num_inference_steps,
81
  generator=generator,
82
- ))[0]
83
 
84
  return result, seed
85
 
 
13
 
14
  import subprocess
15
  GIT_TOKEN = os.environ.get("GIT_TOKEN")
16
+ import subprocess
17
 
18
+ cmd = [
19
+ "pip",
20
+ "install",
21
+ "git+https://eleazhong:${GIT_TOKEN}@github.com/wand-ai/wand-ml",
22
+ ]
 
23
 
24
+ # If GIT_TOKEN is a Python variable, build the string in Python instead:
25
+ # cmd = f"pip install git+https://eleazhong:{GIT_TOKEN}@github.com/wand-ai/wand-ml"
26
+
27
+ proc = subprocess.Popen(
28
+ cmd,
29
+ stdout=subprocess.PIPE,
30
+ stderr=subprocess.STDOUT,
31
+ text=True, # or encoding="utf-8" on older Python
32
+ bufsize=1,
33
+ )
34
 
35
+ for line in proc.stdout:
36
+ print(line, end="") # already has newline
 
 
 
 
37
 
38
+ proc.wait()
39
+ print("Exit code:", proc.returncode)
40
+
41
+ from qwenimage.debug import ctimed
42
+ from qwenimage.foundation import QwenImageEditPlusPipeline, QwenImageTransformer2DModel
43
+
44
+ # --- Model Loading ---
45
 
46
+ # foundation = QwenImageFoundation(QwenConfig(
47
+ # vae_image_size=1024 * 1024,
48
+ # regression_base_pipe_steps=4,
49
+ # ))
50
+ # finetuner = QwenLoraFinetuner(foundation, foundation.config)
51
+ # finetuner.load("checkpoints/reg-mse-pixel-lpips_005000", lora_rank=32)
52
+
53
+
54
+ dtype = torch.bfloat16
55
+ device = "cuda" if torch.cuda.is_available() else "cpu"
56
+
57
+
58
+ pipe = QwenImageEditPlusPipeline.from_pretrained(
59
+ "Qwen/Qwen-Image-Edit-2509",
60
+ transformer=QwenImageTransformer2DModel.from_pretrained(
61
+ "Qwen/Qwen-Image-Edit-2509",
62
+ subfolder='transformer',
63
+ torch_dtype=dtype,
64
+ device_map=device
65
+ ),
66
+ torch_dtype=dtype,
67
+ )
68
+ pipe = pipe.to(device=device, dtype=dtype)
69
+ pipe.load_lora_weights(
70
+ "checkpoints/distill_5k_lora.safetensors",
71
+ adapter_name="fast_5k",
72
+ )
73
+ # pipe.unload_lora_weights()
74
 
75
  MAX_SEED = np.iinfo(np.int32).max
76
 
 
109
 
110
  print(f"{len(pil_images)=}")
111
 
112
+ # finetuner.enable()
113
+ pipe.scheduler.config["base_shift"] = shift
114
+ pipe.scheduler.config["max_shift"] = shift
115
 
116
+ result = pipe(
117
  image=pil_images,
118
  prompt=prompt,
119
  num_inference_steps=num_inference_steps,
120
  generator=generator,
121
+ ).images[0]
122
 
123
  return result, seed
124