Update app.py
app.py
CHANGED
@@ -103,11 +103,6 @@ os.environ['PYTORCH_NO_CUDA_MEMORY_CACHING'] = '1'
 # Prevent CUDA initialization
 torch.set_grad_enabled(False)
 
-
-
-# Add periodic cleanup to the Gradio interface
-demo.load(periodic_cleanup, every=5)  # clean up every 5 seconds
-
 # Hugging Face token setup
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
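The removed lines called demo.load(periodic_cleanup, every=5) at module level, before the Gradio Blocks object `demo` is created further down in app.py (the css block only appears near the end of the file), so the call would fail at import time. If the periodic cleanup is still wanted, it has to be registered once the interface exists. A minimal sketch, assuming the app's existing periodic_cleanup helper and a Gradio version that supports the `every=` polling argument:

import gradio as gr

with gr.Blocks(css=css) as demo:
    ...  # existing UI definition

    # re-run periodic_cleanup every 5 seconds while a client is connected
    demo.load(periodic_cleanup, inputs=None, outputs=None, every=5)

demo.launch()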
@@ -246,12 +241,12 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
         formats=["gaussian", "mesh"],
         preprocess_image=False,
         sparse_structure_sampler_params={
-            "steps": min(ss_sampling_steps, 8),
+            "steps": min(ss_sampling_steps, 8),
             "cfg_strength": ss_guidance_strength,
-            "batch_size": 1
+            "batch_size": 1
         },
         slat_sampler_params={
-            "steps": min(slat_sampling_steps, 8),
+            "steps": min(slat_sampling_steps, 8),
             "cfg_strength": slat_guidance_strength,
             "batch_size": 1
         },
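These sampler dictionaries are arguments to the TRELLIS image-to-3D pipeline call inside image_to_3d; sampling steps are capped at 8 to bound GPU time per request. A sketch of the surrounding call, assuming it follows the stock TRELLIS demo (g.trellis_pipeline is this app's global pipeline instance; image and seed come from earlier in the function and are not shown in the diff):

outputs = g.trellis_pipeline.run(
    image,
    seed=seed,
    formats=["gaussian", "mesh"],
    preprocess_image=False,
    sparse_structure_sampler_params={
        "steps": min(ss_sampling_steps, 8),    # cap sparse-structure sampling at 8 steps
        "cfg_strength": ss_guidance_strength,
        "batch_size": 1,
    },
    slat_sampler_params={
        "steps": min(slat_sampling_steps, 8),  # cap structured-latent sampling at 8 steps
        "cfg_strength": slat_guidance_strength,
        "batch_size": 1,
    },
)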
@@ -263,8 +258,8 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
         # Video rendering optimization
         video = render_utils.render_video(
             outputs['gaussian'][0],
-            num_frames=30,
-            resolution=384
+            num_frames=30,
+            resolution=384
         )['color']
 
         video_geo = render_utils.render_video(
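The color pass renders only 30 frames at 384 px to keep GPU memory and render time down. The geometry pass that starts at the bottom of this hunk is cut off in the diff; it presumably mirrors the same settings so the two clips can be concatenated frame by frame later. A hedged sketch (the 'mesh' input and 'normal' output key are assumptions based on the TRELLIS demo, not visible here):

video_geo = render_utils.render_video(
    outputs['mesh'][0],
    num_frames=30,     # must match the color pass for the per-frame np.concatenate below
    resolution=384
)['normal']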
@@ -278,7 +273,7 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
         video_geo = [v.cpu().numpy() for v in video_geo]
         clear_gpu_memory()
 
-        #
+        # Generate and save the video
         video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
         new_trial_id = str(uuid.uuid4())
         video_path = f"{TMP_DIR}/{new_trial_id}.mp4"
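Each frame of the color render is placed next to the matching geometry frame (np.concatenate along axis=1, i.e. side by side), and the result is written to a uniquely named file under TMP_DIR. The actual save call sits below this hunk; a hedged sketch of what presumably follows, assuming the imageio-based export used by the TRELLIS demo:

import imageio

# write the side-by-side frames as an MP4 (the fps value is an assumption)
imageio.mimsave(video_path, video, fps=15)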
@@ -294,20 +289,11 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
 
     except Exception as e:
         print(f"Error in image_to_3d: {str(e)}")
-        g.trellis_pipeline
+        if hasattr(g.trellis_pipeline, 'to'):
+            g.trellis_pipeline.to('cpu')
         clear_gpu_memory()
         return None, None
 
-        return generated_image
-    except Exception as e:
-        print(f"Error in image generation: {str(e)}")
-        return None
-    finally:
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-        gc.collect()
-
 def clear_gpu_memory():
     """Clean up GPU memory more thoroughly"""
     try:
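Two things happen in this hunk: the bare `g.trellis_pipeline` expression (a no-op) becomes a guarded move of the pipeline to the CPU, which releases its VRAM when a generation fails, and an unreachable block referring to generated_image, apparently pasted from the image-generation path and sitting after `return None, None`, is deleted. Only the signature, docstring, and `try:` of clear_gpu_memory are visible in the diff; a hedged sketch of what such a helper typically does:

import gc
import torch

def clear_gpu_memory():
    """Clean up GPU memory more thoroughly."""
    try:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()   # return cached blocks to the allocator
            torch.cuda.ipc_collect()   # drop memory held by dead IPC handles
            torch.cuda.synchronize()   # wait for in-flight kernels to finish
    finally:
        gc.collect()                   # release Python-side references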
@@ -377,7 +363,6 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
         return any(ord('가') <= ord(c) <= ord('힣') for c in text)
 
     if contains_korean(prompt):
-        # Translate using the Helsinki-NLP/opus-mt-ko-en model
         translated = g.translator(prompt)[0]['translation_text']
         prompt = translated
         print(f"Translated prompt: {prompt}")
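This branch detects Hangul in the prompt ('가' through '힣' covers the Hangul Syllables block, U+AC00 to U+D7A3) and routes it through g.translator before the English-only text-to-image model sees it; only a redundant comment is removed. A hedged sketch of how the pieces are presumably wired up elsewhere in app.py (the model name and the [0]['translation_text'] access pattern are visible in the diff; the rest, including the `g` globals object, is assumed):

from transformers import pipeline

# Korean -> English translator used for incoming prompts
g.translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def contains_korean(text: str) -> bool:
    # True if any character falls in the Hangul Syllables block
    return any(ord('가') <= ord(c) <= ord('힣') for c in text)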
@@ -418,16 +403,6 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
             torch.cuda.empty_cache()
             torch.cuda.synchronize()
         gc.collect()
-
-        return generated_image
-    except Exception as e:
-        print(f"Error in image generation: {str(e)}")
-        return None
-    finally:
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-        gc.collect()
 
 css = """
 footer {
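The deleted lines were a stray second copy of text_to_image's return/except/finally tail; the cleanup kept as context here (empty_cache, synchronize, gc.collect) already covers it, so the duplicate sat after the function had effectively ended. For orientation, the usual shape of that tail, with the caveat that the surrounding code is not part of this diff:

    try:
        generated_image = ...        # run the text-to-image pipeline
        return generated_image
    except Exception as e:
        print(f"Error in image generation: {str(e)}")
        return None
    finally:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
        gc.collect()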