app
app.py CHANGED

@@ -206,14 +206,16 @@ def show_generation_result(model, text, audio_path, motion_length, result_path):
     if audio_path is not None:
         add_audio(result_path, [audio_path])
 
-def generate(prompt, audio_path, length):
+def generate(prompt, audio_check, audio_path, length):
     if not os.path.exists("outputs"):
         os.mkdir("outputs")
     result_path = "outputs/" + str(int(time.time())) + ".mp4"
     print(audio_path)
-    if not os.path.exists(audio_path):
+    if not audio_check:
         audio_path = None
-    if audio_path.endswith("placeholder.wav"):
+    if audio_path is not None and not os.path.exists(audio_path):
+        audio_path = None
+    if audio_path is not None and audio_path.endswith("placeholder.wav"):
         audio_path = None
     if len(prompt) == 0:
         prompt = None

@@ -240,19 +242,20 @@ input_text = gr.Textbox(
     label="Text prompt:"
 )
 
+audio_check = gr.Checkbox(
+    label="Enable audio? "
+)
+
 demo = gr.Interface(
     fn=generate,
-    inputs=[input_text, input_audio, gr.Slider(20, 200, value=60, label="Motion length (fps 20):")],
+    inputs=[input_text, audio_check, input_audio, gr.Slider(20, 200, value=60, label="Motion length (fps 20):")],
     outputs=gr.Video(label="Video:"),
     examples=[
-
-
-
-        ["
-        ["
-        ["A person is stretching arms.", "None", 80],
-        ["", load_file_list["audio_surprise"], 200],
-        ["", load_file_list["audio_angry"], 200],
+        ["A person walks in a circle.", False, load_file_list["audio_placeholder"], 120],
+        ["A person jumps forward.", False, load_file_list["audio_placeholder"], 100],
+        ["A person is stretching arms.", False, load_file_list["audio_placeholder"], 80],
+        ["", True, load_file_list["audio_surprise"], 200],
+        ["", True, load_file_list["audio_angry"], 200],
     ],
     title="LMM: Large Motion Model for Unified Multi-Modal Motion Generation",
     description="\nThis is an interactive demo for LMM. For more information, feel free to visit our project page(https://github.com/mingyuan-zhang/LMM).")
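
Since gr.Interface example rows must supply one value per input component, the text-only examples now pass False plus a placeholder wav for the audio slot, and generate() drops the audio whenever the checkbox is unticked (the missing-file and placeholder.wav checks remain as fallbacks). A minimal runnable sketch of this wiring, with simplified components and a stub body in place of LMM's actual model code:

import gradio as gr

# Sketch of the checkbox-gated audio pattern from the diff.
# generate/audio_check mirror the diff; the body is a stub, not LMM's code.
def generate(prompt, audio_check, audio_path, length):
    if not audio_check:
        # Checkbox unticked: ignore whatever file the audio widget holds,
        # including the placeholder wav passed by text-only example rows.
        audio_path = None
    return f"prompt={prompt!r}, audio={audio_path}, length={length}"

demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Text prompt:"),
        gr.Checkbox(label="Enable audio? "),
        gr.Audio(type="filepath", label="Audio:"),  # delivers a file path (or None) to fn
        gr.Slider(20, 200, value=60, label="Motion length (fps 20):"),
    ],
    outputs=gr.Textbox(label="Result:"),
)

if __name__ == "__main__":
    demo.launch()

Gating on an explicit checkbox is more robust than the previous approach, which had to infer intent from the placeholder.wav sentinel path alone.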
|