load_model func
app.py CHANGED
@@ -8,7 +8,10 @@ import gradio as gr
 
 def run_xvaserver():
     try:
+        import logging
         # start the process without waiting for a response
+        logging.info('loginfo: Running xVAServer subprocess...')
+        print('Running xVAServer subprocess...')
         xvaserver = Popen(['python', 'server.py'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
     except:
         import logging
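Note on the hunk above (not part of the commit): the new logging.info(...) call stays silent unless the root logger has been configured, because Python's default logging threshold is WARNING; the print(...) added next to it is the fallback that actually reaches stdout. A minimal, hypothetical way to make the log line visible:

import logging

# Assumed setup, not in the diff: raise the root logger to INFO so the
# logging.info('loginfo: Running xVAServer subprocess...') call is emitted.
logging.basicConfig(level=logging.INFO)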
@@ -32,6 +35,21 @@ def run_xvaserver():
     # Wait for the process to exit
     xvaserver.wait()
 
+def load_model():
+    model_type = 'xVAPitch'
+    language = 'en'
+
+    data = {
+        'outputs': None,
+        'version': '3.0',
+        'model': 'ccby/ccby_nvidia_hifi_6670_M',
+        'modelType': model_type,
+        'base_lang': language,
+        'pluginsContext': '{}',
+    }
+    requests.post('http://0.0.0.0:8008/loadModel', json=data)
+    return
+
 def predict(input, pacing):
     model_type = 'xVAPitch'
     line = 'Test'
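The new load_model() can only succeed once the server.py subprocess launched by run_xvaserver() is actually listening on port 8008; the endpoint and payload themselves come straight from the diff. A minimal sketch of guarding that call with a retry loop (the wait_for_server() helper and its timings are assumptions, not part of the commit):

import time
import requests

def wait_for_server(url='http://0.0.0.0:8008', timeout=60):
    # Poll until the xVAServer socket accepts connections or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            requests.get(url, timeout=1)
            return True
        except requests.exceptions.RequestException:
            time.sleep(1)
    return False

if wait_for_server():
    # Same payload and endpoint as load_model() in the diff above.
    data = {
        'outputs': None,
        'version': '3.0',
        'model': 'ccby/ccby_nvidia_hifi_6670_M',
        'modelType': 'xVAPitch',
        'base_lang': 'en',
        'pluginsContext': '{}',
    }
    response = requests.post('http://0.0.0.0:8008/loadModel', json=data)
    print('loadModel status:', response.status_code)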