Spaces:
Runtime error
Runtime error
Commit Β·
860db05
1
Parent(s): e5bd954
Temporary fix for the Hugging Face Space
Browse files
- app_merlin_ai_coach.py +9 -8
- components/stage_mapping.py +7 -6
- llm_utils.py +9 -8
app_merlin_ai_coach.py
CHANGED
|
@@ -5,7 +5,7 @@ import json
|
|
| 5 |
from components.stage_mapping import get_stage_and_details, get_stage_list, get_next_stage, STAGE_INSTRUCTIONS
|
| 6 |
import os
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
-
from llama_index.llms.openllm import OpenLLM
|
| 9 |
from llama_index.llms.nebius import NebiusLLM
|
| 10 |
import threading
|
| 11 |
import re
|
|
@@ -41,13 +41,14 @@ if LLM_PROVIDER == "nebius":
|
|
| 41 |
model=NEBIUS_MODEL
|
| 42 |
)
|
| 43 |
else:
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
|
|
|
| 51 |
|
| 52 |
# In-memory storage for session (for demo; use persistent storage for production)
|
| 53 |
conversation_history = []
|
|
|
|
| 5 |
from components.stage_mapping import get_stage_and_details, get_stage_list, get_next_stage, STAGE_INSTRUCTIONS
|
| 6 |
import os
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
+
# from llama_index.llms.openllm import OpenLLM
|
| 9 |
from llama_index.llms.nebius import NebiusLLM
|
| 10 |
import threading
|
| 11 |
import re
|
|
|
|
| 41 |
model=NEBIUS_MODEL
|
| 42 |
)
|
| 43 |
else:
|
| 44 |
+
pass
|
| 45 |
+
# llm = OpenLLM(
|
| 46 |
+
# model=OPENLLM_MODEL,
|
| 47 |
+
# api_base=LLM_API_URL,
|
| 48 |
+
# api_key=LLM_API_KEY,
|
| 49 |
+
# max_new_tokens=2048,
|
| 50 |
+
# temperature=0.7,
|
| 51 |
+
# )
|
| 52 |
|
| 53 |
# In-memory storage for session (for demo; use persistent storage for production)
|
| 54 |
conversation_history = []
|
components/stage_mapping.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
| 2 |
from llama_index.core import VectorStoreIndex, Document
|
| 3 |
-
from llama_index.llms.openllm import OpenLLM
|
| 4 |
from llama_index.llms.nebius import NebiusLLM
|
| 5 |
import requests
|
| 6 |
import os
|
|
@@ -24,11 +24,12 @@ if LLM_PROVIDER == "nebius":
|
|
| 24 |
model=NEBIUS_MODEL
|
| 25 |
)
|
| 26 |
else:
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
| 32 |
|
| 33 |
# Example: Define your stages and their descriptions here
|
| 34 |
STAGE_DOCS = [
|
|
|
|
| 1 |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
| 2 |
from llama_index.core import VectorStoreIndex, Document
|
| 3 |
+
# from llama_index.llms.openllm import OpenLLM
|
| 4 |
from llama_index.llms.nebius import NebiusLLM
|
| 5 |
import requests
|
| 6 |
import os
|
|
|
|
| 24 |
model=NEBIUS_MODEL
|
| 25 |
)
|
| 26 |
else:
|
| 27 |
+
pass
|
| 28 |
+
# llm = OpenLLM(
|
| 29 |
+
# model=OPENLLM_MODEL,
|
| 30 |
+
# api_base=LLM_API_URL,
|
| 31 |
+
# api_key=LLM_API_KEY
|
| 32 |
+
# )
|
| 33 |
|
| 34 |
# Example: Define your stages and their descriptions here
|
| 35 |
STAGE_DOCS = [
|
llm_utils.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
import os
|
| 2 |
-
from llama_index.llms.openllm import OpenLLM
|
| 3 |
from llama_index.llms.nebius import NebiusLLM
|
| 4 |
|
| 5 |
# ...existing environment variable loading logic...
|
|
@@ -19,13 +19,14 @@ if LLM_PROVIDER == "nebius":
|
|
| 19 |
model=NEBIUS_MODEL
|
| 20 |
)
|
| 21 |
else:
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
|
|
|
| 29 |
|
| 30 |
import re
|
| 31 |
|
|
|
|
| 1 |
import os
|
| 2 |
+
# from llama_index.llms.openllm import OpenLLM
|
| 3 |
from llama_index.llms.nebius import NebiusLLM
|
| 4 |
|
| 5 |
# ...existing environment variable loading logic...
|
|
|
|
| 19 |
model=NEBIUS_MODEL
|
| 20 |
)
|
| 21 |
else:
|
| 22 |
+
pass
|
| 23 |
+
# llm = OpenLLM(
|
| 24 |
+
# model=OPENLLM_MODEL,
|
| 25 |
+
# api_base=LLM_API_URL,
|
| 26 |
+
# api_key=LLM_API_KEY,
|
| 27 |
+
# max_new_tokens=2048,
|
| 28 |
+
# temperature=0.7,
|
| 29 |
+
# )
|
| 30 |
|
| 31 |
import re
|
| 32 |
|