Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -67,21 +67,27 @@ class GenerateRequest(BaseModel):
|
|
| 67 |
|
| 68 |
|
| 69 |
class ModelInfo(BaseModel):
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
AVAILABLE_MODELS = [
|
| 78 |
ModelInfo(
|
| 79 |
id="codellama-7b-instruct",
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
|
|
|
|
|
|
|
|
|
| 85 |
)
|
| 86 |
]
|
| 87 |
|
|
|
|
| 67 |
|
| 68 |
|
| 69 |
class ModelInfo(BaseModel):
    """Metadata for one model served by this API.

    Field names mirror the LM Studio / OpenAI-style model listing wire
    format, which is why `id`, `object`, and `type` deliberately shadow
    Python builtins — renaming them would change the JSON clients receive.
    """

    # Fix: the original declared each field as e.g. `id: str,` — a trailing
    # comma after an annotation statement is a SyntaxError in a class body.
    id: str
    object: str
    type: str
    publisher: str
    arch: str
    compatibility_type: str
    quantization: str
    state: str
    # Maximum prompt+completion context window, in tokens.
    max_context_length: int
|
| 79 |
|
| 80 |
# Static registry of the models this server advertises. Add an entry per
# model; values follow the LM Studio-style listing schema of ModelInfo.
_CODELLAMA_7B_INSTRUCT = {
    "id": "codellama-7b-instruct",
    "object": "model",
    "type": "llm",
    "publisher": "lmstudio-community",
    "arch": "llama",
    "compatibility_type": "gguf",
    "quantization": "Q4_K_M",
    "state": "loaded",
    "max_context_length": 32768,
}

AVAILABLE_MODELS = [ModelInfo(**_CODELLAMA_7B_INSTRUCT)]
|
| 93 |
|