Update app.py
app.py CHANGED
@@ -50,8 +50,11 @@ load_dotenv()
 app = FastAPI()
 
 # API Keys
-
-
+import os
+from dotenv import load_dotenv
+
+PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
+PINECONE_ENV = os.getenv("PINECONE_ENV")
 INDEX_NAME = "agenticrag"
 
 if not PINECONE_API_KEY:

@@ -63,7 +66,7 @@ pc = Pinecone(api_key=PINECONE_API_KEY)
 vector_store = Pinecone.from_existing_index(index_name=INDEX_NAME, embedding=embeddings)
 
 # Load LLM & Memory
-llm = ChatAnthropic(model="claude-2", temperature=0)
+llm = ChatAnthropic(model="claude-2", temperature=0, anthropic_api_key=os.getenv("Anthropic_API_Key"))
 memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
 # Build RAG Chain
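
Taken together, the added lines read the service credentials from environment variables at startup and pass the Anthropic key to the LLM explicitly. Below is a minimal standalone sketch of that configuration, assuming a .env file that defines PINECONE_API_KEY, PINECONE_ENV, and Anthropic_API_Key (the variable name used in the diff), and assuming ChatAnthropic is imported from the langchain-anthropic package; the diff does not show the app's actual imports, and the RuntimeError guard is only a stand-in for the existing "if not PINECONE_API_KEY:" check.

import os

from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic

# Load variables from a local .env file into the process environment
# (assumed contents: PINECONE_API_KEY=..., PINECONE_ENV=..., Anthropic_API_Key=...).
load_dotenv()

PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
PINECONE_ENV = os.getenv("PINECONE_ENV")
ANTHROPIC_API_KEY = os.getenv("Anthropic_API_Key")

# Fail fast when a key is missing, in the spirit of the
# "if not PINECONE_API_KEY:" guard visible in the diff context.
if not PINECONE_API_KEY or not ANTHROPIC_API_KEY:
    raise RuntimeError("PINECONE_API_KEY and Anthropic_API_Key must be set")

# Pass the Anthropic key explicitly; by default the client looks for an
# ANTHROPIC_API_KEY environment variable, which is a different name than
# the one read above (environment variable names are case-sensitive).
llm = ChatAnthropic(model="claude-2", temperature=0, anthropic_api_key=ANTHROPIC_API_KEY)

Passing anthropic_api_key as a keyword argument avoids relying on the client's default ANTHROPIC_API_KEY lookup, which would not match the mixed-case Anthropic_API_Key name used in the diff.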