Commit 596804b · eliujl committed
Parent(s): 6c41a5e
added prompt template handling
app.py CHANGED
@@ -189,27 +189,36 @@ def use_local_llm(r_llm, local_llm_path):
     return llm


-def setup_prompt():
-
-
+def setup_prompt(r_llm):
+    B_INST, E_INST = "[INST]", "[/INST]"
+    B_SYS_LLAMA, E_SYS_LLAMA = "<<SYS>>\n", "\n<</SYS>>\n\n"
+    B_SYS_MIS, E_SYS_MIS = "<s> ", "</s> "
+    system_prompt = """Answer the question in your own words as truthfully as possible from the context given to you.
     Supply sufficient information, evidence, reasoning, source from the context, etc., to justify your answer with details and logic.
     Think step by step and do not jump to conclusion during your reasoning at the beginning.
     Sometimes user's question may appear to be directly related to the context but may still be indirectly related,
     so try your best to understand the question based on the context and chat history.
     If questions are asked where there is no relevant context available,
     respond using out-of-context knowledge with
-    "This question does not seem to be relevant to the documents. I am trying to explore knowledge outside the context."
-
+    "This question does not seem to be relevant to the documents. I am trying to explore knowledge outside the context." """
+    instruction = """
     Context: {context}

-    {chat_history}
+    Chat history: {chat_history}
     User: {question}
-    Bot:"""
-
-
-
-    )
-
+    Bot: answer """
+    if r_llm == gpt3p5 or r_llm == gpt4:
+        template = system_prompt + instruction
+    else:
+        entry = local_model_names.index(r_llm)
+        if local_model_tuples[entry][4] == 'llama':
+            template = B_INST + B_SYS_LLAMA + system_prompt + E_SYS_LLAMA + instruction + E_INST
+        else:
+            template = B_SYS_MIS + B_INST + system_prompt + E_INST + E_SYS_MIS + B_INST + instruction + E_INST
+    prompt = PromptTemplate(
+        input_variables=["context", "chat_history", "question"], template=template
+    )
+    return prompt

 def setup_em_llm(OPENAI_API_KEY, temperature, r_llm, local_llm_path):
     if (r_llm == gpt3p5 or r_llm == gpt4) and OPENAI_API_KEY:
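For readers following along, the Llama-branch assembly above can be sketched in isolation. This is a minimal illustration rather than the app's code: only PromptTemplate is the real LangChain import, the prompt strings are shortened stand-ins, and the hard-coded branch replaces the local_model_tuples lookup.

from langchain.prompts import PromptTemplate

# Llama-2 chat wrappers, as defined in setup_prompt() above.
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS_LLAMA, E_SYS_LLAMA = "<<SYS>>\n", "\n<</SYS>>\n\n"

# Shortened stand-ins for the full system_prompt and instruction strings.
system_prompt = "Answer the question as truthfully as possible from the context given to you."
instruction = "\nContext: {context}\n\nChat history: {chat_history}\nUser: {question}\nBot: answer "

# The llama branch: [INST] <<SYS>> system <</SYS>> instruction [/INST]
template = B_INST + B_SYS_LLAMA + system_prompt + E_SYS_LLAMA + instruction + E_INST
prompt = PromptTemplate(
    input_variables=["context", "chat_history", "question"], template=template
)

# The chain fills the placeholders at query time; formatting by hand shows
# the exact string a Llama-family model would receive.
print(prompt.format(context="(retrieved chunks)", chat_history="(memory)", question="What is X?"))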
@@ -325,18 +334,15 @@ def main(pinecone_index_name, chroma_collection_name, persist_directory, docsear
     # number of sources (split-documents when ingesting files); default is 4
     k = min([20, n_texts])
     retriever = setup_retriever(docsearch, k)
-
-    #prompt = setup_prompt()
-
+    prompt = setup_prompt(r_llm)
     memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
-
     CRqa = ConversationalRetrievalChain.from_llm(
         llm,
         chain_type="stuff",
         retriever=retriever,
         memory=memory,
         return_source_documents=True,
-
+        combine_docs_chain_kwargs={'prompt': prompt},
     )

     st.title(':blue[Chatbot]')
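As a usage note, the rewired chain can be exercised as in the sketch below, written against the classic LangChain chain API; llm, retriever, and prompt are assumed to come from the app's setup_em_llm(), setup_retriever(), and setup_prompt() helpers.

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

# Same memory configuration as in main(); output_key='answer' tells the
# memory which output field to append to the chat history.
memory = ConversationBufferMemory(
    memory_key='chat_history', return_messages=True, output_key='answer'
)

# combine_docs_chain_kwargs forwards the custom prompt to the inner "stuff"
# chain that packs the retrieved documents into the {context} placeholder.
CRqa = ConversationalRetrievalChain.from_llm(
    llm,                                            # assumed: from setup_em_llm()
    chain_type="stuff",
    retriever=retriever,                            # assumed: from setup_retriever()
    memory=memory,
    return_source_documents=True,
    combine_docs_chain_kwargs={'prompt': prompt},   # assumed: from setup_prompt(r_llm)
)

# With return_source_documents=True, the result dict carries both the answer
# and the retrieved chunks that grounded it.
result = CRqa({'question': 'What does the document say about X?'})
print(result['answer'])
print(len(result['source_documents']))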