import random

from pydantic import ValidationError

from neo4j_driver import driver
from pydantic_restrictions import AltReviewOut, AltCloseOut, AltInviteOut, SocraticEval, SocraticQuestion
from bielik import llm, modelLanguage
from prompts import ALT_CLOSE_SYSTEM, ALT_INVITE_SYSTEM, EVAL_SYSTEM, build_system_prompt_introduction_chapter_ellis_distortion, build_eval_user_prompt
from helpful_functions import getQuestions, get_last_user_message, introduction_talk, create_interview, check_situation
from classifier import predict_raw, predict_raw1
from guardian import check_input
from helpful_functions import beliefs_check_function
from state import ChatState
from langgraph.graph import StateGraph, START, END
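
# Conversation flow (dispatched by global_router on state["stage"]):
# detect_distortion -> get_distortion_def -> talk_about_distortion -> get_intention
# -> select_intention -> create_socratic_question -> analyze_output
# -> enter_alt_thought -> handle_alt_thought_input -> end.
# Each node returns the updated ChatState; setting awaitingUser=True ends the current
# graph run so the app can collect the next user message, and validate_input guards
# incoming user turns.
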
def _short_context(state: ChatState) -> str:
    lines = []
    if state.get("distortion"): lines.append(f"- Zniekształcenie: {state['distortion']}")
    if state.get("distortion_def"): lines.append(f"- Definicja: {state['distortion_def']}")
    if state.get("current_intention"): lines.append(f"- Intencja pytań: {state['current_intention']}")
    if state.get("cel"): lines.append(f"- Cel intencji: {state['cel']}")
    if state.get("wniosek"): lines.append(f"- Wniosek, który musi być zawarty w stworzonej alternatywnej myśli: {state['wniosek']}")
    if state.get("socratic_question"): lines.append(f"- Ostatnie pytanie sokratejskie: {state['socratic_question']}")
    hist = state.get("messages_detect") or state.get("messages") or []
    tail = hist[-6:] if len(hist) > 6 else hist
    if tail:
        lines.append("- Fragment rozmowy:")
        for m in tail:
            who = "U" if m["role"] == "user" else "A"
            lines.append(f"  {who}: {m['content']}")
    return "\n".join(lines) if lines else "(brak kontekstu)"
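
# ─────────────────────────────────────────
# STEP 1: Invitation to formulate an alternative thought (LLM)
# ─────────────────────────────────────────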
def altthought_invite(state: ChatState) -> str:
    user_prompt = f"""Kontekst:
{_short_context(state)}

Historia chatu: {state["messages"]}

Zadanie: Napisz krótką, życzliwą prośbę, by użytkownik spróbował sformułować myśl alternatywną."""
    out = llm.with_structured_output(AltInviteOut).invoke([
        {"role": "system", "content": ALT_INVITE_SYSTEM},
        {"role": "user", "content": user_prompt},
    ])
    return out.assistant_message

# ─────────────────────────────────────────
# STEP 2: Review + feedback / correction (LLM)
# ─────────────────────────────────────────
def altthought_review(state: ChatState, user_sentence: str) -> AltReviewOut:
    query = "MATCH (i:Intent {name:$intent})-[:HAS_CONCLUSION]->(c:Conclusion) RETURN c.must_include AS wniosek, c.example AS przyklad;"
    records, _, _ = driver.execute_query(
        query,
        parameters_={"intent": state["current_intention"]},
    )
    wniosek = records[0]["wniosek"]
    przyklad = records[0]["przyklad"]

    ALT_REVIEW_SYSTEM = f"""
Jesteś empatycznym asystentem CBT.
Oceń zdanie alternatywnej myśli stworzone przez użytkownika.

DANE WEJŚCIOWE:
Alternatywna myśl stworzona przez użytkownika: {user_sentence}
Wniosek, który musi się pojawić w alternatywnej myśli: {wniosek}
Przykładowa myśl alternatywna: {przyklad}
Historia rozmowy: {state["messages"]}

ZADANIE:
1) Oceń ALT pod kątem obecności powyższego WNIOSKU.
2) Jeśli WNIOSEK jest obecny → is_ok = true.
3) Jeśli WNIOSEK jest nieobecny lub niejednoznaczny → is_ok = false i:
   - zidentyfikuj, czego brakuje (konkretne elementy),
   - podaj zwięzłe wskazówki, JAK użytkownik może ulepszyć ALT, aby zawierała WNIOSEK.
   - Nie proponuj gotowej treści; formułuj wskazówki („Zachęcam, abyś…” / „Dodaj…” / „Doprecyzuj…”).
4) Możesz odwołać się do poprzednich wypowiedzi i/lub zniekształcenia, ale nie podawaj przykładowej nowej myśli.

Zwróć WYŁĄCZNIE JSON zgodny z AltReviewOut.
"""
    out = llm.with_structured_output(AltReviewOut).invoke([
        {"role": "system", "content": ALT_REVIEW_SYSTEM}
    ])
    return out

# ─────────────────────────────────────────
# STEP 3: Closing message (LLM)
# ─────────────────────────────────────────
def altthought_close(state: ChatState, final_sentence: str) -> str:
    user_prompt = f"""Kontekst:
{_short_context(state)}

Zatwierdzone zdanie AltThought:
"{final_sentence}"
"""
    out = llm.with_structured_output(AltCloseOut).invoke([
        {"role": "system", "content": ALT_CLOSE_SYSTEM},
        {"role": "user", "content": user_prompt},
    ])
    return out.assistant_message
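
# Evaluates the latest Socratic exchange against the current intent: the structured
# SocraticEval output says whether the intent's cue was hit and which route to take
# ("advance", "switch" or keep asking), plus an explanation of what is still missing
# and a hint (proposition) for the next question.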
def call_eval_llm(state: ChatState, intent_name, messages):
    print(intent_name)
    classifier_llm = llm.with_structured_output(SocraticEval)
    result = classifier_llm.invoke([
        {
            "role": "system",
            "content": EVAL_SYSTEM,
        },
        {"role": "user", "content": build_eval_user_prompt(state, intent_name, messages)},
    ])
    return result.cue_hit, result.route, result.explanation, result.proposition
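
# Stage 1: free-form interview. On the first call the node only greets the user;
# on later turns it runs the distortion classifiers on the last user message and
# keeps interviewing until a distortion plus situation, thought and emotion have
# all been captured.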
def detect_distortion(state: ChatState):
    if not state.get("messages"):
        print("[detect_distortion] first turn: greeting the user")
        state["messages"] = [{
            "role": "assistant",
            "content": "Cześć! Cieszę się, że jesteś. Co u ciebie, czy masz jakiś problem? Z chęcią ci pomogę!"
        }]
        state["awaitingUser"] = True
        state["stage"] = "detect_distortion"
        return state
    else:
        state["first_stage_iterations"] += 1
        print(f"[detect_distortion] iteration {state['first_stage_iterations']}")
        last_message = get_last_user_message(state)
        user_text = (last_message["content"] or "").strip()
        if state["distortion"] is None:
            result = predict_raw(user_text)
            if result != "No Distortion":
                thought = beliefs_check_function(user_text)
                if thought:
                    distortion = predict_raw1(user_text)
                    print(f"[detect_distortion] detected distortion: {distortion}")
                    state["distortion"] = distortion
                    state["distortion_text"] = user_text
        print("[detect_distortion] continuing the introduction interview")
        system_prompt = build_system_prompt_introduction_chapter_ellis_distortion(
            state["distortion"], state["situation"], state["think"], state["emotion"]
        )
        result = introduction_talk(state["messages"], system_prompt)
        if state["situation"] == "":
            state["situation"] = result.situation
        elif result.situation != "":
            state["situation"] = create_interview(result.situation, state["situation"])
        if state["emotion"] == "":
            state["emotion"] = result.emotion
        elif result.emotion != "":
            state["emotion"] = create_interview(result.emotion, state["emotion"])
        if state["think"] == "":
            state["think"] = result.think
        elif result.think != "":
            state["think"] = create_interview(result.think, state["think"])
        state["introduction_end_flag"] = result.chapter_end
        if state["distortion"] is not None and state["situation"] != "" and state["think"] != "" and state["emotion"] != "":
            print("[detect_distortion] interview complete, moving to get_distortion_def")
            state["awaitingUser"] = False
            state["messages_detect"] = state["messages"]
            state["stage"] = "get_distortion_def"
            return state
        else:
            state["messages"].append({"role": "assistant", "content": result.model_output})
            state["awaitingUser"] = True
            state["stage"] = "detect_distortion"
            return state
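
# Stage 2a: look up the detected distortion's definition in the Neo4j graph.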
def get_distortion_def(state: ChatState):
    print("[get_distortion_def] fetching definition from Neo4j")
    distortion = state["distortion"]
    query = """
    MATCH (d:Distortion {name: $name})
    RETURN d.definicja AS definicja
    """
    records, summary, keys = driver.execute_query(
        query,
        parameters_={"name": distortion},
    )
    state["distortion_def"] = records[0]["definicja"] if records else None
    state["stage"] = "talk_about_distortion"
    state["awaitingUser"] = False
    return state
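
# Stage 2b: explain the distortion to the user, then branch on their reaction
# (understood -> go pick an intention; otherwise re-explain in simpler words).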
def talk_about_distortion(state: ChatState):
    distortion = state["distortion"]
    distortion_def = state["distortion_def"]
    if not state.get("distortion_explained"):
        print("[talk_about_distortion] explaining the detected distortion")
        system_prompt_talk = f"""
Jesteś empatycznym asystentem CBT.
Użytkownikowi wykryto zniekształcenie poznawcze:
Nazwa: {distortion}
Definicja: {distortion_def}
Przedstaw mu, że wykryłeś u niego zniekształcenie, wyjaśnij je w prosty, życzliwy sposób i zapytaj, czy chce, abyś pomógł mu to wspólnie przepracować.
Język: polski, maksymalnie 2–3 zdania.
"""
        llm_reply = llm.invoke([
            {
                "role": "system",
                "content": system_prompt_talk,
            },
        ])
        follow_text = (
            llm_reply if isinstance(llm_reply, str)
            else getattr(llm_reply, "content", str(llm_reply))
        )
        state["messages"].append({"role": "assistant", "content": follow_text})
        state["awaitingUser"] = True
        state["stage"] = "talk_about_distortion"
        state["distortion_explained"] = True
        return state
    else:
        print("[talk_about_distortion] classifying the user's reaction")
        last_user_msg = get_last_user_message(state)
        if not last_user_msg:
            state["awaitingUser"] = True
            return state
        classify_result = check_situation(last_user_msg["content"])
        state["classify_result"] = classify_result
        if classify_result == "understand":
            state["messages"].append({
                "role": "assistant",
                "content": "Super! To przejdźmy teraz do kolejnego kroku."
            })
            state["stage"] = "get_intention"
            state["awaitingUser"] = False
            return state
        # elif classify_result == "low_expression":
        #     system_prompt = f"""
        # WEJŚCIE
        # Historia wiadomości - {state["messages"]}
        #
        # Użytkownik jest mało wylewny i odpowiada krótko.
        # Twoim zadaniem jest napisać 2–3 empatyczne zdania po polsku, które spokojnie i nienachalnie zachęcą go do kontynuowania rozmowy.
        # Brzmi naturalnie, bez punktów, presji ani oceniania.
        # Na końcu zapytaj, czy możemy przejść do działania.
        # Twoją rolą jest tylko i wyłącznie zachęcenie do działania, nie pisz nic innego.
        # """
        #     llm_reply = llm.invoke([
        #         {
        #             "role": "system",
        #             "content": system_prompt,
        #         },
        #     ])
        #     follow_text = (
        #         llm_reply if isinstance(llm_reply, str)
        #         else getattr(llm_reply, "content", str(llm_reply))
        #     )
        #     state["messages"].append({"role": "assistant", "content": follow_text})
        #     state["awaitingUser"] = True
        #     state["stage"] = "talk_about_distortion"
        else:
            print("[talk_about_distortion] re-explaining in simpler words")
            system_prompt = f"""
WEJŚCIE
Historia wiadomości - {state["messages"]}
Użytkownik nie zrozumiał wyjaśnienia zniekształcenia.
Nazwa: {distortion}
Definicja: {distortion_def}
Język tylko polski.
Twoje zadanie:
- Wyjaśnij prostszymi słowami (1–2 zdania).
- Dodaj przykład z życia (1–2 zdania).
- Zapytaj, czy teraz jest to jasne i czy możemy przejść do działania.
Maksymalnie 3–4 zdania.
"""
            llm_reply = llm.invoke([
                {
                    "role": "system",
                    "content": system_prompt,
                },
            ])
            follow_text = (
                llm_reply if isinstance(llm_reply, str)
                else getattr(llm_reply, "content", str(llm_reply))
            )
            state["messages"].append({"role": "assistant", "content": follow_text})
            state["awaitingUser"] = True
            state["stage"] = "talk_about_distortion"
            return state
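
# Stage 3a: fetch from Neo4j all Socratic intents that target the detected distortion.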
def get_intention(state: ChatState):
    distortion = state["distortion"]
    take_intent = """
    MATCH (d:Distortion {name:$distortion})<-[:TARGETS]-(i:Intent) RETURN i.name AS nazwa ORDER BY nazwa
    """
    records, summary, keys = driver.execute_query(take_intent, parameters_={"distortion": distortion})
    result = [record["nazwa"] for record in records]
    state["priority_check"] = result
    state["stage"] = "select_intention"
    state["awaitingUser"] = False
    return state
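
# Stage 3b: pick one intention at random, remove it from the pool and reset the
# Socratic sub-dialogue for that intention.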
def select_intention(state: ChatState):
    state["messages_socratic"] = []
    element = random.choice(state["priority_check"])
    state["priority_check"].remove(element)
    state["current_intention"] = element
    state["question"] = 1
    state["stage"] = "create_socratic_question"
    state["awaitingUser"] = False
    return state
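
# Stage 3c: generate exactly one Socratic question. The first question of an
# intention is grounded in the intent's hint and reference questions; follow-up
# questions additionally use the evaluator's explanation and proposition.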
def create_socratic_question(state: ChatState):
    query = """
    MATCH (i:Intent {name:$intencja}) RETURN i.name AS nazwa, i.aim AS cel, i.model_hint AS hint;
    """
    records, _, _ = driver.execute_query(
        query,
        parameters_={"intencja": state["current_intention"]},
    )
    questions = getQuestions(records[0]["nazwa"])
    socratic = state["messages_socratic"]
    if not socratic:
        creating_question_prompt = f"""
Jesteś chatbotem terapeutycznym prowadzącym dialog sokratejski.
ZADANIE:
Wygeneruj DOKŁADNIE jedno krótkie pytanie po polsku.
WEJŚCIE:
- Zniekształcenie: {state["distortion"]}
- Definicja: {state["distortion_def"]}
- Błąd (cytat): {state["distortion_text"]}
- Historia (P→U): {socratic} ← ostatnia odpowiedź to ostatnia linia zaczynająca się od „U:”
- Intencja: {records[0]["nazwa"]}
- Cel: {records[0]["cel"]}
- Hint: {records[0]["hint"]}
- Pytania referencyjne: {questions}
REGUŁY:
1) Oprzyj pytanie przede wszystkim na hint + pytaniach referencyjnych.
2) Pytanie ma przybliżać do celu: {records[0]["cel"]}.
3) Nawiąż neutralnie do błędu „{state["distortion_text"]}”, eksplorując dowody/zakres/wyjątki/realistyczne alternatywy. Unikaj słowa „Dlaczego”.
4) Jedno pytanie; bez diagnoz, porad, definicji; bez kilku pytań naraz.
5) Nie powtarzaj dosłownie wcześniejszych pytań z {questions} ani pytań asystenta z {socratic}; parafrazuj i personalizuj wobec „{state["distortion_text"]}”.
FORMAT WYJŚCIA:
- Zwróć wyłącznie jedno zdanie zakończone „?” — bez cudzysłowów, markdown i etykiet; zero tekstu po „?”.
AUTOKOREKTA:
- Jeśli wygenerowano więcej niż jedno zdanie/linia, zwróć tylko pierwsze do pierwszego „?” włącznie.
- Usuń frazy: "Wyjaśnienie:", "Explanation:", "Uzasadnienie:", "Dlaczego:", "Komentarz:".
- Jeśli >140 znaków, skróć z zachowaniem sensu i „?” na końcu.
"""
    else:
        creating_question_prompt = f"""
Jesteś chatbotem terapeutycznym prowadzącym dialog sokratejski.
ZADANIE:
Wygeneruj DOKŁADNIE jedno krótkie pytanie po polsku.
WEJŚCIE:
- Zniekształcenie: {state["distortion"]}
- Definicja: {state["distortion_def"]}
- Błąd (cytat): {state["distortion_text"]}
- Historia (P→U): {socratic} ← ostatnia odpowiedź to ostatnia linia zaczynająca się od „U:”
- Intencja: {records[0]["nazwa"]}
- Cel: {records[0]["cel"]}
- Hint: {records[0]["hint"]}
- Braki do celu: {state["decision_explanation"]}
- Wskazówki do kolejnego pytania: {state["proposition"]}
REGUŁY:
1) Oprzyj pytanie na ostatniej odpowiedzi użytkownika. Jeśli {state["decision_explanation"]} lub {state["proposition"]} nie są puste, wykorzystaj je do domknięcia brakujących informacji prowadzących do celu.
2) Pytanie ma przybliżać do celu: {records[0]["cel"]}.
3) Nawiąż neutralnie do błędu „{state["distortion_text"]}”, eksplorując dowody/zakres/wyjątki/alternatywy. Unikaj słowa „Dlaczego”.
4) Jedno pytanie; bez diagnoz, porad, definicji; bez kilku pytań naraz.
5) Nie powtarzaj dosłownie pytań z {questions} ani wcześniejszych pytań asystenta z {socratic}; parafrazuj i personalizuj wobec „{state["distortion_text"]}”.
FORMAT WYJŚCIA:
- Zwróć wyłącznie jedno zdanie zakończone „?” — bez cudzysłowów, markdown i etykiet; zero tekstu po „?”.
AUTOKOREKTA:
- Jeśli wygenerowano więcej niż jedno zdanie/linia, zwróć tylko pierwsze do pierwszego „?” włącznie.
- Usuń frazy: "Wyjaśnienie:", "Explanation:", "Uzasadnienie:", "Dlaczego:", "Komentarz:".
- Jeśli >140 znaków, skróć z zachowaniem sensu i „?” na końcu.
"""
    question_llm = llm.with_structured_output(SocraticQuestion)
    result = question_llm.invoke([
        {
            "role": "system",
            "content": creating_question_prompt,
        },
    ])
    state["messages"].append({"role": "assistant", "content": result.question})
    state["messages_socratic"].append({"role": "assistant", "content": result.question})
    state["stage"] = "analyze_output"
    state["awaitingUser"] = True
    return state
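
# Stage 3d: evaluate the user's answer to the last Socratic question and route:
# advance to the alternative-thought step, switch to another intention, or ask
# another question within the same intention.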
def analyze_output(state: ChatState):
    state["messages_socratic"].append({"role": "user", "content": state["messages"][-1].get("content")})
    cue_hit, confidence, explanation, proposition = call_eval_llm(state, state["current_intention"], state["messages_socratic"])
    state["cue_hit"] = bool(cue_hit)
    state["confidence"] = confidence
    if cue_hit and confidence == "advance":
        # Cue reached: move on to formulating the alternative thought.
        state["stage"] = "enter_alt_thought"
        return state
    elif (not cue_hit) and confidence == "switch":
        # Dead end: drop the hints and try a different intention.
        state["decision_explanation"] = ""
        state["proposition"] = ""
        state["stage"] = "get_intention"
        return state
    else:
        # Keep probing: carry the evaluator's feedback into the next question.
        state["stage"] = "create_socratic_question"
        state["decision_explanation"] = explanation
        state["proposition"] = proposition
        state["question"] = state.get("question", 0) + 1
        return state
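
# Guardian node: maps the current stage to a chapter label (ETAP 1-4) and lets
# check_input decide whether the latest user message is acceptable for that chapter.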
def validate_input(state: ChatState):
    stage = state.get("stage")
    if stage == "detect_distortion":
        chapter = "ETAP 1"
    elif stage in ("talk_about_distortion", "get_distortion_def"):
        chapter = "ETAP 2"
    elif stage in ("create_socratic_question", "get_intention", "select_intention", "analyze_output"):
        chapter = "ETAP 3"
    elif stage in ("enter_alt_thought", "handle_alt_thought_input"):
        chapter = "ETAP 4"
    else:
        chapter = "None"
    last_user_msg = state.get("last_user_msg_content")
    result = check_input(state["messages"], chapter, last_user_msg)
    state["last_user_msg"] = False
    if result.decision:
        state["validated"] = True
        state["awaitingUser"] = False
    else:
        state["noValidated"] = f"{chapter} - {last_user_msg}"
        state["explanation"] = result.explanation
        state["messages"].append({"role": "assistant", "content": result.message_to_user})
        state["awaitingUser"] = True
    return state
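
# Stage 4: the two nodes below run the alternative-thought loop: invite the user
# to phrase a balanced thought, then review it until the required conclusion is present.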
def enter_alt_thought(state: ChatState):
    result = altthought_invite(state)  # TODO: revise the text that introduces the balanced thought
    state.setdefault("messages", []).append({"role": "assistant", "content": result})
    state["stage"] = "handle_alt_thought_input"
    state["awaitingUser"] = True
    return state

def handle_alt_thought_input(state: ChatState):
    user_msg = next((m for m in reversed(state.get("messages", [])) if m["role"] == "user"), None)
    if not user_msg:
        state["awaitingUser"] = True
        return state
    user_sentence = (user_msg["content"] or "").strip()
    try:
        review = altthought_review(state, user_sentence)
    except ValidationError:
        msg = altthought_invite(state)
        state["messages"].append({"role": "assistant", "content": msg})
        state["stage"] = "handle_alt_thought_input"
        state["awaitingUser"] = True
        return state
    if review.is_ok:
        final_sentence = user_sentence
        closing = altthought_close(state, final_sentence)
        state["messages"].append({"role": "assistant", "content": f"Zatwierdzona myśl: „{final_sentence}”"})
        state["messages"].append({"role": "assistant", "content": closing})
        state["stage"] = "end"
        state["awaitingUser"] = False
        return state
    else:
        state["messages"].append({"role": "assistant", "content": review.assistant_message})
        state["stage"] = "handle_alt_thought_input"
        state["awaitingUser"] = True
        return state
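
# Router shared by every node: stop when the graph is waiting for user input,
# run the guardian on fresh user messages, otherwise dispatch on state["stage"].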
def global_router(state: ChatState) -> str:
    if state.get("awaitingUser"):
        print("[ROUTER] awaitingUser=True → __end__")
        return "__end__"
    stage = state.get("stage")
    print(f"[ROUTER] stage={stage}")
    if not state.get("validated") and state.get("last_user_msg"):
        return "validate_input"
    if stage == "end":
        return "__end__"
    if stage == "get_distortion_def":
        return "get_distortion_def"
    if stage == "talk_about_distortion":
        return "talk_about_distortion"
    if stage == "get_intention":
        return "get_intention"
    # if stage == "get_socratic_question":
    #     return "get_socratic_question"
    if stage == "select_intention":
        return "select_intention"
    if stage == "create_socratic_question":
        return "create_socratic_question"
    if stage == "analyze_output":
        return "analyze_output"
    if stage == "enter_alt_thought":
        return "enter_alt_thought"
    if stage == "handle_alt_thought_input":
        return "handle_alt_thought_input"
    print("[ROUTER] default → detect_distortion")
    return "detect_distortion"
graph_builder = StateGraph(ChatState)
graph_builder.add_node("detect_distortion", detect_distortion)
graph_builder.add_node("get_distortion_def", get_distortion_def)
graph_builder.add_node("talk_about_distortion", talk_about_distortion)
graph_builder.add_node("get_intention", get_intention)
graph_builder.add_node("select_intention", select_intention)
# graph_builder.add_node("get_socratic_question", get_socratic_question)
graph_builder.add_node("create_socratic_question", create_socratic_question)
graph_builder.add_node("analyze_output", analyze_output)
graph_builder.add_node("enter_alt_thought", enter_alt_thought)
graph_builder.add_node("handle_alt_thought_input", handle_alt_thought_input)
graph_builder.add_node("validate_input", validate_input)

graph_builder.add_conditional_edges(START, global_router, {
    "detect_distortion": "detect_distortion",
    "get_distortion_def": "get_distortion_def",
    "talk_about_distortion": "talk_about_distortion",
    "get_intention": "get_intention",
    "select_intention": "select_intention",
    # "get_socratic_question": "get_socratic_question",
    "create_socratic_question": "create_socratic_question",
    "analyze_output": "analyze_output",
    "enter_alt_thought": "enter_alt_thought",
    "handle_alt_thought_input": "handle_alt_thought_input",
    "validate_input": "validate_input",
    "__end__": END,
})

edge_map = {
    "detect_distortion": "detect_distortion",
    "get_distortion_def": "get_distortion_def",
    "talk_about_distortion": "talk_about_distortion",
    "get_intention": "get_intention",
    "select_intention": "select_intention",
    # "get_socratic_question": "get_socratic_question",
    "create_socratic_question": "create_socratic_question",
    "analyze_output": "analyze_output",
    "enter_alt_thought": "enter_alt_thought",
    "handle_alt_thought_input": "handle_alt_thought_input",
    "validate_input": "validate_input",
    "__end__": END,
}

for node in [
    "detect_distortion", "get_distortion_def", "talk_about_distortion", "get_intention",
    "select_intention", "create_socratic_question", "analyze_output",
    "enter_alt_thought", "handle_alt_thought_input", "validate_input",
]:
    graph_builder.add_conditional_edges(node, global_router, edge_map)

graph = graph_builder.compile()
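
# ------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the Space itself):
# it assumes ChatState is a dict-like TypedDict and that the keys initialised
# below are the ones the first nodes read. Adjust to the real ChatState
# definition before running, since each pass calls the LLM and Neo4j.
# ------------------------------------------------------------------
# if __name__ == "__main__":
#     state = {
#         "messages": [],
#         "first_stage_iterations": 0,
#         "distortion": None,
#         "situation": "",
#         "think": "",
#         "emotion": "",
#     }
#     state = graph.invoke(state)       # first pass: the assistant greets the user
#     print(state["messages"][-1]["content"])
#     state["messages"].append({"role": "user", "content": "Nikt mnie nie lubi."})
#     state["awaitingUser"] = False     # hand control back to the graph
#     state = graph.invoke(state)       # next pass: distortion detection / interview
#     print(state["messages"][-1]["content"])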