sahanes committed on
Commit
f85c983
1 Parent(s): 802dcc5

Add Docker image and app files

Files changed (4)
  1. .gitignore +6 -0
  2. Dockerfile +38 -0
  3. app.py +506 -0
  4. requirements.txt +21 -0
.gitignore ADDED
@@ -0,0 +1,6 @@
+ .env
+ __pycache__/
+ .chainlit
+ *.faiss
+ *.pkl
+ .files
Dockerfile ADDED
@@ -0,0 +1,38 @@
+ # FROM python:3.10
+ # RUN useradd -m -u 1000 user
+ # USER user
+ # ENV HOME=/home/user \
+ #     PATH=/home/user/.local/bin:$PATH
+ # WORKDIR $HOME/app
+ # COPY --chown=user . $HOME/app
+ # COPY ./requirements.txt ~/app/requirements.txt
+ # RUN pip install --upgrade pip
+ # RUN pip install -r requirements.txt
+ # COPY . .
+ # CMD ["chainlit", "run", "app.py", "--port", "7860"]
+ FROM python:3.10-slim
+
+ # Create a non-root user
+ RUN useradd -m -u 1000 user
+ USER user
+
+ # Set environment variables
+ ENV HOME=/home/user
+ ENV PATH=/home/user/.local/bin:$PATH
+
+ # Set working directory
+ WORKDIR $HOME/app
+
+ # Copy the requirements file first so the dependency layer is cached
+ COPY --chown=user ./requirements.txt $HOME/app/requirements.txt
+
+ # Upgrade pip and install dependencies
+ RUN pip install --timeout=100 --index-url https://pypi.org/simple --upgrade pip
+ RUN pip install --timeout=100 --index-url https://pypi.org/simple -r requirements.txt
+
+ # Copy the rest of the application files
+ COPY --chown=user . .
+
+ # Set the entrypoint command
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
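
A quick local sanity check for this image (the tag below is arbitrary and mirrors the commands noted at the end of app.py):

docker build -t llm-app-langgraph-react-chainlit-mentalmindbt .
docker run -it -p 7860:7860 llm-app-langgraph-react-chainlit-mentalmindbt:latest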
app.py ADDED
@@ -0,0 +1,506 @@
+ import json
+ import operator
+ from operator import itemgetter
+ from typing import Annotated, Sequence, TypedDict
+
+ import chainlit as cl
+ from dotenv import load_dotenv
+ from langchain.retrievers import ParentDocumentRetriever
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain.schema.runnable import RunnablePassthrough
+ from langchain.schema.runnable.config import RunnableConfig
+ from langchain.storage import InMemoryStore
+
+ # from langchain_core.output_parsers import StrOutputParser
+ from langchain.tools import tool
+ from langchain_community.document_loaders import ArxivLoader
+ from langchain_community.tools.arxiv.tool import ArxivQueryRun
+ from langchain_community.tools.ddg_search import DuckDuckGoSearchRun
+ from langchain_community.tools.pubmed.tool import PubmedQueryRun
+
+ # from langgraph.graph.message import add_messages
+ from langchain_core.messages import (
+     BaseMessage,
+     FunctionMessage,
+     SystemMessage,
+ )
+ from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
+ from langchain_core.utils.function_calling import convert_to_openai_function
+ from langchain_openai import ChatOpenAI
+ from langchain_openai.embeddings import OpenAIEmbeddings
+ from langchain_qdrant import Qdrant
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langgraph.graph import END, StateGraph
+ from langgraph.checkpoint.aiosqlite import AsyncSqliteSaver
+ from langgraph.prebuilt import ToolExecutor, ToolInvocation
+ from qdrant_client import QdrantClient
+ from qdrant_client.models import Distance, VectorParams
+
+ # GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
+ # ---- ENV VARIABLES ---- #
+ """
+ Calling load_dotenv() loads our environment file (.env) if it is present.
+
+ NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
+ """
+ load_dotenv()
+
+ """
+ We load our environment variables here.
+ """
+
+ # ---- GLOBAL DECLARATIONS ---- #
+
+
+ # -- RETRIEVAL -- #
+ """
+ 1. Load Documents from arXiv
+ 2. Split Documents into Parent and Child Chunks
+ 3. Load OpenAI Embeddings
+ 4. Index the Documents in an in-memory Qdrant collection
+ """
+ ### 1. CREATE ARXIV LOADER AND LOAD DOCUMENTS
+ ### NOTE: PAY ATTENTION TO THE QUERY THE DOCUMENTS ARE LOADED WITH.
+
+
+ docs = ArxivLoader(
+     query='"mental health counseling" AND (data OR analytics OR "machine learning")',
+     load_max_docs=2,
+     sort_by="submittedDate",
+     sort_order="descending",
+ ).load()
+
+
+ ### 2. CREATE QDRANT CLIENT AND VECTOR STORE
+
+ client = QdrantClient(":memory:")
+ client.create_collection(
+     collection_name="split_parents",
+     vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
+ )
+
+ vectorstore = Qdrant(
+     client,
+     collection_name="split_parents",
+     embeddings=OpenAIEmbeddings(model="text-embedding-3-small"),
+ )
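+ # NOTE: size=1536 must match the embedding model's output dimension;
+ # text-embedding-3-small emits 1536-dimensional vectors by default.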
+
+ store = InMemoryStore()
+
+ ### 3. CREATE PARENT DOCUMENT TEXT SPLITTERS AND INITIALIZE THE RETRIEVER
+
+ parent_document_retriever = ParentDocumentRetriever(
+     vectorstore=vectorstore,
+     docstore=store,
+     child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
+     parent_splitter=RecursiveCharacterTextSplitter(chunk_size=2000),
+ )
+ parent_document_retriever.add_documents(docs)
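+ # Sketch of the mechanics: add_documents() splits each paper into ~2000-char
+ # parent chunks (kept in the InMemoryStore) and ~400-char child chunks
+ # (embedded into Qdrant). Queries are matched against the small child chunks,
+ # but the retriever returns the larger parent chunk containing each hit, e.g.:
+ #   parent_docs = parent_document_retriever.invoke("counseling outcomes")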
+
+ ### 4. CREATE PROMPT OBJECT
+ RAG_PROMPT = """\
+ You are a professional mental health advisor. Use the following context to answer the user's query. If you cannot answer the question, please respond with 'I don't know'.
+
+ Question:
+ {question}
+
+ Context:
+ {context}
+ """
+
+ rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
+
+ ### 5. CREATE THE CHAIN PIPELINE AROUND THE RETRIEVER
+
+ openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo", streaming=True)
+
+
+ def create_qa_chain(retriever):
+     mentalhealth_qa_llm = openai_chat_model
+
+     created_qa_chain = (
+         {
+             "context": itemgetter("question") | retriever,
+             "question": itemgetter("question"),
+         }
+         | RunnablePassthrough.assign(context=itemgetter("context"))
+         | {
+             "response": rag_prompt | mentalhealth_qa_llm | StrOutputParser(),
+             "context": itemgetter("context"),
+         }
+     )
+     return created_qa_chain
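+ # Minimal usage sketch (the question is illustrative): the chain takes a dict
+ # with a "question" key and returns the answer plus the retrieved context:
+ #   chain = create_qa_chain(parent_document_retriever)
+ #   result = await chain.ainvoke({"question": "What helps with burnout?"})
+ #   result["response"]  # generated answer (str)
+ #   result["context"]   # retrieved parent Documents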
+
+
+ ### 6. DEFINE THE LIST OF TOOLS AVAILABLE TO THE AGENT AND A TOOL EXECUTOR WRAPPED AROUND THEM
+
+
+ @tool
+ async def rag_tool(question: str) -> str:
+     """Use this tool to retrieve relevant information from the knowledge base."""
+     parent_document_retriever_qa_chain = create_qa_chain(parent_document_retriever)
+     response = await parent_document_retriever_qa_chain.ainvoke({"question": question})
+
+     return response["response"]
+
+
+ tool_belt = [
+     rag_tool,
+     PubmedQueryRun(),
+     ArxivQueryRun(),
+     DuckDuckGoSearchRun(),
+ ]
+
+ tool_executor = ToolExecutor(tool_belt)
+
+
+ ### 7. CONVERT TOOLS INTO THE FORMAT COMPATIBLE WITH OPENAI'S FUNCTION-CALLING API, THEN BIND THEM TO THE MODEL FOR GENERATION
+ model = ChatOpenAI(temperature=0, streaming=True)
+
+ functions = [convert_to_openai_function(t) for t in tool_belt]
+ model = model.bind_functions(functions)
+ model = model.with_config(tags=["final_node"])
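+ # For reference, each entry in `functions` is a plain dict in OpenAI's
+ # function-calling schema, roughly:
+ #   {"name": "rag_tool", "description": "...", "parameters": {"type": "object", ...}}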
+
+ ### 8. USING TypedDict FROM THE typing MODULE AND THE langchain_core.messages MODULE, A CUSTOM TYPE NAMED AgentState IS CREATED.
+ # THE AgentState TYPE HAS A FIELD NAMED <messages> OF TYPE Annotated[Sequence[BaseMessage], operator.add].
+ # Sequence[BaseMessage]: INDICATES THAT MESSAGES ARE A SEQUENCE OF BaseMessage OBJECTS.
+ # Annotated: USED TO ATTACH METADATA TO THE TYPE; HERE IT TELLS LANGGRAPH TO MERGE STATE UPDATES BY CONCATENATING MESSAGE SEQUENCES WITH operator.add.
+
+
+ class AgentState(TypedDict):
+     messages: Annotated[Sequence[BaseMessage], operator.add]
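+
+ # Illustrative reducer behavior: if the current state is {"messages": [a]} and
+ # a node returns {"messages": [b]}, LangGraph applies operator.add, so the
+ # next state is {"messages": [a, b]}.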
+
+
+ ### 9. TWO FUNCTIONS DEFINED: 1. call_model AND 2. call_tool
+ # 1. INVOKES THE MODEL WITH THE MESSAGES EXTRACTED FROM THE STATE, RETURNING A DICT CONTAINING THE RESPONSE MESSAGE.
+ # 2.1 A ToolInvocation OBJECT IS CREATED USING THE NAME AND ARGUMENTS EXTRACTED FROM THE LAST MESSAGE IN THE STATE.
+ # 2.2 tool_executor IS INVOKED WITH THE CREATED ToolInvocation OBJECT.
+ # 2.3 A FunctionMessage OBJECT IS CREATED FROM THE tool_executor RESPONSE AND THE NAME OF THAT TOOL.
+ # 2.4 THE RETURN VALUE IS A DICT CONTAINING THE FunctionMessage OBJECT.
+
+
+ async def call_model(state):
+     messages = state["messages"]
+     response = await model.ainvoke(messages)
+     return {"messages": [response]}
+
+
+ async def call_tool(state):
+     last_message = state["messages"][-1]
+
+     action = ToolInvocation(
+         tool=last_message.additional_kwargs["function_call"]["name"],
+         tool_input=json.loads(
+             last_message.additional_kwargs["function_call"]["arguments"]
+         ),
+     )
+
+     # Log which tool the model selected
+     print(last_message.additional_kwargs["function_call"]["name"])
+     response = await tool_executor.ainvoke(action)
+
+     function_message = FunctionMessage(content=str(response), name=action.tool)
+
+     return {"messages": [function_message]}
+
+
+ ### 10. GRAPH CREATION WITH HELPFULNESS EVALUATION
+ # should_continue CHECKS WHETHER THE LAST MESSAGE IN THE STATE CARRIES A function_call (CONTINUE) OR NOT (END).
+ # THE add_conditional_edges() METHOD ROUTES ON THIS RESPONSE, TRANSITIONING EITHER TO THE action NODE OR TO END.
+
+
+ def should_continue(state):
+     last_message = state["messages"][-1]
+
+     if "function_call" not in last_message.additional_kwargs:
+         return "end"
+
+     return "continue"
+
+
+ async def check_helpfulness(state):
+     initial_query = state["messages"][0]
+     final_response = state["messages"][-1]
+
+     # Artificial loop guard: bail out once the conversation grows too long
+     if len(state["messages"]) > 20:
+         return "end"
+
+     prompt_template = """\
+ Given an initial query and a final response, determine if the final response is extremely helpful or not. Please indicate helpfulness with a 'Y' \
+ and unhelpfulness as an 'N'.
+
+ Initial Query:
+ {initial_query}
+
+ Final Response:
+ {final_response}"""
+
+     prompt_template = PromptTemplate.from_template(prompt_template)
+
+     helpfulness_check_model = ChatOpenAI(model="gpt-4")
+
+     helpfulness_check_chain = (
+         prompt_template | helpfulness_check_model | StrOutputParser()
+     )
+
+     helpfulness_response = await helpfulness_check_chain.ainvoke(
+         {"initial_query": initial_query, "final_response": final_response}
+     )
+
+     if "Y" in helpfulness_response:
+         print("Helpful!")
+         return "end"
+     else:
+         print("Not helpful!")
+         return "continue"
+
+
+ def dummy_node(state):
+     return
+
+
+ ### 11. SETTING UP THE GRAPH WORKFLOW:
+ # 1. AN INSTANCE OF StateGraph IS CREATED WITH THE AgentState TYPE. THREE NODES ARE ADDED TO THE GRAPH USING THE add_node() METHOD:
+ # 1.1 THE "agent" NODE IS ASSOCIATED WITH THE call_model FUNCTION.
+ # 1.2 THE "action" NODE IS ASSOCIATED WITH THE call_tool FUNCTION.
+ # 1.3 THE "passthrough" NODE IS A CUSTOM NODE ASSOCIATED WITH CHECKING HELPFULNESS.
+ # 1.4 THE CONDITIONAL EDGES:
+ # 1.4.1 FROM THE agent NODE TO EITHER THE action NODE OR THE passthrough NODE.
+ # 1.4.2 FROM THE passthrough NODE TO EITHER THE agent NODE OR THE END NODE.
+ # 1.4.3 A FIXED EDGE FROM action BACK TO agent, SO THE MODEL CAN USE TOOL RESULTS FOR RESPONSE GENERATION.
+ def get_state_update_bot():
+     workflow = StateGraph(AgentState)
+
+     workflow.add_node("agent", call_model)  # agent node has access to the llm
+     workflow.add_node("action", call_tool)  # action node has access to the tools
+     workflow.set_entry_point("agent")
+     workflow.add_conditional_edges(
+         "agent",
+         should_continue,
+         {
+             "continue": "action",  # tools
+             "end": END,
+         },
+     )
+     workflow.add_edge("action", "agent")  # tools
+     state_update_bot = workflow.compile()
+
+     return state_update_bot
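+ # Minimal usage sketch for the plain graph (illustrative input, no checkpointer):
+ #   bot = get_state_update_bot()
+ #   result = await bot.ainvoke({"messages": [HumanMessage("I feel burned out")]})
+ #   result["messages"][-1].content  # the agent's final reply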
+
+
+ # --------------------------------------------------
+ from langgraph.checkpoint.memory import MemorySaver
+
+
+ def get_state_update_bot_with_helpfullness_node():
+     # memory = MemorySaver()
+
+     graph_with_helpfulness_check = StateGraph(AgentState)
+
+     graph_with_helpfulness_check.add_node("agent", call_model)
+     graph_with_helpfulness_check.add_node("action", call_tool)
+     graph_with_helpfulness_check.add_node("passthrough", dummy_node)
+
+     graph_with_helpfulness_check.set_entry_point("agent")
+
+     graph_with_helpfulness_check.add_conditional_edges(
+         "agent", should_continue, {"continue": "action", "end": "passthrough"}
+     )
+
+     graph_with_helpfulness_check.add_conditional_edges(
+         "passthrough", check_helpfulness, {"continue": "agent", "end": END}
+     )
+
+     graph_with_helpfulness_check.add_edge("action", "agent")
+     memory = AsyncSqliteSaver.from_conn_string(":memory:")
+     return graph_with_helpfulness_check.compile(checkpointer=memory)
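+
+ # NOTE: because this graph is compiled with a checkpointer, each invocation
+ # must carry a config like {"configurable": {"thread_id": ...}} so state is
+ # saved and resumed per conversation; start_chat() below sets this up.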
+
+
+ ### 12.
+ # def convert_inputs(input_object):
+ #     system_prompt = f"""You are a qualified psychologist providing mental health advice. Be empathetic in your responses.
+ #     Always provide a complete response. Be empathetic and provide a follow-up question to find a resolution.
+ #     First, look up the RAG (retrieval-augmented generation) and then arXiv research, or use InternetSearch:
+
+ #     You will operate in a loop of Thought, Action, PAUSE, and Observation. At the end of the loop, you will provide an Answer.
+
+ #     Instructions:
+
+ #     Thought: Describe your thoughts about the user's question.
+ #     Action: Choose one of the available actions to gather information or provide insights.
+ #     PAUSE: Pause to allow the action to complete.
+ #     Observation: Review the results of the action.
+
+ #     Available Actions:
+
+ #     Use the tools at your disposal to look up information or resolve the consultancy. You are allowed to make multiple calls (either together or in sequence):
+
+ #     1. rag_tool: RAG (Retrieval-Augmented Generation) to access relevant mental health information.
+ #     2. DuckDuckGoSearchRun: Perform an online search (InternetSearch) to find up-to-date resources and recommendations.
+ #     3. ArxivQueryRun: Find relevant research or content.
+ #     4. PubmedQueryRun: Find specific coping strategies or management techniques from research papers.
+
+ #     You may make multiple calls to these tools as needed to provide comprehensive advice.
+
+ #     Present your final response in a clear, structured format, including a chart of recommended actions if appropriate.
+
+ #     User's question: {input_object["messages"]}
+
+ #     Response: When responding to users' personal issues or concerns:
+
+ #     1. Open with a brief empathetic acknowledgment of the user's situation, then
+ #     2. Provide practical, actionable advice, which often includes
+ #     3. Suggesting professional help (e.g., therapists, counselors) when appropriate,
+ #     4. Encouraging open communication and dialogue with involved parties,
+ #     5. Recommending self-reflection or exploration of emotions and values, and
+ #     6. Offering specific coping strategies or management techniques.
+ #     """
+ #     return {"messages": [SystemMessage(content=system_prompt)]}
+ def convert_inputs(input_object):
+     system_prompt = f"""You are a qualified psychologist providing mental health advice. Be empathetic in your responses.
+     Always provide a complete response. Be empathetic and provide a follow-up question to find a resolution.
+
+     You must use the tools at your disposal.
+     You must consult pubmed, then rag_tool, then duckduckgo_results_json.
+     You must make multiple calls to these tools as needed to provide comprehensive advice.
+
+     User's question: {input_object["messages"]}
+     """
+     return {"messages": [SystemMessage(content=system_prompt)]}
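+
+ # Illustrative shape: convert_inputs({"messages": "I can't sleep"}) returns
+ # {"messages": [SystemMessage(content="You are a qualified psychologist ...")]},
+ # i.e. exactly the AgentState the graph expects as its initial input.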
+
+
+ # Define the function to parse the output
+ def parse_output(input_state):
+     return input_state
+
+
+ # bot_with_helpfulness_check = get_state_update_bot_with_helpfullness_node()
+ # bot = get_state_update_bot()
+
+ # Create the agent chain
+ # agent_chain = convert_inputs | bot_with_helpfulness_check  # | StrOutputParser() | parse_output
+
+ # Run the agent chain with the input
+ # messages = agent_chain.invoke({"question": mental_health_counseling_data['test'][14]['Context']})
+ import uuid
+
+ # ---------------------------------------------------------------------------------------------------------
+ # DEPLOYMENT
+ # ---------------------------------------------------------------------------------------------------------
+ from langchain_core.messages import HumanMessage
+
+
+ @cl.author_rename
+ def rename(original_author: str):
+     """
+     This function can be used to rename the 'author' of a message.
+
+     In this case, we're overriding the 'Assistant' author to be 'Mental Health Advisor Bot'.
+     """
+     rename_dict = {"Assistant": "Mental Health Advisor Bot"}
+     return rename_dict.get(original_author, original_author)
+
+
+ @cl.on_chat_start
+ async def start_chat():
+     """
+     This function will be called at the start of every user session.
+
+     We will build our LangGraph agent chain here, and store it in the user session.
+
+     The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
+     """
+
+     ### BUILD THE LANGGRAPH AGENT CHAIN
+     bot_with_helpfulness_check = get_state_update_bot_with_helpfullness_node()
+     lcel_agent_langgraph_chain = convert_inputs | bot_with_helpfulness_check
+
+     cl.user_session.set("langgraph_agent_chain", lcel_agent_langgraph_chain)
+
+     # Create a thread id and pass it as configuration
+     # to be able to use LangGraph's checkpointer
+     conversation_id = str(uuid.uuid4())
+     config = {"configurable": {"thread_id": conversation_id}}
+     cl.user_session.set("config", config)
+
+
+ @cl.on_message
+ async def main(message: cl.Message):
+     """
+     This function will be called every time a message is received from a session.
+     """
+     # message is the human message; agent_message is the agent's response.
+
+     graph = cl.user_session.get("langgraph_agent_chain")
+     config = cl.user_session.get("config")
+     final_output = ""
+
+     inputs = {"messages": [HumanMessage(message.content)]}
+
+     agent_message = cl.Message(content="")
+     await agent_message.send()
+
+     async for event in graph.astream_events(
+         inputs,
+         config=config,  # or RunnableConfig(callbacks=[cl.LangchainCallbackHandler()])
+         version="v2",
+     ):
+         kind = event["event"]
+         tags = event.get("tags", [])
+         name = event.get("name", "")
+         print(f"Received event: {event}")  # Debugging statement
+         if kind == "on_chain_start":
+             if (
+                 event["name"] == "Agent"
+             ):  # Was assigned when creating the agent with `.with_config({"run_name": "Agent"})`
+                 print(
+                     f"Starting agent: {event['name']} with input: {event['data'].get('input')}"
+                 )
+         elif kind == "on_chain_end" and name == "RunnableSequence":
+             if "output" in event["data"] and "agent" in event["data"]["output"]:
+                 agent_output = event["data"]["output"]["agent"]
+                 if "messages" in agent_output and agent_output["messages"]:
+                     final_output = agent_output["messages"][0].content
+                     await agent_message.stream_token(final_output)
+
+         # elif kind == "on_chain_stream":
+         #     data = event["data"]
+         #     if data["chunk"].content:
+         #         print(f"Streaming content: {data['chunk'].content}")
+         #         await agent_message.stream_token(data["chunk"].content)
+
+     await agent_message.send()
+
+ # docker build -t llm-app-langgraph-react-chainlit-mentalmindbt .
+ # docker run -it -p 7860:7860 llm-app-langgraph-react-chainlit-mentalmindbt:latest
requirements.txt ADDED
@@ -0,0 +1,21 @@
+ chainlit==1.1.306
+ langchain==0.2.10
+ langchain_community==0.2.9
+ langchain_core==0.2.22
+ langchain_openai==0.1.17
+ langchain_qdrant==0.1.2
+ langchain_text_splitters==0.2.2
+ langgraph==0.1.9
+ python-dotenv==1.0.1
+ qdrant_client==1.10.1
+ arxiv
+ duckduckgo_search==5.3.1b1
+ pubmed
+ PyMuPDF
+ xmltodict
+ aiosqlite
+ # numpy>=1.21.0
+ # pandas>=1.3.0
+ # scikit-learn>=0.24.2
+ # ragas>=0.1.0