XThomasBU committed on
Commit e029e22
1 Parent(s): aaaac46

improvements in literalai, chainlit, chat
code/main.py CHANGED
@@ -1,18 +1,14 @@
  import json
- import textwrap
- from typing import Any, Callable, Dict, List, Literal, Optional, no_type_check
- import chainlit as cl
- from chainlit import run_sync
- from chainlit.config import config
  import yaml
  import os
-
  from modules.chat.llm_tutor import LLMTutor
  from modules.chat_processor.chat_processor import ChatProcessor
  from modules.config.constants import LLAMA_PATH
  from modules.chat.helpers import get_sources
-
- from chainlit.input_widget import Select, Switch, Slider

  USER_TIMEOUT = 60_000
  SYSTEM = "System 🖥️"
@@ -24,73 +20,79 @@ ERROR = "Error 🚫"

  class Chatbot:
      def __init__(self):
-         self.llm_tutor = None
-         self.chain = None
-         self.chat_processor = None
          self.config = self._load_config()

      def _load_config(self):
          with open("modules/config/config.yml", "r") as f:
-             config = yaml.safe_load(f)
-             return config

      @no_type_check
-     async def setup_llm(self) -> None:
-         """From the session `llm_settings`, create new LLMConfig and LLM objects,
-         save them in session state."""
-
-         old_config = self.config.copy()  # create a copy of the previous config
-         new_config = (
-             self.config.copy()
-         )  # create the new config as a copy of the previous config
-
          llm_settings = cl.user_session.get("llm_settings", {})
-         chat_profile = llm_settings.get("chat_model")
-         retriever_method = llm_settings.get("retriever_method")
-         memory_window = llm_settings.get("memory_window")
-         ELI5 = llm_settings.get("ELI5")
-
-         self._configure_llm(chat_profile)

          chain = cl.user_session.get("chain")
-         memory = chain.memory
-         new_config["vectorstore"][
-             "db_option"
-         ] = retriever_method  # update the retriever method in the config
-         new_config["llm_params"][
-             "memory_window"
-         ] = memory_window  # update the memory window in the config
-         new_config["llm_params"]["ELI5"] = ELI5

-         # self.llm_tutor.update_llm(new_config)  # TODO: Fi this!!!
-         self.llm_tutor = LLMTutor(
-             self.config, user={"user_id": "abc123", "session_id": "789"}
-         )
          self.chain = self.llm_tutor.qa_bot(memory=memory)

          tags = [chat_profile, self.config["vectorstore"]["db_option"]]
-         self.chat_processor = ChatProcessor(self.llm_tutor, tags=tags)

          cl.user_session.set("chain", self.chain)
          cl.user_session.set("llm_tutor", self.llm_tutor)
          cl.user_session.set("chat_processor", self.chat_processor)

      @no_type_check
-     async def update_llm(self, new_settings: Dict[str, Any]) -> None:
-         """Update LLMConfig and LLM from settings, and save in session state."""
          cl.user_session.set("llm_settings", new_settings)
          await self.inform_llm_settings()
          await self.setup_llm()

      async def make_llm_settings_widgets(self, config=None):
          config = config or self.config
          await cl.ChatSettings(
              [
                  cl.input_widget.Select(
                      id="chat_model",
                      label="Model Name (Default GPT-3)",
-                     values=["llama", "gpt-3.5-turbo-1106", "gpt-4"],
-                     initial_index=0,
                  ),
                  cl.input_widget.Select(
                      id="retriever_method",
@@ -109,28 +111,33 @@ class Chatbot:
                  cl.input_widget.Switch(
                      id="view_sources", label="View Sources", initial=False
                  ),
-                 cl.input_widget.Switch(
-                     id="ELI5", label="Explain Like I'm 5 (ELI5)", initial=False
                  ),
-                 # cl.input_widget.TextInput(
-                 #     id="vectorstore",
-                 #     label="temp",
-                 #     initial="None",
-                 # ),
              ]
-         ).send()  # type: ignore

      @no_type_check
-     async def inform_llm_settings(self) -> None:
          llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
          llm_tutor = cl.user_session.get("llm_tutor")
-         settings_dict = dict(
-             model=llm_settings.get("chat_model"),
-             retriever=llm_settings.get("retriever_method"),
-             memory_window=llm_settings.get("memory_window"),
-             num_docs_in_db=len(llm_tutor.vector_db),
-             view_sources=llm_settings.get("view_sources"),
-         )
          await cl.Message(
              author=SYSTEM,
              content="LLM settings have been updated. You can continue with your Query!",
@@ -140,11 +147,14 @@ class Chatbot:
                      display="side",
                      content=json.dumps(settings_dict, indent=4),
                      language="json",
-                 )
              ],
          ).send()

      async def set_starters(self):
          return [
              cl.Starter(
                  label="recording on CNNs?",
@@ -168,64 +178,73 @@ class Chatbot:
              ),
          ]

-     async def chat_profile(self):
-         return [
-             cl.ChatProfile(
-                 name="gpt-3.5-turbo-1106",
-                 markdown_description="Use OpenAI API for **gpt-3.5-turbo-1106**.",
-             ),
-             cl.ChatProfile(
-                 name="gpt-4",
-                 markdown_description="Use OpenAI API for **gpt-4**.",
-             ),
-             cl.ChatProfile(
-                 name="Llama",
-                 markdown_description="Use the local LLM: **Tiny Llama**.",
-             ),
-         ]
-
      def rename(self, orig_author: str):
          rename_dict = {"Chatbot": "AI Tutor"}
          return rename_dict.get(orig_author, orig_author)

      async def start(self):
-         await self.make_llm_settings_widgets(self.config)
-
-         # chat_profile = cl.user_session.get("chat_profile")
-         # if chat_profile:
-         #     self._configure_llm(chat_profile)
-
-         self.llm_tutor = LLMTutor(
-             self.config, user={"user_id": "abc123", "session_id": "789"}
-         )
-         self.chain = self.llm_tutor.qa_bot()
-         tags = [self.config["vectorstore"]["db_option"]]
-         self.chat_processor = ChatProcessor(self.llm_tutor, tags=tags)
          cl.user_session.set("llm_tutor", self.llm_tutor)
          cl.user_session.set("chain", self.chain)
-         cl.user_session.set("counter", 20)
          cl.user_session.set("chat_processor", self.chat_processor)

      async def on_chat_end(self):
          await cl.Message(content="Sorry, I have to go now. Goodbye!").send()

      async def main(self, message):
          chain = cl.user_session.get("chain")
-         counter = cl.user_session.get("counter")
          llm_settings = cl.user_session.get("llm_settings", {})
          view_sources = llm_settings.get("view_sources", False)

-         counter += 1
-         cl.user_session.set("counter", counter)
-
          processor = cl.user_session.get("chat_processor")
          res = await processor.rag(message.content, chain)

-         print(res)
-
          answer = res.get("answer", res.get("result"))
-
          answer_with_sources, source_elements, sources_dict = get_sources(
              res, answer, view_sources=view_sources
          )
@@ -233,26 +252,16 @@ class Chatbot:

          await cl.Message(content=answer_with_sources, elements=source_elements).send()

-     def _configure_llm(self, chat_profile):
-         chat_profile = chat_profile.lower()
-         if chat_profile in ["gpt-3.5-turbo-1106", "gpt-4"]:
-             self.config["llm_params"]["llm_loader"] = "openai"
-             self.config["llm_params"]["openai_params"]["model"] = chat_profile
-         elif chat_profile == "llama":
-             self.config["llm_params"]["llm_loader"] = "local_llm"
-             self.config["llm_params"]["local_llm_params"]["model"] = LLAMA_PATH
-             self.config["llm_params"]["local_llm_params"]["model_type"] = "llama"
-         elif chat_profile == "mistral":
-             self.config["llm_params"]["llm_loader"] = "local_llm"
-             self.config["llm_params"]["local_llm_params"]["model"] = MISTRAL_PATH
-             self.config["llm_params"]["local_llm_params"]["model_type"] = "mistral"


  chatbot = Chatbot()
-
- # Register functions to Chainlit events
  cl.set_starters(chatbot.set_starters)
- # cl.set_chat_profiles(chatbot.chat_profile)
  cl.author_rename(chatbot.rename)
  cl.on_chat_start(chatbot.start)
  cl.on_chat_end(chatbot.on_chat_end)
 
  import json
  import yaml
  import os
+ from typing import Any, Dict, no_type_check
+ import chainlit as cl
  from modules.chat.llm_tutor import LLMTutor
  from modules.chat_processor.chat_processor import ChatProcessor
  from modules.config.constants import LLAMA_PATH
  from modules.chat.helpers import get_sources
+ import copy
+ from typing import Optional

  USER_TIMEOUT = 60_000
  SYSTEM = "System 🖥️"

  class Chatbot:
      def __init__(self):
+         """
+         Initialize the Chatbot class.
+         """
          self.config = self._load_config()

      def _load_config(self):
+         """
+         Load the configuration from a YAML file.
+         """
          with open("modules/config/config.yml", "r") as f:
+             return yaml.safe_load(f)

      @no_type_check
+     async def setup_llm(self):
+         """
+         Set up the LLM with the provided settings. Update the configuration and initialize the LLM tutor.
+         """
          llm_settings = cl.user_session.get("llm_settings", {})
+         chat_profile, retriever_method, memory_window, llm_style = (
+             llm_settings.get("chat_model"),
+             llm_settings.get("retriever_method"),
+             llm_settings.get("memory_window"),
+             llm_settings.get("llm_style"),
+         )

          chain = cl.user_session.get("chain")
+         memory = chain.memory if chain else []

+         old_config = copy.deepcopy(self.config)
+         self.config["vectorstore"]["db_option"] = retriever_method
+         self.config["llm_params"]["memory_window"] = memory_window
+         self.config["llm_params"]["llm_style"] = llm_style
+         self.config["llm_params"]["llm_loader"] = chat_profile
+
+         self.llm_tutor.update_llm(
+             old_config, self.config
+         )  # update only attributes that are changed
          self.chain = self.llm_tutor.qa_bot(memory=memory)

          tags = [chat_profile, self.config["vectorstore"]["db_option"]]
+         self.chat_processor.config = self.config

          cl.user_session.set("chain", self.chain)
          cl.user_session.set("llm_tutor", self.llm_tutor)
          cl.user_session.set("chat_processor", self.chat_processor)

      @no_type_check
+     async def update_llm(self, new_settings: Dict[str, Any]):
+         """
+         Update the LLM settings and reinitialize the LLM with the new settings.
+
+         Args:
+             new_settings (Dict[str, Any]): The new settings to update.
+         """
          cl.user_session.set("llm_settings", new_settings)
          await self.inform_llm_settings()
          await self.setup_llm()

      async def make_llm_settings_widgets(self, config=None):
+         """
+         Create and send the widgets for LLM settings configuration.
+
+         Args:
+             config: The configuration to use for setting up the widgets.
+         """
          config = config or self.config
          await cl.ChatSettings(
              [
                  cl.input_widget.Select(
                      id="chat_model",
                      label="Model Name (Default GPT-3)",
+                     values=["local_llm", "gpt-3.5-turbo-1106", "gpt-4"],
+                     initial_index=1,
                  ),
                  cl.input_widget.Select(
                      id="retriever_method",

                  cl.input_widget.Switch(
                      id="view_sources", label="View Sources", initial=False
                  ),
+                 cl.input_widget.Select(
+                     id="llm_style",
+                     label="Type of Conversation (Default Normal)",
+                     values=["Normal", "ELI5", "Socratic"],
+                     initial_index=0,
                  ),
              ]
+         ).send()

      @no_type_check
+     async def inform_llm_settings(self):
+         """
+         Inform the user about the updated LLM settings and display them as a message.
+         """
          llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
          llm_tutor = cl.user_session.get("llm_tutor")
+         settings_dict = {
+             "model": llm_settings.get("chat_model"),
+             "retriever": llm_settings.get("retriever_method"),
+             "memory_window": llm_settings.get("memory_window"),
+             "num_docs_in_db": (
+                 len(llm_tutor.vector_db)
+                 if llm_tutor and hasattr(llm_tutor, "vector_db")
+                 else 0
+             ),
+             "view_sources": llm_settings.get("view_sources"),
+         }
          await cl.Message(
              author=SYSTEM,
              content="LLM settings have been updated. You can continue with your Query!",

                      display="side",
                      content=json.dumps(settings_dict, indent=4),
                      language="json",
+                 ),
              ],
          ).send()

      async def set_starters(self):
+         """
+         Set starter messages for the chatbot.
+         """
          return [
              cl.Starter(
                  label="recording on CNNs?",

              ),
          ]

      def rename(self, orig_author: str):
+         """
+         Rename the original author to a more user-friendly name.
+
+         Args:
+             orig_author (str): The original author's name.
+
+         Returns:
+             str: The renamed author.
+         """
          rename_dict = {"Chatbot": "AI Tutor"}
          return rename_dict.get(orig_author, orig_author)

      async def start(self):
+         """
+         Start the chatbot, initialize the settings widgets,
+         and display and load the previous conversation if chat logging is enabled.
+         """
+         await cl.Message(content="Welcome back! Setting up your session...").send()

+         await self.make_llm_settings_widgets(self.config)
+         user = cl.user_session.get("user")
+         self.user = {
+             "user_id": user.identifier,
+             "session_id": "1234",
+         }
+         cl.user_session.set("user", self.user)
+         self.chat_processor = ChatProcessor(self.config, self.user)
+         self.llm_tutor = LLMTutor(self.config, user=self.user)
+         if self.config["chat_logging"]["log_chat"]:
+             # get the previous conversation of the user
+             memory = self.chat_processor.processor.prev_conv
+             if len(self.chat_processor.processor.prev_conv) > 0:
+                 for idx, conv in enumerate(self.chat_processor.processor.prev_conv):
+                     await cl.Message(
+                         author="User", content=conv[0], type="user_message"
+                     ).send()
+                     await cl.Message(author="AI Tutor", content=conv[1]).send()
+         else:
+             memory = []
+         self.chain = self.llm_tutor.qa_bot(memory=memory)
          cl.user_session.set("llm_tutor", self.llm_tutor)
          cl.user_session.set("chain", self.chain)
          cl.user_session.set("chat_processor", self.chat_processor)

      async def on_chat_end(self):
+         """
+         Handle the end of the chat session by sending a goodbye message.
+         # TODO: Not used as of now - useful once conversation limiting is implemented
+         """
          await cl.Message(content="Sorry, I have to go now. Goodbye!").send()

      async def main(self, message):
+         """
+         Process and display the conversation.
+
+         Args:
+             message: The incoming chat message.
+         """
          chain = cl.user_session.get("chain")
          llm_settings = cl.user_session.get("llm_settings", {})
          view_sources = llm_settings.get("view_sources", False)

          processor = cl.user_session.get("chat_processor")
          res = await processor.rag(message.content, chain)

          answer = res.get("answer", res.get("result"))
          answer_with_sources, source_elements, sources_dict = get_sources(
              res, answer, view_sources=view_sources
          )

          await cl.Message(content=answer_with_sources, elements=source_elements).send()

+     def auth_callback(self, username: str, password: str) -> Optional[cl.User]:
+         return cl.User(
+             identifier=username,
+             metadata={"role": "admin", "provider": "credentials"},
+         )


  chatbot = Chatbot()
+ cl.password_auth_callback(chatbot.auth_callback)
  cl.set_starters(chatbot.set_starters)
  cl.author_rename(chatbot.rename)
  cl.on_chat_start(chatbot.start)
  cl.on_chat_end(chatbot.on_chat_end)
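For orientation: the `llm_settings` dict that `setup_llm` and `main` read back out of `cl.user_session` mirrors the widget ids defined in `make_llm_settings_widgets`. A sketch with illustrative values only (the `memory_window` control sits in a part of the settings list this diff does not show, so its widget type is an assumption):

```python
# Illustrative only: shape of the "llm_settings" dict stored in the Chainlit session.
# Keys correspond to widget ids above; the values here are example defaults, not live data.
llm_settings = {
    "chat_model": "gpt-3.5-turbo-1106",  # Select widget: chat_model
    "retriever_method": "FAISS",         # Select widget: retriever_method
    "memory_window": 3,                  # assumed slider, defined outside this hunk
    "llm_style": "Normal",               # Select widget: llm_style
    "view_sources": False,               # Switch widget: view_sources
}
```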
code/modules/chat/base.py ADDED
@@ -0,0 +1,13 @@
+ class BaseRAG:
+     """
+     Base class for RAG chatbots.
+     """
+
+     def __init__(self):
+         pass
+
+     def invoke(self):
+         """
+         Invoke the RAG chatbot.
+         """
+         pass
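Both backends added in this commit (`Langchain_RAG` and `Langgraph_RAG`) inherit from this class. A minimal sketch of the implied contract, using a hypothetical `EchoRAG` that is not part of the repo:

```python
from modules.chat.base import BaseRAG

class EchoRAG(BaseRAG):  # hypothetical subclass, for illustration only
    def __init__(self, llm, memory, retriever, qa_prompt: str, rephrase_prompt: str):
        # same constructor shape as Langchain_RAG / Langgraph_RAG below
        self.qa_prompt = qa_prompt

    def invoke(self, user_query, config):
        # main.py reads res.get("answer", res.get("result")), so a backend
        # must return a dict carrying one of those keys
        return {"answer": f"echo: {user_query['input']}"}
```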
code/modules/chat/chat_model_loader.py CHANGED
@@ -1,4 +1,4 @@
- from langchain_community.chat_models import ChatOpenAI
  from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
  from transformers import AutoTokenizer, TextStreamer
  from langchain_community.llms import LlamaCpp
@@ -7,6 +7,7 @@ import transformers
  import os
  from langchain.callbacks.manager import CallbackManager
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler


  class ChatModelLoader:
@@ -15,15 +16,12 @@ class ChatModelLoader:
          self.huggingface_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

      def load_chat_model(self):
-         if self.config["llm_params"]["llm_loader"] == "openai":
-             llm = ChatOpenAI(
-                 model_name=self.config["llm_params"]["openai_params"]["model"]
-             )
          elif self.config["llm_params"]["llm_loader"] == "local_llm":
              n_batch = 512  # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
-             model_path = self.config["llm_params"]["local_llm_params"]["model"]
              llm = LlamaCpp(
-                 model_path=model_path,
                  n_batch=n_batch,
                  n_ctx=2048,
                  f16_kv=True,
@@ -34,5 +32,7 @@ class ChatModelLoader:
                  ],
              )
          else:
-             raise ValueError("Invalid LLM Loader")
          return llm

+ from langchain_openai import ChatOpenAI
  from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
  from transformers import AutoTokenizer, TextStreamer
  from langchain_community.llms import LlamaCpp

  import os
  from langchain.callbacks.manager import CallbackManager
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+ from modules.config.constants import LLAMA_PATH


  class ChatModelLoader:

          self.huggingface_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

      def load_chat_model(self):
+         if self.config["llm_params"]["llm_loader"] in ["gpt-3.5-turbo-1106", "gpt-4"]:
+             llm = ChatOpenAI(model_name=self.config["llm_params"]["llm_loader"])
          elif self.config["llm_params"]["llm_loader"] == "local_llm":
              n_batch = 512  # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
              llm = LlamaCpp(
+                 model_path=LLAMA_PATH,
                  n_batch=n_batch,
                  n_ctx=2048,
                  f16_kv=True,

                  ],
              )
          else:
+             raise ValueError(
+                 f"Invalid LLM Loader: {self.config['llm_params']['llm_loader']}"
+             )
          return llm
code/modules/chat/helpers.py CHANGED
@@ -1,7 +1,5 @@
- from modules.config.constants import *
  import chainlit as cl
- from langchain_core.prompts import PromptTemplate
- from langchain_core.prompts import ChatPromptTemplate


  def get_sources(res, answer, view_sources=False):
@@ -93,38 +91,18 @@ def get_prompt(config, prompt_type):
      llm_params = config["llm_params"]
      llm_loader = llm_params["llm_loader"]
      use_history = llm_params["use_history"]
-
-     print("llm_params: ", llm_params)
-     print("ELI5", llm_params["ELI5"])
-
-     print("\n\n")

      if prompt_type == "qa":
-         if llm_loader == "openai":
-             if llm_params["ELI5"]:
-                 return ELI5_PROMPT_WITH_HISTORY
              else:
-                 return (
-                     OPENAI_PROMPT_WITH_HISTORY
-                     if use_history
-                     else OPENAI_PROMPT_NO_HISTORY
-                 )
-         elif (
-             llm_loader == "local_llm"
-             and llm_params.get("local_llm_params") == "tiny-llama"
-         ):
-             return (
-                 TINYLLAMA_PROMPT_TEMPLATE_WITH_HISTORY
-                 if use_history
-                 else TINYLLAMA_PROMPT_TEMPLATE_NO_HISTORY
-             )
      elif prompt_type == "rephrase":
-         prompt = ChatPromptTemplate.from_messages(
-             [
-                 ("system", OPENAI_REPHRASE_PROMPT),
-                 ("human", "{question}, {chat_history}"),
-             ]
-         )
-         return OPENAI_REPHRASE_PROMPT
-
-     return None

+ from modules.config.prompts import prompts
  import chainlit as cl


  def get_sources(res, answer, view_sources=False):

      llm_params = config["llm_params"]
      llm_loader = llm_params["llm_loader"]
      use_history = llm_params["use_history"]
+     llm_style = llm_params["llm_style"].lower()

      if prompt_type == "qa":
+         if llm_loader == "local_llm":
+             if use_history:
+                 return prompts["tiny_llama"]["prompt_with_history"]
              else:
+                 return prompts["tiny_llama"]["prompt_no_history"]
+         else:
+             if use_history:
+                 return prompts["openai"]["prompt_with_history"][llm_style]
+             else:
+                 return prompts["openai"]["prompt_no_history"]
      elif prompt_type == "rephrase":
+         return prompts["openai"]["rephrase_prompt"]
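`get_prompt` now pulls everything from a `prompts` dict in `modules/config/prompts.py`, which is not included in this diff. From the keys indexed above, its shape is presumably along these lines (an inference, not the actual file; prompt strings abbreviated):

```python
# Inferred shape of modules/config/prompts.py -- an assumption based on the lookups above.
prompts = {
    "openai": {
        "prompt_with_history": {
            "normal": "...",   # keyed by llm_style.lower()
            "eli5": "...",
            "socratic": "...",
        },
        "prompt_no_history": "...",
        "rephrase_prompt": "...",
    },
    "tiny_llama": {
        "prompt_with_history": "...",
        "prompt_no_history": "...",
    },
}
```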
 
 
 
 
 
 
 
 
code/modules/chat/langchain/langchain_rag.py CHANGED
@@ -1,12 +1,14 @@
  from langchain_core.prompts import ChatPromptTemplate

  from modules.chat.langchain.utils import *


- class CustomConversationalRetrievalChain:
      def __init__(self, llm, memory, retriever, qa_prompt: str, rephrase_prompt: str):
          """
-         Initialize the CustomConversationalRetrievalChain class.

          Args:
              llm (LanguageModelLike): The language model instance.
@@ -16,7 +18,7 @@ class CustomConversationalRetrievalChain:
              rephrase_prompt (str): The rephrase prompt string.
          """
          self.llm = llm
-         self.memory = memory
          self.retriever = retriever
          self.qa_prompt = qa_prompt
          self.rephrase_prompt = rephrase_prompt
@@ -30,12 +32,8 @@ class CustomConversationalRetrievalChain:
              "without the chat history. Do NOT answer the question, just "
              "reformulate it if needed and otherwise return it as is."
          )
-         self.contextualize_q_prompt = ChatPromptTemplate.from_messages(
-             [
-                 ("system", contextualize_q_system_prompt),
-                 MessagesPlaceholder("chat_history"),
-                 ("human", "{input}"),
-             ]
          )

          # History-aware retriever
@@ -53,13 +51,7 @@ class CustomConversationalRetrievalChain:
              "\n\n"
              "{context}"
          )
-         self.qa_prompt_template = ChatPromptTemplate.from_messages(
-             [
-                 ("system", qa_system_prompt),
-                 MessagesPlaceholder("chat_history"),
-                 ("human", "{input}"),
-             ]
-         )

          # Question-answer chain
          self.question_answer_chain = create_stuff_documents_chain(
@@ -121,6 +113,9 @@ class CustomConversationalRetrievalChain:
          """
          if (user_id, conversation_id) not in self.store:
              self.store[(user_id, conversation_id)] = InMemoryHistory()
          return self.store[(user_id, conversation_id)]

      def invoke(self, user_query, config):
@@ -133,5 +128,22 @@ class CustomConversationalRetrievalChain:
          Returns:
              dict: The output variables.
          """
-         print(user_query, config)
-         return self.rag_chain.invoke(user_query, config)

  from langchain_core.prompts import ChatPromptTemplate

  from modules.chat.langchain.utils import *
+ from langchain.memory import ChatMessageHistory
+ from modules.chat.base import BaseRAG


+ class Langchain_RAG(BaseRAG):
      def __init__(self, llm, memory, retriever, qa_prompt: str, rephrase_prompt: str):
          """
+         Initialize the Langchain_RAG class.

          Args:
              llm (LanguageModelLike): The language model instance.

              rephrase_prompt (str): The rephrase prompt string.
          """
          self.llm = llm
+         self.memory = self.add_history_from_list(memory)
          self.retriever = retriever
          self.qa_prompt = qa_prompt
          self.rephrase_prompt = rephrase_prompt

              "without the chat history. Do NOT answer the question, just "
              "reformulate it if needed and otherwise return it as is."
          )
+         self.contextualize_q_prompt = ChatPromptTemplate.from_template(
+             contextualize_q_system_prompt
          )

          # History-aware retriever

              "\n\n"
              "{context}"
          )
+         self.qa_prompt_template = ChatPromptTemplate.from_template(qa_system_prompt)

          # Question-answer chain
          self.question_answer_chain = create_stuff_documents_chain(

          """
          if (user_id, conversation_id) not in self.store:
              self.store[(user_id, conversation_id)] = InMemoryHistory()
+             self.store[(user_id, conversation_id)].add_messages(
+                 self.memory.messages
+             )  # add previous messages to the store. Note: the store is in-memory.
          return self.store[(user_id, conversation_id)]

      def invoke(self, user_query, config):

          Returns:
              dict: The output variables.
          """
+         res = self.rag_chain.invoke(user_query, config)
+         res["rephrase_prompt"] = self.rephrase_prompt
+         res["qa_prompt"] = self.qa_prompt
+         return res
+
+     def add_history_from_list(self, history_list):
+         """
+         Add messages from a list to the chat history.
+
+         Args:
+             history_list (list): The list of (user, assistant) message pairs to add.
+         """
+         history = ChatMessageHistory()
+
+         for idx, message_pairs in enumerate(history_list):
+             history.add_user_message(message_pairs[0])
+             history.add_ai_message(message_pairs[1])
+
+         return history
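A hedged usage sketch of the new class, mirroring how `ChatProcessor.rag` invokes it later in this commit (`llm`, `retriever`, and the prompt strings are assumed to be constructed elsewhere, e.g. by `LLMTutor`):

```python
# Sketch only: memory is a list of (user, assistant) pairs, as produced by
# LiteralaiChatProcessor.prev_conv; add_history_from_list converts it.
rag = Langchain_RAG(
    llm=llm,
    memory=[("What is backprop?", "Backpropagation is ...")],
    retriever=retriever,
    qa_prompt=qa_prompt,
    rephrase_prompt=rephrase_prompt,
)
res = rag.invoke(
    {"input": "Can you expand on that?"},
    {"configurable": {"user_id": "abc123", "conversation_id": "1234", "memory_window": 3}},
)
print(res["answer"])  # plus the echoed qa_prompt / rephrase_prompt keys
```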
code/modules/chat/langchain/utils.py CHANGED
@@ -62,38 +62,6 @@ class CustomRunnableWithHistory(RunnableWithMessageHistory):
          return messages


- def _get_chat_history(chat_history: List[CHAT_TURN_TYPE], n: int = None) -> str:
-     """
-     Convert chat history to a formatted string.
-
-     Args:
-         chat_history (List[CHAT_TURN_TYPE]): The chat history.
-
-     Returns:
-         str: The formatted chat history.
-     """
-     _ROLE_MAP = {"human": "Student: ", "ai": "AI Tutor: "}
-     buffer = ""
-     if n is not None:
-         # Calculate the number of turns to take (2 turns per pair)
-         turns_to_take = n * 2
-         chat_history = chat_history[-turns_to_take:]
-     for dialogue_turn in chat_history:
-         if isinstance(dialogue_turn, BaseMessage):
-             role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ")
-             buffer += f"\n{role_prefix}{dialogue_turn.content}"
-         elif isinstance(dialogue_turn, tuple):
-             human = "Student: " + dialogue_turn[0]
-             ai = "AI Tutor: " + dialogue_turn[1]
-             buffer += "\n" + "\n".join([human, ai])
-         else:
-             raise ValueError(
-                 f"Unsupported chat history format: {type(dialogue_turn)}."
-                 f" Full chat history: {chat_history} "
-             )
-     return buffer
-
-
  class InMemoryHistory(BaseChatMessageHistory, BaseModel):
      """In-memory implementation of chat message history."""
 
          return messages


  class InMemoryHistory(BaseChatMessageHistory, BaseModel):
      """In-memory implementation of chat message history."""
code/modules/chat/langgraph/langgraph_rag.py ADDED
@@ -0,0 +1,303 @@
+ # Adapted from https://github.com/langchain-ai/langgraph/blob/main/examples/rag/langgraph_crag.ipynb?ref=blog.langchain.dev
+
+ from typing import List
+
+ from typing_extensions import TypedDict
+ from langgraph.graph import END, StateGraph, START
+ from modules.chat.base import BaseRAG
+ from langchain.memory import ChatMessageHistory
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.pydantic_v1 import BaseModel, Field
+ from langchain_openai import ChatOpenAI
+ from langchain_core.output_parsers import StrOutputParser
+
+
+ class GradeDocuments(BaseModel):
+     """Binary score for relevance check on retrieved documents."""
+
+     binary_score: str = Field(
+         description="Documents are relevant to the question, 'yes' or 'no'"
+     )
+
+
+ class GraphState(TypedDict):
+     """
+     Represents the state of our graph.
+
+     Attributes:
+         question: question
+         generation: LLM generation
+         web_search: whether to rewrite the query, 'Yes' or 'No'
+         documents: list of documents
+     """
+
+     question: str
+     generation: str
+     web_search: str  # declared so grade_documents can write this key into the state
+     documents: List[str]
+
+
+ class Langgraph_RAG(BaseRAG):
+     def __init__(self, llm, memory, retriever, qa_prompt: str, rephrase_prompt: str):
+         """
+         Initialize the Langgraph_RAG class.
+
+         Args:
+             llm (LanguageModelLike): The language model instance.
+             memory (BaseChatMessageHistory): The chat message history instance.
+             retriever (BaseRetriever): The retriever instance.
+             qa_prompt (str): The QA prompt string.
+             rephrase_prompt (str): The rephrase prompt string.
+         """
+         self.llm = llm
+         self.structured_llm_grader = llm.with_structured_output(GradeDocuments)
+         self.memory = self.add_history_from_list(memory)
+         self.retriever = retriever
+         self.qa_prompt = (
+             "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
+             "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
+             "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
+             "Context:\n{context}\n\n"
+             "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
+             "Student: {question}\n"
+             "AI Tutor:"
+         )
+         self.rephrase_prompt = rephrase_prompt
+         self.store = {}
+
+         ## Fix below ##
+
+         system = """You are a grader assessing relevance of a retrieved document to a user question. \n
+             If the document contains keyword(s) or semantic meaning related to the question, grade it as relevant. \n
+             Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question."""
+         grade_prompt = ChatPromptTemplate.from_messages(
+             [
+                 ("system", system),
+                 (
+                     "human",
+                     "Retrieved document: \n\n {document} \n\n User question: {question}",
+                 ),
+             ]
+         )
+
+         self.retrieval_grader = grade_prompt | self.structured_llm_grader
+
+         system = """You are a question re-writer that converts an input question to a better version that is optimized \n
+             for web search. Look at the input and try to reason about the underlying semantic intent / meaning."""
+         re_write_prompt = ChatPromptTemplate.from_messages(
+             [
+                 ("system", system),
+                 (
+                     "human",
+                     "Here is the initial question: \n\n {question} \n Formulate an improved question.",
+                 ),
+             ]
+         )
+
+         self.question_rewriter = re_write_prompt | self.llm | StrOutputParser()
+
+         # Generate
+         self.qa_prompt_template = ChatPromptTemplate.from_template(self.qa_prompt)
+         self.rag_chain = self.qa_prompt_template | self.llm | StrOutputParser()
+
+         ###
+
+         # build the agentic graph
+         self.app = self.create_agentic_graph()
+
+     def retrieve(self, state):
+         """
+         Retrieve documents.
+
+         Args:
+             state (dict): The current graph state
+
+         Returns:
+             state (dict): New key added to state, documents, that contains retrieved documents
+         """
+         print("---RETRIEVE---")
+         question = state["question"]
+
+         # Retrieval
+         documents = self.retriever.get_relevant_documents(question)
+         return {"documents": documents, "question": question}
+
+     def generate(self, state):
+         """
+         Generate an answer.
+
+         Args:
+             state (dict): The current graph state
+
+         Returns:
+             state (dict): New key added to state, generation, that contains LLM generation
+         """
+         print("---GENERATE---")
+         question = state["question"]
+         documents = state["documents"]
+
+         # RAG generation
+         generation = self.rag_chain.invoke({"context": documents, "question": question})
+         return {"documents": documents, "question": question, "generation": generation}
+
+     def transform_query(self, state):
+         """
+         Transform the query to produce a better question.
+
+         Args:
+             state (dict): The current graph state
+
+         Returns:
+             state (dict): Updates question key with a re-phrased question
+         """
+
+         print("---TRANSFORM QUERY---")
+         question = state["question"]
+         documents = state["documents"]
+
+         # Re-write question
+         better_question = self.question_rewriter.invoke({"question": question})
+         return {"documents": documents, "question": better_question}
+
+     def grade_documents(self, state):
+         """
+         Determines whether the retrieved documents are relevant to the question.
+
+         Args:
+             state (dict): The current graph state
+
+         Returns:
+             state (dict): Updates documents key with only filtered relevant documents
+         """
+
+         print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
+         question = state["question"]
+         documents = state["documents"]
+
+         # Score each doc
+         filtered_docs = []
+         web_search = "No"
+         for d in documents:
+             score = self.retrieval_grader.invoke(
+                 {"question": question, "document": d.page_content}
+             )
+             grade = score.binary_score
+             if grade == "yes":
+                 print("---GRADE: DOCUMENT RELEVANT---")
+                 filtered_docs.append(d)
+             else:
+                 print("---GRADE: DOCUMENT NOT RELEVANT---")
+                 web_search = "Yes"
+                 continue
+         return {
+             "documents": filtered_docs,
+             "question": question,
+             "web_search": web_search,
+         }
+
+     def decide_to_generate(self, state):
+         """
+         Determines whether to generate an answer, or re-generate a question.
+
+         Args:
+             state (dict): The current graph state
+
+         Returns:
+             str: Binary decision for next node to call
+         """
+
+         print("---ASSESS GRADED DOCUMENTS---")
+         state["question"]
+         web_search = state["web_search"]
+         state["documents"]
+
+         if web_search == "Yes":
+             # All documents have been filtered by check_relevance,
+             # so we will re-generate a new query
+             print(
+                 "---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---"
+             )
+             return "transform_query"
+         else:
+             # We have relevant documents, so generate an answer
+             print("---DECISION: GENERATE---")
+             return "generate"
+
+     def create_agentic_graph(self):
+         """
+         Create an agentic graph to answer questions.
+
+         Returns:
+             dict: Agentic graph
+         """
+         self.workflow = StateGraph(GraphState)
+         self.workflow.add_node("retrieve", self.retrieve)
+         self.workflow.add_node(
+             "grade_documents", self.grade_documents
+         )  # grade documents
+         self.workflow.add_node("generate", self.generate)  # generate
+         self.workflow.add_node(
+             "transform_query", self.transform_query
+         )  # transform_query
+
+         # build the graph
+         self.workflow.add_edge(START, "retrieve")
+         self.workflow.add_edge("retrieve", "grade_documents")
+         self.workflow.add_conditional_edges(
+             "grade_documents",
+             self.decide_to_generate,
+             {
+                 "transform_query": "transform_query",
+                 "generate": "generate",
+             },
+         )
+
+         self.workflow.add_edge("transform_query", "generate")
+         self.workflow.add_edge("generate", END)
+
+         # Compile
+         app = self.workflow.compile()
+         return app
+
+     def invoke(self, user_query, config):
+         """
+         Invoke the chain.
+
+         Args:
+             user_query (dict): The input variables.
+             config (dict): The runnable config.
+
+         Returns:
+             dict: The output variables.
+         """
+
+         inputs = {
+             "question": user_query["input"],
+         }
+
+         for output in self.app.stream(inputs):
+             for key, value in output.items():
+                 # Node
+                 print(f"Node {key} returned: {value}")
+             print("\n\n")
+
+         print(value["generation"])
+
+         # rename generation to answer
+         value["answer"] = value.pop("generation")
+         value["context"] = value.pop("documents")
+
+         return value
+
+     def add_history_from_list(self, history_list):
+         """
+         Add messages from a list to the chat history.
+
+         Args:
+             history_list (list): The list of (user, assistant) message pairs to add.
+         """
+         history = ChatMessageHistory()
+
+         for idx, message_pairs in enumerate(history_list):
+             history.add_user_message(message_pairs[0])
+             history.add_ai_message(message_pairs[1])
+
+         return history
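The compiled graph is linear apart from one conditional edge: retrieve → grade_documents → generate, with a detour through transform_query when every retrieved document is graded irrelevant. A usage sketch, assuming an `llm` that supports `with_structured_output` (e.g. `ChatOpenAI`) and a `retriever` built by `VectorStoreManager`:

```python
# Sketch only: constructor arguments mirror the Langchain backend.
rag = Langgraph_RAG(llm, memory=[], retriever=retriever,
                    qa_prompt="", rephrase_prompt="")
res = rag.invoke({"input": "What is a convolution?"}, config={})
print(res["answer"])        # renamed from the graph's "generation" key
print(len(res["context"]))  # renamed from "documents"
```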
code/modules/chat/llm_tutor.py CHANGED
@@ -2,7 +2,8 @@ from modules.chat.helpers import get_prompt
  from modules.chat.chat_model_loader import ChatModelLoader
  from modules.vectorstore.store_manager import VectorStoreManager
  from modules.retriever.retriever import Retriever
- from modules.chat.langchain.langchain_rag import CustomConversationalRetrievalChain


  class LLMTutor:
@@ -19,7 +20,7 @@ class LLMTutor:
          self.llm = self.load_llm()
          self.user = user
          self.logger = logger
-         self.vector_db = VectorStoreManager(config, logger=self.logger)
          self.qa_prompt = get_prompt(config, "qa")  # Initialize qa_prompt
          self.rephrase_prompt = get_prompt(
              config, "rephrase"
@@ -28,28 +29,31 @@ class LLMTutor:
              self.vector_db.create_database()
              self.vector_db.save_database()

-     def update_llm(self, new_config):
          """
          Update the LLM and VectorStoreManager based on new configuration.

          Args:
              new_config (dict): New configuration dictionary.
          """
-         changes = self.get_config_changes(self.config, new_config)
-         self.config = new_config

-         if "chat_model" in changes:
              self.llm = self.load_llm()  # Reinitialize LLM if chat_model changes

-         if "vectorstore" in changes:
              self.vector_db = VectorStoreManager(
                  self.config, logger=self.logger
-             )  # Reinitialize VectorStoreManager if vectorstore changes
              if self.config["vectorstore"]["embedd_files"]:
                  self.vector_db.create_database()
                  self.vector_db.save_database()

-         if "ELI5" in changes:
              self.qa_prompt = get_prompt(
                  self.config, "qa"
              )  # Update qa_prompt if ELI5 changes
@@ -66,9 +70,21 @@ class LLMTutor:
              dict: Dictionary containing the changes.
          """
          changes = {}
-         for key in new_config:
-             if old_config.get(key) != new_config[key]:
-                 changes[key] = (old_config.get(key), new_config[key])
          return changes

      def retrieval_qa_chain(self, llm, qa_prompt, rephrase_prompt, db, memory=None):
@@ -87,14 +103,26 @@ class LLMTutor:
          """
          retriever = Retriever(self.config)._return_retriever(db)

-         if self.config["llm_params"]["use_history"]:
-             self.qa_chain = CustomConversationalRetrievalChain(
                  llm=llm,
                  memory=memory,
                  retriever=retriever,
                  qa_prompt=qa_prompt,
                  rephrase_prompt=rephrase_prompt,
              )
          return self.qa_chain

      def load_llm(self):
@@ -108,7 +136,7 @@ class LLMTutor:
          llm = chat_model_loader.load_chat_model()
          return llm

-     def qa_bot(self, memory=None, qa_prompt=None, rephrase_prompt=None):
          """
          Create a QA bot instance.

@@ -120,34 +148,14 @@ class LLMTutor:
          Returns:
              Chain: The QA bot chain instance.
          """
-         if qa_prompt is None:
-             qa_prompt = get_prompt(self.config, "qa")
-         if rephrase_prompt is None:
-             rephrase_prompt = get_prompt(self.config, "rephrase")
-
-         print("using qa_prompt: ", qa_prompt)
-         print("\n\n\n")
-         # exit()
-         db = self.vector_db.load_database()
          # sanity check to see if there are any documents in the database
-         if len(db) == 0:
              raise ValueError(
                  "No documents in the database. Populate the database first."
              )
-         qa = self.retrieval_qa_chain(self.llm, qa_prompt, rephrase_prompt, db, memory)

-         return qa
-
-     def final_result(query):
-         """
-         Get the final result for a given query.
-
-         Args:
-             query (str): The query string.
-
-         Returns:
-             str: The response string.
-         """
-         qa_result = qa_bot()
-         response = qa_result({"query": query})
-         return response

  from modules.chat.chat_model_loader import ChatModelLoader
  from modules.vectorstore.store_manager import VectorStoreManager
  from modules.retriever.retriever import Retriever
+ from modules.chat.langchain.langchain_rag import Langchain_RAG
+ from modules.chat.langgraph.langgraph_rag import Langgraph_RAG


  class LLMTutor:

          self.llm = self.load_llm()
          self.user = user
          self.logger = logger
+         self.vector_db = VectorStoreManager(config, logger=self.logger).load_database()
          self.qa_prompt = get_prompt(config, "qa")  # Initialize qa_prompt
          self.rephrase_prompt = get_prompt(
              config, "rephrase"

              self.vector_db.create_database()
              self.vector_db.save_database()

+     def update_llm(self, old_config, new_config):
          """
          Update the LLM and VectorStoreManager based on new configuration.

          Args:
              new_config (dict): New configuration dictionary.
          """
+         changes = self.get_config_changes(old_config, new_config)

+         print("\n\n\n")
+         print("Changes: ", changes)
+         print("\n\n\n")
+
+         if "llm_params.llm_loader" in changes:
              self.llm = self.load_llm()  # Reinitialize LLM if chat_model changes

+         if "vectorstore.db_option" in changes:
              self.vector_db = VectorStoreManager(
                  self.config, logger=self.logger
+             ).load_database()  # Reinitialize VectorStoreManager if vectorstore changes
              if self.config["vectorstore"]["embedd_files"]:
                  self.vector_db.create_database()
                  self.vector_db.save_database()

+         if "llm_params.llm_style" in changes:
              self.qa_prompt = get_prompt(
                  self.config, "qa"
              )  # Update qa_prompt if ELI5 changes

              dict: Dictionary containing the changes.
          """
          changes = {}
+
+         def compare_dicts(old, new, parent_key=""):
+             for key in new:
+                 full_key = f"{parent_key}.{key}" if parent_key else key
+                 if isinstance(new[key], dict) and isinstance(old.get(key), dict):
+                     compare_dicts(old.get(key, {}), new[key], full_key)
+                 elif old.get(key) != new[key]:
+                     changes[full_key] = (old.get(key), new[key])
+             # Include keys that are in old but not in new
+             for key in old:
+                 if key not in new:
+                     full_key = f"{parent_key}.{key}" if parent_key else key
+                     changes[full_key] = (old[key], None)
+
+         compare_dicts(old_config, new_config)
          return changes

      def retrieval_qa_chain(self, llm, qa_prompt, rephrase_prompt, db, memory=None):

          """
          retriever = Retriever(self.config)._return_retriever(db)

+         if self.config["llm_params"]["llm_arch"] == "langchain":
+             self.qa_chain = Langchain_RAG(
                  llm=llm,
                  memory=memory,
                  retriever=retriever,
                  qa_prompt=qa_prompt,
                  rephrase_prompt=rephrase_prompt,
              )
+         elif self.config["llm_params"]["llm_arch"] == "langgraph_agentic":
+             self.qa_chain = Langgraph_RAG(
+                 llm=llm,
+                 memory=memory,
+                 retriever=retriever,
+                 qa_prompt=qa_prompt,
+                 rephrase_prompt=rephrase_prompt,
+             )
+         else:
+             raise ValueError(
+                 f"Invalid LLM Architecture: {self.config['llm_params']['llm_arch']}"
+             )
          return self.qa_chain

      def load_llm(self):

          llm = chat_model_loader.load_chat_model()
          return llm

+     def qa_bot(self, memory=None):
          """
          Create a QA bot instance.

          Returns:
              Chain: The QA bot chain instance.
          """
          # sanity check to see if there are any documents in the database
+         if len(self.vector_db) == 0:
              raise ValueError(
                  "No documents in the database. Populate the database first."
              )

+         qa = self.retrieval_qa_chain(
+             self.llm, self.qa_prompt, self.rephrase_prompt, self.vector_db, memory
+         )

+         return qa
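A worked example of the nested diff that `get_config_changes` now produces; the dotted keys are what `update_llm` matches against:

```python
old = {"llm_params": {"llm_loader": "gpt-3.5-turbo-1106", "llm_style": "Normal"}}
new = {"llm_params": {"llm_loader": "gpt-4", "llm_style": "Normal"}}
# get_config_changes(old, new) returns:
#   {"llm_params.llm_loader": ("gpt-3.5-turbo-1106", "gpt-4")}
# so update_llm reloads the LLM but leaves the vector store and qa_prompt untouched.
```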
 
 
 
 
 
 
 
 
code/modules/chat_processor/base.py CHANGED
@@ -2,11 +2,17 @@


  class ChatProcessorBase:
-     def __init__(self, config):
-         self.config = config

      def process(self, message):
          """
          Processes and Logs the message
          """
          raise NotImplementedError("process method not implemented")

  class ChatProcessorBase:
+     def __init__(self):
+         pass

      def process(self, message):
          """
          Processes and Logs the message
          """
          raise NotImplementedError("process method not implemented")
+
+     async def rag(self, user_query: dict, config: dict, chain):
+         """
+         Retrieves the response from the chain
+         """
+         raise NotImplementedError("rag method not implemented")
code/modules/chat_processor/chat_processor.py CHANGED
@@ -2,12 +2,11 @@ from modules.chat_processor.literal_ai import LiteralaiChatProcessor


  class ChatProcessor:
-     def __init__(self, llm_tutor, tags=None):
-         self.llm_tutor = llm_tutor
-         self.config = self.llm_tutor.config
          self.chat_processor_type = self.config["chat_logging"]["platform"]
          self.logging = self.config["chat_logging"]["log_chat"]
-         self.user = self.llm_tutor.user
          if tags is None:
              self.tags = self._create_tags()
          else:
@@ -18,12 +17,11 @@ class ChatProcessor:
      def _create_tags(self):
          tags = []
          tags.append(self.config["vectorstore"]["db_option"])
-         tags.append(self.config["llm_params"]["chat_profile"])
          return tags

      def _init_processor(self):
          if self.chat_processor_type == "literalai":
-             self.processor = LiteralaiChatProcessor(self.tags)
          else:
              raise ValueError(
                  f"Chat processor type {self.chat_processor_type} not supported"
@@ -42,7 +40,7 @@ class ChatProcessor:
              "configurable": {
                  "user_id": self.user["user_id"],
                  "conversation_id": self.user["session_id"],
-                 "memory_window": self.llm_tutor.config["llm_params"]["memory_window"],
              }
          }

  class ChatProcessor:
+     def __init__(self, config, user, tags=None):
+         self.config = config
          self.chat_processor_type = self.config["chat_logging"]["platform"]
          self.logging = self.config["chat_logging"]["log_chat"]
+         self.user = user
          if tags is None:
              self.tags = self._create_tags()
          else:

      def _create_tags(self):
          tags = []
          tags.append(self.config["vectorstore"]["db_option"])
          return tags

      def _init_processor(self):
          if self.chat_processor_type == "literalai":
+             self.processor = LiteralaiChatProcessor(self.user, self.tags)
          else:
              raise ValueError(
                  f"Chat processor type {self.chat_processor_type} not supported"

              "configurable": {
                  "user_id": self.user["user_id"],
                  "conversation_id": self.user["session_id"],
+                 "memory_window": self.config["llm_params"]["memory_window"],
              }
          }
code/modules/chat_processor/literal_ai.py CHANGED
@@ -1,18 +1,91 @@
  from literalai import LiteralClient
  import os
  from .base import ChatProcessorBase


  class LiteralaiChatProcessor(ChatProcessorBase):
-     def __init__(self, tags=None):
          self.literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
          self.literal_client.reset_context()
-         with self.literal_client.thread(name="TEST") as thread:
-             self.thread_id = thread.id
-             self.thread = thread
-         if tags is not None and type(tags) == list:
-             self.thread.tags = tags
-         print(f"Thread ID: {self.thread}")

      def process(self, user_message, assistant_message, source_dict):
          with self.literal_client.thread(thread_id=self.thread_id) as thread:
@@ -29,9 +102,9 @@ class LiteralaiChatProcessor(ChatProcessorBase):

      async def rag(self, user_query: dict, config: dict, chain):
          with self.literal_client.step(
-             type="retrieval", name="RAG", thread_id=self.thread_id
          ) as step:
              step.input = {"question": user_query["input"]}
-             res = await chain.invoke(user_query, config)
              step.output = res
              return res

  from literalai import LiteralClient
+ from literalai.api import LiteralAPI
+ from literalai.filter import Filter as ThreadFilter
+
  import os
  from .base import ChatProcessorBase


  class LiteralaiChatProcessor(ChatProcessorBase):
+     def __init__(self, user=None, tags=None):
+         super().__init__()
+         self.user = user
+         self.tags = tags
          self.literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
+         self.literal_api = LiteralAPI(
+             api_key=os.getenv("LITERAL_API_KEY"), url=os.getenv("LITERAL_API_URL")
+         )
          self.literal_client.reset_context()
+         self.user_info = self._fetch_userinfo()
+         self.user_thread = self._fetch_user_threads()
+         if len(self.user_thread["data"]) == 0:
+             self.thread = self._create_user_thread()
+         else:
+             self.thread = self._get_user_thread()
+         self.thread_id = self.thread["id"]
+
+         self.prev_conv = self._get_prev_k_conversations()
+
+     def _get_user_thread(self):
+         thread = self.literal_api.get_thread(id=self.user_thread["data"][0]["id"])
+         return thread.to_dict()
+
+     def _create_user_thread(self):
+         thread = self.literal_api.create_thread(
+             name=f"{self.user_info['identifier']}",
+             participant_id=self.user_info["metadata"]["id"],
+             environment="dev",
+         )
+
+         return thread.to_dict()
+
+     def _get_prev_k_conversations(self, k=3):
+         steps = self.thread["steps"]
+         conversation_pairs = []
+         count = 0
+         for i in range(len(steps) - 1, 0, -1):
+             if (
+                 steps[i - 1]["type"] == "user_message"
+                 and steps[i]["type"] == "assistant_message"
+             ):
+                 user_message = steps[i - 1]["output"]["content"]
+                 assistant_message = steps[i]["output"]["content"]
+                 conversation_pairs.append((user_message, assistant_message))
+
+                 count += 1
+                 if count >= k:
+                     break
+
+         # Return the last k conversation pairs, reversed to maintain chronological order
+         return conversation_pairs[::-1]
+
+     def _fetch_user_threads(self):
+         filters = [
+             {
+                 "operator": "eq",
+                 "field": "participantId",
+                 "value": self.user_info["metadata"]["id"],
+             }
+         ]
+         user_threads = self.literal_api.get_threads(filters=filters)
+         return user_threads.to_dict()
+
+     def _fetch_userinfo(self):
+         user_info = self.literal_api.get_or_create_user(
+             identifier=self.user["user_id"]
+         ).to_dict()
+         # TODO: Have to do this more elegantly
+         # update metadata with unique id for now
+         # (literalai seems to not return the unique id as of now,
+         # so have to explicitly update it in the metadata)
+         user_info = self.literal_api.update_user(
+             id=user_info["id"],
+             metadata={
+                 "id": user_info["id"],
+             },
+         ).to_dict()
+         return user_info

      def process(self, user_message, assistant_message, source_dict):
          with self.literal_client.thread(thread_id=self.thread_id) as thread:

      async def rag(self, user_query: dict, config: dict, chain):
          with self.literal_client.step(
+             type="retrieval", name="RAG", thread_id=self.thread_id, tags=self.tags
          ) as step:
              step.input = {"question": user_query["input"]}
+             res = chain.invoke(user_query, config)
              step.output = res
              return res
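A toy trace of `_get_prev_k_conversations`: it walks the thread's steps backwards, pairs each `user_message` with the `assistant_message` that directly follows it, and reverses the result back into chronological order:

```python
steps = [
    {"type": "user_message",      "output": {"content": "hi"}},
    {"type": "assistant_message", "output": {"content": "hello!"}},
    {"type": "user_message",      "output": {"content": "what is a CNN?"}},
    {"type": "assistant_message", "output": {"content": "A CNN is ..."}},
]
# With k >= 2, the method yields:
#   [("hi", "hello!"), ("what is a CNN?", "A CNN is ...")]
```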
code/modules/config/config.yml CHANGED
@@ -9,7 +9,7 @@ vectorstore:
    data_path: '../storage/data' # str
    url_file_path: '../storage/data/urls.txt' # str
    expand_urls: True # bool
-   db_option : 'RAGatouille' # str [FAISS, Chroma, RAGatouille, RAPTOR]
    db_path : '../vectorstores' # str
    model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
    search_top_k : 3 # int
@@ -26,15 +26,15 @@ vectorstore:
    index_name: "new_idx" # str

  llm_params:
    use_history: True # bool
    memory_window: 3 # int
-   ELI5: False # bool
-   llm_loader: 'openai' # str [local_llm, openai]
    openai_params:
-     model: 'gpt-3.5-turbo-1106' # str [gpt-3.5-turbo-1106, gpt-4]
    local_llm_params:
-     model: 'tiny-llama'
-     temperature: 0.7

  chat_logging:
    log_chat: False # bool

    data_path: '../storage/data' # str
    url_file_path: '../storage/data/urls.txt' # str
    expand_urls: True # bool
+   db_option : 'FAISS' # str [FAISS, Chroma, RAGatouille, RAPTOR]
    db_path : '../vectorstores' # str
    model : 'sentence-transformers/all-MiniLM-L6-v2' # str [sentence-transformers/all-MiniLM-L6-v2, text-embedding-ada-002']
    search_top_k : 3 # int

    index_name: "new_idx" # str

  llm_params:
+   llm_arch: 'langchain' # [langchain, langgraph_agentic]
    use_history: True # bool
    memory_window: 3 # int
+   llm_style: 'Normal' # str [Normal, ELI5, Socratic]
+   llm_loader: 'gpt-3.5-turbo-1106' # str [local_llm, gpt-3.5-turbo-1106, gpt-4]
    openai_params:
+     temperature: 0.7 # float
    local_llm_params:
+     temperature: 0.7 # float

  chat_logging:
    log_chat: False # bool
code/modules/config/constants.py CHANGED
@@ -8,87 +8,13 @@ load_dotenv()
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
  LITERAL_API_KEY = os.getenv("LITERAL_API_KEY")

- opening_message = f"Hey, What Can I Help You With?\n\nYou can me ask me questions about the course logistics, course content, about the final project, or anything else!"
-
- # Prompt Templates
-
- OPENAI_REPHRASE_PROMPT = (
-     "You are someone that rephrases statements. Rephrase the student's question to add context from their chat history if relevant, ensuring it remains from the student's point of view. "
-     "Incorporate relevant details from the chat history to make the question clearer and more specific. "
-     "Do not change the meaning of the original statement, and maintain the student's tone and perspective. "
-     "If the question is conversational and doesn't require context, do not rephrase it. "
-     "Example: If the student previously asked about backpropagation in the context of deep learning and now asks 'what is it', rephrase to 'What is backpropagation.'. "
-     "Example: Do not rephrase if the user is asking something specific like 'cool, suggest a project with transformers to use as my final project' "
-     "Chat history: \n{chat_history}\n"
-     "Rephrase the following question only if necessary: '{input}'"
- )
-
- OPENAI_PROMPT_WITH_HISTORY = (
-     "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
-     "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
-     "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
-     "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
-     "Chat History:\n{chat_history}\n\n"
-     "Context:\n{context}\n\n"
-     "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
-     "Student: {input}\n"
-     "AI Tutor:"
- )
-
- ELI5_PROMPT_WITH_HISTORY = (
-     "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context in the simplest way possible, as if you are explaining to a 5-year-old. Only use the context if it helps make things clearer. The context is ordered by relevance. "
-     "If you don't know the answer, do your best without making things up. Keep the conversation simple and easy to understand. "
-     "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
-     "Speak in a friendly and engaging manner, like talking to a curious child. Avoid complex terms.\n\n"
-     "Chat History:\n{chat_history}\n\n"
-     "Context:\n{context}\n\n"
-     "Answer the student's question below in a friendly, simple, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
-     "Give a very detailed narrative explanation. Use examples wherever you can to aid in the explanation. Remember, explain it as if you are talking to a 5-year-old.\n"
-     "Student: {input}\n"
-     "AI Tutor:"
- )
-
- OPENAAI_PROMPT_NO_HISTORY = (
-     "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
-     "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
-     "Provide links from the source_file metadata. Use the source context that is most relevant. "
-     "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
-     "Context:\n{context}\n\n"
-     "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
-     "Student: {input}\n"
-     "AI Tutor:"
- )
-
-
- TINYLLAMA_PROMPT_TEMPLATE_NO_HISTORY = (
-     "<|im_start|>system\n"
-     "Assistant is an intelligent chatbot designed to help students with questions regarding the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance.\n"
-     "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally.\n"
-     "Provide links from the source_file metadata. Use the source context that is most relevant.\n"
-     "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
-     "<|im_end|>\n\n"
-     "<|im_start|>user\n"
-     "Context:\n{context}\n\n"
-     "Question: {input}\n"
-     "<|im_end|>\n\n"
-     "<|im_start|>assistant"
- )
-
- TINYLLAMA_PROMPT_TEMPLATE_WITH_HISTORY = (
-     "<|im_start|>system\n"
-     "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
-     "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
-     "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
-     "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
-     "<|im_end|>\n\n"
-     "<|im_start|>user\n"
-     "Chat History:\n{chat_history}\n\n"
-     "Context:\n{context}\n\n"
-     "Question: {input}\n"
-     "<|im_end|>\n\n"
-     "<|im_start|>assistant"
- )
  # Model Paths

  LLAMA_PATH = "../storage/models/tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf"
 
8
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
9
  HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
10
  LITERAL_API_KEY = os.getenv("LITERAL_API_KEY")
11
+ LITERAL_API_URL = os.getenv("LITERAL_API_URL")
12
 
13
+ OAUTH_GOOGLE_CLIENT_ID = os.getenv("OAUTH_GOOGLE_CLIENT_ID")
14
+ OAUTH_GOOGLE_CLIENT_SECRET = os.getenv("OAUTH_GOOGLE_CLIENT_SECRET")
15
 
16
+ opening_message = f"Hey, What Can I Help You With?\n\nYou can me ask me questions about the course logistics, course content, about the final project, or anything else!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  # Model Paths
19
 
20
  LLAMA_PATH = "../storage/models/tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf"
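
The constants above are read from the process environment, so deployments typically supply them via a .env file. A minimal sketch (variable names from this commit; all values are placeholders):

# .env (placeholder values, never commit real secrets)
# LITERAL_API_KEY=<literal-api-key>
# LITERAL_API_URL=<literal-endpoint-url>
# OAUTH_GOOGLE_CLIENT_ID=<google-oauth-client-id>
# OAUTH_GOOGLE_CLIENT_SECRET=<google-oauth-client-secret>

import os
from dotenv import load_dotenv

load_dotenv()  # populate os.environ from .env before the os.getenv calls above

# Fail fast if the new OAuth credentials are missing (illustrative check).
missing = [k for k in ("OAUTH_GOOGLE_CLIENT_ID", "OAUTH_GOOGLE_CLIENT_SECRET") if not os.getenv(k)]
if missing:
    raise RuntimeError(f"Missing environment variables: {missing}")
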
code/modules/config/prompts.py ADDED
@@ -0,0 +1,96 @@
1
+ prompts = {
2
+ "openai": {
3
+ "rephrase_prompt": (
4
+ "You are someone that rephrases statements. Rephrase the student's question to add context from their chat history if relevant, ensuring it remains from the student's point of view. "
5
+ "Incorporate relevant details from the chat history to make the question clearer and more specific. "
6
+ "Do not change the meaning of the original statement, and maintain the student's tone and perspective. "
7
+ "If the question is conversational and doesn't require context, do not rephrase it. "
8
+ "Example: If the student previously asked about backpropagation in the context of deep learning and now asks 'what is it', rephrase to 'What is backpropagation.'. "
9
+ "Example: Do not rephrase if the user is asking something specific like 'cool, suggest a project with transformers to use as my final project' "
10
+ "Chat history: \n{chat_history}\n"
11
+ "Rephrase the following question only if necessary: '{input}'"
12
+ "Rephrased Question:'"
13
+ ),
14
+ "prompt_with_history": {
15
+ "normal": (
16
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
17
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
18
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
19
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
20
+ "Chat History:\n{chat_history}\n\n"
21
+ "Context:\n{context}\n\n"
22
+ "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
23
+ "Student: {input}\n"
24
+ "AI Tutor:"
25
+ ),
26
+ "eli5": (
27
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context in the simplest way possible, as if you are explaining to a 5-year-old. Only use the context if it helps make things clearer. The context is ordered by relevance. "
28
+ "If you don't know the answer, do your best without making things up. Keep the conversation simple and easy to understand. "
29
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
30
+ "Speak in a friendly and engaging manner, like talking to a curious child. Avoid complex terms.\n\n"
31
+ "Chat History:\n{chat_history}\n\n"
32
+ "Context:\n{context}\n\n"
33
+ "Answer the student's question below in a friendly, simple, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
34
+ "Give a long very detailed narrative explanation. Use examples wherever you can to aid in the explanation. Remember, explain it as if you are talking to a 5-year-old, so construct a long narrative that builds up to the answer.\n"
35
+ "5-year-old Student: {input}\n"
36
+ "AI Tutor:"
37
+ ),
38
+ "socratic": (
39
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Your goal is to guide the student towards understanding using the Socratic method. Ask thought-provoking questions to encourage critical thinking and self-discovery. Use the provided context only when relevant. The context is ordered by relevance.\n\n"
40
+ "Guidelines for the Socratic approach:\n"
41
+ "Guidelines:"
42
+ "1. Begin with a concise, direct answer to the student's question."
43
+ "2. Follow up with 1-2 thought-provoking questions to encourage critical thinking."
44
+ "3. Provide additional explanations or context if necessary to move the conversation forward."
45
+ "4. End with an open-ended question that invites further exploration."
46
+ "Based on the chat history determine which guideline to follow., and answer accordingly\n\n"
47
+ "If the student is stuck, offer gentle hints or break down the concept into simpler parts. Maintain a friendly, engaging tone throughout the conversation.\n\n"
48
+ "Use chat history and context as guides, but avoid repeating past responses. Provide links from the source_file metadata when appropriate. Use the most relevant source context.\n\n"
49
+ "Chat History:\n{chat_history}\n\n"
50
+ "Context:\n{context}\n\n"
51
+ "Engage with the student's question below using the Socratic method. Ask probing questions to guide their thinking and encourage deeper understanding. Only provide direct answers if absolutely necessary.\n"
52
+ "Student: {input}\n"
53
+ "AI Tutor:"
54
+ ),
55
+ },
56
+ "prompt_no_history": (
57
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
58
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
59
+ "Provide links from the source_file metadata. Use the source context that is most relevant. "
60
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n\n"
61
+ "Context:\n{context}\n\n"
62
+ "Answer the student's question below in a friendly, concise, and engaging manner. Use the context and history only if relevant, otherwise, engage in a free-flowing conversation.\n"
63
+ "Student: {input}\n"
64
+ "AI Tutor:"
65
+ ),
66
+ },
67
+ "tiny_llama": {
68
+ "prompt_no_history": (
69
+ "system\n"
70
+ "Assistant is an intelligent chatbot designed to help students with questions regarding the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance.\n"
71
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally.\n"
72
+ "Provide links from the source_file metadata. Use the source context that is most relevant.\n"
73
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
74
+ "\n\n"
75
+ "user\n"
76
+ "Context:\n{context}\n\n"
77
+ "Question: {input}\n"
78
+ "\n\n"
79
+ "assistant"
80
+ ),
81
+ "prompt_with_history": (
82
+ "system\n"
83
+ "You are an AI Tutor for the course DS598, taught by Prof. Thomas Gardos. Answer the user's question using the provided context. Only use the context if it is relevant. The context is ordered by relevance. "
84
+ "If you don't know the answer, do your best without making things up. Keep the conversation flowing naturally. "
85
+ "Use chat history and context as guides but avoid repeating past responses. Provide links from the source_file metadata. Use the source context that is most relevant. "
86
+ "Speak in a friendly and engaging manner, like talking to a friend. Avoid sounding repetitive or robotic.\n"
87
+ "\n\n"
88
+ "user\n"
89
+ "Chat History:\n{chat_history}\n\n"
90
+ "Context:\n{context}\n\n"
91
+ "Question: {input}\n"
92
+ "\n\n"
93
+ "assistant"
94
+ ),
95
+ },
96
+ }
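
A minimal usage sketch for the new prompts dictionary. The dictionary structure is as defined above; the style-key mapping from config values ('Normal', 'ELI5', 'Socratic') to the lowercase keys is an assumption for illustration.

from modules.config.prompts import prompts

llm_style = "Socratic"  # from config.yml llm_params.llm_style
style_key = {"Normal": "normal", "ELI5": "eli5", "Socratic": "socratic"}[llm_style]

template = prompts["openai"]["prompt_with_history"][style_key]
filled = template.format(
    chat_history="Student: What is backpropagation?\nAI Tutor: ...",
    context="(retrieved course chunks would go here)",
    input="Why does it need the chain rule?",
)
print(filled)
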
code/modules/vectorstore/raptor.py CHANGED
@@ -5,7 +5,7 @@ import os
5
  import numpy as np
6
  import pandas as pd
7
  import umap
8
- from langchain_core.prompts import ChatPromptTemplate
9
  from langchain_core.output_parsers import StrOutputParser
10
  from sklearn.mixture import GaussianMixture
11
  from langchain_community.chat_models import ChatOpenAI
 
5
  import numpy as np
6
  import pandas as pd
7
  import umap
8
+ from langchain_core.prompts.chat import ChatPromptTemplate
9
  from langchain_core.output_parsers import StrOutputParser
10
  from sklearn.mixture import GaussianMixture
11
  from langchain_community.chat_models import ChatOpenAI
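
The only change here is the import path: ChatPromptTemplate is importable both from langchain_core.prompts and from its defining submodule langchain_core.prompts.chat, and the explicit submodule path avoids relying on the package re-export. A quick sanity check (a sketch, assuming a recent langchain-core is installed):

from langchain_core.prompts.chat import ChatPromptTemplate

# Same class either way; only the import path changed.
prompt = ChatPromptTemplate.from_template("Summarize the following notes: {doc}")
messages = prompt.format_messages(doc="RAPTOR recursively clusters and summarizes document chunks.")
print(messages[0].content)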