amanda103 committed on
Commit
295ab5b
1 Parent(s): 0043c9e

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +13 -9
  2. cli_app.py +9 -11
app.py CHANGED
@@ -1,9 +1,10 @@
1
  import os
2
  from typing import Optional, Tuple
3
  import gradio as gr
4
- from cli_app import get_chain
5
  from threading import Lock
6
  from langchain.vectorstores import Pinecone
 
 
7
  from langchain.embeddings.openai import OpenAIEmbeddings
8
  import pinecone
9
 
@@ -13,27 +14,30 @@ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
13
  PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX_NAME")
14
 
15
 
16
- def grab_vector_connection():
 
 
17
  embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
18
  pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
19
  vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, embeddings)
20
- qa_chain = get_chain(vectorstore)
21
- return qa_chain
22
 
23
 
24
  class ChatWrapper:
25
  def __init__(self):
26
  self.lock = Lock()
27
 
28
- def __call__(self, inp: str, history: Optional[Tuple[str, str]], chain):
 
 
29
  """Execute the chat functionality."""
30
  self.lock.acquire()
31
- if not chain:
32
- chain = grab_vector_connection()
33
  try:
34
  history = history or []
35
- # Run chain and append input.
36
- output = chain({"question": inp, "chat_history": history})["answer"]
37
  history.append((inp, output))
38
  except Exception as e:
39
  raise e
 
1
  import os
2
  from typing import Optional, Tuple
3
  import gradio as gr
 
4
  from threading import Lock
5
  from langchain.vectorstores import Pinecone
6
+ from langchain.llms import OpenAI
7
+ from langchain.chains.question_answering import load_qa_chain
8
  from langchain.embeddings.openai import OpenAIEmbeddings
9
  import pinecone
10
 
 
14
  PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX_NAME")
15
 
16
 
17
+ def get_chain_and_vectorstore():
18
+ llm = OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
19
+ chain = load_qa_chain(llm)
20
  embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
21
  pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
22
  vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, embeddings)
23
+ return chain, vectorstore
 
24
 
25
 
26
  class ChatWrapper:
27
  def __init__(self):
28
  self.lock = Lock()
29
 
30
+ def __call__(
31
+ self, inp: str, history: Optional[Tuple[str, str]], chain, vectorstore=None
32
+ ):
33
  """Execute the chat functionality."""
34
  self.lock.acquire()
35
+ if not chain or not vectorstore:
36
+ chain, vectorstore = get_chain_and_vectorstore()
37
  try:
38
  history = history or []
39
+ docs = vectorstore.similarity_search(inp, k=2)
40
+ output = chain.run(input_documents=docs, question=inp, chat_history=history)
41
  history.append((inp, output))
42
  except Exception as e:
43
  raise e
cli_app.py CHANGED
@@ -1,8 +1,8 @@
1
  from langchain.prompts.prompt import PromptTemplate
2
  from langchain.llms import OpenAI
3
- from langchain.chains import ConversationalRetrievalChain, ChatVectorDBChain
4
  from langchain.vectorstores import Pinecone
5
  from langchain.embeddings.openai import OpenAIEmbeddings
 
6
  import pinecone
7
  import os
8
 
@@ -38,13 +38,8 @@ QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "cont
38
 
39
  def get_chain(vector):
40
  llm = OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
41
- qa_chain = ChatVectorDBChain.from_llm(
42
- llm,
43
- vector,
44
- qa_prompt=QA_PROMPT,
45
- condense_question_prompt=CONDENSE_QUESTION_PROMPT,
46
- )
47
- return qa_chain
48
 
49
 
50
  if __name__ == "__main__":
@@ -57,7 +52,10 @@ if __name__ == "__main__":
57
  while True:
58
  print("Human:")
59
  question = input()
60
- result = qa_chain({"question": question, "chat_history": chat_history})
61
- chat_history.append((question, result["answer"]))
 
 
 
62
  print("AI:")
63
- print(result["answer"])
 
1
  from langchain.prompts.prompt import PromptTemplate
2
  from langchain.llms import OpenAI
 
3
  from langchain.vectorstores import Pinecone
4
  from langchain.embeddings.openai import OpenAIEmbeddings
5
+ from langchain.chains.question_answering import load_qa_chain
6
  import pinecone
7
  import os
8
 
 
38
 
39
  def get_chain(vector):
40
  llm = OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
41
+ chain = load_qa_chain(llm)
42
+ return chain
 
 
 
 
 
43
 
44
 
45
  if __name__ == "__main__":
 
52
  while True:
53
  print("Human:")
54
  question = input()
55
+ docs = vectorstore.similarity_search(question, k=2)
56
+ result = qa_chain.run(
57
+ input_documents=docs, question=question, chat_history=chat_history
58
+ )
59
+ chat_history.append(result)
60
  print("AI:")
61
+ print(result)