app.py CHANGED
@@ -1,53 +1,39 @@
  import os
  import gradio as gr
- from langchain.vectorstores import Chroma
- from langchain.chains import ConversationalRetrievalChain
- from langchain.embeddings.openai import OpenAIEmbeddings
- # convo chain lib
- from langchain.embeddings.openai import OpenAIEmbeddings
- from langchain.vectorstores import Chroma
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.llms import OpenAI
- from langchain.chains import ConversationalRetrievalChain
- from langchain.chat_models import ChatOpenAI
- from langchain.prompts.chat import (
-     ChatPromptTemplate,
-     SystemMessagePromptTemplate,
-     AIMessagePromptTemplate,
-     HumanMessagePromptTemplate,
- )
- from langchain.schema import (
-     AIMessage,
-     HumanMessage,
-     SystemMessage
- )
+ import pinecone
+ from llama_index import GPTIndexMemory, GPTPineconeIndex
+ from langchain.agents import Tool
+ from langchain.chains.conversation.memory import ConversationBufferMemory
+ from langchain import OpenAI
+ from langchain.agents import initialize_agent
+
  OPENAI_API_KEY=os.environ["OPENAI_API_KEY"]
- embedding = OpenAIEmbeddings()
- vectorstore = Chroma(persist_directory='vectorstore', embedding_function=embedding)
- retriever = vectorstore.as_retriever()
- aisyah_template="""
- Answer each question truthfully using the Malaysia's Form 1 History data provided. Your answers should be concise and straight to the point.
- For questions that are open-ended, which require subjective judgment or opinion, you may not find a definitive answer in the textbook.
- However, you should still address the question's directive based on the data's context. Ideally, your answer should provide 3 points that support your response.
- You are encouraged to better provide positive suggestions for concepts that are less ethical.
- Please keep in mind that the scope of the data provided is limited to the content covered in the Malaysia's Form 1 History textbook.
- ---------------
- {context}"""
- ##If you don't know the answer, just say that you don't know, don't try to make up an answer.
- system_template="""Use the following pieces of context to answer the users question.
- ----------------
- {context}"""
- ##If you don't know the answer, just say that you don't know, don't try to make up an answer.
- messages = [
-     SystemMessagePromptTemplate.from_template(aisyah_template),
-     HumanMessagePromptTemplate.from_template("{question}")
+ PINECONE_API_KEY=os.environ["PINECONE_API_KEY"]
+
+ pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")
+
+ pindex=pinecone.Index("sejarah")
+ indexed_pinecone=GPTPineconeIndex([], pinecone_index=pindex)
+
+ tools = [
+     Tool(
+         name = "GPT Index",
+         func=lambda q: str(indexed_pinecone.query(q)),
+         description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
+         return_direct=True
+     )
  ]
- prompt = ChatPromptTemplate.from_messages(messages)
- qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), retriever, return_source_documents=True, qa_prompt=prompt)
+ memory = GPTIndexMemory(index=indexed_pinecone, memory_key="chat_history", query_kwargs={"response_mode": "compact"})
+ llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo")
+ agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory, verbose=True)

- def predict(input, chat_history=[]):
-     response = qa({"question":input, "chat_history":history})
-     return response, chat_history
+ def predict(input, history=[]):
+     response = agent_chain.run(input)
+     history = history + [(input, response)]
+     response = history
+     # response = [response]
+     # return response, response
+     return response, response

  with gr.Blocks() as demo:
      chatbot = gr.Chatbot()
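Note: the app.py hunk above is truncated after `chatbot = gr.Chatbot()`. Below is a minimal sketch of how the new predict function is typically wired into a Gradio 3.x Blocks app; the Textbox, State, and launch() wiring are assumptions for illustration, not part of the diff.

# Sketch only -- assumes Gradio 3.x; the component names below are hypothetical.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])  # carries the (question, answer) history between turns

    txt = gr.Textbox(show_label=False, placeholder="Ask a Form 1 Sejarah question")

    # predict returns (history, history): the first copy refreshes the Chatbot
    # display, the second is written back into State for the next turn.
    txt.submit(predict, [txt, state], [chatbot, state])

demo.launch()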
requirements.txt CHANGED
@@ -1,2 +1,3 @@
+ pinecone-client
  langchain
- chromadb
+ llama-index
vectorstore/id_to_uuid_4a7cd045-8fcd-408a-bdb3-8a9fc28f86f1.pkl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:48f1f3f4b73629f466ef59d65c75535b6dd4d58621163844becab73d2a564ce1
- size 13822
vectorstore/index_4a7cd045-8fcd-408a-bdb3-8a9fc28f86f1.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3a1888601e08d21fd3e26fe6ff0250f02f53bb1a47517c023d26385aeea34bef
- size 2718756
vectorstore/index_metadata_4a7cd045-8fcd-408a-bdb3-8a9fc28f86f1.pkl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a51476f8e31e4c6d0a73764cae8c6e9b335079a8e41df49d2f135624fae202cb
- size 74
vectorstore/uuid_to_id_4a7cd045-8fcd-408a-bdb3-8a9fc28f86f1.pkl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:48b72a1b9d227ebf7f00e1e0049a67fda48d45dfe3e91a384b8a500179c82c60
- size 16176