import json
import textwrap
import time

import streamlit as st
from langchain.docstore.document import Document
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from streamlit_chat import message
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from chat import generate_response, generate_tag

# Cache the vector store as a shared resource. st.cache_resource is the
# Streamlit cache for unserializable objects like a FAISS index, which
# st.cache_data would otherwise try to pickle on every run.
@st.cache_resource
def create_database():
    json_file_path = "./new_dataset.json"

    # Read one JSON document per line (JSON Lines), skipping blank lines.
    string_chunks = []
    with open(json_file_path, "r") as json_file:
        for line in json_file:
            if line != "\n":
                string_chunks.append(json.loads(line))

    # Wrap each chunk in a LangChain Document so it can be embedded and indexed.
    documents_ = [Document(page_content=line) for line in string_chunks]
    embeddings = HuggingFaceEmbeddings()

    return FAISS.from_documents(documents_, embeddings)

db = create_database()
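
# The dataset is re-embedded on every cold start. A minimal sketch of
# persisting the index instead (assuming LangChain's FAISS save_local /
# load_local API; "faiss_index" is a hypothetical directory name):
#
#   import os
#   if os.path.isdir("faiss_index"):
#       db = FAISS.load_local("faiss_index", HuggingFaceEmbeddings())
#   else:
#       db = create_database()
#       db.save_local("faiss_index")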

if "tokenizer" not in st.session_state:
    st.session_state["tokenizer"] = AutoTokenizer.from_pretrained(
        "MBZUAI/LaMini-Flan-T5-783M"
    )
    st.session_state["model"] = AutoModelForSeq2SeqLM.from_pretrained(
        "MBZUAI/LaMini-Flan-T5-783M"
    )

st.title("BGPT : Bibek's Personal Chatbot")
# Chat history: "generated" holds the bot's replies, "past" the user's messages.
if "generated" not in st.session_state:
    st.session_state["generated"] = []

if "past" not in st.session_state:
    st.session_state["past"] = []


# Read the user's input from a single-line text box (default "Hi!!").
def get_text():
    input_text = st.text_input("Enter your inquiries here: ", "Hi!!")
    return input_text


user_input = get_text()


def wrap_text_preserve_newlines(text, width=110):
    # Split the input text into lines based on newline characters
    lines = text.split("\n")

    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]

    # Join the wrapped lines back together, stripping the Document repr noise
    # ("page_content=", "metadata={}") left over from str(docs[0]).
    wrapped_text = "\n".join(wrapped_lines).replace("page_content=", "").replace("metadata={}", "")

    return wrapped_text
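
# Example: wrap_text_preserve_newlines("line one\nline two") wraps each of the
# two lines independently, so the explicit newline between them is preserved.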

if user_input:

    # Classify the query into an intent tag (greeting, thanks, decline, ...).
    tag = generate_tag(user_input)
    
    start = time.time()
    # print(tag)
    if tag in ["greeting"]:
        output = "Hello 👋! Thanks for visiting!\n I am BGPT! I am here to assist you in obtaining information about Bibek. Feel free to ask me any questions about Bibek. These are some sample questions:\n (I) Tell me about Bibek.\n (II) What skills does Bibek have?\n (III) What work experience does Bibek have?\n (IV) What is Bibek's educational background?\n (V) What awards has Bibek won?\n (VI) What projects have Bibek completed? &\n (VII) How can I contact Bibek?"
    else:
        tokenizer = st.session_state["tokenizer"]
        model = st.session_state["model"]
        # Retrieve the most relevant chunk from the FAISS index as context.
        docs = db.similarity_search(user_input)
        output = wrap_text_preserve_newlines(str(docs[0]))
        if tag in ["welcome", "thanks", "exit"]:
            prompt = user_input
        elif tag in ["BibekBOT-introduction"]:
            prompt = "I am BGPT, a large language model. I am here to assist you in obtaining information about Bibek. Feel free to ask me any questions about Bibek and I will make every effort to respond to all inquiries. These are some sample questions:\n (I) Tell me about Bibek.\n (II) What skills does Bibek have?\n (III) What work experience does Bibek have?\n (IV) What is Bibek's educational background?\n (V) What awards has Bibek won?\n (VI) What projects has Bibek completed? &\n (VII) How can I contact Bibek?\n Please paraphrase the above without changing the tone or content."
        elif tag in ["decline"]:
            prompt = "Okay, if there's anything else I can assist with, please don't hesitate to ask.\n Please paraphrase the above without changing the content or tone much."
        else:
            # output = generate_response(user_input)
            task_description_prompt = "I want you to act like my personal assistant chatbot named 'BGPT'. You are provided with some content and you will get one question. Try to answer the question in detail based on the provided content. You may paraphrase the content to reach your answer too. Below is the content: \n"
            prompt_template = "\nBased on the above content, try to answer the following question.\n\n"
            end_prompt = "\nPlease write meaningful sentences, be as descriptive as possible, respond with several sentences, and end with proper punctuation. If you think the content does not contain a good answer to the question, give a polite response saying that you do not have a specific answer to the query, apologize, and refer them to contact Bibek directly.\n"  # NoQA
            short_response_template = "\nIf your response is very short, like 1 or 2 sentences, add a follow-up sentence such as 'Let me know if there's anything else I can help you with.' or 'If there's anything else I can assist with, please don't hesitate to ask.', or something similarly polite."  # NoQA

            # Assemble the full prompt: task description + retrieved context
            # + question + answer-style instructions.
            prompt = task_description_prompt + output + prompt_template + user_input + end_prompt + short_response_template

        input_ids = tokenizer(
            prompt,
            return_tensors="pt",
        ).input_ids

        outputs = model.generate(input_ids, max_length=512, do_sample=True)
        # skip_special_tokens drops <pad> and </s> cleanly; str.strip("<pad></s>")
        # would strip characters, not the substring, and can eat real text.
        output = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    end = time.time()

    # print(prompt)

    print("Time for model inference: ", end - start)
    # Cap the stored history at 15 exchanges so session state (and the
    # re-rendered chat) does not grow without bound.
    if len(st.session_state.past) >= 15:
        st.session_state.past.pop(0)
        st.session_state.generated.pop(0)

    # Store the latest exchange (user question and bot response).
    st.session_state.past.append(user_input)
    st.session_state.generated.append(output)

if st.session_state["generated"]:
    # print(st.session_state)
    # Iterate newest-to-oldest so the latest exchange renders at the top.
    for i in range(len(st.session_state["generated"]) - 1, -1, -1):
        message(
            st.session_state["generated"][i],
            avatar_style="bottts",
            seed=39,
            key=str(i),  # NoQA
        )
        message(
            st.session_state["past"][i],
            is_user=True,
            avatar_style="identicon",
            seed=4,
            key=str(i) + "_user",
        )  # NoQA
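
# To run the app locally (assuming this file is saved as app.py):
#   streamlit run app.py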