import streamlit as st

from LLM import LLM

def format_chat_history(chat_history):
    """Render a list of (speaker, message) pairs as plain text."""
    formatted_history = ""
    for speaker, message in chat_history:
        formatted_history += f"{speaker}: {message}\n"
    return formatted_history
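
# Example: format_chat_history([("User", "hi"), ("Bot", "hello")])
# evaluates to "User: hi\nBot: hello\n".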

def main():
    st.title("LLM Chat")

    model = "gpt2"
    context = ("You are a helpful assistant in a school. "
               "You are helping a student with their homework.")

    # Streamlit reruns the whole script on every interaction, so the model,
    # chat session, and history must live in st.session_state. A bare
    # `while True:` loop would block the app, and a plain `chat_history = []`
    # would be reset to empty on every rerun.
    if "chat" not in st.session_state:
        llm = LLM(model)
        st.session_state.chat = llm.get_chat(context=context)
        st.session_state.chat_history = []

    user_input = st.text_input("User:")
    button = st.button("Send")
    chat_area = st.empty()

    if button and user_input:
        st.session_state.chat_history.append(("User", user_input))
        bot_response = st.session_state.chat.answerStoreHistory(qn=user_input)
        st.session_state.chat_history.append(("Bot", bot_response))

    chat_area.text(format_chat_history(st.session_state.chat_history))

if __name__ == "__main__":
    main()
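
# To try the app locally (assuming this file is saved as app.py):
#     streamlit run app.py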



# Legacy CLI prototype, kept for reference (superseded by the Streamlit UI above):
# model = st.text_input("model name: ")
#
# while model == "":
#     time.sleep(0.1)
#
# # model = "mosaicml/mpt-7b-chat"
#
#
# st.write("Model name: ", model)
# st.write("Loading model...")
#
# llm = LLM(model)
# chat = llm.get_chat(context=context)
# while True:
#     qn = input("Question: ")
#     if qn == "exit":
#         break
#     chat.answerStoreHistory(qn=qn)
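
# The local LLM module is not shown in this file. A minimal stand-in that
# satisfies the interface this script relies on (LLM(model),
# llm.get_chat(context=...), and chat.answerStoreHistory(qn=...)) could look
# like the sketch below. It is hypothetical, for local testing only:
#
#     class _EchoChat:
#         def answerStoreHistory(self, qn):
#             return f"(echo) {qn}"
#
#     class LLM:
#         def __init__(self, model):
#             self.model = model
#         def get_chat(self, context):
#             return _EchoChat()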