# LLM_local/app.py
import streamlit as st

from LLM import LLM

def format_chat_history(chat_history):
    """Render a list of (speaker, message) pairs as plain text."""
    formatted_history = ""
    for speaker, message in chat_history:
        formatted_history += f"{speaker}: {message}\n"
    return formatted_history

def main():
    st.title("LLM Chat")
    model = "gpt2"
    context = "You are a helpful assistant in a school. You are helping a student with their homework."

    # Streamlit reruns this script on every interaction, so keep the chat
    # session and history in st.session_state rather than a while True loop.
    if "chat" not in st.session_state:
        llm = LLM(model)
        st.session_state.chat = llm.get_chat(context=context)
        st.session_state.chat_history = []

    user_input = st.text_input("User:")
    if st.button("Send") and user_input:
        st.session_state.chat_history.append(("User", user_input))
        bot_response = st.session_state.chat.answerStoreHistory(qn=user_input)
        st.session_state.chat_history.append(("Bot", bot_response))

    # st.text_area has no readonly parameter; disabled=True renders a
    # non-editable field.
    st.text_area("Chat History:",
                 value=format_chat_history(st.session_state.chat_history),
                 disabled=True)


if __name__ == "__main__":
    main()
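
# Launch with the Streamlit CLI rather than plain python:
#   streamlit run app.py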

# Earlier version, kept for reference:
#
# model = st.text_input("model name: ")
# while model == "":
#     time.sleep(0.1)
#
# # model = "mosaicml/mpt-7b-chat"
#
# st.write("Model name: ", model)
# st.write("Loading model...")
#
# llm = LLM(model)
# chat = llm.get_chat(context=context)
# while True:
#     qn = input("Question: ")
#     if qn == "exit":
#         break
#     chat.answerStoreHistory(qn=qn)
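

# ---------------------------------------------------------------------------
# LLM.py (hypothetical sketch): the LLM module imported above is not shown on
# this page. The code below is one guess at a minimal implementation of the
# interface app.py relies on -- LLM(model), llm.get_chat(context=...), and
# chat.answerStoreHistory(qn=...) -- using the Hugging Face transformers
# text-generation pipeline. Only the names come from app.py; the internals
# are assumptions, not the Space's actual module.
# ---------------------------------------------------------------------------
from transformers import pipeline


class Chat:
    def __init__(self, generator, context):
        self.generator = generator
        self.history = [context]

    def answerStoreHistory(self, qn):
        # Build a prompt from the running history plus the new question,
        # generate a continuation, and keep only its first line as the reply.
        prompt = "\n".join(self.history + [f"User: {qn}", "Bot:"])
        generated = self.generator(prompt, max_new_tokens=64,
                                   return_full_text=False)[0]["generated_text"]
        answer = generated.strip().split("\n")[0]
        self.history += [f"User: {qn}", f"Bot: {answer}"]
        return answer


class LLM:
    def __init__(self, model):
        self.generator = pipeline("text-generation", model=model)

    def get_chat(self, context):
        return Chat(self.generator, context)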