import streamlit as st
import tiktoken
from loguru import logger

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.pdf import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import FAISS
from langchain.callbacks import get_openai_callback

from gtts import gTTS
from pydub import AudioSegment


# Streamlit UI and chat flow
def main():
    st.set_page_config(
        page_title="Vehicle Q&A Chatbot",
        page_icon=":car:")

    st.title("Vehicle Q&A Chatbot :car:")

    if "conversation" not in st.session_state:
        st.session_state.conversation = None

    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    if "processComplete" not in st.session_state:
        st.session_state.processComplete = None

    with st.sidebar:
        uploaded_files = st.file_uploader("Upload the vehicle manual PDF files.",
                                          type=['pdf'], accept_multiple_files=True)
        openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
        process = st.button("Run")

    if process:
        if not openai_api_key:
            st.info("Please enter your OpenAI API key.")
            st.stop()
        files_text = get_text(uploaded_files)
        text_chunks = get_text_chunks(files_text)
        vectorstore = get_vectorstore(text_chunks)

        st.session_state.conversation = get_conversation_chain(vectorstore, openai_api_key)
        st.session_state.processComplete = True

    if 'messages' not in st.session_state:
        st.session_state['messages'] = [{"role": "assistant",
                                         "content": "Hello! Ask me anything about the uploaded documents."}]

    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat logic
    if query := st.chat_input("Enter your question."):
        st.session_state.messages.append({"role": "user", "content": query})

        with st.chat_message("user"):
            st.markdown(query)

        with st.chat_message("assistant"):
            chain = st.session_state.conversation
            if chain is None:
                st.info("Please upload a PDF and press Run first.")
                st.stop()

            with st.spinner("Thinking..."):
                # The callback must wrap the chain call so it can record OpenAI token usage
                with get_openai_callback() as cb:
                    result = chain({"question": query})
                st.session_state.chat_history = result['chat_history']
                response = result['answer']
                source_documents = result['source_documents']

                # Text-to-speech: synthesize the answer in Korean and play it
                tts = gTTS(text=response, lang='ko')
                tts.save('output.mp3')  # save the synthesized audio
                audio = AudioSegment.from_file("output.mp3", format="mp3")  # load the audio file
                st.audio(audio.export(format='mp3').read(), start_time=0)  # play it in Streamlit

                st.markdown(response)
                with st.expander("View source documents"):
                    # Show up to three retrieved source chunks
                    for doc in source_documents[:3]:
                        st.markdown(doc.metadata['source'], help=doc.page_content)

        # Add assistant message to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})


# Token counting (cl100k_base matches OpenAI chat models)
def tiktoken_len(text):
    tokenizer = tiktoken.get_encoding("cl100k_base")
    tokens = tokenizer.encode(text)
    return len(tokens)


# PDF loading: persist uploads to disk, then load them as LangChain documents
def get_text(docs):
    doc_list = []
    for doc in docs:
        file_name = doc.name  # use the uploaded file's name on disk
        with open(file_name, "wb") as file:
            file.write(doc.getvalue())
        logger.info(f"Uploaded {file_name}")
        if '.pdf' in doc.name:
            loader = PyMuPDFLoader(file_name)
            documents = loader.load_and_split()
            doc_list.extend(documents)
    return doc_list
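
# Illustrative sketch only (not executed by the app): the helpers in this file
# compose into a plain offline pipeline. "manual.pdf", the API key literal, and
# the sample question are hypothetical placeholders.
#
#   docs = PyMuPDFLoader("manual.pdf").load_and_split()
#   chunks = get_text_chunks(docs)
#   db = get_vectorstore(chunks)
#   chain = get_conversation_chain(db, "...")
#   chain({"question": "How do I reset the tire pressure warning?"})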
# Text splitter: token-based chunks with overlap
def get_text_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=100,
        length_function=tiktoken_len
    )
    chunks = text_splitter.split_documents(text)
    return chunks


# Embedding and vector store: Korean sentence embeddings indexed with FAISS
def get_vectorstore(text_chunks):
    embeddings = HuggingFaceEmbeddings(
        model_name="jhgan/ko-sroberta-multitask",
        model_kwargs={'device': 'cpu'},
        encode_kwargs={'normalize_embeddings': True}
    )
    vectordb = FAISS.from_documents(text_chunks, embeddings)
    return vectordb


# Retriever and LLM: conversational RAG chain with buffer memory
def get_conversation_chain(vectorstore, openai_api_key):
    llm = ChatOpenAI(openai_api_key=openai_api_key, model_name='gpt-3.5-turbo', temperature=0)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(search_type='mmr', verbose=True),
        memory=ConversationBufferMemory(memory_key='chat_history',
                                        return_messages=True,
                                        output_key='answer'),
        get_chat_history=lambda h: h,
        return_source_documents=True,
        verbose=True
    )
    return conversation_chain


if __name__ == '__main__':
    main()
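
# A minimal sketch of how to run this app, assuming the file is saved as
# app.py (the filename is an assumption) against a legacy LangChain 0.0.x
# release that still exposes these import paths; pydub also needs ffmpeg
# available on the system.
#
#   pip install streamlit "langchain<0.1" openai tiktoken faiss-cpu \
#       sentence-transformers gtts pydub pymupdf loguru
#   streamlit run app.py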