awacke1 committed
Commit
53e1c4e
1 Parent(s): 1f66ab5

Update app.py

Files changed (1): app.py (+2 -57)
app.py CHANGED
@@ -1,26 +1,20 @@
  import streamlit as st
+ import streamlit.components.v1 as components
  import os
  import json
  import random
-
-
- # Imports
  import base64
  import glob
- import json
  import math
  import openai
- import os
  import pytz
  import re
  import requests
- import streamlit as st
  import textract
  import time
  import zipfile
  import huggingface_hub
  import dotenv
- import streamlit.components.v1 as components  # Import Streamlit Components for HTML5
 
  from audio_recorder_streamlit import audio_recorder
  from bs4 import BeautifulSoup
@@ -29,12 +23,6 @@ from datetime import datetime
  from dotenv import load_dotenv
  from huggingface_hub import InferenceClient
  from io import BytesIO
- from langchain.chat_models import ChatOpenAI
- from langchain.chains import ConversationalRetrievalChain
- from langchain.embeddings import OpenAIEmbeddings
- from langchain.memory import ConversationBufferMemory
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.vectorstores import FAISS
  from openai import ChatCompletion
  from PyPDF2 import PdfReader
  from templates import bot_template, css, user_template
@@ -55,15 +43,6 @@ st.set_page_config(
      }
  )
 
- experimentalSubProgram="""
-
- import streamlit as st
- import random
- import numpy as np
-
- """
-
-
  def SpeechSynthesis(result):
      documentHTML5='''
      <!DOCTYPE html>
@@ -90,9 +69,7 @@ def SpeechSynthesis(result):
      </body>
      </html>
      '''
-
      components.html(documentHTML5, width=1280, height=300)
-     #return result
 
  PromptPrefix = 'Create a graphic novel story with streamlit markdown outlines and tables with appropriate emojis for graphic novel rules defining the method steps of play. Use story structure architect rules using plan, structure and top three dramatic situations matching the theme for topic of '
  PromptPrefix2 = 'Create a streamlit python user app with full code listing to create a UI implementing the usable choose your own adventure graphic novel rules and story using streamlit, session_state, file_uploader, camera_input, on_change = funcction callbacks, randomness and dice rolls using emojis and st.markdown, st.expander, st.columns and other UI controls in streamlit as a game interface and create inline data tables for entities implemented as variables with python list dictionaries for the game rule entities and stats. Design it as a fun data driven app and show full python code listing for this ruleset and thematic story plot line: '
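This hunk drops a stray blank line and a commented-out return inside SpeechSynthesis, leaving components.html (now covered by the top-level import streamlit.components.v1 as components added in the first hunk) as the function's only output. The full documentHTML5 body is not shown in the diff; the sketch below is a minimal stand-in that assumes it feeds the result text to the browser's Web Speech API, which is the usual pattern for this kind of helper.

import json
import streamlit.components.v1 as components

def speech_synthesis_sketch(result: str) -> None:
    # Minimal stand-in for SpeechSynthesis(): render an HTML page that reads the
    # result aloud through the browser's window.speechSynthesis API.
    html = f'''
    <!DOCTYPE html>
    <html>
    <body>
      <script>
        const utterance = new SpeechSynthesisUtterance({json.dumps(result)});
        window.speechSynthesis.speak(utterance);
      </script>
    </body>
    </html>
    '''
    components.html(html, width=1280, height=300)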
@@ -944,7 +921,6 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
      st.write(time.time() - start_time)
      return full_reply_content
 
- # 12. Embedding VectorDB for LLM query of documents to text to compress inputs and prompt together as Chat memory using Langchain
  @st.cache_resource
  def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
      conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
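Only the signature and first line of chat_with_file_contents appear here; the stale "# 12. Embedding VectorDB ... using Langchain" comment is removed along with the Langchain imports earlier in the diff. As a rough sketch (not the file's actual body), a cached helper of this shape would fold the file text into the conversation and call the legacy openai.ChatCompletion API that app.py imports:

import openai
import streamlit as st

@st.cache_resource
def chat_with_file_contents_sketch(prompt, file_content, model_choice='gpt-3.5-turbo'):
    # Hypothetical body; assumes openai.api_key is set elsewhere (e.g. from st.secrets).
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(file_content) > 0:
        conversation.append({'role': 'assistant', 'content': file_content})
    response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
    return response['choices'][0]['message']['content']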
@@ -1057,24 +1033,15 @@ def get_zip_download_link(zip_file):
      return href
 
  # 14. Inference Endpoints for Whisper (best fastest STT) on NVIDIA T4 and Llama (best fastest AGI LLM) on NVIDIA A10
- # My Inference Endpoint
  API_URL_IE = f'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
- # Original
  API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
  MODEL2 = "openai/whisper-small.en"
  MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
- #headers = {
- #    "Authorization": "Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- #    "Content-Type": "audio/wav"
- #}
- # HF_KEY = os.getenv('HF_KEY')
  HF_KEY = st.secrets['HF_KEY']
  headers = {
      "Authorization": f"Bearer {HF_KEY}",
      "Content-Type": "audio/wav"
  }
-
- #@st.cache_resource
  def query(filename):
      with open(filename, "rb") as f:
          data = f.read()
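This hunk strips the commented-out hardcoded Authorization header and the os.getenv('HF_KEY') fallback, keeping only st.secrets['HF_KEY']. Both API_URL_IE assignments survive, so the second one (the public whisper-small.en Inference API) is the URL actually used. Only the first two lines of query() are visible; the sketch below is a hypothetical completion that POSTs the recorded WAV bytes to the endpoint, following the standard Hugging Face Inference API pattern.

import requests
import streamlit as st

API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"

# On a Hugging Face Space, HF_KEY lives in the Space's secrets; locally it would
# go in .streamlit/secrets.toml, e.g.  HF_KEY = "hf_..."
HF_KEY = st.secrets['HF_KEY']
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "audio/wav",
}

def query_sketch(filename):
    # Hypothetical completion of query(): read the recorded WAV and POST the raw
    # bytes to the Whisper inference endpoint, returning the JSON transcription.
    with open(filename, "rb") as f:
        data = f.read()
    response = requests.post(API_URL_IE, headers=headers, data=data)
    return response.json()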
@@ -1119,23 +1086,19 @@ def whisper_main():
      transcript=''
      st.write(transcript)
 
-
      # Whisper to GPT: New!! ---------------------------------------------------------------------
      st.write('Reasoning with your inputs with GPT..')
      response = chat_with_model(transcript)
      st.write('Response:')
      st.write(response)
-
      filename = generate_filename(response, "txt")
      create_file(filename, transcript, response, should_save)
      # Whisper to GPT: New!! ---------------------------------------------------------------------
 
-
      # Whisper to Llama:
      response = StreamLLMChatResponse(transcript)
      filename_txt = generate_filename(transcript, "md")
      create_file(filename_txt, transcript, response, should_save)
-
      filename_wav = filename_txt.replace('.txt', '.wav')
      import shutil
      try:
@@ -1143,30 +1106,12 @@ def whisper_main():
          shutil.copyfile(filename, filename_wav)
      except:
          st.write('.')
-
      if os.path.exists(filename):
          os.remove(filename)
 
-     #st.experimental_rerun()
-     #except:
-     #    st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
-
-
-
- # Sample function to demonstrate a response, replace with your own logic
- def StreamMedChatResponse(topic):
-     st.write(f"Showing resources or questions related to: {topic}")
-
-
-
-
  # 17. Main
  def main():
-     prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
-     # Add Wit and Humor buttons
-     # add_witty_humor_buttons()
-     # add_medical_exam_buttons()
-
+     prompt = PromptPrefix2
      with st.expander("Prompts 📚", expanded=False):
          example_input = st.text_input("Enter your prompt text for Llama:", value=prompt, help="Enter text to get a response from DromeLlama.")
          if st.button("Run Prompt With Llama model", help="Click to run the prompt."):
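The last hunk swaps main()'s default prompt from the hard-coded jokes string to PromptPrefix2 (the choose-your-own-adventure app-builder prompt defined near the top of the file) and drops the commented-out wit/humor and medical-exam button calls along with the unused StreamMedChatResponse stub. The body under "Run Prompt With Llama model" is not part of this diff; as a guess that simply mirrors the whisper_main flow shown above, it would look roughly like:

def run_llama_prompt_sketch(example_input: str, should_save: bool = True) -> None:
    # Hypothetical button handler: stream the Llama response for the entered
    # prompt, then persist prompt + response the same way whisper_main does.
    response = StreamLLMChatResponse(example_input)     # app.py helper, defined elsewhere
    filename = generate_filename(example_input, "md")   # app.py helper, defined elsewhere
    create_file(filename, example_input, response, should_save)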
 