Monke64 committed on
Commit
74202db
1 Parent(s): dfe9a5c

Added minor changes to UI

Browse files
Files changed (2) hide show
  1. app.py +5 -0
  2. pages/1_Prompt Engineering.py +62 -26
app.py CHANGED
@@ -9,6 +9,11 @@ st.markdown(
9
  """
10
  This proof-of-concept web application aims to showcase the text generation capabilities
11
  of ChatGPT together with prompt engineering techniques.
 
 
 
 
 
12
  """
13
  )
14
  st.image("images/Chatgpt.jpg")
 
9
  """
10
  This proof-of-concept web application aims to showcase the text generation capabilities
11
  of ChatGPT together with prompt engineering techniques.
12
+ ### Tutorial
13
+ 1. Go to the Prompt-Engineering Page
14
+ 2. Insert your OpenAI API key to use ChatGPT. You can find it [here](https://platform.openai.com/api-keys)
15
+ 3. Use the selection options to get access to various prompt-engineering techniques and examples.
16
+ 4. Copy and paste the examples into the chatbox to see how it works.
17
  """
18
  )
19
  st.image("images/Chatgpt.jpg")
pages/1_Prompt Engineering.py CHANGED
@@ -1,5 +1,32 @@
1
  import streamlit as st
2
  from openai import OpenAI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  @st.cache_resource
5
  def init_PE():
@@ -75,35 +102,44 @@ for PEs in st.session_state.PE_options:
75
  unsafe_allow_html=True
76
  )
77
 
 
 
78
  st.session_state.API_key = st.sidebar.text_input("Insert your openAI API key here")
79
- if "openai_model" not in st.session_state:
80
- st.session_state["openai_model"] = "gpt-3.5-turbo"
81
-
82
- if "GPTmessages" not in st.session_state:
83
- st.session_state.GPTmessages = []
84
-
85
- if "API_key" not in st.session_state:
86
- st.session_state.API_key = None
87
 
88
  if st.session_state.API_key:
 
 
 
 
89
  client = OpenAI(api_key=st.session_state.API_key)
90
- for message in st.session_state.GPTmessages:
91
- with st.chat_message(message["role"]):
92
- st.markdown(message["content"])
93
 
94
- if prompt := st.chat_input("What is up?"):
95
- st.session_state.GPTmessages.append({"role": "user", "content": prompt})
96
- with st.chat_message("user"):
97
- st.markdown(prompt)
98
 
99
- with st.chat_message("assistant"):
100
- stream = client.chat.completions.create(
101
- model=st.session_state["openai_model"],
102
- messages=[
103
- {"role": m["role"], "content": m["content"]}
104
- for m in st.session_state.GPTmessages
105
- ],
106
- stream=True,
107
- )
108
- response = st.write_stream(stream)
109
- st.session_state.GPTmessages.append({"role": "assistant", "content": response})
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  from openai import OpenAI
3
+ import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
5
+ from langchain.llms import HuggingFacePipeline
6
+ from langchain import PromptTemplate, LLMChain
7
+ # @st.cache_resource
8
+ # def init_Phi2():
9
+ # torch.random.manual_seed(0)
10
+ # model = AutoModelForCausalLM.from_pretrained(
11
+ # "microsoft/Phi-3-mini-4k-instruct",
12
+ # device_map="auto",
13
+ # torch_dtype="auto",
14
+ # trust_remote_code=True,
15
+ # )
16
+ # tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct",trust_remote_code=True)
17
+ # pipe = pipeline(
18
+ # "text-generation",
19
+ # model=model,
20
+ # tokenizer=tokenizer,
21
+ # )
22
+ #
23
+ # generation_args = {
24
+ # "max_new_tokens": 500,
25
+ # "return_full_text": False,
26
+ # "temperature": 0.0,
27
+ # "do_sample": False,
28
+ # }
29
+ # local_llm = HuggingFacePipeline(pipeline=pipe)
30
 
31
  @st.cache_resource
32
  def init_PE():
 
102
  unsafe_allow_html=True
103
  )
104
 
105
+ #st.session_state.model = st.sidebar.selectbox("Choose Your Model",["GPT","Phi-3"])
106
+ #if st.session_state.model == "GPT":
107
  st.session_state.API_key = st.sidebar.text_input("Insert your openAI API key here")
 
 
 
 
 
 
 
 
108
 
109
  if st.session_state.API_key:
110
+ if "openai_model" not in st.session_state:
111
+ st.session_state["openai_model"] = "gpt-3.5-turbo"
112
+ if "GPTmessages" not in st.session_state:
113
+ st.session_state.GPTmessages = []
114
  client = OpenAI(api_key=st.session_state.API_key)
115
+ for message in st.session_state.GPTmessages:
116
+ with st.chat_message(message["role"]):
117
+ st.markdown(message["content"])
118
 
119
+ if prompt := st.chat_input("What is up?"):
120
+ st.session_state.GPTmessages.append({"role": "user", "content": prompt})
121
+ with st.chat_message("user"):
122
+ st.markdown(prompt)
123
 
124
+ with st.chat_message("assistant"):
125
+ stream = client.chat.completions.create(
126
+ model=st.session_state["openai_model"],
127
+ messages=[
128
+ {"role": m["role"], "content": m["content"]}
129
+ for m in st.session_state.GPTmessages
130
+ ],
131
+ stream=True,
132
+ )
133
+ response = st.write_stream(stream)
134
+ st.session_state.GPTmessages.append({"role": "assistant", "content": response})
135
+
136
+ #else:
137
+ # if "Phimessages" not in st.session_state:
138
+ # st.session_state.Phimessages = []
139
+ # for message in st.session_state.Phimessages:
140
+ # with st.chat_message(message["role"]):
141
+ # st.markdown(message["content"])
142
+ # if prompt := st.chat_input("What is up?"):
143
+ # st.session_state.GPTmessages.append({"role": "user", "content": prompt})
144
+ # with st.chat_message("user"):
145
+ # st.markdown(prompt)