Monke64 committed on
Commit
3370a4e
1 Parent(s): 9755323

Added app files

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ caenv/
app.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st

# Landing page: configure the browser-tab title before any other Streamlit call.
st.set_page_config(page_title="Home")

st.title("Large Language Models Showcase")

# Short blurb shown under the title (content kept verbatim).
_INTRO_TEXT = """
This proof-of-concept web application aims to showcase the text generation capabilities
of ChatGPT together with prompt engineering techniques.
"""
st.markdown(_INTRO_TEXT)

# Hero image; path is relative to the app's working directory.
st.image("images/Chatgpt.jpg")
images/Chatgpt.jpg ADDED
images/LLama.jpg ADDED
images/Phi.png ADDED
pages/1_Prompt Engineering.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from openai import OpenAI

@st.cache_resource
def init_PE():
    """Return the static catalogue of prompt-engineering techniques.

    Keys are technique display names; each value is a dict with three
    string fields — "Intuition", "Format" and "Examples" — which are
    rendered as HTML (so ``<br>`` tags are allowed) by the page below.

    Cached with ``st.cache_resource`` so the dict is built once per server
    process rather than on every rerun.
    """
    # NOTE(fix): corrected content typos relative to the original:
    #   - "Act as a expert" -> "Act as an expert"
    #   - "it's answers" -> "its answers"
    #   - removed accidental duplication of the Few-Shot step-by-step format
    #   - "ct as an outline expander" -> "Act as an outline expander"
    data = {"Persona Pattern":{"Intuition":"Tell the LLM to act as a specific person,object or entity and then provide them with a relevant task.","Format":"Act as entity X, perform task Y.","Examples":"Act as an expert AI Engineer, explain to me how back-propagation works." },
            "Question Refinement":{"Intuition":"To ensure that the LLM can suggest potentially better or more refined questions for the user to ask.","Format":"Whenever I ask a question, suggest a better question and ask me if I would like to use it instead.","Examples":"Just enter the above prompt into the chat before asking your question" },
            "Cognitive Verifier":{"Intuition":"To force the LLM to subdivide the original question into multiple questions and use the answers to all those questions to fit into the final answer","Format":"When you are asked a question,follow these rules:<br>Generate a number of additional questions that would help more accurately answer the question. Combine the answers to the individual questions to produce the final answer to the overall question.","Examples":"Just enter this prompt into the chat before asking your question." },
            "Audience Persona":{"Intuition":"Intuition is for the LLM to know your level of expertise so that it can better tailor its answers to your needs","Format":"Explain X to me. Assume I am persona Y.","Examples":"Explain the self-attention mechanism to me. Assume I am a beginner with 0 knowledge." },
            "Flipped Interaction":{"Intuition":"For the LLM to be like a diagnoser and ask the user questions until a result is met.","Format":"I would like you to ask me questions to help me achieve X.","Examples":"I would like you to ask me questions to help me create variations of my marketing materials. You should ask questions until you have sufficient information about my current draft messages, audience, and goals. Ask me the first question." },
            "Few-Shot Prompting":{"Intuition":"To give the LLM some input and output pairs and hopefully it can learn and generate the expected output from given input. Very similar to conventional training paradigm minus weight updates.","Format":"Input:X, Output: Y(Simple)<br>Situation:X, Think_1:Y,Action_1:Z,Think_2: A, Action_2: B(Step by step)","Examples":"1. Review: The film was absolutely fantastic, with a gripping storyline and excellent acting. Classification: Positive <br> 2. Review: I found the movie to be a complete waste of time, with poor plot development and bad acting. Classification: Negative"},
            "Chain-Of-Thought":{"Intuition":"Similar to few-shot prompting except now we provide reasoning for the output as well.","Format":"Question, step by step solution and then solution","Examples":"Solve the following problem step-by-step:<br>Problem: What is the result of 8 multiplied by 7 plus 6?<br>Step-by-step solution:<br>1. First, calculate the multiplication: 8 * 7 = 56 <br>2. Then, add 6 to the result: 56 + 6 = 62 <br> Answer: 62"},
            "Reason+Act Prompting":{"Intuition":"Very similar to Chain-Of-Thought except now we also teach the LLM to use external tools to get information.","Format":"Question,Think,Action,Result","Examples":"**Question:** Aside from the apple remote, what other device can control the program apple remote? <br>**Task:** I need to find out search Apple Remote and find out the devices it was originally programmed to interact with<br>**Action:** Search [Apple Remote](https://en.wikipedia.org/wiki/Apple_Remote_Desktop)<br>**Result:** Apple Remote Desktop (ARD) is a Macintosh application produced by Apple Inc., first released on March 14, 2002, that replaced a similar product called Apple Network Assistant" },
            "Gameplay Pattern":{"Intuition":"Imagine you want to learn about a new topic, you can format it as a game and play with the LLM.","Format":"Create a game about X topic. Describe the rules.","Examples":"Create a game about prompt engineering. Ask me a series of questions about prompt engineering and see how many I get right. If I get it wrong correct me." },
            "Template Pattern":{"Intuition":"Basically give a template of the output that you want the LLM to follow.","Format":"Task, template","Examples":"Create a random strength workout for me today with complementary exercises. I am going to provide a template for your output . CAPITALIZED WORDS are my placeholders for content. Try to fit the output into one or more of the placeholders that I list. Please preserve the formatting and overall template that I provide.This is the template: NAME, REPS @ SETS, MUSCLE GROUPS WORKED, DIFFICULTY SCALE 1-5, FORM NOTES" },
            "Meta Language":{"Intuition":"Basically teaching the LLM your own secret language like when you say X you mean Y.","Format":"When I say X, I mean Y (or would like you to do Y)","Examples":"When I say variations of companies, I mean give me ten different variations of tech companies" },
            "Recipe Prompting":{"Intuition":"Asking a question to the LLM and also letting it know that there exists intermediate steps required.","Format":"I would like to achieve X. I know that I need to perform steps A,B,C. Please provide me with the necessary steps as well as any missing steps.","Examples":"I would like to drive to MBS from Jurong. I know that I want to go through AYR and I don't want to drive more than 300 miles per day.Provide a complete sequence of steps for me. Fill in any missing steps." },
            "Alternative Approach":{"Intuition":"To allow the LLM to return users alternative approaches to solve a problem","Format":"If there are alternative ways to accomplish a task X that I give you, list the best alternate approaches","Examples":"Just input the above prompt before asking your question or task" },
            "Outline Expansion":{"Intuition":"Give the LLM a topic to provide an outline on and then proceed to ask it to expand on a certain part.","Format":"Act as an outline expander. Generate a bullet point outline based on the input that I give you and then ask me for which bullet point you should expand on. Create a new outline for the bullet point that I select. At the end, ask me for what bullet point to expand next.","Examples":"Act as an outline expander. Generate a bullet point outline based on the input that I give you and then ask me for which bullet point you should expand on. Each bullet can have at most 3-5 sub bullets. The bullets should be numbered using the pattern [A-Z].[i-v].[* through ****]. Create a new outline for the bullet point that I select. At the end, ask me for what bullet point to expand next. Ask me for what to outline." }}
    return data
PE_info = init_PE()

st.title("ChatGPT + Prompt Engineering")
st.markdown(
    """
    Welcome to the main section! Here
    we aim to introduce some common techniques used to generate
    consistent outputs from large language models like GPT.
    """
)

# --- Session-state defaults -------------------------------------------------
# Initialise every key BEFORE any widget writes to it. The original assigned
# st.session_state.API_key from the sidebar widget first, which made the later
# `if "API_key" not in st.session_state` membership check dead code.
if "PE_options" not in st.session_state:
    st.session_state.PE_options = ["Persona Pattern"]

if "selector" not in st.session_state:
    st.session_state.selector = "Default"

if "use_case" not in st.session_state:
    st.session_state.use_case = None

if "API_key" not in st.session_state:
    st.session_state.API_key = None

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

if "GPTmessages" not in st.session_state:
    st.session_state.GPTmessages = []

# --- Technique selection -----------------------------------------------------
st.session_state.selector = st.selectbox(
    "Choose how you want to select your prompt engineering techniques",
    ["Free and Easy", "Recommendation"],
)
if st.session_state.selector == "Free and Easy":
    # FIX: these labels are used directly as PE_info keys in the render loop
    # below, so they must match the dict exactly. The original listed
    # "Template Prompting" and "Meta Language Creation", which raised KeyError
    # when selected; the real keys are "Template Pattern" and "Meta Language".
    default_options = st.multiselect(
        "Select which Prompt Engineering options you are interested in",
        ["Persona Pattern", "Question Refinement", "Cognitive Verifier",
         "Audience Persona", "Flipped Interaction", "Few-Shot Prompting",
         "Chain-Of-Thought", "Reason+Act Prompting", "Gameplay Pattern",
         "Template Pattern", "Meta Language", "Recipe Prompting",
         "Alternative Approach", "Outline Expansion"],
    )
else:
    # Recommendation mode: map a broad use case onto a curated subset.
    use_case = st.selectbox(
        "Choose what you want the LLM to do",
        ["Be adept at a new task", "Teach you something",
         "Diagnose Your Problem", "Provide Solutions"],
    )
    if use_case == "Be adept at a new task":
        default_options = ["Few-Shot Prompting", "Chain-Of-Thought", "Template Pattern", "Meta Language", "Reason+Act Prompting"]
    elif use_case == "Teach you something":
        default_options = ["Audience Persona", "Outline Expansion", "Gameplay Pattern"]
    elif use_case == "Diagnose Your Problem":
        default_options = ["Persona Pattern", "Cognitive Verifier", "Flipped Interaction", "Question Refinement", "Recipe Prompting"]
    else:
        default_options = ["Alternative Approach"]

st.session_state.PE_options = default_options

# --- Render one expander per selected technique ------------------------------
for PEs in st.session_state.PE_options:
    with st.container():
        st.subheader(PEs)
        with st.expander("View Explanation and Examples"):
            st.markdown(
                f"""
                **<span style="color: #AEC6CF;">1. Intuition </span>**
                <span style="color: lightgray;">{PE_info[PEs]["Intuition"]}</span>
                **<span style="color: #AEC6CF;">2. Format </span>**
                <span style="color: lightgray;">{PE_info[PEs]["Format"]}</span>
                **<span style="color: #AEC6CF;">3. Example</span>**
                <span style="color: lightgray;">{PE_info[PEs]["Examples"]}</span>
                """,
                unsafe_allow_html=True,
            )

# --- Chat with ChatGPT --------------------------------------------------------
st.session_state.API_key = st.sidebar.text_input("Insert your openAI API key here")

if st.session_state.API_key:
    # Create the client only once a non-empty key has been supplied.
    client = OpenAI(api_key=st.session_state.API_key)

    # Replay the conversation so far (Streamlit reruns the script each turn).
    for message in st.session_state.GPTmessages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("What is up?"):
        st.session_state.GPTmessages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            # Stream tokens as they arrive; st.write_stream returns the full text.
            stream = client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.GPTmessages
                ],
                stream=True,
            )
            response = st.write_stream(stream)
        st.session_state.GPTmessages.append({"role": "assistant", "content": response})
requirements.txt ADDED
Binary file (2.63 kB). View file
 
unused_pages/1_ChatGPT.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from openai import OpenAI

st.set_page_config(
    page_title="ChatGPT"
)

st.title("ChatGPT Showcase")

# --- Session-state defaults -------------------------------------------------
# Set defaults BEFORE the sidebar widget writes API_key; in the original the
# `if "API_key" not in st.session_state` check ran after the assignment and
# could never fire.
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

if "GPTmessages" not in st.session_state:
    st.session_state.GPTmessages = []

if "API_key" not in st.session_state:
    st.session_state.API_key = None

st.session_state.API_key = st.sidebar.text_input("Insert your openAI API key here")

if st.session_state.API_key:
    # FIX: build the client only when a non-empty key is available. The
    # original called OpenAI(api_key=...) unconditionally, which raises while
    # the sidebar field is still empty. (Matches pages/1_Prompt Engineering.py.)
    client = OpenAI(api_key=st.session_state.API_key)

    # Replay the conversation so far (Streamlit reruns the script each turn).
    for message in st.session_state.GPTmessages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("What is up?"):
        st.session_state.GPTmessages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            # Stream tokens as they arrive; st.write_stream returns the full text.
            stream = client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.GPTmessages
                ],
                stream=True,
            )
            response = st.write_stream(stream)
        st.session_state.GPTmessages.append({"role": "assistant", "content": response})
unused_pages/x_LlaMA.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import streamlit as st
2
+ # from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
+ # import transformers
4
+ # import torch
5
+ #
6
+ # st.set_page_config(
7
+ # page_title="Falcon 11B"
8
+ # )
9
+ #
10
+ # st.title("Falcon 11B Showcase")
11
+ # @st.cache_resource
12
+ # def Chat_model():
13
+ # model_name = "tiiuae/falcon-11B"
14
+ # model = AutoModelForCausalLM.from_pretrained(model_name)
15
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
16
+ # pipeline = transformers.pipeline(
17
+ # "text-generation",
18
+ # model=model,
19
+ # tokenizer=tokenizer,
20
+ # torch_dtype=torch.bfloat16,
21
+ # device_map="auto",
22
+ # )
23
+ # return pipeline,tokenizer
24
+ #
25
+ # def get_text_output(user_input,pipeline,tokenizer):
26
+ # sequences = pipeline(
27
+ # user_input,
28
+ # max_length=200,
29
+ # do_sample=True,
30
+ # top_k=10,
31
+ # num_return_sequences=1,
32
+ # eos_token_id=tokenizer.eos_token_id,
33
+ # )
34
+ # return sequences
35
+ #
36
+ # if "Falcon_messages" not in st.session_state:
37
+ # st.session_state.Falcon_messages = []
38
+ #
39
+ # if "Falcon_model" not in st.session_state:
40
+ # st.session_state.Falcon_model,st.session_state.tokeniser = Chat_model()
41
+ #
42
+ # for message in st.session_state.Falcon_messages:
43
+ # with st.chat_message(message["role"]):
44
+ # st.markdown(message["content"])
45
+ #
46
+ # if prompt := st.chat_input("What is up?"):
47
+ # st.session_state.Falcon_messages.append({"role": "user", "content": prompt})
48
+ # with st.chat_message("user"):
49
+ # st.markdown(prompt)
50
+ # with st.chat_message("assistant"):
51
+ # response = get_text_output(prompt,st.session_state.Falcon_model,st.session_state.tokeniser)
52
+ # st.session_state.Falcon_messages.append({"role": "assistant", "content": response})
unused_pages/x_Prompt_Engineering.py ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import streamlit as st
2
+ #
3
+ # # @st.cache_resource
4
+ # # def init_PE():
5
+ # # data = {"Persona Pattern":{"Intuition":"","Format":2,"Examples":3 },
6
+ # # "Question Refinement":{"Intuition":,"Format":,"Examples": },
7
+ # # "Cognitive Verifier":{"Intuition":,"Format":,"Examples": },
8
+ # # "Audience Persona":{"Intuition":,"Format":,"Examples": },
9
+ # # "Flipped Interaction":{"Intuition":,"Format":,"Examples": },
10
+ # # "Few-Shot Prompting":{"Intuition":,"Format":,"Examples": },
11
+ # # "Chain-Of-Thought":{"Intuition":,"Format":,"Examples": },
12
+ # # "Reason+Act Prompting":{"Intuition":,"Format":,"Examples": },
13
+ # # "Gameplay Pattern":{"Intuition":,"Format":,"Examples": },
14
+ # # "Template Pattern":{"Intuition":,"Format":,"Examples": },
15
+ # # "Meta Language":{"Intuition":,"Format":,"Examples": },
16
+ # # "Alternative Approach":{"Intuition":,"Format":,"Examples": },
17
+ # # "Outline Expansion":{"Intuition":,"Format":,"Examples": }}
18
+ # st.title("Prompt Engineering Techniques")
19
+ # st.markdown(
20
+ # """
21
+ # Welcome to the prompt engineering section! Here
22
+ # we aim to introduce some common techniques used to generate
23
+ # consistent outputs from large language models like GPT and LlaMA.
24
+ # """
25
+ # )
26
+ # PE_options = st.multiselect("Select which Prompt Engineering options you are interested in",["Persona Pattern","Question Refinement","Cognitive Verifier",
27
+ # "Audience Persona","Flipped Interaction", "Few-Shot Prompting","Chain-Of-Thought",
28
+ # "Reason+Act Prompting","Gameplay Pattern","Template Prompting",
29
+ # "Meta Language Creation","Recipe Prompting","Alternative Approach",
30
+ # "Outline Expansion"])
31
+ # c11,c21 = st.columns([1,1])
32
+ # with c11:
33
+ # st.markdown(
34
+ # """
35
+ # ### Persona Pattern
36
+ # """
37
+ # )
38
+ # with st.expander("View Explanation and Examples"):
39
+ # st.markdown(
40
+ # """
41
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**
42
+ # <span style="color: lightgray;">Tell the LLM to act as a specific person,object or entity and then
43
+ # provide them with a relevant task.</span>
44
+ # **<span style="color: #AEC6CF;">2. Format </span>**
45
+ # <span style="color: lightgray;">"Act as entity X, perform task Y."</span>
46
+ # **<span style="color: #AEC6CF;">3. Example</span>**
47
+ # <span style="color: lightgray;">"Act as a expert AI Engineer, explain to me how back-propagation works."</span>
48
+ # """,
49
+ # unsafe_allow_html=True
50
+ # )
51
+ #
52
+ # with c21:
53
+ # st.markdown(
54
+ # """
55
+ # ### Question Refinement
56
+ # """
57
+ # )
58
+ # with st.expander("View Explanation and Examples"):
59
+ # st.markdown(
60
+ # """
61
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
62
+ # <span style="color: lightgray;">To ensure that the LLM can suggest
63
+ # potentially better or more refined questions for the user to ask.</span><br>
64
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
65
+ # <span style="color: lightgray;">"Whenever I ask a question, suggest a
66
+ # better question and ask me if I would like to use it instead.</span><br>
67
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
68
+ # <span style="color: lightgray;">Just enter this prompt into the chat before asking your question</span><br>
69
+ # """,
70
+ # unsafe_allow_html=True
71
+ #
72
+ # )
73
+ # c21,c22 = st.columns([1,1])
74
+ # with c21:
75
+ # st.markdown(
76
+ # """
77
+ # ### Cognitive Verifier
78
+ # """
79
+ # )
80
+ # with st.expander("View Explanations and Examples"):
81
+ # st.markdown(
82
+ # """
83
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**
84
+ # <span style="color: lightgray;">To force the LLM to subdivide the original question
85
+ # into multiple questions and use the answers to all those questions to fit into the final answer</span>
86
+ # **<span style="color: #AEC6CF;">2. Format </span>**
87
+ # <span style="color: lightgray;">"When you are asked a question,follow these rules:<br>Generate a number of additional questions that would help more accurately answer the question.<br>
88
+ # Combine the answers to the individual questions to produce the final answer to the overall question."</span>
89
+ # **<span style="color: #AEC6CF;">3. Example</span>**
90
+ # <span style="color: lightgray;">Just enter this prompt into the chat before asking your question.</span>
91
+ # """,
92
+ # unsafe_allow_html=True
93
+ # )
94
+ #
95
+ # with c22:
96
+ # st.markdown(
97
+ # """
98
+ # ### Audience Persona
99
+ # """
100
+ # )
101
+ # with st.expander("View Explanations and Examples"):
102
+ # st.markdown(
103
+ # """
104
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
105
+ # <span style="color: lightgray;">Intuition is for the LLM to know your level of
106
+ # expertise so that it can better tailor it's answers to your needs</span><br>
107
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
108
+ # <span style="color: lightgray;">"Explain X to me. Assume I am persona Y."</span><br>
109
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
110
+ # <span style="color: lightgray;">"Explain the self-attention mechanism to me.
111
+ # Assume I am a beginner with 0 knowledge."</span>
112
+ # """,
113
+ # unsafe_allow_html=True
114
+ # )
115
+ #
116
+ # c31,c32 = st.columns([1,1])
117
+ # with c31:
118
+ # st.markdown(
119
+ # """
120
+ # ### Flipped Interaction
121
+ # """
122
+ # )
123
+ # with st.expander("View Explanations and Examples"):
124
+ # st.markdown(
125
+ # """
126
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
127
+ # <span style="color: lightgray;">For the LLM to be like a diagnoser and ask the
128
+ # user questions until a result is met.</span><br>
129
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
130
+ # <span style="color: lightgray;">"I would like you to ask me questions to help me
131
+ # achieve X."</span><br>
132
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
133
+ # <span style="color: lightgray;">"I would like you to ask me questions to help me create variations of my marketing materials. You should ask questions until you have sufficient information about my
134
+ # current draft messages, audience, and goals. Ask me the first question."</span><br>
135
+ # """,
136
+ # unsafe_allow_html=True
137
+ # )
138
+ #
139
+ # with c32:
140
+ # st.markdown(
141
+ # """
142
+ # ### Few-Shot Prompting
143
+ # """
144
+ # )
145
+ # with st.expander("View Explanations and Examples"):
146
+ # st.markdown(
147
+ # """
148
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
149
+ # <span style="color: lightgray;">To give the LLM some input and output
150
+ # pairs and hopefully it can learn and generate the expected output
151
+ # from given input. Very similar to conventional training paradigm minus
152
+ # weight updates.</span><br>
153
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
154
+ # <span style="color: lightgray;">"Input:X, Output: Y"(Simple)<br>
155
+ # "Situation:X, Think_1:Y,Action_1:Z,Think_2: A, Action_2: B"(Step by step)</span><br>
156
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
157
+ # <span style="color: lightgray;">"1. Review: "The film was absolutely fantastic, with a gripping storyline and excellent acting."
158
+ # Classification: Positive <br> 2. Review: "I found the movie to be a complete waste of time, with poor plot development and bad acting."
159
+ # Classification: Negative"</span><br>
160
+ # """,
161
+ # unsafe_allow_html=True
162
+ # )
163
+ #
164
+ # c41,c42 = st.columns([1,1])
165
+ #
166
+ # with c41:
167
+ # st.markdown(
168
+ # """
169
+ # ### Chain-Of-Thought
170
+ # """
171
+ # )
172
+ # with st.expander("View Explanations and Examples"):
173
+ # st.markdown(
174
+ # """
175
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
176
+ # <span style="color: lightgray;">Similar to few-shot prompting except
177
+ # now we provide reasoning for the output as well.</span><br>
178
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
179
+ # <span style="color: lightgray;">"Question, step by step solution and then solution"</span><br>
180
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
181
+ # <span style="color: lightgray;">"Solve the following problem step-by-step:
182
+ #
183
+ # Problem: What is the result of 8 multiplied by 7 plus 6?
184
+ #
185
+ # Step-by-step solution:
186
+ # 1. First, calculate the multiplication: 8 * 7 = 56
187
+ # 2. Then, add 6 to the result: 56 + 6 = 62
188
+ #
189
+ # Answer: 62"</span><br>
190
+ # """,
191
+ # unsafe_allow_html=True
192
+ # )
193
+ #
194
+ # with c42:
195
+ # st.markdown(
196
+ # """
197
+ # ### Reason+Act Prompting
198
+ # """
199
+ # )
200
+ # with st.expander("View Explanations and Examples"):
201
+ # st.markdown(
202
+ # """
203
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
204
+ # <span style="color: lightgray;">Very similar to Chain-Of-Thought except now
205
+ # we also teach the LLM to use external tools to get information.</span><br>
206
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
207
+ # <span style="color: lightgray;">"Question,Think,Action,Result"</span><br>
208
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
209
+ # <span style="color: lightgray;">"**Question:** Aside from the apple remote, what other
210
+ # device can control the program apple remote? <br>
211
+ # **Task:** I need to find out search Apple Remote and find out the devices it was originally programmed to interact with<br>
212
+ # **Action:** Search [Apple Remote](https://en.wikipedia.org/wiki/Apple_Remote_Desktop)<br>
213
+ # **Result:** Apple Remote Desktop (ARD) is a Macintosh application produced by Apple Inc., first released on March 14, 2002, that replaced a similar product called Apple Network Assistant"</span><br>
214
+ # """,
215
+ # unsafe_allow_html=True
216
+ # )
217
+ #
218
+ # c51,c52 = st.columns([1,1])
219
+ # with c51:
220
+ # st.markdown(
221
+ # """
222
+ # ### Gameplay Pattern
223
+ # """
224
+ # )
225
+ # with st.expander("View Explanations and Examples"):
226
+ # st.markdown(
227
+ # """
228
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
229
+ # <span style="color: lightgray;">Imagine you want to learn about a new topic
230
+ # , you can format it as a game and play with the LLM.</span><br>
231
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
232
+ # <span style="color: lightgray;">Create a game about X topic. Describe the rules.</span><br>
233
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
234
+ # <span style="color: lightgray;">"Create a game about prompt engineering. Ask me a series of questions
235
+ # about prompt engineering and see how many I get right. If I get it wrong correct me."</span><br>
236
+ # """,
237
+ # unsafe_allow_html=True
238
+ # )
239
+ #
240
+ # with c52:
241
+ # st.markdown(
242
+ # """
243
+ # ### Template Prompting
244
+ # """
245
+ # )
246
+ # with st.expander("View Explanations and Examples"):
247
+ # st.markdown(
248
+ # """
249
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
250
+ # <span style="color: lightgray;">Basically give a template of the output that you want the
251
+ # LLM to follow. </span><br>
252
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
253
+ # <span style="color: lightgray;">Task, template </span><br>
254
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
255
+ # <span style="color: lightgray;">"Create a random strength workout for me today with complementary exercises. I am going to provide a template for your output . CAPITALIZED WORDS are my placeholders for content. Try to fit the output into one or more of the placeholders that I list.
256
+ # Please preserve the formatting and overall template that I provide.
257
+ # This is the template: NAME, REPS @ SETS, MUSCLE GROUPS WORKED, DIFFICULTY SCALE 1-5, FORM NOTES"</span><br>
258
+ # """,
259
+ # unsafe_allow_html=True
260
+ # )
261
+ #
262
+ # c61,c62 = st.columns([1,1])
263
+ #
264
+ # with c61:
265
+ # st.markdown(
266
+ # """
267
+ # ### Meta Language Creation
268
+ # """
269
+ # )
270
+ # with st.expander("View Explanations and Examples"):
271
+ # st.markdown(
272
+ # """
273
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
274
+ # <span style="color: lightgray;">Basically teaching the LLM your own
275
+ # secret language like when you say X you mean Y.</span><br>
276
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
277
+ # <span style="color: lightgray;">"When I say X, I mean Y (or would like you to do Y)"</span><br>
278
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
279
+ # <span style="color: lightgray;">"When I say "variations of companies", I mean give me ten different variations of tech companies"</span><br>
280
+ # """
281
+ # )
282
+ #
283
+ # with c62:
284
+ # st.markdown(
285
+ # """
286
+ # ### Recipe Prompting
287
+ # """
288
+ # )
289
+ # with st.expander("View Explanations and Examples"):
290
+ # st.markdown(
291
+ # """
292
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
293
+ # <span style="color: lightgray;">Asking a question to the LLM and also
294
+ # letting it know that there exists intermediate steps required.</span><br>
295
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
296
+ # <span style="color: lightgray;">I would like to achieve X.
297
+ # I know that I need to perform steps A,B,C. Please provide me with the
298
+ # necessary steps as well as any missing steps.</span><br>
299
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
300
+ # <span style="color: lightgray;">"I would like to drive to MBS from Jurong. I know that I want to go through AYR and I don't want to drive more than 300 miles per day.
301
+ # Provide a complete sequence of steps for me. Fill in any missing steps."</span><br>
302
+ # """,
303
+ # unsafe_allow_html=True
304
+ # )
305
+ #
306
+ # c71,c72 = st.columns([1,1])
307
+ #
308
+ # with c71:
309
+ # st.markdown(
310
+ # """
311
+ # ### Alternative Approach
312
+ # """
313
+ # )
314
+ # with st.expander("View Explanations and Examples"):
315
+ # st.markdown(
316
+ # """
317
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
318
+ # <span style="color: lightgray;">To allow the LLM to return users
319
+ # alternative approaches to solve a problem.</span><br>
320
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
321
+ # <span style="color: lightgray;"></span>"If there are alternative ways to accomplish a task X that I give you, list the best alternate approaches "<br>
322
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
323
+ # <span style="color: lightgray;">Just input the above prompt before asking your question or task.</span><br>
324
+ # """,
325
+ # unsafe_allow_html=True
326
+ # )
327
+ #
328
+ # with c72:
329
+ # st.markdown(
330
+ # """
331
+ # ### Outline Expansion
332
+ # """
333
+ # )
334
+ # with st.expander("View Explanations and Examples"):
335
+ # st.markdown(
336
+ # """
337
+ # **<span style="color: #AEC6CF;">1. Intuition </span>**<br>
338
+ # <span style="color: lightgray;">Give the LLM a topic to provide
339
+ # an outline on and then proceed to ask it to expand on a certain part.</span><br>
340
+ # **<span style="color: #AEC6CF;">2. Format </span>**<br>
341
+ # <span style="color: lightgray;">"Act as an outline expander.
342
+ # Generate a bullet point outline based on the input that I give you and then ask me for which bullet point you should expand on.
343
+ # Create a new outline for the bullet point that I select.
344
+ # At the end, ask me for what bullet point to expand next."</span><br>
345
+ # **<span style="color: #AEC6CF;">3. Example</span>**<br>
346
+ # <span style="color: lightgray;">"Act as an outline expander. Generate a bullet point outline based on the input that I give you and then ask me for which bullet point you should expand on. Each bullet can have at most 3-5 sub bullets. The bullets should be numbered using the pattern [A-Z].[i-v].[* through ****]. Create a new outline for the bullet point that I select. At the end, ask me for what bullet point to expand next. Ask me for what to outline."</span><br>
347
+ # """,
348
+ # unsafe_allow_html=True
349
+ # )
350
+ #