import streamlit as st
from openai import OpenAI
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
#from langchain.llms import HuggingFacePipeline
#from langchain import PromptTemplate, LLMChain
# @st.cache_resource
# def init_Phi3():
#     # Local-model fallback (currently disabled): loads Phi-3-mini and wraps
#     # it in a LangChain HuggingFacePipeline for use instead of OpenAI.
#     torch.random.manual_seed(0)
#     model = AutoModelForCausalLM.from_pretrained(
#         "microsoft/Phi-3-mini-4k-instruct",
#         device_map="auto",
#         torch_dtype="auto",
#         trust_remote_code=True,
#     )
#     tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
#     pipe = pipeline(
#         "text-generation",
#         model=model,
#         tokenizer=tokenizer,
#         max_new_tokens=500,
#         return_full_text=False,
#         do_sample=False,
#     )
#     return HuggingFacePipeline(pipeline=pipe)

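# Cached lookup table of prompt engineering techniques: each name maps to the
# "Intuition", "Format", and "Examples" strings rendered in the expanders
# below. st.cache_resource builds the dict once instead of on every rerun.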
@st.cache_resource
def init_PE():
    data = {"Persona Pattern":{"Intuition":"Tell the LLM to act as a specific person, object or entity and then give it a relevant task.","Format":"Act as entity X, perform task Y.","Examples":"Act as an expert AI Engineer, explain to me how back-propagation works." },
            "Question Refinement":{"Intuition":"Have the LLM suggest potentially better or more refined questions for the user to ask.","Format":"Whenever I ask a question, suggest a better question and ask me if I would like to use it instead.","Examples":"Just enter the above prompt into the chat before asking your question." },
            "Cognitive Verifier":{"Intuition":"Force the LLM to subdivide the original question into multiple sub-questions and combine their answers to produce the final answer.","Format":"When you are asked a question, follow these rules:<br>Generate a number of additional questions that would help more accurately answer the question. Combine the answers to the individual questions to produce the final answer to the overall question.","Examples":"Just enter this prompt into the chat before asking your question." },
            "Audience Persona":{"Intuition":"Tell the LLM your level of expertise so that it can better tailor its answers to your needs.","Format":"Explain X to me. Assume I am persona Y.","Examples":"Explain the self-attention mechanism to me. Assume I am a beginner with zero prior knowledge." },
            "Flipped Interaction":{"Intuition":"Have the LLM act as a diagnostician, asking the user questions until a goal is reached.","Format":"I would like you to ask me questions to help me achieve X.","Examples":"I would like you to ask me questions to help me create variations of my marketing materials. You should ask questions until you have sufficient information about my current draft messages, audience, and goals. Ask me the first question." },
            "Few-Shot Prompting":{"Intuition":"Give the LLM some input-output pairs so that it can infer the pattern and generate the expected output for a new input. Very similar to the conventional training paradigm, minus the weight updates.","Format":"Input: X, Output: Y (simple)<br>Situation: X, Think_1: Y, Action_1: Z, Think_2: A, Action_2: B (step by step)","Examples":"1. Review: The film was absolutely fantastic, with a gripping storyline and excellent acting. Classification: Positive <br> 2. Review: I found the movie to be a complete waste of time, with poor plot development and bad acting. Classification: Negative"},
            "Chain-Of-Thought":{"Intuition":"Similar to few-shot prompting, except now we provide the reasoning for the output as well.","Format":"Question, step-by-step solution, then the final answer","Examples":"Solve the following problem step-by-step:<br>Problem: What is the result of 8 multiplied by 7 plus 6?<br>Step-by-step solution:<br>1. First, calculate the multiplication: 8 * 7 = 56 <br>2. Then, add 6 to the result: 56 + 6 = 62 <br> Answer: 62"},
            "Reason+Act Prompting":{"Intuition":"Very similar to Chain-Of-Thought, except now we also teach the LLM to use external tools to fetch information.","Format":"Question, Think, Action, Result","Examples":"**Question:** Aside from the Apple Remote, what other device can control the program the Apple Remote was originally designed to interact with? <br>**Think:** I need to search Apple Remote and find the devices it was originally programmed to interact with<br>**Action:** Search [Apple Remote](https://en.wikipedia.org/wiki/Apple_Remote_Desktop)<br>**Result:** Apple Remote Desktop (ARD) is a Macintosh application produced by Apple Inc., first released on March 14, 2002, that replaced a similar product called Apple Network Assistant" },
            "Gameplay Pattern":{"Intuition":"Imagine you want to learn about a new topic: you can frame it as a game and play with the LLM.","Format":"Create a game about topic X. Describe the rules.","Examples":"Create a game about prompt engineering. Ask me a series of questions about prompt engineering and see how many I get right. If I get one wrong, correct me." },
            "Template Pattern":{"Intuition":"Give a template of the output that you want the LLM to follow.","Format":"Task, template","Examples":"Create a random strength workout for me today with complementary exercises. I am going to provide a template for your output. CAPITALIZED WORDS are my placeholders for content. Try to fit the output into one or more of the placeholders that I list. Please preserve the formatting and overall template that I provide. This is the template: NAME, REPS @ SETS, MUSCLE GROUPS WORKED, DIFFICULTY SCALE 1-5, FORM NOTES" },
            "Meta Language":{"Intuition":"Teach the LLM your own secret language: when you say X, you mean Y.","Format":"When I say X, I mean Y (or would like you to do Y)","Examples":"When I say variations of companies, I mean give me ten different variations of tech companies" },
            "Recipe Prompting":{"Intuition":"Ask the LLM a question while letting it know that intermediate steps are required.","Format":"I would like to achieve X. I know that I need to perform steps A, B, C. Please provide me with the necessary steps as well as any missing steps.","Examples":"I would like to drive to MBS from Jurong. I know that I want to go through AYR and I don't want to drive more than 300 miles per day. Provide a complete sequence of steps for me. Fill in any missing steps." },
            "Alternative Approach":{"Intuition":"Have the LLM return alternative approaches to solving a problem.","Format":"If there are alternative ways to accomplish a task X that I give you, list the best alternate approaches","Examples":"Just input the above prompt before asking your question or task" },
            "Outline Expansion":{"Intuition":"Give the LLM a topic to outline, then ask it to expand on a selected bullet point.","Format":"Act as an outline expander. Generate a bullet point outline based on the input that I give you and then ask me for which bullet point you should expand on. Create a new outline for the bullet point that I select. At the end, ask me for what bullet point to expand next.","Examples":"Act as an outline expander. Generate a bullet point outline based on the input that I give you and then ask me for which bullet point you should expand on. Each bullet can have at most 3-5 sub bullets. The bullets should be numbered using the pattern [A-Z].[i-v].[* through ****]. Create a new outline for the bullet point that I select. At the end, ask me for what bullet point to expand next. Ask me for what to outline." }}
    return data

PE_info = init_PE()

st.title("ChatGPT + Prompt Engineering")
st.markdown(
    """
    Welcome to the main section! This page introduces some common
    techniques for getting more consistent outputs from large
    language models like GPT.  
    """
)
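# Initialise session-state defaults so user selections survive Streamlit reruns.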
if "PE_options" not in st.session_state:
    st.session_state.PE_options = ["Persona Pattern"]

if "selector" not in st.session_state:
    st.session_state.selector = "Default"

if "use_case" not in st.session_state:
    st.session_state.use_case = None

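# Two ways to pick techniques: "Free and Easy" lets the user multiselect
# directly, while "Recommendation" maps a chosen use case to a curated subset.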
st.session_state.selector = st.selectbox("Choose how you want to select your prompt engineering techniques",["Free and Easy","Recommendation"])
if st.session_state.selector == "Free and Easy":
    default_options = st.multiselect("Select which Prompt Engineering options you are interested in",["Persona Pattern","Question Refinement","Cognitive Verifier",
                             "Audience Persona","Flipped Interaction", "Few-Shot Prompting","Chain-Of-Thought",
                             "Reason+Act Prompting","Gameplay Pattern","Template Pattern",
                             "Meta Language","Recipe Prompting","Alternative Approach",
                             "Outline Expansion"])

else:
    use_case = st.selectbox("Choose what you want the LLM to do",["Be adept at a new task", "Teach you something",
                                                                "Diagnose Your Problem","Provide Solutions"])
    if use_case == "Be adept at a new task":
        default_options = ["Few-Shot Prompting","Chain-Of-Thought","Template Pattern","Meta Language","Reason+Act Prompting"]
    elif use_case == "Teach you something":
        default_options = ["Audience Persona","Outline Expansion","Gameplay Pattern"]
    elif use_case == "Diagnose Your Problem":
        default_options = ["Persona Pattern","Cognitive Verifier","Flipped Interaction","Question Refinement","Recipe Prompting"]
    else:
        default_options = ["Alternative Approach"]

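# Persist the chosen techniques, then render one expandable card per
# technique using the intuition/format/example text from PE_info.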
st.session_state.PE_options = default_options
for PEs in st.session_state.PE_options:
    with st.container():
        st.subheader(PEs)
        with st.expander("View Explanation and Examples"):
            st.markdown(
            f"""
            **<span style="color: #AEC6CF;">1. Intuition </span>**  
            <span style="color: lightgray;">{PE_info[PEs]["Intuition"]}</span>  
            **<span style="color: #AEC6CF;">2. Format </span>**  
            <span style="color: lightgray;">{PE_info[PEs]["Format"]}</span>  
            **<span style="color: #AEC6CF;">3. Example</span>**  
            <span style="color: lightgray;">{PE_info[PEs]["Examples"]}</span>  
            """,
            unsafe_allow_html=True
        )

#st.session_state.model = st.sidebar.selectbox("Choose Your Model",["GPT","Phi-3"])
#if st.session_state.model == "GPT":
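# The OpenAI key is read from the sidebar on each run; the chat UI below
# only appears once a key has been entered.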
st.session_state.API_key = st.sidebar.text_input("Insert your OpenAI API key here", type="password")

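# Standard Streamlit chat loop: replay the stored history on every rerun,
# append the new user turn, then stream the assistant's reply.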
if st.session_state.API_key:
    if "openai_model" not in st.session_state:
        st.session_state["openai_model"] = "gpt-3.5-turbo"
    if "GPTmessages" not in st.session_state:
        st.session_state.GPTmessages = []
    client = OpenAI(api_key=st.session_state.API_key)
    for message in st.session_state.GPTmessages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("What is up?"):
        st.session_state.GPTmessages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

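        # Send the full stored conversation so the model keeps context, and
        # stream the reply so tokens render as they arrive.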
        with st.chat_message("assistant"):
            stream = client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.GPTmessages
                ],
                stream=True,
            )
            response = st.write_stream(stream)
        st.session_state.GPTmessages.append({"role": "assistant", "content": response})

#else:
    # if "Phimessages" not in st.session_state:
    #     st.session_state.Phimessages = []
    # for message in st.session_state.Phimessages:
    #     with st.chat_message(message["role"]):
    #         st.markdown(message["content"])
    # if prompt := st.chat_input("What is up?"):
    #     st.session_state.Phimessages.append({"role": "user", "content": prompt})
    #     with st.chat_message("user"):
    #         st.markdown(prompt)