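# Overthink 1 (o1): a Gradio chat demo that prompts an LLM to produce a long,
# o1-style chain of thought inside <inner_thoughts> tags before giving a
# <final_answer>, served through OpenRouter's OpenAI-compatible API.
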
import time
import gradio as gr
from os import getenv
from openai import OpenAI

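# OpenRouter exposes an OpenAI-compatible endpoint, so the official OpenAI
# client works unchanged with a custom base_url. Expects OPENROUTER_API_KEY
# to be set in the environment.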
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=getenv("OPENROUTER_API_KEY"),
)

css = """
.thought {
    opacity: 0.8;
    font-family: "Courier New", monospace;
    border: 1px gray solid;
    padding: 10px;
    border-radius: 5px;
}
"""

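# The long "contemplator" system prompt lives in a separate text file shipped
# alongside this script; it is shown (and editable) in the System Message tab.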
with open("contemplator.txt", "r") as f:
    system_msg = f.read()

def streaming(message, history, system_msg, model):
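    # Rebuild the full conversation for the API call: the system prompt first,
    # then each past (user, assistant) turn from Gradio's tuple-style history.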
    messages = [
        {
            "role": "system",
            "content": system_msg
        }
    ]
    for user, assistant in history:
        messages.append({
            "role": "user",
            "content": user
        })
        messages.append({
            "role": "assistant",
            "content": assistant
        })

    messages.append({
        "role": "user",
        "content": message
    })

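    # Request a streamed completion so partial thoughts can be rendered live;
    # the token ceiling is generous because the chain of thought can run long.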
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_completion_tokens=100000,
        stream=True,
    )
    
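    # The system prompt asks the model to wrap its reasoning in
    # <inner_thoughts>...</inner_thoughts> followed by <final_answer>...</final_answer>;
    # we split the streamed text on those tags to render the two parts separately.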
    reply = ""
    
    start_time = time.time()
    for i, chunk in enumerate(completion):
        reply += chunk.choices[0].delta.content
        answer = ""
        if not "</inner_thoughts>" in reply:
            thought_text = f'<div class="thought">{reply.replace("<inner_thoughts>", "").strip()}</div>'
        else:
            thought_text = f'<div class="thought">{reply.replace("<inner_thoughts>", "").split("</inner_thoughts>")[0].strip()}</div>'
            answer = reply.split("</inner_thoughts>")[1].replace("<final_answer>", "").replace("</final_answer>", "").strip()
        thinking_prompt = "<p>" + "Thinking" + "." * (i % 5 + 1) + "</p>"
        yield thinking_prompt + thought_text + "<br>" + answer

    thinking_prompt = f"<p>Thought for {time.time() - start_time:.2f} seconds</p>"
    yield thinking_prompt + thought_text + "<br>" + answer
        
markdown = """
## 🫐 Overthink 1(o1)

Insprired by how o1 works, this LLM is instructed to generate very long and detailed chain-of-thoughts. It will think extra hard before providing an answer. 

Actually this does help with reasoning, compared to normal step-by-step reasoning. I wrote a blog post about this [here](https://huggingface.co/blog/wenbopan/recreating-o1).

Sometimes this LLM overthinks for super simple questions, but it's fun to watch. Hope you enjoy it!

### System Message

This is done by instructing the model with a large system message, which you can check on the top tab.
"""

with gr.Blocks(theme=gr.themes.Soft(), css=css, fill_height=True) as demo:
    with gr.Row(equal_height=True):
        with gr.Column(scale=1, min_width=300):
            with gr.Tab("Settings"):
                gr.Markdown(markdown)
                model = gr.Dropdown(
                    [
                        "nousresearch/hermes-3-llama-3.1-405b:free",
                        "nousresearch/hermes-3-llama-3.1-70b",
                        "meta-llama/llama-3.1-405b-instruct",
                    ],
                    value="nousresearch/hermes-3-llama-3.1-405b:free",
                    label="Model",
                )
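                # NOTE: this checkbox is not yet wired into streaming(); thoughts are always shown.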
                show_thoughts = gr.Checkbox(True, label="Show Thoughts", interactive=True)
            with gr.Tab("System Message"):
                system_msg = gr.TextArea(system_msg, label="System Message")
        with gr.Column(scale=3, min_width=300):
            gr.ChatInterface(
                streaming,
                additional_inputs=[
                    system_msg,
                    model
                ],
                examples=[
                    ["How do you do?    ", None, None, None],
                    ["How many R's in strawberry?", None, None, None],
                    ["Solve the puzzle of 24 points: 2 4 9 1", None, None, None],
                    ["Find x such that ⌈xβŒ‰ + x = 23/7. Express x as a common fraction.", None, None, None],
                ],
            )

if __name__ == "__main__":
    demo.launch()
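
# To run locally (assumes contemplator.txt sits next to this script and an
# OpenRouter key is available):
#   OPENROUTER_API_KEY=sk-or-... python app.py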