# Gradio demo Space by Norod78 for yam-peleg/Hebrew-Mistral-7B.
import os
from threading import Thread
from typing import Iterator
import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
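# Limits used by the demo: maximum and default number of generated tokens, and the longest prompt fed to the model.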
MAX_MAX_NEW_TOKENS = 1024
DEFAULT_MAX_NEW_TOKENS = 256
MAX_INPUT_TOKEN_LENGTH = 512
DESCRIPTION = """\
# Yam-Peleg's Hebrew-Mistral-7B
Hebrew-Mistral-7B was introduced in [this Facebook post](https://www.facebook.com/groups/MDLI1/posts/2701023256728372/).
Please check the [original model card](https://huggingface.co/yam-peleg/Hebrew-Mistral-7B) for more details.
You can see the other Hebrew models by Yam [here](https://huggingface.co/collections/yam-peleg/hebrew-models-65e957875324e2b9a4b68f08).
# Note: Use this model only for completing sentences.
## While the user interface looks like a chatbot for convenience, this is a base model that is not fine-tuned for chat or instruction-following tasks. As such, the model is not given the chat history and will complete your text based only on the last prompt.
"""
LICENSE = """
<p/>
---
A derivative work of [mistral-7b](https://mistral.ai/news/announcing-mistral-7b/) by Mistral-AI.
The model and space are released under the Apache 2.0 license.
"""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
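# Load the model only when a GPU is present, in bfloat16, and let device_map="auto" place the weights.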
if torch.cuda.is_available():
model_id = "yam-peleg/Hebrew-Mistral-7B"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
tokenizer_id = "yam-peleg/Hebrew-Mistral-7B"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
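# On ZeroGPU Spaces, the spaces.GPU decorator attaches a GPU to each call of the wrapped function.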
@spaces.GPU
def generate(
message: str,
chat_history: list[tuple[str, str]],
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
temperature: float = 0.2,
top_p: float = 0.7,
top_k: int = 30,
repetition_penalty: float = 1.0,
) -> Iterator[str]:
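    # This is a base (completion) model, so the chat history is ignored and only the latest message is tokenized.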
input_ids = tokenizer([message], return_tensors="pt").input_ids
if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
input_ids = input_ids.to(model.device)
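    # TextIteratorStreamer lets us read generated tokens incrementally while model.generate runs in another thread.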
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        pad_token_id=tokenizer.eos_token_id,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=5,
        early_stopping=True,
    )
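    # Launch generation on a worker thread and yield the accumulated text so Gradio streams it to the client.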
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()
outputs = []
for text in streamer:
outputs.append(text)
yield "".join(outputs)
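# Chat-style UI: the sliders below are passed to generate() as additional inputs, in the order they are declared.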
chat_interface = gr.ChatInterface(
fn=generate,
chatbot=gr.Chatbot(rtl=True, show_copy_button=True),
    textbox=gr.Textbox(text_align="right", rtl=True),
additional_inputs=[
gr.Slider(
label="Max new tokens",
minimum=1,
maximum=MAX_MAX_NEW_TOKENS,
step=1,
value=DEFAULT_MAX_NEW_TOKENS,
),
gr.Slider(
label="Temperature",
minimum=0.1,
maximum=4.0,
step=0.1,
value=0.3,
),
gr.Slider(
label="Top-p (nucleus sampling)",
minimum=0.05,
maximum=1.0,
step=0.05,
value=0.3,
),
gr.Slider(
label="Top-k",
minimum=1,
maximum=1000,
step=1,
value=30,
),
],
stop_btn=None,
examples=[
["ืžืชื›ื•ืŸ ืœืขื•ื’ืช ืฉื•ืงื•ืœื“:"],
["ื”ืื™ืฉ ื”ืื—ืจื•ืŸ ื‘ืขื•ืœื ื™ืฉื‘ ืœื‘ื“ ื‘ื—ื“ืจื•, ื›ืฉืœืคืชืข ื ืฉืžืขื”"],
["ืฉืคืช ื”ืชื›ื ื•ืช ืคื™ื™ื˜ื•ืŸ ื”ื™ื"],
["ื”ืขืœื™ืœื” ืฉืœ ืกื™ื ื“ืจืœื”"],
["ืฉืืœื”: ืžื”ื™ ืขื™ืจ ื”ื‘ื™ืจื” ืฉืœ ืžื“ื™ื ืช ื™ืฉืจืืœ?\nืชืฉื•ื‘ื”:"],
["ืฉืืœื”: ืื ื™ ืžืžืฉ ืขื™ื™ืฃ, ืžื” ื›ื“ืื™ ืœื™ ืœืขืฉื•ืช?\nืชืฉื•ื‘ื”:"],
],
)
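# Page layout: description, a button to duplicate the Space, the chat interface, and the license note.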
with gr.Blocks(css="style.css") as demo:
gr.Markdown(DESCRIPTION)
gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
chat_interface.render()
gr.Markdown(LICENSE)
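# Queue incoming requests (up to 20 waiting) so concurrent users do not overwhelm the model.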
if __name__ == "__main__":
demo.queue(max_size=20).launch()