---
library_name: transformers
datasets:
- Suraponn/thai_instruction_sft
language:
- th
base_model: meta-llama/Meta-Llama-3.1-8B
---
![](https://lh7-rt.googleusercontent.com/docsz/AD_4nXeiuCm7c8lEwEJuRey9kiVZsRn2W-b4pWlu3-X534V3YmVuVc2ZL-NXg2RkzSOOS2JXGHutDuyyNAUtdJI65jGTo8jT9Y99tMi4H4MqL44Uc5QKG77B0d6-JfIkZHFaUA71-RtjyYZWVIhqsNZcx8-OMaA?key=xt3VSDoCbmTY7o-cwwOFwQ)
# QuantFactory/llama_3.1_8B_Thai_instruct-GGUF
This is a quantized version of [Suraponn/llama_3.1_8B_Thai_instruct](https://huggingface.co/Suraponn/llama_3.1_8B_Thai_instruct) created using llama.cpp.
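A minimal sketch of running one of these GGUF files with the `llama-cpp-python` bindings (the quant filename `llama_3.1_8B_Thai_instruct.Q4_K_M.gguf` is an assumption — substitute whichever quantization you downloaded):

```python
# Sketch: chat completion against a GGUF quant via llama-cpp-python.
# The model_path filename is an assumption; use the file you downloaded.
from llama_cpp import Llama

llm = Llama(
    model_path="llama_3.1_8B_Thai_instruct.Q4_K_M.gguf",  # assumed filename
    n_ctx=4096,       # context window
    n_gpu_layers=-1,  # offload all layers to GPU if one is available
)

response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant. Please respond in Thai."},
        # Thai prompt: "Write an article about how to exercise correctly."
        {"role": "user", "content": "เขียนบทความเกี่ยวกับการออกกำลังกายให้ถูกต้อง"},
    ],
    max_tokens=512,
    temperature=0.7,
)
print(response["choices"][0]["message"]["content"])
```

The same file also runs directly with llama.cpp's own CLI, e.g. `llama-cli -m <model>.gguf`.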
# Original Model Card
Example inference with `transformers`:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig

model_id = "Suraponn/llama_3.1_8B_Thai_instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cuda:0",
    torch_dtype=torch.float16,
)

config_setting = AutoConfig.from_pretrained(
    model_id,
    add_special_tokens=True,
)

# Fall back to the tokenizer's built-in default template if no chat template
# is set, or if the configured template has no "system" role. (Note:
# `default_chat_template` was removed in newer transformers releases, so this
# fallback only applies on older versions.)
if tokenizer.chat_template is None:
    tokenizer.chat_template = tokenizer.default_chat_template
if "system" not in tokenizer.chat_template and "system" in tokenizer.default_chat_template:
    tokenizer.chat_template = tokenizer.default_chat_template

# Thai prompt: "Write an article about how to exercise correctly."
s_split = "เขียนบทความเกี่ยวกับการออกกำลังกายให้ถูกต้อง"

chat = [
    {
        "role": "system",
        "content": "You are a helpful assistant. Please respond in Thai.",
    },
    {
        "role": "user",
        "content": s_split,
    },
]

tokenizer.use_default_system_prompt = False

# Render the conversation into a single prompt string, ending with the
# assistant header so the model starts generating a reply.
extract_input = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
print("------------\n" + extract_input + "\n------------")

# The chat template already inserts the special tokens, so don't add them again.
inputs = tokenizer(
    extract_input,
    return_tensors="pt",
    add_special_tokens=False,
)

# Stop on either the end-of-sequence token or Llama 3's end-of-turn token.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

inputs = inputs.to(model.device)

with torch.no_grad():
    tokens = model.generate(
        **inputs,
        max_new_tokens=2048,
        do_sample=True,
        eos_token_id=terminators,
        temperature=0.7,
    )

output = tokenizer.decode(tokens[0])
print(output)
```
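`tokenizer.decode(tokens[0])` prints the prompt and special tokens along with the reply; an optional tweak (reusing the variables above) decodes only the newly generated text:

```python
# Optional: strip the prompt and special tokens (e.g. <|eot_id|>) from the output.
prompt_len = inputs["input_ids"].shape[-1]
print(tokenizer.decode(tokens[0][prompt_len:], skip_special_tokens=True))
```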