AkimfromParis committed on
Commit
18f1516
1 Parent(s): 6f22c3e

Llama tokenizer change

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -42,19 +42,19 @@ dtype: bfloat16
42
  ## 🤗 Usage for HuggingFace
43
 
44
  ```python
45
- from transformers import LlamaTokenizerFast, AutoModelForCausalLM
46
  from transformers import pipeline
47
  import torch
48
 
49
  model_name = "AkimfromParis/Hinoki-Sak-Sta-slerp-7B"
50
 
51
- tokenizer = LlamaTokenizerFast.from_pretrained(model_name)
52
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
53
 
54
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
55
 
56
  messages = [
57
- {"role": "system", "content": "あなたは誠実で優秀な日本人のアシスタントです。以下のトピックに関する詳細な情報を提供してください。"},
58
  {"role": "user", "content": "大谷翔平選手は誰ですか?"},
59
  ]
60
  print(pipe(messages, max_new_tokens=512)[0]['generated_text'][-1])
 
42
  ## 🤗 Usage for HuggingFace
43
 
44
  ```python
45
+ from transformers import AutoTokenizer, AutoModelForCausalLM
46
  from transformers import pipeline
47
  import torch
48
 
49
  model_name = "AkimfromParis/Hinoki-Sak-Sta-slerp-7B"
50
 
51
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
52
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
53
 
54
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
55
 
56
  messages = [
57
+ {"role": "system", "content": "あなたは誠実で優秀な日本人のアシスタントです。以下のトピックに関する詳細な情報を提供してください。"},
58
  {"role": "user", "content": "大谷翔平選手は誰ですか?"},
59
  ]
60
  print(pipe(messages, max_new_tokens=512)[0]['generated_text'][-1])