ryanzhangfan committed on
Commit
2c69671
1 Parent(s): 94d0495

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -10
README.md CHANGED
@@ -26,10 +26,10 @@ import torch
26
  from transformers import AutoModelForCausalLM, AutoTokenizer
27
 
28
 
29
- tokenizer = AutoTokenizer.from_pretrained("/BAAI/Emu2-Chat")
30
 
31
  model = AutoModelForCausalLM.from_pretrained(
32
- "/BAAI/Emu2-Chat",
33
  torch_dtype=torch.bfloat16,
34
  low_cpu_mem_usage=True,
35
  trust_remote_code=True).to('cuda').eval()
@@ -68,10 +68,10 @@ import torch
68
  from transformers import AutoModelForCausalLM, AutoTokenizer
69
 
70
 
71
- tokenizer = AutoTokenizer.from_pretrained("/BAAI/Emu2-Chat")
72
 
73
  model = AutoModelForCausalLM.from_pretrained(
74
- "/BAAI/Emu2-Chat",
75
  torch_dtype=torch.bfloat16,
76
  low_cpu_mem_usage=True,
77
  trust_remote_code=True).to('cuda').eval()
@@ -116,11 +116,11 @@ import torch
116
  from transformers import AutoModelForCausalLM, AutoTokenizer
117
  from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
118
 
119
- tokenizer = AutoTokenizer.from_pretrained("/BAAI/Emu2-Chat")
120
 
121
  with init_empty_weights():
122
  model = AutoModelForCausalLM.from_pretrained(
123
- "/BAAI/Emu2-Chat",
124
  torch_dtype=torch.bfloat16,
125
  low_cpu_mem_usage=True,
126
  trust_remote_code=True)
@@ -167,11 +167,11 @@ import torch
167
  from transformers import AutoModelForCausalLM, AutoTokenizer
168
  from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
169
 
170
- tokenizer = AutoTokenizer.from_pretrained("/BAAI/Emu2-Chat")
171
 
172
  with init_empty_weights():
173
  model = AutoModelForCausalLM.from_pretrained(
174
- "/BAAI/Emu2-Chat",
175
  torch_dtype=torch.bfloat16,
176
  low_cpu_mem_usage=True,
177
  trust_remote_code=True)
@@ -226,10 +226,10 @@ import torch
226
  from transformers import AutoModelForCausalLM, AutoTokenizer
227
 
228
 
229
- tokenizer = AutoTokenizer.from_pretrained("/BAAI/Emu2-Chat")
230
 
231
  model = AutoModelForCausalLM.from_pretrained(
232
- "/BAAI/Emu2-Chat",
233
  load_in_4bit=True,
234
  trust_remote_code=True,
235
  bnb_4bit_compute_dtype=torch.float16).eval()
 
26
  from transformers import AutoModelForCausalLM, AutoTokenizer
27
 
28
 
29
+ tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")
30
 
31
  model = AutoModelForCausalLM.from_pretrained(
32
+ "BAAI/Emu2-Chat",
33
  torch_dtype=torch.bfloat16,
34
  low_cpu_mem_usage=True,
35
  trust_remote_code=True).to('cuda').eval()
 
68
  from transformers import AutoModelForCausalLM, AutoTokenizer
69
 
70
 
71
+ tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")
72
 
73
  model = AutoModelForCausalLM.from_pretrained(
74
+ "BAAI/Emu2-Chat",
75
  torch_dtype=torch.bfloat16,
76
  low_cpu_mem_usage=True,
77
  trust_remote_code=True).to('cuda').eval()
 
116
  from transformers import AutoModelForCausalLM, AutoTokenizer
117
  from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
118
 
119
+ tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")
120
 
121
  with init_empty_weights():
122
  model = AutoModelForCausalLM.from_pretrained(
123
+ "BAAI/Emu2-Chat",
124
  torch_dtype=torch.bfloat16,
125
  low_cpu_mem_usage=True,
126
  trust_remote_code=True)
 
167
  from transformers import AutoModelForCausalLM, AutoTokenizer
168
  from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
169
 
170
+ tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")
171
 
172
  with init_empty_weights():
173
  model = AutoModelForCausalLM.from_pretrained(
174
+ "BAAI/Emu2-Chat",
175
  torch_dtype=torch.bfloat16,
176
  low_cpu_mem_usage=True,
177
  trust_remote_code=True)
 
226
  from transformers import AutoModelForCausalLM, AutoTokenizer
227
 
228
 
229
+ tokenizer = AutoTokenizer.from_pretrained("BAAI/Emu2-Chat")
230
 
231
  model = AutoModelForCausalLM.from_pretrained(
232
+ "BAAI/Emu2-Chat",
233
  load_in_4bit=True,
234
  trust_remote_code=True,
235
  bnb_4bit_compute_dtype=torch.float16).eval()