safihaider committed on
Commit
3f9ab1d
1 Parent(s): f60145e

model change

Browse files
Files changed (2) hide show
  1. ChatController.py +1 -1
  2. ChatService.py +2 -2
ChatController.py CHANGED
@@ -9,7 +9,7 @@ app = Flask(__name__)
9
  CORS(app)
10
 
11
  chatService = ChatService()
12
- chatService.load_model("collinear-ai/Llama-2-7b-chat-crs")
13
 
14
  @app.route("/chat", methods=['POST'])
15
  @cross_origin(origin='*')
 
9
  CORS(app)
10
 
11
  chatService = ChatService()
12
+ chatService.load_model("collinear-ai/LLaMA-2-7B-chat-csr")
13
 
14
  @app.route("/chat", methods=['POST'])
15
  @cross_origin(origin='*')
ChatService.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  import torch
3
  import transformers
4
- # from huggingface_hub import login
5
 
6
  os.environ["CUDA_VISIBLE_DEVICES"] = "0"
7
 
@@ -19,7 +19,7 @@ class ChatService:
19
  gpu_count = torch.cuda.device_count()
20
  print('gpu_count', gpu_count)
21
 
22
- # login("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")
23
 
24
  tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
25
  pipeline = transformers.pipeline(
 
1
  import os
2
  import torch
3
  import transformers
4
+ from huggingface_hub import login
5
 
6
  os.environ["CUDA_VISIBLE_DEVICES"] = "0"
7
 
 
19
  gpu_count = torch.cuda.device_count()
20
  print('gpu_count', gpu_count)
21
 
22
+ login("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")
23
 
24
  tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
25
  pipeline = transformers.pipeline(