Tonic committed on
Commit
685a1c4
1 Parent(s): 9b11223

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -22,15 +22,15 @@ rm_model = AutoModelForSequenceClassification.from_pretrained('OpenAssistant/rew
22
 
23
  @spaces.GPU
24
  def generate_text(usertitle, content, temperature, max_length, N=3):
25
- msg ={
26
- 'title': usertitle,
27
- 'content': content
28
- }
29
- # input_text = f"title: {usertitle}\ncontent: {content}"
30
  inputs = tokenizer.apply_chat_template(msg, return_tensors='pt').cuda()
31
  attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
32
  generated_sequences = model.generate(inputs['input_ids'], attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
33
- decoded_sequences = [tokenizer.decode(g, skip_special_tokens=True) for g in generated_sequences]
34
 
35
  def score(sequence):
36
  inputs = rm_tokenizer(sequence, return_tensors='pt', padding=True, truncation=True, max_length=512).to('cuda')
 
22
 
23
  @spaces.GPU
24
  def generate_text(usertitle, content, temperature, max_length, N=3):
25
+ # msg ={
26
+ # 'title': usertitle,
27
+ # 'content': content
28
+ # }
29
+ input_text = f"[[[title:]]] {usertitle}\n[[[content:]]]{content}\n\n"
30
  inputs = tokenizer.apply_chat_template(msg, return_tensors='pt').cuda()
31
  attention_mask = torch.ones(inputs['input_ids'].shape, dtype=torch.long, device='cuda')
32
  generated_sequences = model.generate(inputs['input_ids'], attention_mask=attention_mask, temperature=temperature, max_length=max_length, pad_token_id=tokenizer.eos_token_id, num_return_sequences=N, do_sample=True)
33
+ decoded_sequences = [tokenizer.decode(g) for g in generated_sequences]#.strip().split(tokenizer.eos_token)[0]
34
 
35
  def score(sequence):
36
  inputs = rm_tokenizer(sequence, return_tensors='pt', padding=True, truncation=True, max_length=512).to('cuda')