jian1114 committed
Commit 00bedb5 • 1 Parent(s): c4c4111

update app file

Files changed (1)
  1. app.py +6 -2
app.py CHANGED
@@ -7,12 +7,16 @@ tokenizer = PreTrainedTokenizerFast.from_pretrained(model_name)
 model = BartForConditionalGeneration.from_pretrained(model_name)
 
 def process_paragraph(paragraph):
-    input_ids = tokenizer.encode(paragraph, return_tensors='pt', max_length=1024)
+    # Return a list from tokenizer.encode instead of a tensor
+    input_ids_list = tokenizer.encode(paragraph, max_length=1024)
+    # Convert the list to a tensor when needed
+    input_ids = torch.tensor([input_ids_list])
+
     output = model.generate(input_ids, max_length=32, num_beams=10, early_stopping=True)
     subheading = tokenizer.decode(output[0], skip_special_tokens=True)
 
     subheading_final = "" # the actual subheading to return
-    check_list = ["em class", "violet_text", "green_text", "red_text","blue_text"]
+    check_list = ["em class", "violet_text", "green_text", "red_text", "blue_text"]
     if subheading=="O" or "OO" in subheading:
         subheading_final = "😒소제목 생성 실패: 더 자세한 내용이 필요합니다."
     elif any(x in subheading for x in check_list):
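
For reference, a minimal standalone sketch of what this change does to the input handed to model.generate: the old call asked tokenizer.encode for a PyTorch tensor directly, while the new code takes the plain Python list of token ids and adds the batch dimension itself with torch.tensor (which assumes torch is imported elsewhere in app.py). The checkpoint name below is a hypothetical stand-in for illustration; the real model_name is defined earlier in app.py and is not part of this hunk.

import torch
from transformers import PreTrainedTokenizerFast

# Hypothetical checkpoint for illustration only; app.py's actual model_name
# is set above the hunk shown in this commit.
model_name = "gogamza/kobart-base-v2"
tokenizer = PreTrainedTokenizerFast.from_pretrained(model_name)

paragraph = "Any input paragraph."

# Old path: encode straight to a (1, seq_len) PyTorch tensor.
old_ids = tokenizer.encode(paragraph, return_tensors="pt", max_length=1024)

# New path: encode to a plain list of token ids, then wrap it in a batch
# dimension before passing it to model.generate.
new_ids = torch.tensor([tokenizer.encode(paragraph, max_length=1024)])

assert torch.equal(old_ids, new_ids)

Both paths produce the same input_ids, so model.generate behaves identically; the split form simply exposes the intermediate token-id list before the tensor is built.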