voidful committed
Commit a197664
1 Parent(s): 9434dcf

Create README.md

add usage example

Files changed (1)
  1. README.md +28 -0
README.md ADDED
Usage:
```
import json

import IPython.display as ipd
from asrp.code2voice_model.hubert import hifigan_hubert_layer6_code100
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Long-T5 model fine-tuned for spoken question answering over discrete HuBERT units
tokenizer = AutoTokenizer.from_pretrained("Oscarshih/long-t5-base-SQA-15ep")
model = AutoModelForSeq2SeqLM.from_pretrained("Oscarshih/long-t5-base-SQA-15ep")
dataset = load_dataset("voidful/NMSQA-CODE")

# vocoder that resynthesizes speech from HuBERT layer-6, 100-cluster unit sequences
cs = hifigan_hubert_layer6_code100()

# pick one dev example and read its question / context / answer unit sequences
qa_item = dataset['dev'][0]
question_unit = json.loads(qa_item['hubert_100_question_unit'])[0]["merged_code"]
context_unit = json.loads(qa_item['hubert_100_context_unit'])[0]["merged_code"]
answer_unit = json.loads(qa_item['hubert_100_answer_unit'])[0]["merged_code"]

# ground-truth answer, resynthesized from its unit sequence
ipd.Audio(data=cs(answer_unit), autoplay=False, rate=cs.sample_rate)

# predicted answer: encode question + context units as v_tok_* tokens,
# generate, then map the generated tokens back to integer units
inputs = tokenizer("".join([f"v_tok_{i}" for i in question_unit]) + "".join([f"v_tok_{i}" for i in context_unit]), return_tensors="pt")
code = tokenizer.batch_decode(model.generate(**inputs, max_length=1024))[0]
code = [int(i) for i in code.replace("</s>", "").replace("<s>", "").split("v_tok_")[1:]]
ipd.Audio(data=cs(code), autoplay=False, rate=cs.sample_rate)
```
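
To keep the predicted answer instead of only playing it in a notebook, the waveform returned by the vocoder can be written to a file. The snippet below is a minimal sketch, assuming `cs(code)` returns a 1-D NumPy float waveform sampled at `cs.sample_rate` and that the `soundfile` package is installed; the output filename is arbitrary.

```
import soundfile as sf

# assumption: cs(code) yields a 1-D float waveform at cs.sample_rate
wav = cs(code)
sf.write("predicted_answer.wav", wav, cs.sample_rate)
```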