FangyuLei committed on
Commit
43cfb4b
1 Parent(s): 1dc6989

Upload tatqa_utils.py

Files changed (1)
  1. tatqa_utils.py +144 -0
tatqa_utils.py ADDED
@@ -0,0 +1,144 @@
import re
import string
from typing import List, Optional

import numpy as np

def scale_to_num(scale):
    """Map a scale word found in text to its numeric multiplier."""
    scale = scale.lower()
    num = 1
    if 'hundred' in scale:
        num = 100
    elif 'thousand' in scale:
        num = 1000
    elif 'million' in scale:
        num = 1000000
    elif 'billion' in scale:
        num = 1000000000
    elif 'percent' in scale:
        num = 0.01
    return num

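# Illustrative behaviour (follows directly from the branches above):
#   scale_to_num("million")  -> 1000000
#   scale_to_num("percent")  -> 0.01
#   scale_to_num("dollars")  -> 1    (no recognized scale word)
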
EXCLUDE_IN_NUM = "'\"\\$€£¥%(),[]"

def _clean_num(text: str):
    """Strip currency symbols, quotes, brackets and other noise from a numeric string."""
    return "".join(ch for ch in str(text) if ch not in EXCLUDE_IN_NUM)

def extract_one_num_from_str(s):
    """Return the first number found in s as an int or float, or None."""
    s = _clean_num(s)
    r_num = r"([+-]?\d+(\.\d+)?)|([+-]?\.\d+)"
    groups = re.findall(r_num, s)
    if len(groups) == 0:
        return None
    num = groups[0][0]
    if num == '':
        # a bare ".5"-style match lands in the third group; treated as no match
        return None
    if '.' in num:
        return float(num)
    return int(num)

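# Illustrative behaviour:
#   extract_one_num_from_str("$1,234.56") -> 1234.56
#   extract_one_num_from_str("(8)")       -> 8
#   extract_one_num_from_str("n/a")       -> None
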
def is_number(text: str) -> bool:
    """True if text is a bare number ("1023") or a number plus a scale word ("1 million")."""
    try:
        words = " ".join([_clean_num(w) for w in text.split()]).split()
        if len(words) == 0:
            return False
        num = float(words[0])
        if np.isnan(num):
            return False
        if len(words) >= 2:
            # any second word must be a recognized scale word ("million", "percent", ...)
            if scale_to_num(words[1]) == 1:
                return False
        return True
    except ValueError:
        return False

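# Illustrative behaviour:
#   is_number("1,023")     -> True
#   is_number("1 million") -> True
#   is_number("1 dog")     -> False  (second word is not a scale word)
#   is_number("abc")       -> False
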
def negative_num_handle(x):
    """
    :param x: transform (134) -> -134
    :return: -1 if x is a parenthesized (negative) number, else 1
    """
    matches = re.findall(r'(\([\d.\s]+\))', x.strip())
    if len(matches) > 0:
        return -1
    return 1

def percent_num_handle(x):
    """
    :param x: transform 12% -> 12/100
    :return: 0.01 if x is a percentage, else 1
    """
    matches = re.findall(r'([\d.\s]+%)', x.strip())
    if len(matches) > 0:
        return 0.01
    return 1

def word_scale_handle(x):
    """
    :param x: 1 million = 1,000,000
    :return: the multiplier for the first number-plus-word span found, else 1
    """
    for one in re.finditer(r'([\d.]+\s?[a-zA-Z]+)', x):
        text = one.group(0).lower()
        return scale_to_num(text)
    return 1

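# Illustrative behaviour of the three multiplier handlers:
#   negative_num_handle("(134)")      -> -1
#   percent_num_handle("12%")         -> 0.01
#   word_scale_handle("3.5 million")  -> 1000000
#   word_scale_handle("42")           -> 1
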
def to_number(text: str) -> Optional[float]:
    """Parse text into a number, applying scale words, parenthesized negatives and percent signs."""
    num = extract_one_num_from_str(text)
    scale_val = word_scale_handle(text)
    negative_flag = negative_num_handle(text)
    percent_flag = percent_num_handle(text)
    if num is not None:
        return round(num * scale_val * negative_flag * percent_flag, 4)
    return None

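# Illustrative behaviour:
#   to_number("1.2 million") -> 1200000.0
#   to_number("(3.2)")       -> -3.2
#   to_number("16.7%")       -> 0.167
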
def remove_articles(text: str) -> str:
    regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
    return re.sub(regex, ' ', text)

def white_space_fix(text: str) -> str:
    return ' '.join(text.split())

EXCLUDE = set(string.punctuation)

def remove_punc(text: str) -> str:
    # numbers keep their punctuation ("$1,023") so to_number can still parse them
    if not is_number(text):
        return ''.join(ch for ch in text if ch not in EXCLUDE)
    else:
        return text

def lower(text: str) -> str:
    return text.lower()

def tokenize(text: str) -> List[str]:
    return re.split(" ", text)

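# Illustrative behaviour of the normalization helpers:
#   remove_articles("the net sales")  -> "  net sales"   (articles become spaces)
#   white_space_fix("  net   sales ") -> "net sales"
#   remove_punc("sales!")             -> "sales"
#   remove_punc("$1,023")             -> "$1,023"   (left intact because it is a number)
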
def normalize_number(text: str) -> str:
    if is_number(text):
        return str(to_number(text))
    else:
        return text

def normalize_answer(text: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""
    parts = [white_space_fix(remove_articles(normalize_number(remove_punc(lower(token)))))
             for token in tokenize(text)]
    parts = [part for part in parts if part.strip()]
    normalized = ' '.join(parts).strip()
    return normalized

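# Illustrative behaviour of the full pipeline: each token is lowered,
# punctuation is stripped (numbers excepted), numbers are canonicalized via
# to_number, articles are dropped, and whitespace is collapsed:
#   normalize_answer("The answer is $1,023") -> "answer is 1023"
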
STRIPPED_CHARACTERS = string.punctuation + ''.join([u"‘", u"’", u"´", u"`", "_"])

def ws_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip().lower()
    if not text:
        return []
    text = white_space_fix(text)
    tokens = text.split()
    tokens = [token.strip(STRIPPED_CHARACTERS) for token in tokens]
    return tokens

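# Minimal end-to-end sketch; illustrative sanity checks whose expected values
# follow from the functions above:
if __name__ == "__main__":
    assert to_number("1.2 million") == 1200000.0
    assert to_number("(3.2)") == -3.2
    assert normalize_answer("The answer is $1,023") == "answer is 1023"
    assert ws_tokenize("  The Company's net-sales! ") == ["the", "company's", "net-sales"]
    print("all tatqa_utils sanity checks passed")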