Create common.py
common.py ADDED
import re
import numpy as np

import tiktoken
from langchain.text_splitter import TokenTextSplitter


def strtobool(val):
    """Convert a truth-value string ('yes'/'no', 'true'/'false', 't'/'f', '1'/'0') to a bool."""
    val = val.lower()
    if val in ('yes', 'true', 't', '1'):
        return True
    elif val in ('no', 'false', 'f', '0'):
        return False
    else:
        raise ValueError(f"Invalid truth value {val}")
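
# Illustrative usage (not part of the original file):
# >>> strtobool("Yes")
# True
# >>> strtobool("0")
# False
# Any other string raises ValueError.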


def split_camel_case(word):
    # This pattern matches the zero-width transition from a lowercase letter to an uppercase letter
    pattern = re.compile(r'(?<=[a-z])(?=[A-Z])')

    # Replace the matched position between lowercase and uppercase letters with a space
    split_word = pattern.sub(' ', word)

    return split_word
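
# Illustrative usage (not part of the original file):
# >>> split_camel_case("camelCaseWord")
# 'camel Case Word'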


# Split a token sequence into chunks of at most max_len tokens
def chunk_tokens(tokens, max_len):
    for i in range(0, len(tokens), max_len):
        yield tokens[i:i + max_len]
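
# Illustrative usage (not part of the original file):
# >>> list(chunk_tokens([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]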


def update_nested_dict(d, u):
    # Recursively merge dict u into dict d: nested dicts are merged key by key,
    # while scalar values in u overwrite those in d
    for k, v in u.items():
        if isinstance(v, dict):
            d[k] = update_nested_dict(d.get(k, {}), v)
        else:
            d[k] = v
    return d
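
# Illustrative usage (not part of the original file):
# >>> update_nested_dict({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
# {'a': {'x': 1, 'y': 2}, 'b': 3}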


def cleanInputText(textInputLLM):
    # Sequentially apply all the replacement and cleaning operations to textInputLLM

    # Remove stringified-tuple artifacts such as ('\n\n or \n\n",) left over from upstream processing
    textInputLLM = re.sub(r'\(\'\\n\\n', ' ', textInputLLM)
    textInputLLM = re.sub(r'\(\"\\n\\n', ' ', textInputLLM)
    textInputLLM = re.sub(r'\\n\\n\',\)', ' ', textInputLLM)
    textInputLLM = re.sub(r'\\n\\n\",\)', ' ', textInputLLM)

    # Apply replacements in while loops, repeating until no match remains
    # (a single pass can leave new matches behind, e.g. '----' -> '--')
    while re.search(r'##\n', textInputLLM):
        textInputLLM = re.sub(r"##\n", '. ', textInputLLM)
    while '###' in textInputLLM:
        textInputLLM = textInputLLM.replace("###", ' ')
    while '##' in textInputLLM:
        textInputLLM = textInputLLM.replace("##", ' ')
    while ' # ' in textInputLLM:
        textInputLLM = textInputLLM.replace(" # ", ' ')
    while '--' in textInputLLM:
        textInputLLM = textInputLLM.replace("--", '-')
    while re.search(r'\\\\-', textInputLLM):
        textInputLLM = re.sub(r"\\\\-", '.', textInputLLM)
    while re.search(r'\*\*\n', textInputLLM):
        textInputLLM = re.sub(r"\*\*\n", '. ', textInputLLM)
    while re.search(r'\*\*\*', textInputLLM):
        textInputLLM = re.sub(r"\*\*\*", ' ', textInputLLM)
    while re.search(r'\*\*', textInputLLM):
        textInputLLM = re.sub(r"\*\*", ' ', textInputLLM)
    while re.search(r' \* ', textInputLLM):
        textInputLLM = re.sub(r" \* ", ' ', textInputLLM)
    while re.search(r'is a program of the\n\nInternational Society for Infectious Diseases', textInputLLM):
        textInputLLM = re.sub(
            r'is a program of the\n\nInternational Society for Infectious Diseases',
            'is a program of the International Society for Infectious Diseases',
            textInputLLM,
            flags=re.M
        )

    # Optional newline flattening, kept disabled:
    # while re.search(r'\n\n', textInputLLM):
    #     textInputLLM = re.sub(r'\n\n', '. ', textInputLLM)
    # while re.search(r'\n', textInputLLM):
    #     textInputLLM = re.sub(r'\n', ' ', textInputLLM)

    while re.search(r' \*\.', textInputLLM):
        textInputLLM = re.sub(r' \*\.', ' .', textInputLLM)
    # Collapse runs of spaces (the two-space literal is intentional;
    # replacing a single space with a single space would loop forever)
    while '  ' in textInputLLM:
        textInputLLM = textInputLLM.replace('  ', ' ')
    while re.search(r'\.\.', textInputLLM):
        textInputLLM = re.sub(r'\.\.', '.', textInputLLM)
    while re.search(r'\. \.', textInputLLM):
        textInputLLM = re.sub(r'\. \.', '.', textInputLLM)

    # Final cleanup of leftover quote/parenthesis artifacts
    textInputLLM = re.sub(r'\(\"\.', ' ', textInputLLM)
    textInputLLM = re.sub(r'\(\'\.', ' ', textInputLLM)
    textInputLLM = re.sub(r'\",\)', ' ', textInputLLM)
    textInputLLM = re.sub(r'\',\)', ' ', textInputLLM)

    # Strip leading/trailing whitespace
    textInputLLM = textInputLLM.strip()

    return textInputLLM
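
# Illustrative usage (not part of the original file):
# >>> cleanInputText("Title ### with -- dashes  here")
# 'Title with - dashes here'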


def encoding_getter(encoding_type: str):
    """
    Returns the appropriate tiktoken encoding for the given encoding type
    (either an encoding name such as "cl100k_base" or an OpenAI model name).

    tiktoken supports three encodings used by OpenAI models:

    Encoding name          OpenAI models
    cl100k_base            gpt-4, gpt-3.5-turbo, text-embedding-ada-002
    p50k_base              Codex models, text-davinci-002, text-davinci-003
    r50k_base (or gpt2)    GPT-3 models like davinci

    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    """
    if "k_base" in encoding_type:
        return tiktoken.get_encoding(encoding_type)
    else:
        try:
            return tiktoken.encoding_for_model(encoding_type)
        except Exception:
            # Fall back to the default encoding for gpt-4 / gpt-3.5-turbo
            return tiktoken.get_encoding("cl100k_base")


def tokenizer(string: str, encoding_type: str) -> list:
    """
    Returns the tokens in a text string using the specified encoding.
    """
    encoding = encoding_getter(encoding_type)
    tokens = encoding.encode(string)
    return tokens


def token_counter(string: str, encoding_type: str) -> int:
    """
    Returns the number of tokens in a text string using the specified encoding.
    """
    num_tokens = len(tokenizer(string, encoding_type))
    return num_tokens
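
# Illustrative usage (not part of the original file); exact token ids depend on
# the encoding that tiktoken resolves for the given name:
# >>> tokenizer("hello world", "gpt-3.5-turbo")   # resolves to cl100k_base
# [15339, 1917]
# >>> token_counter("hello world", "cl100k_base")
# 2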


# Extract words from a given text
def extract_words(text, putInLower=False):
    # Use regex to find all words (sequences of alphanumeric characters)
    if putInLower:
        return [word.lower() for word in re.findall(r'\b\w+\b', text)]
    else:
        return re.findall(r'\b\w+\b', text)


# Check whether every word in 'compound_word' appears in 'word_list'
# (when putInLower=True, word_list is expected to contain lowercase entries)
def all_words_in_list(compound_word, word_list, putInLower=False):
    words_to_check = extract_words(compound_word, putInLower=putInLower)
    if putInLower:
        return all(word.lower() in word_list for word in words_to_check)
    else:
        return all(word in word_list for word in words_to_check)
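
# Illustrative usage (not part of the original file):
# >>> extract_words("Hello, wide world!")
# ['Hello', 'wide', 'world']
# >>> all_words_in_list("Hello world", ['hello', 'wide', 'world'], putInLower=True)
# True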


def row_to_dict_string(rrrow, columnsDict):
    # Build a JSON-like string from the row, keeping only columns listed in columnsDict.
    # Note: string values are not escaped, so embedded double quotes will break the output.
    formatted_items = []
    for col in rrrow.index:
        if col not in columnsDict:
            continue
        value = rrrow[col]
        # Numbers are emitted unquoted; everything else is quoted
        if isinstance(value, (int, float)):
            formatted_items.append(f'"{col}": {value}')  # double quotes for keys
        else:
            formatted_items.append(f'"{col}": "{value}"')  # double quotes for keys and string values
    # Join the items and enclose them in {}
    return '{' + ', '.join(formatted_items) + '}'
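
# Illustrative usage (not part of the original file), assuming pandas is available:
# >>> import pandas as pd
# >>> row = pd.Series({'disease': 'measles', 'cases': 12, 'note': 'suspected'})
# >>> row_to_dict_string(row, {'disease': None, 'cases': None})
# '{"disease": "measles", "cases": 12}'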


def rescale_exponential_to_linear(df, column, new_min=0.5, new_max=1.0):
    # Get the original exponential scores
    original_scores = df[column]

    # Normalize the scores to a 0-1 range
    # (assumes the column is not constant; otherwise this divides by zero)
    min_score = original_scores.min()
    max_score = original_scores.max()
    normalized_scores = (original_scores - min_score) / (max_score - min_score)

    # Rescale the normalized scores to the interval [new_min, new_max]
    linear_scores = new_min + (normalized_scores * (new_max - new_min))

    # Assign the linear scores back to the dataframe
    df[column] = linear_scores

    return df


def rescale_exponential_to_logarithmic(df, column, new_min=0.5, new_max=1.0):
    # Ensure all values are strictly positive, because log(0) is undefined
    epsilon = 1e-10
    df[column] = df[column] + epsilon

    # Apply the logarithmic transformation
    log_transformed_scores = np.log(df[column])

    # Normalize the log-transformed scores to a 0-1 range
    # (assumes the column is not constant; otherwise this divides by zero)
    min_score = log_transformed_scores.min()
    max_score = log_transformed_scores.max()
    normalized_log_scores = (log_transformed_scores - min_score) / (max_score - min_score)

    # Rescale the normalized scores to the interval [new_min, new_max]
    logarithmic_scores = new_min + (normalized_log_scores * (new_max - new_min))

    # Assign the logarithmically scaled scores back to the dataframe
    df[column] = logarithmic_scores

    return df
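
# Illustrative usage (not part of the original file), assuming pandas is available:
# >>> import pandas as pd
# >>> df = pd.DataFrame({'score': [1.0, 10.0, 100.0]})
# >>> rescale_exponential_to_linear(df.copy(), 'score')['score'].round(4).tolist()
# [0.5, 0.5455, 1.0]
# >>> rescale_exponential_to_logarithmic(df.copy(), 'score')['score'].round(4).tolist()
# [0.5, 0.75, 1.0]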