id (int64, 0 to 190k) | prompt (string, lengths 21 to 13.4M) | docstring (string, lengths 1 to 12k, ⌀ = null) |
---|---|---|
1,313 | import os
import copy
import logging
import torch
import numpy as np
import time
from filelock import FileLock
import json
import itertools
import random
import transformers
from src.processors import processors_mapping, num_labels_mapping, output_modes_mapping, compute_metrics_mapping, median_mapping
from transformers.data.processors.utils import InputFeatures
from transformers import DataProcessor, InputExample
import dataclasses
from dataclasses import dataclass
from typing import List, Optional, Union
from sentence_transformers import SentenceTransformer, util
from copy import deepcopy
import pandas as pd
def input_example_to_string(example, sep_token):
if example.text_b is None:
return example.text_a
else:
# Warning: very simple hack here
return example.text_a + ' ' + sep_token + ' ' + example.text_b | null |
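A minimal, hypothetical call to the row above, assuming `input_example_to_string` from that row is in scope and using `transformers.InputExample` with a RoBERTa-style separator (both illustrative choices, not part of the dataset row):

```python
from transformers import InputExample

# Hypothetical sentence pair; any text_a/text_b combination behaves the same way.
ex = InputExample(guid="demo-0", text_a="The movie was great.", text_b="It had a strong cast.")
sep_token = "</s>"  # RoBERTa-style separator; BERT would use "[SEP]"

# The "very simple hack": join text_a and text_b with the separator and single spaces.
print(input_example_to_string(ex, sep_token))
# -> The movie was great. </s> It had a strong cast.
```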
1,314 | import os
import copy
import logging
import torch
import numpy as np
import time
from filelock import FileLock
import json
import itertools
import random
import transformers
from src.processors import processors_mapping, num_labels_mapping, output_modes_mapping, compute_metrics_mapping, median_mapping
from transformers.data.processors.utils import InputFeatures
from transformers import DataProcessor, InputExample
import dataclasses
from dataclasses import dataclass
from typing import List, Optional, Union
from sentence_transformers import SentenceTransformer, util
from copy import deepcopy
import pandas as pd
logger = logging.getLogger(__name__)
def input_example_to_tuple(example):
if example.text_b is None:
if pd.isna(example.text_a) or example.text_a is None:
logger.warn("Empty input")
return ['']
else:
return [example.text_a]
else:
return [example.text_a, example.text_b] | null |
1,315 | import os
import copy
import logging
import torch
import numpy as np
import time
from filelock import FileLock
import json
import itertools
import random
import transformers
from src.processors import processors_mapping, num_labels_mapping, output_modes_mapping, compute_metrics_mapping, median_mapping
from transformers.data.processors.utils import InputFeatures
from transformers import DataProcessor, InputExample
import dataclasses
from dataclasses import dataclass
from typing import List, Optional, Union
from sentence_transformers import SentenceTransformer, util
from copy import deepcopy
import pandas as pd
logger = logging.getLogger(__name__)
def tokenize_multipart_input(
input_text_list,
max_length,
tokenizer,
task_name=None,
prompt=False,
template=None,
label_word_list=None,
first_sent_limit=None,
other_sent_limit=None,
gpt3=False,
truncate_head=False,
support_labels=None,
prompt_num=10,
):
def enc(text):
return tokenizer.encode(text, add_special_tokens=False)
input_ids = []
attention_mask = []
token_type_ids = [] # Only for BERT
mask_pos = None # Position of the mask token
if prompt:
"""
Concatenate all sentences and prompts based on the provided template.
Template example: '*cls*It was*mask*.*sent_0**<sep>*label_0:*sent_1**<sep>**label_1*:*sent_2**<sep>*'
*xx* represent variables:
*cls*: cls_token
*mask*: mask_token
*sep*: sep_token
*sep+*: sep_token, also means +1 for segment id
*sent_i*: sentence i (input_text_list[i])
*sent-_i*: same as above, but delete the last token
*sentl_i*: same as above, but use lower case for the first word
*sentl-_i*: same as above, but use lower case for the first word and delete the last token
*+sent_i*: same as above, but add a space before the sentence
*+sentl_i*: same as above, but add a space before the sentence and use lower case for the first word
*label_i*: label_word_list[i]
*labelx_i*: label of support example i (support_labels needed); only used in GPT-3-style in-context learning
Use "_" to replace space.
PAY ATTENTION TO SPACE!! DO NOT leave space before variables, for this will lead to extra space token.
"""
assert template is not None
special_token_mapping = {
'cls': tokenizer.cls_token_id, 'mask': tokenizer.mask_token_id, 'sep': tokenizer.sep_token_id, 'sep+': tokenizer.sep_token_id,
'prompt': [-(i+1) for i in range(prompt_num)]
}
template_list = template.split('*') # Get variable list in the template
segment_id = 0 # Current segment id. Segment id +1 if encountering sep+.
st_prompt = 0 # The start position of prompt
for part_id, part in enumerate(template_list):
new_tokens = []
segment_plus_1_flag = False
if part in special_token_mapping:
if part == 'cls' and 'T5' in type(tokenizer).__name__:
# T5 does not have cls token
continue
if part != 'prompt':
new_tokens.append(special_token_mapping[part])
else:
new_tokens += special_token_mapping[part]
st_prompt = len(input_ids)
if part == 'sep+':
segment_plus_1_flag = True
elif part[:6] == 'label_':
# Note that label_word_list already has extra space, so do not add more space ahead of it.
label_id = int(part.split('_')[1])
label_word = label_word_list[label_id]
new_tokens.append(label_word)
elif part[:7] == 'labelx_':
instance_id = int(part.split('_')[1])
label_id = support_labels[instance_id]
label_word = label_word_list[label_id]
new_tokens.append(label_word)
elif part[:5] == 'sent_':
sent_id = int(part.split('_')[1])
new_tokens += enc(input_text_list[sent_id])
elif part[:6] == '+sent_':
# Add space
sent_id = int(part.split('_')[1])
new_tokens += enc(' ' + input_text_list[sent_id])
elif part[:6] == 'sent-_':
# Delete the last token
sent_id = int(part.split('_')[1])
new_tokens += enc(input_text_list[sent_id][:-1])
elif part[:6] == 'sentl_':
# Lower case the first token
sent_id = int(part.split('_')[1])
text = input_text_list[sent_id]
text = text[:1].lower() + text[1:]
new_tokens += enc(text)
elif part[:7] == '+sentl_':
# Lower case the first token and add space
sent_id = int(part.split('_')[1])
text = input_text_list[sent_id]
text = text[:1].lower() + text[1:]
new_tokens += enc(' ' + text)
elif part[:7] == 'sentl-_':
# Lower case the first token and discard the last token
sent_id = int(part.split('_')[1])
text = input_text_list[sent_id]
text = text[:1].lower() + text[1:]
new_tokens += enc(text[:-1])
elif part[:6] == 'sentu_':
# Upper case the first token
sent_id = int(part.split('_')[1])
text = input_text_list[sent_id]
text = text[:1].upper() + text[1:]
new_tokens += enc(text)
elif part[:7] == '+sentu_':
# Upper case the first token and add space
sent_id = int(part.split('_')[1])
text = input_text_list[sent_id]
text = text[:1].upper() + text[1:]
new_tokens += enc(' ' + text)
else:
# Just natural language prompt
part = part.replace('_', ' ')
# handle special case when T5 tokenizer might add an extra space
if len(part) == 1:
new_tokens.append(tokenizer._convert_token_to_id(part))
else:
new_tokens += enc(part)
if part[:4] == 'sent' or part[1:5] == 'sent':
# If this part is the sentence, limit the sentence length
sent_id = int(part.split('_')[1])
if sent_id == 0:
if first_sent_limit is not None:
new_tokens = new_tokens[:first_sent_limit]
else:
if other_sent_limit is not None:
new_tokens = new_tokens[:other_sent_limit]
input_ids += new_tokens
attention_mask += [1 for i in range(len(new_tokens))]
token_type_ids += [segment_id for i in range(len(new_tokens))]
if segment_plus_1_flag:
segment_id += 1
else:
input_ids = [tokenizer.cls_token_id]
attention_mask = [1]
token_type_ids = [0]
for sent_id, input_text in enumerate(input_text_list):
if input_text is None:
# Do not have text_b
continue
if pd.isna(input_text) or input_text is None:
# Empty input
input_text = ''
input_tokens = enc(input_text) + [tokenizer.sep_token_id]
input_ids += input_tokens
attention_mask += [1 for i in range(len(input_tokens))]
token_type_ids += [sent_id for i in range(len(input_tokens))]
if 'T5' in type(tokenizer).__name__: # T5 does not have CLS token
input_ids = input_ids[1:]
attention_mask = attention_mask[1:]
token_type_ids = token_type_ids[1:]
# Padding
if first_sent_limit is not None and len(input_ids) > max_length:
# If sentence limits are in use but the total length still exceeds max_length, report a warning
logger.warn("Input exceeds max_length limit: {}".format(tokenizer.decode(input_ids)))
while len(input_ids) < max_length:
input_ids.append(tokenizer.pad_token_id)
attention_mask.append(0)
token_type_ids.append(0)
# Truncate
if len(input_ids) > max_length:
if truncate_head:
logger.info("Truncate Head!")
input_ids = input_ids[-max_length:]
attention_mask = attention_mask[-max_length:]
token_type_ids = token_type_ids[-max_length:]
else:
logger.info("Truncate Tail!"+str(len(input_ids)))
# Default is to truncate the tail
if tokenizer.mask_token_id in input_ids[max_length:]:
mask_pos = input_ids.index(tokenizer.mask_token_id)
len_to_truncate = len(input_ids)-max_length
l, r = mask_pos-1-len_to_truncate, mask_pos-1
input_ids = input_ids[:l]+input_ids[r:]
attention_mask = attention_mask[:l]+attention_mask[r:]
token_type_ids = token_type_ids[:l]+token_type_ids[r:]
else:
input_ids = input_ids[:max_length]
attention_mask = attention_mask[:max_length]
token_type_ids = token_type_ids[:max_length]
# Find mask token
if prompt:
mask_pos = [input_ids.index(tokenizer.mask_token_id)]
# Make sure that the masked position is inside the max_length
if mask_pos[0] >= max_length:
logger.info(input_ids)
logger.info(len(input_ids))
logger.info(mask_pos)
assert mask_pos[0] < max_length
result = {'input_ids': input_ids, 'attention_mask': attention_mask}
if 'BERT' in type(tokenizer).__name__:
# Only provide token type ids for BERT
result['token_type_ids'] = token_type_ids
if prompt:
result['mask_pos'] = mask_pos
return result | null |
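A hedged usage sketch of `tokenize_multipart_input` from the row above, exercising the template mini-language described in its docstring. It assumes the row's function and imports are in scope and that a slow (non-fast) RoBERTa tokenizer is used, since the code calls the private `_convert_token_to_id`; the checkpoint name and template string are illustrative.

```python
from transformers import AutoTokenizer

# Slow tokenizer: the template code above relies on the private _convert_token_to_id method.
tokenizer = AutoTokenizer.from_pretrained("roberta-base", use_fast=False)

features = tokenize_multipart_input(
    input_text_list=["A deeply moving film."],
    max_length=32,
    tokenizer=tokenizer,
    prompt=True,
    # "*cls* <sent_0> It was *mask*. *sep+*" -- underscores stand in for spaces, per the docstring.
    template="*cls**sent_0*_It_was*mask*.*sep+*",
)
print(tokenizer.decode(features["input_ids"]))  # the filled-in prompt plus padding
print(features["mask_pos"])                     # position of the mask token
```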
1,316 | import os
import copy
import logging
import torch
import numpy as np
import time
from filelock import FileLock
import json
import itertools
import random
import transformers
from transformers.data.processors.utils import InputFeatures
from transformers import DataProcessor, InputExample
from transformers.data.processors.glue import *
from transformers.data.metrics import glue_compute_metrics
import dataclasses
from dataclasses import dataclass, asdict
from typing import List, Optional, Union
from sentence_transformers import SentenceTransformer, util
from copy import deepcopy
import pandas as pd
import logging
def text_classification_metrics(task_name, preds, labels):
return {"acc": (preds == labels).mean()} | null |
1,317 | import argparse
import pandas as pd
import json
import numpy as np
import torch
import os
from torch import device
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset
from transformers import GlueDataTrainingArguments, glue_compute_metrics
from transformers.data.metrics import simple_accuracy
from transformers.data.processors.glue import glue_processors
def get_glue_label(task, line):
if task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SNLI", "SST-2", "STS-B", "WNLI", "CoLA"]:
line = line.strip().split('\t')
if task == 'CoLA':
return line[1]
elif task == 'MNLI':
return line[-1]
elif task == 'MRPC':
return line[0]
elif task == 'QNLI':
return line[-1]
elif task == 'QQP':
return line[-1]
elif task == 'RTE':
return line[-1]
elif task == 'SNLI':
return line[-1]
elif task == 'SST-2':
return line[-1]
elif task == 'STS-B':
return 0 if float(line[-1]) < 2.5 else 1
elif task == 'WNLI':
return line[-1]
else:
raise NotImplementedError
else:
raise NotImplementedError
def get_labels(data_dir, k, seed, task, print_name):
if print_name in ['sst-5', 'mr', 'cr', 'mpqa', 'subj', 'trec']:
data = pd.read_csv(os.path.join(data_dir, print_name, '{}-{}'.format(k, seed), 'test.csv'), header=None).values.tolist()
labels = np.zeros((len(data)), dtype=np.uint8)
for i, example in enumerate(data):
labels[i] = example[0]
elif print_name in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SNLI", "SST-2", "STS-B", "WNLI", "CoLA"]:
lines = []
file_name = os.path.join(data_dir, print_name, '{}-{}'.format(k, seed), 'test.tsv')
if task == 'mnli':
file_name = os.path.join(data_dir, print_name, '{}-{}'.format(k, seed), 'test_matched.tsv')
elif task == 'mnli-mm':
file_name = os.path.join(data_dir, print_name, '{}-{}'.format(k, seed), 'test_mismatched.tsv')
for line in open(file_name):
lines.append(line.strip())
if task != 'cola':
lines = lines[1:]
label_list = glue_processors[task]().get_labels()
label_map = {k: i for i, k in enumerate(label_list)}
if task == 'sts-b':
label_map = {0: 0, 1: 1}
label_ids = np.zeros((len(lines)))
for line_id, line in enumerate(lines):
label_ids[line_id] = label_map[get_glue_label(print_name, line)]
return label_ids | null |
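A small illustration of `get_glue_label` from the row above (function assumed in scope; the tab-separated lines are made up to mimic labeled GLUE-style test files):

```python
# SST-2 lines carry the label in the last tab-separated column.
print(get_glue_label("SST-2", "an utterly charming film\t1\n"))          # '1'

# STS-B similarity scores are binarized at 2.5.
print(get_glue_label("STS-B", "42\tsentence one\tsentence two\t3.8\n"))  # 1
```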
1,318 | import os
import pandas as pd
import numpy as np
import os
def merge(table1, table2, table3):
t1 = pd.read_csv(table1, header=None, dtype=np.float64)
t2 = pd.read_csv(table2, header=None, dtype=np.float64)
for i in range(8):
t1.at[8, i] = t1[i].to_numpy().mean()
t2.at[8, i] = t2[i].to_numpy().mean()
t1.to_csv(table1, index=False, header=False)
t2.to_csv(table2, index=False, header=False)
res = pd.DataFrame([['NaN' for i in range(8)] for j in range(9)], dtype='str')
max_value = t1.max(axis=1)
for i in range(9):
for j in range(8):
if t1.at[i, j] == max_value.at[i]:
res.at[i, j] = f"\\textbf{{{round(t1.at[i, j]*100, 1)}}}$_{{{round(t2.at[i, j]*100, 1)}}}$"
else:
res.at[i, j] = f"{round(t1.at[i, j]*100, 1)}$_{{{round(t2.at[i, j]*100, 1)}}}$"
res.to_csv(table3, index=False, header=False) | null |
1,319 | import argparse
import os
import numpy as np
import pandas as pd
from pandas import DataFrame
def get_label(task, line):
if task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SST-2", "STS-B", "CoLA"]:
# GLUE style
line = line.strip().split('\t')
if task == 'CoLA':
return line[1]
elif task == 'MNLI':
return line[-1]
elif task == 'MRPC':
return line[0]
elif task == 'QNLI':
return line[-1]
elif task == 'QQP':
return line[-1]
elif task == 'RTE':
return line[-1]
elif task == 'SST-2':
return line[-1]
elif task == 'STS-B':
return 0 if float(line[-1]) < 2.5 else 1
else:
raise NotImplementedError
else:
return line[0] | null |
1,320 | import argparse
import os
import numpy as np
import pandas as pd
from pandas import DataFrame
def load_datasets(data_dir, tasks):
datasets = {}
for task in tasks:
if task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SST-2", "STS-B", "CoLA"]:
# GLUE style (tsv)
dataset = {}
dirname = os.path.join(data_dir, task)
if task == "MNLI":
splits = ["train", "dev_matched", "dev_mismatched"]
else:
splits = ["train", "dev"]
for split in splits:
filename = os.path.join(dirname, f"{split}.tsv")
with open(filename, "r") as f:
lines = f.readlines()
dataset[split] = lines
datasets[task] = dataset
else:
# Other datasets (csv)
dataset = {}
dirname = os.path.join(data_dir, task)
splits = ["train", "test"]
for split in splits:
filename = os.path.join(dirname, f"{split}.csv")
dataset[split] = pd.read_csv(filename, header=None)
datasets[task] = dataset
return datasets | null |
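A sketch of the directory layout `load_datasets` above expects, under a hypothetical `data/` root (GLUE tasks as TSV with a dev split, other tasks as header-less CSV); the function itself is assumed in scope:

```python
# Hypothetical layout:
#   data/SST-2/train.tsv, data/SST-2/dev.tsv
#   data/mr/train.csv,    data/mr/test.csv
datasets = load_datasets("data", ["SST-2", "mr"])

print(list(datasets["SST-2"].keys()))  # ['train', 'dev'] -- raw TSV lines
print(type(datasets["mr"]["train"]))   # pandas DataFrame parsed from CSV
```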
1,321 | import argparse
import os
import numpy as np
import pandas as pd
from pandas import DataFrame
The provided code snippet includes the necessary dependencies for implementing the `split_header` function. Write a Python function `def split_header(task, lines)` to solve the following problem:
Split a GLUE task file into (header, content) lines; CoLA files have no header. Only for GLUE tasks.
Here is the function:
def split_header(task, lines):
"""
Split a GLUE task file into (header, content) lines; CoLA files have no header. Only for GLUE tasks.
"""
if task in ["CoLA"]:
return [], lines
elif task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SST-2", "STS-B"]:
return lines[0:1], lines[1:]
else:
raise ValueError("Unknown GLUE task.") | Split a GLUE task file into (header, content) lines; CoLA files have no header. Only for GLUE tasks. |
1,322 | import transformers
from transformers import T5ForConditionalGeneration, T5Tokenizer
import argparse
import torch
import os
from tqdm import tqdm
import json
import argparse
import pandas as pd
def generate(dataset, template, model, tokenizer, target_number, mapping, beam, label=None, length_limit=None, truncate=None):
"""
Generate templates based on the given inputs.
label: only use instances with this label (deprecated)
length_limit: generate content at least as long as length_limit (deprecated)
"""
input_texts = []
input_tensors = []
max_length = 0
# Process the inputs
for item in dataset:
if label is None or item['label'] == label:
input_text = get_text(template, item['text'], item['label'], tokenizer, mapping)
if truncate is not None:
if truncate == 'head':
input_text = input_text[-256:]
elif truncate == 'tail':
input_text = input_text[:256]
else:
raise NotImplementedError
input_ids = torch.tensor(input_text).long()
max_length = max(max_length, input_ids.size(-1))
input_tensors.append(input_ids)
# Concatenate inputs as a batch
input_ids = torch.zeros((len(input_tensors), max_length)).long()
attention_mask = torch.zeros((len(input_tensors), max_length)).long()
for i in range(len(input_tensors)):
input_ids[i, :input_tensors[i].size(-1)] = input_tensors[i]
attention_mask[i, :input_tensors[i].size(-1)] = 1
# Print some examples
print('####### example #######')
print(tokenizer.decode(input_ids[0]))
print(tokenizer.decode(input_ids[1]))
print(tokenizer.decode(input_ids[2]))
print('####### example #######\n')
input_ids = input_ids.cuda()
attention_mask = attention_mask.cuda()
assert len(input_tensors) > 0
# Maximum generate content length
max_length = 20
start_mask = tokenizer._convert_token_to_id('<extra_id_0>')
ori_decoder_input_ids = torch.zeros((input_ids.size(0), max_length)).long()
ori_decoder_input_ids[..., 0] = model.config.decoder_start_token_id
# decoder_input_ids: decoder inputs for next regressive generation
# ll: log likelihood
# output_id: which part of generated contents we are at
# output: generated content so far
# last_length (deprecated): how long we have generated for this part
current_output = [{'decoder_input_ids': ori_decoder_input_ids, 'll': 0, 'output_id': 1, 'output': [], 'last_length': -1}]
for i in tqdm(range(max_length - 2)):
new_current_output = []
for item in current_output:
if item['output_id'] > target_number:
# Enough contents
new_current_output.append(item)
continue
decoder_input_ids = item['decoder_input_ids']
# Forward
batch_size = 32
turn = input_ids.size(0) // batch_size
if input_ids.size(0) % batch_size != 0:
turn += 1
aggr_output = []
for t in range(turn):
start = t * batch_size
end = min((t + 1) * batch_size, input_ids.size(0))
with torch.no_grad():
aggr_output.append(model(input_ids[start:end], attention_mask=attention_mask[start:end], decoder_input_ids=decoder_input_ids.cuda()[start:end])[0])
aggr_output = torch.cat(aggr_output, 0)
# Gather results across all input sentences, and sort generated tokens by log likelihood
aggr_output = aggr_output.mean(0)
log_denominator = torch.logsumexp(aggr_output[i], -1).item()
ids = list(range(model.config.vocab_size))
ids.sort(key=lambda x: aggr_output[i][x].item(), reverse=True)
ids = ids[:beam+3]
for word_id in ids:
output_id = item['output_id']
if word_id == start_mask - output_id or word_id == tokenizer._convert_token_to_id('</s>'):
# Finish one part
if length_limit is not None and item['last_length'] < length_limit[output_id - 1]:
check = False
else:
check = True
output_id += 1
last_length = 0
else:
last_length = item['last_length'] + 1
check = True
output_text = item['output'] + [word_id]
ll = item['ll'] + aggr_output[i][word_id] - log_denominator
new_decoder_input_ids = decoder_input_ids.new_zeros(decoder_input_ids.size())
new_decoder_input_ids[:] = decoder_input_ids
new_decoder_input_ids[..., i + 1] = word_id
# Forbid single space token, "....", and ".........."
if word_id in [3, 19794, 22354]:
check = False
# Forbid continuous "."
if len(output_text) > 1 and output_text[-2] == 5 and output_text[-1] == 5:
check = False
if check:
# Add new results to beam search pool
new_item = {'decoder_input_ids': new_decoder_input_ids, 'll': ll, 'output_id': output_id, 'output': output_text, 'last_length': last_length}
new_current_output.append(new_item)
if len(new_current_output) == 0:
break
new_current_output.sort(key=lambda x: x['ll'], reverse=True)
new_current_output = new_current_output[:beam]
current_output = new_current_output
result = []
print("####### generated results #######")
for item in current_output:
generate_text = ''
for token in item['output']:
generate_text += tokenizer._convert_id_to_token(token)
print('--------------')
print('score:', item['ll'].item())
print('generated ids', item['output'])
print('generated text', generate_text)
result.append(generate_text)
print("####### generated results #######\n")
return result
def load_dataset(task, data_dir):
if task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SNLI", "SST-2", "STS-B", "WNLI", "CoLA"]:
lines = open(os.path.join(data_dir, 'train.tsv')).readlines()
if task != 'CoLA':
lines = lines[1:]
dataset = []
for line in lines:
line = line.strip().split('\t')
if task == 'CoLA':
dataset.append({'label': line[1], 'text': [line[-1]]})
elif task == 'MNLI':
dataset.append({'label': line[-1], 'text': [line[8], line[9]]})
elif task == 'MRPC':
dataset.append({'label': line[0], 'text': [line[-2], line[-1]]})
elif task == 'QNLI':
dataset.append({'label': line[-1], 'text': [line[1], line[2]]})
elif task == 'QQP':
dataset.append({'label': line[-1], 'text': [line[3], line[4]]})
elif task == 'RTE':
dataset.append({'label': line[-1], 'text': [line[1], line[2]]})
elif task == 'SNLI':
dataset.append({'label': line[-1], 'text': [line[7], line[8]]})
elif task == 'SST-2':
dataset.append({'label': line[-1], 'text': [line[0]]})
elif task == 'STS-B':
dataset.append({'label': '0' if float(line[-1]) < 2.5 else '1', 'text': [line[-3], line[-2]]})
elif task == 'WNLI':
dataset.append({'label': line[-1], 'text': [line[1], line[2]]})
else:
raise NotImplementedError
else:
lines = pd.read_csv(os.path.join(data_dir, 'train.csv')).values.tolist()
dataset = []
for line in lines:
dataset.append({'label': line[0], 'text': [line[1]]})
return dataset
def search_template(model, tokenizer, task_name, k, seed, beam, output_dir, data_dir):
print('#', task_name, k, seed, beam)
dataset_path = os.path.join(data_dir, task_name, "{}-{}".format(k, seed))
dataset = load_dataset(task_name, dataset_path)
print('|', 'dataset examples')
print('|', dataset[0])
print('|', dataset[-1])
print()
# Manual label word mappings
map_of_mapping = {
'SST-2': {'0':'terrible','1':'great'},
'sst-5': {0:'terrible',1:'bad',2:'okay',3:'good',4:'great'},
'mr': {0:'terrible',1:'great'},
'cr': {0:'terrible',1:'great'},
'subj': {0:'subjective',1:'objective'},
'trec': {0:'Description',1:'Entity',2:'Expression',3:'Human',4:'Location',5:'Number'},
'mpqa': {0:'terrible',1:'great'},
'CoLA': {'0':'incorrect','1':'correct'},
'MRPC': {'0':'No','1':'Yes'},
'QQP': {'0':'No','1':'Yes'},
'STS-B': {'0':'No','1':'Yes'},
'MNLI': {'contradiction':'No','entailment':'Yes','neutral':'Maybe'},
'SNLI': {'contradiction':'No','entailment':'Yes','neutral':'Maybe'},
'QNLI': {'not_entailment':'No','entailment':'Yes'},
'RTE': {'not_entailment':'No','entailment':'Yes'}
}
mapping = map_of_mapping[task_name]
print('|', 'mapping')
print('|', mapping)
os.makedirs(output_dir, exist_ok=True)
os.makedirs(os.path.join(output_dir, task_name), exist_ok=True)
f = open(os.path.join(output_dir, task_name, "{}-{}.txt".format(k, seed)), 'w')
if task_name in ['SST-2', 'sst-5', 'mr', 'cr', 'subj', 'trec', 'CoLA', 'mpqa']:
# Single sentence tasks
# We take two kinds of templates: put [MASK] at the beginning or the end
template = "*cls**sentu_0**<extra_id_0>**label**<extra_id_1>**sep+*"
generate_text = generate(dataset, template, model, tokenizer, target_number=2, mapping=mapping, beam=beam, label=None, truncate='head')[:beam//2]
print("####### generated templates #######")
for text in generate_text:
# Transform T5 outputs to our template format
text = text.replace('<extra_id_0>', '*cls**sent_0*')
text = text.replace('<extra_id_1>', '*mask*')
text = text.replace('<extra_id_2>', '*sep+*')
text = text.replace('</s>', '*sep+*')
text = text.replace('▁', '_')
print(text)
f.write(text + '\n')
print("####### generated templates #######\n")
template = "*cls*.*<extra_id_0>**label**<extra_id_1>**+sentu_0**sep+*"
generate_text = generate(dataset, template, model, tokenizer, target_number=2, mapping=mapping, beam=beam, label=None, truncate='tail')[:beam//2]
print("####### generated templates #######")
for text in generate_text:
# Transform T5 outputs to our template format
text = text.replace('<extra_id_0>', '*cls*')
text = text.replace('<extra_id_1>', '*mask*')
text = text.replace('<extra_id_2>', '*+sent_0**sep+*')
text = text.replace('</s>', '*+sent_0**sep+*')
text = text.replace('▁', '_')
print(text)
f.write(text + '\n')
print("####### generated templates #######\n")
elif task_name in ['MRPC', 'QQP', 'STS-B', 'MNLI', 'SNLI', 'QNLI', 'RTE']:
# Sentence pair tasks
# We always put [MASK] between the two sentences
template = "*cls**sent-_0**<extra_id_0>**label**<extra_id_1>**+sentl_1**sep+*"
generate_text = generate(dataset, template, model, tokenizer, target_number=2, mapping=mapping, beam=beam, label=None)
print("####### generated templates #######")
for text in generate_text:
# Transform T5 outputs to our template format
text = text.replace('<extra_id_0>', '*cls**sent-_0*')
text = text.replace('<extra_id_1>', '*mask*')
text = text.replace('<extra_id_2>', '*+sentl_1**sep+*')
text = text.replace('</s>', '*+sentl_1**sep+*')
text = text.replace('▁', '_')
print(text)
f.write(text + '\n')
print("####### generated templates #######\n")
else:
raise NotImplementedError | null |
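A hedged sketch of how the T5 template search in the row above might be driven. The checkpoint name and the few-shot directory layout are illustrative; `generate()` moves tensors to CUDA, so a GPU is assumed, and the `get_text()` helper it calls is assumed to be defined elsewhere in the same module.

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Illustrative checkpoint choice.
model_name = "t5-large"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name).cuda().eval()

# Hypothetical few-shot layout expected by load_dataset(): data/k-shot/SST-2/16-42/train.tsv
search_template(
    model, tokenizer,
    task_name="SST-2", k=16, seed=42, beam=30,
    output_dir="auto_templates", data_dir="data/k-shot",
)
# Candidate templates are written, one per line, to auto_templates/SST-2/16-42.txt
```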
1,323 | from sentence_transformers import SentenceTransformer, util
import argparse
import os
import numpy as np
from tqdm import tqdm
import pandas as pd
def get_sentence(task, line):
if task in ['mr', 'sst-5', 'subj', 'trec', 'cr', 'mpqa']:
# Text classification tasks
if line[1] is None or pd.isna(line[1]):
return ''
else:
return line[1]
else:
# GLUE tasks
line = line.strip().split('\t')
if task == 'CoLA':
return line[-1]
elif task == 'MNLI':
return line[8] + ' ' + line[9]
elif task == 'MRPC':
return line[-2] + ' ' + line[-1]
elif task == 'QNLI':
return line[1] + ' ' + line[2]
elif task == 'QQP':
return line[3] + ' ' + line[4]
elif task == 'RTE':
return line[1] + ' ' + line[2]
elif task == 'SNLI':
return line[7] + ' ' + line[8]
elif task == 'SST-2':
return line[0]
elif task == 'STS-B':
return line[-3] + ' ' + line[-2]
elif task == 'WNLI':
return line[1] + ' ' + line[2]
else:
raise NotImplementedError | null |
1,324 | from sentence_transformers import SentenceTransformer, util
import argparse
import os
import numpy as np
from tqdm import tqdm
import pandas as pd
def split_header(task, lines):
"""Returns if the task file has a header or not."""
if task in ["CoLA"]:
return [], lines
elif task in ["MNLI", "MRPC", "QNLI", "QQP", "RTE", "SNLI", "SST-2", "STS-B", "WNLI"]:
return lines[0:1], lines[1:]
else:
raise ValueError("Unknown GLUE task.")
def load_datasets(data_dir, task, do_test=False):
dataset = {}
if task == "MNLI":
splits = ["train", "dev_matched"]
if do_test:
splits += ['test_matched', 'test_mismatched']
else:
splits = ["train", "dev"]
if do_test:
splits.append('test')
for split in splits:
if task in ['mr', 'sst-5', 'subj', 'trec', 'cr', 'mpqa']:
filename = os.path.join(data_dir, f"{split}.csv")
dataset[split] = pd.read_csv(filename, header=None).values.tolist()
else:
filename = os.path.join(data_dir, f"{split}.tsv")
with open(filename, "r") as f:
lines = f.readlines()
header, content = split_header(task, lines)
dataset[split] = content
return dataset | null |
1,326 | import numpy as np
import scipy
import math
import sklearn
import collections
from logging import getLogger
from .qa_utils import normalize_squad, qa_metrics
import sklearn.metrics
The provided code snippet includes the necessary dependencies for implementing the `perplexity` function. Write a Python function `def perplexity(outputs, targets, ignore_index=-100)` to solve the following problem:
Computes the perplexity.
Here is the function:
def perplexity(outputs, targets, ignore_index=-100):
"""Computes the perplexity."""
ce = -np.log(outputs).mean()
# ce = F.cross_entropy(torch.Tensor(outputs).view(-1, outputs.shape[-1]), torch.Tensor(targets).view(-1).long(), ignore_index=ignore_index)
return {"perplexity": float(np.exp(ce))} | Computes the perplexity. |
1,330 | import numpy as np
import scipy
import math
import sklearn
import collections
from logging import getLogger
from .qa_utils import normalize_squad, qa_metrics
import sklearn.metrics
def transform_for_generation(predictions, targets):
mapping = {k: i for i, k in enumerate(set(targets))}
targets = np.asarray([mapping[k] for k in targets])
predictions = np.asarray([mapping[k] if k in mapping else (t+1)%len(mapping) for t, k in zip(targets, predictions)])
return predictions, targets | null |
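A quick illustration of `transform_for_generation` above (assumed in scope): free-form generated label strings are mapped to integer ids, and any prediction outside the target label set is remapped to `(target_id + 1) % num_classes`, i.e. to a class guaranteed to count as an error.

```python
targets     = ["positive", "negative", "positive", "negative"]
predictions = ["positive", "negative", "banana",   "positive"]

preds, tgts = transform_for_generation(predictions, targets)
# "banana" is unknown, so it is forced onto the wrong class for its position;
# the last prediction is simply a normal mistake.
print((preds == tgts).mean())  # 0.5
```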
1,337 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForImageClassification,
)
from transformers import Trainer as HfTrainer
import torch.nn as nn
def get_prompts(task, tokenizer, data_args, template_id="0", verbalizer_id="0"):
# from openpromptu.prompts import ManualVerbalizer
# from openpromptu.prompts import ManualTemplate
# from openpromptu import TokenizerWrapper
# template = ManualTemplate(text = task.templates_text[template_id])
# verbalizer = ManualVerbalizer(tokenizer=tokenizer, classes = task.labels_list, label_words=task.verbalizers[verbalizer_id])
# tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
return None, None, None | null |
1,338 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForImageClassification,
)
from transformers import Trainer as HfTrainer
import torch.nn as nn
def preprocess_function(raw_example, **kwargs):
# from IPython import embed; embed(header="Therefa")
tokenizer = kwargs['tokenizer']
# print(np.array(raw_example['img']).shape)
model_inputs = tokenizer(np.array(raw_example['image']), return_tensors='pt')
model_inputs['pixel_values'] = model_inputs['pixel_values'].squeeze()
model_inputs['labels'] = raw_example['label']
return model_inputs | null |
1,339 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForImageClassification,
)
from transformers import Trainer as HfTrainer
import torch.nn as nn
def compute_metrics(eval_preds, dataset_name, eval_metric):
# from IPython import embed; embed(header="In compute metrics")
preds, labels = eval_preds.predictions, eval_preds.label_ids
preds = np.argmax(preds, axis=-1)
result = {}
average_metrics = []
for metric in eval_metric:
metric_item = metric(preds, labels)
metric_value = list(metric_item.values())
result.update(metric_item)
average_metrics.extend(metric_value)
print("average:",average_metrics)
average_metric = sum(average_metrics)/len(average_metrics)
result.update({"average_metrics":average_metric})
return result | null |
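A self-contained sketch of how the `compute_metrics` above might be exercised, using a namedtuple stand-in for the Hugging Face `EvalPrediction` object and a made-up accuracy metric in place of the task's real `eval_metric` list:

```python
from collections import namedtuple
import numpy as np

EvalPreds = namedtuple("EvalPreds", ["predictions", "label_ids"])  # stand-in for transformers.EvalPrediction

def accuracy(preds, labels):
    # Illustrative metric with the expected (preds, labels) -> dict interface.
    return {"acc": float((preds == labels).mean())}

eval_preds = EvalPreds(predictions=np.array([[0.1, 0.9], [0.8, 0.2]]), label_ids=np.array([1, 1]))
print(compute_metrics(eval_preds, dataset_name="cifar10", eval_metric=[accuracy]))
# {'acc': 0.5, 'average_metrics': 0.5}
```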
1,340 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForImageClassification,
)
from transformers import Trainer as HfTrainer
import torch.nn as nn
def mask_token_func(tokenizer, ith_mask=0):
return tokenizer.mask_token | null |
1,341 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForImageClassification,
)
from transformers import Trainer as HfTrainer
import torch.nn as nn
def get_remove_columns(dataset_features):
# dataset_features.pop("label")
# print("remove_columns: {}".format(dataset_features))
return dataset_features | null |
1,342 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForImageClassification,
)
from transformers import Trainer as HfTrainer
import torch.nn as nn
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.dropout_rate = 0.0
tokenizer = AutoFeatureExtractor.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.num_labels = model_args.num_classes
old_classifier = model.classifier
model.classifier = nn.Linear(old_classifier.in_features, config.num_labels)
return config, tokenizer, model | null |
1,343 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def get_remove_columns(dataset_features):
return dataset_features | null |
1,344 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def preprocess_function(raw_example, **kwargs):
# max_target_length += 1
tokenizer = kwargs['tokenizer']
data_args = kwargs['data_args']
template = kwargs['template']
verbalizer = kwargs['verbalizer']
tokenizer_wrapper = kwargs['tokenizer_wrapper']
split = kwargs['split']
example = InputExample(**raw_example)
example = verbalizer.wrap_one_example(example)
example, other = template.wrap_one_example(example)
input_sentence = tokenizer_wrapper.merge_wrapped_example(example)
model_inputs = tokenizer(input_sentence, max_length=256,
padding="max_length", truncation=True)
with tokenizer.as_target_tokenizer():
label = tokenizer(other['tgt_text']).input_ids
model_inputs["labels"] = label
return model_inputs | null |
1,345 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.dropout_rate = 0.0
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
return config, tokenizer, model | null |
1,346 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def mask_token_func(tokenizer, ith_mask):
return tokenizer.additional_special_tokens[ith_mask]
def get_prompts(task, tokenizer, data_args, template_id="0", verbalizer_id="0"):
from openpromptu.prompts import GenerationVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = GenerationVerbalizer(tokenizer=tokenizer, classes = task.labels_list, label_words=task.verbalizers[verbalizer_id])
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
return template, verbalizer, tokenizer_wrapper | null |
1,347 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
CLIPConfig,
CLIPProcessor,
CLIPModel,
)
from transformers import ViTFeatureExtractor
from PIL import Image
from transformers import Trainer as HfTrainer
import torch.nn as nn
def mask_token_func(tokenizer, ith_mask=0):
return tokenizer.mask_token
def get_prompts(task, tokenizer, data_args, template_id="clip", verbalizer_id="clip"):
from openpromptu.prompts import GenerationVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = GenerationVerbalizer(tokenizer=tokenizer, classes = task.labels_list, label_words=task.verbalizers[verbalizer_id])
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer.tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
return template, verbalizer, tokenizer_wrapper | null |
1,348 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
CLIPConfig,
CLIPProcessor,
CLIPModel,
)
from transformers import ViTFeatureExtractor
from PIL import Image
from transformers import Trainer as HfTrainer
import torch.nn as nn
def preprocess_function(raw_example, **kwargs):
# from IPython import embed; embed(header="Therefa")
tokenizer = kwargs['tokenizer']
# ["a photo of {}" for i in range()]
data_args = kwargs['data_args']
template = kwargs['template']
verbalizer = kwargs['verbalizer']
tokenizer_wrapper = kwargs['tokenizer_wrapper']
example = InputExample(raw_example)
texts = []
for candidate_label in range(verbalizer.num_classes):
tgt_text = verbalizer.wrap_one_example(label=candidate_label)
wrapped_example, other = template.wrap_one_example(example)
input_sentence = tokenizer_wrapper.merge_wrapped_example(wrapped_example, tgt_texts=[tgt_text])
texts.append(input_sentence)
# from IPython import embed; embed()/
image = Image.open(raw_example['image_file_path'])
model_inputs = tokenizer(images=image, text=texts, max_length=16, padding="max_length", truncation=True, return_tensors='pt')
# from IPython import embed; embed()
model_inputs["pixel_values"] = model_inputs["pixel_values"].squeeze()
model_inputs["label"] = example.label
return model_inputs | null |
1,349 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
CLIPConfig,
CLIPProcessor,
CLIPModel,
)
from transformers import ViTFeatureExtractor
from PIL import Image
from transformers import Trainer as HfTrainer
import torch.nn as nn
def compute_metrics(eval_preds, dataset_name, eval_metric):
# from IPython import embed; embed(header="In compute metrics")
preds, labels = eval_preds.predictions, eval_preds.label_ids
preds = np.argmax(preds, axis=-1)
result = {}
average_metrics = []
for metric in eval_metric:
metric_item = metric(preds, labels)
metric_value = list(metric_item.values())
result.update(metric_item)
average_metrics.extend(metric_value)
print("average:",average_metrics)
average_metric = sum(average_metrics)/len(average_metrics)
result.update({"average_metrics":average_metric})
return result | null |
1,350 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
CLIPConfig,
CLIPProcessor,
CLIPModel,
)
from transformers import ViTFeatureExtractor
from PIL import Image
from transformers import Trainer as HfTrainer
import torch.nn as nn
def get_remove_columns(dataset_features):
# from IPython import embed; embed(header="in remoev")
dataset_features.remove("labels")
print("remove_columns: {}".format(dataset_features))
return dataset_features | null |
1,351 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
CLIPConfig,
CLIPProcessor,
CLIPModel,
)
from transformers import ViTFeatureExtractor
from PIL import Image
from transformers import Trainer as HfTrainer
import torch.nn as nn
def get_backbone(model_args, **kwargs):
config = CLIPConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# config.dropout_rate = 0.0
tokenizer = CLIPProcessor.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = CLIPModel.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# config.num_labels = model_args.num_classes
# old_classifier = model.classifier
# model.classifier = nn.Linear(old_classifier.in_features, config.num_labels)
return config, tokenizer, model | null |
1,352 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
)
from transformers import Trainer as HfTrainer
def preprocess_function(raw_example, **kwargs):
tokenizer = kwargs['tokenizer']
data_args = kwargs['data_args']
template = kwargs['template']
verbalizer = kwargs['verbalizer']
tokenizer_wrapper = kwargs['tokenizer_wrapper']
example = InputExample(**raw_example)
example, other = template.wrap_one_example(example)
input_sentence = tokenizer_wrapper.merge_wrapped_example(example)
model_inputs = tokenizer(input_sentence, max_length=data_args.max_source_length,
padding="max_length", truncation=True)
return model_inputs | null |
1,353 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
)
from transformers import Trainer as HfTrainer
def compute_metrics(eval_preds, dataset_name, eval_metric):
# from IPython import embed; embed(header="In compute metrics")
preds, labels = eval_preds.predictions, eval_preds.label_ids
preds = np.argmax(preds, axis=-1)
result = {}
average_metrics = []
for metric in eval_metric:
metric_item = metric(preds, labels)
metric_value = list(metric_item.values())
result.update(metric_item)
average_metrics.extend(metric_value)
print("average:",average_metrics)
average_metric = sum(average_metrics)/len(average_metrics)
result.update({"average_metrics":average_metric})
return result | null |
1,354 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
)
from transformers import Trainer as HfTrainer
def get_remove_columns(dataset_features):
dataset_features.remove("label")
return dataset_features | null |
1,355 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
)
from transformers import Trainer as HfTrainer
def mask_token_func(tokenizer, ith_mask=0):
return tokenizer.mask_token
def get_prompts(task, tokenizer, data_args, template_id="0", verbalizer_id="0"):
from openpromptu.prompts import ManualVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = ManualVerbalizer(tokenizer=tokenizer, classes = task.labels_list, label_words=task.verbalizers[verbalizer_id])
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
# from IPython import embed; embed()
return template, verbalizer, tokenizer_wrapper | null |
1,356 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
)
from transformers import Trainer as HfTrainer
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.dropout_rate = 0.0
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model.resize_token_embeddings(len(tokenizer))
return config, tokenizer, model | null |
1,359 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
import numpy as np
from transformers import (
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
)
from transformers import Trainer as HfTrainer
def get_remove_columns(dataset_features):
# from IPython import embed; embed(header="get_remove_columns")
dataset_features.remove("label")
return dataset_features | null |
1,362 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def preprocess_function(raw_example, **kwargs):
tokenizer = kwargs['tokenizer']
data_args = kwargs['data_args']
template = kwargs['template']
verbalizer = kwargs['verbalizer']
tokenizer_wrapper = kwargs['tokenizer_wrapper']
example = InputExample(**raw_example)
# example = verbalizer.wrap_one_example(example)
example, other = template.wrap_one_example(example)
input_sentence = tokenizer_wrapper.merge_wrapped_example(example)
model_inputs = tokenizer(input_sentence, max_length=data_args.max_source_length,
padding="max_length", truncation=True)
return model_inputs | null |
1,363 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def compute_metrics(eval_preds, dataset_name, eval_metric):
pass | null |
1,364 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def get_remove_columns(dataset_features):
# dataset_features.remove("label")
return dataset_features | null |
1,365 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def mask_token_func(tokenizer, ith_mask=0):
return tokenizer.pad_token
def get_prompts(task, tokenizer, data_args, template_id="0", verbalizer_id="0"):
from openpromptu.prompts import GenerationVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = GenerationVerbalizer(tokenizer=tokenizer, classes = None, label_words=None)
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
return template, verbalizer, tokenizer_wrapper | null |
1,366 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# config.dropout_rate = 0.0
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
return config, tokenizer, model | null |
1,369 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
# model_args.config_name if model_args.config_name else model_args.model_name_or_path,
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.dropout_rate = 0.0
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
return config, tokenizer, model | null |
1,370 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def mask_token_func(tokenizer, ith_mask=0):
# NOTE: the body was missing in this row; mirroring the analogous seq2seq backbone above (an assumption).
return tokenizer.additional_special_tokens[ith_mask]
def get_prompts(task, tokenizer, data_args, template_id="0", verbalizer_id="0"):
from openpromptu.prompts import GenerationVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = GenerationVerbalizer(tokenizer=tokenizer, classes = task.labels_list, label_words=task.verbalizers[verbalizer_id])
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
return template, verbalizer, tokenizer_wrapper | null |
1,374 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def mask_token_func(tokenizer, ith_mask=0):
return tokenizer.pad_token
def get_prompts(task, tokenizer, data_args, template_id="0", verbalizer_id="0"):
from openpromptu.prompts import GenerationVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = GenerationVerbalizer(tokenizer=tokenizer, classes = None, label_words=None)
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="tail", mask_token_func=mask_token_func)
return template, verbalizer, tokenizer_wrapper | null |
1,375 | from openpromptu.data_utils import InputExample
import torch
from transformers.data.data_collator import torch_default_data_collator
from transformers.data.data_collator import DataCollatorMixin as HfDataCollatorMixin
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import numpy as np
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
import copy
from torch.nn import CrossEntropyLoss
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# config.dropout_rate = 0.0
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
    if getattr(tokenizer, "pad_token", None) is None:
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
return config, tokenizer, model | null |
1,376 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
BlenderbotForConditionalGeneration,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def get_remove_columns(dataset_features):
return dataset_features | null |
1,377 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
BlenderbotForConditionalGeneration,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def preprocess_function(raw_example, **kwargs):
# max_target_length += 1
tokenizer = kwargs['tokenizer']
data_args = kwargs['data_args']
template = kwargs['template']
verbalizer = kwargs['verbalizer']
tokenizer_wrapper = kwargs['tokenizer_wrapper']
split = kwargs['split']
example = InputExample(**raw_example)
example = verbalizer.wrap_one_example(example)
example, other = template.wrap_one_example(example)
input_sentence = tokenizer_wrapper.merge_wrapped_example(example)
model_inputs = tokenizer(input_sentence, max_length=data_args.max_source_length,
padding="max_length", truncation=True)
with tokenizer.as_target_tokenizer():
label = tokenizer(other['tgt_text']).input_ids
model_inputs["labels"] = label
# from IPython import embed; embed()
return model_inputs | null |
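A sketch of how preprocess_function might be applied to a HuggingFace `datasets` split; `model_args`, `data_args`, `task`, and `raw_dataset` are placeholders for objects the original script builds elsewhere, and get_backbone / get_prompts / get_remove_columns are the companion helpers shown in the neighbouring snippets.

from functools import partial

config, tokenizer, model = get_backbone(model_args)
template, verbalizer, tokenizer_wrapper = get_prompts(task, tokenizer, data_args)

tokenized_dataset = raw_dataset.map(
    partial(
        preprocess_function,
        tokenizer=tokenizer,
        data_args=data_args,
        template=template,
        verbalizer=verbalizer,
        tokenizer_wrapper=tokenizer_wrapper,
        split="train",
    ),
    remove_columns=get_remove_columns(list(raw_dataset.features)),
)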
1,378 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
BlenderbotForConditionalGeneration,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def get_backbone(model_args, **kwargs):
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.dropout_rate = 0.0
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = BlenderbotForConditionalGeneration.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# from IPython import embed; embed()
return config, tokenizer, model | null |
1,379 | from openpromptu.data_utils import InputExample
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import (
AutoConfig,
BlenderbotForConditionalGeneration,
AutoTokenizer,
)
from transformers.data.data_collator import DataCollatorForSeq2Seq as DataCollator
import torch
def mask_token_func(tokenizer, ith_mask=0):
return ""
def get_prompts(task, tokenizer, data_args, template_id="blenderbot", verbalizer_id="blenderbot"):
from openpromptu.prompts import GenerationVerbalizer
from openpromptu.prompts import ManualTemplate
from openpromptu import TokenizerWrapper
template = ManualTemplate(text = task.templates_text[template_id])
verbalizer = GenerationVerbalizer(tokenizer=tokenizer, classes = task.labels_list, label_words=task.verbalizers[verbalizer_id])
tokenizer_wrapper = TokenizerWrapper(max_seq_length=data_args.max_source_length, tokenizer=tokenizer, truncate_method="balanced", mask_token_func=mask_token_func)
return template, verbalizer, tokenizer_wrapper | null |
1,380 | import numpy as np
import re
The provided code snippet includes necessary dependencies for implementing the `round_stsb_target` function. Write a Python function `def round_stsb_target(label)` to solve the following problem:
STSB maps two sentences to a floating point number between 1 and 5 representing their semantic similarity. Since we are treating all tasks as text-to-text tasks we need to convert this floating point number to a string. The vast majority of the similarity score labels in STSB are in the set [0, 0.2, 0.4, ..., 4.8, 5.0]. So, we first round the number to the closest entry in this set, and then we convert the result to a string (literally e.g. "3.4"). This converts STSB roughly into a 26-class classification dataset. Args: label: original label. Returns: A preprocessed label.
Here is the function:
def round_stsb_target(label):
"""STSB maps two sentences to a floating point number between 1 and 5
representing their semantic similarity. Since we are treating all tasks as
text-to-text tasks we need to convert this floating point number to a string.
The vast majority of the similarity score labels in STSB are in the set
[0, 0.2, 0.4, ..., 4.8, 5.0]. So, we first round the number to the closest
entry in this set, and then we convert the result to a string (literally e.g.
"3.4"). This converts STSB roughly into a 26-class classification dataset.
Args:
label: original label.
Returns:
A preprocessed label.
"""
    # Round to the nearest multiple of 0.2 (the 26-value grid described in the docstring).
    return np.round(label * 5) / 5
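Two quick examples of the 0.2-grid rounding (for illustration only):

print(round_stsb_target(3.41))  # -> 3.4
print(round_stsb_target(2.49))  # -> 2.4 (nearest multiple of 0.2)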
1,381 | import numpy as np
import regex as re  # the \p{...} Unicode property classes used below are not supported by the stdlib `re` module
The provided code snippet includes necessary dependencies for implementing the `pad_punctuation` function. Write a Python function `def pad_punctuation(text)` to solve the following problem:
Re-implementation of _pad_punctuation in t5. This function adds spaces around punctuation. While this pads punctuation as expected, it has the unexpected effect of padding certain unicode characters with accents, with spaces as well. For instance: "François" becomes "Fran ç ois".
Here is the function:
def pad_punctuation(text):
"""Re-implementation of _pad_punctuation in t5. This function adds spaces
around punctuation. While this pads punctuation as expected, it has the
    unexpected effect of padding certain unicode characters with accents, with
    spaces as well. For instance: "François" becomes "Fran ç ois".
    """
# Pad everything except for: underscores (_), whitespace (\s),
# numbers (\p{N}), letters (\p{L}) and accent characters (\p{M}).
text = re.sub(r'([^_\s\p{N}\p{L}\p{M}])', r' \1 ', text)
# Collapse consecutive whitespace into one space.
text = re.sub(r'\s+', ' ', text)
return text | Re-implementation of _pad_punctuation in t5. This function adds spaces around punctuation. While this pads punctuation as expected, it has the unexpected effected of padding certain unicode characters with accents, with spaces as well. For instance: "François" becomes "Fran ç ois |
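For example (the \p{...} classes require the third-party regex module, as noted in the import above):

print(pad_punctuation("Hello, world! #1 test"))
# -> 'Hello , world ! # 1 test'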
1,382 | import os
import argparse
import random
import json
from examples_prompt.search_space import AllBackboneSearchSpace, AllDeltaSearchSpace, BaseSearchSpace, DatasetSearchSpace
import optuna
from functools import partial
from optuna.samplers import TPESampler
import shutil
import time
import subprocess
def objective_singleseed(args, unicode, search_space_sample ):
os.mkdir(f"{args.output_dir}/{unicode}")
search_space_sample.update({"output_dir": f"{args.output_dir}/{unicode}"})
with open(f"{args.output_dir}/{unicode}/this_configs.json", 'w') as fout:
json.dump(search_space_sample, fout, indent=4,sort_keys=True)
command = "CUDA_VISIBLE_DEVICES={} ".format(args.cuda_id)
command += f"{args.pythonpath} {args.main_file_name} "
command += f"{args.output_dir}/{unicode}/this_configs.json"
command += f" >> {args.output_dir}/{unicode}/output.log 2>&1"
print("======"*5+"\n"+command)
p = subprocess.Popen(command, cwd=f"{args.pathbase}", shell=True)
print(f"wait for subprocess \"{command}\" to complete")
p.wait()
# if status_code != 0:
# with open(f"{args.output_dir}/{args.cuda_id}.log",'r') as flog:
# lastlines = " ".join(flog.readlines()[-100:])
# if "RuntimeError: CUDA out of memory." in lastlines:
# time.sleep(600) # sleep ten minites and try again
# shutil.rmtree(f"{args.output_dir}/{unicode}/")
# return objective_singleseed(args, unicode, search_space_sample)
# else:
# raise RuntimeError("error in {}".format(unicode))
with open(f"{args.output_dir}/{unicode}/results.json", 'r') as fret:
results =json.load(fret)
for filename in os.listdir(f"{args.output_dir}/{unicode}/"):
if not filename.endswith("this_configs.json"):
full_file_name = f"{args.output_dir}/{unicode}/{filename}"
if os.path.isdir(full_file_name):
shutil.rmtree(f"{args.output_dir}/{unicode}/{filename}")
else:
os.remove(full_file_name)
results_all_test_datasets = []
print("results:", results)
for datasets in results['test']:
results_all_test_datasets.append(results['test'][datasets]['test_average_metrics'])
return sum(results_all_test_datasets)/len(results_all_test_datasets)#results['test']['average_metrics']
def objective(trial, args=None):
search_space_sample = {}
search_space_sample.update(BaseSearchSpace().get_config(trial, args))
search_space_sample.update(AllBackboneSearchSpace[args.model_name]().get_config(trial, args))
search_space_sample.update(DatasetSearchSpace(args.dataset).get_config(trial, args))
search_space_sample.update(AllDeltaSearchSpace[args.delta_type]().get_config(trial, args))
results = []
for seed in range(42, 42+args.repeat_time):
search_space_sample.update({"seed": seed})
unicode = random.randint(0, 100000000)
while os.path.exists(f"{args.output_dir}/{unicode}"):
unicode = unicode+1
trial.set_user_attr("trial_dir", f"{args.output_dir}/{unicode}")
res = objective_singleseed(args, unicode = unicode, search_space_sample=search_space_sample)
results.append(res)
ave_res = sum(results)/len(results)
return -ave_res | null |
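A hypothetical driver for the objective above; `args` comes from the script's own argument parser, and the trial count is a placeholder.

study = optuna.create_study(sampler=TPESampler(seed=42))
study.optimize(partial(objective, args=args), n_trials=20)
print(study.best_trial.params)

Because objective returns the negated average metric, the study's default minimize direction effectively maximizes the test metric.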
1,383 | import json
import os
import re
The provided code snippet includes necessary dependencies for implementing the `create_dir` function. Write a Python function `def create_dir(output_dir)` to solve the following problem:
Checks whether to the output_dir already exists and creates it if not. Args: output_dir: path to the output_dir
Here is the function:
def create_dir(output_dir):
"""
Checks whether to the output_dir already exists and creates it if not.
Args:
output_dir: path to the output_dir
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir) | Checks whether to the output_dir already exists and creates it if not. Args: output_dir: path to the output_dir |
1,384 | import json
import os
import re
def get_last_checkpoint(output_dir):
if os.path.exists(os.path.join(output_dir, 'pytorch_model.bin')):
return output_dir
return None | null |
1,385 | import json
import os
import re
def save_json(filepath, dictionary):
with open(filepath, "w") as outfile:
json.dump(dictionary, outfile) | null |
1,386 | import json
import os
import re
def read_json(filepath):
    with open(filepath) as f:
        return json.load(f)
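A trivial round-trip using this helper together with save_json from the previous snippet (the file name is arbitrary):

save_json("results.json", {"accuracy": 0.91})
print(read_json("results.json"))  # {'accuracy': 0.91}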
1,387 | import time
import os
import torch
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score
import bmtrain as bmt
from model_center.arguments import add_model_config_args, add_training_args, argparse
from model_center.model import Bert
from model_center.tokenizer import BertTokenizer
from model_center.dataset.bertdataset import DATASET
from model_center.utils import print_inspect
from model_center.layer import Linear
from model_center.dataset import DistributedDataLoader
import opendelta as od
from opendelta import LoraModel, AdapterModel, CompacterModel, LowRankAdapterModel, BitFitModel, ParallelAdapterModel
from opendelta.utils.inspect import inspect_optimizer_statistics
from bigmodelvis import Visualization
def get_tokenizer(args):
tokenizer = BertTokenizer.from_pretrained(args.model_config)
return tokenizer
def get_model(args):
num_types = {
"BoolQ" : 2,
"CB" : 3,
"COPA" : 1,
"RTE" : 2,
"WiC" : 2,
}
    # BertModel is the task-specific wrapper (a Bert encoder plus a Linear classification
    # head, per the imports above); its definition is not included in this snippet.
    model = BertModel(args, num_types[args.dataset_name])
from bigmodelvis import Visualization
Visualization(model).structure_graph()
if args.delta_type == "lora":
delta_model = LoraModel(backbone_model=model, modified_modules=['project_q', 'project_k'], backend='bmt')
elif args.delta_type == "bitfit":
delta_model = BitFitModel(backbone_model=model, modified_modules=['self_att', 'ffn', 'layernorm'], backend='bmt')
elif args.delta_type == "adapter":
delta_model = AdapterModel(backbone_model=model, modified_modules=['self_att', 'ffn'], backend='bmt')
elif args.delta_type == "compacter":
delta_model = CompacterModel(backbone_model=model, modified_modules=['self_att', 'ffn'], backend='bmt')
elif args.delta_type == "low_rank_adapter":
delta_model = LowRankAdapterModel(backbone_model=model, modified_modules=['self_att', 'ffn'], backend='bmt')
elif args.delta_type == "parallel_adapter":
delta_model = ParallelAdapterModel(backbone_model=model, modified_modules=['self_att', 'self_att', 'ffn.ffn', 'ffn.ffn'], backend='bmt')
print("after modify")
delta_model.log()
# This will visualize the backbone after modification and other information.
delta_model.freeze_module(exclude=["deltas"], set_state_dict=True)
print("after freeze")
delta_model.log()
return model
def get_optimizer(args, model):
optimizer = bmt.optim.AdamOffloadOptimizer(model.parameters(), weight_decay=args.weight_decay)
return optimizer
def get_learning_rate_scheduler(args, optimizer):
if args.lr_decay_iters is None:
args.lr_decay_iters = args.train_iters * args.epochs
if args.lr_decay_style == "noam":
lr_scheduler = bmt.lr_scheduler.Noam(optimizer,
start_lr = args.lr,
warmup_iter = args.warmup_iters,
end_iter = args.lr_decay_iters,
num_iter = args.start_step)
elif args.lr_decay_style == "constant":
lr_scheduler = bmt.lr_scheduler.NoDecay(optimizer,
start_lr = args.lr,
warmup_iter = args.warmup_iters,
end_iter = -1,
num_iter = args.start_step)
elif args.lr_decay_style == "linear":
lr_scheduler = bmt.lr_scheduler.Linear(optimizer,
start_lr = args.lr,
warmup_iter = args.warmup_iters,
end_iter = args.lr_decay_iters,
num_iter = args.start_step)
elif args.lr_decay_style == "exponential":
lr_scheduler = bmt.lr_scheduler.Exponential(optimizer,
start_lr = args.lr,
warmup_iter = args.warmup_iters,
end_iter = args.lr_decay_iters,
num_iter = args.start_step)
elif args.lr_decay_style == "cosine":
lr_scheduler = bmt.lr_scheduler.Cosine(optimizer,
start_lr = args.lr,
warmup_iter = args.warmup_iters,
end_iter = args.lr_decay_iters,
num_iter = args.start_step)
else:
raise ValueError(f"lr_scheduler of type {args.lr_decay_style} is not supported yet.")
return lr_scheduler
def inspect_optimizer_statistics(optimizer, verbose=True):
stats = {}
for id, param_group in enumerate(optimizer.param_groups):
stat = {}
fine_grain_info = [(p.numel(), p.requires_grad) for p in param_group['params']]
stat['total_parameters'] = sum(n for n, r in fine_grain_info)
stat['trainable_parameters'] = sum(n for n, r in fine_grain_info if r)
stat['trainable_ratio'] = "{:.6f}%".format(stat['trainable_parameters']/stat['total_parameters']*100)
for key in param_group:
if key != 'params':
stat[key] = param_group[key]
stats[f'param_group_{id}'] = stat
if verbose:
        logger.info(f"optimizer info: {stats}")  # `logger` is assumed to be set up at module level in the original source
return stat
def setup_model_and_optimizer(args):
# get the tokenizer
tokenizer = get_tokenizer(args)
# get the model
model = get_model(args)
bmt.synchronize()
# get the optimizer and lr_scheduler
optimizer = get_optimizer(args, model)
inspect_optimizer_statistics(optimizer)
lr_scheduler = get_learning_rate_scheduler(args, optimizer)
bmt.synchronize()
# get the memory usage
bmt.print_rank("Model mem\n", torch.cuda.memory_summary())
bmt.synchronize()
return tokenizer, model, optimizer, lr_scheduler | null |
1,388 | import time
import os
import torch
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score
import bmtrain as bmt
from model_center.arguments import add_model_config_args, add_training_args, argparse
def get_args():
parser = argparse.ArgumentParser()
parser = add_model_config_args(parser)
parser = add_training_args(parser)
group = parser.add_argument_group('delta', 'delta configurations')
group.add_argument('--delta-type', '--delta_type', type=str, help='delta type')
args = parser.parse_args()
return args
from model_center.model import Bert
from model_center.tokenizer import BertTokenizer
from model_center.dataset.bertdataset import DATASET
from model_center.utils import print_inspect
from model_center.layer import Linear
from model_center.dataset import DistributedDataLoader
import opendelta as od
from opendelta import LoraModel, AdapterModel, CompacterModel, LowRankAdapterModel, BitFitModel, ParallelAdapterModel
from opendelta.utils.inspect import inspect_optimizer_statistics
from bigmodelvis import Visualization
def initialize():
# get arguments
args = get_args()
# init bmt
bmt.init_distributed(seed = args.seed)
# init save folder
    if args.save is not None:
os.makedirs(args.save, exist_ok=True)
return args | null |
1,389 | import time
import os
import torch
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score
import bmtrain as bmt
from model_center.arguments import add_model_config_args, add_training_args, argparse
from model_center.model import Bert
from model_center.tokenizer import BertTokenizer
from model_center.dataset.bertdataset import DATASET
from model_center.utils import print_inspect
from model_center.layer import Linear
from model_center.dataset import DistributedDataLoader
import opendelta as od
from opendelta import LoraModel, AdapterModel, CompacterModel, LowRankAdapterModel, BitFitModel, ParallelAdapterModel
from opendelta.utils.inspect import inspect_optimizer_statistics
from bigmodelvis import Visualization
def prepare_dataset(args, tokenizer, base_path, dataset_name, rank, world_size):
splits = ['train', 'dev', 'test']
dataset = {}
for split in splits:
dataset[split] = DATASET[dataset_name](base_path, split, rank, world_size, tokenizer, args.max_encoder_length)
return dataset | null |
1,390 | import time
import os
import torch
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score
import bmtrain as bmt
from model_center.arguments import add_model_config_args, add_training_args, argparse
from model_center.model import Bert
from model_center.tokenizer import BertTokenizer
from model_center.dataset.bertdataset import DATASET
from model_center.utils import print_inspect
from model_center.layer import Linear
from model_center.dataset import DistributedDataLoader
import opendelta as od
from opendelta import LoraModel, AdapterModel, CompacterModel, LowRankAdapterModel, BitFitModel, ParallelAdapterModel
from opendelta.utils.inspect import inspect_optimizer_statistics
from bigmodelvis import Visualization
def finetune(args, tokenizer, model, optimizer, lr_scheduler, dataset):
loss_func = bmt.loss.FusedCrossEntropy(ignore_index=-100)
optim_manager = bmt.optim.OptimManager(loss_scale=args.loss_scale)
optim_manager.add_optimizer(optimizer, lr_scheduler)
# print_inspect(model, '*') # too much output
for epoch in range(12):
dataloader = {
"train": DistributedDataLoader(dataset['train'], batch_size=args.batch_size, shuffle=True),
"dev": DistributedDataLoader(dataset['dev'], batch_size=args.batch_size, shuffle=False),
}
model.train()
for it, data in enumerate(dataloader['train']):
if args.dataset_name == 'COPA':
input_ids0 = data["input_ids0"]
attention_mask0 = data["attention_mask0"]
token_type_ids0 = data["token_type_ids0"]
input_ids1 = data["input_ids1"]
attention_mask1 = data["attention_mask1"]
token_type_ids1 = data["token_type_ids1"]
labels = data["labels"]
else:
input_ids = data["input_ids"]
attention_mask = data["attention_mask"]
token_type_ids = data["token_type_ids"]
labels = data["labels"]
torch.cuda.synchronize()
st_time = time.time()
if args.dataset_name == 'COPA':
logits = torch.cat([
model(input_ids0, attention_mask=attention_mask0, token_type_ids=token_type_ids0),
model(input_ids1, attention_mask=attention_mask1, token_type_ids=token_type_ids1),
], dim=1)
else:
logits = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
loss = loss_func(logits.view(-1, logits.shape[-1]), labels.view(-1))
global_loss = bmt.sum_loss(loss).item()
optim_manager.zero_grad()
optim_manager.backward(loss)
grad_norm = optim_manager.clip_grad_norm(optimizer.param_groups, args.clip_grad, norm_type = 2)
optim_manager.step()
torch.cuda.synchronize()
elapsed_time = time.time() - st_time
# from IPython import embed; embed(header="25252")
bmt.print_rank(
"train | epoch {:3d} | Iter: {:6d}/{:6d} | loss: {:.4f} | lr: {:.4e}, scale: {:10.4f} | grad_norm: {:.4f} | time: {:.3f}".format(
epoch,
it,
len(dataloader["train"]),
global_loss,
lr_scheduler.current_lr,
int(optim_manager.loss_scale),
grad_norm,
elapsed_time,
)
)
model.eval()
with torch.no_grad():
for split in ['dev']:
pd = []
gt = []
for it, data in enumerate(dataloader[split]):
if args.dataset_name == 'COPA':
input_ids0 = data["input_ids0"]
attention_mask0 = data["attention_mask0"]
token_type_ids0 = data["token_type_ids0"]
input_ids1 = data["input_ids1"]
attention_mask1 = data["attention_mask1"]
token_type_ids1 = data["token_type_ids1"]
labels = data["labels"]
logits = torch.cat([
model(input_ids0, attention_mask=attention_mask0, token_type_ids=token_type_ids0),
model(input_ids1, attention_mask=attention_mask1, token_type_ids=token_type_ids1),
], dim=1)
else:
input_ids = data["input_ids"]
attention_mask = data["attention_mask"]
token_type_ids = data["token_type_ids"]
labels = data["labels"]
logits = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
loss = loss_func(logits.view(-1, logits.shape[-1]), labels.view(-1))
logits = logits.argmax(dim=-1)
pd.extend(logits.cpu().tolist())
gt.extend(labels.cpu().tolist())
bmt.print_rank(
"{} | epoch {:3d} | Iter: {:6d}/{:6d} | loss: {:.4f}".format(
split,
epoch,
it,
len(dataloader[split]),
loss,
)
)
pd = bmt.gather_result(torch.tensor(pd).int()).cpu().tolist()
gt = bmt.gather_result(torch.tensor(gt).int()).cpu().tolist()
bmt.print_rank(f"{split} epoch {epoch}:")
if args.dataset_name in ["BoolQ", "CB", "COPA", "RTE", "WiC", "WSC"]:
acc = accuracy_score(gt, pd)
bmt.print_rank(f"accuracy: {acc*100:.2f}")
if args.dataset_name in ["CB"]:
                    rcl = recall_score(gt, pd, average="macro")
                    f1 = f1_score(gt, pd, average="macro")
bmt.print_rank(f"recall: {rcl*100:.2f}")
bmt.print_rank(f"Average F1: {f1*100:.2f}") | null |
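A hypothetical entry point tying these helpers together, mirroring the usual ModelCenter example layout; the exact argument names (e.g. base_path) are assumptions.

def main():
    args = initialize()
    tokenizer, model, optimizer, lr_scheduler = setup_model_and_optimizer(args)
    dataset = prepare_dataset(
        args, tokenizer, args.base_path, args.dataset_name,
        bmt.rank(), bmt.world_size(),
    )
    finetune(args, tokenizer, model, optimizer, lr_scheduler, dataset)

if __name__ == "__main__":
    main()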
1,391 |
def SetConsoleTextAttribute(stream_id, attrs):
handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs) | null |
1,392 | STDOUT = -11
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [
wintypes.LPCWSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
_GetConsoleMode = windll.kernel32.GetConsoleMode
_GetConsoleMode.argtypes = [
wintypes.HANDLE,
POINTER(wintypes.DWORD)
]
_GetConsoleMode.restype = wintypes.BOOL
_SetConsoleMode = windll.kernel32.SetConsoleMode
_SetConsoleMode.argtypes = [
wintypes.HANDLE,
wintypes.DWORD
]
_SetConsoleMode.restype = wintypes.BOOL
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        # Body missing in the original snippet; restored following the standard colorama
        # implementation (CONSOLE_SCREEN_BUFFER_INFO is the ctypes Structure defined in colorama's win32 module).
        handle = _GetStdHandle(stream_id)
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(handle, byref(csbi))
        return csbi
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = _GetStdHandle(stream_id)
return _SetConsoleCursorPosition(handle, adjusted_position) | null |
1,393 |
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = _GetStdHandle(stream_id)
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value | null |
1,394 |
The provided code snippet includes necessary dependencies for implementing the `FillConsoleOutputAttribute` function. Write a Python function `def FillConsoleOutputAttribute(stream_id, attr, length, start)` to solve the following problem:
FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )
Here is the function:
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written)) | FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten ) |
1,395 |
def SetConsoleTitle(title):
return _SetConsoleTitleW(title) | null |
1,396 | CSI = '\033['
def code_to_chars(code):
return CSI + str(code) + 'm' | null |
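A quick illustration (not in the original module) of the escape sequence the helper builds:

print(repr(code_to_chars(31)))  # -> '\x1b[31m', the SGR code for red foreground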
1,397 | OSC = '\033]'
BEL = '\a'
def set_title(title):
return OSC + '2;' + title + BEL | null |
1,398 | CSI = '\033['
def clear_screen(mode=2):
return CSI + str(mode) + 'J' | null |
1,399 | CSI = '\033['
def clear_line(mode=2):
return CSI + str(mode) + 'K' | null |
1,400 | import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
def reset_all():
if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
AnsiToWin32(orig_stdout).reset_all()
def _wipe_internal_state_for_tests():
global orig_stdout, orig_stderr
orig_stdout = None
orig_stderr = None
global wrapped_stdout, wrapped_stderr
wrapped_stdout = None
wrapped_stderr = None
global atexit_done
atexit_done = False
global fixed_windows_console
fixed_windows_console = False
try:
# no-op if it wasn't registered
atexit.unregister(reset_all)
except AttributeError:
# python 2: no atexit.unregister. Oh well, we did our best.
pass | null |
1,401 | import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
try:
fd = wrapped.fileno()
except Exception:
fd = -1
system_has_native_ansi = not on_windows or enable_vt_processing(fd)
have_tty = not self.stream.closed and self.stream.isatty()
need_conversion = conversion_supported and not system_has_native_ansi
# should we strip ANSI sequences from our output?
if strip is None:
strip = need_conversion or not have_tty
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = need_conversion and have_tty
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not self.stream.closed:
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command == BEL:
if paramstring.count(";") == 1:
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
def flush(self):
self.wrapped.flush()
def just_fix_windows_console():
global fixed_windows_console
if sys.platform != "win32":
return
if fixed_windows_console:
return
if wrapped_stdout is not None or wrapped_stderr is not None:
# Someone already ran init() and it did stuff, so we won't second-guess them
return
# On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the
# native ANSI support in the console as a side-effect. We only need to actually
# replace sys.stdout/stderr if we're in the old-style conversion mode.
new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False)
if new_stdout.convert:
sys.stdout = new_stdout
new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False)
if new_stderr.convert:
sys.stderr = new_stderr
fixed_windows_console = True | null |
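Typical usage of the public helper above, called once at program start (it is a no-op outside Windows):

just_fix_windows_console()
print("\033[31m" + "red text" + "\033[0m")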
1,402 | import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
def init(autoreset=False, convert=None, strip=None, wrap=True):
if not wrap and any([autoreset, convert, strip]):
raise ValueError('wrap=False conflicts with any other arg=True')
global wrapped_stdout, wrapped_stderr
global orig_stdout, orig_stderr
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if sys.stdout is None:
wrapped_stdout = None
else:
sys.stdout = wrapped_stdout = \
wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
if sys.stderr is None:
wrapped_stderr = None
else:
sys.stderr = wrapped_stderr = \
wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
global atexit_done
if not atexit_done:
atexit.register(reset_all)
atexit_done = True
def deinit():
if orig_stdout is not None:
sys.stdout = orig_stdout
if orig_stderr is not None:
sys.stderr = orig_stderr
@contextlib.contextmanager
def colorama_text(*args, **kwargs):
init(*args, **kwargs)
try:
yield
finally:
deinit() | null |
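Used as a context manager, colorama_text scopes the init()/deinit() stream wrapping to a block; the colors used below are just an example.

with colorama_text(autoreset=True):
    print("\033[32m" + "green inside the block")
print("plain output again")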
1,403 | import atexit
import contextlib
import sys
from .ansitowin32 import AnsiToWin32
def reinit():
if wrapped_stdout is not None:
sys.stdout = wrapped_stdout
if wrapped_stderr is not None:
sys.stderr = wrapped_stderr | null |
1,404 | try:
from msvcrt import get_osfhandle
except ImportError:
    def get_osfhandle(_):
        raise OSError("This isn't windows!")  # body missing in the snippet; this is colorama's non-Windows fallback
from . import win32
def enable_vt_processing(fd):
if win32.windll is None or not win32.winapi_test():
return False
try:
handle = get_osfhandle(fd)
mode = win32.GetConsoleMode(handle)
win32.SetConsoleMode(
handle,
mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING,
)
mode = win32.GetConsoleMode(handle)
if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING:
return True
# Can get TypeError in testsuite where 'fd' is a Mock()
except (OSError, TypeError):
return False | null |
1,405 | import datetime
import os
import signal
import sys
import warnings
from typing import Optional
import requests
import openai
from openai.upload_progress import BufferReader
from openai.validators import (
apply_necessary_remediation,
apply_validators,
get_validators,
read_any_format,
write_out_file,
)
def organization_info(obj):
organization = getattr(obj, "organization", None)
if organization is not None:
return "[organization={}] ".format(organization)
else:
return ""
def display(obj):
sys.stderr.write(organization_info(obj))
sys.stderr.flush()
print(obj) | null |
1,406 | import datetime
import os
import signal
import sys
import warnings
from typing import Optional
import requests
import openai
from openai.upload_progress import BufferReader
from openai.validators import (
apply_necessary_remediation,
apply_validators,
get_validators,
read_any_format,
write_out_file,
)
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def organization_info(obj):
organization = getattr(obj, "organization", None)
if organization is not None:
return "[organization={}] ".format(organization)
else:
return ""
def display_error(e):
extra = (
" (HTTP status code: {})".format(e.http_status)
if e.http_status is not None
else ""
)
sys.stderr.write(
"{}{}Error:{} {}{}\n".format(
organization_info(e), bcolors.FAIL, bcolors.ENDC, e, extra
)
) | null |
1,407 | import datetime
import os
import signal
import sys
import warnings
from typing import Optional
import requests
import openai
from openai.upload_progress import BufferReader
from openai.validators import (
apply_necessary_remediation,
apply_validators,
get_validators,
read_any_format,
write_out_file,
)
class FineTune:
def list(cls, args):
resp = openai.FineTune.list()
print(resp)
def _is_url(cls, file: str):
return file.lower().startswith("http")
def _download_file_from_public_url(cls, url: str) -> Optional[bytes]:
resp = requests.get(url)
if resp.status_code == 200:
return resp.content
else:
return None
def _maybe_upload_file(
cls,
file: Optional[str] = None,
content: Optional[bytes] = None,
user_provided_file: Optional[str] = None,
check_if_file_exists: bool = True,
):
# Exactly one of `file` or `content` must be provided
if (file is None) == (content is None):
raise ValueError("Exactly one of `file` or `content` must be provided")
if content is None:
assert file is not None
with open(file, "rb") as f:
content = f.read()
if check_if_file_exists:
bytes = len(content)
matching_files = openai.File.find_matching_files(
name=user_provided_file or f.name, bytes=bytes, purpose="fine-tune"
)
if len(matching_files) > 0:
file_ids = [f["id"] for f in matching_files]
sys.stdout.write(
"Found potentially duplicated files with name '{name}', purpose 'fine-tune' and size {size} bytes\n".format(
name=os.path.basename(matching_files[0]["filename"]),
size=matching_files[0]["bytes"]
if "bytes" in matching_files[0]
else matching_files[0]["size"],
)
)
sys.stdout.write("\n".join(file_ids))
while True:
sys.stdout.write(
"\nEnter file ID to reuse an already uploaded file, or an empty string to upload this file anyway: "
)
inp = sys.stdin.readline().strip()
if inp in file_ids:
sys.stdout.write(
"Reusing already uploaded file: {id}\n".format(id=inp)
)
return inp
elif inp == "":
break
else:
sys.stdout.write(
"File id '{id}' is not among the IDs of the potentially duplicated files\n".format(
id=inp
)
)
buffer_reader = BufferReader(content, desc="Upload progress")
resp = openai.File.create(
file=buffer_reader,
purpose="fine-tune",
user_provided_filename=user_provided_file or file,
)
sys.stdout.write(
"Uploaded file from {file}: {id}\n".format(
file=user_provided_file or file, id=resp["id"]
)
)
return resp["id"]
def _get_or_upload(cls, file, check_if_file_exists=True):
try:
# 1. If it's a valid file, use it
openai.File.retrieve(file)
return file
except openai.error.InvalidRequestError:
pass
if os.path.isfile(file):
# 2. If it's a file on the filesystem, upload it
return cls._maybe_upload_file(
file=file, check_if_file_exists=check_if_file_exists
)
if cls._is_url(file):
# 3. If it's a URL, download it temporarily
content = cls._download_file_from_public_url(file)
if content is not None:
return cls._maybe_upload_file(
content=content,
check_if_file_exists=check_if_file_exists,
user_provided_file=file,
)
return file
def create(cls, args):
create_args = {
"training_file": cls._get_or_upload(
args.training_file, args.check_if_files_exist
),
}
if args.validation_file:
create_args["validation_file"] = cls._get_or_upload(
args.validation_file, args.check_if_files_exist
)
for hparam in (
"model",
"suffix",
"n_epochs",
"batch_size",
"learning_rate_multiplier",
"prompt_loss_weight",
"compute_classification_metrics",
"classification_n_classes",
"classification_positive_class",
"classification_betas",
):
attr = getattr(args, hparam)
if attr is not None:
create_args[hparam] = attr
resp = openai.FineTune.create(**create_args)
if args.no_follow:
print(resp)
return
sys.stdout.write(
"Created fine-tune: {job_id}\n"
"Streaming events until fine-tuning is complete...\n\n"
"(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format(
job_id=resp["id"]
)
)
cls._stream_events(resp["id"])
def get(cls, args):
resp = openai.FineTune.retrieve(id=args.id)
print(resp)
def results(cls, args):
fine_tune = openai.FineTune.retrieve(id=args.id)
if "result_files" not in fine_tune or len(fine_tune["result_files"]) == 0:
raise openai.error.InvalidRequestError(
f"No results file available for fine-tune {args.id}", "id"
)
result_file = openai.FineTune.retrieve(id=args.id)["result_files"][0]
resp = openai.File.download(id=result_file["id"])
print(resp.decode("utf-8"))
def events(cls, args):
if args.stream:
raise openai.error.OpenAIError(
message=(
"The --stream parameter is deprecated, use fine_tunes.follow "
"instead:\n\n"
" openai api fine_tunes.follow -i {id}\n".format(id=args.id)
),
)
resp = openai.FineTune.list_events(id=args.id) # type: ignore
print(resp)
def follow(cls, args):
cls._stream_events(args.id)
def _stream_events(cls, job_id):
def signal_handler(sig, frame):
status = openai.FineTune.retrieve(job_id).status
sys.stdout.write(
"\nStream interrupted. Job is still {status}.\n"
"To resume the stream, run:\n\n"
" openai api fine_tunes.follow -i {job_id}\n\n"
"To cancel your job, run:\n\n"
" openai api fine_tunes.cancel -i {job_id}\n\n".format(
status=status, job_id=job_id
)
)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
events = openai.FineTune.stream_events(job_id)
# TODO(rachel): Add a nifty spinner here.
try:
for event in events:
sys.stdout.write(
"[%s] %s"
% (
datetime.datetime.fromtimestamp(event["created_at"]),
event["message"],
)
)
sys.stdout.write("\n")
sys.stdout.flush()
except Exception:
sys.stdout.write(
"\nStream interrupted (client disconnected).\n"
"To resume the stream, run:\n\n"
" openai api fine_tunes.follow -i {job_id}\n\n".format(job_id=job_id)
)
return
resp = openai.FineTune.retrieve(id=job_id)
status = resp["status"]
if status == "succeeded":
sys.stdout.write("\nJob complete! Status: succeeded 🎉")
sys.stdout.write(
"\nTry out your fine-tuned model:\n\n"
"openai api completions.create -m {model} -p <YOUR_PROMPT>".format(
model=resp["fine_tuned_model"]
)
)
elif status == "failed":
sys.stdout.write(
"\nJob failed. Please contact [email protected] if you need assistance."
)
sys.stdout.write("\n")
def cancel(cls, args):
resp = openai.FineTune.cancel(id=args.id)
print(resp)
def delete(cls, args):
resp = openai.FineTune.delete(sid=args.id)
print(resp)
def prepare_data(cls, args):
sys.stdout.write("Analyzing...\n")
fname = args.file
auto_accept = args.quiet
df, remediation = read_any_format(fname)
apply_necessary_remediation(None, remediation)
validators = get_validators()
apply_validators(
df,
fname,
remediation,
validators,
auto_accept,
write_out_file_func=write_out_file,
)
def tools_register(parser):
subparsers = parser.add_subparsers(
title="Tools", help="Convenience client side tools"
)
def help(args):
parser.print_help()
parser.set_defaults(func=help)
sub = subparsers.add_parser("fine_tunes.prepare_data")
sub.add_argument(
"-f",
"--file",
required=True,
        help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed. "
"This should be the local file path.",
)
sub.add_argument(
"-q",
"--quiet",
required=False,
action="store_true",
help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
)
sub.set_defaults(func=FineTune.prepare_data) | null |
1,408 | import datetime
import os
import signal
import sys
import warnings
from typing import Optional
import requests
import openai
from openai.upload_progress import BufferReader
from openai.validators import (
apply_necessary_remediation,
apply_validators,
get_validators,
read_any_format,
write_out_file,
)
class Engine:
def get(cls, args):
engine = openai.Engine.retrieve(id=args.id)
display(engine)
def update(cls, args):
engine = openai.Engine.modify(args.id, replicas=args.replicas)
display(engine)
def generate(cls, args):
warnings.warn(
"Engine.generate is deprecated, use Completion.create", DeprecationWarning
)
if args.completions and args.completions > 1 and args.stream:
raise ValueError("Can't stream multiple completions with openai CLI")
kwargs = {}
if args.model is not None:
kwargs["model"] = args.model
resp = openai.Engine(id=args.id).generate(
completions=args.completions,
context=args.context,
length=args.length,
stream=args.stream,
temperature=args.temperature,
top_p=args.top_p,
logprobs=args.logprobs,
stop=args.stop,
**kwargs,
)
if not args.stream:
resp = [resp]
for part in resp:
completions = len(part["data"])
for c_idx, c in enumerate(part["data"]):
if completions > 1:
sys.stdout.write("===== Completion {} =====\n".format(c_idx))
sys.stdout.write("".join(c["text"]))
if completions > 1:
sys.stdout.write("\n")
sys.stdout.flush()
def list(cls, args):
engines = openai.Engine.list()
display(engines)
class ChatCompletion:
def create(cls, args):
if args.n is not None and args.n > 1 and args.stream:
raise ValueError(
"Can't stream chat completions with n>1 with the current CLI"
)
messages = [
{"role": role, "content": content} for role, content in args.message
]
resp = openai.ChatCompletion.create(
# Required
model=args.model,
messages=messages,
# Optional
n=args.n,
max_tokens=100,
temperature=args.temperature,
top_p=args.top_p,
stop=args.stop,
stream=args.stream,
)
if not args.stream:
resp = [resp]
for part in resp:
choices = part["choices"]
for c_idx, c in enumerate(sorted(choices, key=lambda s: s["index"])):
if len(choices) > 1:
sys.stdout.write("===== Chat Completion {} =====\n".format(c_idx))
sys.stdout.write(c["message"]["content"])
if len(choices) > 1:
sys.stdout.write("\n")
sys.stdout.flush()
class Completion:
def create(cls, args):
if args.n is not None and args.n > 1 and args.stream:
raise ValueError("Can't stream completions with n>1 with the current CLI")
if args.engine and args.model:
warnings.warn(
"In most cases, you should not be specifying both engine and model."
)
resp = openai.Completion.create(
engine=args.engine,
model=args.model,
n=args.n,
max_tokens=args.max_tokens,
logprobs=args.logprobs,
prompt=args.prompt,
stream=args.stream,
temperature=args.temperature,
top_p=args.top_p,
stop=args.stop,
echo=True,
)
if not args.stream:
resp = [resp]
for part in resp:
choices = part["choices"]
for c_idx, c in enumerate(sorted(choices, key=lambda s: s["index"])):
if len(choices) > 1:
sys.stdout.write("===== Completion {} =====\n".format(c_idx))
sys.stdout.write(c["text"])
if len(choices) > 1:
sys.stdout.write("\n")
sys.stdout.flush()
class Deployment:
def get(cls, args):
resp = openai.Deployment.retrieve(id=args.id)
print(resp)
def delete(cls, args):
model = openai.Deployment.delete(args.id)
print(model)
def list(cls, args):
models = openai.Deployment.list()
print(models)
def create(cls, args):
models = openai.Deployment.create(model=args.model, scale_settings={"scale_type": args.scale_type})
print(models)
class Model:
def get(cls, args):
resp = openai.Model.retrieve(id=args.id)
print(resp)
def delete(cls, args):
model = openai.Model.delete(args.id)
print(model)
def list(cls, args):
models = openai.Model.list()
print(models)
class File:
def create(cls, args):
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
resp = openai.File.create(
file=buffer_reader,
purpose=args.purpose,
user_provided_filename=args.file,
)
print(resp)
def get(cls, args):
resp = openai.File.retrieve(id=args.id)
print(resp)
def delete(cls, args):
file = openai.File.delete(args.id)
print(file)
def list(cls, args):
file = openai.File.list()
print(file)
class Image:
def create(cls, args):
resp = openai.Image.create(
prompt=args.prompt,
size=args.size,
n=args.num_images,
response_format=args.response_format,
)
print(resp)
def create_variation(cls, args):
with open(args.image, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
resp = openai.Image.create_variation(
image=buffer_reader,
size=args.size,
n=args.num_images,
response_format=args.response_format,
)
print(resp)
def create_edit(cls, args):
with open(args.image, "rb") as file_reader:
image_reader = BufferReader(file_reader.read(), desc="Upload progress")
mask_reader = None
if args.mask is not None:
with open(args.mask, "rb") as file_reader:
mask_reader = BufferReader(file_reader.read(), desc="Upload progress")
resp = openai.Image.create_edit(
image=image_reader,
mask=mask_reader,
prompt=args.prompt,
size=args.size,
n=args.num_images,
response_format=args.response_format,
)
print(resp)
class Audio:
def transcribe(cls, args):
with open(args.file, "rb") as r:
file_reader = BufferReader(r.read(), desc="Upload progress")
resp = openai.Audio.transcribe_raw(
# Required
model=args.model,
file=file_reader,
filename=args.file,
# Optional
response_format=args.response_format,
language=args.language,
temperature=args.temperature,
prompt=args.prompt,
)
print(resp)
def translate(cls, args):
with open(args.file, "rb") as r:
file_reader = BufferReader(r.read(), desc="Upload progress")
resp = openai.Audio.translate_raw(
# Required
model=args.model,
file=file_reader,
filename=args.file,
# Optional
response_format=args.response_format,
language=args.language,
temperature=args.temperature,
prompt=args.prompt,
)
print(resp)
class FineTune:
def list(cls, args):
resp = openai.FineTune.list()
print(resp)
def _is_url(cls, file: str):
return file.lower().startswith("http")
def _download_file_from_public_url(cls, url: str) -> Optional[bytes]:
resp = requests.get(url)
if resp.status_code == 200:
return resp.content
else:
return None
def _maybe_upload_file(
cls,
file: Optional[str] = None,
content: Optional[bytes] = None,
user_provided_file: Optional[str] = None,
check_if_file_exists: bool = True,
):
# Exactly one of `file` or `content` must be provided
if (file is None) == (content is None):
raise ValueError("Exactly one of `file` or `content` must be provided")
if content is None:
assert file is not None
with open(file, "rb") as f:
content = f.read()
if check_if_file_exists:
bytes = len(content)
matching_files = openai.File.find_matching_files(
name=user_provided_file or f.name, bytes=bytes, purpose="fine-tune"
)
if len(matching_files) > 0:
file_ids = [f["id"] for f in matching_files]
sys.stdout.write(
"Found potentially duplicated files with name '{name}', purpose 'fine-tune' and size {size} bytes\n".format(
name=os.path.basename(matching_files[0]["filename"]),
size=matching_files[0]["bytes"]
if "bytes" in matching_files[0]
else matching_files[0]["size"],
)
)
sys.stdout.write("\n".join(file_ids))
while True:
sys.stdout.write(
"\nEnter file ID to reuse an already uploaded file, or an empty string to upload this file anyway: "
)
inp = sys.stdin.readline().strip()
if inp in file_ids:
sys.stdout.write(
"Reusing already uploaded file: {id}\n".format(id=inp)
)
return inp
elif inp == "":
break
else:
sys.stdout.write(
"File id '{id}' is not among the IDs of the potentially duplicated files\n".format(
id=inp
)
)
buffer_reader = BufferReader(content, desc="Upload progress")
resp = openai.File.create(
file=buffer_reader,
purpose="fine-tune",
user_provided_filename=user_provided_file or file,
)
sys.stdout.write(
"Uploaded file from {file}: {id}\n".format(
file=user_provided_file or file, id=resp["id"]
)
)
return resp["id"]
def _get_or_upload(cls, file, check_if_file_exists=True):
try:
# 1. If it's a valid file, use it
openai.File.retrieve(file)
return file
except openai.error.InvalidRequestError:
pass
if os.path.isfile(file):
# 2. If it's a file on the filesystem, upload it
return cls._maybe_upload_file(
file=file, check_if_file_exists=check_if_file_exists
)
if cls._is_url(file):
# 3. If it's a URL, download it temporarily
content = cls._download_file_from_public_url(file)
if content is not None:
return cls._maybe_upload_file(
content=content,
check_if_file_exists=check_if_file_exists,
user_provided_file=file,
)
return file
def create(cls, args):
create_args = {
"training_file": cls._get_or_upload(
args.training_file, args.check_if_files_exist
),
}
if args.validation_file:
create_args["validation_file"] = cls._get_or_upload(
args.validation_file, args.check_if_files_exist
)
for hparam in (
"model",
"suffix",
"n_epochs",
"batch_size",
"learning_rate_multiplier",
"prompt_loss_weight",
"compute_classification_metrics",
"classification_n_classes",
"classification_positive_class",
"classification_betas",
):
attr = getattr(args, hparam)
if attr is not None:
create_args[hparam] = attr
resp = openai.FineTune.create(**create_args)
if args.no_follow:
print(resp)
return
sys.stdout.write(
"Created fine-tune: {job_id}\n"
"Streaming events until fine-tuning is complete...\n\n"
"(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format(
job_id=resp["id"]
)
)
cls._stream_events(resp["id"])
def get(cls, args):
resp = openai.FineTune.retrieve(id=args.id)
print(resp)
def results(cls, args):
fine_tune = openai.FineTune.retrieve(id=args.id)
if "result_files" not in fine_tune or len(fine_tune["result_files"]) == 0:
raise openai.error.InvalidRequestError(
f"No results file available for fine-tune {args.id}", "id"
)
result_file = openai.FineTune.retrieve(id=args.id)["result_files"][0]
resp = openai.File.download(id=result_file["id"])
print(resp.decode("utf-8"))
def events(cls, args):
if args.stream:
raise openai.error.OpenAIError(
message=(
"The --stream parameter is deprecated, use fine_tunes.follow "
"instead:\n\n"
" openai api fine_tunes.follow -i {id}\n".format(id=args.id)
),
)
resp = openai.FineTune.list_events(id=args.id) # type: ignore
print(resp)
def follow(cls, args):
cls._stream_events(args.id)
def _stream_events(cls, job_id):
def signal_handler(sig, frame):
status = openai.FineTune.retrieve(job_id).status
sys.stdout.write(
"\nStream interrupted. Job is still {status}.\n"
"To resume the stream, run:\n\n"
" openai api fine_tunes.follow -i {job_id}\n\n"
"To cancel your job, run:\n\n"
" openai api fine_tunes.cancel -i {job_id}\n\n".format(
status=status, job_id=job_id
)
)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
events = openai.FineTune.stream_events(job_id)
# TODO(rachel): Add a nifty spinner here.
try:
for event in events:
sys.stdout.write(
"[%s] %s"
% (
datetime.datetime.fromtimestamp(event["created_at"]),
event["message"],
)
)
sys.stdout.write("\n")
sys.stdout.flush()
except Exception:
sys.stdout.write(
"\nStream interrupted (client disconnected).\n"
"To resume the stream, run:\n\n"
" openai api fine_tunes.follow -i {job_id}\n\n".format(job_id=job_id)
)
return
resp = openai.FineTune.retrieve(id=job_id)
status = resp["status"]
if status == "succeeded":
sys.stdout.write("\nJob complete! Status: succeeded 🎉")
sys.stdout.write(
"\nTry out your fine-tuned model:\n\n"
"openai api completions.create -m {model} -p <YOUR_PROMPT>".format(
model=resp["fine_tuned_model"]
)
)
elif status == "failed":
sys.stdout.write(
"\nJob failed. Please contact [email protected] if you need assistance."
)
sys.stdout.write("\n")
def cancel(cls, args):
resp = openai.FineTune.cancel(id=args.id)
print(resp)
def delete(cls, args):
resp = openai.FineTune.delete(sid=args.id)
print(resp)
def prepare_data(cls, args):
sys.stdout.write("Analyzing...\n")
fname = args.file
auto_accept = args.quiet
df, remediation = read_any_format(fname)
apply_necessary_remediation(None, remediation)
validators = get_validators()
apply_validators(
df,
fname,
remediation,
validators,
auto_accept,
write_out_file_func=write_out_file,
)
def api_register(parser):
# Engine management
subparsers = parser.add_subparsers(help="All API subcommands")
def help(args):
parser.print_help()
parser.set_defaults(func=help)
sub = subparsers.add_parser("engines.list")
sub.set_defaults(func=Engine.list)
sub = subparsers.add_parser("engines.get")
sub.add_argument("-i", "--id", required=True)
sub.set_defaults(func=Engine.get)
sub = subparsers.add_parser("engines.update")
sub.add_argument("-i", "--id", required=True)
sub.add_argument("-r", "--replicas", type=int)
sub.set_defaults(func=Engine.update)
sub = subparsers.add_parser("engines.generate")
sub.add_argument("-i", "--id", required=True)
sub.add_argument(
"--stream", help="Stream tokens as they're ready.", action="store_true"
)
sub.add_argument("-c", "--context", help="An optional context to generate from")
sub.add_argument("-l", "--length", help="How many tokens to generate", type=int)
sub.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
sub.add_argument(
"-p",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
sub.add_argument(
"-n",
"--completions",
help="How many parallel completions to run on this context",
type=int,
)
sub.add_argument(
"--logprobs",
help="Include the log probabilites on the `logprobs` most likely tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is supplied, the API will always return the logprob of the generated token, so there may be up to `logprobs+1` elements in the response.",
type=int,
)
sub.add_argument(
"--stop", help="A stop sequence at which to stop generating tokens."
)
sub.add_argument(
"-m",
"--model",
required=False,
help="A model (most commonly a model ID) to generate from. Defaults to the engine's default model.",
)
sub.set_defaults(func=Engine.generate)
# Chat Completions
sub = subparsers.add_parser("chat_completions.create")
sub._action_groups.pop()
req = sub.add_argument_group("required arguments")
opt = sub.add_argument_group("optional arguments")
req.add_argument(
"-m",
"--model",
help="The model to use.",
required=True,
)
req.add_argument(
"-g",
"--message",
action="append",
nargs=2,
metavar=("ROLE", "CONTENT"),
help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
required=True,
)
opt.add_argument(
"-n",
"--n",
help="How many completions to generate for the conversation.",
type=int,
)
opt.add_argument(
"-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int
)
opt.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
opt.add_argument(
"-P",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
opt.add_argument(
"--stop",
help="A stop sequence at which to stop generating tokens for the message.",
)
opt.add_argument(
"--stream", help="Stream messages as they're ready.", action="store_true"
)
sub.set_defaults(func=ChatCompletion.create)
# Completions
sub = subparsers.add_parser("completions.create")
sub.add_argument(
"-e",
"--engine",
help="The engine to use. See https://platform.openai.com/docs/engines for more about what engines are available.",
)
sub.add_argument(
"-m",
"--model",
help="The model to use. At most one of `engine` or `model` should be specified.",
)
sub.add_argument(
"--stream", help="Stream tokens as they're ready.", action="store_true"
)
sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
sub.add_argument(
"-M", "--max-tokens", help="The maximum number of tokens to generate", type=int
)
sub.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
sub.add_argument(
"-P",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
sub.add_argument(
"-n",
"--n",
help="How many sub-completions to generate for each prompt.",
type=int,
)
sub.add_argument(
"--logprobs",
help="Include the log probabilites on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
type=int,
)
sub.add_argument(
"--stop", help="A stop sequence at which to stop generating tokens."
)
sub.set_defaults(func=Completion.create)
# Deployments
sub = subparsers.add_parser("deployments.list")
sub.set_defaults(func=Deployment.list)
sub = subparsers.add_parser("deployments.get")
sub.add_argument("-i", "--id", required=True, help="The deployment ID")
sub.set_defaults(func=Deployment.get)
sub = subparsers.add_parser("deployments.delete")
sub.add_argument("-i", "--id", required=True, help="The deployment ID")
sub.set_defaults(func=Deployment.delete)
sub = subparsers.add_parser("deployments.create")
sub.add_argument("-m", "--model", required=True, help="The model ID")
sub.add_argument("-s", "--scale_type", required=True, help="The scale type. Either 'manual' or 'standard'")
sub.set_defaults(func=Deployment.create)
# Models
sub = subparsers.add_parser("models.list")
sub.set_defaults(func=Model.list)
sub = subparsers.add_parser("models.get")
sub.add_argument("-i", "--id", required=True, help="The model ID")
sub.set_defaults(func=Model.get)
sub = subparsers.add_parser("models.delete")
sub.add_argument("-i", "--id", required=True, help="The model ID")
sub.set_defaults(func=Model.delete)
# Files
sub = subparsers.add_parser("files.create")
sub.add_argument(
"-f",
"--file",
required=True,
help="File to upload",
)
sub.add_argument(
"-p",
"--purpose",
help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
required=True,
)
sub.set_defaults(func=File.create)
sub = subparsers.add_parser("files.get")
sub.add_argument("-i", "--id", required=True, help="The files ID")
sub.set_defaults(func=File.get)
sub = subparsers.add_parser("files.delete")
sub.add_argument("-i", "--id", required=True, help="The files ID")
sub.set_defaults(func=File.delete)
sub = subparsers.add_parser("files.list")
sub.set_defaults(func=File.list)
# Finetune
sub = subparsers.add_parser("fine_tunes.list")
sub.set_defaults(func=FineTune.list)
sub = subparsers.add_parser("fine_tunes.create")
sub.add_argument(
"-t",
"--training_file",
required=True,
help="JSONL file containing prompt-completion examples for training. This can "
"be the ID of a file uploaded through the OpenAI API (e.g. file-abcde12345), "
'a local file path, or a URL that starts with "http".',
)
sub.add_argument(
"-v",
"--validation_file",
help="JSONL file containing prompt-completion examples for validation. This can "
"be the ID of a file uploaded through the OpenAI API (e.g. file-abcde12345), "
'a local file path, or a URL that starts with "http".',
)
sub.add_argument(
"--no_check_if_files_exist",
dest="check_if_files_exist",
action="store_false",
help="If this argument is set and training_file or validation_file are file paths, immediately upload them. If this argument is not set, check if they may be duplicates of already uploaded files before uploading, based on file name and file size.",
)
sub.add_argument(
"-m",
"--model",
help="The model to start fine-tuning from",
)
sub.add_argument(
"--suffix",
help="If set, this argument can be used to customize the generated fine-tuned model name."
"All punctuation and whitespace in `suffix` will be replaced with a "
"single dash, and the string will be lower cased. The max "
"length of `suffix` is 40 chars. "
"The generated name will match the form `{base_model}:ft-{org-title}:{suffix}-{timestamp}`. "
        'For example, `openai api fine_tunes.create -t test.jsonl -m ada --suffix "custom model name"` '
"could generate a model with the name "
"ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
)
sub.add_argument(
"--no_follow",
action="store_true",
help="If set, returns immediately after creating the job. Otherwise, streams events and waits for the job to complete.",
)
sub.add_argument(
"--n_epochs",
type=int,
help="The number of epochs to train the model for. An epoch refers to one "
"full cycle through the training dataset.",
)
sub.add_argument(
"--batch_size",
type=int,
help="The batch size to use for training. The batch size is the number of "
"training examples used to train a single forward and backward pass.",
)
sub.add_argument(
"--learning_rate_multiplier",
type=float,
help="The learning rate multiplier to use for training. The fine-tuning "
"learning rate is determined by the original learning rate used for "
"pretraining multiplied by this value.",
)
sub.add_argument(
"--prompt_loss_weight",
type=float,
help="The weight to use for the prompt loss. The optimum value here depends "
"depends on your use case. This determines how much the model prioritizes "
"learning from prompt tokens vs learning from completion tokens.",
)
sub.add_argument(
"--compute_classification_metrics",
action="store_true",
help="If set, we calculate classification-specific metrics such as accuracy "
"and F-1 score using the validation set at the end of every epoch.",
)
sub.set_defaults(compute_classification_metrics=None)
sub.add_argument(
"--classification_n_classes",
type=int,
help="The number of classes in a classification task. This parameter is "
"required for multiclass classification.",
)
sub.add_argument(
"--classification_positive_class",
help="The positive class in binary classification. This parameter is needed "
"to generate precision, recall and F-1 metrics when doing binary "
"classification.",
)
sub.add_argument(
"--classification_betas",
type=float,
nargs="+",
help="If this is provided, we calculate F-beta scores at the specified beta "
"values. The F-beta score is a generalization of F-1 score. This is only "
"used for binary classification.",
)
sub.set_defaults(func=FineTune.create)
sub = subparsers.add_parser("fine_tunes.get")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.get)
sub = subparsers.add_parser("fine_tunes.results")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.results)
sub = subparsers.add_parser("fine_tunes.events")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
# TODO(rachel): Remove this in 1.0
sub.add_argument(
"-s",
"--stream",
action="store_true",
help="[DEPRECATED] If set, events will be streamed until the job is done. Otherwise, "
"displays the event history to date.",
)
sub.set_defaults(func=FineTune.events)
sub = subparsers.add_parser("fine_tunes.follow")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.follow)
sub = subparsers.add_parser("fine_tunes.cancel")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.cancel)
sub = subparsers.add_parser("fine_tunes.delete")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.delete)
# Image
sub = subparsers.add_parser("image.create")
sub.add_argument("-p", "--prompt", type=str, required=True)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-s", "--size", type=str, default="1024x1024", help="Size of the output image"
)
sub.add_argument("--response-format", type=str, default="url")
sub.set_defaults(func=Image.create)
sub = subparsers.add_parser("image.create_edit")
sub.add_argument("-p", "--prompt", type=str, required=True)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-I",
"--image",
type=str,
required=True,
help="Image to modify. Should be a local path and a PNG encoded image.",
)
sub.add_argument(
"-s", "--size", type=str, default="1024x1024", help="Size of the output image"
)
sub.add_argument("--response-format", type=str, default="url")
sub.add_argument(
"-M",
"--mask",
type=str,
required=False,
help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.",
)
sub.set_defaults(func=Image.create_edit)
sub = subparsers.add_parser("image.create_variation")
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-I",
"--image",
type=str,
required=True,
help="Image to modify. Should be a local path and a PNG encoded image.",
)
sub.add_argument(
"-s", "--size", type=str, default="1024x1024", help="Size of the output image"
)
sub.add_argument("--response-format", type=str, default="url")
sub.set_defaults(func=Image.create_variation)
# Audio
# transcriptions
sub = subparsers.add_parser("audio.transcribe")
# Required
sub.add_argument("-m", "--model", type=str, default="whisper-1")
sub.add_argument("-f", "--file", type=str, required=True)
# Optional
sub.add_argument("--response-format", type=str)
sub.add_argument("--language", type=str)
sub.add_argument("-t", "--temperature", type=float)
sub.add_argument("--prompt", type=str)
sub.set_defaults(func=Audio.transcribe)
# translations
sub = subparsers.add_parser("audio.translate")
# Required
sub.add_argument("-m", "--model", type=str, default="whisper-1")
sub.add_argument("-f", "--file", type=str, required=True)
# Optional
sub.add_argument("--response-format", type=str)
sub.add_argument("--language", type=str)
sub.add_argument("-t", "--temperature", type=float)
sub.add_argument("--prompt", type=str)
sub.set_defaults(func=Audio.translate) | null |
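A minimal usage sketch of the registration function above, assuming `api_register` and the command classes are importable from this module; the model name and prompt are illustrative, and actually running the dispatched command needs network access plus a valid OPENAI_API_KEY.
import argparse

# Build a parser, register all "openai api ..." subcommands, and dispatch on `func`.
parser = argparse.ArgumentParser(prog="openai api")
api_register(parser)

args = parser.parse_args(
    ["completions.create", "-m", "text-davinci-003", "-p", "Say hello", "-M", "5"]
)
args.func(args)  # dispatches to Completion.create with the parsed namespace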
1,409 | import datetime
import os
import signal
import sys
import warnings
from typing import Optional
import requests
import openai
from openai.upload_progress import BufferReader
from openai.validators import (
apply_necessary_remediation,
apply_validators,
get_validators,
read_any_format,
write_out_file,
)
class WandbLogger:
def sync(cls, args):
import openai.wandb_logger
resp = openai.wandb_logger.WandbLogger.sync(
id=args.id,
n_fine_tunes=args.n_fine_tunes,
project=args.project,
entity=args.entity,
force=args.force,
)
print(resp)
def wandb_register(parser):
subparsers = parser.add_subparsers(
title="wandb", help="Logging with Weights & Biases"
)
def help(args):
parser.print_help()
parser.set_defaults(func=help)
sub = subparsers.add_parser("sync")
sub.add_argument("-i", "--id", help="The id of the fine-tune job (optional)")
sub.add_argument(
"-n",
"--n_fine_tunes",
type=int,
default=None,
help="Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced.",
)
sub.add_argument(
"--project",
default="GPT-3",
help="""Name of the project where you're sending runs. By default, it is "GPT-3".""",
)
sub.add_argument(
"--entity",
help="Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.",
)
sub.add_argument(
"--force",
action="store_true",
help="Forces logging and overwrite existing wandb run of the same fine-tune.",
)
sub.set_defaults(force=False)
sub.set_defaults(func=WandbLogger.sync) | null |
1,410 | import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import openai
logger = logging.getLogger("openai")
def _console_log_level():
def logfmt(props):
def log_debug(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() == "debug":
print(msg, file=sys.stderr)
logger.debug(msg) | null |
1,411 | import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import openai
logger = logging.getLogger("openai")
def _console_log_level():
if openai.log in ["debug", "info"]:
return openai.log
elif OPENAI_LOG in ["debug", "info"]:
return OPENAI_LOG
else:
return None
def logfmt(props):
def fmt(key, val):
# Handle case where val is a bytes or bytesarray
if hasattr(val, "decode"):
val = val.decode("utf-8")
# Check if val is already a string to avoid re-encoding into ascii.
if not isinstance(val, str):
val = str(val)
if re.search(r"\s", val):
val = repr(val)
# key should already be a string
if re.search(r"\s", key):
key = repr(key)
return "{key}={val}".format(key=key, val=val)
return " ".join([fmt(key, val) for key, val in sorted(props.items())])
def log_info(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() in ["debug", "info"]:
print(msg, file=sys.stderr)
logger.info(msg) | null |
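A small illustration of the logfmt-style output these logging helpers produce, assuming `logfmt` is in scope; keys are emitted in sorted order and values containing whitespace are repr()-quoted.
# Expected output: message='request sent' method=post status=200
print(logfmt({"message": "request sent", "method": "post", "status": 200}))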
1,412 | import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import openai
logger = logging.getLogger("openai")
def logfmt(props):
def fmt(key, val):
# Handle case where val is a bytes or bytesarray
if hasattr(val, "decode"):
val = val.decode("utf-8")
# Check if val is already a string to avoid re-encoding into ascii.
if not isinstance(val, str):
val = str(val)
if re.search(r"\s", val):
val = repr(val)
# key should already be a string
if re.search(r"\s", key):
key = repr(key)
return "{key}={val}".format(key=key, val=val)
return " ".join([fmt(key, val) for key, val in sorted(props.items())])
def log_warn(message, **params):
msg = logfmt(dict(message=message, **params))
print(msg, file=sys.stderr)
logger.warn(msg) | null |
1,413 | import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import openai
The provided code snippet includes necessary dependencies for implementing the `convert_to_dict` function. Write a Python function `def convert_to_dict(obj)` to solve the following problem:
Converts an OpenAIObject back to a regular dict. Nested OpenAIObjects are also converted back to regular dicts. :param obj: The OpenAIObject to convert. :returns: The OpenAIObject as a dict.
Here is the function:
def convert_to_dict(obj):
"""Converts a OpenAIObject back to a regular dict.
Nested OpenAIObjects are also converted back to regular dicts.
:param obj: The OpenAIObject to convert.
:returns: The OpenAIObject as a dict.
"""
if isinstance(obj, list):
return [convert_to_dict(i) for i in obj]
# This works by virtue of the fact that OpenAIObjects _are_ dicts. The dict
# comprehension returns a regular dict and recursively applies the
# conversion to each value.
elif isinstance(obj, dict):
return {k: convert_to_dict(v) for k, v in obj.items()}
else:
return obj | Converts a OpenAIObject back to a regular dict. Nested OpenAIObjects are also converted back to regular dicts. :param obj: The OpenAIObject to convert. :returns: The OpenAIObject as a dict. |
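Because the conversion only relies on dict and list behaviour, a plain nested structure is enough to illustrate it; this sketch assumes `convert_to_dict` is in scope.
# Nested dicts and lists are walked recursively; other values pass through unchanged.
nested = {"object": "list", "data": [{"id": "model-a"}, {"id": "model-b"}]}
print(convert_to_dict(nested))
# {'object': 'list', 'data': [{'id': 'model-a'}, {'id': 'model-b'}]}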
1,414 | import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import openai
def merge_dicts(x, y):
z = x.copy()
z.update(y)
return z | null |
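A quick check of the merge precedence, assuming `merge_dicts` is in scope: keys from the second argument win, and neither input is mutated.
x = {"temperature": 0.2, "model": "ada"}
y = {"temperature": 0.9}
print(merge_dicts(x, y))  # {'temperature': 0.9, 'model': 'ada'}
print(x)                  # unchanged: {'temperature': 0.2, 'model': 'ada'}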
1,415 | import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import openai
def default_api_key() -> str:
if openai.api_key_path:
with open(openai.api_key_path, "rt") as k:
api_key = k.read().strip()
if not api_key.startswith("sk-"):
raise ValueError(f"Malformed API key in {openai.api_key_path}.")
return api_key
elif openai.api_key is not None:
return openai.api_key
else:
raise openai.error.AuthenticationError(
"No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://onboard.openai.com for details, or email [email protected] if you have any questions."
) | null |
1,416 | import io
def progress(total, desc):
import tqdm # type: ignore
meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc)
def incr(progress):
meter.n = progress
if progress == total:
meter.close()
else:
meter.refresh()
return incr | null |
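A hedged usage sketch for the progress callback (requires tqdm); the meter closes automatically once the reported value reaches the total, and the byte counts here are made up for illustration.
total = 1_000_000
incr = progress(total, desc="Upload progress")
for done in range(0, total + 1, 250_000):
    incr(done)  # report absolute progress; closes the meter when done == total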
1,417 | import io
def MB(i):
return int(i // 1024**2) | null |
1,418 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
def get_embedding(text: str, engine="text-similarity-davinci-001") -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], engine=engine)["data"][0]["embedding"] | null |
1,419 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
async def aget_embedding(
text: str, engine="text-similarity-davinci-001"
) -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return (await openai.Embedding.acreate(input=[text], engine=engine))["data"][0][
"embedding"
] | null |
1,420 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
def get_embeddings(
list_of_text: List[str], engine="text-similarity-babbage-001"
) -> List[List[float]]:
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
# replace newlines, which can negatively affect performance.
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = openai.Embedding.create(input=list_of_text, engine=engine).data
data = sorted(data, key=lambda x: x["index"]) # maintain the same order as input.
return [d["embedding"] for d in data] | null |
1,421 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
async def aget_embeddings(
list_of_text: List[str], engine="text-similarity-babbage-001"
) -> List[List[float]]:
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
# replace newlines, which can negatively affect performance.
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = (await openai.Embedding.acreate(input=list_of_text, engine=engine)).data
data = sorted(data, key=lambda x: x["index"]) # maintain the same order as input.
return [d["embedding"] for d in data] | null |
1,422 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) | null |
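A tiny numeric check of the helper, assuming it is in scope together with numpy.
import numpy as np

a = np.array([1.0, 0.0])
b = np.array([1.0, 1.0])
print(cosine_similarity(a, a))                      # 1.0, identical direction
print(round(cosine_similarity(a, b), 4))            # 0.7071, vectors 45 degrees apart
print(cosine_similarity(a, np.array([0.0, 1.0])))   # 0.0, orthogonal vectors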
1,423 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `plot_multiclass_precision_recall` function. Write a Python function `def plot_multiclass_precision_recall( y_score, y_true_untransformed, class_list, classifier_name )` to solve the following problem:
Precision-Recall plotting for a multiclass problem. It plots average precision-recall, per class precision recall and reference f1 contours. Code slightly modified, but heavily based on https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
Here is the function:
def plot_multiclass_precision_recall(
y_score, y_true_untransformed, class_list, classifier_name
):
"""
Precision-Recall plotting for a multiclass problem. It plots average precision-recall, per class precision recall and reference f1 contours.
Code slightly modified, but heavily based on https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
"""
n_classes = len(class_list)
y_true = pd.concat(
[(y_true_untransformed == class_list[i]) for i in range(n_classes)], axis=1
).values
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_true[:, i], y_score[:, i])
average_precision[i] = average_precision_score(y_true[:, i], y_score[:, i])
# A "micro-average": quantifying score on all classes jointly
precision_micro, recall_micro, _ = precision_recall_curve(
y_true.ravel(), y_score.ravel()
)
average_precision_micro = average_precision_score(y_true, y_score, average="micro")
print(
str(classifier_name)
+ " - Average precision score over all classes: {0:0.2f}".format(
average_precision_micro
)
)
# setup plot details
plt.figure(figsize=(9, 10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
(l,) = plt.plot(x[y >= 0], y[y >= 0], color="gray", alpha=0.2)
plt.annotate("f1={0:0.1f}".format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append("iso-f1 curves")
(l,) = plt.plot(recall_micro, precision_micro, color="gold", lw=2)
lines.append(l)
labels.append(
"average Precision-recall (auprc = {0:0.2f})" "".format(average_precision_micro)
)
for i in range(n_classes):
(l,) = plt.plot(recall[i], precision[i], lw=2)
lines.append(l)
labels.append(
"Precision-recall for class `{0}` (auprc = {1:0.2f})"
"".format(class_list[i], average_precision[i])
)
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title(f"{classifier_name}: Precision-Recall curve for each class")
plt.legend(lines, labels) | Precision-Recall plotting for a multiclass problem. It plots average precision-recall, per class precision recall and reference f1 contours. Code slightly modified, but heavily based on https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html |
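A hedged end-to-end sketch on a public scikit-learn dataset; the classifier and dataset are illustrative assumptions rather than part of the original utility, and `pd`/`plt` come from the imports listed above.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

iris = load_iris()
class_list = list(iris.target_names)                       # ['setosa', 'versicolor', 'virginica']
y_named = pd.Series([class_list[i] for i in iris.target])  # string labels, as the helper expects
clf = LogisticRegression(max_iter=1000).fit(iris.data, iris.target)
y_score = clf.predict_proba(iris.data)                     # shape (n_samples, n_classes)

plot_multiclass_precision_recall(y_score, y_named, class_list, "LogisticRegression (iris)")
plt.show()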
1,424 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `distances_from_embeddings` function. Write a Python function `def distances_from_embeddings( query_embedding: List[float], embeddings: List[List[float]], distance_metric="cosine", ) -> List[List]` to solve the following problem:
Return the distances between a query embedding and a list of embeddings.
Here is the function:
def distances_from_embeddings(
query_embedding: List[float],
embeddings: List[List[float]],
distance_metric="cosine",
) -> List[List]:
"""Return the distances between a query embedding and a list of embeddings."""
distance_metrics = {
"cosine": spatial.distance.cosine,
"L1": spatial.distance.cityblock,
"L2": spatial.distance.euclidean,
"Linf": spatial.distance.chebyshev,
}
distances = [
distance_metrics[distance_metric](query_embedding, embedding)
for embedding in embeddings
]
return distances | Return the distances between a query embedding and a list of embeddings. |
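A small toy example, assuming the helper and scipy are in scope; the vectors are made up to show how the metric choice changes the result.
query = [1.0, 0.0]
corpus = [[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]]

print(distances_from_embeddings(query, corpus, distance_metric="cosine"))
# [0.0, 1.0, 0.2928...]  -- exact match first, orthogonal vector farthest
print(distances_from_embeddings(query, corpus, distance_metric="L2"))
# [0.0, 1.4142..., 0.7071...]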
1,425 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `indices_of_nearest_neighbors_from_distances` function. Write a Python function `def indices_of_nearest_neighbors_from_distances(distances) -> np.ndarray` to solve the following problem:
Return a list of indices of nearest neighbors from a list of distances.
Here is the function:
def indices_of_nearest_neighbors_from_distances(distances) -> np.ndarray:
"""Return a list of indices of nearest neighbors from a list of distances."""
return np.argsort(distances) | Return a list of indices of nearest neighbors from a list of distances. |
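Combined with the distance helper above, this gives a simple nearest-neighbour ranking; a sketch assuming both helpers are in scope.
distances = distances_from_embeddings(
    [1.0, 0.0], [[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]], distance_metric="cosine"
)
print(indices_of_nearest_neighbors_from_distances(distances))
# [1 2 0]: index 1 is the exact match, index 0 is the farthest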
1,426 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `pca_components_from_embeddings` function. Write a Python function `def pca_components_from_embeddings( embeddings: List[List[float]], n_components=2 ) -> np.ndarray` to solve the following problem:
Return the PCA components of a list of embeddings.
Here is the function:
def pca_components_from_embeddings(
embeddings: List[List[float]], n_components=2
) -> np.ndarray:
"""Return the PCA components of a list of embeddings."""
pca = PCA(n_components=n_components)
array_of_embeddings = np.array(embeddings)
return pca.fit_transform(array_of_embeddings) | Return the PCA components of a list of embeddings. |
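A hedged sketch using random vectors as stand-ins for real embeddings, assuming the helper and the numpy import above are in scope.
rng = np.random.default_rng(0)
fake_embeddings = rng.normal(size=(100, 1536)).tolist()  # 100 embedding-sized vectors

components = pca_components_from_embeddings(fake_embeddings, n_components=2)
print(components.shape)  # (100, 2)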
1,427 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `tsne_components_from_embeddings` function. Write a Python function `def tsne_components_from_embeddings( embeddings: List[List[float]], n_components=2, **kwargs ) -> np.ndarray` to solve the following problem:
Returns t-SNE components of a list of embeddings.
Here is the function:
def tsne_components_from_embeddings(
embeddings: List[List[float]], n_components=2, **kwargs
) -> np.ndarray:
"""Returns t-SNE components of a list of embeddings."""
# use better defaults if not specified
if "init" not in kwargs.keys():
kwargs["init"] = "pca"
if "learning_rate" not in kwargs.keys():
kwargs["learning_rate"] = "auto"
tsne = TSNE(n_components=n_components, **kwargs)
array_of_embeddings = np.array(embeddings)
return tsne.fit_transform(array_of_embeddings) | Returns t-SNE components of a list of embeddings. |
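A similar hedged sketch for t-SNE; perplexity must be smaller than the number of samples, so it is passed explicitly here through **kwargs.
rng = np.random.default_rng(0)
fake_embeddings = rng.normal(size=(50, 256)).tolist()

components = tsne_components_from_embeddings(fake_embeddings, n_components=2, perplexity=10)
print(components.shape)  # (50, 2)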
1,428 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `chart_from_components` function. Write a Python function `def chart_from_components( components: np.ndarray, labels: Optional[List[str]] = None, strings: Optional[List[str]] = None, x_title="Component 0", y_title="Component 1", mark_size=5, **kwargs, )` to solve the following problem:
Return an interactive 2D chart of embedding components.
Here is the function:
def chart_from_components(
components: np.ndarray,
labels: Optional[List[str]] = None,
strings: Optional[List[str]] = None,
x_title="Component 0",
y_title="Component 1",
mark_size=5,
**kwargs,
):
"""Return an interactive 2D chart of embedding components."""
empty_list = ["" for _ in components]
data = pd.DataFrame(
{
x_title: components[:, 0],
y_title: components[:, 1],
"label": labels if labels else empty_list,
"string": ["<br>".join(tr.wrap(string, width=30)) for string in strings]
if strings
else empty_list,
}
)
chart = px.scatter(
data,
x=x_title,
y=y_title,
color="label" if labels else None,
symbol="label" if labels else None,
hover_data=["string"] if strings else None,
**kwargs,
).update_traces(marker=dict(size=mark_size))
return chart | Return an interactive 2D chart of embedding components. |
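A hedged plotting sketch that feeds the PCA helper's output into the chart builder (plotly required); the labels and hover strings are invented for illustration, and extra keyword arguments such as `title` are forwarded to px.scatter.
rng = np.random.default_rng(0)
components = pca_components_from_embeddings(rng.normal(size=(20, 64)).tolist(), n_components=2)

chart = chart_from_components(
    components,
    labels=["even" if i % 2 == 0 else "odd" for i in range(20)],
    strings=[f"example text number {i}" for i in range(20)],
    title="Toy embedding map",
)
chart.show()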
1,429 | import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import openai
from openai.datalib import numpy as np
from openai.datalib import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `chart_from_components_3D` function. Write a Python function `def chart_from_components_3D( components: np.ndarray, labels: Optional[List[str]] = None, strings: Optional[List[str]] = None, x_title: str = "Component 0", y_title: str = "Component 1", z_title: str = "Component 2", mark_size: int = 5, **kwargs, )` to solve the following problem:
Return an interactive 3D chart of embedding components.
Here is the function:
def chart_from_components_3D(
components: np.ndarray,
labels: Optional[List[str]] = None,
strings: Optional[List[str]] = None,
x_title: str = "Component 0",
y_title: str = "Component 1",
z_title: str = "Compontent 2",
mark_size: int = 5,
**kwargs,
):
"""Return an interactive 3D chart of embedding components."""
empty_list = ["" for _ in components]
data = pd.DataFrame(
{
x_title: components[:, 0],
y_title: components[:, 1],
z_title: components[:, 2],
"label": labels if labels else empty_list,
"string": ["<br>".join(tr.wrap(string, width=30)) for string in strings]
if strings
else empty_list,
}
)
chart = px.scatter_3d(
data,
x=x_title,
y=y_title,
z=z_title,
color="label" if labels else None,
symbol="label" if labels else None,
hover_data=["string"] if strings else None,
**kwargs,
).update_traces(marker=dict(size=mark_size))
return chart | Return an interactive 3D chart of embedding components. |
1,430 | import asyncio
import json
import platform
import sys
import threading
import warnings
from contextlib import asynccontextmanager
from json import JSONDecodeError
from typing import (
AsyncGenerator,
AsyncIterator,
Dict,
Iterator,
Optional,
Tuple,
Union,
overload,
)
from urllib.parse import urlencode, urlsplit, urlunsplit
import aiohttp
import requests
import openai
from openai import error, util, version
from openai.openai_response import OpenAIResponse
from openai.util import ApiType
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment)) | null |
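A hedged illustration of the query merging when the base URL already carries a query string, assuming `_build_api_url` is in scope; the endpoint and cursor value are illustrative.
from urllib.parse import urlencode

url = "https://api.openai.com/v1/fine-tunes?limit=5"
print(_build_api_url(url, urlencode({"after": "ft-abc123"})))
# https://api.openai.com/v1/fine-tunes?limit=5&after=ft-abc123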
1,431 | import asyncio
import json
import platform
import sys
import threading
import warnings
from contextlib import asynccontextmanager
from json import JSONDecodeError
from typing import (
AsyncGenerator,
AsyncIterator,
Dict,
Iterator,
Optional,
Tuple,
Union,
overload,
)
from urllib.parse import urlencode, urlsplit, urlunsplit
import aiohttp
import requests
import openai
from openai import error, util, version
from openai.openai_response import OpenAIResponse
from openai.util import ApiType
The provided code snippet includes necessary dependencies for implementing the `_aiohttp_proxies_arg` function. Write a Python function `def _aiohttp_proxies_arg(proxy) -> Optional[str]` to solve the following problem:
Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request'.
Here is the function:
def _aiohttp_proxies_arg(proxy) -> Optional[str]:
"""Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return proxy
elif isinstance(proxy, dict):
return proxy["https"] if "https" in proxy else proxy["http"]
else:
raise ValueError(
"'openai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
) | Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request. |
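A short hedged check of the three accepted proxy spellings, assuming the helper is in scope; the proxy hosts are placeholders.
print(_aiohttp_proxies_arg(None))                                   # None
print(_aiohttp_proxies_arg("http://proxy.local:8080"))              # strings pass through unchanged
print(_aiohttp_proxies_arg({"http": "http://proxy.local:8080",
                            "https": "https://proxy.local:8443"}))  # the https entry wins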