code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def _initialize_affine_weight(weight, output_size, input_size,
per_partition_size, partition_dim, init_method,
stride=1, return_master_weight=False):
"""Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk."""
# If we only use 1 process for model parallelism, bypass scatter.
world_size = get_model_parallel_world_size()
if world_size == 1:
init_method(weight)
if return_master_weight:
return weight
return None
# Initialize master weight
master_weight = torch.empty(output_size, input_size,
dtype=weight.dtype,
requires_grad=False)
init_method(master_weight)
# Split and copy
per_partition_per_stride_size = divide(per_partition_size, stride)
weight_list = torch.split(master_weight, per_partition_per_stride_size,
dim=partition_dim)
rank = get_model_parallel_rank()
my_weight_list = weight_list[rank::world_size]
with torch.no_grad():
torch.cat(my_weight_list, dim=partition_dim, out=weight)
if return_master_weight:
return master_weight
return None
|
Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk.
|
_initialize_affine_weight
|
python
|
THUDM/GLM
|
mpu/layers.py
|
https://github.com/THUDM/GLM/blob/master/mpu/layers.py
|
MIT
|
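For reference, a minimal single-process sketch of the split-and-copy step above; `world_size`, `rank`, and `stride` are fixed to assumed values so no `torch.distributed` setup is needed.

```python
import torch

# Illustrative sketch (not from the repo): mimic _initialize_affine_weight's
# split-and-copy for an assumed world_size of 2, rank 0, stride 1.
output_size, input_size = 8, 4
world_size, rank, partition_dim, stride = 2, 0, 0, 1

master_weight = torch.empty(output_size, input_size, requires_grad=False)
torch.nn.init.normal_(master_weight, mean=0.0, std=0.02)

per_partition_size = output_size // world_size               # rows owned by each rank
per_partition_per_stride_size = per_partition_size // stride
weight_list = torch.split(master_weight, per_partition_per_stride_size, dim=partition_dim)
my_weight_list = weight_list[rank::world_size]               # strided selection, as in the source

weight = torch.empty(per_partition_size, input_size)
with torch.no_grad():
    torch.cat(my_weight_list, dim=partition_dim, out=weight)
print(weight.shape)  # torch.Size([4, 4])
```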
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=group)
return input_
|
All-reduce the input tensor across the model parallel group.
|
_reduce
|
python
|
THUDM/GLM
|
mpu/mappings.py
|
https://github.com/THUDM/GLM/blob/master/mpu/mappings.py
|
MIT
|
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = torch.distributed.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = torch.distributed.get_rank(group=group)
output = input_list[rank].contiguous()
return output
|
Split the tensor along its last dimension and keep the
corresponding slice.
|
_split
|
python
|
THUDM/GLM
|
mpu/mappings.py
|
https://github.com/THUDM/GLM/blob/master/mpu/mappings.py
|
MIT
|
def _gather(input_):
"""Gather tensors and concatinate along the last dimension."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
|
Gather tensors and concatenate along the last dimension.
|
_gather
|
python
|
THUDM/GLM
|
mpu/mappings.py
|
https://github.com/THUDM/GLM/blob/master/mpu/mappings.py
|
MIT
|
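A single-process sketch of how `_split` and `_gather` partner with each other: keep one slice of the last dimension per rank, then concatenate the slices back. The `world_size` of 4 and the tensor shape are assumptions.

```python
import torch

# Assumed world_size; no torch.distributed needed for this illustration.
world_size = 4
x = torch.arange(2 * 8, dtype=torch.float32).reshape(2, 8)

# _split keeps one slice of the last dimension per rank.
last_dim_size = x.size(-1) // world_size
chunks = torch.split(x, last_dim_size, dim=-1)
rank0_slice = chunks[0].contiguous()        # what rank 0 would keep

# _gather concatenates all slices back along the last dimension.
restored = torch.cat(chunks, dim=-1)
assert torch.equal(restored, x)
```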
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for 4+ GPU cases.
"""
if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with device_ctx_manager(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device('cuda')
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device('cuda', device)
def cb():
idx = device.index
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state)
_lazy_call(cb)
|
Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for 4+ GPU cases.
|
_set_cuda_rng_state
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
|
Set to the initial state (no tracker).
|
reset
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
|
Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary.
|
get_states
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception('cuda rng state {} is not added'.format(name))
# Store current rng state.
orig_cuda_rng_state = torch.cuda.get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = torch.cuda.get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
|
Fork the cuda rng state, perform operations, and exit with
the original state.
|
fork
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
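A hedged CPU analogue of the tracker's `fork` pattern, using `torch.get_rng_state`/`torch.set_rng_state` in place of the CUDA state calls; the helper names below are illustrative, not the repo's API.

```python
import contextlib
import torch

# Swap in a saved RNG state, run some ops, persist the advanced state,
# then restore the original stream (CPU analogue of the CUDA tracker).
_states = {}

def add_state(name, seed):
    orig = torch.get_rng_state()
    torch.manual_seed(seed)
    _states[name] = torch.get_rng_state()
    torch.set_rng_state(orig)

@contextlib.contextmanager
def fork(name="model-parallel-rng"):
    orig_state = torch.get_rng_state()
    torch.set_rng_state(_states[name])
    try:
        yield
    finally:
        _states[name] = torch.get_rng_state()
        torch.set_rng_state(orig_state)

add_state("model-parallel-rng", seed=1234)
with fork():
    a = torch.rand(2)   # drawn from the forked stream
b = torch.rand(2)       # drawn from the original stream, unaffected
```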
def model_parallel_cuda_manual_seed(seed):
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
after this function. Basically, this is a replacement for that
function.
Two sets of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
different model parallel groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
model_parallel_seed = offset + get_model_parallel_rank()
# Data parallel gets the original seed.
data_parallel_seed = seed
if torch.distributed.get_rank() == 0:
print('> initializing model parallel cuda seeds on global rank {}, '
'model parallel rank {}, and data parallel rank {} with '
'model parallel seed: {} and data parallel seed: {}'.format(
torch.distributed.get_rank(), get_model_parallel_rank(),
get_data_parallel_rank(), model_parallel_seed,
data_parallel_seed), flush=True)
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
torch.cuda.manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
model_parallel_seed)
|
Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
after this function. Basically, this is a replacement for that
function.
Two sets of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
different model parallel groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
|
model_parallel_cuda_manual_seed
|
python
|
THUDM/GLM
|
mpu/random.py
|
https://github.com/THUDM/GLM/blob/master/mpu/random.py
|
MIT
|
def _transpose_for_scores(self, tensor):
"""Transpose a 3D tensor [b, s, np*hn] into a 4D tensor with
size [b, np, s, hn].
"""
new_tensor_shape = tensor.size()[:-1] + \
(self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head)
tensor = tensor.view(*new_tensor_shape)
return tensor.permute(0, 2, 1, 3)
|
Transpose a 3D tensor [b, s, np*hn] into a 4D tensor with
size [b, np, s, hn].
|
_transpose_for_scores
|
python
|
THUDM/GLM
|
mpu/transformer.py
|
https://github.com/THUDM/GLM/blob/master/mpu/transformer.py
|
MIT
|
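A standalone sketch of the `[b, s, np*hn] -> [b, np, s, hn]` reshape, with assumed sizes (batch 2, sequence 5, 3 heads of dimension 4).

```python
import torch

b, s, num_heads, head_dim = 2, 5, 3, 4
x = torch.randn(b, s, num_heads * head_dim)

new_shape = x.size()[:-1] + (num_heads, head_dim)
y = x.view(*new_shape).permute(0, 2, 1, 3)
print(y.shape)  # torch.Size([2, 3, 5, 4])
```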
def scaled_init_method(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
|
Init method based on N(0, sigma/sqrt(2*num_layers)).
|
scaled_init_method
|
python
|
THUDM/GLM
|
mpu/transformer.py
|
https://github.com/THUDM/GLM/blob/master/mpu/transformer.py
|
MIT
|
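A usage sketch for `scaled_init_method`, with `sigma` and `num_layers` set to assumed example values.

```python
import math
import torch

sigma, num_layers = 0.02, 24
std = sigma / math.sqrt(2.0 * num_layers)   # what scaled_init_method computes

def init_(tensor):
    return torch.nn.init.normal_(tensor, mean=0.0, std=std)

w = torch.empty(256, 256)
init_(w)
print(w.std())  # roughly 0.0029 for these values
```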
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
|
Ensure that numerator is divisible by the denominator and return
the division value.
|
divide
|
python
|
THUDM/GLM
|
mpu/utils.py
|
https://github.com/THUDM/GLM/blob/master/mpu/utils.py
|
MIT
|
def split_tensor_along_last_dim(tensor, num_partitions,
contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
|
Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
|
split_tensor_along_last_dim
|
python
|
THUDM/GLM
|
mpu/utils.py
|
https://github.com/THUDM/GLM/blob/master/mpu/utils.py
|
MIT
|
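A usage sketch of the last-dimension split with assumed sizes; it also shows why the `contiguous_split_chunks` option exists.

```python
import torch

x = torch.arange(24.).reshape(3, 8)
num_partitions = 4
last_dim_size = x.size(-1) // num_partitions   # what divide() returns after its check
chunks = torch.split(x, last_dim_size, dim=-1)
print([c.shape for c in chunks])               # four chunks of torch.Size([3, 2])
print(chunks[0].is_contiguous())               # False: views into x, hence .contiguous()
```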
def update_cmd(cmd, config):
'''
@param cmd str
@param config dict of option names to values
'''
for k, v in config.items():
if v is None:
continue
if type(v) == bool:
if v:
cmd += "--{} ".format(k)
else:
cmd += "--{} {} ".format(k, v)
return cmd
|
@param cmd str
@param config dict of option names to values
|
update_cmd
|
python
|
THUDM/GLM
|
scripts/dispatcher.py
|
https://github.com/THUDM/GLM/blob/master/scripts/dispatcher.py
|
MIT
|
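A usage sketch for `update_cmd`; the function is repeated verbatim so the snippet runs standalone, and the config values are made up.

```python
def update_cmd(cmd, config):
    for k, v in config.items():
        if v is None:
            continue
        if type(v) == bool:
            if v:
                cmd += "--{} ".format(k)
        else:
            cmd += "--{} {} ".format(k, v)
    return cmd

cmd = update_cmd("python finetune.py ", {"batch-size": 16, "fp16": True, "resume": None})
print(cmd)  # 'python finetune.py --batch-size 16 --fp16 ' (note the trailing space)
```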
def clean_text(text):
"""Remove new lines and multiple spaces and adjust end of sentence dot."""
text = text.replace("\n", " ")
text = re.sub(r'\s+', ' ', text)
for _ in range(3):
text = text.replace(' . ', '. ')
return text
|
Remove newlines and repeated whitespace, and normalize spacing around sentence-ending periods.
|
clean_text
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
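A usage sketch for `clean_text`, repeated verbatim so it runs standalone, with a made-up input.

```python
import re

def clean_text(text):
    text = text.replace("\n", " ")
    text = re.sub(r'\s+', ' ', text)
    for _ in range(3):
        text = text.replace(' . ', '. ')
    return text

print(clean_text("First line .\nSecond   line follows."))
# -> 'First line. Second line follows.'
```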
def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict] = None, idx=-1,
num_choices=1):
"""
Create a new InputExample.
:param guid: a unique textual identifier
:param text_a: the sequence of text
:param text_b: an optional, second sequence of text
:param label: an optional label
:param logits: an optional list of per-class logits
:param meta: an optional dictionary to store arbitrary meta information
:param idx: an optional numeric index
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.logits = logits
self.idx = idx
self.num_choices = num_choices
self.meta = meta if meta else {}
|
Create a new InputExample.
:param guid: a unique textual identifier
:param text_a: the sequence of text
:param text_b: an optional, second sequence of text
:param label: an optional label
:param logits: an optional list of per-class logits
:param meta: an optional dictionary to store arbitrary meta information
:param idx: an optional numeric index
|
__init__
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
def build_sample(ids, types=None, paddings=None, positions=None, masks=None, label=None, unique_id=None, target=None,
logit_mask=None, segment_ids=None, prompt_ids=None):
"""Convert to numpy and return a sample consumed by the batch producer."""
ids_np = np.array(ids, dtype=np.int64)
sample = {'text': ids_np, 'label': int(label)}
if types is not None:
types_np = np.array(types, dtype=np.int64)
sample['types'] = types_np
if paddings is not None:
paddings_np = np.array(paddings, dtype=np.int64)
sample['padding_mask'] = paddings_np
if positions is not None:
positions_np = np.array(positions, dtype=np.int64)
sample['position'] = positions_np
if masks is not None:
masks_np = np.array(masks, dtype=np.int64)
sample['mask'] = masks_np
if target is not None:
target_np = np.array(target, dtype=np.int64)
sample['target'] = target_np
if logit_mask is not None:
logit_mask_np = np.array(logit_mask, dtype=np.int64)
sample['logit_mask'] = logit_mask_np
if segment_ids is not None:
segment_ids = np.array(segment_ids, dtype=np.int64)
sample['segment_id'] = segment_ids
if prompt_ids is not None:
prompt_ids = np.array(prompt_ids, dtype=np.int64)
sample['prompt_pos'] = prompt_ids
if unique_id is not None:
sample['uid'] = unique_id
return sample
|
Convert to numpy and return a sample consumed by the batch producer.
|
build_sample
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
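An illustration of the sample layout `build_sample` produces for a minimal call; the token ids and label are made-up values, and only two of the optional fields are shown.

```python
import numpy as np

ids, positions, label = [101, 7, 8, 102], [0, 1, 2, 3], 1
sample = {
    'text': np.array(ids, dtype=np.int64),        # always present
    'position': np.array(positions, dtype=np.int64),
    'label': int(label),
}
print({k: (v.dtype if isinstance(v, np.ndarray) else type(v)) for k, v in sample.items()})
```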
def build_data_loader(dataset, batch_size, num_workers, drop_last, shuffle=True, only_rank0=False):
"""Data loader. Note that batch-size is the local (per GPU) batch-size."""
# Sampler.
if only_rank0:
rank, world_size = 0, 1
else:
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=shuffle)
# Data loader. Note that batch size is the per GPU batch size.
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=sampler,
shuffle=False,
num_workers=num_workers,
drop_last=drop_last,
pin_memory=True,
collate_fn=my_collate)
return data_loader
|
Data loader. Note that batch-size is the local (per GPU) batch-size.
|
build_data_loader
|
python
|
THUDM/GLM
|
tasks/data_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/data_utils.py
|
MIT
|
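A single-process sketch of the sampler setup in `build_data_loader`: passing `num_replicas` and `rank` explicitly lets `DistributedSampler` work without initializing `torch.distributed`. The dataset, batch size, and worker count are assumed, and the repo's `my_collate` collate function is omitted.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

dataset = TensorDataset(torch.arange(100))
sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)
loader = DataLoader(dataset, batch_size=8, sampler=sampler,
                    shuffle=False, num_workers=0, drop_last=False, pin_memory=False)
print(len(loader))  # 7 batches over the 50 samples assigned to rank 0
```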
def multichoice_evaluate(model, dataloader, example_dict, args):
"""Calculate correct over total answers and return prediction if the
`output_predictions` is true."""
model.eval()
results = {}
with torch.no_grad():
# For all the batches in the dataset.
for _, batch in enumerate(dataloader):
# Run the model forward.
data = process_batch(batch, args)
if args.pretrained_bert:
tokens, types, labels_, attention_mask = data['text'], data['types'], data['label'], data[
'padding_mask']
inputs = [tokens, types, attention_mask]
elif args.cloze_eval:
tokens, labels_, position_ids = data['text'], data['label'], data['position']
attention_mask, target_ids, logit_mask = data['mask'], data['target'], data['logit_mask']
if not args.fast_decode:
inputs = [tokens, position_ids, attention_mask, target_ids, logit_mask]
if args.continuous_prompt:
prompt_pos = data["prompt_pos"]
inputs.append(prompt_pos)
else:
dec_input_ids, dec_position_ids, dec_attention_mask = data['dec_text'], data['dec_position'], data[
'dec_mask']
dec_target_ids, dec_logit_mask = data['dec_target'], data['dec_logit_mask']
inputs = [tokens, position_ids, attention_mask, dec_input_ids, dec_position_ids, dec_attention_mask,
dec_target_ids, dec_logit_mask]
else:
tokens, labels_, position_ids, attention_mask = data['text'], data['label'], data['position'], data[
'mask']
inputs = [tokens, position_ids, attention_mask]
if len(inputs[0].shape) == 3 and inputs[0].size(1) > segment_length:
logit_list = []
for i in range((inputs[0].size(1) - 1) // segment_length + 1):
input_batch = [arg[:, i * segment_length: (i + 1) * segment_length] for arg in inputs]
if args.pretrained_bert:
logits = model(*input_batch)
else:
logits, *mems = model(*input_batch)
logit_list.append(logits)
logits = torch.cat(logit_list, dim=1)
elif args.cloze_eval and args.fast_decode:
logit_list = []
num_choices = inputs[3].size(1)
for i in range((num_choices - 1) // segment_length + 1):
input_batch = inputs[:3] + [arg[:, i * segment_length: (i + 1) * segment_length] for arg in
inputs[3:]]
logits, *mems = model(*input_batch)
logit_list.append(logits)
logits = torch.cat(logit_list, dim=1)
else:
if args.pretrained_bert:
logits = model(*inputs)
else:
logits, *mems = model(*inputs)
if "segment_id" in data:
from torch_scatter import scatter_sum
if "loss_mask" in data:
logits = logits * data["loss_mask"]
logits = scatter_sum(logits, data["segment_id"], dim=1)
elif "loss_mask" in data:
loss_mask = data["loss_mask"]
logits = logits * loss_mask - 10000.0 * (1.0 - loss_mask)
uid_list = batch['uid']
if isinstance(uid_list, torch.Tensor):
uid_list = uid_list.cpu().numpy().tolist()
predicted = torch.argmax(logits, dim=-1).tolist()
labels = labels_.tolist()
if args.task.lower() == 'wsc':
predicted = [1 if pred == 0 else 0 for pred in predicted]
for uid, prediction, label in zip(uid_list, predicted, labels):
results[uid] = (prediction, label)
model.train()
torch.distributed.barrier()
results_gathered = [None for _ in range(mpu.get_data_parallel_world_size())]
torch.distributed.all_gather_object(results_gathered, results, group=mpu.get_data_parallel_group())
results = {}
for result in results_gathered:
results.update(result)
predictions, labels, examples = [], [], []
for uid, example in example_dict.items():
prediction, label = results[uid]
predictions.append(prediction)
labels.append(label)
examples.append(example)
torch.distributed.barrier()
return predictions, labels, examples
|
Calculate correct over total answers and return the predictions if
`output_predictions` is true.
|
multichoice_evaluate
|
python
|
THUDM/GLM
|
tasks/eval_utils.py
|
https://github.com/THUDM/GLM/blob/master/tasks/eval_utils.py
|
MIT
|
def evaluate_and_print_results(data_loader, model, eval_metric, args):
"""Evaluate and print results on screen."""
# Evaluate and get results.
output, _ = evaluate(model, data_loader, eval_metric, args)
string = ""
if eval_metric == 'loss':
output = output['loss']
num_tokenized_tokens = data_loader.dataset.num_tokenized_tokens
num_original_tokens = data_loader.dataset.num_original_tokens
val_loss = output / (num_tokenized_tokens - 1)
ppl = math.exp(min(20, val_loss))
token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1)
adjusted_ppl = math.exp(min(20, val_loss * token_ratio))
string += 'avg loss: {:.4E} | '.format(val_loss)
string += 'ppl: {:.4E} | '.format(ppl)
string += 'adjusted ppl: {:.4E} | '.format(adjusted_ppl)
string += 'token ratio: {} |'.format(token_ratio)
score_dict = {"avg loss": val_loss, "ppl": ppl, "adjusted ppl": adjusted_ppl}
elif eval_metric == 'accuracy':
output = output['accuracy']
num_examples = len(data_loader.dataset)
acc = output / num_examples * 100
string += 'number correct: {} | '.format(output)
string += 'total examples: {} | '.format(num_examples)
string += 'avg accuracy: {:.2f}'.format(acc)
score_dict = {"accuracy": acc}
else:
raise NotImplementedError('evaluation method for {} metric is not '
'implemented yet.'.format(eval_metric))
length = len(string) + 1
print_rank_0('-' * length)
print_rank_0(string)
print_rank_0('-' * length)
return score_dict
|
Evaluate and print results on screen.
|
evaluate_and_print_results
|
python
|
THUDM/GLM
|
tasks/language_model/finetune.py
|
https://github.com/THUDM/GLM/blob/master/tasks/language_model/finetune.py
|
MIT
|
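A worked example of the loss-metric branch above, with an assumed summed loss and token counts.

```python
import math

total_loss = 6930.0
num_tokenized_tokens, num_original_tokens = 2001, 1801

val_loss = total_loss / (num_tokenized_tokens - 1)                    # 3.465
ppl = math.exp(min(20, val_loss))                                     # ~31.98
token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1)  # ~1.111
adjusted_ppl = math.exp(min(20, val_loss * token_ratio))              # ~47.0
print(f"avg loss {val_loss:.4f} | ppl {ppl:.2f} | adjusted ppl {adjusted_ppl:.2f}")
```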
def process_batch(batch, args):
"""Process batch and produce inputs for the model."""
if 'mask' in batch:
# finetune SQuAD
batch['attention_mask'] = batch.pop('mask')
batch['position_id'] = batch.pop('position')
tokens = batch['text'].long().cuda()
attention_mask = batch['attention_mask'].long().cuda()
position_ids = batch['position_id'].long().cuda()
if tokens.dim() == 3:
tokens = tokens.squeeze(1)
attention_mask = attention_mask.squeeze(1)
position_ids = position_ids.squeeze(1)
return tokens, attention_mask, position_ids
|
Process batch and produce inputs for the model.
|
process_batch
|
python
|
THUDM/GLM
|
tasks/seq2seq/evaluate.py
|
https://github.com/THUDM/GLM/blob/master/tasks/seq2seq/evaluate.py
|
MIT
|
def evaluate(self, model, dataloader, example_dict, args):
"""Calculate correct over total answers and return prediction if the
`output_predictions` is true."""
model.eval()
local_predictions = {}
print_rank_0("Distributed store created")
with torch.no_grad():
# For all the batches in the dataset.
for idx, data in enumerate(dataloader):
tokens, attention_mask, position_ids = process_batch(data, args)
batch_size = tokens.size(0)
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=args.out_seq_length,
num_beams=args.num_beams,
device=tokens.device,
length_penalty=args.length_penalty,
do_early_stopping=False,
)
beam_scores = torch.zeros((batch_size, args.num_beams), dtype=torch.float, device=tokens.device)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * args.num_beams,))
# Run the model forward.
counter = 0
context_length = tokens.size(1)
while counter < args.tgt_seq_length:
if counter == 0:
next_token_logits, *mems = model(tokens, position_ids, attention_mask, return_memory=True)
seq_length = next_token_logits.size(1)
next_token_logits = next_token_logits[:, -1]
next_token_logits = next_token_logits.unsqueeze(1).repeat(1, args.num_beams, 1).view(
batch_size * args.num_beams, -1)
mems = [mem.unsqueeze(1).repeat(1, args.num_beams, 1, 1).view(batch_size * args.num_beams,
seq_length, -1) for mem in mems]
position_ids = tokens.new_ones(batch_size, args.num_beams, 2, 1)
for i, text in enumerate(tokens.tolist()):
mask_pos = text.index(self.mask_token)
position_ids[i, :, 0] = mask_pos
position_ids = position_ids.reshape(batch_size * args.num_beams, 2, 1)
tokens = tokens.new_zeros(batch_size * args.num_beams, 0)
else:
if not args.no_block_position:
position_ids[:, 1] = counter + 1
last_token = tokens[:, -1:]
if self.mask_pad_token:
cur_attention_mask = attention_mask[:, :, -1:, :].unsqueeze(1).expand(-1, args.num_beams, -1,
-1, -1).reshape(
batch_size * args.num_beams, 1, 1, context_length)
cur_attention_mask = torch.cat(
(cur_attention_mask, attention_mask.new_ones((batch_size * args.num_beams, 1, 1, counter))),
dim=-1)
else:
cur_attention_mask = tokens.new_zeros([batch_size * args.num_beams])
next_token_logits, *mems = model(last_token, position_ids, cur_attention_mask, *mems,
return_memory=True)
next_token_logits = next_token_logits[:, -1]
next_token_logits = top_k_logits(next_token_logits, top_k=args.top_k, top_p=args.top_p)
next_token_scores = F.log_softmax(next_token_logits, dim=-1)
next_token_scores = self.processors(tokens, next_token_scores)
next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, args.num_beams * vocab_size)
probs = F.softmax(next_token_scores, dim=-1)
if args.select_topk:
_, next_tokens = torch.topk(probs, k=2 * args.num_beams, dim=-1, largest=True)
else:
next_tokens = torch.multinomial(probs, num_samples=2 * args.num_beams)
next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, _indices)
next_indices = next_tokens // vocab_size
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
tokens,
next_token_scores,
next_tokens,
next_indices,
eos_token_id=self.end_token,
pad_token_id=self.pad_token
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
beam_next_tokens = beam_next_tokens.unsqueeze(-1)
tokens = torch.cat([tokens[beam_idx, :], beam_next_tokens], dim=-1)
mems = [mem[beam_idx] for mem in mems] if mems else []
if beam_scorer.is_done:
break
counter += 1
tokens, _, scores = beam_scorer.finalize(tokens, beam_scores, next_tokens, next_indices,
eos_token_id=self.end_token, pad_token_id=self.pad_token)
uid_list = data['uid']
if isinstance(uid_list, torch.Tensor):
uid_list = uid_list.cpu().numpy().tolist()
predictions = []
for i, text in enumerate(tokens.tolist()):
text = [token for token in text if token not in [self.end_token, self.pad_token]]
if args.task in ['squad', 'squad_v1'] and args.tokenizer_model_type.startswith('bert'):
uid = uid_list[i]
example = example_dict[uid]
text = squad_decode(example, text, self.tokenizer)
else:
text = self.tokenizer.DecodeIds(text)
predictions.append(text)
for uid, prediction in zip(uid_list, predictions):
local_predictions[uid] = prediction
if (idx + 1) % args.log_interval == 0:
print_rank_0(f"Iteration {idx + 1} / {len(dataloader)}")
model.train()
torch.distributed.barrier()
print_rank_0("Evaluation completed")
gathered_predictions = [None for i in range(torch.distributed.get_world_size())]
torch.distributed.all_gather_object(gathered_predictions, local_predictions)
gathered_predictions = {uid: pred for preds in gathered_predictions for uid, pred in preds.items() }
predictions, examples, scores = [], [], []
for uid, example in example_dict.items():
prediction = gathered_predictions[uid]
predictions.append(prediction)
examples.append(example)
torch.distributed.barrier()
return predictions, [], examples
|
Calculate correct over total answers and return the predictions if
`output_predictions` is true.
|
evaluate
|
python
|
THUDM/GLM
|
tasks/seq2seq/evaluate.py
|
https://github.com/THUDM/GLM/blob/master/tasks/seq2seq/evaluate.py
|
MIT
|
def clean_text(text):
"""Remove new lines and multiple spaces and adjust end of sentence dot."""
text = text.replace("\n", " ")
text = re.sub(r'\s+', ' ', text)
for _ in range(3):
text = text.replace(' . ', '. ')
return text
|
Remove newlines and repeated whitespace, and normalize spacing around sentence-ending periods.
|
clean_text
|
python
|
THUDM/GLM
|
tasks/superglue/dataset.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/dataset.py
|
MIT
|
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return unidecode.unidecode(text.lower())
return white_space_fix(remove_articles(remove_punc(lower(s))))
|
Lower text and remove punctuation, articles and extra whitespace.
|
normalize_answer
|
python
|
THUDM/GLM
|
tasks/superglue/evaluate.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/evaluate.py
|
MIT
|
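A minimal sketch of `normalize_answer` that leaves out the `unidecode` transliteration step so only the standard library is needed.

```python
import re
import string

def normalize_answer(s):
    s = s.lower()                                                     # lower()
    s = ''.join(ch for ch in s if ch not in set(string.punctuation))  # remove_punc()
    s = re.sub(r'\b(a|an|the)\b', ' ', s)                             # remove_articles()
    return ' '.join(s.split())                                        # white_space_fix()

print(normalize_answer("The Quick, Brown Fox!"))  # 'quick brown fox'
```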
def multirc_em(predictions, labels, examples: List[InputExample]):
"""Compute the exact match (EM) for a sequence of predictions and actual labels"""
question_ids = [example.meta["question_idx"] for example in examples]
unique_questions = set(question_ids)
q_actuals = list(zip(question_ids, labels))
q_predictions = list(zip(question_ids, predictions))
actuals_per_question = defaultdict(list)
predictions_per_question = defaultdict(list)
for qid, val in q_actuals:
actuals_per_question[qid].append(val)
for qid, val in q_predictions:
predictions_per_question[qid].append(val)
em = 0
for qid in unique_questions:
if actuals_per_question[qid] == predictions_per_question[qid]:
em += 1
em /= len(unique_questions)
return em
|
Compute the exact match (EM) for a sequence of predictions and actual labels
|
multirc_em
|
python
|
THUDM/GLM
|
tasks/superglue/evaluate.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/evaluate.py
|
MIT
|
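A toy exact-match computation in the style of `multirc_em`; the question ids, labels, and predictions below are made up.

```python
from collections import defaultdict

question_ids = [0, 0, 1, 1]
labels       = [1, 0, 1, 1]
predictions  = [1, 0, 0, 1]

actuals, preds = defaultdict(list), defaultdict(list)
for qid, y in zip(question_ids, labels):
    actuals[qid].append(y)
for qid, p in zip(question_ids, predictions):
    preds[qid].append(p)

# A question counts only if every one of its answers is predicted correctly.
em = sum(actuals[q] == preds[q] for q in set(question_ids)) / len(set(question_ids))
print(em)  # 0.5: question 0 matches exactly, question 1 does not
```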
def __init__(self, args, tokenizer, label_list, max_seq_length, pattern_id: int = 0, verbalizer_file: str = None,
seed: int = 42, is_multi_token=False, max_segment_length=0, fast_decode: bool = False, split='train',
num_prompt_tokens=0):
"""
Create a new PVP.
:param args: the args
:param tokenizer: the tokenizer
:param label_list: the list of labels
:param max_seq_length: the maximum length of the sequence
:param pattern_id: the pattern id to use
:param seed: a seed to be used for generating random numbers if necessary
:param is_multi_token: if the verbalizers contain multiple tokens
:param fast_decode: whether to use the fast decode mode for multi-token tasks
:param continuous_prompt: whether to use continuous prompt optimization
"""
self.args = args
self.tokenizer = tokenizer
self.label_list = label_list
self.max_seq_length = max_seq_length
self.pattern_id = pattern_id
self.num_prompt_tokens = num_prompt_tokens
self.rng = random.Random(seed)
self.num_truncated = 0
self.fast_decode = fast_decode
self.split = split
self.max_dec_seq_length = 16
self._is_multi_token = is_multi_token
self.max_segment_length = max_segment_length
self.task_mask = args.task_mask
self.continuous_prompt = args.continuous_prompt
self.prefix_prompt = args.prefix_prompt
if self.continuous_prompt:
print_rank_0(f"Prompt tokens in pvp {self.num_prompt_tokens} spell length {self.spell_length}")
if verbalizer_file:
self.verbalize = PVP._load_verbalizer_from_file(verbalizer_file, self.pattern_id)
|
Create a new PVP.
:param args: the args
:param tokenizer: the tokenizer
:param label_list: the list of labels
:param max_seq_length: the maximum length of the sequence
:param pattern_id: the pattern id to use
:param seed: a seed to be used for generating random numbers if necessary
:param is_multi_token: if the verbalizers contain multiple tokens
:param fast_decode: whether to use the fast decode mode for multi-token tasks
:param continuous_prompt: whether to use continuous prompt optimization
|
__init__
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):
"""
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
"""
if not priming:
assert not labeled, "'labeled' can only be set to true if 'priming' is also set to true"
tokenizer = self.tokenizer
raw_parts_a, raw_parts_b = self.get_parts(example)
raw_parts_a = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_a]
prompt_id = tokenizer.num_tokens
def encode_input(raw_parts):
parts = []
for x, s in raw_parts:
if isinstance(x, str):
x = tokenizer.EncodeAsIds(x)
elif isinstance(x, int):
x = [prompt_id] * x
else:
pass
parts.append((x, s))
return parts
parts_a = encode_input(raw_parts_a)
if self.prefix_prompt > 0:
parts_a = [([prompt_id] * self.prefix_prompt, False)] + parts_a
parts_b = None
if raw_parts_b:
raw_parts_b = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_b]
parts_b = encode_input(raw_parts_b)
if self.is_multi_token:
answers = self.get_answers(example)
if example.label is not None:
label = self.label_list.index(example.label)
else:
label = 0
if not self.fast_decode:
ids_list, positions_list, sep_list, mask_list, target_list, prompt_list = [], [], [], [], [], []
segment_id_list = []
if priming:
answer = answers[label]
answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
self.num_truncated += self.truncate(parts_a, parts_b, answer_ids, max_length=self.max_seq_length)
tokens_a = [token_id for part, _ in parts_a for token_id in part]
tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None
input_ids = tokens_a
if tokens_b:
input_ids += tokens_b
if labeled:
mask_idx = input_ids.index(self.mask_id)
input_ids = input_ids[:mask_idx] + answer_ids + input_ids[mask_idx + 1:]
return input_ids
else:
for idx, answer in enumerate(answers):
this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b)
answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
answer_ids = answer_ids + [tokenizer.get_command('eop').Id]
self.num_truncated += self.truncate(this_parts_a, this_parts_b, answer_ids,
max_length=self.max_seq_length)
tokens_a = [token_id for part, _ in this_parts_a for token_id in part]
tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None
if self.max_segment_length > 0:
num_segments = (len(answer_ids) - 1) // self.max_segment_length + 1
segments = [
answer_ids[index * self.max_segment_length: (index + 1) * self.max_segment_length]
for
index in range(num_segments)]
segment_id_list += [idx] * len(segments)
else:
segments = [answer_ids]
for segment in segments:
data = build_input_from_ids(tokens_a, tokens_b, segment, self.max_seq_length,
self.tokenizer,
args=self.args, add_cls=True, add_sep=False, add_piece=True,
mask_id=self.mask_id)
ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]
ids = [idx if idx != prompt_id else 0 for idx in ids]
prompt_list.append(prompt_pos)
ids_list.append(ids)
positions_list.append(position_ids)
sep_list.append(sep)
target_list.append(target_ids)
mask_list.append(loss_masks)
if self.mask in tokens_a:
mask_pos = tokens_a.index(self.mask)
tokens_a = tokens_a[:mask_pos] + segment + tokens_a[mask_pos:]
else:
mask_pos = tokens_b.index(self.mask)
tokens_b = tokens_b[:mask_pos] + segment + tokens_b[mask_pos:]
segment_id_list = segment_id_list if segment_id_list else None
sample = build_sample(ids_list, positions=positions_list, masks=sep_list, label=label,
logit_mask=mask_list, target=target_list,
unique_id=example.guid, segment_ids=segment_id_list, prompt_ids=prompt_list)
return sample
else:
this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b)
self.num_truncated += self.truncate(this_parts_a, this_parts_b, None, max_length=self.max_seq_length)
tokens_a = [token_id for part, _ in this_parts_a for token_id in part]
tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None
data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer,
args=self.args, add_cls=True, add_sep=False, add_piece=False)
ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
sample = build_sample(ids, positions=position_ids, masks=sep, label=label, unique_id=example.guid)
ids_list, positions_list, mask_list, target_list, logit_mask_list = [], [], [], [], []
for answer in answers:
answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
answer_ids = answer_ids + [tokenizer.get_command('eop').Id]
answer_ids = answer_ids[:self.max_dec_seq_length]
data = build_decoder_input(ids, answer_ids, self.max_seq_length, self.max_dec_seq_length, tokenizer)
dec_ids, _, _, dec_position_ids, _, dec_target_ids, dec_loss_masks = data
ids_list.append(dec_ids)
positions_list.append(dec_position_ids)
mask_list.append(sep)
target_list.append(dec_target_ids)
logit_mask_list.append(dec_loss_masks)
sample = build_decoder_sample(sample, ids_list, positions_list, mask_list, target_list, logit_mask_list)
return sample
else:
self.num_truncated += self.truncate(parts_a, parts_b, [], max_length=self.max_seq_length)
tokens_a = [token_id for part, _ in parts_a for token_id in part]
tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None
if priming:
input_ids = tokens_a
if tokens_b:
input_ids += tokens_b
if labeled:
mask_idx = input_ids.index(self.mask_id)
verbalizer = self.verbalize(example.label)
assert len(verbalizer) == 1, 'priming only supports one verbalization per label'
verbalizer = verbalizer[0]
verbalizer_id = get_verbalization_ids(verbalizer, self.tokenizer, force_single_token=True)
input_ids[mask_idx] = verbalizer_id
return input_ids
data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer, args=self.args,
add_cls=True, add_sep=False, add_piece=True)
ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]
ids = [token if token != prompt_id else 0 for token in ids]
target_ids = self.get_verbalizer_ids()
if example.label is not None:
label = self.label_list.index(example.label)
else:
label = 0
sample = build_sample(ids=ids, positions=position_ids, target=target_ids, masks=sep, logit_mask=loss_masks,
label=label, unique_id=example.guid, prompt_ids=prompt_pos)
return sample
|
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
|
encode
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def truncate(self, parts_a: List[Tuple[List[int], bool]], parts_b: List[Tuple[List[int], bool]], answer: List[int],
max_length: int):
"""Truncate two sequences of text to a predefined total maximum length"""
total_len = self._seq_length(parts_a) + self._seq_length(parts_b)
if answer:
total_len += len(answer)
total_len += num_special_tokens_to_add(parts_a, parts_b, answer, add_cls=True, add_sep=False, add_piece=True)
num_tokens_to_remove = total_len - max_length
if num_tokens_to_remove <= 0:
return False
for _ in range(num_tokens_to_remove):
if self._seq_length(parts_a, only_shortenable=True) > self._seq_length(parts_b, only_shortenable=True):
self._remove_last(parts_a)
else:
self._remove_last(parts_b)
return True
|
Truncate two sequences of text to a predefined total maximum length
|
truncate
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):
"""
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
"""
if self.continuous_prompt or self.pattern_id < 2:
return super().encode(example, priming=priming, labeled=labeled)
if not priming:
assert not labeled, "'labeled' can only be set to true if 'priming' is also set to true"
tokenizer = self.tokenizer
premise = self.remove_final_punc(self.shortenable(example.text_a))
choice1 = " " + self.remove_final_punc(self.lowercase_first(example.meta['choice1']))
choice2 = " " + self.remove_final_punc(self.lowercase_first(example.meta['choice2']))
question = example.meta['question']
assert question in ['cause', 'effect']
answer = " because" if question == 'cause' else " so"
answer_ids = [get_verbalization_ids(answer, tokenizer, force_single_token=True)]
if self.is_multi_token:
answer_ids.append(tokenizer.get_command('eop').Id)
ids_list, positions_list, sep_list, mask_list, target_list = [], [], [], [], []
for choice in [choice1, choice2]:
parts = ['"', choice1[1:], '" or "', choice2[1:], '"?', premise, [self.mask], choice]
parts = [x if isinstance(x, tuple) else (x, False) for x in parts]
parts = [(tokenizer.EncodeAsIds(x).tokenization if isinstance(x, str) else x, s) for x, s in parts if
x]
self.num_truncated += self.truncate(parts, None, answer_ids, max_length=self.max_seq_length)
tokens_a = [token_id for part, _ in parts for token_id in part]
data = build_input_from_ids(tokens_a, None, answer_ids, self.max_seq_length, self.tokenizer, args=self.args,
add_cls=True, add_sep=False, add_piece=True)
ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
ids_list.append(ids)
positions_list.append(position_ids)
sep_list.append(sep)
target_list.append(target_ids)
mask_list.append(loss_masks)
if example.label is not None:
label = self.label_list.index(example.label)
else:
label = 0
sample = build_sample(ids_list, positions=positions_list, masks=sep_list, label=label,
logit_mask=mask_list, target=target_list,
unique_id=example.guid)
return sample
|
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
|
encode
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):
"""
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
"""
if self.args.loss_func in ['generative', 'mix']:
sample = super().encode(example, priming=priming, labeled=labeled)
if self.split == 'train':
sample['label'] = 0
return sample
if not priming:
assert not labeled, "'labeled' can only be set to true if 'priming' is also set to true"
tokenizer = self.tokenizer
prompt_id = tokenizer.num_tokens
raw_parts_a, raw_parts_b = self.get_parts(example)
raw_parts_a = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_a]
def encode_input(raw_parts):
parts = []
for x, s in raw_parts:
if isinstance(x, str):
x = tokenizer.EncodeAsIds(x)
elif isinstance(x, int):
x = [prompt_id] * x
else:
pass
parts.append((x, s))
return parts
parts_a = encode_input(raw_parts_a)
if self.prefix_prompt > 0:
parts_a = [([prompt_id] * self.prefix_prompt, False)] + parts_a
parts_b = None
if raw_parts_b:
raw_parts_b = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_b]
parts_b = encode_input(raw_parts_b)
answer = self.get_answers(example)[0]
answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False)
answer_ids = answer_ids + [tokenizer.get_command('eop').Id]
self.num_truncated += self.truncate(parts_a, parts_b, answer_ids, max_length=self.max_seq_length)
tokens_a = [token_id for part, _ in parts_a for token_id in part]
tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None
data = build_input_from_ids(tokens_a, tokens_b, answer_ids, self.max_seq_length, self.tokenizer, args=self.args,
add_cls=True, add_sep=False, add_piece=True)
ids, types, paddings, position_ids, sep, target_ids, loss_masks = data
prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id]
ids = [token if token != prompt_id else 0 for token in ids]
if example.label is not None:
label = self.label_list.index(example.label)
else:
label = 0
return {'text': np.array(ids, dtype=np.int64), 'target': np.array(target_ids, dtype=np.int64),
'attention_mask': np.array(sep, dtype=np.int64), 'loss_mask': np.array(loss_masks, dtype=np.int64),
"position_id": np.array(position_ids, dtype=np.int64),
'prompt_pos': np.array(prompt_pos, dtype=np.int64), 'label': label, 'uid': example.guid}
|
Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode
:param priming: whether to use this example for priming
:param labeled: if ``priming=True``, whether the label should be appended to this example
:return: A tuple, consisting of a list of input ids and a list of token type ids
|
encode
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def get_verbalization_ids(word: str, tokenizer, force_single_token: bool) -> Union[int, List[int]]:
"""
Get the token ids corresponding to a verbalization
:param word: the verbalization
:param tokenizer: the tokenizer to use
:param force_single_token: whether it should be enforced that the verbalization corresponds to a single token.
If set to true, this method returns a single int instead of a list and throws an error if the word
corresponds to multiple tokens.
:return: either the list of token ids or the single token id corresponding to this word
"""
ids = tokenizer.EncodeAsIds(word).tokenization
if not force_single_token:
return ids
assert len(ids) == 1, \
f'Verbalization "{word}" does not correspond to a single token, got {tokenizer.DecodeIds(ids)}'
verbalization_id = ids[0]
assert verbalization_id not in tokenizer.command_id_map, \
f'Verbalization {word} is mapped to a special token {tokenizer.IdToToken(verbalization_id)}'
return verbalization_id
|
Get the token ids corresponding to a verbalization
:param word: the verbalization
:param tokenizer: the tokenizer to use
:param force_single_token: whether it should be enforced that the verbalization corresponds to a single token.
If set to true, this method returns a single int instead of a list and throws an error if the word
corresponds to multiple tokens.
:return: either the list of token ids or the single token id corresponding to this word
|
get_verbalization_ids
|
python
|
THUDM/GLM
|
tasks/superglue/pvp.py
|
https://github.com/THUDM/GLM/blob/master/tasks/superglue/pvp.py
|
MIT
|
def search_github_code_byapi(token: str, peer_page: int = 50, page: int = 1, excludes: list = []) -> list[str]:
"""
curl -Ls -o response.json -H "Authorization: Bearer <token>" https://api.github.com/search/code?q=%22%2Fapi%2Fv1%2Fclient%2Fsubscribe%3Ftoken%3D%22&sort=indexed&order=desc&per_page=30&page=1
"""
if utils.isblank(token):
return []
peer_page, page = min(max(peer_page, 1), 100), max(1, page)
url = f"https://api.github.com/search/code?q=%22%2Fapi%2Fv1%2Fclient%2Fsubscribe%3Ftoken%3D%22&sort=indexed&order=desc&per_page={peer_page}&page={page}"
headers = {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {token}",
# "X-GitHub-Api-Version": "2022-11-28"
}
content, links = utils.http_get(url=url, headers=headers), set()
if utils.isblank(content):
return []
try:
items = json.loads(content).get("items", [])
excludes = list(set(excludes))
for item in items:
if not item or type(item) != dict:
continue
link = item.get("html_url", "")
if utils.isblank(link):
continue
reponame = item.get("repository", {}).get("full_name", "") + "/"
if not intercept(text=reponame, excludes=excludes):
links.add(link)
return list(links)
except:
return []
|
curl -Ls -o response.json -H "Authorization: Bearer <token>" https://api.github.com/search/code?q=%22%2Fapi%2Fv1%2Fclient%2Fsubscribe%3Ftoken%3D%22&sort=indexed&order=desc&per_page=30&page=1
|
search_github_code_byapi
|
python
|
wzdnzd/aggregator
|
subscribe/crawl.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/crawl.py
|
Apache-2.0
|
def download_mmdb(repo: str, target: str, filepath: str, retry: int = 3) -> bool:
"""
Download GeoLite2-City.mmdb from github release
"""
repo = utils.trim(text=repo)
if not repo or len(repo.split("/", maxsplit=1)) != 2:
logger.error(f"invalid github repo name: {repo}")
return False
target = utils.trim(text=target)
if not target:
logger.error("invalid download target")
return False
# extract download url from github release page
release_api = f"https://api.github.com/repos/{repo}/releases/latest?per_page=1"
assets, content = None, utils.http_get(url=release_api)
try:
data = json.loads(content)
assets = data.get("assets", [])
except:
logger.error(f"failed download {target} due to cannot extract download url through Github API")
if not assets or not isinstance(assets, list):
logger.error(f"no assets found for {target} in github release")
return False
download_url = ""
for asset in assets:
if asset.get("name", "") == target:
download_url = asset.get("browser_download_url", "")
break
if not download_url:
logger.error(f"no download url found for {target} in github release")
return False
return download(download_url, filepath, target, retry)
|
Download GeoLite2-City.mmdb from github release
|
download_mmdb
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def download(url: str, filepath: str, filename: str, retry: int = 3) -> bool:
"""Download file from url to filepath with filename"""
if retry < 0:
logger.error(f"archieved max retry count for download, url: {url}")
return False
url = utils.trim(text=url)
if not url:
logger.error("invalid download url")
return False
filepath = utils.trim(text=filepath)
if not filepath:
logger.error(f"invalid save filepath, url: {url}")
return False
filename = utils.trim(text=filename)
if not filename:
logger.error(f"invalid save filename, url: {url}")
return False
if not os.path.exists(filepath) or not os.path.isdir(filepath):
os.makedirs(filepath)
fullpath = os.path.join(filepath, filename)
if os.path.exists(fullpath) and os.path.isfile(fullpath):
os.remove(fullpath)
# download target file from github release to fullpath
try:
urllib.request.urlretrieve(url=url, filename=fullpath)
except Exception:
return download(url, filepath, filename, retry - 1)
logger.info(f"download file {filename} to {fullpath} success")
return True
|
Download file from url to filepath with filename
|
download
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def query_ip_country(ip: str, reader: database.Reader) -> str:
"""
Query country information for an IP address using mmdb database
Args:
ip: The IP address to query
reader: The mmdb database reader
Returns:
The country name in Chinese
"""
if not ip or not reader:
return ""
try:
# fake ip
if ip.startswith("198.18.0."):
logger.warning("cannot get geolocation because IP address is faked")
return ""
response = reader.country(ip)
# Try to get country name in Chinese
country = response.country.names.get("zh-CN", "")
# If Chinese name is not available, try to convert ISO code to Chinese country name
if not country and response.country.iso_code:
iso_code = response.country.iso_code
# Try to get Chinese country name from ISO code mapping
country = ISO_TO_CHINESE.get(iso_code, iso_code)
# Special handling for well-known IPs
if not country:
if ip == "1.1.1.1" or ip == "1.0.0.1":
country = "Cloudflare"
elif ip.startswith("8.8.8.") or ip.startswith("8.8.4."):
country = "Google"
return country
except Exception as e:
logger.error(f"query ip country failed, ip: {ip}, error: {str(e)}")
return ""
|
Query country information for an IP address using mmdb database
Args:
ip: The IP address to query
reader: The mmdb database reader
Returns:
The country name in Chinese
|
query_ip_country
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def get_listening_ports() -> set:
"""Get the set of listening ports in the system, cross-platform compatible"""
listening_ports = set()
try:
# Windows system
if os.name == "nt":
try:
# Use 'cp437' encoding to handle Windows command line output
output = subprocess.check_output("netstat -an", shell=True).decode("cp437", errors="replace")
for line in output.split("\n"):
if "LISTENING" in line:
parts = line.split()
if len(parts) >= 2:
addr_port = parts[1]
if ":" in addr_port:
try:
port = int(addr_port.split(":")[-1])
listening_ports.add(port)
except ValueError:
pass
except Exception as e:
logger.warning(f"Windows netstat command failed: {str(e)}")
return listening_ports
# macOS system
elif sys.platform == "darwin":
try:
output = subprocess.check_output("lsof -i -P -n | grep LISTEN", shell=True).decode(
"utf-8", errors="replace"
)
for line in output.split("\n"):
if ":" in line:
try:
port_part = line.split(":")[-1].split(" ")[0]
port = int(port_part)
listening_ports.add(port)
except (ValueError, IndexError):
pass
except Exception as e:
logger.warning(f"macOS lsof command failed: {str(e)}")
return listening_ports
# Linux and other systems
else:
# Try using ss command (newer Linux systems)
try:
output = subprocess.check_output("ss -tuln", shell=True).decode("utf-8", errors="replace")
for line in output.split("\n"):
if "LISTEN" in line:
parts = line.split()
for part in parts:
if ":" in part:
try:
port = int(part.split(":")[-1])
listening_ports.add(port)
except ValueError:
pass
except Exception as e:
logger.warning(f"Linux ss command failed, trying netstat: {str(e)}")
# Fall back to netstat command (older Linux systems)
try:
output = subprocess.check_output("netstat -tuln", shell=True).decode("utf-8", errors="replace")
for line in output.split("\n"):
if "LISTEN" in line:
parts = line.split()
for part in parts:
if ":" in part:
try:
port = int(part.split(":")[-1])
listening_ports.add(port)
except ValueError:
pass
except Exception as e:
logger.warning(f"Linux netstat command also failed: {str(e)}")
return listening_ports
except Exception as e:
logger.warning(f"Failed to get listening ports: {str(e)}")
return listening_ports
|
Get the set of listening ports in the system, cross-platform compatible
|
get_listening_ports
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def scan_ports_batch(start_port: int, count: int = 100) -> dict:
"""Batch scan port statuses, return a dictionary of port statuses"""
global _PORT_STATUS_CACHE, _AVAILABLE_PORTS
# Create a list of ports to scan (excluding ports with known status)
ports_to_scan = [p for p in range(start_port, start_port + count) if p not in _PORT_STATUS_CACHE]
if not ports_to_scan:
# If all ports are already cached, return cached results directly
return {p: _PORT_STATUS_CACHE.get(p, True) for p in range(start_port, start_port + count)}
# Use a more efficient way to check ports in batch
results = {}
try:
# Get the ports that are currently listening in the system
listening_ports = get_listening_ports()
# Update results
for port in ports_to_scan:
in_use = port in listening_ports
results[port] = in_use
_PORT_STATUS_CACHE[port] = in_use
if not in_use:
_AVAILABLE_PORTS.add(port)
except Exception as e:
logger.warning(f"Batch port scanning failed, falling back to individual port checks: {str(e)}")
# If batch checking fails, fall back to individual port checks
for port in ports_to_scan:
in_use = check_single_port(port)
results[port] = in_use
_PORT_STATUS_CACHE[port] = in_use
if not in_use:
_AVAILABLE_PORTS.add(port)
# Merge cached and newly scanned results
return {
**{
p: _PORT_STATUS_CACHE.get(p, True) for p in range(start_port, start_port + count) if p in _PORT_STATUS_CACHE
},
**results,
}
|
Batch scan port statuses, return a dictionary of port statuses
|
scan_ports_batch
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def check_single_port(port: int) -> bool:
"""Helper function for checking a single port, checks if the port is listening"""
try:
# Use socket to check TCP port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.2)
result = sock.connect_ex(("127.0.0.1", port))
sock.close()
if result == 0:
return True
# Also check IPv6
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.settimeout(0.2)
result = sock.connect_ex(("::1", port))
sock.close()
return result == 0
        except Exception:
pass
return False
    except Exception:
# Assume port is not in use when an error occurs
return False
|
Helper function for checking a single port, checks if the port is listening
|
check_single_port
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def is_port_in_use(port: int) -> bool:
"""Check if a port is in use (using cache)"""
global _PORT_STATUS_CACHE, _AVAILABLE_PORTS
# If port is known to be available, return directly
if port in _AVAILABLE_PORTS:
return False
# If port status is already cached, return directly
if port in _PORT_STATUS_CACHE:
return _PORT_STATUS_CACHE[port]
# Otherwise check the port and cache the result
in_use = check_single_port(port)
_PORT_STATUS_CACHE[port] = in_use
if not in_use:
_AVAILABLE_PORTS.add(port)
return in_use
|
Check if a port is in use (using cache)
|
is_port_in_use
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
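A short sketch of how the cached helpers above fit together (import path and port number are illustrative assumptions):
# Sketch only: the first call per port hits check_single_port, later calls are
# answered from _PORT_STATUS_CACHE / _AVAILABLE_PORTS without re-probing.
from subscribe.location import is_port_in_use

candidate = 32001
while is_port_in_use(candidate):
    candidate += 1
print(f"using free port {candidate}")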
def generate_mihomo_config(proxies: list[dict]) -> tuple[dict, dict]:
"""Generate mihomo configuration for the given proxies"""
# Base configuration
config = {
"mixed-port": 7890,
"allow-lan": True,
"mode": "global",
"log-level": "error",
"proxies": proxies,
"dns": {
"enable": True,
"enhanced-mode": "fake-ip",
"fake-ip-range": "198.18.0.1/16",
"default-nameserver": ["114.114.114.114", "223.5.5.5", "8.8.8.8"],
"nameserver": ["https://doh.pub/dns-query"],
},
"listeners": [],
}
# Record the port assigned to each proxy
records = dict()
# If there are no proxies, return directly
if not proxies:
return config, records
# Pre-scan ports in batch to improve efficiency
start_port = 32001
# Scan enough ports to ensure there are sufficient available ports
port_count = len(proxies) * 2
port_status = scan_ports_batch(start_port, port_count)
# Find all available ports
available_ports = [p for p, in_use in port_status.items() if not in_use]
# If available ports are insufficient, scan more ports
if len(available_ports) < len(proxies):
additional_ports = scan_ports_batch(start_port + port_count, port_count * 2)
available_ports.extend([p for p, in_use in additional_ports.items() if not in_use])
# Assign an available port to each proxy
for index, proxy in enumerate(proxies):
if index < len(available_ports):
port = available_ports[index]
else:
# If available ports are insufficient, use traditional method to find available ports
port = start_port + port_count + index
max_attempts = 1000
attempts = 0
while is_port_in_use(port) and attempts < max_attempts:
port += 1
attempts += 1
if attempts >= max_attempts:
logger.warning(
f"Could not find an available port for proxy {proxy['name']} after {max_attempts} attempts"
)
continue
listener = {
"name": f"http-{index}",
"type": "http",
"port": port,
"proxy": proxy["name"],
"listen": "127.0.0.1",
"users": [],
}
config["listeners"].append(listener)
records[proxy["name"]] = port
return config, records
|
Generate mihomo configuration for the given proxies
|
generate_mihomo_config
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
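A hedged usage sketch for generate_mihomo_config: build a config for two illustrative proxies and dump it as YAML. The proxy fields, the PyYAML dependency, and the output filename are assumptions, not part of the original module.
# Sketch: generate a mihomo config plus the proxy-name -> listener-port map.
import yaml  # assumed available; any YAML writer works
from subscribe.location import generate_mihomo_config

proxies = [
    {"name": "node-1", "type": "ss", "server": "1.2.3.4", "port": 8388,
     "cipher": "aes-256-gcm", "password": "secret"},
    {"name": "node-2", "type": "trojan", "server": "5.6.7.8", "port": 443,
     "password": "secret"},
]
config, records = generate_mihomo_config(proxies)
with open("mihomo.yaml", "w", encoding="utf-8") as f:
    yaml.safe_dump(config, f, allow_unicode=True)
print(records)  # e.g. {"node-1": 32001, "node-2": 32002}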
def make_proxy_request(port: int, url: str, max_retries: int = 5, timeout: int = 10) -> tuple[bool, dict]:
"""
Make an HTTP request through a proxy and return the response
Args:
port: The port of the proxy
url: The URL to request
max_retries: Maximum number of retry attempts
timeout: Timeout for the request in seconds
Returns:
A tuple of (success, data) where:
- success: Whether the request was successful
- data: The parsed JSON data (empty dict if request failed)
"""
if not port:
logger.warning("No port provided for proxy")
return False, {}
# Configure the proxy for the request
proxy_url = f"http://127.0.0.1:{port}"
proxies_config = {"http": proxy_url, "https": proxy_url}
# Configure proxy handler
proxy_handler = urllib.request.ProxyHandler(proxies_config)
# Build opener with proxy handler
opener = urllib.request.build_opener(proxy_handler)
opener.addheaders = [
("User-Agent", utils.USER_AGENT),
("Accept", "application/json"),
("Connection", "close"),
]
# Try to get response with retry and backoff
attempt, success, data = 0, False, {}
while not success and attempt < max(max_retries, 1):
try:
# Random sleep to avoid being blocked by the API (increasing with each retry)
if attempt > 0:
wait_time = min(2**attempt * random.uniform(0.5, 1.5), 6)
time.sleep(wait_time)
# Make request
response = opener.open(url, timeout=timeout)
if response.getcode() == 200:
content = response.read().decode("utf-8")
data = json.loads(content)
success = True
except Exception as e:
logger.warning(f"Attempt {attempt+1} failed to request {url} through proxy port {port}: {str(e)}")
attempt += 1
return success, data
|
Make an HTTP request through a proxy and return the response
Args:
port: The port of the proxy
url: The URL to request
max_retries: Maximum number of retry attempts
timeout: Timeout for the request in seconds
Returns:
A tuple of (success, data) where:
- success: Whether the request was successful
- data: The parsed JSON data (empty dict if request failed)
|
make_proxy_request
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
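Illustrative call to make_proxy_request through one of the listener ports recorded above (the port value 32001 is an assumption):
# Sketch: query a public IP API through the local HTTP listener on port 32001.
from subscribe.location import make_proxy_request

ok, payload = make_proxy_request(
    port=32001, url="https://api.ipify.org?format=json", max_retries=3, timeout=10
)
if ok:
    print("exit IP:", payload.get("ip", ""))
else:
    print("request through the proxy failed")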
def get_ipv4(port: int, max_retries: int = 5) -> str:
"""
Get the IPv4 address by accessing https://api.ipify.org?format=json through a proxy
Args:
port: The port of the proxy
max_retries: Maximum number of retry attempts
Returns:
The IPv4 address or empty string if failed
"""
if not port:
logger.warning("No port provided for proxy")
return ""
success, data = make_proxy_request(port=port, url="https://api.ipify.org?format=json", max_retries=max_retries)
return data.get("ip", "") if success else ""
|
Get the IPv4 address by accessing https://api.ipify.org?format=json through a proxy
Args:
port: The port of the proxy
max_retries: Maximum number of retry attempts
Returns:
The IPv4 address or empty string if failed
|
get_ipv4
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def locate_by_ipinfo(name: str, port: int, reader: database.Reader = None) -> dict:
"""Check the location of a single proxy by making a request through it"""
result = {"name": name, "country": ""}
if not port:
logger.warning(f"No port found for proxy {name}")
return result
if reader:
# Get IP address through proxy
if ip := get_ipv4(port=port, max_retries=2):
country = query_ip_country(ip, reader)
if country:
result["country"] = country
return result
# Random sleep to avoid being blocked by the API
time.sleep(random.uniform(0.01, 0.5))
api_services = [
{"url": "https://ipinfo.io", "country_key": "country"},
{"url": "https://ipapi.co/json/", "country_key": "country_code"},
{"url": "https://ipwho.is", "country_key": "country_code"},
{"url": "https://freeipapi.com/api/json", "country_key": "countryCode"},
{"url": "https://api.country.is", "country_key": "country"},
{"url": "https://api.ip.sb/geoip", "country_key": "country_code"},
]
max_retries = 3
for attempt in range(max_retries):
service = random.choice(api_services)
# We're already handling retries in this loop
success, data = make_proxy_request(port=port, url=service["url"], max_retries=1, timeout=12)
if success:
# Extract country code from the response using the service-specific key
country_key = service["country_key"]
country_code = data.get(country_key, "")
if country_code:
# Convert ISO code to Chinese country name
result["country"] = ISO_TO_CHINESE.get(country_code, country_code)
break
# If request failed, wait before trying another service
if attempt < max_retries - 1:
wait_time = min(2**attempt * random.uniform(1, 2), 6)
logger.warning(
f"Attempt {attempt+1} failed for proxy {name} with {service['url']}, waiting {wait_time:.2f}s"
)
time.sleep(wait_time)
return result
|
Check the location of a single proxy by making a request through it
|
locate_by_ipinfo
|
python
|
wzdnzd/aggregator
|
subscribe/location.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/location.py
|
Apache-2.0
|
def get_messages(self, account: Account) -> list:
"""download a list of messages currently in the account."""
if not account or not self.auth_headers:
return []
content = utils.http_get(
url="{}/messages?page={}".format(self.api_address, 1),
headers=self.auth_headers,
retry=2,
)
messages = []
if not content:
return messages
try:
dataset = json.loads(content).get("hydra:member", [])
for message_data in dataset:
content = utils.http_get(
url=f"{self.api_address}/messages/{message_data['id']}",
headers=self.auth_headers,
)
if not content:
continue
data = json.loads(content)
text = data.get("text", "")
html = data.get("html", "")
messages.append(
Message(
id=message_data["id"],
sender=message_data["from"],
to=message_data["to"],
subject=message_data["subject"],
intro=message_data["intro"],
text=text,
html=html,
data=message_data,
)
)
    except Exception:
logger.error(f"failed to list messages, email: {self.address}")
return messages
|
Download a list of messages currently in the account.
|
get_messages
|
python
|
wzdnzd/aggregator
|
subscribe/mailtm.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/mailtm.py
|
Apache-2.0
|
def delete_account(self, account: Account) -> bool:
"""try to delete the account. returns True if it succeeds."""
if account is None or not self.auth_headers:
return False
try:
request = urllib.request.Request(
url=f"{self.api_address}/accounts/{account.id}",
headers=self.auth_headers,
method="DELETE",
)
response = urllib.request.urlopen(request, timeout=10, context=utils.CTX)
status_code = response.getcode()
return status_code == 204
except Exception:
logger.info(f"[MailTMError] delete account failed, domain: {self.api_address}, address: {account.address}")
return False
|
Try to delete the account. Returns True if it succeeds.
|
delete_account
|
python
|
wzdnzd/aggregator
|
subscribe/mailtm.py
|
https://github.com/wzdnzd/aggregator/blob/master/subscribe/mailtm.py
|
Apache-2.0
|
def download_mmdb(repo: str, target: str, filepath: str, retry: int = 3):
"""
Download GeoLite2-City.mmdb from github release
"""
repo = trim(text=repo)
if not repo or len(repo.split("/", maxsplit=1)) != 2:
raise ValueError(f"invalid github repo name: {repo}")
target = trim(target)
if not target:
raise ValueError("invalid download target")
# extract download url from github release page
release_api = f"https://api.github.com/repos/{repo}/releases/latest?per_page=1"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
}
count, response = 0, None
while count < retry and response is None:
try:
request = urllib.request.Request(url=release_api, headers=headers)
response = urllib.request.urlopen(request, timeout=10, context=CTX)
except Exception:
count += 1
assets = read_response(response=response, expected=200, deserialize=True, key="assets")
if not assets or not isinstance(assets, list):
raise Exception("no assets found in github release")
download_url = ""
for asset in assets:
if asset.get("name", "") == target:
download_url = asset.get("browser_download_url", "")
break
if not download_url:
raise Exception("no download url found in github release")
download(download_url, filepath, target, retry)
|
Download GeoLite2-City.mmdb from github release
|
download_mmdb
|
python
|
wzdnzd/aggregator
|
tools/clean.py
|
https://github.com/wzdnzd/aggregator/blob/master/tools/clean.py
|
Apache-2.0
|
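A hedged invocation sketch for download_mmdb; the repo name mirrors the PrxyHunter/GeoLite2 release hard-coded in the tools/ip-location.py variant below, and the ./data target directory is an assumption:
# Sketch: fetch GeoLite2-City.mmdb into ./data from the latest GitHub release.
download_mmdb(
    repo="PrxyHunter/GeoLite2",
    target="GeoLite2-City.mmdb",
    filepath="./data",
    retry=3,
)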
def download(url: str, filepath: str, filename: str, retry: int = 3) -> None:
"""Download file from url to filepath with filename"""
if retry < 0:
raise Exception("archieved max retry count for download")
url = trim(url)
if not url:
raise ValueError("invalid download url")
filepath = trim(filepath)
if not filepath:
raise ValueError("invalid save filepath")
filename = trim(filename)
if not filename:
raise ValueError("invalid save filename")
if not os.path.exists(filepath) or not os.path.isdir(filepath):
os.makedirs(filepath)
fullpath = os.path.join(filepath, filename)
if os.path.exists(fullpath) and os.path.isfile(fullpath):
os.remove(fullpath)
# download target file from github release to fullpath
try:
urllib.request.urlretrieve(url=url, filename=fullpath)
except Exception:
return download(url, filepath, filename, retry - 1)
print(f"download file {filename} to {fullpath} success")
|
Download file from url to filepath with filename
|
download
|
python
|
wzdnzd/aggregator
|
tools/clean.py
|
https://github.com/wzdnzd/aggregator/blob/master/tools/clean.py
|
Apache-2.0
|
def download_mmdb(target: str, filepath: str, retry: int = 3):
"""
Download GeoLite2-City.mmdb from github release
"""
target = trim(target)
if not target:
raise ValueError("invalid download target")
# extract download url from github release page
release_api = "https://api.github.com/repos/PrxyHunter/GeoLite2/releases/latest?per_page=1"
count, response = 0, None
while count < retry and response is None:
try:
response = requests.get(release_api, timeout=10)
except Exception:
count += 1
if not response or response.status_code != 200:
raise Exception("request github release api failed")
assets = response.json().get("assets", [])
if not assets:
raise Exception("no assets found in github release")
download_url = ""
for asset in assets:
if asset.get("name", "") == target:
download_url = asset.get("browser_download_url", "")
break
if not download_url:
raise Exception("no download url found in github release")
download(download_url, filepath, target, retry, 60)
|
Download GeoLite2-City.mmdb from github release
|
download_mmdb
|
python
|
wzdnzd/aggregator
|
tools/ip-location.py
|
https://github.com/wzdnzd/aggregator/blob/master/tools/ip-location.py
|
Apache-2.0
|
def download(url: str, filepath: str, filename: str, retry: int = 3, timeout: int = 10) -> None:
"""Download file from url to filepath with filename"""
if retry < 0:
raise Exception("archieved max retry count for download")
url = trim(url)
if not url:
raise ValueError("invalid download url")
filepath = trim(filepath)
if not filepath:
raise ValueError("invalid save filepath")
filename = trim(filename)
if not filename:
raise ValueError("invalid save filename")
if not os.path.exists(filepath) or not os.path.isdir(filepath):
os.makedirs(filepath)
fullpath = os.path.join(filepath, filename)
if os.path.exists(fullpath) and os.path.isfile(fullpath):
os.remove(fullpath)
# download target file from github release to fullpath
timeout = max(timeout, 6)
try:
with requests.get(url, stream=True, timeout=timeout) as r:
r.raise_for_status()
with open(fullpath, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
f.flush()
except Exception:
return download(url, filepath, filename, retry - 1, min(timeout * 2, 180))
print(f"download file {filename} to {fullpath} success")
|
Download file from url to filepath with filename
|
download
|
python
|
wzdnzd/aggregator
|
tools/ip-location.py
|
https://github.com/wzdnzd/aggregator/blob/master/tools/ip-location.py
|
Apache-2.0
|
def download_mmdb(repo: str, target: str, filepath: str, retry: int = 3):
"""
Download GeoLite2-City.mmdb from github release
"""
repo = trim(text=repo)
if not repo or len(repo.split("/", maxsplit=1)) != 2:
raise ValueError(f"invalid github repo name: {repo}")
target = trim(target)
if not target:
raise ValueError("invalid download target")
# extract download url from github release page
release_api = f"https://api.github.com/repos/{repo}/releases/latest?per_page=1"
headers = {
"User-Agent": USER_AGENT,
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
}
count, response = 0, None
while count < retry and response is None:
try:
request = urllib.request.Request(url=release_api, headers=headers)
response = urllib.request.urlopen(request, timeout=10, context=CTX)
except Exception:
count += 1
assets = read_response(response=response, expected=200, deserialize=True, key="assets")
if not assets or not isinstance(assets, list):
raise Exception("no assets found in github release")
download_url = ""
for asset in assets:
if asset.get("name", "") == target:
download_url = asset.get("browser_download_url", "")
break
if not download_url:
raise Exception("no download url found in github release")
download(download_url, filepath, target, retry)
|
Download GeoLite2-City.mmdb from github release
|
download_mmdb
|
python
|
wzdnzd/aggregator
|
tools/xui.py
|
https://github.com/wzdnzd/aggregator/blob/master/tools/xui.py
|
Apache-2.0
|
def download(url: str, filepath: str, filename: str, retry: int = 3) -> None:
"""Download file from url to filepath with filename"""
if retry < 0:
raise Exception("archieved max retry count for download")
url = trim(url)
if not url:
raise ValueError("invalid download url")
filepath = trim(filepath)
if not filepath:
raise ValueError("invalid save filepath")
filename = trim(filename)
if not filename:
raise ValueError("invalid save filename")
if not os.path.exists(filepath) or not os.path.isdir(filepath):
os.makedirs(filepath)
fullpath = os.path.join(filepath, filename)
if os.path.exists(fullpath) and os.path.isfile(fullpath):
os.remove(fullpath)
# download target file from github release to fullpath
try:
urllib.request.urlretrieve(url=url, filename=fullpath)
except Exception:
return download(url, filepath, filename, retry - 1)
print(f"download file {filename} to {fullpath} success")
|
Download file from url to filepath with filename
|
download
|
python
|
wzdnzd/aggregator
|
tools/xui.py
|
https://github.com/wzdnzd/aggregator/blob/master/tools/xui.py
|
Apache-2.0
|
def test_synthetic_arange_random_n_data():
"""Test if correct data quantity is generated by synthetic_arange_random."""
n_list = [10, 20]
for n in n_list:
y_pred, y_std, y_true, x = synthetic_arange_random(n)
assert len(y_pred) == n
assert len(y_std) == n
assert len(y_true) == n
assert len(x) == n
|
Test if correct data quantity is generated by synthetic_arange_random.
|
test_synthetic_arange_random_n_data
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_data.py
|
MIT
|
def test_synthetic_sine_heteroscedastic_n_data():
"""Test if correct data quantity is generated by synthetic_sine_heteroscedastic."""
n_list = [10, 20]
for n in n_list:
y_pred, y_std, y_true, x = synthetic_sine_heteroscedastic(n)
assert len(y_pred) == n
assert len(y_std) == n
assert len(y_true) == n
assert len(x) == n
|
Test if correct data quantity is generated by synthetic_sine_heteroscedastic.
|
test_synthetic_sine_heteroscedastic_n_data
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_data.py
|
MIT
|
def test_get_all_accuracy_metrics_returns(get_test_set):
"""Test if correct accuracy metrics are returned."""
y_pred, y_std, y_true = get_test_set
met_dict = get_all_accuracy_metrics(y_pred, y_true)
met_keys = met_dict.keys()
assert len(met_keys) == 6
met_str_list = ["mae", "rmse", "mdae", "marpd", "r2", "corr"]
bool_list = [s in met_keys for s in met_str_list]
assert all(bool_list)
|
Test if correct accuracy metrics are returned.
|
test_get_all_accuracy_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_average_calibration_returns(get_test_set):
"""Test if correct average calibration metrics are returned."""
n_bins = 20
met_dict = get_all_average_calibration(*get_test_set, n_bins)
met_keys = met_dict.keys()
assert len(met_keys) == 3
met_str_list = ["rms_cal", "ma_cal", "miscal_area"]
bool_list = [s in met_keys for s in met_str_list]
assert all(bool_list)
|
Test if correct average calibration metrics are returned.
|
test_get_all_average_calibration_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_adversarial_group_calibration_returns(get_test_set):
"""Test if correct adversarial group calibration metrics are returned."""
n_bins = 20
met_dict = get_all_adversarial_group_calibration(*get_test_set, n_bins)
met_keys = met_dict.keys()
assert len(met_keys) == 2
met_str_list = ["ma_adv_group_cal", "rms_adv_group_cal"]
bool_list = [s in met_keys for s in met_str_list]
assert all(bool_list)
for met_str in met_str_list:
inner_dict = met_dict[met_str]
inner_keys = inner_dict.keys()
assert len(inner_keys) == 3
inner_str_list = [
"group_sizes",
"adv_group_cali_mean",
"adv_group_cali_stderr",
]
bool_list = [s in inner_keys for s in inner_str_list]
assert all(bool_list)
|
Test if correct adversarial group calibration metrics are returned.
|
test_get_all_adversarial_group_calibration_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_sharpness_metrics_returns(get_test_set):
"""Test if correct sharpness metrics are returned."""
y_pred, y_std, y_true = get_test_set
met_dict = get_all_sharpness_metrics(y_std)
met_keys = met_dict.keys()
assert len(met_keys) == 1
assert "sharp" in met_keys
|
Test if correct sharpness metrics are returned.
|
test_get_all_sharpness_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_scoring_rule_metrics_returns(get_test_set):
"""Test if correct scoring rule metrics are returned."""
resolution = 99
scaled = True
met_dict = get_all_scoring_rule_metrics(*get_test_set, resolution, scaled)
met_keys = met_dict.keys()
assert len(met_keys) == 4
met_str_list = ["nll", "crps", "check", "interval"]
bool_list = [s in met_keys for s in met_str_list]
assert all(bool_list)
|
Test if correct scoring rule metrics are returned.
|
test_get_all_scoring_rule_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_get_all_metrics_returns(get_test_set):
"""Test if correct metrics are returned by get_all_metrics function."""
met_dict = get_all_metrics(*get_test_set)
met_keys = met_dict.keys()
assert len(met_keys) == 5
met_str_list = [
"accuracy",
"avg_calibration",
"adv_group_calibration",
"sharpness",
"scoring_rule",
]
bool_list = [s in met_keys for s in met_str_list]
assert all(bool_list)
|
Test if correct metrics are returned by get_all_metrics function.
|
test_get_all_metrics_returns
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics.py
|
MIT
|
def test_prediction_error_metric_fields(get_test_set):
"""Test if prediction error metrics have correct fields."""
y_pred, y_std, y_true = get_test_set
met_dict = prediction_error_metrics(y_pred, y_true)
met_keys = met_dict.keys()
assert len(met_keys) == 6
met_str_list = ["mae", "rmse", "mdae", "marpd", "r2", "corr"]
bool_list = [s in met_keys for s in met_str_list]
assert all(bool_list)
|
Test if prediction error metrics have correct fields.
|
test_prediction_error_metric_fields
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_accuracy.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_accuracy.py
|
MIT
|
def test_prediction_error_metric_values(get_test_set):
"""Test if prediction error metrics have correct values."""
y_pred, y_std, y_true = get_test_set
met_dict = prediction_error_metrics(y_pred, y_true)
print(met_dict)
assert met_dict["mae"] > 0.21 and met_dict["mae"] < 0.22
assert met_dict["rmse"] > 0.21 and met_dict["rmse"] < 0.22
assert met_dict["mdae"] >= 0.20 and met_dict["mdae"] < 0.21
assert met_dict["marpd"] > 12 and met_dict["marpd"] < 13
assert met_dict["r2"] > 0.88 and met_dict["r2"] < 0.89
assert met_dict["corr"] > 0.99 and met_dict["corr"] < 1.0
|
Test if prediction error metrics have correct values.
|
test_prediction_error_metric_values
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_accuracy.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_accuracy.py
|
MIT
|
def test_sharpness_on_test_set(supply_test_set):
"""Test sharpness on the test set for some dummy values."""
_, test_std, _ = supply_test_set
assert np.abs(sharpness(test_std) - 0.648074069840786) < 1e-6
|
Test sharpness on the test set for some dummy values.
|
test_sharpness_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_root_mean_squared_calibration_error_on_test_set(supply_test_set):
"""Test root mean squared calibration error on some dummy values."""
test_rmsce_nonvectorized_interval = root_mean_squared_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=False,
recal_model=None,
prop_type="interval"
)
test_rmsce_vectorized_interval = root_mean_squared_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=True,
recal_model=None,
prop_type="interval"
)
assert (
np.abs(test_rmsce_nonvectorized_interval - test_rmsce_vectorized_interval)
< 1e-6
)
assert np.abs(test_rmsce_vectorized_interval - 0.4165757476562379) < 1e-6
test_rmsce_nonvectorized_quantile = root_mean_squared_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=False,
recal_model=None,
prop_type="quantile"
)
test_rmsce_vectorized_quantile = root_mean_squared_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=True,
recal_model=None,
prop_type="quantile"
)
assert (
np.abs(test_rmsce_nonvectorized_quantile - test_rmsce_vectorized_quantile)
< 1e-6
)
assert np.abs(test_rmsce_vectorized_quantile - 0.30362567774902066) < 1e-6
|
Test root mean squared calibration error on some dummy values.
|
test_root_mean_squared_calibration_error_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_mean_absolute_calibration_error_on_test_set(supply_test_set):
"""Test mean absolute calibration error on some dummy values."""
test_mace_nonvectorized_interval = mean_absolute_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=False,
recal_model=None,
prop_type="interval"
)
test_mace_vectorized_interval = mean_absolute_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=True,
recal_model=None,
prop_type="interval"
)
assert (
np.abs(test_mace_nonvectorized_interval - test_mace_vectorized_interval) < 1e-6
)
assert np.abs(test_mace_vectorized_interval - 0.3733333333333335) < 1e-6
test_mace_nonvectorized_quantile = mean_absolute_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=False,
recal_model=None,
prop_type="quantile"
)
test_mace_vectorized_quantile = mean_absolute_calibration_error(
*supply_test_set,
num_bins=100,
vectorized=True,
recal_model=None,
prop_type="quantile"
)
assert (
np.abs(test_mace_nonvectorized_quantile - test_mace_vectorized_quantile) < 1e-6
)
assert np.abs(test_mace_vectorized_quantile - 0.23757575757575758) < 1e-6
|
Test mean absolute calibration error on some dummy values.
|
test_mean_absolute_calibration_error_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_adversarial_group_calibration_on_test_set(supply_test_set):
"""Test adversarial group calibration on test set for some dummy values."""
test_out_interval = adversarial_group_calibration(
*supply_test_set,
cali_type="mean_abs",
prop_type="interval",
num_bins=100,
num_group_bins=10,
draw_with_replacement=False,
num_trials=10,
num_group_draws=10,
verbose=False
)
assert np.max(np.abs(test_out_interval.group_size - np.linspace(0, 1, 10))) < 1e-6
assert np.all(test_out_interval.score_mean < 0.5)
assert np.abs(test_out_interval.score_mean[-1] - 0.3733333333333335) < 1e-6
assert np.min(test_out_interval.score_stderr) >= 0
test_out_quantile = adversarial_group_calibration(
*supply_test_set,
cali_type="mean_abs",
prop_type="quantile",
num_bins=100,
num_group_bins=10,
draw_with_replacement=False,
num_trials=10,
num_group_draws=10,
verbose=False
)
assert np.max(np.abs(test_out_quantile.group_size - np.linspace(0, 1, 10))) < 1e-6
assert np.all(test_out_quantile.score_mean < 0.5)
assert np.abs(test_out_quantile.score_mean[-1] - 0.2375757575757576) < 1e-6
assert np.min(test_out_quantile.score_stderr) >= 0
|
Test adversarial group calibration on test set for some dummy values.
|
test_adversarial_group_calibration_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_miscalibration_area_on_test_set(supply_test_set):
"""Test miscalibration area on some dummy values."""
test_miscal_area_nonvectorized_interval = miscalibration_area(
*supply_test_set,
num_bins=100,
vectorized=False,
recal_model=None,
prop_type="interval"
)
test_miscal_area_vectorized_interval = miscalibration_area(
*supply_test_set,
num_bins=100,
vectorized=True,
recal_model=None,
prop_type="interval"
)
assert (
np.abs(
test_miscal_area_nonvectorized_interval
- test_miscal_area_vectorized_interval
)
< 1e-6
)
assert np.abs(test_miscal_area_vectorized_interval - 0.37710437710437716) < 1e-6
test_miscal_area_nonvectorized_quantile = miscalibration_area(
*supply_test_set,
num_bins=100,
vectorized=False,
recal_model=None,
prop_type="quantile"
)
test_miscal_area_vectorized_quantile = miscalibration_area(
*supply_test_set,
num_bins=100,
vectorized=True,
recal_model=None,
prop_type="quantile"
)
assert (
np.abs(
test_miscal_area_nonvectorized_quantile
- test_miscal_area_vectorized_quantile
)
< 1e-6
)
assert np.abs(test_miscal_area_vectorized_quantile - 0.23916245791245785) < 1e-6
|
Test miscalibration area on some dummy values.
|
test_miscalibration_area_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_vectorization_for_proportion_list_on_test_set(supply_test_set):
"""Test vectorization in get_proportion_lists on the test set for some dummy values."""
(
test_exp_props_nonvec_interval,
test_obs_props_nonvec_interval,
) = get_proportion_lists(
*supply_test_set, num_bins=100, recal_model=None, prop_type="interval"
)
(
test_exp_props_vec_interval,
test_obs_props_vec_interval,
) = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None, prop_type="interval"
)
assert (
np.max(np.abs(test_exp_props_nonvec_interval - test_exp_props_vec_interval))
< 1e-6
)
assert (
np.max(np.abs(test_obs_props_nonvec_interval - test_obs_props_vec_interval))
< 1e-6
)
(
test_exp_props_nonvec_quantile,
test_obs_props_nonvec_quantile,
) = get_proportion_lists(
*supply_test_set, num_bins=100, recal_model=None, prop_type="quantile"
)
(
test_exp_props_vec_quantile,
test_obs_props_vec_quantile,
) = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None, prop_type="quantile"
)
assert (
np.max(np.abs(test_exp_props_nonvec_quantile - test_exp_props_vec_quantile))
< 1e-6
)
assert (
np.max(np.abs(test_obs_props_nonvec_quantile - test_obs_props_vec_quantile))
< 1e-6
)
|
Test vectorization in get_proportion_lists on the test set for some dummy values.
|
test_vectorization_for_proportion_list_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_lists_vectorized_on_test_set(supply_test_set):
"""Test get_proportion_lists_vectorized on the test set for some dummy values."""
(
test_exp_props_interval,
test_obs_props_interval,
) = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None, prop_type="interval"
)
assert test_exp_props_interval.shape == test_obs_props_interval.shape
assert (
np.max(np.abs(np.unique(test_exp_props_interval) - np.linspace(0, 1, 100)))
< 1e-6
)
assert (
np.max(
np.abs(
np.sort(np.unique(test_obs_props_interval))
- np.array([0.0, 0.33333333, 0.66666667, 1.0])
)
)
< 1e-6
)
(
test_exp_props_quantile,
test_obs_props_quantile,
) = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None, prop_type="quantile"
)
assert test_exp_props_quantile.shape == test_obs_props_quantile.shape
assert (
np.max(np.abs(np.unique(test_exp_props_quantile) - np.linspace(0, 1, 100)))
< 1e-6
)
assert (
np.max(
np.abs(
np.sort(np.unique(test_obs_props_quantile))
- np.array([0.0, 0.33333333, 0.66666667, 1.0])
)
)
< 1e-6
)
|
Test get_proportion_lists_vectorized on the test set for some dummy values.
|
test_get_proportion_lists_vectorized_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_lists_on_test_set(supply_test_set):
"""Test get_proportion_lists on the test set for some dummy values."""
test_exp_props_interval, test_obs_props_interval = get_proportion_lists(
*supply_test_set, num_bins=100, recal_model=None, prop_type="interval"
)
assert len(test_exp_props_interval) == len(test_obs_props_interval)
assert (
np.max(np.abs(np.unique(test_exp_props_interval) - np.linspace(0, 1, 100)))
< 1e-6
)
assert (
np.max(
np.abs(
np.sort(np.unique(test_obs_props_interval))
- np.array([0.0, 0.33333333, 0.66666667, 1.0])
)
)
< 1e-6
)
test_exp_props_quantile, test_obs_props_quantile = get_proportion_lists(
*supply_test_set, num_bins=100, recal_model=None, prop_type="quantile"
)
assert len(test_exp_props_quantile) == len(test_obs_props_quantile)
assert (
np.max(np.abs(np.unique(test_exp_props_quantile) - np.linspace(0, 1, 100)))
< 1e-6
)
assert (
np.max(
np.abs(
np.sort(np.unique(test_obs_props_quantile))
- np.array([0.0, 0.33333333, 0.66666667, 1.0])
)
)
< 1e-6
)
|
Test get_proportion_lists on the test set for some dummy values.
|
test_get_proportion_lists_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_in_interval_on_test_set(supply_test_set):
"""Test get_proportion_in_interval on the test set for some dummy values."""
test_quantile_value_list = [
(0.0, 0.0),
(0.25, 0.0),
(0.5, 0.0),
(0.75, 0.3333333333333333),
(1.0, 1.0),
]
for test_q, test_val in test_quantile_value_list:
assert (
np.abs(
get_proportion_in_interval(*supply_test_set, quantile=test_q) - test_val
)
< 1e-6
)
|
Test get_proportion_in_interval on the test set for some dummy values.
|
test_get_proportion_in_interval_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_proportion_under_quantile_on_test_set(supply_test_set):
"""Test get_proportion_in_interval on the test set for some dummy values."""
test_quantile_value_list = [
(0.0, 0.0),
(0.25, 0.6666666666666666),
(0.5, 0.6666666666666666),
(0.75, 0.6666666666666666),
(1.0, 1.0),
]
for test_q, test_val in test_quantile_value_list:
assert (
np.abs(
get_proportion_under_quantile(*supply_test_set, quantile=test_q)
- test_val
)
< 1e-6
)
|
Test get_proportion_under_quantile on the test set for some dummy values.
|
test_get_proportion_under_quantile_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_prediction_interval_on_test_set(supply_test_set):
"""Test get_prediction_interval on the test set for some dummy values."""
test_quantile_value_list = [
(
0.01,
np.array([1.00125335, 2.00626673, 3.01253347]),
np.array([0.99874665, 1.99373327, 2.98746653]),
),
(
0.25,
np.array([1.03186394, 2.15931968, 3.31863936]),
np.array([0.96813606, 1.84068032, 2.68136064]),
),
(
0.50,
np.array([1.06744898, 2.33724488, 3.67448975]),
np.array([0.93255102, 1.66275512, 2.32551025]),
),
(
0.75,
np.array([1.11503494, 2.57517469, 4.15034938]),
np.array([0.88496506, 1.42482531, 1.84965062]),
),
(
0.99,
np.array([1.25758293, 3.28791465, 5.5758293]),
np.array([0.74241707, 0.71208535, 0.4241707]),
),
]
y_pred, y_std, y_true = supply_test_set
with pytest.raises(Exception):
bounds = get_prediction_interval(y_pred, y_std, quantile=0.0, recal_model=None)
with pytest.raises(Exception):
bounds = get_prediction_interval(y_pred, y_std, quantile=1.0, recal_model=None)
for test_q, test_upper, test_lower in test_quantile_value_list:
bounds = get_prediction_interval(
y_pred, y_std, quantile=test_q, recal_model=None
)
upper_bound = bounds.upper
lower_bound = bounds.lower
assert np.max(np.abs(upper_bound - test_upper)) < 1e-6
        assert np.max(np.abs(lower_bound - test_lower)) < 1e-6
|
Test get_prediction_interval on the test set for some dummy values.
|
test_get_prediction_interval_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_get_quantile_on_test_set(supply_test_set):
"""Test get_prediction_interval on the test set for some dummy values."""
test_quantile_value_list = [
(0.01, np.array([0.76736521, 0.83682606, 0.67365213])),
(
0.25,
np.array([0.93255102, 1.66275512, 2.32551025]),
),
(
0.50,
np.array([1.0, 2.0, 3.0]),
),
(
0.75,
np.array([1.06744898, 2.33724488, 3.67448975]),
),
(
0.99,
np.array([1.23263479, 3.16317394, 5.32634787]),
),
]
y_pred, y_std, y_true = supply_test_set
with pytest.raises(Exception):
bound = get_quantile(y_pred, y_std, quantile=0.0, recal_model=None)
with pytest.raises(Exception):
bound = get_quantile(y_pred, y_std, quantile=1.0, recal_model=None)
for test_q, test_bound in test_quantile_value_list:
bound = get_quantile(y_pred, y_std, quantile=test_q, recal_model=None)
assert np.max(np.abs(bound - test_bound)) < 1e-6
|
Test get_quantile on the test set for some dummy values.
|
test_get_quantile_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_calibration.py
|
MIT
|
def test_nll_gaussian_on_one_pt():
"""Sanity check by testing one point at mean of gaussian."""
y_pred = np.array([0])
y_true = np.array([0])
y_std = np.array([1 / np.sqrt(2 * np.pi)])
assert np.abs(nll_gaussian(y_pred, y_std, y_true)) < 1e-6
|
Sanity check by testing one point at mean of gaussian.
|
test_nll_gaussian_on_one_pt
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_scoring_rule.py
|
MIT
|
def test_check_score_on_one_pt():
"""Sanity check to show that check score is minimized (i.e. 0) if data
occurs at the exact requested quantile."""
y_pred = np.array([0])
y_true = np.array([1])
y_std = np.array([1])
score = check_score(
y_pred=y_pred,
y_std=y_std,
y_true=y_true,
start_q=0.5 + 0.341,
end_q=0.5 + 0.341,
resolution=1,
)
assert np.abs(score) < 1e-2
|
Sanity check to show that check score is minimized (i.e. 0) if data
occurs at the exact requested quantile.
|
test_check_score_on_one_pt
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_scoring_rule.py
|
MIT
|
def test_interval_score_on_one_pt():
"""Sanity check on interval score. For one point in the center of the
distribution and intervals one standard deviation and two standard
deviations away, should return ((1 std) * 2 + (2 std) * 2) / 2 = 3.
"""
y_pred = np.array([0])
y_true = np.array([0])
y_std = np.array([1])
score = interval_score(
y_pred=y_pred,
y_std=y_std,
y_true=y_true,
start_p=0.682,
end_p=0.954,
resolution=2,
)
assert np.abs(score - 3) < 1e-2
|
Sanity check on interval score. For one point in the center of the
distribution and intervals one standard deviation and two standard
deviations away, should return ((1 std) * 2 + (2 std) * 2) / 2 = 3.
|
test_interval_score_on_one_pt
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_metrics_scoring_rule.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_metrics_scoring_rule.py
|
MIT
|
def test_recal_model_mace_criterion_on_test_set(supply_test_set):
"""
Test recalibration on mean absolute calibration error on the test set
for some dummy values.
"""
test_mace = mean_absolute_calibration_error(
*supply_test_set, num_bins=100, vectorized=True, recal_model=None
)
test_exp_props, test_obs_props = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None
)
recal_model = iso_recal(test_exp_props, test_obs_props)
recal_test_mace = mean_absolute_calibration_error(
*supply_test_set, num_bins=100, vectorized=True, recal_model=recal_model
)
recal_exp_props = recal_model.predict(test_obs_props)
assert np.abs(test_mace - 0.24206060606060598) < 1e-2
assert np.abs(recal_test_mace - 0.003035353535353514) < 1e-2
for idx in range(1, recal_exp_props.shape[0]):
assert recal_exp_props[idx - 1] <= recal_exp_props[idx]
|
Test recalibration on mean absolute calibration error on the test set
for some dummy values.
|
test_recal_model_mace_criterion_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_recal_model_rmce_criterion_on_test_set(supply_test_set):
"""
Test recalibration on root mean squared calibration error on the test set
for some dummy values.
"""
test_rmsce = root_mean_squared_calibration_error(
*supply_test_set, num_bins=100, vectorized=True, recal_model=None
)
test_exp_props, test_obs_props = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None
)
recal_model = iso_recal(test_exp_props, test_obs_props)
recal_test_rmsce = root_mean_squared_calibration_error(
*supply_test_set, num_bins=100, vectorized=True, recal_model=recal_model
)
recal_exp_props = recal_model.predict(test_obs_props)
assert np.abs(test_rmsce - 0.28741418862839013) < 1e-2
assert np.abs(recal_test_rmsce - 0.003981861230030349) < 1e-2
for idx in range(1, recal_exp_props.shape[0]):
assert recal_exp_props[idx - 1] <= recal_exp_props[idx]
|
Test recalibration on root mean squared calibration error on the test set
for some dummy values.
|
test_recal_model_rmce_criterion_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_recal_model_miscal_area_criterion_on_test_set(supply_test_set):
"""
Test recalibration on miscalibration area on the test set
for some dummy values.
"""
test_miscal_area = miscalibration_area(
*supply_test_set, num_bins=100, vectorized=True, recal_model=None
)
test_exp_props, test_obs_props = get_proportion_lists_vectorized(
*supply_test_set, num_bins=100, recal_model=None
)
recal_model = iso_recal(test_exp_props, test_obs_props)
recal_test_miscal_area = miscalibration_area(
*supply_test_set, num_bins=100, vectorized=True, recal_model=recal_model
)
recal_exp_props = recal_model.predict(test_obs_props)
assert np.abs(test_miscal_area - 0.24426139657444004) < 1e-2
assert np.abs(recal_test_miscal_area - 0.0029569160997732244) < 1e-2
for idx in range(1, recal_exp_props.shape[0]):
assert recal_exp_props[idx - 1] <= recal_exp_props[idx]
|
Test recalibration on miscalibration area on the test set
for some dummy values.
|
test_recal_model_miscal_area_criterion_on_test_set
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_optimize_recalibration_ratio_mace_criterion(supply_test_set):
"""
Test standard deviation recalibration on mean absolute calibration error
on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
ma_cal_ratio = optimize_recalibration_ratio(
y_pred, y_std, y_true, criterion="ma_cal"
)
recal_ma_cal = mean_absolute_calibration_error(y_pred, ma_cal_ratio * y_std, y_true)
recal_rms_cal = root_mean_squared_calibration_error(
y_pred, ma_cal_ratio * y_std, y_true
)
recal_miscal = miscalibration_area(y_pred, ma_cal_ratio * y_std, y_true)
assert np.abs(ma_cal_ratio - 0.33215708813773176) < 1e-2
assert np.abs(recal_ma_cal - 0.06821616161616162) < 1e-2
assert np.abs(recal_rms_cal - 0.08800130087804929) < 1e-2
assert np.abs(recal_miscal - 0.06886262626262629) < 1e-2
|
Test standard deviation recalibration on mean absolute calibration error
on the test set for some dummy values.
|
test_optimize_recalibration_ratio_mace_criterion
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_optimize_recalibration_ratio_rmce_criterion(supply_test_set):
"""
Test standard deviation recalibration on root mean squared calibration error
on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
rms_cal_ratio = optimize_recalibration_ratio(
y_pred, y_std, y_true, criterion="rms_cal"
)
recal_ma_cal = mean_absolute_calibration_error(
y_pred, rms_cal_ratio * y_std, y_true
)
recal_rms_cal = root_mean_squared_calibration_error(
y_pred, rms_cal_ratio * y_std, y_true
)
recal_miscal = miscalibration_area(y_pred, rms_cal_ratio * y_std, y_true)
assert np.abs(rms_cal_ratio - 0.34900989073212507) < 1e-2
assert np.abs(recal_ma_cal - 0.06945555555555555) < 1e-2
assert np.abs(recal_rms_cal - 0.08570902541177935) < 1e-2
assert np.abs(recal_miscal - 0.07011706864564003) < 1e-2
|
Test standard deviation recalibration on root mean squared calibration error
on the test set for some dummy values.
|
test_optimize_recalibration_ratio_rmce_criterion
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_optimize_recalibration_ratio_miscal_area_criterion(supply_test_set):
"""
Test standard deviation recalibration on miscalibration area
on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
miscal_ratio = optimize_recalibration_ratio(
y_pred, y_std, y_true, criterion="miscal"
)
recal_ma_cal = mean_absolute_calibration_error(y_pred, miscal_ratio * y_std, y_true)
recal_rms_cal = root_mean_squared_calibration_error(
y_pred, miscal_ratio * y_std, y_true
)
recal_miscal = miscalibration_area(y_pred, miscal_ratio * y_std, y_true)
assert np.abs(miscal_ratio - 0.3321912522557988) < 1e-2
assert np.abs(recal_ma_cal - 0.06821616161616162) < 1e-2
assert np.abs(recal_rms_cal - 0.08800130087804929) < 1e-2
assert np.abs(recal_miscal - 0.06886262626262629) < 1e-2
|
Test standard deviation recalibration on miscalibration area
on the test set for some dummy values.
|
test_optimize_recalibration_ratio_miscal_area_criterion
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_get_prediction_interval_recalibrated(supply_test_set):
"""
    Test get_prediction_interval with an isotonic recalibration model
    on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
test_exp_props, test_obs_props = get_proportion_lists_vectorized(
y_pred, y_std, y_true, num_bins=100, recal_model=None
)
recal_model = iso_recal(test_exp_props, test_obs_props)
test_quantile_prop_list = [
(0.01, 0.0, 0.0),
(0.25, 0.69, 0.25),
(0.5, 0.86, 0.5),
(0.75, 0.92, 0.75),
(0.99, 0.97, 0.97),
]
for q, test_orig_prop, test_recal_prop in test_quantile_prop_list:
orig_bounds = get_prediction_interval(y_pred, y_std, q, None)
recal_bounds = get_prediction_interval(y_pred, y_std, q, recal_model)
orig_prop = np.mean(
(orig_bounds.lower <= y_true) * (y_true <= orig_bounds.upper)
)
recal_prop = np.mean(
(recal_bounds.lower <= y_true) * (y_true <= recal_bounds.upper)
)
assert np.max(np.abs(test_orig_prop - orig_prop)) < 1e-2
assert np.max(np.abs(test_recal_prop - recal_prop)) < 1e-2
|
Test get_prediction_interval with an isotonic recalibration model
on the test set for some dummy values.
|
test_get_prediction_interval_recalibrated
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_get_std_recalibrator(supply_test_set):
"""
    Test get_std_recalibrator on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
test_quantile_prop_list = [
(0.01, 0.00, 0.00),
(0.25, 0.06, 0.00),
(0.50, 0.56, 0.00),
(0.75, 0.74, 0.56),
(0.99, 0.89, 0.88),
]
std_recalibrator = get_std_recalibrator(y_pred, y_std, y_true)
for q, test_prop_in_pi, test_prop_under_q in test_quantile_prop_list:
y_std_recal = std_recalibrator(y_std)
pi = get_prediction_interval(y_pred, y_std_recal, q)
prop_in_pi = ((pi.lower <= y_true) * (y_true <= pi.upper)).mean()
quantile_bound = get_quantile(y_pred, y_std_recal, q)
prop_under_q = (quantile_bound >= y_true).mean()
assert np.max(np.abs(test_prop_in_pi - prop_in_pi)) < 5e-2
assert np.max(np.abs(test_prop_under_q - prop_under_q)) < 5e-2
|
Test get_std_recalibrator on the test set for some dummy values.
|
test_get_std_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_get_quantile_recalibrator(supply_test_set):
"""
    Test get_quantile_recalibrator on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
test_quantile_prop_list = [
(0.01, 0.00),
(0.25, 0.00),
(0.50, 0.00),
(0.75, 0.00),
(0.99, 0.83),
]
quantile_recalibrator = get_quantile_recalibrator(y_pred, y_std, y_true)
for q, test_prop_under_q in test_quantile_prop_list:
quantile_bound_recal = quantile_recalibrator(y_pred, y_std, q)
assert all(np.isfinite(quantile_bound_recal))
prop_under_q_recal = (quantile_bound_recal >= y_true).mean()
assert np.max(np.abs(test_prop_under_q - prop_under_q_recal)) < 1e-2
|
Test get_quantile_recalibrator on the test set for some dummy values.
|
test_get_quantile_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_get_interval_recalibrator(supply_test_set):
"""
    Test get_interval_recalibrator on the test set for some dummy values.
"""
random.seed(0)
np.random.seed(seed=0)
y_pred, y_std, y_true = supply_test_set
test_quantile_prop_list = [
(0.01, 0.00),
(0.25, 0.25),
(0.50, 0.50),
(0.75, 0.75),
(0.99, 0.97),
]
interval_recalibrator = get_interval_recalibrator(y_pred, y_std, y_true)
for q, test_prop_in_interval in test_quantile_prop_list:
interval_recal = interval_recalibrator(y_pred, y_std, q)
prop_in_interval_recal = (
(interval_recal.lower <= y_true) * (y_true <= interval_recal.upper)
).mean()
assert np.max(np.abs(test_prop_in_interval - prop_in_interval_recal)) < 1e-2
|
Test get_interval_recalibrator on the test set for some dummy values.
|
test_get_interval_recalibrator
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_recalibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_recalibration.py
|
MIT
|
def test_filter_subset(get_test_set):
"""Test if filter_subset returns correct number of subset elements."""
y_pred, y_std, y_true, _ = get_test_set
_test_n_subset = 2
[y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], _test_n_subset)
assert len(y_pred) == _test_n_subset
assert len(y_std) == _test_n_subset
assert len(y_true) == _test_n_subset
|
Test if filter_subset returns correct number of subset elements.
|
test_filter_subset
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
tests/test_viz.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/tests/test_viz.py
|
MIT
|
def synthetic_arange_random(
num_points: int = 10,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Dataset of evenly spaced points and identity function (with some randomization).
This function returns predictions and predictive uncertainties (given as standard
deviations) from some hypothetical uncertainty model, along with true input x and
output y data points.
Args:
num_points: The number of data points in the set.
Returns:
- The y predictions given by a hypothetical predictive uncertainty model. These
are the true values of y but with uniform noise added.
- The standard deviations given by a hypothetical predictive uncertainty model.
        These are the errors between the predictions and the truth plus some uniform
noise.
- The true outputs y.
- The true inputs x.
"""
x = np.arange(num_points)
y_true = np.arange(num_points)
y_pred = np.arange(num_points) + np.random.random((num_points,))
y_std = np.abs(y_true - y_pred) + 0.1 * np.random.random((num_points,))
return y_pred, y_std, y_true, x
|
Dataset of evenly spaced points and identity function (with some randomization).
This function returns predictions and predictive uncertainties (given as standard
deviations) from some hypothetical uncertainty model, along with true input x and
output y data points.
Args:
num_points: The number of data points in the set.
Returns:
- The y predictions given by a hypothetical predictive uncertainty model. These
are the true values of y but with uniform noise added.
- The standard deviations given by a hypothetical predictive uncertainty model.
    These are the errors between the predictions and the truth plus some uniform
noise.
- The true outputs y.
- The true inputs x.
|
synthetic_arange_random
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/data.py
|
MIT
|
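A short usage sketch, assuming the package layout implied by the path above (uncertainty_toolbox/data.py is importable as uncertainty_toolbox.data):
# Sketch: draw a reproducible synthetic dataset for exercising the metrics API.
import numpy as np
from uncertainty_toolbox.data import synthetic_arange_random

np.random.seed(0)
y_pred, y_std, y_true, x = synthetic_arange_random(num_points=20)
print(y_pred.shape, y_std.shape, y_true.shape, x.shape)  # (20,) for each array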
def synthetic_sine_heteroscedastic(
n_points: int = 10,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Return samples from "synthetic sine" heteroscedastic noisy function.
This returns a synthetic dataset which can be used to train and assess a predictive
uncertainty model.
Args:
n_points: The number of data points in the set.
Returns:
- Predicted output points y.
- Predictive uncertainties, defined using standard deviation of added noise.
- True output points y.
- True input points x.
"""
bounds = [0, 15]
x = np.linspace(bounds[0], bounds[1], n_points)
f = np.sin(x)
std = 0.01 + np.abs(x - 5.0) / 10.0
noise = np.random.normal(scale=std)
y = f + noise
return f, std, y, x
|
Return samples from "synthetic sine" heteroscedastic noisy function.
This returns a synthetic dataset which can be used to train and assess a predictive
uncertainty model.
Args:
n_points: The number of data points in the set.
Returns:
- Predicted output points y.
- Predictive uncertainties, defined using standard deviation of added noise.
- True output points y.
- True input points x.
|
synthetic_sine_heteroscedastic
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/data.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/data.py
|
MIT
|
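A usage sketch for synthetic_sine_heteroscedastic above; the import path is inferred from the record's path field (uncertainty_toolbox/data.py).

import numpy as np
from uncertainty_toolbox.data import synthetic_sine_heteroscedastic

f, std, y, x = synthetic_sine_heteroscedastic(n_points=100)
# The hypothetical model predicts the noiseless sine f with standard deviation
# std, while y are noisy observations; the noise level grows away from x = 5.
assert f.shape == std.shape == y.shape == x.shape == (100,)
assert np.all(std >= 0.01)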
def get_all_accuracy_metrics(
y_pred: np.ndarray,
y_true: np.ndarray,
verbose: bool = True,
) -> Dict[str, float]:
"""Compute all accuracy metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
verbose: Activate verbose mode.
Returns:
The evaluations for all accuracy related metrics.
"""
if verbose:
print(" (1/n) Calculating accuracy metrics")
acc_metrics = prediction_error_metrics(y_pred, y_true)
return acc_metrics
|
Compute all accuracy metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
verbose: Activate verbose mode.
Returns:
The evaluations for all accuracy related metrics.
|
get_all_accuracy_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
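A usage sketch for get_all_accuracy_metrics above; the import path is inferred from the record's path field (uncertainty_toolbox/metrics.py).

import numpy as np
from uncertainty_toolbox.metrics import get_all_accuracy_metrics

np.random.seed(0)
y_true = np.linspace(0.0, 10.0, 50)
y_pred = y_true + 0.1 * np.random.randn(50)
acc = get_all_accuracy_metrics(y_pred, y_true, verbose=False)
# Keys come from prediction_error_metrics (a later record in this file):
# 'corr', 'mae', 'marpd', 'mdae', 'r2', 'rmse'
print(sorted(acc))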
def get_all_average_calibration(
y_pred: np.ndarray,
y_std: np.ndarray,
y_true: np.ndarray,
num_bins: int,
verbose: bool = True,
) -> Dict[str, float]:
"""Compute all metrics for average calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
verbose: Activate verbose mode.
Returns:
The evaluations for all metrics relating to average calibration.
"""
if verbose:
print(" (2/n) Calculating average calibration metrics")
cali_metrics = {}
cali_metrics["rms_cal"] = root_mean_squared_calibration_error(
y_pred, y_std, y_true, num_bins=num_bins
)
cali_metrics["ma_cal"] = mean_absolute_calibration_error(
y_pred, y_std, y_true, num_bins=num_bins
)
cali_metrics["miscal_area"] = miscalibration_area(
y_pred, y_std, y_true, num_bins=num_bins
)
return cali_metrics
|
Compute all metrics for average calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
verbose: Activate verbose mode.
Returns:
The evaluations for all metrics relating to average calibration.
|
get_all_average_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
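A usage sketch for get_all_average_calibration above (import path per the record's path field). The predictive standard deviation is chosen to match the actual error scale, so all three calibration metrics should come out small.

import numpy as np
from uncertainty_toolbox.metrics import get_all_average_calibration

np.random.seed(0)
y_true = np.random.randn(200)
y_pred = y_true + 0.1 * np.random.randn(200)
y_std = 0.1 * np.ones(200)  # roughly the true error scale
cali = get_all_average_calibration(y_pred, y_std, y_true, num_bins=50, verbose=False)
print(cali["rms_cal"], cali["ma_cal"], cali["miscal_area"])  # all close to 0 here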
def get_all_adversarial_group_calibration(
y_pred: np.ndarray,
y_std: np.ndarray,
y_true: np.ndarray,
num_bins: int,
verbose: bool = True,
) -> Dict[str, Dict[str, np.ndarray]]:
"""Compute all metrics for adversarial group calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
verbose: Activate verbose mode.
Returns:
The evaluations for all metrics relating to adversarial group calibration.
Each inner dictionary contains the size of each group and the metrics
computed for each group.
"""
adv_group_cali_metrics = {}
if verbose:
print(" (3/n) Calculating adversarial group calibration metrics")
print(" [1/2] for mean absolute calibration error")
ma_adv_group_cali = adversarial_group_calibration(
y_pred,
y_std,
y_true,
cali_type="mean_abs",
num_bins=num_bins,
verbose=verbose,
)
ma_adv_group_size = ma_adv_group_cali.group_size
ma_adv_group_cali_score_mean = ma_adv_group_cali.score_mean
ma_adv_group_cali_score_stderr = ma_adv_group_cali.score_stderr
adv_group_cali_metrics["ma_adv_group_cal"] = {
"group_sizes": ma_adv_group_size,
"adv_group_cali_mean": ma_adv_group_cali_score_mean,
"adv_group_cali_stderr": ma_adv_group_cali_score_stderr,
}
if verbose:
print(" [2/2] for root mean squared calibration error")
rms_adv_group_cali = adversarial_group_calibration(
y_pred,
y_std,
y_true,
cali_type="root_mean_sq",
num_bins=num_bins,
verbose=verbose,
)
rms_adv_group_size = rms_adv_group_cali.group_size
rms_adv_group_cali_score_mean = rms_adv_group_cali.score_mean
rms_adv_group_cali_score_stderr = rms_adv_group_cali.score_stderr
adv_group_cali_metrics["rms_adv_group_cal"] = {
"group_sizes": rms_adv_group_size,
"adv_group_cali_mean": rms_adv_group_cali_score_mean,
"adv_group_cali_stderr": rms_adv_group_cali_score_stderr,
}
return adv_group_cali_metrics
|
Compute all metrics for adversarial group calibration.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
verbose: Activate verbose mode.
Returns:
The evaluations for all metrics relating to adversarial group calibration.
Each inner dictionary contains the size of each group and the metrics
computed for each group.
|
get_all_adversarial_group_calibration
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
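A usage sketch for get_all_adversarial_group_calibration above (import path per the record's path field); note that the underlying adversarial search can be slow for large datasets.

import numpy as np
from uncertainty_toolbox.metrics import get_all_adversarial_group_calibration

np.random.seed(0)
y_true = np.random.randn(200)
y_pred = y_true + 0.2 * np.random.randn(200)
y_std = 0.2 * np.ones(200)
adv = get_all_adversarial_group_calibration(y_pred, y_std, y_true, num_bins=20, verbose=False)
curve = adv["ma_adv_group_cal"]
# Plotting curve["adv_group_cali_mean"] against curve["group_sizes"] shows how the
# worst-case subgroup's calibration error grows as the subgroup fraction shrinks.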
def get_all_sharpness_metrics(
y_std: np.ndarray,
verbose: bool = True,
) -> Dict[str, float]:
"""Compute all sharpness metrics
Args:
y_std: 1D array of he predicted standard deviations for the held out dataset.
verbose: Activate verbose mode.
Returns:
The evaluations for all sharpness metrics.
"""
if verbose:
print(" (4/n) Calculating sharpness metrics")
sharp_metrics = {}
sharp_metrics["sharp"] = sharpness(y_std)
return sharp_metrics
|
Compute all sharpness metrics.
Args:
    y_std: 1D array of the predicted standard deviations for the held out dataset.
verbose: Activate verbose mode.
Returns:
The evaluations for all sharpness metrics.
|
get_all_sharpness_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
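A one-line usage sketch for the thin wrapper above (import path per the record's path field); the value is the RMS of the predicted standard deviations, computed by sharpness in a later record.

import numpy as np
from uncertainty_toolbox.metrics import get_all_sharpness_metrics

print(get_all_sharpness_metrics(np.array([0.1, 0.2, 0.4]), verbose=False))
# {'sharp': 0.2645...}  i.e. sqrt((0.01 + 0.04 + 0.16) / 3)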
def get_all_scoring_rule_metrics(
y_pred: np.ndarray,
y_std: np.ndarray,
y_true: np.ndarray,
resolution: int,
scaled: bool,
verbose: bool = True,
) -> Dict[str, float]:
"""Compute all scoring rule metrics
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of he predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
resolution: The number of quantiles to use for computation.
scaled: Whether to scale the score by size of held out set.
verbose: Activate verbose mode.
Returns:
The computed scoring rule metrics.
"""
if verbose:
print(" (n/n) Calculating proper scoring rule metrics")
sr_metrics = {}
sr_metrics["nll"] = nll_gaussian(y_pred, y_std, y_true, scaled=scaled)
sr_metrics["crps"] = crps_gaussian(y_pred, y_std, y_true, scaled=scaled)
sr_metrics["check"] = check_score(
y_pred, y_std, y_true, scaled=scaled, resolution=resolution
)
sr_metrics["interval"] = interval_score(
y_pred, y_std, y_true, scaled=scaled, resolution=resolution
)
return sr_metrics
|
Compute all scoring rule metrics.
Args:
    y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
resolution: The number of quantiles to use for computation.
scaled: Whether to scale the score by size of held out set.
verbose: Activate verbose mode.
Returns:
The computed scoring rule metrics.
|
get_all_scoring_rule_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
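A usage sketch for get_all_scoring_rule_metrics above (import path per the record's path field); the argument values mirror the defaults used by get_all_metrics in the next record.

import numpy as np
from uncertainty_toolbox.metrics import get_all_scoring_rule_metrics

np.random.seed(0)
y_true = np.random.randn(200)
y_pred = np.zeros(200)
y_std = np.ones(200)
sr = get_all_scoring_rule_metrics(y_pred, y_std, y_true, resolution=99, scaled=True, verbose=False)
print(sorted(sr))  # ['check', 'crps', 'interval', 'nll'] per the code above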
def get_all_metrics(
y_pred: np.ndarray,
y_std: np.ndarray,
y_true: np.ndarray,
num_bins: int = 100,
resolution: int = 99,
scaled: bool = True,
verbose: bool = True,
) -> Dict[str, Any]:
"""Compute all metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
resolution: The number of quantiles to use for computation.
scaled: Whether to scale the score by size of held out set.
verbose: Activate verbose mode.
Returns:
Dictionary containing all metrics.
"""
# Accuracy
accuracy_metrics = get_all_accuracy_metrics(y_pred, y_true, verbose)
# Calibration
calibration_metrics = get_all_average_calibration(
y_pred, y_std, y_true, num_bins, verbose
)
# Adversarial Group Calibration
adv_group_cali_metrics = get_all_adversarial_group_calibration(
y_pred, y_std, y_true, num_bins, verbose
)
# Sharpness
sharpness_metrics = get_all_sharpness_metrics(y_std, verbose)
# Proper Scoring Rules
scoring_rule_metrics = get_all_scoring_rule_metrics(
y_pred, y_std, y_true, resolution, scaled, verbose
)
# Print all outputs
if verbose:
print("**Finished Calculating All Metrics**")
print("\n")
print(" Accuracy Metrics ".center(60, "="))
for acc_metric, acc_val in accuracy_metrics.items():
print(" {:<13} {:.3f}".format(METRIC_NAMES[acc_metric], acc_val))
print(" Average Calibration Metrics ".center(60, "="))
for cali_metric, cali_val in calibration_metrics.items():
print(" {:<37} {:.3f}".format(METRIC_NAMES[cali_metric], cali_val))
print(" Adversarial Group Calibration Metrics ".center(60, "="))
_print_adversarial_group_calibration(adv_group_cali_metrics, print_group_num=3)
print(" Sharpness Metrics ".center(60, "="))
for sharp_metric, sharp_val in sharpness_metrics.items():
print(" {:} {:.3f}".format(METRIC_NAMES[sharp_metric], sharp_val))
print(" Scoring Rule Metrics ".center(60, "="))
for sr_metric, sr_val in scoring_rule_metrics.items():
print(" {:<25} {:.3f}".format(METRIC_NAMES[sr_metric], sr_val))
all_scores = {
"accuracy": accuracy_metrics,
"avg_calibration": calibration_metrics,
"adv_group_calibration": adv_group_cali_metrics,
"sharpness": sharpness_metrics,
"scoring_rule": scoring_rule_metrics,
}
return all_scores
|
Compute all metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
    y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: The number of bins to use for discretization in some metrics.
resolution: The number of quantiles to use for computation.
scaled: Whether to scale the score by size of held out set.
verbose: Activate verbose mode.
Returns:
Dictionary containing all metrics.
|
get_all_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics.py
|
MIT
|
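An end-to-end sketch combining the synthetic data generator from uncertainty_toolbox/data.py (an earlier record) with get_all_metrics above; import paths are inferred from the records' path fields.

import numpy as np
from uncertainty_toolbox.data import synthetic_sine_heteroscedastic
from uncertainty_toolbox.metrics import get_all_metrics

np.random.seed(0)
y_pred, y_std, y_true, x = synthetic_sine_heteroscedastic(n_points=200)
metrics = get_all_metrics(y_pred, y_std, y_true, num_bins=50, verbose=False)
# Top-level keys per the code above:
# accuracy, avg_calibration, adv_group_calibration, sharpness, scoring_rule
print(metrics["avg_calibration"]["miscal_area"])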
def prediction_error_metrics(
y_pred: np.ndarray,
y_true: np.ndarray,
) -> Dict[str, float]:
"""Get all prediction error metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
Returns:
        A dictionary with Mean absolute error ('mae'), Root mean squared
error ('rmse'), Median absolute error ('mdae'), Mean absolute
relative percent difference ('marpd'), r^2 ('r2'), and Pearson's
correlation coefficient ('corr').
"""
# Check that input arrays are flat
assert_is_flat_same_shape(y_pred, y_true)
# Compute metrics
mae = mean_absolute_error(y_true, y_pred)
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
mdae = median_absolute_error(y_true, y_pred)
residuals = y_true - y_pred
marpd = np.abs(2 * residuals / (np.abs(y_pred) + np.abs(y_true))).mean() * 100
r2 = r2_score(y_true, y_pred)
corr = np.corrcoef(y_true, y_pred)[0, 1]
prediction_metrics = {
"mae": mae,
"rmse": rmse,
"mdae": mdae,
"marpd": marpd,
"r2": r2,
"corr": corr,
}
return prediction_metrics
|
Get all prediction error metrics.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
Returns:
    A dictionary with Mean absolute error ('mae'), Root mean squared
error ('rmse'), Median absolute error ('mdae'), Mean absolute
relative percent difference ('marpd'), r^2 ('r2'), and Pearson's
correlation coefficient ('corr').
|
prediction_error_metrics
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_accuracy.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_accuracy.py
|
MIT
|
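A small worked example for prediction_error_metrics above (import path per the record's path field); the single unit residual makes the headline values easy to check by hand.

import numpy as np
from uncertainty_toolbox.metrics_accuracy import prediction_error_metrics

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.0, 2.0, 3.0, 5.0])  # one residual of -1
m = prediction_error_metrics(y_pred, y_true)
# mae = 1/4 = 0.25, rmse = sqrt(1/4) = 0.5, mdae = 0.0
print(m["mae"], m["rmse"], m["mdae"])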
def sharpness(y_std: np.ndarray) -> float:
"""Return sharpness (a single measure of the overall confidence).
Args:
y_std: 1D array of the predicted standard deviations for the held out dataset.
Returns:
A single scalar which quantifies the average of the standard deviations.
"""
# Check that input arrays are flat
assert_is_flat_same_shape(y_std)
# Check that input std is positive
assert_is_positive(y_std)
# Compute sharpness
sharp_metric = np.sqrt(np.mean(y_std**2))
return sharp_metric
|
Return sharpness (a single measure of the overall confidence).
Args:
y_std: 1D array of the predicted standard deviations for the held out dataset.
Returns:
A single scalar which quantifies the average of the standard deviations.
|
sharpness
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
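A worked example for sharpness above (import path per the record's path field): the metric is the root-mean-square of the predicted standard deviations.

import numpy as np
from uncertainty_toolbox.metrics_calibration import sharpness

print(sharpness(np.array([3.0, 4.0])))  # sqrt((9 + 16) / 2) = sqrt(12.5) ~ 3.5355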
def root_mean_squared_calibration_error(
y_pred: np.ndarray,
y_std: np.ndarray,
y_true: np.ndarray,
num_bins: int = 100,
vectorized: bool = False,
recal_model: IsotonicRegression = None,
prop_type: str = "interval",
) -> float:
"""Root mean squared calibration error.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
vectorized: whether to vectorize computation for observed proportions.
(while setting to True is faster, it has much higher memory requirements
and may fail to run for larger datasets).
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A single scalar which calculates the root mean squared calibration error.
"""
# Check that input arrays are flat
assert_is_flat_same_shape(y_pred, y_std, y_true)
# Check that input std is positive
assert_is_positive(y_std)
# Check that prop_type is one of 'interval' or 'quantile'
assert prop_type in ["interval", "quantile"]
# Get lists of expected and observed proportions for a range of quantiles
if vectorized:
(exp_proportions, obs_proportions) = get_proportion_lists_vectorized(
y_pred, y_std, y_true, num_bins, recal_model, prop_type
)
else:
(exp_proportions, obs_proportions) = get_proportion_lists(
y_pred, y_std, y_true, num_bins, recal_model, prop_type
)
squared_diff_proportions = np.square(exp_proportions - obs_proportions)
rmsce = np.sqrt(np.mean(squared_diff_proportions))
return rmsce
|
Root mean squared calibration error.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
vectorized: whether to vectorize computation for observed proportions.
(while setting to True is faster, it has much higher memory requirements
and may fail to run for larger datasets).
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A single scalar which calculates the root mean squared calibration error.
|
root_mean_squared_calibration_error
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
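A usage sketch for root_mean_squared_calibration_error above (import path per the record's path field), contrasting a roughly calibrated predictor with an overconfident one.

import numpy as np
from uncertainty_toolbox.metrics_calibration import root_mean_squared_calibration_error

np.random.seed(0)
y_true = np.random.randn(500)
y_pred = np.zeros(500)
well_cal = root_mean_squared_calibration_error(y_pred, np.ones(500), y_true)          # std matches the spread of y_true
over_conf = root_mean_squared_calibration_error(y_pred, 0.2 * np.ones(500), y_true)   # std far too small
assert over_conf > well_cal  # overconfidence shows up as a much larger calibration error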
def mean_absolute_calibration_error(
y_pred: np.ndarray,
y_std: np.ndarray,
y_true: np.ndarray,
num_bins: int = 100,
vectorized: bool = False,
recal_model: IsotonicRegression = None,
prop_type: str = "interval",
) -> float:
"""Mean absolute calibration error; identical to ECE.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
vectorized: whether to vectorize computation for observed proportions.
(while setting to True is faster, it has much higher memory requirements
and may fail to run for larger datasets).
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A single scalar which calculates the mean absolute calibration error.
"""
# Check that input arrays are flat
assert_is_flat_same_shape(y_pred, y_std, y_true)
# Check that input std is positive
assert_is_positive(y_std)
# Check that prop_type is one of 'interval' or 'quantile'
assert prop_type in ["interval", "quantile"]
# Get lists of expected and observed proportions for a range of quantiles
if vectorized:
(exp_proportions, obs_proportions) = get_proportion_lists_vectorized(
y_pred, y_std, y_true, num_bins, recal_model, prop_type
)
else:
(exp_proportions, obs_proportions) = get_proportion_lists(
y_pred, y_std, y_true, num_bins, recal_model, prop_type
)
abs_diff_proportions = np.abs(exp_proportions - obs_proportions)
mace = np.mean(abs_diff_proportions)
return mace
|
Mean absolute calibration error; identical to ECE.
Args:
y_pred: 1D array of the predicted means for the held out dataset.
y_std: 1D array of the predicted standard deviations for the held out dataset.
y_true: 1D array of the true labels in the held out dataset.
num_bins: number of discretizations for the probability space [0, 1].
vectorized: whether to vectorize computation for observed proportions.
(while setting to True is faster, it has much higher memory requirements
and may fail to run for larger datasets).
recal_model: an sklearn isotonic regression model which recalibrates the predictions.
prop_type: "interval" to measure observed proportions for centered prediction intervals,
and "quantile" for observed proportions below a predicted quantile.
Returns:
A single scalar which calculates the mean absolute calibration error.
|
mean_absolute_calibration_error
|
python
|
uncertainty-toolbox/uncertainty-toolbox
|
uncertainty_toolbox/metrics_calibration.py
|
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
|
MIT
|
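A usage sketch for mean_absolute_calibration_error above (import path per the record's path field), checking that the looped and vectorized code paths agree up to numerical noise.

import numpy as np
from uncertainty_toolbox.metrics_calibration import mean_absolute_calibration_error

np.random.seed(0)
y_true = np.random.randn(500)
y_pred = np.zeros(500)
y_std = np.ones(500)
mace_loop = mean_absolute_calibration_error(y_pred, y_std, y_true, num_bins=50)
mace_vec = mean_absolute_calibration_error(y_pred, y_std, y_true, num_bins=50, vectorized=True)
assert np.isclose(mace_loop, mace_vec)  # vectorized=True trades memory for speed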