repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 distinct value) |
---|---|---|---|---|---|---|
LTL-GATA
|
LTL-GATA-main/src/model/__init__.py
|
from typing import List, Tuple
from argparse import Namespace
import logging
import pdb
import numpy as np
import torch
from utils import max_len, to_pt, pad_sequences
from components import Actions, Vocabulary
from model.features import TextEncoder
from model.layers import LSTMCell
from state import BatchedStates
logger = logging.getLogger()
class ActionNet(torch.nn.Module):
def __init__(self, hidden: int, num_inputs: int, **kwargs) -> None:
super(ActionNet, self).__init__(**kwargs)
self.layers = torch.nn.Sequential(
torch.nn.Linear(hidden * num_inputs, hidden),
torch.nn.ReLU(),
)
self.final = torch.nn.Linear(hidden, 1)
def forward(self, inputs: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
out = self.layers(inputs) * mask.unsqueeze(-1)
return self.final(out).squeeze(-1) * mask
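# Shape sketch for ActionNet.forward (inferred from the layers above):
#   inputs: [batch, num_actions, hidden * num_inputs]
#   mask:   [batch, num_actions], 1 for real action candidates, 0 for padding
#   output: [batch, num_actions] of unnormalized per-action scores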
class PolicyNet(torch.nn.Module):
def __init__(self, config: Namespace,
word_vocab: Vocabulary,
ltl_vocab: Vocabulary,
action_vocab: Vocabulary,
pretrain: bool,
graph_updater=None,
context_length: int = 1,
**kwargs) -> None:
super(PolicyNet, self).__init__(**kwargs)
self._dummy = torch.nn.Parameter(torch.empty(0))
# TODO clean this up
self.config = config
for k, v in vars(config.model).items():
setattr(self, k, v)
self.pretrain = pretrain
self.context_length = context_length
self.build(config, word_vocab=word_vocab,
ltl_vocab=ltl_vocab,
action_vocab=action_vocab,
pretrain=pretrain,
graph_updater=graph_updater)
@property
def device(self) -> str:
return self._dummy.device
def load_vocab(self, word_vocab: Vocabulary,
ltl_vocab: Vocabulary,
action_vocab: Vocabulary) -> None:
assert isinstance(word_vocab, Vocabulary)
assert isinstance(ltl_vocab, Vocabulary)
assert isinstance(action_vocab, Vocabulary)
if self.concat_strings:
self.word_vocab = word_vocab
self.ltl_vocab = ltl_vocab
self.action_vocab = action_vocab
concat_vocab = action_vocab
concat_vocab.name = 'concat-vocab'
concat_vocab += ['[ACTION]']
if self.use_observations:
concat_vocab += word_vocab
concat_vocab += ['[OBS]']
if self.use_ltl:
concat_vocab += ltl_vocab
concat_vocab += ['[LTL]']
self.text_encoder = TextEncoder(config=self.config.text_encoder,
vocab=concat_vocab)
else:
self.ltl_vocab = ltl_vocab
self.word_vocab = word_vocab
self.action_vocab = action_vocab
diff_ltl = self.use_ltl and not self.same_ltl_text_encoder
if diff_ltl and self.ltl_encoder is not None:
if len(ltl_vocab) != len(self.ltl_encoder.vocab):
self.ltl_encoder = TextEncoder(
config=self.config.ltl_encoder, vocab=ltl_vocab)
else:
self.ltl_encoder.load_vocab(ltl_vocab)
if self.text_encoder is not None:
if len(word_vocab) != len(self.text_encoder.vocab):
self.text_encoder = TextEncoder(
config=self.config.text_encoder, vocab=word_vocab,)
else:
self.text_encoder.load_vocab(word_vocab)
def build(self, config: Namespace, word_vocab: Vocabulary,
ltl_vocab: Vocabulary, action_vocab: Vocabulary,
pretrain: bool, graph_updater) -> None:
# assert self.action_net_hidden_size == 768 if \
# self.use_pretrained_lm_for_text else True, \
# "Action net hidden size must match BERT output size of 768"
self.text_encoder, self.ltl_encoder, self.graph_encoder, \
self.actions_encoder = None, None, None, None
if self.concat_strings:
self.word_vocab = word_vocab
self.ltl_vocab = ltl_vocab
self.action_vocab = action_vocab
concat_vocab = action_vocab
concat_vocab.name = 'concat-vocab'
concat_vocab += ['[ACTION]']
if self.use_observations:
concat_vocab += word_vocab
concat_vocab += ['[OBS]']
if self.use_ltl:
concat_vocab += ltl_vocab
concat_vocab += ['[LTL]']
self.text_encoder = TextEncoder(config=config.text_encoder,
vocab=concat_vocab)
# 1 for the encoded admissible actions
self.action_network = ActionNet(hidden=self.action_net_hidden_size,
num_inputs=1)
if self.recurrent_memory:
num_inputs = 1
self.recurrent_memory_unit = LSTMCell(
self.action_net_hidden_size * num_inputs,
self.action_net_hidden_size, use_bias=True)
else:
if pretrain:
self.ltl_encoder = TextEncoder(config=config.ltl_encoder,
vocab=ltl_vocab,)
elif self.use_ltl:
# assert not bool(self.same_ltl_text_encoder *
# self.ltl_text_string_concat), \
# "Config violation: 'same_ltl_text_encoder' and " + \
# "'ltl_text_string_concat' can't both be True"
if self.same_ltl_text_encoder:
word_vocab += ltl_vocab
ltl_vocab += word_vocab
else:
# ltl_vocab += word_vocab
self.ltl_encoder = TextEncoder(config=config.ltl_encoder,
vocab=ltl_vocab,)
if self.use_observations:
if (pretrain and config.pretrain.text) or not pretrain:
self.text_encoder = TextEncoder(config=config.text_encoder,
vocab=word_vocab,)
if self.use_ltl and self.same_ltl_text_encoder and \
not pretrain:
self.ltl_encoder = self.text_encoder
if self.use_belief_graph:
self.graph_encoder = graph_updater
# self.load_vocab(word_vocab, ltl_vocab,)
self.ltl_vocab = ltl_vocab
self.word_vocab = word_vocab
if self.use_independent_actions_encoder:
self.action_vocab = action_vocab
self.actions_encoder = TextEncoder(
config=config.actions_encoder, vocab=action_vocab,)
else:
self.action_vocab = self.word_vocab
self.actions_encoder = self.text_encoder
# 1 for the encoded admissible actions
num_inputs = 1 + np.sum(np.array([
self.use_observations,
self.use_belief_graph,
self.use_ltl]))
self.action_network = ActionNet(hidden=self.action_net_hidden_size,
num_inputs=num_inputs)
if self.recurrent_memory:
num_inputs -= 1
self.recurrent_memory_unit = LSTMCell(
self.action_net_hidden_size * num_inputs,
self.action_net_hidden_size, use_bias=True)
def encode_actions(self, actions: List[List[str]]) -> torch.Tensor:
"""
# actions come out as NumActionsxLengthxEmbed
# stacking gives BatchxNumActionsxLengthxEmbed
@returns: torch Tensor [batch-size, num-actions, embed-size]
"""
# we first sum over the length (dim=1) then pad the num actions
# since num actions per batch may not be the same size
if self.use_pretrained_lm_for_actions:
actions_mask = list()
unwrapped_actions = list()
batch_size = len(actions)
max_num_action = max_len(actions)
for _actions in actions:
actions_len = len(_actions)
padding_len = (max_num_action - actions_len)
unwrapped_actions.extend(
_actions + [self.actions_encoder.tokenizer.pad_token] *
padding_len)
actions_mask.extend([1] * len(_actions) + [0] * (padding_len))
encoded_actions, _mask = self.actions_encoder(
unwrapped_actions)
max_word_num = _mask.shape[1]
actions_mask = torch.tensor(actions_mask, device=self.device)
encoded_actions = encoded_actions.view(
batch_size, max_num_action, max_word_num, -1)
actions_mask = actions_mask.view(
batch_size, max_num_action)
# batch-size x max-num-action
# batch-size x max-num-action x hid
# _mask = torch.sum(actions_mask, -1)
_mask = actions_mask
encoded_actions = torch.sum(encoded_actions, dim=-2)
tmp = torch.eq(_mask, 0).float().to(self.device)
_mask = _mask + tmp
# batch-size x max-num-action x hid
encoded_actions = encoded_actions / _mask.unsqueeze(-1)
else:
batch_size = len(actions)
max_num_action = max_len(actions)
input_action_candidate_list = list()
for i in range(batch_size):
word_list = [item.split() for item in actions[i]]
word_id_list = [[self.action_vocab[tok] for tok in tokens]
for tokens in word_list]
input_word = pad_sequences(
word_id_list, maxlen=max_len(word_id_list)).astype('int32')
word_level = to_pt(input_word, True)
input_action_candidate_list.append(word_level)
max_word_num = max([item.size(1)
for item in input_action_candidate_list])
inputs = torch.zeros(
(batch_size, max_num_action, max_word_num),
device=self.device, dtype=torch.long)
for i in range(batch_size):
j, k = input_action_candidate_list[i].shape
assert j == input_action_candidate_list[i].size(0)
assert k == input_action_candidate_list[i].size(1)
inputs[i, :j, :k] = input_action_candidate_list[i]
inputs = inputs.view(batch_size * max_num_action, max_word_num)
encoded_actions, actions_mask = self.actions_encoder(
inputs, compute_word_ids=False)
if self.actions_encoder.lstm_backbone:
encoded_actions = encoded_actions.view(
batch_size, max_num_action, -1)
elif self.actions_encoder.mlp_backbone:
encoded_actions = encoded_actions.view(
batch_size, max_num_action, -1)
else:
encoded_actions = encoded_actions.view(
batch_size, max_num_action, max_word_num, -1)
encoded_actions = torch.sum(encoded_actions, dim=-2)
actions_mask = actions_mask.view(
batch_size, max_num_action, max_word_num)
# batch-size x max-num-action
_mask = torch.sum(actions_mask, -1)
# batch-size x max-num-action x hid
tmp = torch.eq(_mask, 0).float().to(self.device)
_mask = _mask + tmp
# batch-size x max-num-action x hid
encoded_actions = encoded_actions / _mask.unsqueeze(-1)
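# The division above is a masked mean over the words of each action;
# `tmp` replaces zero word-counts with 1 so fully-padded action slots
# do not cause a division by zero.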
actions_mask = actions_mask.byte().any(-1).float()
return encoded_actions, actions_mask
def combine_features(
self, num_actions: int, batch_size: int,
encoded_obs: torch.Tensor, obs_mask: torch.Tensor,
encoded_bg: torch.Tensor, bg_mask: torch.Tensor,
encoded_ltl: torch.Tensor, ltl_mask: torch.Tensor,
previous_hidden: torch.Tensor = None,
previous_cell: torch.Tensor = None) -> torch.Tensor:
if self.concat_features:
encoded_features = None
for name, feature, mask in [
('obs', encoded_obs, obs_mask),
('ltl', encoded_ltl, ltl_mask),
('bg', encoded_bg, bg_mask),
]:
if feature is None:
continue
if name == 'obs':
sumit = self.text_encoder.lstm_backbone or \
self.text_encoder.mlp_backbone if\
self.text_encoder else None
elif name == 'ltl':
sumit = self.ltl_encoder.lstm_backbone or \
self.ltl_encoder.mlp_backbone if\
self.ltl_encoder else None
elif name == 'bg':
sumit = False
# masked mean
# if name == 'obs' and not self.use_pretrained_lm_for_text or \
# name == 'ltl' and not self.use_pretrained_lm_for_ltl:
_mask = torch.sum(mask, -1) # batch
if not sumit:
feature = torch.sum(feature, dim=1) # batch x hid
tmp = torch.eq(_mask, 0).float().to(self.device)
_mask = _mask + tmp
feature = feature / \
_mask.unsqueeze(-1) # batch x hid
# TODO check this for pretraining
# if num_actions > 1:
# feature = torch.stack([feature] * num_actions, dim=1)
if encoded_features is None:
encoded_features = feature
else:
encoded_features = torch.cat([encoded_features, feature],
dim=-1)
else:
logger.critical(
"Concat features is disable but no other " +
"aggregation mechanism exists")
raise RuntimeError(
"Concat features is disable but no other " +
"aggregation mechanism exists")
if self.recurrent_memory:
previous_hidden, previous_cell = \
self.recurrent_memory_unit(encoded_features,
h_0=previous_hidden,
c_0=previous_cell)
return torch.stack([encoded_features] * num_actions, dim=1), \
previous_hidden, previous_cell
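# combine_features, rough shape sketch (concat_features path): each available
# feature is mean-pooled to [batch, hid], the pooled features are concatenated
# along the last dimension, and the result is tiled across the action
# dimension to [batch, num_actions, hid * num_features].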
def encode(self, states: BatchedStates,
admissible_actions: List[Actions],
previous_hidden: torch.Tensor,
previous_cell: torch.Tensor) -> torch.Tensor:
"""
@returns:
encoded_features: torch Tensor of size
[batch-size, num-action-candidates, embedding-size]
mask: torch Tensor of size [batch-size, num-action-candidates]
used to mask the padded actions from the action network
"""
encoded_obs, obs_mask, encoded_bg, bg_mask, encoded_ltl, ltl_mask = \
tuple([None] * 6)
if self.concat_strings:
# encoded_obs, obs_mask = self.text_encoder([
obs = None
ltl = None
batch_size = len(admissible_actions)
if self.use_observations:
obs = [
' '.join(['[OBS]', obs]) for
obs in states.observations]
if self.use_ltl:
ltl = [
' '.join(['[LTL]', ltl.tokenize()]) for
ltl in states.ltl_formulas]
max_num_action = max_len(admissible_actions)
inputs = list()
final_mask = list()
for i, actions in enumerate(admissible_actions):
pad = [self.text_encoder.vocab.pad_token for _ in range(
max_num_action - len(actions))]
final_mask.extend([1] * len(actions) + [0] * len(pad))
actions += pad
for action in actions:
if obs and ltl:
inputs.append(
' '.join([obs[i], ltl[i], '[ACTION]', action]))
elif obs:
inputs.append(
' '.join([obs[i], '[ACTION]', action]))
elif ltl:
inputs.append(
' '.join([ltl[i], '[ACTION]', action]))
encodings, mask = self.text_encoder(inputs)
_mask = torch.sum(mask, -1)
if not self.text_encoder.lstm_backbone:
encodings = torch.sum(encodings, dim=1) # batch x hid
tmp = torch.eq(_mask, 0).float().to(self.device)
_mask = _mask + tmp
encodings = encodings / \
_mask.unsqueeze(-1) # batch x hid
encodings = encodings.reshape((batch_size, max_num_action,
self.action_net_hidden_size))
final_mask = torch.tensor(final_mask, device=self.device)
final_mask = final_mask.view(batch_size, max_num_action)
if self.recurrent_memory:
previous_hidden, previous_cell = \
self.recurrent_memory_unit(encodings,
h_0=previous_hidden,
c_0=previous_cell)
return encodings, final_mask, previous_hidden, previous_cell
else:
obs = list()
ltls = list()
if self.context_length == 1:
if self.use_ltl:
ltls = [ltl.tokenize() for ltl in states.ltl_formulas]
obs = states.observations
else:
for state in states:
obs.append('<obs> ' + ' <obs> '.join(
[s.observation for s in
state.past[-(self.context_length - 1):] + [state]]
))
ltls.append('<ltl> ' + ' <ltl> '.join(
[s.ltl.tokenize() for s in
state.past[-(self.context_length - 1):] + [state]]
))
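# When context_length > 1, each observation/LTL input is the current state
# plus up to (context_length - 1) past states, joined with '<obs>' (resp.
# '<ltl>') separator tokens as built above.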
if self.use_observations:
encoded_obs, obs_mask = self.text_encoder(obs)
if self.use_ltl:
encoded_ltl, ltl_mask = self.ltl_encoder(ltls)
if self.use_belief_graph:
if self.graph_encoder.real_valued_graph:
encoded_bg, bg_mask = self.graph_encoder.encode_graph(
torch.stack([bg._facts if bg is not None else [] for
bg in states.belief_graphs]))
else:
encoded_bg, bg_mask = self.graph_encoder.encode_graph(
[bg.facts_as_triplets if bg is not None else [] for
bg in states.belief_graphs])
encoded_actions, actions_mask = \
self.encode_actions(admissible_actions)
batch_size, num_actions, _ = encoded_actions.shape
encoded_features, previous_hidden, previous_cell = \
self.combine_features(
num_actions, batch_size,
encoded_obs, obs_mask, encoded_bg, bg_mask,
encoded_ltl, ltl_mask,
previous_hidden, previous_cell)
return (torch.cat([encoded_actions, encoded_features], dim=-1),
actions_mask, previous_hidden, previous_cell)
def compute_inverse_dynamics_loss(self, states: BatchedStates):
obs = states.observations
if len(obs) <= 1:
return None
loss = self.text_encoder.compute_inverse_dynamics_loss(
obs[:-1], obs[1:], states.actions)
return loss
def mlm_loss(self, observations: List[str]) -> torch.Tensor:
loss = None
if self.text_encoder is not None:
loss = self.text_encoder.compute_mlm_loss(observations)
# if self.ltl_encoder is not None:
# loss += self.ltl_encoder.mlm_loss(observations)
return loss
def forward(self, states: BatchedStates, admissible_actions: List[Actions],
previous_hidden: torch.Tensor = None,
previous_cell: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
inputs, mask, previous_hidden, previous_cell = self.encode(
states, admissible_actions, previous_hidden, previous_cell)
return self.action_network(inputs, mask), mask, \
previous_hidden, previous_cell
| 21,064 | 44.301075 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/src/support-files/gata-models/augment_dataset.py
|
import json
from tqdm import tqdm
def main():
for split in ['train', 'test', 'valid']:
print(split)
with open(f'cmd_gen.0.2/{split}.json') as f:
data = json.load(f)
data['graph_index'] = json.loads(data['graph_index'])
rkey = list(data['graph_index']['relations'].keys())[-1]
rkey = str(int(rkey) + 1)
ekey = list(data['graph_index']['entities'].keys())[-1]
ekey = str(int(ekey) + 1)
data['graph_index']['entities'][ekey] = 'examined'
# Add cookbook is examined relation
data['graph_index']['relations'][rkey] = [
int(list(data['graph_index']
['entities'].values()).index('cookbook')),
int(ekey),
int(list(data['graph_index']
['relation_types'].values()).index('is')),
]
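# The new relation is stored as a triple of indices:
# [head entity id, tail entity id, relation type id], i.e. (cookbook, examined, is).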
# go through examples and
# 1. get graph index of examine cookbook and add above rkey
# 2. add 'add , cookbook , examined , is'
"""
{'game': 'tw-cooking-recipe1+take1+drop+go9-yGMMf1gdtY2giP5e.z8',
'step': [0, 0],
'observation': "you are hungry ! ",
'previous_action': 'restart',
'previous_graph_seen': 0,
'target_commands': ['add , exit , livingroom , east_of',
'add , player , livingroom , at',
'add , sofa , livingroom , at']}
"""
for i, example in enumerate(tqdm(data['examples'])):
# example = json.loads(example)
if example['previous_action'] == 'examine cookbook':
example['target_commands'].append(
'add , cookbook , examined , is')
graph_idx = example['previous_graph_seen']
data['graph_index']['graphs'][str(graph_idx)].append(int(rkey))
data['examples'][i] = example
data['graph_index'] = json.dumps(data['graph_index'])
with open(f'cmd_gen.0.2/{split}_aug.json', 'w') as f:
json.dump(data, f)
if __name__ == "__main__":
main()
| 1,985 | 35.109091 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/gpt3-experiments/nl2ltl.py
|
import os
import openai
import random
import pickle
import time
openai.api_key = os.getenv("OPENAI_API_KEY")
obss_raw = open("data/observations.txt").read().strip().split("\n")
obss_raw = list(map(eval, obss_raw))
formulas_raw = open("data/formulas.txt").read().strip().split("\n")
formulas_raw = list(map(eval, formulas_raw))
obss = []
formulas = []
for i in range(len(obss_raw)):
if 'player_at_kitchen' in formulas_raw[i][0]:
formulas_raw[i] = formulas_raw[i][1:]
if obss_raw[i][0] == 'first':
obss.append(obss_raw[i][1])
formulas.append(formulas_raw[i][0])
elif obss_raw[i][0] == 'second':
obss.append(obss_raw[i][1])
formulas.append(formulas_raw[i][1])
prompt_idxs = [101, 108, 466, 819, 821, 844]
def predict(test_idx):
prompt = ""
for i in range(len(prompt_idxs)):
idx = prompt_idxs[i]
prompt += str(i+1) + ". " + "NL: " + \
obss[idx] + "\n" + "LTL: " + str(formulas[idx]) + "\n"
prompt += str(len(prompt_idxs)+1) + ". " + "NL: " + \
obss[test_idx] + "\n" + "LTL:"
response = openai.Completion.create(
model="text-ada-001",
prompt=prompt,
max_tokens=250,
temperature=0,
stop='\n'
)
time.sleep(1)
return response
test_idxs = [i for i in range(len(obss_raw)) if i not in prompt_idxs and (
"cookbook_is_examined" not in formulas[i])]
random.seed(10)
random.shuffle(test_idxs)
N = 234
absolute_correct = 0
almost_correct = 0
responses = {}
out_file = open("gpt-out.pkl", "wb")
for i in range(N):
test_idx = test_idxs[i]
# Remove some examples that just instruct the agent to open cookbook and eat
resp = predict(test_idx)
print(resp["choices"][0]["text"])
responses[test_idx] = resp["choices"][0]["text"]
if resp["choices"][0]["text"].strip() == formulas[test_idx]:
absolute_correct += 1
print("Absolute correct!")
elif resp["choices"][0]["text"].strip().replace("(", "").replace(")", "").replace(" ", "") == formulas[test_idx].replace("(", "").replace(")", "").replace(" ", ""):
almost_correct += 1
print("Almost correct!")
else:
print("Incorrect.")
print("Absolute Accuracy:", absolute_correct / N)
print("Almost Accuracy:", (absolute_correct + almost_correct) / N)
pickle.dump(responses, out_file)
out_file.close()
| 2,377 | 24.847826 | 168 |
py
|
period_graph
|
period_graph-master/setup.py
|
from setuptools import setup
setup(name='period_graph',
version='0.1',
description='Saves and parallelizes computations of periods of (quartic) hypersurfaces',
url='https://github.com/a-kulkarn/period_graph.git',
author='Avinash Kulkarni',
author_email='[email protected]',
license='MIT',
packages=['period_graph'],
zip_safe=False)
| 421 | 29.142857 | 94 |
py
|
period_graph
|
period_graph-master/period_graph/__init__.py
|
from sage.all import *
from period_graph.interface import *
SELF_PATH = period_graph.interface.SELF_PATH
TEST_PATH = os.path.join(os.path.join(SELF_PATH, "tests", ""))
| 169 | 27.333333 | 62 |
py
|
period_graph
|
period_graph-master/period_graph/interface.py
|
import os, sys, subprocess
from sage.all import *
from period_graph.src.SAGE_CONFIG import *
sys.path.insert(1, SELF_PATH + "src/")
sys.path.insert(2, SELF_PATH + "src/suite/")
import numpy as np
# Stupid imports (should be pure python in the future).
load(SRC_ABS_PATH + "sage/phase_I_util.py") # Needed for nn_sort.
load(SRC_ABS_PATH + "first-stage-analysis.sage")
import period_graph.src.integrator
from period_graph.src.carry_periods import *
load(SRC_ABS_PATH + "sage/to_AI_pipe.sage")
load(SRC_ABS_PATH + "sage/sage_data_handling.sage")
############################################################################################
# Testing
############################################################################################
def load_test(testfile=SRC_ABS_PATH+"user_input/test_edges"):
"""
Loads a file containing a list of polynomials. Returns a list
of homogeneous quartics in 4 variables.
"""
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
lst = []
with open(testfile) as F:
for line in F:
tmp = sage_eval(line, locals={'x':x, 'y':y, 'z':z, 'w':w})
lst.append(tmp)
return lst
############################################################################################
# Util / misc
############################################################################################
def lex_order_mons_of_degree(R,d):
# Note: Only works for characteristic 0.
P = R.change_ring(order='lex')
mons = (sum(P.gens()) ** d).monomials()
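# Expanding (x + y + ...)^d and reading off .monomials() yields every
# degree-d monomial exactly once, already sorted in the lex order of P.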
return [R(m) for m in mons]
def isSmooth(g):
return ProjectiveHypersurface(g).is_smooth()
def get_simple_polys(n,degree=4, R=PolynomialRing(QQ,'x,y,z,w')):
"""
Return smooth polynomials with n terms with each coefficient equal to 1.
"""
mons = lex_order_mons_of_degree(R,degree)
if n > len(mons):
raise ValueError("The number of terms may not exceed the number of monomials.")
return [sum(p) for p in Subsets(mons,n) if isSmooth(sum(p))]
def convert_folder_to_edge(folder, R=PolynomialRing(QQ,'x,y,z,w')):
pols = folder.split('__')
f0 = parse_compressed_pol(R, pols[1])
f1 = parse_compressed_pol(R, pols[2])
return (f0,f1)
def _quartics_to_file(filename, quartics):
"""
Print the list of quartics to a file in a human readable way.
"""
with open(filename, 'w') as F:
lststr = str(quartics).replace(',', ',\n')
F.write(lststr)
F.write('\n')
############################################################################################
# Neural network sorting (nn_sort)
############################################################################################
def read_nn_results(parent, probabilities=True):
"""
Read the results output by the neural network. The polynomials are returned
in the parent ring specified by the first argument.
"""
mons = lex_order_mons_of_degree(parent,4)
output = []
with open(SRC_ABS_PATH + "ai_output", 'r') as F:
for line in F:
e = eval(line)
v0 = e[0:len(mons)]
v1 = e[len(mons):2*len(mons)]
q0 = sum(mons[i] * v0[i] for i in range(len(mons)))
q1 = sum(mons[i] * v1[i] for i in range(len(mons)))
output.append((q0,q1))
if probabilities:
probs = np.loadtxt(SRC_ABS_PATH + "ai_probabilities", dtype=float, ndmin=2)
# sort list so highest probability comes first
# the polynomials are already sorted accordingly
probs_sorted = probs[(-probs[:,0]).argsort()]
# pEN preserves the order of the input list; this too can be valuable, so it is preserved
return output, probs_sorted, probs
else:
return output
def nn_sort(edges, probabilities=True):
"""
Given a list of edges `edges` (each edge of the form (f,g), where `f,g` are homogeneous
in 4 variables), return a sorted list of edges in descending order of AI-score.
If probabilities=True, also return the 2D array of all neural network probabilities.
This function also modifies the internal `ai_file`.
"""
if len(edges) == 0:
return edges
R = edges[0][0].parent()
mons = lex_order_mons_of_degree(R,4)
convert = (lambda a : [a.monomial_coefficient(m) for m in mons])
E_list_form = map((lambda a : (convert(a[0]), convert(a[1]))), edges)
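# Each quartic is flattened to its coefficient vector over the 35 lex-ordered
# degree-4 monomials, so an edge becomes a pair of 35-dimensional vectors
# (the representation handed to the AI ranking below).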
# Launch the AI ranking
send_jobs_to_AI(E_list_form)
run_ai_eval()
return read_nn_results(R, probabilities=probabilities)
###
def rerun_nn(parent=PolynomialRing(QQ, 'x,y,z,w'), probabilities=True):
"""
Rerun nn_sort on the unlabelled edge data.
"""
run_ai_eval()
return read_nn_results(parent, probabilities=probabilities)
def write_user_edges_to_file(edges):
"""
Given a list of edges `edges` (each edge of the form (f,g), where `f,g` are homogeneous
in 4 variables), save this list to the `user_edges` file to be read by the
main programs. The names of the variables are changed to `x,y,z,w`.
"""
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
with open(SRC_ABS_PATH + "user_input/" + "user_edges", 'w+') as F:
for e in edges:
F.write("[{},{}]\n".format(R(e[0]), R(e[1])))
return
# Aliases
edges_to_file = write_user_edges_to_file
to_user_file = write_user_edges_to_file
############################################################################################
# Compute transition matrices.
############################################################################################
def _raise_exit_function_not_implemented_error():
help_string = (
"""
The Interface for automatic parsing of the exit function has not been
designed. Please encode you desired function in the file
{}user_input/user_exit_functions.sage
and remember to set `CONSTRUCT_GRAPH=True`. I am aware it is possible to
use function pickling, but as this interface is a prototype, I am not
committing to a framework at this time.
""")
print(help_string)
raise NotImplementedError
# TODO: Need to somehow feed in the exit function and construct graph variables.
def compute_transition_matrices(edges, exit_function=None):
"""
Given a list of edges `edges` (each edge of the form (f,g), where `f,g` are homogeneous
in 4 variables), attempt to compute the transition matrices.
"""
if len(edges) == 0:
return
write_user_edges_to_file(edges)
if not exit_function == None:
_raise_exit_function_not_implemented_error()
# Pickle exit_function (to be loaded later)
#with open(SRC_ABS_PATH + "user_input/" + "pickled_user_function", 'w+') as F:
# pass
construct_edge_odes()
integrate_edge_odes()
return
def first_ivps(edges, exit_function=None):
"""
Given a list of edges `edges` (each edge of the form (f,g), where `f,g` are homogeneous
in 4 variables), attempt to compute the first ODE associated to each edge.
"""
if len(edges) == 0:
return
write_user_edges_to_file(edges)
if not exit_function == None:
_raise_exit_function_not_implemented_error()
opts = {'generator':'file', 'forgo-manifest':None, "only-first":None}
construct_edge_odes(opts=opts)
return
def ivps(edges, opts={'generator':'file', 'forgo-manifest':None}):
"""
Given a list of edges `edges` (each edge of the form (f,g), where `f,g` are homogeneous
in 4 variables), attempt to compute the initial value problems (IVPs).
"""
if len(edges) == 0:
return
write_user_edges_to_file(edges)
construct_edge_odes(opts)
return
def load_transition_matrix(e):
"""
Loads the transition matrix associated to `e = (f,g)`, (where f,g are homogeneous
quartics), provided it exists. Raises an error otherwise.
"""
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
e0str = str(R(e[0]))
e1str = str(R(e[1]))
G = construct_phase_III_graph()
e_G = [ed for ed in G.edges() if ed[0]==e0str and ed[1]==e1str][0]
return load_transition_mat(e_G)
############################################################################################
# Help / Info
############################################################################################
def interface_help():
help_string = (
"""
COMMANDS:
compute_transition_matrices
write_user_edges_to_file
nn_sort
ivps
first_ivps
create_training_data
construct_edge_odes
integrate_edge_odes
load_test
load_transition_matrix
carry_periods
insert_S4_links
add_isolated_vertices
""")
print(help_string)
############################################################################################
# Core wrappings for shell utilities.
############################################################################################
def create_training_data(opts={'generator':'file'}):
subprocess.call(["sage", "create-training-data.sage"] + format_subproc_opts(opts),
cwd=SRC_ABS_PATH)
def construct_edge_odes(opts={'generator':'file', 'forgo-manifest':None}):
subprocess.call(["sage", "construct-edge-odes.sage"] + format_subproc_opts(opts),
cwd=SRC_ABS_PATH)
def integrate_edge_odes(opts={'generator':'file'}):
period_graph.src.integrator._integrate_edge_odes(**opts)
def run_ai_eval():
subprocess.check_call([PYTHON3_BIN, "neural-network/AI_eval.py"],
cwd=SRC_ABS_PATH)
def format_subproc_opts(opts):
opt_list = list(opts)
pass_to_subproc = []
for opt in opt_list:
arg = opts[opt]
if arg == None:
pass_to_subproc += ["--{}".format(str(opt))]
else:
pass_to_subproc += ["--{}={}".format(str(opt), str(arg))]
return pass_to_subproc
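# Example (illustrative): {'generator': 'file', 'forgo-manifest': None}
# becomes ['--generator=file', '--forgo-manifest'].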
############################################################################################
# Cleanup utilities.
############################################################################################
import shutil
def manifest_size():
return subprocess.check_output(["wc","-l", "edge-manifest"],
cwd=SRC_ABS_PATH)
def clean_ode_failures():
for dirname in os.listdir(SRC_ABS_PATH + "ode-data"):
if not os.path.exists("{}ode-data/{}/safe_write_flag".format(SRC_ABS_PATH, dirname)):
vic = "{}ode-data/{}".format(SRC_ABS_PATH, dirname)
dest = os.path.join(SRC_ABS_PATH,"failed-ode-step", dirname)
shutil.move(vic, dest)
return
############################################################################################
# Timings data
############################################################################################
def timings_from_training_successes(folder=os.path.join(TRAINING_PATH, "edge-data"), verbose=True):
"""
Returns a list of pairs (edge, timings), where each edge is given as a
pair of quartic_data objects and each timings is a list [time, order, degree].
Note that only successes are returned, as failed edges just have placeholder data for
[time, order, degree].
"""
dataShape = {'edgesX-*.csv':2*35+1, 'timingsY-*.csv':3+1}
data_gruppe, is_data_labelled = ReadData(folder, dataShape, verbose=verbose)
assert is_data_labelled
# Format and return the result.
edges_vec = [[ZZ(x) for x in e] for e in data_gruppe['edgesX-*.csv']]
edges = map(vector_to_edge, edges_vec)
timings = [x.astype(float) for x in data_gruppe['timingsY-*.csv']]
timings = [list(x) for x in timings]
return zip(edges, timings)
class TimingsData(object):
def __init__(self, filename):
"""
Example timings data format:
AIStream: GroebnerInit: 0.000
AIStream: 0.000
AIStream: 0.000
AIStream: total-PF: 1, 1: 0.000
AIStream: GroebnerInit: 0.000
AIStream: 0.000
AIStream: 0.000
AIStream: total-PF: 1, 1: 0.000
AIStream: 0.000
AIStream: 0.000
AIStream: 0.010
AIStream: total-PF: 2, 1: 0.010
...
"""
self.PF_time = {}
with open(filename, 'r') as timings:
for line in timings:
split_line=line.split(":")
if len(split_line) == 1: # Ignore empty lines.
continue
if split_line[1].strip() == "GroebnerInit":
self._groebner_time = float(split_line[2])
elif split_line[1].strip() == "total-PF":
label = int(split_line[2].split(",")[0]) # Specific to our computations.
self.PF_time[label] = float(split_line[3])
def number_completed_odes(self):
return len(self.PF_time.keys())
def total_PF_time(self):
return sum(t for k,t in self.PF_time.items())
def groebner_time(self):
return self._groebner_time
def total_time(self):
return self.groebner_time() + self.total_PF_time()
def PF_timings(self):
return self.PF_time
#### end class
def get_ivp_timings(odedata=SRC_ABS_PATH + "ode-data", only_first=False, only_completed=True):
edge_and_time=[]
for folder in os.listdir(odedata):
for filename in os.listdir(os.path.join(odedata,folder)):
if filename.endswith("timings"):
timings = TimingsData(os.path.join(odedata,folder,filename))
e = convert_folder_to_edge(folder)
if only_first == True:
try:
t = timings.PF_timings()[1]
edge_and_time.append((e,t))
except KeyError:
edge_and_time.append((e, math.inf))
elif not only_completed or timings.number_completed_odes() == 21: # for K3 surfaces
t = timings.total_PF_time()
edge_and_time.append((e,t))
edge_and_time.sort(key=lambda x:-x[1])
return edge_and_time
def get_first_ivp_timing(odedata=SRC_ABS_PATH + "ode-data"):
return get_ivp_timings(odedata=odedata, only_first=True)
| 14,549 | 31.261641 | 99 |
py
|
period_graph
|
period_graph-master/period_graph/src/carry_periods.py
|
from SAGE_CONFIG import *
from sage.all import *
load(SRC_ABS_PATH + "first-stage-analysis.sage")
from period_graph.src.post_integration_graph import *
#load(SRC_ABS_PATH + "post-integration-analysis.sage")
# Load the ARBMatrixWrap class
# load(pathToSuite+"arb_matrix_cereal_wrap.sage")
from period_graph.src.suite import arb_matrix_cereal_wrap as amcw
###################################################
# Functions for getting transition matrices
def path_edges(G, pth):
return [(pth[i], pth[i+1], G.edge_label(pth[i], pth[i+1])) for i in range(len(pth)-1)]
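# path_edges turns a vertex path [v0, v1, ..., vn] into the list of labelled
# edge triples [(v0, v1, label_01), ...] consumed by carry_periods_along_path
# below.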
def get_transition_mat(e):
edata = e[2]
etype = edata.edge_type()
if etype == 'permutation':
tm = compute_permutation_transition(e)
elif etype == 'normal' or etype == 'weak':
tm = load_transition_mat(e)
else:
raise ValueError("Invalid edge type given.")
if edata.direction() == 'forward':
return tm
else:
return tm.inverse()
def permutation_matrix_auf_list(perm_as_list):
A = zero_matrix(ZZ, len(perm_as_list))
for i in range(len(perm_as_list)):
A[i,perm_as_list[i]-1] = 1 # Permutations are on symbols [x,y,z,w]
return A
def compute_permutation_transition(e):
"""
WARNING: Assumes that the S4_transition function has been loaded into the magma
interpreter.
"""
assert e[2].edge_type() == 'permutation'
A = e[0].perm_as_matrix()
B = e[1].perm_as_matrix().transpose()
#A = permutation_matrix_auf_list(e[0].perm)
#B = permutation_matrix_auf_list(e[1].perm).transpose()
perm = (B*A).list()
magma_output = magma.eval('S4_transition("{}","{}");'.format(perm, e[0].quartic()))
return sage_eval("matrix(QQ, {})".format(magma_output))
def load_transition_mat(e):
label = e[2]
tm = load(SRC_ABS_PATH + 'ode-data/' + label.dirname() + '/' + 'transition_mat.sobj')
tm = tm.arb_matrix()
return tm
def edge_precision(e):
if e[2].edge_type() == 'permutation':
return math.inf
else:
try:
tm = get_transition_mat(e)
return -log(max(tm.apply_map(lambda x : x.diameter()).list()), 10)
except:
return 0
###################################################
# Functions for periods
def load_periods(v):
per_mat = load(SRC_ABS_PATH + 'periods/' + str(v) + '/' + 'periods.sobj')
return per_mat.arb_matrix()
import os
def save_periods(v, per):
if not os.path.exists(SRC_ABS_PATH + 'periods/' + v + '/'):
os.mkdir(SRC_ABS_PATH + 'periods/' + v + '/')
save(amcw.ARBMatrixCerealWrap(per), SRC_ABS_PATH + 'periods/' + v + '/' + 'periods.sobj')
return
# Constant to increase Magma's supply of working digits. Creates a safety buffer for
# Magma's ARB arithmetic.
MAGMA_WORKING_PRECISION_FACTOR = 1.2
def save_periods_magma(v, periods):
"""
Save the periods in a magma-readable format.
Adapted from output_to_file function in integrator.sage.
"""
ivpdir = SRC_ABS_PATH + 'periods/' + v + '/'
filename = 'periods-magma'
# Create the periods file if it is not there.
if not os.path.exists(ivpdir + filename):
#os.mkdir('periods/' + v + '/')
with open(ivpdir+filename,'w') as output_file:
maximal_error = max(periods.apply_map(lambda x : x.diameter()).list());
periods_mid = periods.apply_map(lambda x : x.mid());
print("Accumulated maximal error:", maximal_error)
if maximal_error == 0:
# For a default precision, use the value stored in the base ring of the arb_matrix
bit_precision = periods.base_ring().precision()
attained_precision = floor(log(2 ** bit_precision, 10))
else:
attained_precision = -maximal_error.log(10).round()
# Magma first reads the complex ball precision and number of digits.
output_file.write(str(attained_precision)+"\n")
digits = ceil(attained_precision*MAGMA_WORKING_PRECISION_FACTOR);
output_file.write(str(digits)+"\n")
print("Writing the periods to file.")
numrows = periods_mid.nrows()
numcols = periods_mid.ncols()
for i in range(numrows):
output_file.write(str(periods_mid[i].list()))
if i < numrows-1: output_file.write("\n")
####
# TODO: Abolish this global scope/load nonsense. This is *horrible* design.
# The values defined below are usually defined via the meta file or are constants
# in integrator.sage.
d = 4
fermat_type = [1,1,1,1]
bit_precision = ceil(log(10 ** DIGIT_PRECISION, 2))
ncpus = 8
# Basic initialization
def initialize_fermat_directory():
R = ComplexBallField(bit_precision)
fermat_string = quartic_data('x^4 + y^4 + z^4 + w^4').quartic_string()
locals_dict = {'d' : 4,
'fermat_type' : [1,1,1,1],
'bit_precision' : 466,
'ncpus' : 8}
# TODO: Replace with a save_eval with locals dict. Then test.
load(pathToSuite+"fermat_periods.sage")
fermat_period_data = periods_of_fermat(fermat_type)
print("Fermat periods computed.")
fpm_rows=fermat_period_data.nrows()
fpm_cols=fermat_period_data.ncols()
fermat_periods = MatrixSpace(R,fpm_rows,fpm_cols)(fermat_period_data)
save_periods(fermat_string, fermat_periods)
save_periods_magma(fermat_string, fermat_periods)
###########################################################
# Main functions
# The code below is inefficient and meant for testing.
# Also, we have Sage 8.6, and not 8.8 with the fancy "breadth_first_search" options.
#
# TODO: Since we are upgrading to Sage 9.0, we can optimize this part.
def carry_periods(G=None, start_vtx=quartic_data('x^4 + y^4 + z^4 + w^4'), verbose=False):
"""
Move the period data from Fermat to all the other connected hypersurfaces.
Optional parameters are:
G -- Directed graph of quartic data edges. Default is to load from file.
start_vtx -- The starting vertex, as a quartic_data object.
"""
# Ensure that Fermat is initialized.
if start_vtx == quartic_data('x^4 + y^4 + z^4 + w^4'):
try:
load_periods(start_vtx.quartic_string())
except IOError:
initialize_fermat_directory()
old_dir = magma.eval("GetCurrentDirectory()")
magma.eval('ChangeDirectory("{}")'.format(SRC_ABS_PATH))
magma.load(SRC_ABS_PATH + "magma/S4-transition.m")
if verbose:
print("Constructing graph...")
if G == None:
G = load_phase_III_graph()
if verbose:
print("Graph built. Inserting permutation links...")
insert_S4_links(G)
# This determines what route we take if there is ambiguity.
weight_func = (lambda e : e[2].weight())
short_paths = G.shortest_paths(start_vtx, by_weight=True, weight_function = weight_func)
for v in G.vertices():
print("Vertex: ", v) if verbose else None
#path_verts = G.shortest_path(start_vtx, v, weight_function = weight_func)
try:
path_verts = short_paths[v]
except KeyError:
# There is no path linking the starting vertex to v.
continue
if len(path_verts) < 2:
continue
else:
print(path_edges(G, path_verts)) if verbose else None
carry_periods_along_path(path_edges(G, path_verts))
#Cleanup
magma.eval('ChangeDirectory("{}")'.format(old_dir))
###
def carry_periods_along_path(pe):
weak_link_crossed = False
for e in pe:
source_per = load_periods(e[0].quartic_string())
if weak_link_crossed:
if e[2].edge_type() == 'permutation':
# We can move the holomorphic periods, but nothing else.
new_per = matrix(source_per.rows()[0])
else:
# We do not have enough periods to compute anything else.
return
else:
tm = get_transition_mat(e)
if e[2].edge_type() == 'weak':
weak_link_crossed = True
# Apply the transformation.
new_per = tm*source_per
## end if
# Save the new periods to the file.
save_periods(e[1].quartic_string(), new_per)
save_periods_magma(e[1].quartic_string(), new_per)
####################
| 8,517 | 32.143969 | 99 |
py
|
period_graph
|
period_graph-master/period_graph/src/post_integration_graph.py
|
from SAGE_CONFIG import *
from sage.all import *
import os
# Load the ARBMatrixWrap class
#load(pathToSuite+"arb_matrix_cereal_wrap.sage")
from period_graph.src.suite import arb_matrix_cereal_wrap
# Load the first stage analysis dependency.
load(SRC_ABS_PATH + "first-stage-analysis.sage")
###################################################
# Construction of Graph
# Here we construct the graph from the return code object and the
# vertex manifest.
def parse_vtx_string(s):
return s.strip('vtx').strip('()')
def parse_ret_code(c):
return c[0][0][0].strip('ode-data').strip('/')
def reconstruct_return_codes():
retc = []
for dirname in os.listdir(SRC_ABS_PATH + "ode-data"):
if os.path.exists("{}ode-data/{}/transition_mat.sobj".format(SRC_ABS_PATH, dirname)):
retc.append(((("ode-data/" + dirname,), {}), 0))
else:
retc.append(((("ode-data/" + dirname,), {}), 1))
save(retc, SRC_ABS_PATH + "Z-integration-return-codes.sobj")
return
def load_phase_III_graph(directed=True, allow_weak_edges=True):
manifest = load_manifest()
success_list = []
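# Collect only the edge directories whose ODE run both finished writing
# cleanly (safe_write_flag present) and produced a transition matrix.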
for dirname in os.listdir(SRC_ABS_PATH + "ode-data"):
ode_safe_write = os.path.exists(
"{}ode-data/{}/safe_write_flag".format(SRC_ABS_PATH, dirname))
transition_matrix_exists = os.path.exists(
"{}ode-data/{}/transition_mat.sobj".format(SRC_ABS_PATH, dirname))
if ode_safe_write and transition_matrix_exists:
success_list += [dirname]
if directed:
G = DiGraph({})
else:
G = Graph()
with open(SRC_ABS_PATH + "edge-manifest",'r') as F:
for line in F:
v,w,dirid = line.rstrip().split(",")
G.add_vertex(quartic_data(v))
G.add_vertex(quartic_data(w))
for c in success_list:
# Load the transition matrix to attach necessary data to the edge.
tm = load(SRC_ABS_PATH + 'ode-data/' + c + '/' + 'transition_mat.sobj')
label = EdgeLabel(c, 'forward', tm)
try:
if allow_weak_edges or not label.is_weak():
G.add_edge(manifest[c], label=label)
if not label.is_weak():
backward_label = EdgeLabel(c, 'backward', tm)
G.add_edge((manifest[c][1], manifest[c][0]), label=backward_label)
except KeyError as e:
print("WARNING: Manifest key error: ", e)
return G
def my_phaseIII(weak=True, isolated_vertices=True):
H3 = load_phase_III_graph(directed=True, allow_weak_edges=weak)
if isolated_vertices:
# Function defined in first stage analysis.
add_isolated_vertices(H3, [4, 5])
insert_S4_links(H3)
return H3
| 2,766 | 29.744444 | 93 |
py
|
period_graph
|
period_graph-master/period_graph/src/__init__.py
| 0 | 0 | 0 |
py
|
|
period_graph
|
period_graph-master/period_graph/src/integrator.py
|
import subprocess
from SAGE_CONFIG import *
from sage.all import *
load(SRC_ABS_PATH + "sage/arg_saver.py")
USER_EDGES_FILE = "user_input/user_edges"
## Note: Sage's timeout mechanism + subprocess + decorator = fail. Work will be done, but
## no return codes produced. (The failure occurs in the decorator's cleanup).
## This is why we pass the timeout duties to the subprocess.
# Load dependency
load(SRC_ABS_PATH + "first-stage-analysis.sage")
@parallel(ncpus=60)
def integrate_odes_in_directory(dirname):
# There is a very annoying issue with the "load" call and variable scope.
# Basically, load only looks for identifiers defined in global scope.
#
# Our current fix is to call sage as a subprocess so that it can have its own "global"
# scope without subprocesses interfering with each other.
#
# Check if this job or something similar has been attempted.
abs_dirname = SRC_ABS_PATH + dirname
args = {'timeout':INTEGRATION_ALARM, 'digit_precision':DIGIT_PRECISION}
if attempt_already_made('integrate_odes_in_directory', abs_dirname, args):
return 0
try:
timeout_opt = '--timeout={}'.format(INTEGRATION_ALARM)
ivpdir_opt = '--ivpdir={}'.format(dirname)
prec_opt = '--digit-precision={}'.format(DIGIT_PRECISION)
ret_code = subprocess.call(['sage', pathToSuite + 'transition-integrator.sage',
timeout_opt, ivpdir_opt, prec_opt], cwd=SRC_ABS_PATH)
return ret_code
except subprocess.CalledProcessError as err:
# Uncomment to debug sage:
print(err.output)
return 1
####
def integration_job_list(**job_config):
ODE_DATA_DIR = os.path.join(SRC_ABS_PATH, "ode-data", "")
# Create the job list.
if job_config['generator'] == 'default':
joblist = ['ode-data/'+dirname+'/' for dirname in os.listdir(ODE_DATA_DIR)]
# Only take jobs that have the safe write indicator
joblist = [pth for pth in joblist if os.path.exists(pth+'safe_write_flag')]
elif job_config['generator'] == 'file':
joblist = []
with open(os.path.join(SRC_ABS_PATH, USER_EDGES_FILE)) as F:
for line in F:
v,w = line.strip().lstrip('[').rstrip(']').split(',')
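# Each line of the user_edges file has the form "[f,g]"
# (see write_user_edges_to_file in interface.py).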
# Need to cast the strings as polynomials to get the correctly
# sorted terms in the directory name.
vq = quartic_data(v)
wq = quartic_data(w)
dirname = 'ode-data/{}/'.format(edge_ivp_label((vq,wq)))
if os.path.exists(SRC_ABS_PATH + dirname):
joblist.append(dirname)
else:
raise ValueError("Invalid option for 'generator': {}".format(job_config['generator']))
return joblist
###
def _integrate_edge_odes(**job_config):
# Integrate
joblist = integration_job_list(**job_config)
results = list(integrate_odes_in_directory(joblist))
#old_retc = _load_integration_return_codes()
#_save_integration_return_codes(results, old_retc)
return
#################################################################
# OSBOLETE: Return code functionality.
#################################################################
# def _load_integration_return_codes():
# try:
# retc = load('Z-integration-return-codes.sobj')
# prev_args = [x[0][0][0] for x in retc]
# except IOError:
# retc = []
# prev_args = []
# return retc
# def _save_integration_return_codes(results, retc):
# # Save the return codes for analysis
# results_list = list(results) + retc
# save(results_list, "Z-integration-return-codes")
| 3,737 | 32.675676 | 94 |
py
|
period_graph
|
period_graph-master/period_graph/src/sage/arg_saver.py
|
import os
def attempt_already_made(function_name, dirname, new_args):
MAKE_ATTEMPT = False
# Construct filename from function and dirname.
filename = dirname + function_name + '_args.sobj'
special_comparisons = {'construct_all_odes' : construct_all_odes_cmp}
try:
old_args = load(filename)
except IOError:
# Legacy code to support old data. Will be deprecated.
# print ("\nWARNING! Please make sure that old data has been reformatted. "
#        + "Trying again anyway...\n")
save(new_args, filename)
return MAKE_ATTEMPT
if function_name in special_comparisons:
comparison_function = special_comparisons[function_name]
else:
comparison_function = (lambda x,y : x['timeout'] > y['timeout'])
# The comparison function should return True if an attempt should be made.
if comparison_function(new_args, old_args):
save(new_args, filename)
return MAKE_ATTEMPT
else:
return not MAKE_ATTEMPT
def construct_all_odes_cmp(x,y):
if x['only_first'] == False and y['only_first'] == True:
return True
else:
return x['timeout'] > y['timeout']
| 1,220 | 28.780488 | 83 |
py
|
period_graph
|
period_graph-master/period_graph/src/sage/mac_mp_queue.py
|
# WARNING: Random code copied from off the internet.
# Code copied from https://github.com/keras-team/autokeras/issues/368
import multiprocessing
import multiprocessing.queues
class SharedCounter(object):
""" A synchronized shared counter.
The locking done by multiprocessing.Value ensures that only a single
process or thread may read or write the in-memory ctypes object. However,
in order to do n += 1, Python performs a read followed by a write, so a
second process may read the old value before the new one is written by the
first process. The solution is to use a multiprocessing.Lock to guarantee
the atomicity of the modifications to Value.
This class comes almost entirely from Eli Bendersky's blog:
http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/
"""
def __init__(self, n = 0):
self.count = multiprocessing.Value('i', n)
def increment(self, n = 1):
""" Increment the counter by n (default = 1) """
with self.count.get_lock():
self.count.value += n
@property
def value(self):
""" Return the value of the counter """
return self.count.value
class MacQueue(multiprocessing.queues.Queue):
""" A portable implementation of multiprocessing.Queue.
Because of multithreading / multiprocessing semantics, Queue.qsize() may
raise the NotImplementedError exception on Unix platforms like Mac OS X
where sem_getvalue() is not implemented. This subclass addresses this
problem by using a synchronized shared counter (initialized to zero) and
increasing / decreasing its value every time the put() and get() methods
are called, respectively. This not only prevents NotImplementedError from
being raised, but also allows us to implement a reliable version of both
qsize() and empty().
"""
def __init__(self, *args, **kwargs):
super(MacQueue, self).__init__(*args, **kwargs)
self.size = SharedCounter(0)
def put(self, *args, **kwargs):
self.size.increment(1)
super(MacQueue, self).put(*args, **kwargs)
def get(self, *args, **kwargs):
self.size.increment(-1)
return super(MacQueue, self).get(*args, **kwargs)
def qsize(self):
""" Reliable implementation of multiprocessing.Queue.qsize() """
return self.size.value
def empty(self):
""" Reliable implementation of multiprocessing.Queue.empty() """
return not self.qsize()
| 2,659 | 35.944444 | 108 |
py
|
period_graph
|
period_graph-master/period_graph/src/sage/phase_II_util.py
|
import queue
import os
PERIOD_SUITE_SAFE_FLAG = ".PERIODSUITE-this-directory-is-safe-to-rm-fr"
def format_magma_args(args):
return [k+':='+str(args[k]) for k in args]
##
def construct_all_odes(**kwds):
while True:
# Check for the terminate signal.
if quitEvent.is_set():
return "Quit!"
# Retrieve an available job from the queue.
if job_queue.empty():
return "Quit!"
try:
e = job_queue.get(timeout=TIMEOUT+1)
except queue.Empty:
return "Quit!"
entropy_bias=None
dirname = edge_ivp_label(e)
abs_dirname = "{}ode-data/{}/".format(SRC_ABS_PATH, dirname)
v = e[0].quartic()
w = e[1].quartic()
if entropy_bias == None:
magma_bias_param = '"None"'
else:
magma_bias_param = entropy_bias
# Check for the directory.
if not os.path.exists(abs_dirname):
os.mkdir(abs_dirname)
with open(abs_dirname + PERIOD_SUITE_SAFE_FLAG, 'w') as F:
F.write('')
# Check if this job or something similar has been attempted.
args = {'f0':str(v), 'f1':str(w), 'bias':str(magma_bias_param), 'timeout':PHASE_II_ALARM}
try:
args['only_first'] = kwds['only_first']
except KeyError:
args['only_first'] = False
if attempt_already_made('construct_all_odes', abs_dirname, args):
continue
## Launch magma to check if the edge should be added to the tree.
## Magma will write success data into a special file.
## Python writes failure data if the Magma alarm is triggered.
import subprocess
try:
magma_process = subprocess.Popen(['magma', '-b', 'name:='+dirname] +
format_magma_args(args) +
["magma/transition-homotopy.m"],
stdout=subprocess.PIPE)
realtime_printer = iter(magma_process.stdout.readline, b'')
# Ping the process as it runs. Also check if the quit event has
# been set.
while magma_process.poll() == None:
for b_line in realtime_printer:
line = b_line.decode()
if line[0:9] == "AIStream:":
with open(abs_dirname + "timings", 'a') as F:
F.write(line)
else:
print(line, end='')
# Basically, like time.sleep(), but the sleep can be interrupted.
if quitEvent.wait(timeout=1):
magma_process.terminate()
return "Quit!"
magma_output = magma_process.returncode
# Write an indicator flag indicating that the file-write is clean.
if magma_output == 0:
with open(abs_dirname + "safe_write_flag", 'w') as F:
F.write("SAFE\n")
if magma_output == ALARM_CLOCK_CODE:
job_done_queue.put([False, e, dirname])
else:
job_done_queue.put([True, e, dirname])
except subprocess.CalledProcessError as err:
logging.error("ERROR: ")
logging.error(err.output)
return "ERROR!"
####
| 3,522 | 32.552381 | 97 |
py
|
period_graph
|
period_graph-master/period_graph/src/sage/user_interface.py
|
####
# USER INPUT MANAGEMENT.
import signal
def input_with_timeout():
try:
signal.alarm(TIMEOUT)
foo = input()
signal.alarm(0)
return foo
except:
# timeout
return "TIMEOUT"
####
| 251 | 12.263158 | 37 |
py
|
period_graph
|
period_graph-master/period_graph/src/sage/phase_I_util.py
|
import subprocess
# Constants
zero_vec = [0 for i in range(35)]
dim_coeff_space = 35
fail_data_string = "30000, 1000, 1000"
# Error codes
ERROR_CODE = 1
ALARM_CLOCK_CODE = -14
# Data
TRAINING_PATH = os.path.join(SELF_PATH, "training-data", "")
#################################################################################################
## Neural Network data / magma parsing.
# Format the output according to Kat's specification. This being:
#
# QUOTE
# I'd like the data to be formatted in the following way.
# 2+k CSV files representing M data points, where k is the # of cohomology matrices used
# coeff pairs: (Mx70) X.csv
# times: (1xM) Y.csv
# matrices 1: (Mx441) M1.csv
# ...
# matrices k: (Mx441) Mk.csv
# with each file comma-separated with no extra brackets, like such:
# id, *, *, *, *, ... , *
# id, *, *, *, *, ... , *
# id, *, *, *, *, ... , *
# id, *, *, *, *, ... , *
#
# END QUOTE.
def parse_magma_nn_output(magma_output):
# NOTE: Every "load" command in magma produces an extra line of print to capture.
if isinstance(magma_output, bytes):
magma_output = magma_output.decode()
data_lines = magma_output.replace('[','').replace(']','').split('\n')
data = []
for line in data_lines:
if len(line) < 4:
continue
elif line[0:4] == "Load":
continue
else:
data.append(line)
return data
def parse_edge_traverse_output(magma_output):
data_lines = magma_output.decode().split('\n')
timings_data = []
data_label = ""
for line in data_lines:
if line[0:9] == 'AIStream:':
timings_data += [line[9:]]
if line[0:10] == 'DataLabel:':
data_label = line[10:].replace('[','').replace(']','')
return data_label, ','.join(timings_data)
def attach_timings_data(nn_data, timingsX, timingsY):
return nn_data + [timingsX, timingsY]
def write_nn_data(suffix, data, issuccess):
# 0. Decide on the writing mode (success, fail, None). Note a 'Differentiate Cohomology fail'
# is handled separately. `None` represents unlabelled data.
dirname = TRAINING_PATH
filenames = ["edgesX-"+suffix+".csv",
"DCM01-"+suffix+".csv",
"DCM10-"+suffix+".csv"]
if issuccess == None:
dirname = os.path.join(SELF_PATH, "neural_network_input/", "")
elif issuccess:
dirname += "edge-data/"
filenames.append("partial-timingsX-"+suffix+".csv")
filenames.append("timingsY-"+suffix+".csv")
else:
dirname += "failed-edges/"
filenames.append("partial-timingsX-"+suffix+".csv")
filenames.append("timingsY-"+suffix+".csv")
data_label = str(hash(data[0]))
assert len(data) == len(filenames)
for i in range(len(filenames)):
with open(dirname+filenames[i], 'a') as F:
F.write(data_label + ', ' + data[i] + '\n')
return
####
# Alias function.
def write_unlabelled_nn_data(suffix, nn_data):
write_nn_data(suffix, nn_data, None)
def create_nn_data(suffix, v, w, entropy_bias=None):
"""
One-off creation of neural network data associated to an edge.
Note that this function always starts a magma subprocess.
To batch write several things to the AI pipe, use send_jobs_to_AI
instead.
"""
## Launch magma to create the data for neural-network evaluation.
## Magma will write success data into a special file.
## Python writes failure data if the Magma alarm is triggered.
if entropy_bias == None:
magma_bias_param = '"None"'
else:
magma_bias_param = entropy_bias
magma_output = subprocess.check_output(['magma', '-b',
'suffix:='+"par-run-" + str(suffix),
'f0_vec:='+str(v),
'f1_vec:='+str(w),
'bias:='+str(magma_bias_param),
"magma/create-nn-data-III.m"])
return parse_magma_nn_output(magma_output)
#################################################################################################
## Main worker function.
def edge_traversable(suffix, v, w, entropy_bias=None):
## Launch magma to check if the edge should be added to the tree.
## Magma will write success data into a special file.
## Python writes failure data if the Magma alarm is triggered.
#############
## Attempt to create neural network data
try:
nn_data = create_nn_data(suffix, v, w, entropy_bias=entropy_bias)
except subprocess.CalledProcessError as e:
# Uncomment to view raw magma output:
# print(e.output)
if e.returncode == ERROR_CODE:
# The relevant hypersurface was singular.
# OR, the entropy was larger than the threshold.
# Log the error for inspection.
print(e.output)
logging.info(time.asctime() + '\nCORE: ' + str(suffix) + '\n' + e.output + '\n')
elif e.returncode == ALARM_CLOCK_CODE:
# The X-label for the Neural network data failed to compute.
# This goes into the bin of terrible inputs.
dcm_fail_path = os.path.join(TRAINING_PATH, "process"+suffix+"DCfail")
with open(dcm_fail_path, 'a') as F:
F.write((str(v)+', '+str(w)+'\n').translate(None, '[]'))
return False
## End try
#############
## Begin magma process and output capture.
try:
magma_bias_param = '"None"' if (entropy_bias == None) else entropy_bias
# This can be done in real-time, if we were interested in this kinda thing.
comment_string = """
magma_process = subprocess.Popen(['magma', '-b',
'suffix:='+"par-run-" + str(suffix),
'f0_vec:='+str(v),
'f1_vec:='+str(w),
'bias:='+str(magma_bias_param),
"magma/attempt-edge-traverse-II.m"],
cwd=SRC_ABS_PATH, stdout=subprocess.PIPE)
realtime_printer = iter(magma_process.stdout.readline, "")
# Ping the process as it runs.
data_label = ""
timings_data = ""
while magma_process.poll() == None:
for line in realtime_printer:
print line
if line[0:9] == "AIStream:":
timings_data += line.strip()
elif line[0:10] == "DataLabel:":
data_label = line[10:]
else:
print line
print line, data_label
retcode = magma_process.returncode
"""
magma_output = subprocess.check_output(['magma', '-b',
'suffix:='+"par-run-" + str(suffix),
'f0_vec:='+str(v),
'f1_vec:='+str(w),
'bias:='+str(magma_bias_param),
'timeout:='+str(PHASE_I_ALARM),
"magma/attempt-edge-traverse-II.m"], cwd=SRC_ABS_PATH)
# Add data label to the nn data
timingsY, timingsX = parse_edge_traverse_output(magma_output)
nn_data = attach_timings_data(nn_data, timingsX, timingsY)
# write to success file.
write_nn_data(suffix, nn_data, True)
return True
except subprocess.CalledProcessError as e:
if not e.returncode == ALARM_CLOCK_CODE:
raise e
# Setup failure data write to failure file.
timingsY, timingsX = parse_edge_traverse_output(e.output)
timingsY = fail_data_string
nn_data = attach_timings_data(nn_data, timingsX, timingsY)
write_nn_data(suffix, nn_data, False)
return False
| 8,150 | 33.104603 | 102 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/model_bundle.py
|
import os
import pickle as pk
from keras.models import load_model
class trivialPCA:
def __init__(self):
pass
def transform(self, x):
return x
class ModelBundle:
def __init__(self, *args, **kwds):
if len(args) == 1:
model_id = args[0]
PCA, MLP, CNN = None, None, None
elif len(args) == 4:
model_id, PCA, MLP, CNN = args
else:
raise NotImplementedError
try:
self._base_network_name = kwds['base_network'].name()
except KeyError:
self._base_network_name = None
self.model_id = model_id
self.PCA = PCA
self.MLP = MLP
self.CNN = CNN
def name(self):
return self.model_id
def base_network_name(self):
return self._base_network_name
def components(self):
return self.PCA, self.MLP, self.CNN
def _load(self, path):
spath = os.path.join(path, self.model_id, '')
try:
            with open(spath + 'PCs' + self.model_id + '.pkl', 'rb') as pca_file:
                self.PCA = pk.load(pca_file)
except IOError:
self.PCA = trivialPCA()
self.MLP = load_model(spath+'MLP'+self.model_id+'.h5')
self.CNN = load_model(spath+'CNN'+self.model_id+'.h5')
def save(self, path, also_to_newest=False):
if also_to_newest:
names = [self.model_id, "_newest"]
else:
names = [self.model_id]
for name in names:
spath = os.path.join(path, name, '')
try:
os.mkdir(spath)
except FileExistsError:
pass
            with open(spath + 'PCs' + name + '.pkl', 'wb') as pca_file:
                pk.dump(self.PCA, pca_file)
self.MLP.save(spath+'MLP'+name+'.h5')
self.CNN.save(spath+'CNN'+name+'.h5')
def save_parameters(self, path, setup_dic, params_dic, also_to_newest=False):
# IDEA: Model bundles should perhaps have a header where a dictionary of
# some of these params are kept (such as if it is a finetuned model).
# IDEA: Model bundles could also pickle the parameter dictionary for later.
if also_to_newest:
names = [self.model_id, "_newest"]
else:
names = [self.model_id]
for name in names:
fname = os.path.join(path, name, "Params"+name+".txt")
with open(fname,"w+") as f:
f.write("\n*****************\n")
f.write("Network Training Params for '{}'".format(self.model_id))
f.write("\n\n")
# Print key-value pairs according to special formatting instructions
# Determined by dictionary keys.
B = ["Base network (None if new): " + str(self.base_network_name()),
"",
"Setup parameters:",
tall_dic_str(setup_dic),
"",
"Network architecture hyperparameters:",
tall_dic_str(params_dic), "\n"]
f.write('\n'.join(B))
# strg = ["Network permanent name: ",
# "Fresh network? Else finetuned: ",
# "New network name: ",
# "Num cohomology matrices / pair: ",
# "Balance the training set: ",
# "PCA preprocessing with 23 PCs: ",
# "Training set filename: ",
# "Total time elapsed: ",
# "Reference network (if finetuning): ",
# "Random seed: "]
# B = [s+str(n) for s,n in list(zip(strg,paramsOther))]
print("\nNetwork parameters written to: ",fname,"\n")
return
def save_training_data_info(self, path, data_dic):
"""
Writes information regarding the preparation of the training data to the model folder.
"""
fname = os.path.join(path, self.name(), "TrainDataInfo" + self.name() + ".txt")
notice_msg = ("NOTE: the random seed has no effect on RandomSampler, as there is a "
+ "separate seed set in that sampler. Future improvements might remove "
+ "this issue. For the time being, we will be untroubled by this "
+ "non-critical loss of generality.")
B = ["Data info:", tall_dic_str(data_dic), '', notice_msg, '\n']
with open(fname, 'w') as f:
f.write('\n'.join(B))
return
def evaluate_models(self, data):
test_x,test_y,test_M = data
print("PC PROJECTIONS STARTED")
test_x0 = self.PCA.transform(test_x)
print("PC PROJECTIONS COMPLETE")
# batch size
BSTEST = 10
pNN = self.MLP.predict(test_x0).ravel()
pCN = self.CNN.predict(test_M, batch_size=BSTEST, verbose=1).flatten()
## COMBINED: ENSEMBLE METHOD OF MLP + CNN
# TODO: Is this equivalent after thresholding?
pEN = pCN*pNN
# ranking from highest prob to lowest prob.
ranking = (lambda v : (test_x[(-v).argsort()]).astype(int))
return pCN, ranking(pCN), pNN, ranking(pNN), pEN, ranking(pEN)
# Loader function to reconstruct object.
def load_model_bundle(path, model_id):
B = ModelBundle(model_id)
B._load(path)
return B
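# Illustrative usage (NN_PATH, the bundle id and the test arrays are hypothetical here;
# any id previously saved under SavedModels/ works):
#   MB = load_model_bundle(os.path.join(NN_PATH, 'SavedModels', ''), 'May_10_12,34,56')
#   pCN, rCN, pNN, rNN, pEN, rEN = MB.evaluate_models((test_x, test_y, test_M))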
def tall_dic_str(D):
max_key_len = max(len(k) for k in D)
format_string = "{0:" + str(max_key_len) + "} : {1},"
s = "\n".join(format_string.format(str(k), str(v)) for k,v in D.items())
return "\n".join(['{', s, '}'])
# Acquire the correct model given the parameters.
def fetch_model(NN_PATH, ReadNewest, UseModel):
"""
Returns the model specified by the input parameters.
"""
MODEL_DIRS = ["SpecialModels", "SavedModels"]
if ReadNewest:
model_path = 'SavedModels'
fname_list = os.listdir(os.path.join(NN_PATH, model_path))
if len(fname_list) == 0:
error_msg = ("No models present in the 'SavedModels' directory. Please either train "
+ "A network using the provided utilities, or use one of the "
+ "presupplied models in the 'SpecialModels' directory.")
raise IOError(error_msg)
else:
key_func = lambda fname : os.path.getmtime(os.path.join(NN_PATH, model_path, fname))
model_name = max(fname_list, key=key_func)
else:
model_name = UseModel
        for directory in MODEL_DIRS:
            if model_name in os.listdir(os.path.join(NN_PATH, directory)):
                model_path = directory
                break
else:
error_msg = "No model corresponding to '{}' found.".format(UseModel)
raise IOError(error_msg)
return load_model_bundle(os.path.join(NN_PATH, model_path), model_name)
| 7,104 | 32.833333 | 98 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/AI_train.py
|
# Python 3.7.3.
import os, sys, scipy.io, scipy.linalg, random
from time import time
###
# In CONFIG
# -- paths
# -- balance
# -- PCA (how many components)
# -- number cohomology mats
# -- max-data-size : Read files until file sizes exceeds max-data-size
# -- output : Saved models
# -- output : Saved predictions
# -- hyperparameter config.
from NNCONFIG import *
from util import *
from data_handling import *
from model_bundle import *
from AI_functions import *
#**************************************************
# Main script.
sampler = RandomSampler if dataStream==1 else RandomBalancedSampler
print("Using sampler: ", sampler.__name__)
# Read and process the data.
data = ReadDataAndFormat(INPUT_DIR, dataShape, NumMats, "training", ttratio, Sampler=sampler, verbose=False)
#data = KH_circumvent(INPUT_DIR, dataShape, NumMats, "training", Sampler=sampler, ttratio, verbose=False)
train_x, train_y, train_M = data
print(len(train_y))
if train_y is None:
raise RuntimeError("Data in input directory is unlabelled. Directory: {}".format(INPUT_DIR))
print("\n\n# successes in original training set: ",
np.sum(train_y)," / ",train_y.shape[0],
" total training samples.")
# ** Actually training/loading the network.
if not FineTuneInTraining:
BM, paramsNN, paramsCN = train_model_bundle(data, NumMats, **network_architecture_hyperparameters)
else: #load pre-trained models from computer
#old_model_id = '_newest' if ReadNewest else OldModel
#old_model_bundle = load_model_bundle(os.path.join(NN_PATH, 'SavedModels', ''), old_model_id)
old_model_bundle = fetch_model(NN_PATH, ReadNewest, UseModel)
BM = finetune_bundle(old_model_bundle, data, **finetune_hyperparameters)
paramsNN,paramsCN = [OldModel],[OldModel]
#**************************************************
### PRINT, SAVE, AND VISUALIZE RESULTS
## write the core-indices that define the train dataset.
#csvfile = NN_PATH+'SavedModels/train_indices'+BM.name()+'.csv'
#csv_newest = NN_PATH+'SavedModels/train_indices_newest.csv'
#np.savetxt(csvfile, indices_out, delimiter=",")
#np.savetxt(csv_newest, indices_out, delimiter=",")
BM.save(os.path.join(NN_PATH, 'SavedModels/',''), also_to_newest=False)
print("***\n\nTHIS WILL BE INPUT TO AI_analyze.py:\n")
print(" Naming this training: ", BM.name(), "\n\n***")
reference_network = "None" if not FineTuneInTraining else OldModel
# Note: We are aiming for a 'large lower right' & 'very small upper right' of the confusion matrices.
# Parameters that the network hasn't yet remembered about itself.
setup_params = {"Num cohomology matrices / pair" : NumMats,
"Total time elapsed" : 0,
"Random seed" : random_seed,
"Training set filename" : '"{}"'.format(INPUT_DIR)}
model_save_path = os.path.join(NN_PATH, "SavedModels", '')
BM.save_parameters(model_save_path, setup_dic=setup_params,
params_dic=network_architecture_hyperparameters, also_to_newest=False)
## Save the information about the training set.
success_percent_str = "{} / {}".format(np.sum(train_y), train_y.shape[0])
data_info = {"MAX_INPUT_DATA_SIZE" : MAX_INPUT_DATA_SIZE,
"Train/test ratio" : ttratio,
"Random seed" : random_seed,
"Sampler name" : sampler.__name__,
"Percentage successes in training set" : success_percent_str,
"Training set filename" : '"{}"'.format(INPUT_DIR)}
BM.save_training_data_info(model_save_path, data_info)
| 3,520 | 34.928571 | 108 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/AI_eval.py
|
# Python 3.7.3.
## THIS FILE saves only to TestingOutputs
import os, sys, scipy.io, scipy.linalg, random, numpy as np
from time import time
###
# In CONFIG
# -- paths
# -- balance
# -- PCA (how many components)
# -- number cohomology mats
# -- max-data-size : Read files until file sizes exceeds max-data-size
# -- output : Saved models
# -- output : Saved predictions
# -- hyperparameter config.
from NNCONFIG import *
#######
# Keras import
# We need to do sketchy path stuff when called from sage.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # don't display warnings; only errors
try:
from keras.models import load_model
except ModuleNotFoundError:
sys.path.insert(0, PYTHON3_LOCAL_SITE_PKG)
from keras.models import load_model
from util import *
from model_bundle import *
from AI_functions import *
from data_handling import *
#**************************************************
# Setup input parameters.
# File containing pretrained networks.
# ModelNum = '_newest' if ReadNewest else UseModel
#**************************************************
# Read in evaluation data.
fnames = sorted(list(dataShape.keys()))
sampler = BasicSampler
data_set = DataSet(SAGE_INPUT_DIR, dataShape, ratio=None)
data_gruppe, is_data_labelled = data_set.read_all()
test_all = ReformatData(data_gruppe,is_data_labelled, NumMats)
test_x,test_y,test_M = test_all # test_y is 'None' if the data is unlabelled.
#**************************************************
# load and evaluate models.
MB = fetch_model(NN_PATH, ReadNewest, UseModel)
dataout = MB.evaluate_models(test_all)
WritePredictionsToSagePipe(NN_PATH, dataout)
| 1,640 | 25.047619 | 77 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/AI_analyze.py
|
# Python 3.7.3.
###
# In CONFIG
# -- paths
# -- balance
# -- PCA (how many components)
# -- number cohomology mats
# -- max-data-size : Read files until file sizes exceeds max-data-size
# -- output : Saved models
# -- output : Saved predictions
# -- hyperparameter config.
from NNCONFIG import *
# Suppress warnings from tensorflow; only display errors
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Remaining dependencies
import numpy as np, matplotlib.pylab as plt
from util import *
from model_bundle import *
from data_handling import *
#**************************************************
# Setup input parameters.
np.random.seed(random_seed)
if INPUT_DIR == "/Users/Heal/Dropbox/Research/EAK/4-monomial-complete/":
sampler = BasicSampler
else:
sampler = RandomBalancedSampler
print("Using sampler: ", sampler.__name__)
#**************************************************
# Main script.
## read the core-indices that define the train dataset.
#csvfile = NN_PATH+'SavedModels/train_indices'+ModelNum+'.csv'
#indices = np.loadtxt(csvfile)
test_all = ReadDataAndFormat(INPUT_DIR, dataShape, NumMats, "testing", ttratio, Sampler=sampler, verbose=False)
#test_all = KH_circumvent(EVALS_DIR, dataShape, NumMats, "testing", ttratio, Sampler=sampler, verbose=False)
test_x, test_y, test_M = test_all
print(sum(test_y))
#***************************************************
### PRINT, SAVE, AND VISUALIZE RESULTS FOR TEST DATA
# File containing pretrained networks.
ModelNum = '_newest' if ReadNewest else UseModel
MB = fetch_model(NN_PATH, ReadNewest, ModelNum)
pCN, rCN, pNN, rNN, pEN, rEN = MB.evaluate_models(test_all)
ModelNum = MB.name()
print("***\n\nTHESE WILL BE INPUT TO AI_analyze.py:\n")
print(" Using trained model: ", ModelNum, "\n\n***")
PlotsOn = True #broken for now TODO: Investigate?
paramsNN = [ModelNum]
paramsCN = [ModelNum]
## write the core-indices that define the test dataset.
#csvfile = NN_PATH+'EvalOutputs/test_indices'+ModelNum+'.csv'
#np.savetxt(csvfile, indices_out, delimiter=",")
opt_th_pNN = OptimalROCSup(pNN, test_y, "NN")
opt_th_pCN = OptimalROCSup(pCN, test_y, "CN")
print("opt_th_pNN:", opt_th_pNN)
print("opt_th_pCN:", opt_th_pCN)
yNN = ThresholdProbs(pNN, opt_th_pNN)
yCN = ThresholdProbs(pCN, opt_th_pCN)
opt_th_pEN = OptimalROCSup(pEN, test_y, "EN")
print("opt_th_pEN:", opt_th_pEN)
plt.show()
yEN = yCN*yNN # EN -- "Ensemble of networks"
print(sum(yEN),yEN.shape)
# NOTE: We are aiming for a 'large lower right' and 'very small upper right'
# of the confusion matrices.
for val in ["_newest", ModelNum]:
argsin = [NN_PATH, val, paramsNN, paramsCN, test_y, yNN, yCN, yEN]
WriteConfusion(*argsin)
PrintConfusion(test_y, yNN, yCN, yEN) #prints confusion matrices per filter
yEN_opt_th_pEN = ThresholdProbs(pEN, opt_th_pEN) #not equivalent to yEN = yCN*yNN.
PrintConfusion(test_y, yNN, yCN, yEN_opt_th_pEN) #prints confusion matrices per filter
if PlotsOn and sum(test_y)>0 and sum(test_y)<len(test_y):
WritePlots(NN_PATH,"",pNN,pCN,pEN,test_y)
if True: # Only run this on the dev machine to generate article information.
from sklearn.metrics import confusion_matrix
import util
C_mat = confusion_matrix(test_y, yEN)
util._WriteTable9Data(os.path.join(NN_PATH, 'EvalOutputs', 'table9data.txt'),
MB, ttratio, C_mat)
| 3,483 | 27.793388 | 111 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/data_handling.py
|
from NNCONFIG import *
import scipy.linalg
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve
from sklearn.utils import resample
from numpy import genfromtxt
from sklearn.decomposition import PCA
import glob, os
import pickle as pk
import matplotlib.pylab as plt
import math
from time import process_time
from pandas import read_csv
from sys import getsizeof
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
# Data handling and preprocessing.
def size_str_to_num(s):
"""
    Parse a human-readable size string such as "1MB" or "2G" into a number of bytes.
"""
suffix_one = {'M':10**6, 'G':10**9}
suffix_two = { x + 'B': suffix_one[x] for x in suffix_one.keys()}
if s[-1] in suffix_one:
txtnum = s[0:-1]
pwr = suffix_one[s[-1]]
elif s[-2:] in suffix_two:
txtnum = s[0:-2]
pwr = suffix_two[s[-2:]]
else:
raise NotImplementedError
return eval(txtnum) * pwr
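# Examples (illustrative): size_str_to_num("1MB") == 10**6 and
# size_str_to_num("2G") == 2 * 10**9; any other suffix raises NotImplementedError.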
def read_file_data(filename_tuple, dataShape, subsetsz=5000):
"""
    Read data from a single tuple of files. Each file in the tuple should be associated
    with the same output tuple.
    The parameter `subsetsz` is the number of rows randomly selected from each file.
"""
##GET A RANDOM SUBSET OF ROWS FROM EACH FILE: THESE ROWS WILL MATCH AMONG X,Y,M1,M2.
def get_file_len(shortfname):
datum = read_csv(shortfname, header=None).values
return len(datum)
shortfname = filename_tuple['edgesX-*.csv']
slen = get_file_len(shortfname)
subs = np.arange(slen)
np.random.seed(0)
np.random.shuffle(subs)
# Debug info
if False:
print(filename_tuple)
print(get_file_len(shortfname), subs)
fnames = filename_tuple.keys()
data_gruppe = {field : np.empty((0,dataShape[field])) for field in fnames}
# Only use 'subsetsz' data points; note subsetsz is a number.
skipthese = lambda x: x not in subs[:subsetsz]
for field in fnames:
fieldDataFile = filename_tuple[field]
# Select the data type depending on if we are reading DCM matrices
# (with exact rational entries)
#
# TODO: (low priority): The 442 is a magic number. The data type could be specified
# via the "FilenameByField" object.
#
datatype = 'str' if dataShape[field]==442 else float
datum = read_csv(fieldDataFile, dtype=datatype, skiprows=skipthese, header=None).values
        # Catch misshaping from single-line files.
if len(datum.shape) == 0:
continue # Empty file, so do nothing.
elif len(datum.shape) == 1:
datum = np.array([datum])
data_gruppe[field] = datum
# data_gruppe[field] = datum[subset(datum.shape[0])]
return DataGroup(data_gruppe)
###############################################################################################
# Classes
class DataGroup(dict):
def __init__(self, *args, empty=False):
filename_by_field = args[0]
        if empty:
            super().__init__()
for field in filename_by_field.keys():
dataShape = filename_by_field.shape()
self[field] = np.empty((0,dataShape[field]))
else:
            super().__init__()
for field, val in filename_by_field.items():
self[field] = val
def concatenate(self, DG):
for key in self:
datum = DG[key]
self[key] = np.concatenate((self[key], datum), axis=0)
def append(self, DG):
self.concatenate(DG)
def data_size(self):
return sum(A.nbytes for k,A in self.items())
def truncate_size_to(self, new_size):
        if self.data_size() > new_size:
first_row_size = sum(A[0].nbytes for k,A in self.items())
num_keep = int(new_size / first_row_size)
for key in self:
self[key] = self[key][0:num_keep]
class FilenameByField(dict):
def __init__(self, raw_fbf_dict, shape):
self._shape = shape
        super().__init__()
for field, val in raw_fbf_dict.items():
self[field] = val
def shape(self):
return self._shape
class DataSet:
"""
Class allowing for the manipulation of the dataset as an abstract list of
filenames. Has methods to actually read in the data.
"""
def __init__(self, folder, dataShape, ratio, verbose=False):
####
# Arrange filenames into an array
#
fnames = list(dataShape.keys())
field_globs = {field : glob.glob(os.path.join(folder,"**",field), recursive=True)
for field in fnames}
filename_by_field = FilenameByField({field : np.sort(field_globs[field]) for field in fnames}, dataShape)
if verbose:
self._print_verbose_reading_info(field_globs)
# total length
file_list_len = len(filename_by_field[fnames[0]])
Yfile_list_len = len(filename_by_field['timingsY-*.csv'])
if file_list_len == 0:
self._raise_no_data_error(folder, fnames)
# Check if the data is labelled
is_data_labelled = (not Yfile_list_len == 0)
if not is_data_labelled:
fnames.remove('timingsY-*.csv')
filename_by_field.pop('timingsY-*.csv', None)
# Initialize the object variables.
self.filename_by_field = filename_by_field
self._is_labelled = is_data_labelled
self._ratio = ratio # train/test ratio
#######
    def _print_verbose_reading_info(self, field_globs):
head_globs = field_globs['edgesX-*.csv'][0:10]
num_other_files = len(field_globs['edgesX-*.csv']) - len(head_globs)
print("Input files:")
print('\n'.join(head_globs))
if num_other_files > 0:
print(" ...\n", 9*" ", "...and {} other files.\n".format(num_other_files))
else:
print("\n")
def _raise_no_data_error(self, folder, fnames):
error_string = "Input data directory contains no data matching filename specification.\n"
error_val1 = "INPUT_DIR: {}".format(folder)
error_val2 = "fnames: {}".format(fnames)
error_val3 = "glob example: {}".format(os.path.join(folder,"*"+fnames[0]))
error_post = "Please update the NNCONFIG.py file if the folder is incorrect."
raise RuntimeError('\n'.join([error_string, error_val1,
error_val2, error_val3, error_post]))
#######
# Basic attribute access
#######
def is_labelled(self):
return self._is_labelled
#######
# Internal data reading
#######
def _read_data(self, filename_by_field, Sampler):
"""
Main method to read in the data from the filenames specified by
'filename_by_field', using the 'Sampler'.
"""
fnames = list(filename_by_field.keys())
        if MAX_INPUT_DATA_SIZE is None:
max_size = math.inf
else:
max_size = size_str_to_num(MAX_INPUT_DATA_SIZE)
print("MaxSize is: ", max_size, " bytes.")
# Reading data
data_gruppe = Sampler(filename_by_field, max_size)
self._check_data_validity(data_gruppe)
# Clip the hashes off and return.
for a in data_gruppe:
data_gruppe[a] = data_gruppe[a][:,1:]
return data_gruppe, self.is_labelled()
def _check_data_validity(self, data_gruppe):
fnames = data_gruppe.keys()
hashes = np.asarray([data_gruppe[field][:,0] for field in fnames], dtype='int')
if not np.all(np.equal.reduce(hashes)):
hash_error_msg = str(hashes[:,~np.equal.reduce(hashes)])
raise RuntimeError("Possible data corruption: hashes do not match.\n"+hash_error_msg)
#######
# Data access methods.
#######
def read_all(self):
return self._read_data(self.filename_by_field, Sampler=BasicSampler)
def sample_training(self, sampler):
return self._read_data(self._training_files, Sampler=sampler)
def sample_testing(self, sampler):
return self._read_data(self._testing_files, Sampler=sampler)
#######
# Partitioning
#######
def partition(self, independent=True):
"""
Partitions data folders into training and testing. Ratio specifies how large
each of these are. The independent parameter loosely controls whether the success/fail
ratio of the whole dataset is reflected in the partition.
"""
filename_by_field = self.filename_by_field
fnames = filename_by_field.keys()
file_list_len = len(self.filename_by_field[list(fnames)[0]])
num_training = int(self._ratio * file_list_len)
##THIS IS A NEW RANDOM SEED. Need this to match between train and test!
np.random.seed(30)
randomized_order = np.random.permutation(file_list_len)
if num_training == 0 or num_training == file_list_len:
self._raise_bad_partition_error(file_list_len, num_training)
# TODO: Implement the independent selection.
training_indices = randomized_order[0:num_training]
testing_indices = randomized_order[num_training:]
self._training_files = FilenameByField(
{field : filename_by_field[field][training_indices] for field in list(fnames)},
filename_by_field.shape())
self._testing_files = FilenameByField(
{field : filename_by_field[field][testing_indices] for field in list(fnames)},
filename_by_field.shape())
def _raise_bad_partition_error(self, file_list_len, num_training):
msg_template = "Partition of {} files not possible with ratio {}. Results in {} with 0 files."
bad_set = "training set" if num_training == 0 else "testing set"
error_msg = msg_template.format(file_list_len, self._ratio, bad_set)
raise RuntimeError(error_msg)
###############################################################################################
# Samplers
def BasicSampler(filename_by_field, max_size):
"""
Read data from all the filenames inside 'filename_by_field', up to the limit specified
by 'maxsize'.
"""
fnames = filename_by_field.keys()
file_list_len = len(filename_by_field[list(fnames)[0]])
data_gruppe = DataGroup(filename_by_field, empty=True)
for i in range(file_list_len):
# Read a single file of data.
filename_tuple = {field : filename_by_field[field][i] for field in fnames}
data_gruppe_item = read_file_data(filename_tuple, filename_by_field.shape())
data_gruppe.concatenate(data_gruppe_item)
# Check the total data size
if data_gruppe.data_size() >= max_size:
data_gruppe.truncate_size_to(max_size)
break
return data_gruppe
def RandomSampler(filename_by_field, max_size, verbose=False):
"""
Select a random subset of data from all the filenames inside 'filename_by_field', up to the
limit specified by 'maxsize'.
"""
fnames = filename_by_field.keys()
file_list_len = len(filename_by_field[list(fnames)[0]])
data_gruppe = DataGroup(filename_by_field, empty=True)
for i in np.random.permutation(file_list_len):
filename_tuple = {field : filename_by_field[field][i] for field in fnames}
# This takes a long time...
data_gruppe_item = read_file_data(filename_tuple, filename_by_field.shape())
data_gruppe.concatenate(data_gruppe_item)
# Check the total data size
if verbose:
print("before truncating, data_gruppe size was: ",data_gruppe.data_size(), " bytes.")
if data_gruppe.data_size() >= max_size:
data_gruppe.truncate_size_to(max_size)
print("after truncating, data_gruppe size is now: ",
data_gruppe.data_size(), " bytes.")
print("max size allowed was: ",int(max_size), " bytes.")
break
return data_gruppe
def RandomBalancedSampler(filename_by_field, max_size): # TODO: allow arg passing of ratio
"""
Select a random subset of data from all the filenames inside 'filename_by_field', up to the
limit specified by 'maxsize'.
"""
fnames = filename_by_field.keys()
    # Hardcoded ratio constant
success_ratio = 0.5
# Divide successes and fails.
import re
spat = re.compile('edge-data')
fpat = re.compile('failed-edges')
successes = FilenameByField(
{field : [a for a in filename_by_field[field] if spat.search(a)] for field in fnames},
filename_by_field.shape())
failures = FilenameByField(
{field : [a for a in filename_by_field[field] if fpat.search(a)] for field in fnames},
filename_by_field.shape())
suc_gruppe = RandomSampler(successes, max_size * success_ratio, False)
fail_gruppe = RandomSampler(failures, max_size * (1-success_ratio), False)
fail_gruppe.concatenate(suc_gruppe)
return fail_gruppe
###############################################################################################
# Read Data
def ReformatData(data_gruppe, is_data_labelled, NumMats):
Y = None
DCM_stack_list = []
for a in data_gruppe:
start = process_time()
if a[0:8] == "timingsY":
Y = data_gruppe[a][:,0]
Y = (Y<1000)*1
Y = Y.ravel()
elif a[0:3] == "DCM":
datum = data_gruppe[a]
mat = np.reshape(datum, (datum.shape[0],21,21))
mat = np.asarray([MatrixComplexity(m) for m in mat])
DCM_stack_list += [mat]
else:
data_gruppe[a] = np.asarray(data_gruppe[a], dtype='float64')
mid = process_time()
if DCM_stack_list != []:
Ms = np.stack(DCM_stack_list, axis=3)
# LEGACY: Supports the old 4-5-nomial datasets for backward compatibility.
if NumMats==1:
Mss = Ms[:,:,:,0]
if Ms.shape[3]==1: #if you're only given one cohomology matrix in the file
ds = int(np.sqrt(dataShape["DCM-*.csv"]-1))
elif Ms.shape[3]==2: #if you're given two cohomology matrices but only want to use one
ds = int(np.sqrt(dataShape["DCM01-*.csv"]-1))
Ms = np.reshape(Mss,(len(Mss),ds,ds,NumMats))
end = process_time()
print("\n\nReformat time thresh/reshape: ",mid-start," time.")
print("Reformat time process Ms: ",end-mid," time.\n\n")
return data_gruppe["edgesX-*.csv"], Y, Ms
def _reformat_ys(y):
pass
def _reformat_dcm_matrices(Ms):
ds = int(np.sqrt(dataShape["DCM01-*.csv"]-1))
    test_M = np.reshape(Ms, (len(Ms), ds, ds, NumMats))
    if NumMats == 1:
        test_M = test_M[:, :, :, 0]
return test_M
def MatrixComplexity2(M): #total number of digits per element in each matrix
W = []
for sample in M:
Wi=[entry.replace("-","").replace(" ","").split('/') for entry in sample]
W.append([sum([0 if u=='0' else len(u) for u in w]) for w in Wi])
return np.array(W)
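# Example (illustrative): MatrixComplexity2([["1/2", "-10"]]) returns array([[2, 2]]):
# "1/2" contributes one digit for the numerator and one for the denominator, and
# "-10" contributes two digits once the sign is stripped.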
def ReadDataAndFormat(input_dir, dataShape, NumMats, data_part, ratio,
Sampler=BasicSampler, verbose=False):
start = process_time()
data_set = DataSet(input_dir, dataShape, ratio, verbose=verbose)
if data_part == "training":
data_set.partition() ## KH Moved this 7/19. I think we only want to call this once for the following.
data_gruppe, is_data_labelled = data_set.sample_training(sampler=Sampler)
elif data_part == "testing":
data_set.partition() ## KH Moved this 7/19. I think we only want to call this once for the following.
data_gruppe, is_data_labelled = data_set.sample_testing(sampler=Sampler)
elif data_part == "all":
data_gruppe, is_data_labelled = data_set.read_all()
else:
raise ValueError("Invalid value for data_part: {}".format(data_part))
mid = process_time()
outdat = ReformatData(data_gruppe, is_data_labelled, NumMats)
end = process_time()
print("\n\nReadData takes: ",mid-start," time.")
print("ReformatData takes: ",end-mid, " time.\n\n")
return outdat
#def KH_circumvent(input_dir, dataShape, NumMats, data_part, ratio, Sampler=BasicSampler, verbose=False):
# #this is just to do the 4nomial case, for KH. different file format.
# #this is the alternative to ReadDataAndFormat that generated Table 9 in paper.
#
# data_set = DataSet(input_dir, dataShape, ratio, verbose=verbose)
# data_gruppe, is_data_labelled = data_set.read_all()
# for a in data_gruppe:
# num_samps = len(data_gruppe[a])
#
# scrambler = KHpermutation()
# stopat = int(ratio * len(scrambler))
#
# if data_part == "training":
# for a in data_gruppe:
# data_gruppe[a] = data_gruppe[a][scrambler[:stopat]]
# elif data_part == "testing":
# for a in data_gruppe:
# data_gruppe[a] = data_gruppe[a][scrambler[stopat:]]
#
# outdat = ReformatData(data_gruppe, is_data_labelled, NumMats)
# return outdat
def PerformPCA(PCAk, train_x):
print("\n\nSTEP 1 (OPTIONAL): Doing PCA for dimension reduction...")
pca = PCA(n_components=PCAk)
pca.fit(train_x)
print("...singular values of input dataset: \n", pca.singular_values_,"\n")
# plt.plot(pca.singular_values_)
train_x_pca = pca.transform(train_x) #dimension reduced by PCA. First PCAk comp proj.
return train_x_pca,pca
def UpSampleToBalance(X,y,M):
print("\n\nSTEP 2 (OPTIONAL): Balancing Dataset...")
y0 = y.ravel()
y_succ,y_fail = y[y0==1],y[y0==0]
X_succ,X_fail = X[y0==1],X[y0==0]
M_succ,M_fail = M[y0==1],M[y0==0]
nsamps = np.round(len(y[y0==0])).astype('int')
# Upsample minority class
X_succ_upsampled, M_succ_upsampled, y_succ_upsampled \
= resample(X_succ, M_succ, y_succ, replace=True,\
n_samples=nsamps,\
random_state=0)
# Combine majority class with upsampled minority class
X_upsampled = np.concatenate((X_fail, X_succ_upsampled))
M_upsampled = np.concatenate((M_fail, M_succ_upsampled))
y_upsampled = np.concatenate((y_fail, y_succ_upsampled))
print("***** # successes in BALANCED training set: ",
np.sum(y_upsampled)," / ",y_upsampled.shape[0]," total training samples.")
return X_upsampled, y_upsampled, M_upsampled
def MatrixComplexity(M,out="W"): #total number of digits in each matrix
#out can be "W" or "ND"
W,N,D = [],[],[]
char2delete = "-"," ","}","{",'"'
def replacechar(string):
for char in char2delete:
string = string.replace(char,"")
return string
if out=="W":
for sample in M:
Wi=[replacechar(entry).split('/') for entry in sample]
Wj=[sum([len(u) for u in w]) for w in Wi]
W.append(Wj)
return np.array(W)
else:
for sample in M:
Wi=[replacechar(entry).split('/') for entry in sample]
for w in Wi:
                if len(w) == 1:
                    w.append(0)  # entries without a '/' get denominator 0
Wj=np.asarray([[int(u) for u in w] for w in Wi])
N.append(Wj[:,0])
D.append(Wj[:,1])
return np.asarray(N),np.asarray(D)
def MatrixStats(Ns,Ds):
def kl_divergence(p, q):
return -np.sum(np.where(p*q != 0, p * np.log(p / q), 0))
maxM,kldM,sumM,lenM,avgM,entM = [],[],[],[],[],[]
for N,D in zip(Ns,Ds):
NpD = N+D
maxM.append(np.max(NpD))
kldM.append(kl_divergence(N,D))
sumM.append(np.sum(NpD))
lenM.append(len(NpD[NpD>0]))
avgM.append(np.sum(NpD)/len(NpD[NpD>0])) #average over nonzero elements
entM.append(scipy.stats.entropy(NpD))
titles = ["MAXIMUM", "KL DIVERGENCE", "SUM", "LENGTH OF NONZEROS", "AVERAGE OVER NONZEROS", "ENTROPY"]
return np.asarray([maxM,kldM,sumM,lenM,avgM,entM]),titles
| 20,536 | 34.046075 | 113 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/software_test.py
|
#
# This file tests basic usage of training and evaluation.
# NOTE: The training tests must be run beforehand to generate testing data.
#
# NOTE: This test *must* be run in the current directory with python3.
#
import os, subprocess
assert subprocess.call(["python3", "AI_train.py"]) == 0
assert subprocess.call(["python3", "AI_analyze.py"]) == 0
| 350 | 26 | 75 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/AI_finetune.py
|
import os, sys, scipy.io, scipy.linalg, time, random, pickle
from time import time
###
# In CONFIG
# -- paths
# -- balance
# -- PCA (how many components)
# -- number cohomology mats
# -- max-data-size : Read files until file sizes exceeds max-data-size
# -- output : Saved models
# -- output : Saved predictions
# -- hyperparameter config.
from NNCONFIG import *
from util import *
from AI_functions import *
#**************************************************
# Main script.
start_time = time()
sampler = BasicSampler if dataStream==1 else RandomBalancedSampler
print("Using sampler: ", sampler)
# Read and process the data.
data_gruppe, is_data_labelled = ReadData(INPUT_DIR, None, dataShape, Sampler=sampler, verbose=False)
data = ReformatData(data_gruppe, is_data_labelled, NumMats)
train_x,train_y,train_M = data
print("\n\n# successes in original training set: ",
np.sum(train_y)," / ",train_y.shape[0],
" total training samples.")
#old_model_id = '_newest' if ReadNewest else OldModel
#old_model_bundle = load_model_bundle(os.path.join(NN_PATH, 'SavedModels', ''), old_model_id)
old_model_bundle = fetch_model(NN_PATH, ReadNewest, UseModel)
BM = finetune_bundle(old_model_bundle, data, BatchSize=BatchSize, EpochNum=EpochNum, Balancing=Balancing)
### SAVE MODEL ITSELF TO FILE
network_name = BM.model_id
BM.save(os.path.join(NN_PATH, 'SavedModels/',''), also_to_newest=True)
paramsNN,paramsCN = [OldModel],[OldModel]
elapsed_time = time() - start_time
#**************************************************
### WRITE MODEL PARAMETERS TO FILE
reference_network = old_model_bundle.name()
paramsNN,paramsCN = [OldModel],[OldModel]
# Note: We are aiming for a 'large lower right' & 'very small upper right' of the confusion matrices.
ParamsSetup = [IsNewData, network_name, not FineTuneInTraining, NumMats,Balancing,DoPCA,
INPUT_DIR,elapsed_time, reference_network, random_seed]
WriteParameters(os.path.join(NN_PATH, "SavedModels"), network_name, ParamsSetup)
WriteParameters(os.path.join(NN_PATH, "SavedModels"), "_newest", ParamsSetup)
| 2,057 | 31.666667 | 101 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/table9_script.py
|
########################################################################################
#
# Script that combines AI_train and AI_analyze in multiple rounds to generate table
# data for the article. Not part of the main software package.
#
########################################################################################
# Python 3.7.3.
import os, sys, scipy.io, scipy.linalg, random
from time import time
# Suppress warnings from tensorflow; only display errors
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Remaining dependencies
import numpy as np, matplotlib.pylab as plt
###
# In CONFIG
# -- paths
# -- balance
# -- PCA (how many components)
# -- number cohomology mats
# -- max-data-size : Read files until file sizes exceeds max-data-size
# -- output : Saved models
# -- output : Saved predictions
# -- hyperparameter config.
from NNCONFIG import *
from util import *
from data_handling import *
from model_bundle import *
from AI_functions import *
#**************************************************
# Functions. TODO: create a separate module, or move to AI_functions.
def save_training_info(NN_PATH, BM, INPUT_DIR, NumMats, random_seed,
MAX_INPUT_DATA_SIZE, ttratio, sampler, train_y):
BM.save(os.path.join(NN_PATH, 'SavedModels/',''), also_to_newest=False)
reference_network = str(BM.base_network_name())
# Parameters that the network hasn't yet remembered about itself.
setup_params = {"Num cohomology matrices / pair" : NumMats,
"Total time elapsed" : 0,
"Random seed" : random_seed,
"Training set filename" : '"{}"'.format(INPUT_DIR)}
model_save_path = os.path.join(NN_PATH, "SavedModels", '')
BM.save_parameters(model_save_path, setup_dic=setup_params,
params_dic=network_architecture_hyperparameters, also_to_newest=False)
## Save the information about the training set.
success_percent_str = "{} / {}".format(np.sum(train_y), train_y.shape[0])
data_info = {"MAX_INPUT_DATA_SIZE" : MAX_INPUT_DATA_SIZE,
"Train/test ratio" : ttratio,
"Random seed" : random_seed,
"Sampler name" : sampler.__name__,
"Percentage successes in training set" : success_percent_str,
"Training set filename" : '"{}"'.format(INPUT_DIR)}
BM.save_training_data_info(model_save_path, data_info)
return
##
def WriteTrainingConfusionStats(ModelNum, pCN, pNN, pEN, test_y,
print_matrices=True, write_table9=False):
"""
Write the confusion matrices using the optimal threshold for the ensemble network.
(Note: we **do not** also write the confusion matrices using the individually optimal thresholds.)
If print_matrices=True, also print **both** types of confusion matrix to standard out.
"""
# Determine optimal thresholds.
opt_th_pNN = OptimalROCSup(pNN, test_y, "NN")
opt_th_pCN = OptimalROCSup(pCN, test_y, "CN")
opt_th_pEN = OptimalROCSup(pEN, test_y, "EN")
# Threshold.
yNN = ThresholdProbs(pNN, opt_th_pNN)
yCN = ThresholdProbs(pCN, opt_th_pCN)
yEN = yCN*yNN
yEN_opt_th_pEN = ThresholdProbs(pEN, opt_th_pEN) #not equivalent to yEN = yCN*yNN.
# TODO: 'params' are kept for legacy, but ultimately serve no purpose. Should be removed.
paramsNN, paramsCN = [ModelNum], [ModelNum]
argsin = [NN_PATH, ModelNum, paramsNN, paramsCN, test_y, yNN, yCN, yEN]
WriteConfusion(*argsin)
if print_matrices:
print("Optimal thresholds:")
print("opt_th_pNN:", opt_th_pNN)
print("opt_th_pCN:", opt_th_pCN)
print("opt_th_pEN:", opt_th_pEN)
PrintConfusion(test_y, yNN, yCN, yEN) #prints confusion matrices per filter
PrintConfusion(test_y, yNN, yCN, yEN_opt_th_pEN) #prints confusion matrices per filter
if write_table9:
util._WriteTable9Data(os.path.join(NN_PATH, 'EvalOutputs', 'table9data.txt'),
MB, ttratio, confusion_matrix(test_y, yEN))
return
##
# Imports to print table9
from sklearn.metrics import confusion_matrix
import util
###################################################
# Main script.
#
# NOTE: We are aiming for a 'large lower right' and 'very small upper right'
# of the confusion matrices for the trained network.
#
# Terminology: EN -- "Ensemble of networks"
####################
######
## Data setup.
sampler = RandomSampler if dataStream==1 else RandomBalancedSampler
print("Using sampler: ", sampler.__name__)
# Read and process the data.
train_data = ReadDataAndFormat(INPUT_DIR, dataShape, NumMats, "training", ttratio,
Sampler=sampler, verbose=False)
test_data = ReadDataAndFormat(INPUT_DIR, dataShape, NumMats, "testing", ttratio,
Sampler=sampler, verbose=False)
train_x, train_y, train_M = train_data
test_x, test_y, test_M = test_data
## Display training data stats
if train_y is None:
error_msg = "Data in input directory is unlabelled. Directory: {}".format(INPUT_DIR)
raise RuntimeError(error_msg)
print("\n\n# successes in original training set: ",
np.sum(train_y), " / ", train_y.shape[0],
" total training samples.")
######
## Training/Testing loop
for dummy_var in range(2):
######
## Run training protocol.
# ** Actually training/loading the network.
if not FineTuneInTraining:
BM, paramsNN, paramsCN = train_model_bundle(train_data, NumMats,
**network_architecture_hyperparameters)
else:
old_model_bundle = fetch_model(NN_PATH, ReadNewest, UseModel)
BM = finetune_bundle(old_model_bundle, train_data, **finetune_hyperparameters)
## Save the training run info.
save_training_info(NN_PATH=NN_PATH, BM=BM, INPUT_DIR=INPUT_DIR, NumMats=NumMats,
random_seed=random_seed, MAX_INPUT_DATA_SIZE=MAX_INPUT_DATA_SIZE,
ttratio=ttratio, sampler=sampler, train_y=train_y)
######
## Run testing protocol.
# Load file containing pretrained networks and evaluate on testing data.
MB = fetch_model(NN_PATH, ReadNewest, UseModel)
pCN, rCN, pNN, rNN, pEN, rEN = MB.evaluate_models(test_data)
ModelNum = MB.name()
print(" Using trained model: ", ModelNum, "\n\n***")
WriteTrainingConfusionStats(ModelNum, pCN, pNN, pEN, test_y,
print_matrices=True, write_table9=True)
| 6,666 | 32.84264 | 102 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/util.py
|
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
# Utilities for:
# (1) Analyzing results.
# (2) Writing/Printing results.
from NNCONFIG import *
import scipy.linalg
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve
from sklearn.utils import resample
from numpy import genfromtxt
from sklearn.decomposition import PCA
import glob, os
import pickle as pk
import matplotlib.pylab as plt
import math
from sys import getsizeof
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
# Analytics.
def clusteringResults(yGuess,yTrue,titleString):
    err = yGuess - yTrue
    fps = np.count_nonzero(err == 1)
    trs = np.count_nonzero(err == 0)
    fns = np.count_nonzero(err == -1)
print(
"\n"+titleString+
"\n % false positives: "+str(100*fps/err.shape[0])+"%,"+
"\n % false negatives: "+str(100*fns/err.shape[0])+"%,"+
"\n % correct guesses: "+str(100*trs/err.shape[0])+"%.\n")
return err
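# Illustrative reading of the return value: with yGuess = np.array([1, 0, 1, 1]) and
# yTrue = np.array([1, 1, 0, 1]), err = [0, -1, 1, 0], i.e. one false negative,
# one false positive and 50% correct guesses reported by the printout.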
import collections
def OptimalROCSup(p_predict, y_true, ttl, zepplin=False):
"""
    Return the *supremum* of the thresholds T for which the ROC distance
    tpr**2 + (1 - fpr)**2 is optimal. Note that the aforementioned T is never
    actually in this set.
"""
fpr, tpr, thresholds = roc_curve(y_true,p_predict,pos_label=1)
plt.plot(fpr,tpr,label=ttl)
plt.title("ROC curves.")
roc_dist = tpr**2+(1-fpr)**2
T_best = np.argmax(roc_dist)
if zepplin:
print("Been dazed and confused for so long it's not true!")
return thresholds[T_best]
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
# Saving to files.
def WritePlots(NN_PATH,uniquenum,pNN,pCN,pEN,test_y):
fname = os.path.join(NN_PATH,"Plot"+uniquenum)
plt.figure()
_ = plt.hist(pNN, bins=10, alpha=0.7, label="MLP") # arguments are passed to np.histogram
_ = plt.hist(pCN, bins=10, alpha=0.7, label="CNN") # arguments are passed to np.histogram
plt.title("Histograms of SUCC/FAIL probabilities on Test")
plt.legend(prop={'size': 10})
plt.savefig(fname + "_1.png")
fig=plt.figure()
fig.suptitle("Histograms of SUCC/FAIL probabilities on TEST")
ax1=plt.subplot(1, 2, 1)
ax2=plt.subplot(1, 2, 2)
_ = ax1.hist(pNN[test_y==1], bins=10, alpha=0.7, label="MLP")
_ = ax1.hist(pCN[test_y==1], bins=10, alpha=0.7, label="CNN")
plt.legend(prop={'size': 10})
ax1.set_title('Test TRUE SUCCESSES')
_ = ax2.hist(pNN[test_y==0], bins=10, alpha=0.7, label="MLP")
_ = ax2.hist(pCN[test_y==0], bins=10, alpha=0.7, label="CNN")
plt.legend(prop={'size': 10})
ax2.set_title('Test TRUE FAILURES')
plt.savefig(fname + "_2.png")
fig = plt.figure()
plt.plot(pEN, label = "Ensemble Prob")
plt.plot(test_y, label = "True Prob")
plt.legend(prop={'size': 10})
plt.savefig(fname + "_3.png")
fig = plt.figure()
plt.plot(pEN[test_y==1], label = "Ensemble Prob")
pdfT = test_y[test_y==1].astype(float)
pdfT /= max(pdfT)
plt.plot(pdfT, label = "True Prob")
plt.legend(prop={'size': 10})
plt.savefig(fname + "_4.png")
# plt.show()
return
def ThresholdProbs(vec,t):
tvec = np.zeros((vec.shape))
tvec[vec > t] = 1
return tvec
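# Example (illustrative): ThresholdProbs(np.array([0.2, 0.7, 0.9]), 0.5) returns
# array([0., 1., 1.]); entries equal to the threshold are mapped to 0.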
def WriteConfusion(NN_PATH,uniquenum,paramsNN,paramsCN,test_y,yNN,yCN,yEN):
fname = os.path.join(NN_PATH, "EvalOutputs/ConfMats"+uniquenum+".txt")
with open(fname,"w+") as f:
#####
f.write("\n\n*****************\n\nFilter 1 (ReLU MLP) Params:\n")
if len(paramsNN)==1:
#loading a pre-trained model
B = "Loading from pre-trained MLP model: " + str(paramsNN)
else:
strg = ["\nWidth of each hidden layer: ",
"\nRegularization penalty parameter: ",
"\nNumber of training iterations: "]
B = [s+str(n) for s,n in list(zip(strg,paramsNN))]
f.write(''.join(B))
f.write("\n\nConfusion Matrix for MLP:\n")
f.write(str(confusion_matrix(test_y,yNN)))
#####
f.write("\n\n*****************\n\nFilter 2 (CNN) Params:\n")
if len(paramsCN)==1:
#loading a pre-trained model
B = "Loading from pre-trained CNN model: " + str(paramsCN)
else:
strg = ["\nBatch size for training: ",
"\nEpoch length: ",
"\nBatch size for testing: "]
B = [s+str(n) for s,n in list(zip(strg,paramsCN))]
f.write(''.join(B))
f.write("\n\nConfusion Matrix for CNN:\n")
f.write(str(confusion_matrix(test_y,yCN)))
f.write("\n\n*****************\n\nConfusion Matrix for Ensemble:\n")
f.write(str(confusion_matrix(test_y,yEN)))
f.write("\n\n*****************\n")
print("\nConfusion matrices written to: ", fname, "\n")
return
def _WriteTable9Data(fname, MB, ttratio, confusion_mat):
"""
Writes to a file specified by 'fname'. Each row of the file is of the form
<model id>, <train-test ratio (alpha)>, [C.ravel()], TP+TN/(FP+FN)
where 'C' is the confusion matrix of the ensemble network. Table 9 refers to the table
in the article accompanying this software.
"""
with open(fname, 'a') as F:
A = confusion_mat
conf_rat = (A[0,0] + A[1,1])/(A[0,1] + A[1,0])
line = ', '.join([MB.name(), str(ttratio), str(A.ravel()), str(conf_rat)])
F.write(line+'\n')
return
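# Illustrative row (hypothetical values): "May_10_12,34,56, 0.3, [50  4  6 40], 9.0"
# for an ensemble confusion matrix [[50, 4], [6, 40]] and train/test ratio 0.3.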
def WritePredictions(NN_PATH, INPUT_DIR, rand_seed, readnum, uniquenum, datain):
# Internally keep training transcripts, for future analysis.
transcript_directory = os.path.join(NN_PATH, "EvalOutputs", '') #save files locally
pCN, rCN, pNN, rNN, pEN, rEN = datain
print(rNN.shape, rCN.shape)
file_data = [pCN, rCN, pNN, rNN, pEN, rEN]
file_names = [
"ProbabilitiesNN{}.txt", "RankedCoefsNN{}.txt",
"ProbabilitiesCN{}.txt", "RankedCoefsCN{}.txt",
"ProbabilitiesEN{}.txt", "RankedCoefsEN{}.txt"]
true_file_names = [name.format(uniquenum) for name in file_names]
for i in range(len(true_file_names)):
np.savetxt(transcript_directory+true_file_names[i],file_data[i])
# Save the name of the network to refer to later.
with open(transcript_directory+"EvalParams{}.txt".format(uniquenum), 'w+') as F:
F.write("\nModel identifier: " + str(readnum) +"\n")
F.write("Model folder: " + str(NN_PATH) +"\n")
F.write("Eval Data folder: " + str(INPUT_DIR)+"\n")
F.write("Random seed: " + str(rand_seed)+"\n")
F.write("Eval identifier: " + str(uniquenum)+"\n")
# Print a status update:
print("\nProbabilities & Rankings written to: ")
[print(tfn) for tfn in true_file_names]
print("\n")
return
def WritePredictionsToSagePipe(NN_PATH, datain):
output_directory = os.path.join(NN_PATH,"..",'') # for the ai_output file (global)
pCN, rCN, pNN, rNN, pEN, rEN = datain
print(rNN.shape, rCN.shape)
temp = np.array2string(rEN, separator=',', threshold=1e32, max_line_width=1e32)[1:-1]
ensemble_sorted_edges = temp.replace("],","]").replace(".","").replace(" [","[")
# Write the output for reading by the ODE-computation process.
#np.savetxt(output_directory+"ai_output", rEN.astype(int), fmt="%1i")
with open(output_directory+"ai_output", "w+") as f:
f.write(ensemble_sorted_edges)
# Also write the probability vectors to output.
np.savetxt(output_directory+"ai_probabilities", np.stack((pEN, pNN, pCN), axis=1))
return
def ReadPredictions(NN_PATH, uniquenum):
transcript_directory = os.path.join(NN_PATH, "EvalOutputs", '')
file_names = [
"ProbabilitiesNN{}.txt", "RankedCoefsNN{}.txt",
"ProbabilitiesCN{}.txt", "RankedCoefsCN{}.txt",
"ProbabilitiesEN{}.txt", "RankedCoefsEN{}.txt",
]
with open(transcript_directory+"EvalParams{}.txt".format(uniquenum), 'r') as F:
NetNum = F.read()
true_file_names = [name.format(uniquenum) for name in file_names]
file_data = [np.loadtxt(transcript_directory+tfn, dtype=float) for tfn in true_file_names]
file_data += [NetNum]
return file_data
def PrintConfusion(test_y, yNN, yCN, yEN, show_legend=True):
if show_legend:
print("\n*********************")
print(" LEGEND:\n")
print(" The entries of the confusion matrix C_{i,j} are the number of objects ")
print(" with label 'i' assigned label 'j'. In this case, the first row corresponds ")
print(" to 'failed' labels.")
print("\n*********************")
print("\n Confusion Matrix, Filter 1 (MLP):\n", confusion_matrix(test_y,yNN))
print("\n Confusion Matrix, Filter 2 (CNN):\n", confusion_matrix(test_y,yCN))
print("\n Confusion Matrix, given BOTH filters:\n",confusion_matrix(test_y,yEN))
print("\n*********************\n")
def discrete_matshow(data):
# #get discrete colormap
# cmap = plt.get_cmap('YlGnBu', np.max(data)-np.min(data)+1)
# # set limits .5 outside true range
# mat = plt.imshow(data,cmap=cmap,vmin = np.min(data)-.5, vmax = np.max(data)+.5)
# #tell the colorbar to tick at integers
# cax = plt.colorbar(mat, ticks=np.arange(np.min(data),np.max(data)+1))
# #plt.show()
fig,a = plt.subplots(2,1)
for i in [0,1]:
d = data[:,:,i]
cmap = plt.get_cmap('Blues', np.max(d)-np.min(d)+1)
mat = a[i].imshow(d,cmap=cmap,vmin=0,vmax=5)#vmin = np.min(d)-.5, vmax = np.max(d)+.5)
a[i].axis('off')
# cax = plt.colorbar(mat,ax=a[i])#, ticks=np.arange(0,7))#np.min(d),np.max(d)+1))
| 10,743 | 38.069091 | 95 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/AI_functions.py
|
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
# Utilities for creating and fine-tuning neural networks in Keras.
import os, sys, scipy.io, scipy.linalg, time, random, pickle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' #don't display warnings; only errors
import numpy as np, tensorflow as tf, matplotlib.pylab as plt
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from numpy import genfromtxt
#from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from time import time, asctime
import pickle as pk
import tensorflow as tf
from keras.models import Sequential,load_model
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from keras.utils import to_categorical
from keras import optimizers
from util import *
from model_bundle import *
from data_handling import *
def generate_network_name(reference_network=None):
# Old names str(int(time()))
    if reference_network is None:
date_components = asctime().replace(':', ',').split()
return '_'.join(date_components[1:4])
else:
return reference_network + '+' + generate_network_name() + '_FT'
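# Illustrative output (actual values depend on the current date and time): a fresh
# network is named like "May_10_12,34,56"; fine-tuning an existing one yields e.g.
# "May_10_12,34,56+Jun_01_09,00,01_FT".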
def finetune_bundle(old_model_bundle, data, **kwds):
# Parse
    EpochNum = kwds.get('EpochNum', 5)
    BatchSize = kwds.get('BatchSize', 32)
    Balancing = kwds.get('Balancing', True)
## Begin actual function code ##
train_x,train_y,train_M = data
if Balancing:
train_x,train_y,train_M = UpSampleToBalance(train_x,train_y,train_M)
saved_pca, saved_NN, saved_CN = old_model_bundle.components()
train_x = saved_pca.transform(train_x)
# Freeze the layers except the last 2 layers
for layer in saved_NN.layers[:-2]:
layer.trainable = False
for layer in saved_CN.layers[:-2]:
layer.trainable = False
#[print(layer, layer.trainable) for layer in saved_CN.layers]
#[print(layer, layer.trainable) for layer in saved_NN.layers]
bs,ep = BatchSize,EpochNum
additional_layer,act = 1024,"relu"
finetuned_NN = MLPFineTune(saved_NN,additional_layer,act)
finetuned_CN = CNNFineTune(saved_CN,additional_layer,act)
print("\n\nSTEP 3f: Fine-tuning Filter 1 (MLP using X,Y)... ")
finetuned_NN.fit(train_x, train_y, batch_size=bs, epochs=ep, verbose=1) # Main MLP-fine-tuning.
print(" ...done.\n")
print("\n\nSTEP 4f: Fine-tuning Filter 2 (CNN using X,Y)... ")
finetuned_CN.fit(train_M, train_y, batch_size=bs, epochs=ep, verbose=1) # Main CNN-fine-tuning
print(" ...done.\n")
return ModelBundle(generate_network_name(old_model_bundle.name()),
saved_pca, finetuned_NN, finetuned_CN,
base_network = old_model_bundle)
####
def train_model_bundle(data, NumMats,
DoPCA = True,
PCAk = 23,
BatchSize = 2000,
EpochNum = 100,
StepSizeMLP = 1e-5,
StepSizeCNN = 1e-5,
Balancing = True):
train_x, train_y, train_M = data
bs, ep = BatchSize, EpochNum
if Balancing:
train_x,train_y,train_M = UpSampleToBalance(train_x,train_y,train_M)
# Substantial data processing.
if DoPCA:
train_x,pca = PerformPCA(PCAk, train_x)
else:
pca = None
# ** SUPERVISED: MULTILAYER PERCEPTRON
print("\n\nSTEP 3: Training Filter 1 (MLP using X,Y)... ")
# hlsizes,numiters,act = (100,1000,1000,1000,1000,100,100), 100, "relu"
hlsizes,numiters,act = (100,1000,1000), 100, "relu"
NN = MLPClassifier0(hlsizes,StepSizeMLP,act,train_x.shape[1])
NN.fit(train_x, train_y, batch_size=bs, epochs=ep, verbose=1) # Main MLP-Training.
print(" ...done.")
# ** SUPERVISED: CONVNET
# hyperparameters are contained in util.py
print("\n\nSTEP 4: Training Filter 2 (CNN using M,Y)... ")
CN = CNNClassifier(NumMats,train_M.shape[1],StepSizeCNN)
CN.fit(train_M, train_y, batch_size=bs, epochs=ep, verbose=1) # Main CNN-Training
print(" ...done.\n")
# ** SAVE WEIGHTS & MODELS
paramsNN,paramsCN = [hlsizes,StepSizeMLP,StepSizeCNN,numiters],[bs,ep]
return ModelBundle(generate_network_name(), pca, NN, CN), paramsNN, paramsCN
###############################################################################################
# Classifier constructors.
def CNNClassifier(k,l,ss):
model = Sequential()
# model.add(Conv2D(22, (3, 3), activation='relu',input_shape=(21, 21, k)))
if l==5:
model.add(Conv2D(64, kernel_size=3, activation='relu',input_shape=(l, l, l)))
elif l==21:
model.add(Conv2D(64, kernel_size=3, activation='relu',input_shape=(l, l, k)))
# model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(Conv2D(16, kernel_size=3, activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) #converts 2D feature maps to 1D feature vectors
model.add(Dense(100, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
sgd = optimizers.SGD(lr=ss, decay=1e-6, momentum=0.9, nesterov=True)
opti = optimizers.Adam()
model.compile(loss='binary_crossentropy',
optimizer=opti,
metrics=['accuracy'])
return model
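# Illustrative usage (assuming 21x21 cohomology matrices and NumMats == 2, as in the
# default dataShape): CNNClassifier(2, 21, 1e-5) builds a model expecting inputs of
# shape (n_samples, 21, 21, 2).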
def MLPClassifier0(hlsizes,ss,act,insz):
model = Sequential()
model.add(Dense(hlsizes[0], input_dim=insz, kernel_initializer="uniform", activation = act))
for i in range(len(hlsizes)-1):
model.add(Dense(hlsizes[i+1], kernel_initializer="uniform", activation=act))
model.add(Dense(1, kernel_initializer="uniform", activation='sigmoid'))
#sgd = optimizers.SGD(lr=ss, momentum=0.9, nesterov=True)
opti = optimizers.Adam()
model.compile(loss='binary_crossentropy',
optimizer=opti,
metrics=['accuracy'])
return model
def CNNFineTune(oldmodel,numlay,act):
model = Sequential()
model.add(oldmodel)
# Add new layers
model.add(Dense(numlay, activation=act))
model.add(Dense(1, activation='sigmoid'))
#sgd = optimizers.SGD(lr=ss, decay=1e-6, momentum=0.9, nesterov=True)
opti = optimizers.Adam()
model.compile(loss='binary_crossentropy',
optimizer=opti,
metrics=['accuracy'])
return model
def MLPFineTune(oldmodel,numlay,act):
model = Sequential()
model.add(oldmodel)
# Add new layers
model.add(Dense(numlay, activation=act))
model.add(Dense(1, activation='sigmoid'))
#sgd = optimizers.SGD(lr=ss, momentum=0.9, nesterov=True)
opti = optimizers.Adam()
model.compile(loss='binary_crossentropy',
optimizer=opti,
metrics=['accuracy'])
return model
| 7,500 | 33.726852 | 99 |
py
|
period_graph
|
period_graph-master/period_graph/src/neural-network/tests/test_data_partition_consistency.py
|
##############################################################################################
#
# Test for data partition consistency.
#
##############################################################################################
#
# This test will ONLY work on a particular developer machine ('doob', on the Dartmouth math
# department cluster).
#
# This script needs to be run in the /neural_network/ directory.
#
# IMPORTANT NOTE: The RandomSampler object uses its own **hard-coded** random seed, in order
#                 to force consistency between the data-sets in case the train/analyze setup
#                 is run at completely different times. Thus, the 'random_seed' parameter
#                 below has no real effect. This design fault might be corrected in a
#                 future iteration.
#
##############################################################################################
# Imports
import os, sys, scipy.io, scipy.linalg, random
from time import time
import numpy
# Adjust the path to find the config files
NN_PATH = "/home/akulkarn/period_graph/period_graph/src/neural-network/"
sys.path.insert(1, NN_PATH)
##############################################################################################
#
# Testing config setup. (Selectively change the config file variables for the test.)
from NNCONFIG import *
INPUT_DIR = "/home/akulkarn/Gauss-Manin-data"
# Data management parameters.
MAX_INPUT_DATA_SIZE = "1MB"
ttratio = 0.3
dataShape = {"edgesX-*.csv":2*35+1, "timingsY-*.csv":3+1, "DCM01-*.csv":21**2+1, "DCM10-*.csv":21**2+1}
NumMats = 2
random_seed = 132456789
##############################################################################################
# Secondary imports
from util import *
from data_handling import *
from model_bundle import *
from AI_functions import *
# Force this inside the submodule
import data_handling
data_handling.MAX_INPUT_DATA_SIZE = "1MB"
##############################################################################################
## First, check that numpy.random seed reset is consistent.
numpy.random.seed(random_seed)
first_rand = [numpy.random.rand() for i in range(100)]
numpy.random.seed(random_seed)
second_rand = [numpy.random.rand() for i in range(100)]
assert first_rand == second_rand
##############################################################################################
# Start main test. The idea of the test is to check that running the program twice with
# the same random initial conditions produces the same DataSet.
data_sets = []
for i in range(2):
# Set the seed to conduct the main test.
numpy.random.seed(random_seed)
sampler = RandomSampler
print("Using sampler: ", sampler.__name__)
# Read and process the data.
data = ReadDataAndFormat(INPUT_DIR, dataShape, NumMats, "training", ttratio, Sampler=sampler, verbose=False)
#data = KH_circumvent(INPUT_DIR, dataShape, NumMats, "training", Sampler=sampler, ttratio, verbose=False)
train_x, train_y, train_M = data
print(len(train_y))
print("\n\n# successes in original training set: ",
np.sum(train_y)," / ",train_y.shape[0],
" total training samples.")
data_sets += [data]
# Compare the outputs.
for ds in data_sets:
train_x, train_y, train_M = ds
assert all(numpy.array_equal(a,b) for a,b in zip(data_sets[0], ds))
| 3,407 | 30.266055 | 112 |
py
|
period_graph
|
period_graph-master/period_graph/tests/training3.py
|
#
# This file tests generating training data with the generator option and total jobs option.
#
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
create_training_data(opts={'generator':'complete4', 'total-jobs':10})
| 320 | 19.0625 | 91 |
py
|
period_graph
|
period_graph-master/period_graph/tests/star2.py
|
#
# This file tests whether the periods are correctly computed for a small star
# based around the Fermat vertex.
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = [[x**4 + y**4 + z**4 + w**4, x**4 + y**4 + z**4 + z*w**3],
[x**4 + y**4 + z**4 + w**4, x**4 + y**4 + z**3*w + w**4],
[x**4 + y**4 + z**4 + w**4, x**4 + y**4 + z**4 + x*w**3],
[x**4 + y**4 + z**4 + w**4, x**4 + y**4 + z**4 + y*w**3],
[x**4 + y**4 + z**4 + w**4, x**4 + y**4 + x*z**3 + w**4]]
# Run the program
first_ivps(E)
integrate_edge_odes()
G = load_phase_III_graph()
initialize_fermat_directory()
carry_periods(G=G)
# Verify the results.
# assert len(os.listdir("../src/periods/")) == 6
# Run this afterward to check the period matrices.
res = subprocess.call(["magma", "-b", "verify-periods-are-correct.m"], cwd=TEST_PATH)
assert res == 0
| 938 | 27.454545 | 85 |
py
|
period_graph
|
period_graph-master/period_graph/tests/training2.py
|
#
# This file tests generating training data with the generator option
# WARNING: This test takes several hours.
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
create_training_data(opts={'generator':'complete4', 'generate-quartics':None})
| 344 | 22 | 78 |
py
|
period_graph
|
period_graph-master/period_graph/tests/all.py
|
import period_graph.tests.star1
import period_graph.tests.star2
import period_graph.tests.lankystar1
import period_graph.tests.lankystar2
import period_graph.tests.training1
import period_graph.tests.training2
import period_graph.tests.training3
import period_graph.tests.neural_network1
import period_graph.tests.neural_network2
| 336 | 20.0625 | 41 |
py
|
period_graph
|
period_graph-master/period_graph/tests/neural_network1.py
|
#
# This file tests basic usage of the neural network eval function.
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = [(x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + z*w**3),
(x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**3*w + w**4),
(x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + x*w**3),
(x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + y*w**3),
(x**4 + y**4 + z**4 + w**4,x**4 + y**4 + x*z**3 + w**4)]
# Run the training program
sE, _, _ = nn_sort(E)
assert Set(sE) == Set(E)
| 608 | 24.375 | 66 |
py
|
period_graph
|
period_graph-master/period_graph/tests/neural_network2.py
|
#
# This file tests large input sets, invoking parallelization.
#
# TODO: We need to figure out how to fix paths with regard to the tests.
import os, subprocess
from sage.all import *
from period_graph import SELF_PATH, nn_sort
TEST_PATH = os.path.join(os.path.join(SELF_PATH, "tests", ""))
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = 20*[[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + z*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**3*w + w**4],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + x*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + y*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + x*z**3 + w**4]]
# Run the training program
sE, _, _ = nn_sort(E)
| 734 | 29.625 | 72 |
py
|
period_graph
|
period_graph-master/period_graph/tests/lankystar2.py
|
#
# This file tests whether the periods are correctly computed for a small graph
# based around the Fermat vertex. Some vertices are of distance 2 away from Fermat.
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = [[x**4 + y**4 + z**4 + w**4, x**4 + y**4 + z**4 + z*w**3],
[x**3*w + y**4 + z**4 + w**4, x**4 + x**3*w + y**4 + z**4 + w**4]]
# Run the program
ivps(E)
integrate_edge_odes()
# Build a graph with some permutation links thrown in.
G = load_phase_III_graph()
# Compute the periods.
initialize_fermat_directory()
# Add a permutation edge.
u = quartic_data(x**4 + y**4 + z**4 + z*w**3)
v = quartic_data(u.s4label)
G.add_vertex(v)
G.add_edge(u, v, EdgeLabel(None, 'forward', 'permutation'))
# Compute the periods.
carry_periods(G=G)
# Verify the results.
# assert len(os.listdir("../src/periods/")) == 6
# Run this afterward to check the period matrices.
res = subprocess.call(["magma", "-b", "verify-periods-are-correct.m"], cwd=TEST_PATH)
assert res == 0
| 1,087 | 23.177778 | 85 |
py
|
period_graph
|
period_graph-master/period_graph/tests/training1.py
|
#
# This file tests basic usage of the training data creator.
#
# TODO: We need to figure out how to fix paths with regard to the tests.
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = [[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + z*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**3*w + w**4],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + x*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + y*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + x*z**3 + w**4]]
# Run the training program
write_user_edges_to_file(E)
create_training_data()
| 676 | 28.434783 | 72 |
py
|
period_graph
|
period_graph-master/period_graph/tests/star1.py
|
#
# This file tests whether the periods are correctly computed for a small star
# based around the Fermat vertex.
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = [[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + z*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**3*w + w**4],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + x*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + y*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + x*z**3 + w**4]]
# Run the program
ivps(E)
integrate_edge_odes()
G = load_phase_III_graph()
initialize_fermat_directory()
carry_periods(G=G)
# Verify the results.
# assert len(os.listdir("../src/periods/")) == 6
# Run this afterward to check the period matrices.
res = subprocess.call(["magma", "-b", "verify-periods-are-correct.m"], cwd=TEST_PATH)
assert res == 0
| 928 | 26.323529 | 85 |
py
|
period_graph
|
period_graph-master/period_graph/tests/__init__.py
| 2 | 0 | 0 |
py
|
|
period_graph
|
period_graph-master/period_graph/tests/lankystar1.py
|
#
# This file tests whether the periods are correctly computed for a small graph
# based around the Fermat vertex. Some vertices are of distance 2 away from Fermat.
#
import os, subprocess
from sage.all import *
from period_graph import *
# Setup test edges.
R = PolynomialRing(QQ, 4, "xyzw")
(x,y,z,w) = R.gens()
E = [[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + z*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**3*w + w**4],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + x*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + z**4 + y*w**3],
[x**4 + y**4 + z**4 + w**4,x**4 + y**4 + x*z**3 + w**4]]
# Run the program
ivps(E)
integrate_edge_odes()
# Build a graph with some permutation links thrown in.
G = load_phase_III_graph()
# Compute the periods.
initialize_fermat_directory()
# Add a permutation edge.
u = quartic_data(x**4 + y**4 + z**4 + z*w**3)
v = quartic_data(u.s4label)
G.add_vertex(v)
G.add_edge(u, v, EdgeLabel(None, 'forward', 'permutation'))
# Compute the periods.
carry_periods(G=G)
# Verify the results.
# assert len(os.listdir("../src/periods/")) == 6
# Run this afterward to check the period matrices.
res = subprocess.call(["magma", "-b", "verify-periods-are-correct.m"], cwd=TEST_PATH)
assert res == 0
| 1,261 | 25.851064 | 85 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='spacy-entity-linker',
version='1.0.3',
author='Emanuel Gerber',
author_email='[email protected]',
packages=['spacy_entity_linker'],
url='https://github.com/egerber/spacy-entity-linker',
license="MIT",
classifiers=["Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Cython",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6"
],
description='Linked Entity Pipeline for spaCy',
long_description=long_description,
long_description_content_type="text/markdown",
zip_safe=True,
install_requires=[
'spacy>=3.0.0',
'numpy>=1.0.0',
'tqdm'
],
entry_points={
'spacy_factories': 'entityLinker = spacy_entity_linker.EntityLinker:EntityLinker'
}
)
| 1,326 | 26.081633 | 89 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/__main__.py
|
import sys
import tarfile
import urllib.request
import tqdm
import os
class DownloadProgressBar(tqdm.tqdm):
"""
Code taken from https://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
"""
def update_to(self, chunk_id=1, max_chunk_size=1, total_size=None):
if total_size is not None:
self.total = total_size
self.update(chunk_id * max_chunk_size - self.n)
def download_knowledge_base(
file_url="https://huggingface.co/MartinoMensio/spaCy-entity-linker/resolve/main/knowledge_base.tar.gz"
):
OUTPUT_TAR_FILE = os.path.abspath(
os.path.dirname(__file__)) + '/../data_spacy_entity_linker/wikidb_filtered.tar.gz'
OUTPUT_DB_PATH = os.path.abspath(os.path.dirname(__file__)) + '/../data_spacy_entity_linker'
if not os.path.exists(OUTPUT_DB_PATH):
os.makedirs(OUTPUT_DB_PATH)
with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc='Downloading knowledge base') as dpb:
urllib.request.urlretrieve(file_url, filename=OUTPUT_TAR_FILE, reporthook=dpb.update_to)
tar = tarfile.open(OUTPUT_TAR_FILE)
tar.extractall(OUTPUT_DB_PATH)
tar.close()
os.remove(OUTPUT_TAR_FILE)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("No arguments given.")
        sys.exit(1)
command = sys.argv.pop(1)
if command == "download_knowledge_base":
download_knowledge_base()
else:
raise ValueError("Unrecognized command given. If you are trying to install the knowledge base, run "
"'python -m spacy_entity_linker \"download_knowledge_base\"'.")
| 1,628 | 32.244898 | 110 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/SpanInfo.py
|
"""
SpanInfo class
Stores the info of spacy.tokens.Span (start, end and text of a span) by making it serializable
"""
import spacy
import srsly
class SpanInfo:
@staticmethod
def from_span(span: spacy.tokens.Span):
return SpanInfo(span.start, span.end, span.text)
def __init__(self, start: int, end: int, text: str):
self.start = start
self.end = end
self.text = text
def __repr__(self) -> str:
return self.text
def __len__(self):
return self.end - self.start
def __eq__(self, __o: object) -> bool:
if isinstance(__o, SpanInfo) or isinstance(__o, spacy.tokens.Span):
return self.start == __o.start and self.end == __o.end and self.text == __o.text
return False
def get_span(self, doc: spacy.tokens.Doc):
"""
Returns the real spacy.tokens.Span of the doc from the stored info"""
return doc[self.start:self.end]
@srsly.msgpack_encoders("SpanInfo")
def serialize_spaninfo(obj, chain=None):
if isinstance(obj, SpanInfo):
result = {
"start": obj.start,
"end": obj.end,
"text": obj.text,
}
return result
# otherwise return the original object so another serializer can handle it
return obj if chain is None else chain(obj)
@srsly.msgpack_decoders("SpanInfo")
def deserialize_spaninfo(obj, chain=None):
if "start" in obj:
return SpanInfo(obj['start'], obj['end'], obj['text'])
# otherwise return the original object so another serializer can handle it
return obj if chain is None else chain(obj)
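# Hedged usage sketch (not part of the original file): the msgpack hooks above let
# a SpanInfo survive a srsly round-trip, which is what allows Doc serialization to
# carry the linked-entity extensions.
def _example_spaninfo_roundtrip():
    info = SpanInfo(0, 2, "Elon Musk")
    data = srsly.msgpack_dumps({"span": info})
    restored = srsly.msgpack_loads(data)["span"]
    assert restored == info
    return restored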
| 1,635 | 28.745455 | 94 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/EntityCollection.py
|
import srsly
from collections import Counter, defaultdict
from .DatabaseConnection import get_wikidata_instance
MAX_ITEMS_PREVIEW=20
class EntityCollection:
def __init__(self, entities=[]):
self.entities = entities
def __iter__(self):
for entity in self.entities:
yield entity
def __getitem__(self, item):
return self.entities[item]
def __len__(self):
return len(self.entities)
def append(self, entity):
self.entities.append(entity)
def get_categories(self, max_depth=1):
categories = []
for entity in self.entities:
categories += entity.get_categories(max_depth)
return categories
def print_super_entities(self, max_depth=1, limit=10):
wikidataInstance = get_wikidata_instance()
all_categories = []
category_to_entites = defaultdict(list)
for e in self.entities:
for category in e.get_categories(max_depth):
category_to_entites[category].append(e)
all_categories.append(category)
counter = Counter()
counter.update(all_categories)
for category, frequency in counter.most_common(limit):
print("{} ({}) : {}".format(wikidataInstance.get_entity_name(category), frequency,
','.join([str(e) for e in category_to_entites[category]])))
def __repr__(self) -> str:
preview_str="<EntityCollection ({} entities):".format(len(self))
for index,entity_element in enumerate(self):
if index>MAX_ITEMS_PREVIEW:
preview_str+="\n...{} more".format(len(self)-MAX_ITEMS_PREVIEW)
break
preview_str+="\n-{}".format(entity_element.get_preview_string())
preview_str+=">"
return preview_str
def pretty_print(self):
for entity in self.entities:
entity.pretty_print()
def grouped_by_super_entities(self, max_depth=1):
counter = Counter()
counter.update(self.get_categories(max_depth))
return counter
def get_distinct_categories(self, max_depth=1):
return list(set(self.get_categories(max_depth)))
@srsly.msgpack_encoders("EntityCollection")
def serialize_obj(obj, chain=None):
if isinstance(obj, EntityCollection):
return {
"entities": obj.entities,
}
# otherwise return the original object so another serializer can handle it
return obj if chain is None else chain(obj)
@srsly.msgpack_decoders("EntityCollection")
def deserialize_obj(obj, chain=None):
if "entities" in obj:
return EntityCollection(entities=obj["entities"])
# otherwise return the original object so another serializer can handle it
return obj if chain is None else chain(obj)
| 2,834 | 29.815217 | 99 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/EntityClassifier.py
|
from itertools import groupby
import numpy as np
class EntityClassifier:
def __init__(self):
pass
def _get_grouped_by_length(self, entities):
sorted_by_len = sorted(entities, key=lambda entity: len(entity.get_span()), reverse=True)
entities_by_length = {}
for length, group in groupby(sorted_by_len, lambda entity: len(entity.get_span())):
entities = list(group)
entities_by_length[length] = entities
return entities_by_length
def _filter_max_length(self, entities):
entities_by_length = self._get_grouped_by_length(entities)
max_length = max(list(entities_by_length.keys()))
return entities_by_length[max_length]
def _select_max_prior(self, entities):
priors = [entity.get_prior() for entity in entities]
return entities[np.argmax(priors)]
def _get_casing_difference(self, word1, original):
difference = 0
for w1, w2 in zip(word1, original):
if w1 != w2:
difference += 1
return difference
def _filter_most_similar(self, entities):
similarities = np.array(
[self._get_casing_difference(entity.get_span().text, entity.get_original_alias()) for entity in entities])
min_indices = np.where(similarities == similarities.min())[0].tolist()
return [entities[i] for i in min_indices]
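    # Selection pipeline (summary of the chained filters above): keep the
    # candidates with the longest span, then those whose surface form differs
    # least in casing from the matched alias, then pick the one with the
    # highest prior.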
def __call__(self, entities):
filtered_by_length = self._filter_max_length(entities)
filtered_by_casing = self._filter_most_similar(filtered_by_length)
return self._select_max_prior(filtered_by_casing)
| 1,638 | 31.78 | 118 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/EntityElement.py
|
import spacy
import srsly
from .DatabaseConnection import get_wikidata_instance
from .EntityCollection import EntityCollection
from .SpanInfo import SpanInfo
class EntityElement:
def __init__(self, row, span):
self.identifier = row[0]
self.prior = 0
self.original_alias = None
self.in_degree = None
self.label = None
self.description = None
if len(row) > 1:
self.label = row[1]
if len(row) > 2:
self.description = row[2]
if len(row) > 3 and row[3]:
self.prior = row[3]
if len(row) > 4 and row[4]:
self.in_degree = row[4]
if len(row) > 5 and row[5]:
self.original_alias = row[5]
self.url="https://www.wikidata.org/wiki/Q{}".format(self.get_id())
if span:
self.span_info = SpanInfo.from_span(span)
else:
# sometimes the constructor is called with None as second parameter (e.g. in get_sub_entities/get_super_entities)
self.span_info = None
self.chain = None
self.chain_ids = None
self.wikidata_instance = get_wikidata_instance()
def get_in_degree(self):
return self.in_degree
def get_original_alias(self):
return self.original_alias
def is_singleton(self):
return len(self.get_chain()) == 0
def get_span(self, doc: spacy.tokens.Doc=None):
"""
Returns the span of the entity in the document.
:param doc: the document in which the entity is contained
:return: the span of the entity in the document
If the doc is not None, it returns a real spacy.tokens.Span.
Otherwise it returns the instance of SpanInfo that emulates the behaviour of a spacy.tokens.Span
"""
if doc is not None:
# return a real spacy.tokens.Span
return self.span_info.get_span(doc)
# otherwise return the instance of SpanInfo that emulates the behaviour of a spacy.tokens.Span
return self.span_info
def get_label(self):
return self.label
def get_id(self):
return self.identifier
def get_prior(self):
return self.prior
def get_chain(self, max_depth=10):
if self.chain is None:
self.chain = self.wikidata_instance.get_chain(self.identifier, max_depth=max_depth, property=31)
return self.chain
def is_category(self):
pass
def is_leaf(self):
pass
def get_categories(self, max_depth=10):
return self.wikidata_instance.get_categories(self.identifier, max_depth=max_depth)
def get_sub_entities(self, limit=10):
return EntityCollection(
[EntityElement(row, None) for row in self.wikidata_instance.get_children(self.get_id(), limit)])
def get_super_entities(self, limit=10):
return EntityCollection(
[EntityElement(row, None) for row in self.wikidata_instance.get_parents(self.get_id(), limit)])
def get_subclass_hierarchy(self):
chain = self.wikidata_instance.get_chain(self.identifier, max_depth=5, property=279)
return [self.wikidata_instance.get_entity_name(el[0]) for el in chain]
def get_instance_of_hierarchy(self):
chain = self.wikidata_instance.get_chain(self.identifier, max_depth=5, property=31)
return [self.wikidata_instance.get_entity_name(el[0]) for el in chain]
def get_chain_ids(self, max_depth=10):
if self.chain_ids is None:
self.chain_ids = set([el[0] for el in self.get_chain(max_depth=max_depth)])
return self.chain_ids
def get_description(self):
if self.description:
return self.description
else:
return ""
def is_intersecting(self, other_element):
return len(self.get_chain_ids().intersection(other_element.get_chain_ids())) > 0
def serialize(self):
return {
"id": self.get_id(),
"label": self.get_label(),
"span": self.get_span()
}
def pretty_print(self):
print(self.__repr__())
def get_url(self):
return self.url
def __repr__(self):
return "<EntityElement: {}>".format(self.get_preview_string())
def get_preview_string(self):
return "{0:<10} {1:<25} {2:<50}".format(self.get_url(),self.get_label(),self.get_description()[:100])
def pretty_string(self, description=False):
if description:
return "{} => {} <{}>".format(self.span_info, self.get_label(), self.get_description())
else:
return "{} => {}".format(self.span_info, self.get_label())
# TODO: this method has never worked because the custom attribute is not registered properly
# def save(self, category):
# for span in self.span:
# span.sent._.linked_entities.append(
# {"id": self.identifier, "range": [span.start, span.end + 1], "category": category})
def __str__(self):
label = self.get_label()
if label:
return label
else:
return ""
def __eq__(self, other):
return isinstance(other, EntityElement) and other.get_id() == self.get_id()
@srsly.msgpack_encoders("EntityElement")
def serialize_obj(obj, chain=None):
if isinstance(obj, EntityElement):
result = {
"identifier": obj.identifier,
"label": obj.label,
"description": obj.description,
"prior": obj.prior,
"in_degree": obj.in_degree,
"original_alias": obj.original_alias,
"span_info": obj.span_info,
}
return result
# otherwise return the original object so another serializer can handle it
return obj if chain is None else chain(obj)
@srsly.msgpack_decoders("EntityElement")
def deserialize_obj(obj, chain=None):
if "identifier" in obj:
row = [obj['identifier'], obj['label'], obj['description'], obj['prior'], obj['in_degree'], obj['original_alias']]
span_info = obj['span_info']
return EntityElement(row, span_info)
# otherwise return the original object so another serializer can handle it
return obj if chain is None else chain(obj)
| 6,250 | 32.972826 | 125 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/DatabaseConnection.py
|
import sqlite3
import os
from .__main__ import download_knowledge_base
MAX_DEPTH_CHAIN = 10
P_INSTANCE_OF = 31
P_SUBCLASS = 279
MAX_ITEMS_CACHE = 100000
conn = None
entity_cache = {}
chain_cache = {}
DB_DEFAULT_PATH = os.path.abspath(os.path.join(__file__, "../../data_spacy_entity_linker/wikidb_filtered.db"))
wikidata_instance = None
def get_wikidata_instance():
global wikidata_instance
if wikidata_instance is None:
wikidata_instance = WikidataQueryController()
return wikidata_instance
class WikidataQueryController:
def __init__(self):
self.conn = None
self.cache = {
"entity": {},
"chain": {},
"name": {}
}
self.init_database_connection()
def _get_cached_value(self, cache_type, key):
return self.cache[cache_type][key]
def _is_cached(self, cache_type, key):
return key in self.cache[cache_type]
def _add_to_cache(self, cache_type, key, value):
if len(self.cache[cache_type]) < MAX_ITEMS_CACHE:
self.cache[cache_type][key] = value
def init_database_connection(self, path=DB_DEFAULT_PATH):
# check if the database exists
        if not os.path.exists(path):
# Automatically download the knowledge base if it isn't already
download_knowledge_base()
self.conn = sqlite3.connect(path, check_same_thread=False)
def clear_cache(self):
self.cache["entity"].clear()
self.cache["chain"].clear()
self.cache["name"].clear()
def get_entities_from_alias(self, alias):
c = self.conn.cursor()
if self._is_cached("entity", alias):
return self._get_cached_value("entity", alias).copy()
query_alias = """SELECT j.item_id,j.en_label, j.en_description,j.views,j.inlinks,a.en_alias
FROM aliases as a LEFT JOIN joined as j ON a.item_id = j.item_id
WHERE a.en_alias_lowercase = ? AND j.item_id NOT NULL"""
c.execute(query_alias, [alias.lower()])
fetched_rows = c.fetchall()
self._add_to_cache("entity", alias, fetched_rows)
return fetched_rows
def get_instances_of(self, item_id, properties=[P_INSTANCE_OF, P_SUBCLASS], count=1000):
query = "SELECT source_item_id from statements where target_item_id={} and edge_property_id IN ({}) LIMIT {}".format(
item_id, ",".join([str(prop) for prop in properties]), count)
c = self.conn.cursor()
c.execute(query)
res = c.fetchall()
return [e[0] for e in res]
def get_entity_name(self, item_id):
if self._is_cached("name", item_id):
return self._get_cached_value("name", item_id)
c = self.conn.cursor()
query = "SELECT en_label from joined WHERE item_id=?"
c.execute(query, [item_id])
res = c.fetchone()
if res and len(res):
if res[0] is None:
self._add_to_cache("name", item_id, 'no label')
else:
self._add_to_cache("name", item_id, res[0])
else:
self._add_to_cache("name", item_id, '<none>')
return self._get_cached_value("name", item_id)
def get_entity(self, item_id):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"WHERE j.item_id=={}".format(item_id)
res = c.execute(query)
return res.fetchone()
def get_children(self, item_id, limit=100):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"JOIN statements as s on j.item_id=s.source_item_id " \
"WHERE s.target_item_id={} and s.edge_property_id IN (279,31) LIMIT {}".format(item_id, limit)
res = c.execute(query)
return res.fetchall()
def get_parents(self, item_id, limit=100):
c = self.conn.cursor()
query = "SELECT j.item_id,j.en_label,j.en_description,j.views,j.inlinks from joined as j " \
"JOIN statements as s on j.item_id=s.target_item_id " \
"WHERE s.source_item_id={} and s.edge_property_id IN (279,31) LIMIT {}".format(item_id, limit)
res = c.execute(query)
return res.fetchall()
def get_categories(self, item_id, max_depth=10):
chain = []
edges = []
self._append_chain_elements(item_id, 0, chain, edges, max_depth, [P_INSTANCE_OF, P_SUBCLASS])
return [el[0] for el in chain]
def get_chain(self, item_id, max_depth=10, property=P_INSTANCE_OF):
chain = []
edges = []
self._append_chain_elements(item_id, 0, chain, edges, max_depth, property)
return chain
def get_recursive_edges(self, item_id):
chain = []
edges = []
        self._append_chain_elements(item_id, 0, chain, edges)
return edges
def _append_chain_elements(self, item_id, level=0, chain=None, edges=None, max_depth=10, prop=P_INSTANCE_OF):
if chain is None:
chain = []
if edges is None:
edges = []
properties = prop
if type(prop) != list:
properties = [prop]
if self._is_cached("chain", (item_id, max_depth)):
chain += self._get_cached_value("chain", (item_id, max_depth)).copy()
return
# prevent infinite recursion
if level >= max_depth:
return
c = self.conn.cursor()
query = "SELECT target_item_id,edge_property_id from statements where source_item_id={} and edge_property_id IN ({})".format(
item_id, ",".join([str(prop) for prop in properties]))
# set value for current item in order to prevent infinite recursion
self._add_to_cache("chain", (item_id, max_depth), [])
for target_item in c.execute(query):
chain_ids = [el[0] for el in chain]
if not (target_item[0] in chain_ids):
chain += [(target_item[0], level + 1)]
edges.append((item_id, target_item[0], target_item[1]))
self._append_chain_elements(target_item[0],
level=level + 1,
chain=chain,
edges=edges,
max_depth=max_depth,
prop=prop)
self._add_to_cache("chain", (item_id, max_depth), chain)
if __name__ == '__main__':
queryInstance = WikidataQueryController()
queryInstance.init_database_connection()
print(queryInstance.get_categories(13191, max_depth=1))
print(queryInstance.get_categories(13191, max_depth=1))
| 6,837 | 32.356098 | 133 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/EntityCandidates.py
|
MAX_ITEMS_PREVIEW=20
class EntityCandidates:
def __init__(self, entity_elements):
self.entity_elements = entity_elements
def __iter__(self):
for entity in self.entity_elements:
yield entity
def __len__(self):
return len(self.entity_elements)
def __getitem__(self, item):
return self.entity_elements[item]
def pretty_print(self):
for entity in self.entity_elements:
entity.pretty_print()
def __repr__(self) -> str:
preview_str=""
for index,entity_element in enumerate(self):
if index>MAX_ITEMS_PREVIEW:
break
preview_str+="{}\n".format(entity_element.get_preview_string())
return preview_str
def __str__(self):
return str(["entity {}: {} (<{}>)".format(i, entity.get_label(), entity.get_description()) for i, entity in
enumerate(self.entity_elements)])
| 953 | 27.058824 | 115 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/EntityLinker.py
|
from spacy.tokens import Doc, Span
from spacy.language import Language
from .EntityClassifier import EntityClassifier
from .EntityCollection import EntityCollection
from .TermCandidateExtractor import TermCandidateExtractor
@Language.factory('entityLinker')
class EntityLinker:
def __init__(self, nlp, name):
Doc.set_extension("linkedEntities", default=EntityCollection(), force=True)
Span.set_extension("linkedEntities", default=None, force=True)
def __call__(self, doc):
tce = TermCandidateExtractor(doc)
classifier = EntityClassifier()
for sent in doc.sents:
sent._.linkedEntities = EntityCollection([])
entities = []
for termCandidates in tce:
entityCandidates = termCandidates.get_entity_candidates()
if len(entityCandidates) > 0:
entity = classifier(entityCandidates)
span = doc[entity.span_info.start:entity.span_info.end]
# Add the entity to the sentence-level EntityCollection
span.sent._.linkedEntities.append(entity)
# Also associate the token span with the entity
span._.linkedEntities = entity
# And finally append to the document-level collection
entities.append(entity)
doc._.linkedEntities = EntityCollection(entities)
return doc
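# Hedged usage sketch (mirrors the test suite; not part of the original file).
# Requires the en_core_web_sm model; the knowledge base is downloaded on first use.
def _example_usage(text="Elon Musk was born in South Africa."):
    import spacy
    nlp = spacy.load("en_core_web_sm")
    nlp.add_pipe("entityLinker", last=True)
    doc = nlp(text)
    doc._.linkedEntities.pretty_print() # document-level collection
    for sent in doc.sents:
        sent._.linkedEntities.pretty_print() # sentence-level collections
    return doc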
| 1,398 | 35.815789 | 83 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/TermCandidateExtractor.py
|
from .TermCandidate import TermCandidate
class TermCandidateExtractor:
def __init__(self, doc):
self.doc = doc
def __iter__(self):
for sent in self.doc.sents:
for candidate in self._get_candidates_in_sent(sent, self.doc):
yield candidate
def _get_candidates_in_sent(self, sent, doc):
roots = list(filter(lambda token: token.dep_ == "ROOT", sent))
if len(roots) < 1:
return []
root = roots[0]
excluded_children = []
candidates = []
def get_candidates(node, doc):
if (node.pos_ in ["PROPN", "NOUN"]) and node.pos_ not in ["PRON"]:
term_candidates = TermCandidate(doc[node.i:node.i + 1])
for child in node.children:
start_index = min(node.i, child.i)
end_index = max(node.i, child.i)
if child.dep_ == "compound" or child.dep_ == "amod":
subtree_tokens = list(child.subtree)
if all([c.dep_ == "compound" for c in subtree_tokens]):
start_index = min([c.i for c in subtree_tokens])
term_candidates.append(doc[start_index:end_index + 1])
if not child.dep_ == "amod":
term_candidates.append(doc[start_index:start_index + 1])
excluded_children.append(child)
if child.dep_ == "prep" and child.text == "of":
end_index = max([c.i for c in child.subtree])
term_candidates.append(doc[start_index:end_index + 1])
candidates.append(term_candidates)
for child in node.children:
if child in excluded_children:
continue
get_candidates(child, doc)
get_candidates(root, doc)
return candidates
| 1,948 | 33.803571 | 84 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/__init__.py
|
try: # Python 3.8
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata # noqa: F401
from .EntityLinker import EntityLinker
pkg_meta = importlib_metadata.metadata(__name__.split(".")[0])
__version__ = pkg_meta["version"]
__all__ = [EntityLinker]
| 298 | 26.181818 | 62 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/spacy_entity_linker/TermCandidate.py
|
from .EntityCandidates import EntityCandidates
from .EntityElement import EntityElement
from .DatabaseConnection import get_wikidata_instance
class TermCandidate:
def __init__(self, span):
self.variations = [span]
def pretty_print(self):
print("Term Candidates are [{}]".format(self))
def append(self, span):
self.variations.append(span)
def has_plural(self, variation):
return any([t.tag_ == "NNS" for t in variation])
def get_singular(self, variation):
return ' '.join([t.text if t.tag_ != "NNS" else t.lemma_ for t in variation])
def __str__(self):
return ', '.join([variation.text for variation in self.variations])
def get_entity_candidates(self):
wikidata_instance = get_wikidata_instance()
entities_by_variation = {}
for variation in self.variations:
entities_by_variation[variation] = wikidata_instance.get_entities_from_alias(variation.text)
if self.has_plural(variation):
entities_by_variation[variation] += wikidata_instance.get_entities_from_alias(
self.get_singular(variation))
entity_elements = []
for variation, entities in entities_by_variation.items():
entity_elements += [EntityElement(entity, variation) for entity in entities]
return EntityCandidates(entity_elements)
| 1,394 | 34.769231 | 104 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_TermCandidateExtractor.py
|
import unittest
import spacy
import spacy_entity_linker.TermCandidateExtractor
class TestCandidateExtractor(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestCandidateExtractor, self).__init__(arg, *args, **kwargs)
| 252 | 24.3 | 74 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_EntityElement.py
|
import unittest
import spacy
class TestEntityElement(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestEntityElement, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def setUp(self):
self.nlp.add_pipe("entityLinker", last=True)
self.doc = self.nlp(
"Elon Musk was born in South Africa. Bill Gates and Steve Jobs come from the United States. The US are located in North America. A ship is made of wood.")
def tearDown(self):
self.nlp.remove_pipe("entityLinker")
def test_get_in_degree(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
in_degree = all_linked_entities[0].get_in_degree()
assert in_degree > 0
def test_get_original_alias(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
original_alias = all_linked_entities[0].get_original_alias()
assert original_alias == "Elon Musk"
def test_is_singleton(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
is_singleton = all_linked_entities[0].is_singleton()
assert is_singleton == False
is_singleton = all_linked_entities[-1].is_singleton()
assert is_singleton == True
def test_get_span(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
span = all_linked_entities[0].get_span()
real_span = doc[0:2]
assert span.text == real_span.text
assert span.start == real_span.start
assert span.end == real_span.end
def test_get_label(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
label = all_linked_entities[0].get_label()
assert label == "Elon Musk"
def test_get_id(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
id = all_linked_entities[0].get_id()
assert id > 0
def test_get_prior(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
prior = all_linked_entities[0].get_prior()
assert prior > 0
def test_get_chain(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
chain = all_linked_entities[0].get_chain()
assert chain != None
assert len(chain) > 0
def test_get_categories(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
categories = all_linked_entities[0].get_categories()
assert categories != None
assert len(categories) > 0
def test_get_sub_entities(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
# [-1] --> wood
sub_entities = all_linked_entities[-1].get_sub_entities()
assert sub_entities != None
assert len(sub_entities) > 0
def test_get_super_entities(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
super_entities = all_linked_entities[0].get_super_entities()
assert super_entities != None
assert len(super_entities) > 0
def test_get_subclass_hierarchy(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
# [5] --> US
hierarchy = all_linked_entities[5].get_subclass_hierarchy()
assert hierarchy != None
assert len(hierarchy) > 0
assert 'country' in hierarchy
def test_get_instance_of_hierarchy(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
# [5] --> US
hierarchy = all_linked_entities[5].get_instance_of_hierarchy()
assert hierarchy != None
assert len(hierarchy) > 0
assert 'country' in hierarchy
def test_get_chain_ids(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
chain_ids = all_linked_entities[0].get_chain_ids()
assert chain_ids != None
assert len(chain_ids) > 0
def test_get_description(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
description = all_linked_entities[0].get_description()
assert description != None
assert len(description) > 0
def test_is_intersecting(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
assert not all_linked_entities[0].is_intersecting(all_linked_entities[1])
# United States and US
assert all_linked_entities[4].is_intersecting(all_linked_entities[5])
def test_serialize(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
serialized = all_linked_entities[0].serialize()
assert serialized != None
assert len(serialized) > 0
assert 'id' in serialized
assert 'label' in serialized
assert 'span' in serialized
def test_pretty_print(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
all_linked_entities[0].pretty_print()
def test_get_url(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
url = all_linked_entities[0].get_url()
assert url != None
assert len(url) > 0
assert 'wikidata.org/wiki/Q' in url
def test___repr__(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
repr = all_linked_entities[0].__repr__()
assert repr != None
assert len(repr) > 0
def test___eq__(self):
doc = self.doc
all_linked_entities = doc._.linkedEntities
assert not all_linked_entities[0] == all_linked_entities[1]
assert all_linked_entities[4] == all_linked_entities[5]
| 5,786 | 29.457895 | 166 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_serialize.py
|
import unittest
import spacy
from multiprocessing.pool import ThreadPool
class TestSerialize(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestSerialize, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def test_serialize(self):
self.nlp.add_pipe("entityLinker", last=True)
text = "Apple is looking at buying U.K. startup for $1 billion"
doc = self.nlp(text)
serialised = doc.to_bytes()
doc2 = spacy.tokens.Doc(doc.vocab).from_bytes(serialised)
for ent, ent2 in zip(doc.ents, doc2.ents):
assert ent.text == ent2.text
assert ent.label_ == ent2.label_
linked = ent._.linkedEntities
linked2 = ent2._.linkedEntities
if linked:
assert linked.get_description() == linked2.get_description()
assert linked.get_id() == linked2.get_id()
assert linked.get_label() == linked2.get_label()
assert linked.get_span() == linked2.get_span()
assert linked.get_url() == linked2.get_url()
self.nlp.remove_pipe("entityLinker")
| 1,183 | 33.823529 | 76 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_EntityLinker.py
|
import unittest
import spacy
from spacy_entity_linker.EntityLinker import EntityLinker
class TestEntityLinker(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestEntityLinker, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def test_initialization(self):
self.nlp.add_pipe("entityLinker", last=True)
doc = self.nlp(
"Elon Musk was born in South Africa. Bill Gates and Steve Jobs come from in the United States")
doc._.linkedEntities.pretty_print()
doc._.linkedEntities.print_super_entities()
for sent in doc.sents:
sent._.linkedEntities.pretty_print()
self.nlp.remove_pipe("entityLinker")
def test_empty_root(self):
# test empty lists of roots (#9)
self.nlp.add_pipe("entityLinker", last=True)
doc = self.nlp(
'I was right."\n\n "To that extent."\n\n "But that was all."\n\n "No, no, m')
for sent in doc.sents:
sent._.linkedEntities.pretty_print()
# empty document
doc = self.nlp('\n\n')
for sent in doc.sents:
sent._.linkedEntities.pretty_print()
self.nlp.remove_pipe("entityLinker")
| 1,257 | 30.45 | 107 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_multiprocessing.py
|
import unittest
import spacy
from multiprocessing.pool import ThreadPool
class TestMultiprocessing(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestMultiprocessing, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def test_is_pipe_multiprocessing_safe(self):
self.nlp.add_pipe("entityLinker", last=True)
ents = [
'Apple',
'Microsoft',
'Google',
'Amazon',
'Facebook',
'IBM',
'Twitter',
'Tesla',
'SpaceX',
'Alphabet',
]
text = "{} is looking at buying U.K. startup for $1 billion"
texts = [text.format(ent) for ent in ents]
docs = self.nlp.pipe(texts, n_process=2)
for doc in docs:
print(doc)
for ent in doc.ents:
print(ent.text, ent.label_, ent._.linkedEntities)
self.nlp.remove_pipe("entityLinker")
| 1,005 | 26.189189 | 71 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_EntityCollection.py
|
import unittest
import spacy
from spacy_entity_linker.EntityCollection import EntityCollection
class TestEntityCollection(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestEntityCollection, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def setUp(self):
self.nlp.add_pipe("entityLinker", last=True)
self.doc = self.nlp(
"Elon Musk was born in South Africa. Bill Gates and Steve Jobs come from the United States")
def tearDown(self):
self.nlp.remove_pipe("entityLinker")
def test_categories(self):
doc = self.doc
res = doc._.linkedEntities.get_distinct_categories()
print(res)
assert res != None
assert len(res) > 0
res = doc._.linkedEntities.grouped_by_super_entities()
print(res)
assert res != None
assert len(res) > 0
def test_printing(self):
doc = self.doc
# pretty print
doc._.linkedEntities.pretty_print()
# repr
print(doc._.linkedEntities)
def test_super_entities(self):
doc = self.doc
doc._.linkedEntities.print_super_entities()
def test_iterable_indexable(self):
doc = self.doc
ents = list(doc._.linkedEntities)
assert len(ents) > 0
ent = doc._.linkedEntities[0]
assert ent != None
length = len(doc._.linkedEntities)
assert length > 0
| 1,491 | 25.175439 | 104 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_multithreading.py
|
import unittest
import spacy
from multiprocessing.pool import ThreadPool
class TestMultiThreading(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestMultiThreading, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def test_is_multithread_safe(self):
self.nlp.add_pipe("entityLinker", last=True)
ents = [
'Apple',
'Microsoft',
'Google',
'Amazon',
'Facebook',
'IBM',
'Twitter',
'Tesla',
'SpaceX',
'Alphabet',
]
text = "{} is looking at buying U.K. startup for $1 billion"
def thread_func(i):
doc = self.nlp(text.format(ents[i]))
print(doc)
for ent in doc.ents:
print(ent.text, ent.label_, ent._.linkedEntities)
return i
with ThreadPool(10) as pool:
for res in pool.imap_unordered(thread_func, range(10)):
pass
self.nlp.remove_pipe("entityLinker")
| 1,095 | 25.095238 | 70 |
py
|
spaCy-entity-linker
|
spaCy-entity-linker-master/tests/test_pipe.py
|
import unittest
import spacy
from multiprocessing.pool import ThreadPool
class TestPipe(unittest.TestCase):
def __init__(self, arg, *args, **kwargs):
super(TestPipe, self).__init__(arg, *args, **kwargs)
self.nlp = spacy.load('en_core_web_sm')
def test_serialize(self):
self.nlp.add_pipe("entityLinker", last=True)
ents = [
'Apple',
'Microsoft',
'Google',
'Amazon',
'Facebook',
'IBM',
'Twitter',
'Tesla',
'SpaceX',
'Alphabet',
]
text = "{} is looking at buying U.K. startup for $1 billion"
texts = [text.format(ent) for ent in ents]
docs = self.nlp.pipe(texts, n_process=2)
for doc in docs:
print(doc)
for ent in doc.ents:
print(ent.text, ent.label_, ent._.linkedEntities)
self.nlp.remove_pipe("entityLinker")
| 965 | 24.421053 | 68 |
py
|
LinearGromov
|
LinearGromov-main/LinSinkhorn.py
|
import utils
import numpy as np
import time
from sklearn.cluster import KMeans
from sklearn import preprocessing
import scipy
import types
def KL(A, B):
Ratio_trans = np.log(A) - np.log(B)
return np.sum(A * Ratio_trans)
def LR_Dykstra_Sin(K1, K2, K3, a, b, alpha, max_iter=1000, delta=1e-9, lam=0):
Q = K1
R = K2
g_old = K3
r = np.shape(K3)[0]
v1_old, v2_old = np.ones(r), np.ones(r)
u1, u2 = np.ones(np.shape(a)[0]), np.ones(np.shape(b)[0])
q_gi, q_gp = np.ones(r), np.ones(r)
q_Q, q_R = np.ones(r), np.ones(r)
err = 1
n_iter = 0
while n_iter < max_iter:
u1_prev, v1_prev = u1, v1_old
u2_prev, v2_prev = u2, v2_old
g_prev = g_old
if err > delta:
n_iter = n_iter + 1
# First Projection
u1 = a / (np.dot(K1, v1_old) + lam)
u2 = b / (np.dot(K2, v2_old) + lam)
g = np.maximum(alpha, g_old * q_gi)
q_gi = (g_old * q_gi) / (g + lam)
g_old = g.copy()
# Second Projection
v1_trans = np.dot(K1.T, u1)
v2_trans = np.dot(K2.T, u2)
g = (g_old * q_gp * v1_old * q_Q * v1_trans * v2_old * q_R * v2_trans) ** (
1 / 3
)
v1 = g / (v1_trans + lam)
v2 = g / (v2_trans + lam)
q_gp = (g_old * q_gp) / (g + lam)
q_Q = (q_Q * v1_old) / (v1 + lam)
q_R = (q_R * v2_old) / (v2 + lam)
v1_old = v1.copy()
v2_old = v2.copy()
g_old = g.copy()
# Update the error
u1_trans = np.dot(K1, v1)
err_1 = np.sum(np.abs(u1 * u1_trans - a))
u2_trans = np.dot(K2, v2)
err_2 = np.sum(np.abs(u2 * u2_trans - b))
err = err_1 + err_2
if (
np.any(np.isnan(u1))
or np.any(np.isnan(v1))
or np.any(np.isnan(u2))
or np.any(np.isnan(v2))
or np.any(np.isinf(u1))
or np.any(np.isinf(v1))
or np.any(np.isinf(u2))
or np.any(np.isinf(v2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical error in Dykstra at iteration: ", n_iter)
u1, v1 = u1_prev, v1_prev
u2, v2 = u2_prev, v2_prev
g = g_prev
break
else:
Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m = np.shape(K1)[0], np.shape(K2)[0]
count_op = (
(n_iter + 1) * (20 * r + 2 * n * r + 2 * m * r + n + m)
+ 2 * n * r
+ 2 * m * r
)
return Q, R, g, count_op, n_iter
Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m = np.shape(K1)[0], np.shape(K2)[0]
count_op = (
(n_iter + 1) * (20 * r + 2 * n * r + 2 * m * r + n + m) + 2 * n * r + 2 * m * r
)
return Q, R, g, count_op, n_iter
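# Hedged sanity check (illustrative; not part of the original file). After
# convergence the Dykstra factors satisfy Q 1_r ~ a, R 1_r ~ b and share the
# inner marginal g, so P = Q diag(1/g) R^T is a low-rank coupling of (a, b).
# The kernels below are arbitrary positive matrices, used only for a marginal check.
def _example_lr_dykstra_marginals(n=30, m=25, r=4, seed=0):
    rng = np.random.RandomState(seed)
    a, b = np.ones(n) / n, np.ones(m) / m
    K1, K2, K3 = rng.rand(n, r) + 0.1, rng.rand(m, r) + 0.1, rng.rand(r) + 0.1
    Q, R, g, _, _ = LR_Dykstra_Sin(K1, K2, K3, a, b, alpha=1e-10)
    P = np.dot(Q / g, R.T) # the induced low-rank coupling
    # Both deviations below should be close to zero after convergence.
    return np.abs(P.sum(axis=1) - a).max(), np.abs(P.sum(axis=0) - b).max()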
def LR_Dykstra_LSE_Sin(
C1, C2, C3, a, b, alpha, gamma, max_iter=1000, delta=1e-9, lam=0
):
h_old = -C3
r = np.shape(C3)[0]
g1_old, g2_old = np.zeros(r), np.zeros(r)
f1, f2 = np.zeros(np.shape(a)[0]), np.zeros(np.shape(b)[0])
w_gi, w_gp = np.zeros(r), np.zeros(
r
) # q_gi, q_gp = np.exp(gamma * w_gi), np.exp(gamma * w_gp)
w_Q, w_R = np.zeros(r), np.zeros(
r
) # q_Q, q_R = np.exp(gamma * w_Q), np.exp(gamma * w_R)
err = 1
n_iter = 0
while n_iter < max_iter:
f1_prev, g1_prev = f1, g1_old
f2_prev, g2_prev = f2, g2_old
h_prev = h_old
if err > delta:
n_iter = n_iter + 1
# First Projection
C1_tilde = f1[:, None] + g1_old[None, :] - C1 # 2 * n * r
C1_tilde = C1_tilde * gamma # n * r
f1 = (
(1 / gamma) * np.log(a)
+ f1
- (1 / gamma) * scipy.special.logsumexp(C1_tilde, axis=1)
) # 2 * n + 2 * n + n * r
C2_tilde = f2[:, None] + g2_old[None, :] - C2 # 2 * m * r
C2_tilde = C2_tilde * gamma # m * r
f2 = (
(1 / gamma) * np.log(b)
+ f2
- (1 / gamma) * scipy.special.logsumexp(C2_tilde, axis=1)
) # 2 * m + 2 * m + m * r
h = w_gi + h_old # 2 * r
h = np.maximum((np.log(alpha) / gamma), h) # r
w_gi = h_old + w_gi - h # 2 * r
h_old = h.copy()
# Update couplings
C1_tilde = f1[:, None] + g1_old[None, :] - C1 # 2 * n * r
C1_tilde = C1_tilde * gamma # n * r
alpha_1_trans = scipy.special.logsumexp(C1_tilde, axis=0) # n * r
C2_tilde = f2[:, None] + g2_old[None, :] - C2 # 2 * m * r
C2_tilde = C2_tilde * gamma # m * r
alpha_2_trans = scipy.special.logsumexp(C2_tilde, axis=0) # m * r
# Second Projection
h = (1 / 3) * (h_old + w_gp + w_Q + w_R) # 4 * r
h = h + (1 / (3 * gamma)) * alpha_1_trans # 2 * r
h = h + (1 / (3 * gamma)) * alpha_2_trans # 2 * r
g1 = h + g1_old - (1 / gamma) * alpha_1_trans # 3 * r
g2 = h + g2_old - (1 / gamma) * alpha_2_trans # 3 * r
w_Q = w_Q + g1_old - g1 # 2 * r
w_R = w_R + g2_old - g2 # 2 * r
w_gp = h_old + w_gp - h # 2 * r
g1_old = g1.copy()
g2_old = g2.copy()
h_old = h.copy()
# Update couplings
C1_tilde = f1[:, None] + g1_old[None, :] - C1 # 2 * n * r
C1_tilde = C1_tilde * gamma # n * r
Q = np.exp(C1_tilde) # n * r
C2_tilde = f2[:, None] + g2_old[None, :] - C2 # 2 * n * r
C2_tilde = C2_tilde * gamma # n * r
R = np.exp(C2_tilde) # n * r
g = np.exp(gamma * h) # 2 * r
# Update the error
err_1 = np.sum(np.abs(np.sum(Q, axis=1) - a))
err_2 = np.sum(np.abs(np.sum(R, axis=1) - b))
err = err_1 + err_2
if (
np.any(np.isnan(f1))
or np.any(np.isnan(g1))
or np.any(np.isnan(f2))
or np.any(np.isnan(g2))
or np.any(np.isinf(f1))
or np.any(np.isinf(g1))
or np.any(np.isinf(f2))
or np.any(np.isinf(g2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical error in Dykstra LSE at iteration", n_iter)
f1, g1 = f1_prev, g1_prev
f2, g2 = f2_prev, g2_prev
h = h_prev
# Update couplings
C1_tilde = f1[:, None] + g1_old[None, :] - C1
C1_tilde = C1_tilde * gamma
Q = np.exp(C1_tilde)
C2_tilde = f2[:, None] + g2_old[None, :] - C2
C2_tilde = C2_tilde * gamma
R = np.exp(C2_tilde)
g = np.exp(gamma * h)
n, m = np.shape(C1)[0], np.shape(C2)[0]
count_op = (
(n_iter) * (8 * n * r + 8 * m * r + 4 * n + 4 * m + 27 * r)
+ 4 * n * r
+ 4 * m * r
)
return Q, R, g, count_op
else:
n, m = np.shape(C1)[0], np.shape(C2)[0]
count_op = (
(n_iter + 1) * (8 * n * r + 8 * m * r + 4 * n + 4 * m + 27 * r)
+ 4 * n * r
+ 4 * m * r
)
return Q, R, g, count_op
n, m = np.shape(C1)[0], np.shape(C2)[0]
count_op = (
(n_iter + 1) * (8 * n * r + 8 * m * r + 4 * n + 4 * m + 27 * r)
+ 4 * n * r
+ 4 * m * r
)
return Q, R, g, count_op
def LR_IBP_Sin(K1, K2, K3, a, b, max_iter=1000, delta=1e-9, lam=0):
Q = K1
R = K2
g = K3
r = np.shape(K3)[0]
v1, v2 = np.ones(r), np.ones(r)
u1, u2 = np.ones(np.shape(a)[0]), np.ones(np.shape(b)[0])
u1_trans = np.dot(K1, v1) # n * r
u2_trans = np.dot(K2, v2) # m * r
err = 1
n_iter = 0
while n_iter < max_iter:
u1_prev, v1_prev = u1, v1
u2_prev, v2_prev = u2, v2
g_prev = g
if err > delta:
n_iter = n_iter + 1
# Update u1
u1 = a / u1_trans # n
v1_trans = np.dot(K1.T, u1) # n * r
# Update u2
u2 = b / u2_trans # m
v2_trans = np.dot(K2.T, u2) # m * r
# Update g
# g = g / np.sum(g)
g = (g * v1 * v1_trans * v2 * v2_trans) ** (1 / 3) # 5 * r
# Update v1
v1 = g / v1_trans # r
# Update v2
v2 = g / v2_trans # r
# Update the couplings
# Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
# R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
# Update the error
u1_trans = np.dot(K1, v1)
err_1 = np.sum(np.abs(u1 * u1_trans - a))
u2_trans = np.dot(K2, v2)
err_2 = np.sum(np.abs(u2 * u2_trans - b))
err = err_1 + err_2
if (
np.any(np.isnan(u1))
or np.any(np.isnan(v1))
or np.any(np.isnan(u2))
or np.any(np.isnan(v2))
or np.any(np.isinf(u1))
or np.any(np.isinf(v1))
or np.any(np.isinf(u2))
or np.any(np.isinf(v2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical errors in IBP at iteration", n_iter)
u1, v1 = u1_prev, v1_prev
u2, v2 = u2_prev, v2_prev
g = g_prev
break
else:
Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m = np.shape(K1)[0], np.shape(K2)[0]
count_op = (
(n_iter + 1) * (2 * n * r + 2 * m * r + 7 * r) + 3 * n * r + 3 * m * r
)
return Q, R, g, count_op
Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m = np.shape(K1)[0], np.shape(K2)[0]
count_op = (n_iter + 1) * (2 * n * r + 2 * m * r + 7 * r) + 3 * n * r + 3 * m * r
return Q, R, g, count_op
def self_quad_lot_md_fixed_marginal(
C,
a,
g,
rank,
gamma_0=1,
LSE="False",
alpha=1e-10,
seed_init=49,
max_iter=1000,
delta=1e-5,
max_iter_Sin=10000,
delta_Sin=1e-3,
lam_Sin=0,
time_out=200,
):
start = time.time()
num_op = 0
acc = []
times = []
list_num_op = []
n = np.shape(a)[0]
rank = min(rank, n)
r = rank
# rescale the cost
C = C / C.max()
# Init Q
np.random.seed(seed_init)
Q = np.abs(np.random.randn(n, rank))
Q = Q + 1 # n * r
Q = (Q.T * (a / np.sum(Q, axis=1))).T # n + n * r
# Classical OT
C_trans = np.dot(C, Q)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
acc.append(OT_trans)
num_op = num_op + n * r + n * n * r + r * r * n
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
err = 1
niter = 0
count_escape = 1
while (niter < max_iter) and (time_actual < time_out):
Q_prev = Q
g_prev = g
if err > delta:
niter = niter + 1
grad = np.dot(C, Q) + np.dot(C.T, Q)
grad = grad / g
norm = np.max(np.abs(grad)) ** 2
gamma = gamma_0 / norm
C_trans = grad - (1 / gamma) * np.log(Q) # 3 * n * r
num_op = num_op + 2 * n * n * r + 2 * n * r
# Sinkhorn
reg = 1 / gamma
if LSE == "False":
results = utils.Sinkhorn(
C_trans,
reg,
a,
g,
max_iter=max_iter_Sin,
delta=delta_Sin,
lam=lam_Sin,
time_out=time_out,
)
else:
results = utils.Sinkhorn_LSE(
C_trans,
reg,
a,
g,
max_iter=max_iter_Sin,
delta=delta_Sin,
lam=lam_Sin,
time_out=time_out,
)
res_sin, acc_sin, times_sin, Q, num_op_sin = results
num_op = num_op + num_op_sin
# Classical OT
C_trans = np.dot(C, Q)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
if np.isnan(OT_trans) == True:
print("Error self LOT: OT cost", niter)
Q = Q_prev
g = g_prev
break
            ## Update the error: theoretical error
criterion = ((1 / gamma) ** 2) * (KL(Q, Q_prev) + KL(Q_prev, Q))
if niter > 1:
if criterion > delta / 1e-1:
err = criterion
else:
count_escape = count_escape + 1
if count_escape != niter:
err = criterion
## Update the error: Practical error
# err = np.abs(OT_trans - acc[-1]) / acc[-1]
if np.isnan(criterion):
print("Error self LOT: stopping criterion", niter)
Q = Q_prev
g = g_prev
break
acc.append(OT_trans)
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
else:
break
return acc[-1], np.array(acc), np.array(times), np.array(list_num_op), Q
def self_lin_lot_md_fixed_marginal(
C1,
C2,
a,
g,
rank,
gamma_0=1,
LSE="True",
alpha=1e-10,
seed_init=49,
max_iter=1000,
delta=1e-3,
max_iter_Sin=1000,
delta_Sin=1e-9,
lam_Sin=0,
time_out=200,
):
start = time.time()
num_op = 0
acc = []
times = []
list_num_op = []
n, d = np.shape(C1)
rank = min(n, rank)
r = rank
# rescale the costs
C1 = C1 / np.sqrt(C1.max())
C2 = C2 / np.sqrt(C2.max())
# Init Q
np.random.seed(seed_init)
Q = np.abs(np.random.randn(n, rank))
Q = Q + 1 # n * r
Q = (Q.T * (a / np.sum(Q, axis=1))).T # n + n * r
# Classical OT
C_trans = np.dot(C2, Q)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
acc.append(OT_trans)
num_op = num_op + 3 * n * r + n + r
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
err = 1
niter = 0
count_escape = 1
while (niter < max_iter) and (time_actual < time_out):
Q_prev = Q
g_prev = g
if err > delta:
niter = niter + 1
grad = np.dot(C1, np.dot(C2, Q)) + np.dot(C2.T, np.dot(C1.T, Q))
grad = grad / g
norm = np.max(np.abs(grad)) ** 2
gamma = gamma_0 / norm
C_trans = grad - (1 / gamma) * np.log(Q) # 3 * n * r
num_op = num_op + 4 * n * d * r + 4 * n * r
# Sinkhorn
reg = 1 / gamma
if LSE == "False":
results = utils.Sinkhorn(
C_trans,
reg,
a,
g,
max_iter=max_iter_Sin,
delta=delta_Sin,
lam=lam_Sin,
time_out=time_out,
)
else:
results = utils.Sinkhorn_LSE(
C_trans,
reg,
a,
g,
max_iter=max_iter_Sin,
delta=delta_Sin,
lam=lam_Sin,
time_out=time_out,
)
res_sin, acc_sin, times_sin, Q, num_op_sin = results
num_op = num_op + num_op_sin
# Classical OT
C_trans = np.dot(C2, Q)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
if np.isnan(OT_trans) == True:
print("Error self LOT: OT cost", niter)
Q = Q_prev
g = g_prev
break
            ## Update the error: theoretical error
criterion = ((1 / gamma) ** 2) * (KL(Q, Q_prev) + KL(Q_prev, Q))
if niter > 1:
if criterion > delta / 1e-1:
err = criterion
else:
count_escape = count_escape + 1
if count_escape != niter:
err = criterion
## Update the error: Practical error
# err = np.abs(OT_trans - acc[-1]) / acc[-1]
            if np.isnan(criterion):
print("Error self LOT: stopping criterion", niter)
Q = Q_prev
g = g_prev
break
acc.append(OT_trans)
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
else:
break
return acc[-1], np.array(acc), np.array(times), np.array(list_num_op), Q
# If C_init == True: cost is the tuple of matrices (C(X,Y), C(X,X), C(Y,Y))
# If C_init == False: cost is a function
# Init is one of 'trivial', 'random', 'kmeans', 'general_kmeans'
def Quad_LOT_MD(
X,
Y,
a,
b,
rank,
cost,
reg=0,
alpha=1e-10,
gamma_0=10,
max_iter=1000,
delta=1e-3,
time_out=200,
Init="kmeans",
seed_init=49,
C_init=False,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=True,
):
num_op = 0
acc = []
times = []
list_num_op = []
if gamma_0 * reg >= 1:
# display(Latex(f'Choose $\gamma$ and $\epsilon$ such that $\gamma$ x $\epsilon<1$'))
print("gamma et epsilon must be well choosen")
return "Error"
n, m = np.shape(a)[0], np.shape(b)[0]
rank = min(n, m, rank)
r = rank
if C_init == False:
C = cost(X, Y)
        # the cost function must return a single 2-D cost matrix here
        if np.ndim(C) != 2:
            print("Error: the cost function is not adapted")
            return "Error"
else:
C_X = cost(X, X)
C_Y = cost(Y, Y)
if rescale_cost == True:
C = C / np.max(C)
C_X = C_X / C_X.max()
C_Y = C_Y / C_Y.max()
else:
if len(cost) != 3:
print("Error: cost not adapted")
return "Error"
else:
C, C_X, C_Y = cost
if rescale_cost == True:
C, C_X, C_Y = C / C.max(), C_X / C_X.max(), C_Y / C_Y.max()
start = time.time()
#### Initialization #####
if Init == "general_kmeans":
g = np.ones(rank) / rank
res_q, acc_q, times_q, list_num_op_q, Q = self_quad_lot_md_fixed_marginal(
C_X,
a,
g,
rank,
gamma_0=gamma_0,
LSE=False,
alpha=1e-10,
seed_init=49,
max_iter=10,
delta=delta,
max_iter_Sin=max_iter_IBP,
delta_Sin=delta_IBP,
lam_Sin=lam_IBP,
time_out=time_out / 5,
)
res_r, acc_r, times_r, list_num_op_r, R = self_quad_lot_md_fixed_marginal(
C_Y,
b,
g,
rank,
gamma_0=gamma_0,
LSE=False,
alpha=1e-10,
seed_init=49,
max_iter=10,
delta=delta,
max_iter_Sin=max_iter_IBP,
delta_Sin=delta_IBP,
lam_Sin=lam_IBP,
time_out=time_out / 5,
)
num_op = num_op + list_num_op_q[-1] + list_num_op_r[-1]
if Init == "kmeans":
g = np.ones(rank) / rank
kmeans_X = KMeans(n_clusters=rank, random_state=0).fit(X)
num_iter_kmeans_X = kmeans_X.n_iter_
Z_X = kmeans_X.cluster_centers_
C_trans_X = utils.Square_Euclidean_Distance(X, Z_X)
C_trans_X = C_trans_X / C_trans_X.max()
results = utils.Sinkhorn(
C_trans_X,
reg_init,
a,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_X, arr_times_X, Q, arr_num_op_X = results
# lb_X = preprocessing.LabelBinarizer()
# lb_X.fit(kmeans_X.labels_)
# Q = lb_X.transform(kmeans_X.labels_)
# Q = (Q.T * a).T
kmeans_Y = KMeans(n_clusters=rank, random_state=0).fit(Y)
num_iter_kmeans_Y = kmeans_Y.n_iter_
Z_Y = kmeans_Y.cluster_centers_
C_trans_Y = utils.Square_Euclidean_Distance(Y, Z_Y)
C_trans_Y = C_trans_Y / C_trans_Y.max()
results = utils.Sinkhorn(
C_trans_Y,
reg_init,
b,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_Y, arr_times_Y, R, arr_num_op_Y = results
# lb_Y = preprocessing.LabelBinarizer()
# lb_Y.fit(kmeans_Y.labels_)
# R = lb_Y.transform(kmeans_Y.labels_)
# R = (R.T * b).T
num_op = (
num_op
+ (num_iter_kmeans_X + np.shape(arr_acc_X)[0]) * rank * np.shape(X)[0]
+ (num_iter_kmeans_Y + np.shape(arr_acc_Y)[0]) * rank * np.shape(Y)[0]
)
if Init == "kmeans_modified":
## Init with K-means
g = np.ones(rank) / rank
kmeans = KMeans(n_clusters=rank, random_state=0).fit(X)
Z = kmeans.cluster_centers_
num_iter_kmeans = kmeans.n_iter_
num_op = num_op + num_iter_kmeans * rank * np.shape(X)[0] + rank
reg_init = reg_init
gamma1, gamma2, g, count_op_Barycenter = utils.UpdatePlans(
X,
Y,
Z,
a,
b,
reg_init,
cost,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
Q, R = gamma1.T, gamma2.T
num_op = num_op + count_op_Barycenter
# Init random
if Init == "random":
np.random.seed(seed_init)
g = np.abs(np.random.randn(rank))
g = g + 1 # r
g = g / np.sum(g) # r
Q = np.abs(np.random.randn(n, rank))
Q = Q + 1 # n * r
Q = (Q.T * (a / np.sum(Q, axis=1))).T # n + n * r
R = np.abs(np.random.randn(m, rank))
R = R + 1 # m * r
R = (R.T * (b / np.sum(R, axis=1))).T # m + m * r
num_op = num_op + 2 * n * r + 2 * m * r + m + n + 2 * r
### Trivial Init
if Init == "trivial":
g = np.ones(rank) / rank # r
lambda_1 = min(np.min(a), np.min(g), np.min(b)) / 2
a1 = np.arange(1, np.shape(a)[0] + 1)
a1 = a1 / np.sum(a1) # n
a2 = (a - lambda_1 * a1) / (1 - lambda_1) # 2 * n
b1 = np.arange(1, np.shape(b)[0] + 1)
b1 = b1 / np.sum(b1) # m
b2 = (b - lambda_1 * b1) / (1 - lambda_1) # 2 * m
g1 = np.arange(1, rank + 1)
g1 = g1 / np.sum(g1) # r
g2 = (g - lambda_1 * g1) / (1 - lambda_1) # 2 * r
Q = lambda_1 * np.dot(a1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
a2[:, None], g2.reshape(1, -1) # 4 * n * r
)
R = lambda_1 * np.dot(b1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
b2[:, None], g2.reshape(1, -1) # 4 * m * r
)
num_op = num_op + 4 * n * r + 4 * m * r + 3 * n + 3 * m + 3 * r
if gamma_init == "theory":
L_trans = (2 / (alpha) ** 4) * (np.linalg.norm(C) ** 2)
L_trans = L_trans + ((reg + 2 * np.linalg.norm(C)) / (alpha**3)) ** 2
L = np.sqrt(3 * L_trans)
gamma = 1 / L
if gamma_init == "regularization":
gamma = 1 / reg
if gamma_init == "arbitrary":
gamma = gamma_0
# Classical OT
C_trans = np.dot(C, R)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
acc.append(OT_trans)
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
err = 1
niter = 0
count_escape = 1
while (niter < max_iter) and (time_actual < time_out):
Q_prev = Q
R_prev = R
g_prev = g
if err > delta:
niter = niter + 1
K1_trans_0 = np.dot(C, R) # n * m * r
grad_Q = K1_trans_0 / g
if reg != 0:
grad_Q = grad_Q + reg * np.log(Q)
if gamma_init == "rescale":
# norm_1 = np.linalg.norm(grad_Q)**2
norm_1 = np.max(np.abs(grad_Q)) ** 2
K2_trans_0 = np.dot(C.T, Q) # m * n * r
grad_R = K2_trans_0 / g
if reg != 0:
grad_R = grad_R + reg * np.log(R)
if gamma_init == "rescale":
# norm_2 = np.linalg.norm(grad_R)**2
norm_2 = np.max(np.abs(grad_R)) ** 2
omega = np.diag(np.dot(Q.T, K1_trans_0)) # r * n * r
C3_trans = omega / (g**2)
grad_g = -omega / (g**2)
if reg != 0:
grad_g = grad_g + reg * np.log(g)
if gamma_init == "rescale":
# norm_3 = np.linalg.norm(grad_g)**2
norm_3 = np.max(np.abs(grad_g)) ** 2
if gamma_init == "rescale":
gamma = gamma_0 / max(norm_1, norm_2, norm_3)
C1_trans = grad_Q - (1 / gamma) * np.log(Q) # 3 * n * r
C2_trans = grad_R - (1 / gamma) * np.log(R) # 3 * m * r
C3_trans = grad_g - (1 / gamma) * np.log(g) # 4 * r
num_op = num_op + 2 * n * m * r + r * n * r + 3 * n * r + 3 * m * r + 4 * r
# Update the coupling
if method == "IBP":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
Q, R, g = LR_IBP_Sin(
K1,
K2,
K3,
a,
b,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
if method == "Dykstra":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
num_op = num_op + 2 * n * r + 2 * m * r + 2 * r
Q, R, g, count_op_Dysktra, n_iter_Dykstra = LR_Dykstra_Sin(
K1,
K2,
K3,
a,
b,
alpha,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra
if method == "Dykstra_LSE":
Q, R, g, count_op_Dysktra_LSE = LR_Dykstra_LSE_Sin(
C1_trans,
C2_trans,
C3_trans,
a,
b,
alpha,
gamma,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra_LSE
# Classical OT
C_trans = np.dot(C, R)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
if np.isnan(OT_trans) == True:
print("Error LOT: OT cost", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
            ## Update the error: theoretical error
err_1 = ((1 / gamma) ** 2) * (KL(Q, Q_prev) + KL(Q_prev, Q))
err_2 = ((1 / gamma) ** 2) * (KL(R, R_prev) + KL(R_prev, R))
err_3 = ((1 / gamma) ** 2) * (KL(g, g_prev) + KL(g_prev, g))
criterion = err_1 + err_2 + err_3
# print(criterion)
if niter > 1:
if criterion > delta / 1e-1:
err = criterion
else:
count_escape = count_escape + 1
if count_escape != niter:
err = criterion
## Update the error: Practical error
# err = np.abs(OT_trans - acc[-1]) / acc[-1]
if np.isnan(criterion) == True:
print("Error LOT: stopping criterion", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
acc.append(OT_trans)
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
else:
break
return acc[-1], np.array(acc), np.array(times), np.array(list_num_op), Q, R, g
def apply_quad_lr_lot(
X, Y, a, b, rank, cost, gamma_0=10, rescale_cost=True, time_out=50
):
if type(cost) == types.FunctionType:
acc, arr_acc, arr_times, arr_list_num_op, Q, R, g = Quad_LOT_MD(
X,
Y,
a,
b,
rank,
cost,
reg=0,
alpha=1e-10,
gamma_0=gamma_0,
max_iter=1000,
delta=1e-3,
time_out=time_out,
Init="kmeans",
seed_init=49,
C_init=False,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
else:
acc, arr_acc, arr_times, arr_list_num_op, Q, R, g = Quad_LOT_MD(
X,
Y,
a,
b,
rank,
cost,
reg=0,
alpha=1e-10,
gamma_0=gamma_0,
max_iter=1000,
delta=1e-3,
time_out=time_out,
Init="kmeans",
seed_init=49,
C_init=True,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
return acc, Q, R, g
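# Illustrative usage sketch for apply_quad_lr_lot (not part of the original API).
# It assumes the module-level imports above (numpy as np, utils) and precomputes
# the three squared-Euclidean cost matrices so that the C_init=True branch of
# Quad_LOT_MD is used; sizes, seed and gamma_0 below are arbitrary choices.
def _demo_apply_quad_lr_lot(n=50, m=60, d=3, rank=5, seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(n, d), rng.randn(m, d) + 1.0
    a, b = np.ones(n) / n, np.ones(m) / m
    C = utils.Square_Euclidean_Distance(X, Y)
    C_X = utils.Square_Euclidean_Distance(X, X)
    C_Y = utils.Square_Euclidean_Distance(Y, Y)
    res, Q, R, g = apply_quad_lr_lot(
        X, Y, a, b, rank, (C, C_X, C_Y), gamma_0=10, time_out=20
    )
    # the dense low-rank coupling can be recovered as P = Q diag(1/g) R^T
    P = np.dot(Q / g, R.T)
    return res, P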
# If C_init = True: cost_factorized = (C1, C2, C_X_1, C_X_2, C_Y_1, C_Y_2)
# If C_init = False: cost_factorized is a function
# Init = 'trivial', 'random', 'kmeans', 'general_kmeans'
def Lin_LOT_MD(
X,
Y,
a,
b,
rank,
cost,
cost_factorized,
reg=0,
alpha=1e-10,
gamma_0=10,
max_iter=1000,
delta=1e-3,
time_out=200,
Init="kmeans",
seed_init=49,
C_init=False,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=True,
):
num_op = 0
acc = []
times = []
list_num_op = []
list_criterion = []
if gamma_0 * reg >= 1:
# display(Latex(f'Choose $\gamma$ and $\epsilon$ such that $\gamma$ x $\epsilon<1$'))
print("gamma and epsilon must be well choosen")
return "Error"
n, m = np.shape(a)[0], np.shape(b)[0]
rank = min(n, m, rank)
r = rank
if C_init == False:
C = cost_factorized(X, Y)
if len(C) != 2:
print("Error: cost function is not adapted")
return Error
else:
C1, C2 = C
if rescale_cost == True:
C1, C2 = C1 / np.sqrt(np.max(C1)), C2 / np.sqrt(np.max(C2))
C_X_1, C_X_2 = cost_factorized(X, X)
if rescale_cost == True:
C_X_1, C_X_2 = C_X_1 / np.sqrt(np.max(C_X_1)), C_X_2 / np.sqrt(
np.max(C_X_2)
)
C_Y_1, C_Y_2 = cost_factorized(Y, Y)
if rescale_cost == True:
C_Y_1, C_Y_2 = C_Y_1 / np.sqrt(np.max(C_Y_1)), C_Y_2 / np.sqrt(
np.max(C_Y_2)
)
else:
if len(cost_factorized) != 6:
print("Error: soem cost matrices are missing")
return "Error"
else:
(C1, C2, C_X_1, C_X_2, C_Y_1, C_Y_2) = cost_factorized
n, d = np.shape(C1)
start = time.time()
########### Initialization ###########
#### Initialization #####
if Init == "general_kmeans":
g = np.ones(rank) / rank
res_q, acc_q, times_q, list_num_op_q, Q = self_lin_lot_md_fixed_marginal(
C_X_1,
C_X_2,
a,
g,
rank,
gamma_0=gamma_0,
LSE=False,
seed_init=49,
max_iter=10,
alpha=alpha,
delta=delta,
max_iter_Sin=max_iter_IBP,
delta_Sin=delta_IBP,
lam_Sin=lam_IBP,
time_out=1e100,
)
res_r, acc_r, times_r, list_num_op_r, R = self_lin_lot_md_fixed_marginal(
C_Y_1,
C_Y_2,
b,
g,
rank,
gamma_0=gamma_0,
LSE=False,
seed_init=49,
max_iter=10,
alpha=alpha,
delta=delta,
max_iter_Sin=max_iter_IBP,
delta_Sin=delta_IBP,
lam_Sin=lam_IBP,
time_out=1e100,
)
num_op = num_op + list_num_op_q[-1] + list_num_op_r[-1]
if Init == "kmeans":
g = np.ones(rank) / rank
kmeans_X = KMeans(n_clusters=rank, random_state=0).fit(X)
num_iter_kmeans_X = kmeans_X.n_iter_
Z_X = kmeans_X.cluster_centers_
C_trans_X = utils.Square_Euclidean_Distance(X, Z_X)
C_trans_X = C_trans_X / C_trans_X.max()
results = utils.Sinkhorn(
C_trans_X,
reg_init,
a,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_X, arr_times_X, Q, arr_num_op_X = results
# lb_X = preprocessing.LabelBinarizer()
# lb_X.fit(kmeans_X.labels_)
# Q = lb_X.transform(kmeans_X.labels_)
# Q = (Q.T * a).T
kmeans_Y = KMeans(n_clusters=rank, random_state=0).fit(Y)
num_iter_kmeans_Y = kmeans_Y.n_iter_
Z_Y = kmeans_Y.cluster_centers_
C_trans_Y = utils.Square_Euclidean_Distance(Y, Z_Y)
C_trans_Y = C_trans_Y / C_trans_Y.max()
results = utils.Sinkhorn(
C_trans_Y,
reg_init,
b,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_Y, arr_times_Y, R, arr_num_op_Y = results
# lb_Y = preprocessing.LabelBinarizer()
# lb_Y.fit(kmeans_Y.labels_)
# R = lb_Y.transform(kmeans_Y.labels_)
# R = (R.T * b).T
num_op = (
num_op
+ (num_iter_kmeans_X + np.shape(arr_acc_X)[0]) * rank * np.shape(X)[0]
+ (num_iter_kmeans_Y + np.shape(arr_acc_Y)[0]) * rank * np.shape(Y)[0]
)
if Init == "kmeans_modified":
g = np.ones(rank) / rank
kmeans = KMeans(n_clusters=rank, random_state=0).fit(X)
Z = kmeans.cluster_centers_
num_iter_kmeans = kmeans.n_iter_
num_op = num_op + r + num_iter_kmeans * r * n
reg_init = reg_init
gamma1, gamma2, g, count_op_Barycenter = utils.UpdatePlans(
X,
Y,
Z,
a,
b,
reg_init,
cost,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
Q, R = gamma1.T, gamma2.T
num_op = num_op + count_op_Barycenter
## Init random
if Init == "random":
np.random.seed(seed_init)
g = np.abs(np.random.randn(rank))
g = g + 1
g = g / np.sum(g)
n, d = np.shape(X)
m, d = np.shape(Y)
Q = np.abs(np.random.randn(n, rank))
Q = Q + 1
Q = (Q.T * (a / np.sum(Q, axis=1))).T
R = np.abs(np.random.randn(m, rank))
R = R + 1
R = (R.T * (b / np.sum(R, axis=1))).T
num_op = num_op + 2 * n * r + 2 * m * r + m + n + 2 * r
## Init trivial
if Init == "trivial":
g = np.ones(rank) / rank
lambda_1 = min(np.min(a), np.min(g), np.min(b)) / 2
a1 = np.arange(1, np.shape(a)[0] + 1)
a1 = a1 / np.sum(a1)
a2 = (a - lambda_1 * a1) / (1 - lambda_1)
b1 = np.arange(1, np.shape(b)[0] + 1)
b1 = b1 / np.sum(b1)
b2 = (b - lambda_1 * b1) / (1 - lambda_1)
g1 = np.arange(1, rank + 1)
g1 = g1 / np.sum(g1)
g2 = (g - lambda_1 * g1) / (1 - lambda_1)
Q = lambda_1 * np.dot(a1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
a2[:, None], g2.reshape(1, -1)
)
R = lambda_1 * np.dot(b1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
b2[:, None], g2.reshape(1, -1)
)
num_op = num_op + 4 * n * r + 4 * m * r + 3 * n + 3 * m + 3 * r
#####################################
if gamma_init == "theory":
L_trans = (
(2 / (alpha) ** 4) * (np.linalg.norm(C1) ** 2) * (np.linalg.norm(C1) ** 2)
)
L_trans = (
L_trans
+ ((reg + 2 * np.linalg.norm(C1) * np.linalg.norm(C1)) / (alpha**3)) ** 2
)
L = np.sqrt(3 * L_trans)
gamma = 1 / L
if gamma_init == "regularization":
gamma = 1 / reg
if gamma_init == "arbitrary":
gamma = gamma_0
# Classical OT
C_trans = np.dot(C2, R)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
acc.append(OT_trans)
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
err = 1
niter = 0
count_escape = 1
while (niter < max_iter) and (time_actual < time_out):
Q_prev = Q
R_prev = R
g_prev = g
if err > delta:
niter = niter + 1
K1_trans_0 = np.dot(C2, R) # d * m * r
K1_trans_0 = np.dot(C1, K1_trans_0) # n * d * r
grad_Q = K1_trans_0 / g
if reg != 0.0:
grad_Q = grad_Q + reg * np.log(Q)
if gamma_init == "rescale":
norm_1 = np.max(np.abs(grad_Q)) ** 2
K2_trans_0 = np.dot(C1.T, Q) # d * n * r
K2_trans_0 = np.dot(C2.T, K2_trans_0) # m * d * r
grad_R = K2_trans_0 / g
if reg != 0.0:
grad_R = grad_R + reg * np.log(R)
if gamma_init == "rescale":
norm_2 = np.max(np.abs(grad_R)) ** 2
omega = np.diag(np.dot(Q.T, K1_trans_0)) # r * n * r
grad_g = -omega / (g**2)
if reg != 0.0:
grad_g = grad_g + reg * np.log(g)
if gamma_init == "rescale":
norm_3 = np.max(np.abs(grad_g)) ** 2
if gamma_init == "rescale":
gamma = gamma_0 / max(norm_1, norm_2, norm_3)
C1_trans = grad_Q - (1 / gamma) * np.log(Q) # 3 * n * r
C2_trans = grad_R - (1 / gamma) * np.log(R) # 3 * m * r
C3_trans = grad_g - (1 / gamma) * np.log(g) # 4 * r
num_op = (
num_op
+ 2 * n * d * r
+ 2 * m * d * r
+ r * n * r
+ 3 * n * r
+ 3 * m * r
+ 4 * r
)
# Update the coupling
if method == "IBP":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
Q, R, g = LR_IBP_Sin(
K1,
K2,
K3,
a,
b,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
if method == "Dykstra":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
num_op = num_op + 2 * n * r + 2 * m * r + 2 * r
Q, R, g, count_op_Dysktra, n_iter_Dykstra = LR_Dykstra_Sin(
K1,
K2,
K3,
a,
b,
alpha,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra
if method == "Dykstra_LSE":
Q, R, g, count_op_Dysktra_LSE = LR_Dykstra_LSE_Sin(
C1_trans,
C2_trans,
C3_trans,
a,
b,
alpha,
gamma,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra_LSE
# Classical OT cost
C_trans = np.dot(C2, R)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G)
if np.isnan(OT_trans) == True:
print("Error LOT: OT cost", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
err_1 = ((1 / gamma) ** 2) * (KL(Q, Q_prev) + KL(Q_prev, Q))
err_2 = ((1 / gamma) ** 2) * (KL(R, R_prev) + KL(R_prev, R))
err_3 = ((1 / gamma) ** 2) * (KL(g, g_prev) + KL(g_prev, g))
criterion = err_1 + err_2 + err_3
# print('Sinkhorn: ' + str(criterion))
if niter > 1:
if criterion > delta / 1e-1:
err = criterion
else:
count_escape = count_escape + 1
if count_escape != niter:
err = criterion
# ## Update the error: Practical error
# err = np.abs(OT_trans - acc[-1]) / acc[-1]
if np.isnan(criterion) == True:
print("Error LOT: stopping criterion", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
acc.append(OT_trans)
list_num_op.append(num_op)
time_actual = time.time() - start
times.append(time_actual)
list_criterion.append(criterion)
else:
break
return (
acc[-1],
np.array(acc),
np.array(times),
np.array(list_num_op),
np.array(list_criterion),
Q,
R,
g,
)
def apply_lin_lr_lot(
X, Y, a, b, rank, cost, cost_factorized, gamma_0=10, rescale_cost=True, time_out=50
):
if type(cost_factorized) == types.FunctionType:
acc, arr_acc, arr_times, arr_list_num_op, arr_critetion, Q, R, g = Lin_LOT_MD(
X,
Y,
a,
b,
rank,
cost,
cost_factorized,
reg=0,
alpha=1e-10,
gamma_0=gamma_0,
max_iter=1000,
delta=1e-3,
time_out=time_out,
Init="kmeans",
seed_init=49,
C_init=False,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
else:
acc, arr_acc, arr_times, arr_list_num_op, arr_critetion, Q, R, g = Lin_LOT_MD(
X,
Y,
a,
b,
rank,
cost,
cost_factorized,
reg=0,
alpha=1e-10,
gamma_0=gamma_0,
max_iter=1000,
delta=1e-3,
time_out=time_out,
Init="kmeans",
seed_init=49,
C_init=True,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
return acc, Q, R, g
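# Illustrative usage sketch for apply_lin_lr_lot (not part of the original API).
# It assumes the module-level imports above and relies on the exact factorization
# utils.factorized_square_Euclidean so that C = C1 C2 is available in factored
# form; all sizes and parameters below are arbitrary choices.
def _demo_apply_lin_lr_lot(n=80, m=70, d=4, rank=6, seed=1):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(n, d), rng.randn(m, d)
    a, b = np.ones(n) / n, np.ones(m) / m
    res, Q, R, g = apply_lin_lr_lot(
        X,
        Y,
        a,
        b,
        rank,
        utils.Square_Euclidean_Distance,
        utils.factorized_square_Euclidean,
        gamma_0=10,
        time_out=20,
    )
    P = np.dot(Q / g, R.T)  # dense coupling, for inspection only
    return res, P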
def clustering_lin_LOT(
X, cost, cost_factorized, num_cluster=2, gamma_0=1, C_init=False, time_out=100
):
a = np.ones(np.shape(X)[0]) / np.shape(X)[0]
results = Lin_LOT_MD(
X,
X,
a,
a,
num_cluster,
cost,
cost_factorized,
gamma_0=gamma_0,
C_init=C_init,
time_out=time_out,
reg=0,
alpha=1e-10,
max_iter=1000,
delta=1e-5,
Init="general_kmeans",
seed_init=49,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
)
res_q, acc_q, times_q, list_num_op_q, list_criterion, Q, R, g = results
y_pred = np.argmax(Q, axis=1)
return y_pred
def clustering_quad_LOT(X, cost, num_cluster=2, gamma_0=1, C_init=False, time_out=100):
a = np.ones(np.shape(X)[0]) / np.shape(X)[0]
results = Quad_LOT_MD(
X,
X,
a,
a,
num_cluster,
cost,
gamma_0=gamma_0,
C_init=C_init,
time_out=time_out,
reg=0,
alpha=1e-10,
max_iter=1000,
delta=1e-5,
Init="general_kmeans",
seed_init=49,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
)
res_q, acc_q, times_q, list_num_op_q, Q, R, g = results
y_pred = np.argmax(Q, axis=1)
return y_pred
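# Illustrative sketch of low-rank-OT clustering (not part of the original API):
# two well-separated Gaussian blobs, with the squared-Euclidean cost matrices
# precomputed so that clustering_quad_LOT is called with C_init=True. Since X is
# compared with itself, the same matrix D serves as C, C_X and C_Y.
def _demo_clustering_quad_LOT(seed=2):
    rng = np.random.RandomState(seed)
    X = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + 6.0])
    D = utils.Square_Euclidean_Distance(X, X)
    y_pred = clustering_quad_LOT(X, (D, D, D), num_cluster=2, C_init=True, time_out=30)
    return y_pred  # length-60 array of cluster indices (label order is arbitrary)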
| 47,020 | 28.572956 | 93 |
py
|
LinearGromov
|
LinearGromov-main/utils.py
|
import numpy as np
import time
from sklearn.cluster import KMeans
import sklearn
import sklearn.metrics.pairwise
import sklearn.neighbors
import scipy
from scipy import special
from scipy.sparse.csgraph import dijkstra
from scipy.sparse import csr_matrix
# Here C = C1 * C2 and P = P1 * P2
def compute_OT(P1, P2, C1, C2):
OT_trans_1 = np.dot(P1.T, C1)
OT_trans_2 = np.dot(C2, P2.T)
OT_trans = np.dot(OT_trans_1, OT_trans_2)
res = np.trace(OT_trans)
return res
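# Consistency sketch (illustrative only): with P = P1 P2 and C = C1 C2,
# compute_OT above returns <P, C> = sum_ij P_ij C_ij without ever forming the
# dense n x m matrices; the shapes below are arbitrary.
def _check_compute_OT(seed=0):
    rng = np.random.RandomState(seed)
    P1, P2 = rng.rand(30, 4), rng.rand(4, 40)
    C1, C2 = rng.rand(30, 6), rng.rand(6, 40)
    dense = np.sum(np.dot(P1, P2) * np.dot(C1, C2))
    return np.allclose(compute_OT(P1, P2, C1, C2), dense)  # expected: True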
def compute_SE_OT(X, Y, Q, R, g):
Q_trans = Q / np.sqrt(g)
R_trans = R / np.sqrt(g)
A = np.dot(X.T, Q_trans)
B = np.dot(Y.T, R_trans)
res = np.sum((A - B) ** 2)
return res
def Sinkhorn(
C, reg, a, b, C_init=False, max_iter=1000, delta=1e-3, lam=0, time_out=200
):
start = time.time()
acc = []
times = []
C = C / C.max()
n, m = np.shape(a)[0], np.shape(b)[0]
K = np.exp(-C / reg)
    # The 3 commented-out lines below are equivalent to K = np.exp(-C/reg) but faster to compute
# K = np.empty(C.shape, dtype=C.dtype)
# np.divide(C, -reg, out=K)
# np.exp(K, out=K)
P = K.copy()
v = np.ones(np.shape(b)[0])
u_trans = np.dot(K, v) + lam # add regularization to avoid divide 0
OT_trans = np.sum(P * C)
acc.append(OT_trans)
time_actual = time.time() - start
times.append(time_actual)
err = 1
n_iter = 0
while (n_iter < max_iter) and (time_actual < time_out):
P_prev = P
if err > delta:
n_iter = n_iter + 1
# Update u
u = a / u_trans
# Update v
v_trans = np.dot(K.T, u) + lam
v = b / v_trans
# Update the coupling
P = u.reshape((-1, 1)) * K * v.reshape((1, -1))
# Update the total cost
OT_trans = np.sum(P * C)
if np.isnan(OT_trans) == True:
print("Error Sinkhorn: ", n_iter)
P = P_prev
break
# Update the error
u_trans = np.dot(K, v) + lam
err = np.sum(np.abs(u * u_trans - a))
# print(err)
if np.isnan(err) == True:
print("Error Sinkhorn: ", n_iter)
P = P_prev
break
acc.append(OT_trans)
time_actual = time.time() - start
times.append(time_actual)
else:
break
num_op = 3 * n * m + (n_iter + 1) * (2 * n * m + n + m)
return acc[-1], np.array(acc), np.array(times), P, num_op
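# Minimal usage sketch for Sinkhorn above (illustrative only; the problem sizes
# and the regularization strength are arbitrary). The returned coupling P should
# approximately satisfy the marginal constraints P 1 = a and P^T 1 = b.
def _demo_Sinkhorn(n=40, m=50, reg=0.05, seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(n, 2), rng.randn(m, 2) + 1.0
    a, b = np.ones(n) / n, np.ones(m) / m
    C = Square_Euclidean_Distance(X, Y)
    cost, cost_hist, time_hist, P, num_op = Sinkhorn(C, reg, a, b, max_iter=500)
    return cost, np.abs(P.sum(axis=1) - a).sum()  # OT value and row-marginal error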
def Sinkhorn_LSE(C, reg, a, b, max_iter=1000, delta=1e-3, lam=0, time_out=200):
start = time.time()
acc = []
times = []
C = C / C.max()
n, m = np.shape(a)[0], np.shape(b)[0]
f = np.zeros(n)
g = np.zeros(m)
C_trans = -C / reg
P = np.exp(C_trans)
OT_trans = np.sum(P * C)
acc.append(OT_trans)
time_actual = time.time() - start
times.append(time_actual)
err = 1
n_iter = 0
while n_iter < max_iter and (time_actual < time_out):
P_prev = P
if err > delta:
n_iter = n_iter + 1
# Update f
C_tilde = f[:, None] + g[None, :] - C
C_tilde = C_tilde / reg
f = reg * np.log(a) + f - reg * scipy.special.logsumexp(C_tilde, axis=1)
# Update g
C_tilde = f[:, None] + g[None, :] - C
C_tilde = C_tilde / reg
g = reg * np.log(b) + g - reg * scipy.special.logsumexp(C_tilde, axis=0)
# Update the coupling
C_tilde = f[:, None] + g[None, :] - C
C_tilde = C_tilde / reg
P = np.exp(C_tilde)
# Update the total cost
OT_trans = np.sum(P * C)
if np.isnan(OT_trans) == True:
print("Error Sinkhorn: ", n_iter)
P = P_prev
break
# Update the error
err = np.sum(np.abs(np.sum(P, axis=1) - a))
if np.isnan(err) == True:
print("Error Sinkhorn: ", n_iter)
P = P_prev
break
acc.append(OT_trans)
time_actual = time.time() - start
times.append(time_actual)
else:
break
num_ops = 3 * n * m + (n_iter + 1) * (2 * n * m + n + m)
return acc[-1], np.array(acc), np.array(times), P, num_ops
# Linear RF Sinkhorn: C = C1 * C2
def Lin_RF_Sinkhorn(C1, C2, reg, a, b, rank, seed=49, max_iter=1000, delta=1e-3, lam=0):
start = time.time()
acc = []
times = []
A, B = RF_Approx(-C1, C2, reg, num_samples=rank, seed=seed)
v = np.ones(np.shape(b)[0])
u_trans = np.dot(A, np.dot(B, v)) + lam
err = 1
n_iter = 0
while n_iter < max_iter:
if err > delta:
n_iter = n_iter + 1
# Update u
u = a / u_trans
# Update v
v_trans = np.dot(B.T, np.dot(A.T, u)) + lam
v = b / v_trans
# Update the coupling
P1 = u.reshape((-1, 1)) * A
P2 = B * v.reshape((1, -1))
# Update the error
u_trans = np.dot(A, np.dot(B, v)) + lam
err = np.sum(np.abs(u * u_trans - a))
# Update total cost
OT_trans = compute_OT(P1, P2, C1, C2)
if np.isnan(OT_trans) == True:
print("Error: NaN OT value")
return "Error"
else:
acc.append(OT_trans)
end = time.time()
times.append(end - start)
else:
return acc[-1], np.array(acc), np.array(times), P1, P2
return acc[-1], np.array(acc), np.array(times), P1, P2
# Linear Nys Sinkhorn: C = C1 * C2
def Lin_Nys_Sinkhorn(
C1, C2, reg, a, b, rank, seed=49, max_iter=1000, delta=1e-3, lam=0
):
start = time.time()
acc = []
times = []
V1, V2 = Nys_approx(-C1, C2.T, reg, rank, seed=seed, stable=1e-10)
A = np.dot(V2, np.linalg.inv(V1))
A = A[: len(a), :]
B = V2.T
B = B[:, len(a) :]
v = np.ones(np.shape(b)[0])
u_trans = np.dot(A, np.dot(B, v)) + lam
err = 1
n_iter = 0
while n_iter < max_iter:
if err > delta:
n_iter = n_iter + 1
# Update u
u = a / u_trans
# Update v
v_trans = np.dot(B.T, np.dot(A.T, u)) + lam
v = b / v_trans
# Update the coupling
P1 = u.reshape((-1, 1)) * A
P2 = B * v.reshape((1, -1))
# Update the error
u_trans = np.dot(A, np.dot(B, v)) + lam
err = np.sum(np.abs(u * u_trans - a))
# Update the total cost
OT_trans = compute_OT(P1, P2, C1, C2)
if np.isnan(OT_trans) == True:
print("Error: NaN OT value")
return "Error"
else:
acc.append(OT_trans)
end = time.time()
times.append(end - start)
else:
return acc[-1], np.array(acc), np.array(times), P1, P2
return acc[-1], np.array(acc), np.array(times), P1, P2
def UpdateHubs(X, Y, gamma_1, gamma_2):
Z = np.dot(gamma_1, X) + np.dot(gamma_2, Y)
norm = np.sum(gamma_1 + gamma_2, axis=1)
Z = (Z.T / norm).T
return Z
# Here cost is a function
# Here we assume that computing each entry of the cost matrix takes O(d)
def UpdatePlans(X, Y, Z, a, b, reg, cost, max_iter=1000, delta=1e-9, lam=0):
C1 = cost(Z, X) # d * n * r
C1 = C1 / C1.max()
K1 = np.exp(-C1 / reg) # size: r x n
C2 = cost(Z, Y) # d * m * r
C2 = C2 / C2.max()
K2 = np.exp(-C2 / reg) # size: r x m
r = np.shape(Z)[0]
u1, u2 = np.ones(r), np.ones(r)
v1, v2 = np.ones(np.shape(a)[0]), np.ones(np.shape(b)[0])
v1_trans = np.dot(K1.T, u1) # r * n
v2_trans = np.dot(K2.T, u2) # r * m
w = np.ones(r) / r # r
err = 1
n_iter = 0
while n_iter < max_iter:
u1_prev, v1_prev = u1, v1
u2_prev, v2_prev = u2, v2
w_prev = w
if err > delta:
n_iter = n_iter + 1
# Update v1, v2
v1 = a / v1_trans # n
u1_trans = np.dot(K1, v1) # n * r
v2 = b / v2_trans # m
u2_trans = np.dot(K2, v2) # m * r
# Update w
w = (u1 * u1_trans * u2 * u2_trans) ** (1 / 2) # 4 * r
# Update u1, u2
u1 = w / u1_trans # r
u2 = w / u2_trans # r
# Update the error
v1_trans = np.dot(K1.T, u1) # n * r
err_1 = np.sum(np.abs(v1 * v1_trans - a))
v2_trans = np.dot(K2.T, u2) # n * r
err_2 = np.sum(np.abs(v2 * v2_trans - b))
err = err_1 + err_2
if (
np.any(np.isnan(u1))
or np.any(np.isnan(v1))
or np.any(np.isnan(u2))
or np.any(np.isnan(v2))
or np.any(np.isinf(u1))
or np.any(np.isinf(v1))
or np.any(np.isinf(u2))
or np.any(np.isinf(v2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical errors UpdatePlans at iteration", n_iter)
u1, v1 = u1_prev, v1_prev
u2, v2 = u2_prev, v2_prev
w = w_prev
break
else:
gamma_1 = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
gamma_2 = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m, d = np.shape(X)[0], np.shape(Y)[0], np.shape(Z)[1]
count_op = (
(n_iter + 1) * (2 * n * r + 2 * m * r + 6 * r + n + m)
+ (d + 2) * n * r
+ (d + 2) * m * r
+ r
)
return gamma_1, gamma_2, w, count_op
gamma_1 = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
gamma_2 = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m, d = np.shape(X)[0], np.shape(Y)[0], np.shape(Z)[1]
count_op = (
(n_iter + 1) * (2 * n * r + 2 * m * r + 6 * r + n + m)
+ (d + 2) * n * r
+ (d + 2) * m * r
+ r
)
return gamma_1, gamma_2, w, count_op
# Here cost is a function
def UpdatePlans_LSE(X, Y, Z, a, b, reg, cost, max_iter=1000, delta=1e-9, lam=0):
C1 = cost(Z, X)
C2 = cost(Z, Y)
r = np.shape(Z)[0]
f1, f2 = np.zeros(r), np.zeros(r)
g1, g2 = np.zeros(np.shape(a)[0]), np.zeros(np.shape(b)[0])
w = np.ones(r) / r
err = 1
n_iter = 0
while n_iter < max_iter:
f1_prev, g1_prev = f1, g1
f2_prev, g2_prev = f2, g2
w_prev = w
if err > delta:
n_iter = n_iter + 1
# Update g1, g2
C1_tilde = (
f1.reshape(-1, 1) * np.ones((1, np.shape(a)[0]))
+ np.ones((r, 1)) * g1.reshape(1, -1)
- C1
)
C1_tilde = C1_tilde / reg
g1 = reg * np.log(a) + g1 - reg * scipy.special.logsumexp(C1_tilde, axis=0)
C2_tilde = (
f2.reshape(-1, 1) * np.ones((1, np.shape(b)[0]))
+ np.ones((r, 1)) * g2.reshape(1, -1)
- C2
)
C2_tilde = C2_tilde / reg
g2 = reg * np.log(b) + g2 - reg * scipy.special.logsumexp(C2_tilde, axis=0)
# Update w
C1_tilde = (
f1.reshape(-1, 1) * np.ones((1, np.shape(a)[0]))
+ np.ones((r, 1)) * g1.reshape(1, -1)
- C1
)
C1_tilde = C1_tilde / reg
P1 = np.exp(C1_tilde)
C2_tilde = (
f2.reshape(-1, 1) * np.ones((1, np.shape(b)[0]))
+ np.ones((r, 1)) * g2.reshape(1, -1)
- C2
)
C2_tilde = C2_tilde / reg
P2 = np.exp(C2_tilde)
w = (np.sum(P1, axis=1) * np.sum(P2, axis=1)) ** (1 / 2)
log_w = (1 / 2) * (
scipy.special.logsumexp(C1_tilde, axis=1)
+ scipy.special.logsumexp(C2_tilde, axis=1)
)
# Update f1, f2
C1_tilde = (
f1.reshape(-1, 1) * np.ones((1, np.shape(a)[0]))
+ np.ones((r, 1)) * g1.reshape(1, -1)
- C1
)
C1_tilde = C1_tilde / reg
f1 = reg * log_w + f1 - reg * scipy.special.logsumexp(C1_tilde, axis=1)
C2_tilde = (
f2.reshape(-1, 1) * np.ones((1, np.shape(b)[0]))
+ np.ones((r, 1)) * g2.reshape(1, -1)
- C2
)
C2_tilde = C2_tilde / reg
f2 = reg * log_w + f2 - reg * scipy.special.logsumexp(C2_tilde, axis=1)
# Update the coupling P1, P2
C1_tilde = (
f1.reshape(-1, 1) * np.ones((1, np.shape(a)[0]))
+ np.ones((r, 1)) * g1.reshape(1, -1)
- C1
)
C1_tilde = C1_tilde / reg
P1 = np.exp(C1_tilde)
C2_tilde = (
f2.reshape(-1, 1) * np.ones((1, np.shape(b)[0]))
+ np.ones((r, 1)) * g2.reshape(1, -1)
- C2
)
C2_tilde = C2_tilde / reg
P2 = np.exp(C2_tilde)
# Update the error
err_1 = np.sum(np.abs(np.sum(P1, axis=0) - a))
err_2 = np.sum(np.abs(np.sum(P2, axis=0) - b))
err = err_1 + err_2
if (
np.any(np.isnan(f1))
or np.any(np.isnan(g1))
or np.any(np.isnan(f2))
or np.any(np.isnan(g2))
or np.any(np.isinf(f1))
or np.any(np.isinf(g1))
or np.any(np.isinf(f2))
or np.any(np.isinf(g2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical errors at iteration", n_iter)
f1, g1 = f1_prev, g1_prev
f2, g2 = f2_prev, g2_prev
w = w_prev
break
else:
return P1, P2, w
# Update the coupling P1, P2
C1_tilde = (
f1.reshape(-1, 1) * np.ones((1, np.shape(a)[0]))
+ np.ones((r, 1)) * g1.reshape(1, -1)
- C1
)
C1_tilde = C1_tilde / reg
P1 = np.exp(C1_tilde)
C2_tilde = (
f2.reshape(-1, 1) * np.ones((1, np.shape(b)[0]))
+ np.ones((r, 1)) * g2.reshape(1, -1)
- C2
)
C2_tilde = C2_tilde / reg
P2 = np.exp(C2_tilde)
return P1, P2, w
# Same as UpdatePlans where the inputs are no more vectors but rather matrices
def UpdatePlans_Matrix(C1, C2, a, b, reg, max_iter=1000, delta=1e-9, lam=0):
K1 = np.exp(-C1.T / reg) # size: r x n
K2 = np.exp(-C2 / reg) # size: r x m
r = np.shape(C1)[1]
u1, u2 = np.ones(r), np.ones(r)
v1, v2 = np.ones(np.shape(a)[0]), np.ones(np.shape(b)[0])
v1_trans = np.dot(K1.T, u1)
v2_trans = np.dot(K2.T, u2)
w = np.ones(r) / r
err = 1
n_iter = 0
while n_iter < max_iter:
u1_prev, v1_prev = u1, v1
u2_prev, v2_prev = u2, v2
w_prev = w
if err > delta:
n_iter = n_iter + 1
# Update v1, v2
v1 = a / v1_trans
u1_trans = np.dot(K1, v1)
v2 = b / v2_trans
u2_trans = np.dot(K2, v2)
# Update w
w = (u1 * u1_trans * u2 * u2_trans) ** (1 / 2)
# Update u1, u2
u1 = w / u1_trans
u2 = w / u2_trans
# Update the error
v1_trans = np.dot(K1.T, u1)
err_1 = np.sum(np.abs(v1 * v1_trans - a))
v2_trans = np.dot(K2.T, u2)
err_2 = np.sum(np.abs(v2 * v2_trans - b))
err = err_1 + err_2
if (
np.any(np.isnan(u1))
or np.any(np.isnan(v1))
or np.any(np.isnan(u2))
or np.any(np.isnan(v2))
or np.any(np.isinf(u1))
or np.any(np.isinf(v1))
or np.any(np.isinf(u2))
or np.any(np.isinf(v2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical errors at iteration", n_iter)
u1, v1 = u1_prev, v1_prev
u2, v2 = u2_prev, v2_prev
w = w_prev
break
else:
gamma_1 = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
gamma_2 = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
return gamma_1.T, gamma_2.T, w
gamma_1 = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
gamma_2 = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
return gamma_1.T, gamma_2.T, w
# Here cost is a function: only the Squared Euclidean is legal
def FactoredOT(
X,
Y,
a,
b,
reg,
rank,
cost,
max_iter=1000,
delta=1e-3,
max_iter_Update=1000,
delta_Update=1e-9,
lam_Update=0,
LSE=True,
time_out=200,
):
start = time.time()
acc = []
times = []
C = cost(X, Y)
kmeans = KMeans(n_clusters=rank, random_state=0).fit(X)
Z = kmeans.cluster_centers_
w = np.ones(rank) / rank
gamma1 = w.reshape((-1, 1)) * a.reshape((1, -1))
gamma2 = w.reshape((-1, 1)) * b.reshape((1, -1))
err = 1
niter = 0
while niter < max_iter:
gamma1_prev = gamma1
gamma2_prev = gamma2
w_prev = w
if err > delta:
niter = niter + 1
if LSE == False:
gamma1, gamma2, w = UpdatePlans(
X,
Y,
Z,
a,
b,
reg,
cost,
max_iter=max_iter_Update,
delta=delta_Update,
lam=lam_Update,
)
else:
gamma1, gamma2, w = UpdatePlans_LSE(
X,
Y,
Z,
a,
b,
reg,
cost,
max_iter=max_iter_Update,
delta=delta_Update,
lam=lam_Update,
)
# Update the Hubs
Z = UpdateHubs(X, Y, gamma1, gamma2)
# Update the total cost
# Metric used in the MIT paper
# OT_trans = compute_SE_OT(X,Y,gamma1.T,gamma2.T,w)
# Classical OT
C_trans = np.dot(C, gamma2.T)
C_trans = C_trans / w
G = np.dot(gamma1, C_trans)
OT_trans = np.trace(G)
if niter > 10:
                ## Update the error: theoretical error
# err_1 = ((1/gamma)**2) * (KL(Q,Q_prev) + KL(Q_prev,Q))
# err_2 = ((1/gamma)**2) * (KL(R,R_prev) + KL(R_prev,R))
# err_3 = ((1/gamma)**2) * (KL(g,g_prev) + KL(g_prev,g))
# err = err_1 + err_2 + err_3
## Update the error: Practical error
err = np.abs(OT_trans - acc[-1]) / acc[-1]
if np.isnan(err):
print("Error computation of the stopping criterion", niter)
gamma1 = gamma1_prev
gamma2 = gamma2_prev
w = w_prev
break
if np.isnan(OT_trans) == True:
print("Error: NaN OT value")
return "Error"
else:
acc.append(OT_trans)
end = time.time()
tim_actual = end - start
times.append(tim_actual)
if tim_actual > time_out:
return (
acc[-1],
np.array(acc),
np.array(times),
gamma1.T,
gamma2.T,
w,
)
else:
return acc[-1], np.array(acc), np.array(times), gamma1.T, gamma2.T, w
return acc[-1], np.array(acc), np.array(times), gamma1.T, gamma2.T, w
def LR_Dykstra(K1, K2, K3, gamma, a, b, alpha, max_iter=1000, delta=1e-9, lam=0):
Q = K1
R = K2
g_old = K3
r = np.shape(K3)[0]
v1_old, v2_old = np.ones(r), np.ones(r)
u1, u2 = np.ones(np.shape(a)[0]), np.ones(np.shape(b)[0])
q_gi, q_gp = np.ones(r), np.ones(r)
q_Q, q_R = np.ones(r), np.ones(r)
err = 1
n_iter = 0
while n_iter < max_iter:
u1_prev, v1_prev = u1, v1_old
u2_prev, v2_prev = u2, v2_old
g_prev = g_old
if err > delta:
n_iter = n_iter + 1
# First Projection
u1 = a / (np.dot(K1, v1_old) + lam)
u2 = b / (np.dot(K2, v2_old) + lam)
g = np.maximum(alpha, g_old * q_gi)
q_gi = (g_old * q_gi) / (g + lam)
g_old = g.copy()
# Second Projection
v1_trans = np.dot(K1.T, u1)
v2_trans = np.dot(K2.T, u2)
g = (g_old * q_gp * v1_old * q_Q * v1_trans * v2_old * q_R * v2_trans) ** (
1 / 3
)
v1 = g / (v1_trans + lam)
v2 = g / (v2_trans + lam)
q_gp = (g_old * q_gp) / (g + lam)
q_Q = (q_Q * v1_old) / (v1 + lam)
q_R = (q_R * v2_old) / (v2 + lam)
v1_old = v1.copy()
v2_old = v2.copy()
g_old = g.copy()
# Update the error
u1_trans = np.dot(K1, v1)
err_1 = np.sum(np.abs(u1 * u1_trans - a))
u2_trans = np.dot(K2, v2)
err_2 = np.sum(np.abs(u2 * u2_trans - b))
err = err_1 + err_2
if (
np.any(np.isnan(u1))
or np.any(np.isnan(v1))
or np.any(np.isnan(u2))
or np.any(np.isnan(v2))
or np.any(np.isinf(u1))
or np.any(np.isinf(v1))
or np.any(np.isinf(u2))
or np.any(np.isinf(v2))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Error Dykstra: ", n_iter)
u1, v1 = u1_prev, v1_prev
u2, v2 = u2_prev, v2_prev
g = g_prev
break
else:
Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m = np.shape(K1)[0], np.shape(K2)[0]
return Q, R, g, np.log(u1), np.log(v1), np.log(u2), np.log(v2)
Q = u1.reshape((-1, 1)) * K1 * v1.reshape((1, -1))
R = u2.reshape((-1, 1)) * K2 * v2.reshape((1, -1))
n, m = np.shape(K1)[0], np.shape(K2)[0]
return (
Q,
R,
g,
np.log(u1) / gamma,
np.log(v1) / gamma,
np.log(u2) / gamma,
np.log(v2) / gamma,
)
# Approximate the kernel k(x,y) = exp(TU/\varepsilon)
def RF_Approx(T, U, reg, num_samples=100, seed=49):
R = np.minimum(theoritical_R(T, U.T), 100)
A = Feature_Map_Gaussian(T, reg, R, num_samples=num_samples, seed=seed)
B = Feature_Map_Gaussian(U.T, reg, R, num_samples=num_samples, seed=seed).T
n, d = np.shape(T)
m, d = np.shape(U.T)
num_op = (
d * n * num_samples
+ 6 * n * num_samples
+ num_samples * d
+ n * d
+ num_samples
+ n
)
num_op = (
num_op
+ d * m * num_samples
+ 6 * m * num_samples
+ num_samples * d
+ m * d
+ num_samples
+ m
)
num_op = num_op + n * d + m * d + n + m
return A, B, num_op
def Nys_approx(X, Y, reg, rank, seed=49, stable=1e-10):
n, d = np.shape(X)
m, d = np.shape(Y)
n_tot = n + m
Z = np.concatenate((X, Y), axis=0)
rank_trans = int(np.minimum(rank, n_tot))
np.random.seed(seed)
ind = np.random.choice(n_tot, rank_trans, replace=False)
ind = np.sort(ind)
Z_1 = Z[ind, :]
A = np.exp(np.dot(Z_1, Z_1.T) / reg)
A = A + stable * np.eye(rank_trans)
V = np.exp(np.dot(Z, Z_1.T) / reg)
return A, V
#################### Cost Matrix #####################
## Feature map of k(x,y) = \langle x,y\rangle ** 2 ##
def Feature_Map_Poly(X):
n, d = np.shape(X)
X_new = np.zeros((n, d**2))
for i in range(n):
x = X[i, :][:, None]
X_new[i, :] = np.dot(x, x.T).reshape(-1)
return X_new
def theoritical_R(X, Y):
norm_X = np.linalg.norm(X, axis=1)
norm_Y = np.linalg.norm(Y, axis=1)
norm_max = np.maximum(np.max(norm_X), np.max(norm_Y))
return norm_max
### Random Feature Maps of RBF Kernel
def Feature_Map_Gaussian(X, reg, R=1, num_samples=100, seed=49):
n, d = np.shape(X)
y = R**2 / (reg * d)
q = np.real((1 / 2) * np.exp(special.lambertw(y)))
C = (2 * q) ** (d / 4)
var = (q * reg) / 4
np.random.seed(seed)
U = np.random.multivariate_normal(np.zeros(d), var * np.eye(d), num_samples)
SED = Square_Euclidean_Distance(X, U)
W = -(2 * SED) / reg
V = np.sum(U**2, axis=1) / (reg * q)
res_trans = V + W
res_trans = C * np.exp(res_trans)
res = (1 / np.sqrt(num_samples)) * res_trans
return res
def Square_Euclidean_Distance(X, Y):
"""Returns the matrix of $|x_i-y_j|^2$."""
X_col = X[:, np.newaxis]
Y_lin = Y[np.newaxis, :]
C = np.sum((X_col - Y_lin) ** 2, 2)
# D = (np.sum(X ** 2, 1)[:, np.newaxis] - 2 * np.dot(X, Y.T) + np.sum(Y ** 2, 1))
return C
# shape of xs: num_samples * dimension
def factorized_square_Euclidean(xs, xt):
square_norm_s = np.sum(xs**2, axis=1) # 2 * n * d
square_norm_t = np.sum(xt**2, axis=1) # 2 * m * d
A_1 = np.zeros((np.shape(xs)[0], 2 + np.shape(xs)[1]))
A_1[:, 0] = square_norm_s
A_1[:, 1] = np.ones(np.shape(xs)[0])
A_1[:, 2:] = -2 * xs # n * d
A_2 = np.zeros((2 + np.shape(xs)[1], np.shape(xt)[0]))
A_2[0, :] = np.ones(np.shape(xt)[0])
A_2[1, :] = square_norm_t
A_2[2:, :] = xt.T
return A_1, A_2
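# Quick consistency sketch (illustrative only): the two factors returned by
# factorized_square_Euclidean multiply back to the dense squared-Euclidean cost
# matrix computed by Square_Euclidean_Distance.
def _check_factorized_square_Euclidean(seed=0):
    rng = np.random.RandomState(seed)
    xs, xt = rng.randn(20, 3), rng.randn(25, 3)
    A_1, A_2 = factorized_square_Euclidean(xs, xt)
    return np.allclose(np.dot(A_1, A_2), Square_Euclidean_Distance(xs, xt))  # True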
def Euclidean_Distance(X, Y):
X_col = X[:, np.newaxis]
Y_lin = Y[np.newaxis, :]
C = np.sum((X_col - Y_lin) ** 2, 2)
C = np.sqrt(C)
# D = (np.sum(X ** 2, 1)[:, np.newaxis] - 2 * np.dot(X, Y.T) + np.sum(Y ** 2, 1))
return C
def Lp_Distance(X, Y, p=1):
X_col = X[:, np.newaxis]
Y_lin = Y[np.newaxis, :]
C = np.sum(np.abs(X_col - Y_lin) ** p, 2)
C = C ** (1 / p)
# D = (np.sum(X ** 2, 1)[:, np.newaxis] - 2 * np.dot(X, Y.T) + np.sum(Y ** 2, 1))
return C
def rbf_distance(X):
kernel = sklearn.metrics.pairwise.rbf_kernel(X)
D = 1 - kernel
return D
def Learning_linear_subspace(X, Y, cost, U, C_init=False, tol=1e-3):
rank, m = np.shape(U)
U_sym = np.dot(U, U.T) # k x k
# d, v = np.linalg.eigh(U_sym)
u, d, v_transpose = np.linalg.svd(U_sym)
v = v_transpose.T
v = v / np.sqrt(d) # k x k
ind_column = np.random.choice(m, size=int(rank / tol))
U_trans = U[:, ind_column] # k x k/tol
if C_init == False:
A_trans = cost(X, Y[ind_column, :])
else:
A_trans = cost[:, ind_column] # n x k/tol
A_trans = (1 / np.sqrt(int(rank / tol))) * A_trans
B = (1 / np.sqrt(int(rank / tol))) * np.dot(v.T, U_trans) # k x k/tol
Mat = np.linalg.inv(np.dot(B, B.T))
Mat = np.dot(Mat, B) # k x k/tol
alpha = np.dot(Mat, A_trans.T) # k x n
V_f = np.dot(alpha.T, v.T)
return V_f
# If C_init == True: cost is the Matrix
# If C_init == False: cost is the Function
def factorized_distance_cost(X, Y, rank, cost, C_init=False, tol=1e-3, seed=49):
np.random.seed(seed)
if C_init == False:
n, m = np.shape(X)[0], np.shape(Y)[0]
else:
n, m = np.shape(cost)
i_ = np.random.randint(n, size=1)
j_ = np.random.randint(m, size=1)
if C_init == False:
X_trans = X[i_, :]
if np.shape(X_trans)[0] != 1:
X_trans = X_trans[np.newaxis, :]
cost_trans_i = cost(X_trans, Y)
mean = np.mean(cost_trans_i**2)
else:
cost_trans_i = cost[i_, :]
mean = np.mean(cost_trans_i**2)
if C_init == False:
Y_trans = Y[j_, :]
if np.shape(Y_trans)[0] != 1:
Y_trans = Y_trans[np.newaxis, :]
cost_trans_j = cost(X, Y_trans)
else:
cost_trans_j = cost[:, j_]
p_row = cost_trans_j**2 + cost_trans_i[0, j_] ** 2 + mean
p_row = p_row / np.sum(p_row) # vector of size n
# Compute S
ind_row = np.random.choice(n, size=int(rank / tol), p=p_row.reshape(-1))
if C_init == False:
S = cost(X[ind_row, :], Y) # k/tol x m
else:
S = cost[ind_row, :]
p_row_sub = p_row[ind_row]
S = S / np.sqrt(int(rank / tol) * p_row_sub)
norm_square_S = np.sum(S**2)
p_column = np.zeros(m)
for j in range(m):
p_column[j] = np.sum(S[:, j] ** 2) / norm_square_S
p_column = p_column / np.sum(p_column) # vector of size m
# Compute W
ind_column = np.random.choice(m, size=int(rank / tol), p=p_column.reshape(-1))
W = S[:, ind_column] # k/tol x k/tol
p_column_sub = p_column[ind_column]
W = (W.T / np.sqrt(int(rank / tol) * p_column_sub)).T
# Compute U
u, d, v = np.linalg.svd(W)
U = u[:, :rank] # k/tol x k
U_trans = np.dot(W.T, U) # k/tol x k
norm_U = np.sum(U_trans**2, axis=0)
norm_U = np.sqrt(norm_U)
U = np.dot(S.T, U) # m x k
U = U / norm_U
# Compute V
V = Learning_linear_subspace(X, Y, cost, U.T, C_init=C_init, tol=tol)
return V, U.T
# compute the connectivity matrix of a distance matrix
def k_smallest_by_row(D, k=50):
ind_D = np.argpartition(D, k)
ind_D_trans = ind_D[:, :k]
row_indices = tuple(
np.full(len(col_index), i) for i, col_index in enumerate(ind_D_trans)
)
row_indices = np.concatenate(row_indices)
col_indices = np.concatenate(ind_D_trans)
mask = np.zeros((np.shape(D)[0], np.shape(D)[1]))
mask[row_indices, col_indices] = 1
return mask
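# Small sketch of k_smallest_by_row (illustrative only): the returned 0/1 mask
# keeps, in every row, the k entries with the smallest values.
def _demo_k_smallest_by_row():
    D = np.array([[0.0, 2.0, 1.0, 3.0], [5.0, 0.0, 4.0, 1.0]])
    mask = k_smallest_by_row(D, k=2)
    # row 0 keeps columns {0, 2}, row 1 keeps columns {1, 3}
    return mask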
## shortest path distance for graphs
def shortest_path_distance(X, graph_type="kneighbors_graph", n_neighbors=10):
if graph_type == "kneighbors_graph":
csr_graph = sklearn.neighbors.kneighbors_graph(X, n_neighbors, mode="distance")
if graph_type == "rbf":
kernel = sklearn.metrics.pairwise.rbf_kernel(X)
csr_graph = 1 - kernel
D = dijkstra(csr_graph, directed=False, return_predecessors=False, unweighted=False)
return D
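# Minimal usage sketch for shortest_path_distance (illustrative only): pairwise
# shortest-path distances on a 10-nearest-neighbor graph built from random 2-D
# points; the sample size is an arbitrary choice.
def _demo_shortest_path_distance(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(50, 2)
    D = shortest_path_distance(X, graph_type="kneighbors_graph", n_neighbors=10)
    return D.shape  # (50, 50)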
######## Factorized shortest path distance matrix for graphs
def factorised_shortest_path_distance_kernel(
X, num_connection, rank_rf=100, rank=100, tol=1e-3, seed=49
):
reg = np.shape(X)[1]
R = theoritical_R(X, X)
phi_X = Feature_Map_Gaussian(X, reg, R=R, num_samples=rank_rf, seed=seed)
kernel = np.dot(phi_X, phi_X.T)
rescale = np.max(kernel)
csr_graph = rescale - kernel
csr_graph = k_smallest_by_row(csr_graph, k=num_connection)
D = factorized_shortest_path(csr_graph, rank, tol=tol, seed=seed + 10)
return D
## Here k is the number of neighbors (connections) allowed per node
## Here rank_cost is the rank of the factorization of the distance matrix
## Here the cost must be a metric as we factorize it to compute the graph
def k_connectivity_graph(data, k, cost, rank_cost=100, seed=49):
cost_factorized = lambda X, Y: factorized_distance_cost(
X, Y, rank_cost, cost, C_init=False, tol=1e-1, seed=seed
)
D11, D12 = cost_factorized(data, data)
D = np.dot(D11, D12)
graph_data = k_smallest_by_row(D, k=k)
csr_graph = csr_matrix(graph_data)
return csr_graph
## here csr_graph is the sparse connectivity graph
# G = dijkstra(G_trans, directed=False, indices=[], return_predecessors=False, unweighted=False)
def Learning_linear_subspace_shortest_path(csr_graph, U, tol=1e-3):
rank, m = np.shape(U)
U_sym = np.dot(U, U.T) # k x k
# d, v = np.linalg.eigh(U_sym)
u, d, v_transpose = np.linalg.svd(U_sym)
v = v_transpose.T
v = v / np.sqrt(d) # k x k
ind_column = np.random.choice(m, size=int(rank / tol))
U_trans = U[:, ind_column] # k x k/tol
A_trans = dijkstra(
csr_graph,
directed=False,
indices=ind_column,
return_predecessors=False,
unweighted=False,
)
A_trans = A_trans.T
A_trans = (1 / np.sqrt(int(rank / tol))) * A_trans
B = (1 / np.sqrt(int(rank / tol))) * np.dot(v.T, U_trans) # k x k/tol
Mat = np.linalg.inv(np.dot(B, B.T))
Mat = np.dot(Mat, B) # k x k/tol
alpha = np.dot(Mat, A_trans.T) # k x n
V_f = np.dot(alpha.T, v.T)
return V_f
## here csr_graph is the sparse connectivity graph
def factorized_shortest_path(csr_graph, rank, tol=1e-3, seed=49):
np.random.seed(seed)
n, m = np.shape(csr_graph)[0], np.shape(csr_graph)[1]
i_ = np.random.randint(n, size=1)
j_ = np.random.randint(m, size=1)
cost_trans_i = dijkstra(
csr_graph,
directed=False,
indices=i_,
return_predecessors=False,
unweighted=False,
)
cost_trans_i = cost_trans_i.reshape(-1)
mean = np.mean(cost_trans_i**2)
cost_trans_j = dijkstra(
csr_graph,
directed=False,
indices=j_,
return_predecessors=False,
unweighted=False,
)
cost_trans_j = cost_trans_j.reshape(-1)
p_row = cost_trans_j**2 + cost_trans_i[j_] ** 2 + mean
p_row = p_row / np.sum(p_row) # probability of size n
# Compute S
ind_row = np.random.choice(n, size=int(rank / tol), p=p_row.reshape(-1))
S = dijkstra(
csr_graph,
directed=False,
indices=ind_row,
return_predecessors=False,
unweighted=False,
)
p_row_sub = p_row[ind_row]
S = (S.T / np.sqrt(int(rank / tol) * p_row_sub)).T
norm_square_S = np.sum(S**2)
p_column = np.zeros(m)
for j in range(m):
p_column[j] = np.sum(S[:, j] ** 2) / norm_square_S
p_column = p_column / np.sum(p_column) # vector of size m
# Compute W
ind_column = np.random.choice(m, size=int(rank / tol), p=p_column.reshape(-1))
W = S[:, ind_column] # k/tol x k/tol
p_column_sub = p_column[ind_column]
W = (W.T / np.sqrt(int(rank / tol) * p_column_sub)).T
# Compute U
u, d, v = np.linalg.svd(W)
U = u[:, :rank] # k/tol x k
U_trans = np.dot(W.T, U) # k/tol x k
norm_U = np.sum(U_trans**2, axis=0)
norm_U = np.sqrt(norm_U)
U = np.dot(S.T, U) # m x k
U = U / norm_U
# Compute V
V = Learning_linear_subspace_shortest_path(csr_graph, U.T, tol=tol)
return V, U.T
| 34,825 | 28.38903 | 96 |
py
|
LinearGromov
|
LinearGromov-main/toy_examples.py
|
import numpy as np
import FastGromovWass
import utils
import matplotlib.pylab as pl
from mpl_toolkits.mplot3d import Axes3D # noqa
### Some examples of toy data
def Mixture_of_Gaussians(num_samples, sigma, dimension1, dimension2, seed=49):
    np.random.seed(seed)
    nX1 = int(num_samples / 3)
nX2 = nX1
nX3 = num_samples - 2 * nX1
cov1 = sigma * np.eye(dimension1)
mean_X1 = np.zeros(dimension1)
mean_X2 = np.zeros(dimension1)
mean_X2[1] = 1
mean_X3 = np.zeros(dimension1)
mean_X3[0], mean_X3[1] = 1, 1
X1 = np.random.multivariate_normal(mean_X1, cov1, nX1)
X2 = np.random.multivariate_normal(mean_X2, cov1, nX2)
X3 = np.random.multivariate_normal(mean_X3, cov1, nX3)
X = np.concatenate([X1, X2, X3], axis=0)
nY1 = int(num_samples / 2)
nY2 = num_samples - nY1
mean_Y1 = np.zeros(dimension2)
mean_Y1[0], mean_Y1[1] = 0.5, 0.5
mean_Y2 = np.zeros(dimension2)
mean_Y2[0], mean_Y2[1] = -0.5, 0.5
cov2 = sigma * np.eye(dimension2)
Y1 = np.random.multivariate_normal(mean_Y1, cov2, nY1)
Y2 = np.random.multivariate_normal(mean_Y2, cov2, nY2)
Y = np.concatenate([Y1, Y2], axis=0)
return X, Y
def simul_two_Gaussians(num_samples, dimension1, dimension2, seed=49):
np.random.seed(seed)
mean_X = np.zeros(dimension1)
var = 1
cov_X = var * np.eye(dimension1)
X = np.random.multivariate_normal(mean_X, cov_X, num_samples)
mean_Y = 4 * np.ones(dimension2)
cov_Y = var * np.eye(dimension2)
Y = np.random.multivariate_normal(mean_Y, cov_Y, num_samples)
# norms = np.linalg.norm(Y, axis=1)
# norm_max = np.max(norms)
# Y = Y / norm_max
return X, Y
def curve_2d_3d(num_samples):
theta = np.linspace(-4 * np.pi, 4 * np.pi, num_samples)
z = np.linspace(1, 2, num_samples)
r = z**2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
X = np.concatenate([x.reshape(-1, 1), z.reshape(-1, 1)], axis=1)
Y = np.concatenate([x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], axis=1)
return X, Y
n, m = 1000, 1000 # nb samples
X, Y = curve_2d_3d(n)
fig = pl.figure()
ax1 = fig.add_subplot(121)
ax1.plot(X[:, 0], X[:, 1], "+b", label="Source samples")
ax2 = fig.add_subplot(122, projection="3d")
ax2.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color="r")
pl.show()
## Two Gaussians
# dimX, dimY = 10,15
# X,Y = simul_two_Gaussians(n,dimX, dimY,seed=49)
# Y = X.copy()
## Two Mixture of Gaussians
# dimX, dimY = 10,15
# sigma = 0.05
# dimX, dimY = 10, 15
# X,Y = Mixture_of_Gaussians(n, sigma, dimX, dimY, seed=49)
### Define the cost function: here we give several examples
Square_Euclidean_cost = lambda X, Y: utils.Square_Euclidean_Distance(X, Y)
L1_cost = lambda X, Y: utils.Lp_Distance(X, Y, p=1)
L3_cost = lambda X, Y: utils.Lp_Distance(X, Y, p=3)
cost = Square_Euclidean_cost
## Define the factorized cost function
rank_cost = 100
cost_factorized = lambda X, Y: utils.factorized_distance_cost(
X, Y, rank_cost, cost, C_init=False, tol=1e-1, seed=50
)
## Here is an exact implementation of the factorized SE distance
cost_factorized = lambda X, Y: utils.factorized_square_Euclidean(X, Y)
## Compute the cost matrices
D1 = cost(X, X)
D11, D12 = cost_factorized(X, X)
D2 = cost(Y, Y)
D21, D22 = cost_factorized(Y, Y)
## Normalize the cost matrices
r1, r2 = D1.max(), D2.max()
D1, D2 = D1 / r1, D2 / r2
D11, D12 = D11 / np.sqrt(r1), D12 / np.sqrt(r1)
D21, D22 = D21 / np.sqrt(r2), D22 / np.sqrt(r2)
## Define the marginals
a, b = (1 / n) * np.ones(n), (1 / m) * np.ones(m)
### Compute GW cost with a trivial initialization
res = FastGromovWass.GW_init_factorized(D11, D12, D21, D22, a, b)
print(res)
### Entropic GW: cubic method
reg = 5 * 1e-3
res, acc, tim, num_ops, Couplings = FastGromovWass.GW_entropic_distance(
D1,
D2,
reg,
a,
b,
Init="lower_bound",
seed_init=49,
I=100,
delta_sin=1e-3,
num_iter_sin=10000,
lam_sin=0,
LSE=False,
time_out=50,
)
print(res)
# Plot the coupling after a non-trivial initialization
pl.imshow(Couplings[0], interpolation="nearest", cmap="Greys", aspect="auto")
# Plot the final coupling obtained
pl.imshow(Couplings[-1], interpolation="nearest", cmap="Greys", aspect="auto")
### Entropic GW: quadratic method
reg = 5 * 1e-3
res, acc, tim, num_ops, Couplings = FastGromovWass.Quad_GW_entropic_distance(
D11,
D12,
D21,
D22,
reg,
a,
b,
Init="lower_bound",
seed_init=49,
I=100,
delta_sin=1e-3,
num_iter_sin=10000,
lam_sin=0,
LSE=False,
time_out=50,
)
print(res)
# Plot the coupling after a non-trivial initialization
pl.imshow(Couplings[0], interpolation="nearest", cmap="Greys", aspect="auto")
# Plot the final coupling obtained
pl.imshow(Couplings[-1], interpolation="nearest", cmap="Greys", aspect="auto")
### LR-GW: Quadratic method
rank = 10
cost_SE = (D1, D2)
results = FastGromovWass.apply_quad_lr_gw(
X, Y, a, b, rank, cost_SE, gamma_0=10, rescale_cost=False, time_out=50
)
res, Q, R, g = results
print(res)
# Plot the coupling obtained
P = np.dot(Q / g, R.T)
pl.imshow(P, interpolation="nearest", cmap="Greys", aspect="auto")
### LR-GW: Linear method
rank = 10
cost_SE = (D11, D12, D21, D22)
results = FastGromovWass.apply_lin_lr_gw(
X, Y, a, b, rank, cost_SE, gamma_0=10, rescale_cost=False, time_out=50
)
res, Q, R, g = results
print(res)
# Plot the final coupling obtained
P = np.dot(Q / g, R.T)
pl.imshow(P, interpolation="nearest", cmap="Greys", aspect="auto")
| 5,506 | 23.584821 | 86 |
py
|
LinearGromov
|
LinearGromov-main/FastGromovWass.py
|
import numpy as np
import time
import LinSinkhorn
import utils
from sklearn.cluster import KMeans
from sklearn import preprocessing
import types
def KL(A, B):
Ratio_trans = np.log(A) - np.log(B)
return np.sum(A * Ratio_trans)
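# Small sanity sketch for KL above (illustrative only): the divergence of a
# probability vector with itself is 0, and it is positive for distinct vectors.
def _demo_KL():
    A = np.array([0.2, 0.3, 0.5])
    B = np.array([0.25, 0.25, 0.5])
    return KL(A, A), KL(A, B)  # (0.0, a small positive number)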
# D1 = A_1A_2 and D2 = B_1B_2
def GW_init_factorized(A_1, A_2, B_1, B_2, p, q):
tilde_A_1 = Feature_Map_Poly(A_1)
tilde_A_2_T = Feature_Map_Poly(A_2.T)
tilde_A_2 = tilde_A_2_T.T
tilde_B_1 = Feature_Map_Poly(B_1)
tilde_B_2_T = Feature_Map_Poly(B_2.T)
tilde_B_2 = tilde_B_2_T.T
tilde_a = np.dot(tilde_A_1, np.dot(tilde_A_2, p))
tilde_b = np.dot(tilde_B_1, np.dot(tilde_B_2, q))
c = np.dot(tilde_a, p) + np.dot(tilde_b, q)
P1 = p[:, None]
P2 = q[None, :]
G_1 = np.dot(A_2, P1)
G_2 = np.dot(P2, B_1)
G = np.dot(G_1, G_2)
G_1_1 = np.dot(B_2, P2.T)
G_2_1 = np.dot(P1.T, A_1)
G_trans = np.dot(G_1_1, G_2_1)
M = np.dot(G, G_trans)
res = c - 2 * np.trace(M)
return res
# Dense (cubic-cost) evaluation of the GW objective at the trivial coupling P = a b^T
def GW_init_cubic(D_1, D_2, a, b):
P = a[:, None] * b[None, :]
const_1 = np.dot(
np.dot(D_1**2, a.reshape(-1, 1)), np.ones(len(b)).reshape(1, -1)
) # 2 * n * n + n * m
const_2 = np.dot(
np.ones(len(a)).reshape(-1, 1), np.dot(b.reshape(1, -1), (D_2**2).T)
) # 2 * m * m + n * m
const = const_1 + const_2
L = const - 2 * np.dot(np.dot(D_1, P), D_2)
res = np.sum(L * P)
return res
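# Consistency sketch (illustrative only): with D1 = A_1 A_2 and D2 = B_1 B_2, the
# factorized evaluation above should match the cubic (dense) evaluation at the
# trivial coupling P = a b^T. It assumes utils.factorized_square_Euclidean and
# the helper functions used by these two routines are available in this module.
def _check_GW_init(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(15, 2), rng.randn(20, 3)
    A_1, A_2 = utils.factorized_square_Euclidean(X, X)
    B_1, B_2 = utils.factorized_square_Euclidean(Y, Y)
    a, b = np.ones(15) / 15, np.ones(20) / 20
    res_fact = GW_init_factorized(A_1, A_2, B_1, B_2, a, b)
    res_cubic = GW_init_cubic(np.dot(A_1, A_2), np.dot(B_1, B_2), a, b)
    return np.allclose(res_fact, res_cubic)  # expected: True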
#### CUBIC VERSION ####
## Stable version: works for every $\varepsilon$ ##
# Here the costs considered are C = 2 (constant - 2 DPD')
def GW_entropic_distance(
D_1,
D_2,
reg,
a,
b,
Init="trivial",
seed_init=49,
I=10,
delta=1e-6,
delta_sin=1e-9,
num_iter_sin=1000,
lam_sin=0,
LSE=False,
time_out=50,
):
start = time.time()
num_op = 0
acc = []
times = []
list_num_op = []
Couplings = []
n, m = np.shape(a)[0], np.shape(b)[0]
if Init == "trivial":
P = a[:, None] * b[None, :]
Couplings.append(P)
num_op = num_op + n * m
if Init == "lower_bound":
X_new = np.sqrt(np.dot(D_1**2, a).reshape(-1, 1)) # 2 * n * n + n
Y_new = np.sqrt(np.dot(D_2**2, b).reshape(-1, 1)) # 2 * m * m + m
C_init = Square_Euclidean_Distance(X_new, Y_new) # n * m
num_op = num_op + n * m + 2 * n * n + 2 * m * m + n + m
if LSE == False:
u, v, K, count_op_Sin = Sinkhorn(
C_init, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin
P = u[:, None] * K * v[None, :]
num_op = num_op + 2 * n * m
else:
P, count_op_Sin_LSE = LSE_Sinkhorn(
C_init, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin_LSE
Couplings.append(P)
if Init == "random":
np.random.seed(seed_init)
P = np.abs(np.random.randn(n, m))
P = P + 1
P = (P.T * (a / np.sum(P, axis=1))).T
Couplings.append(P)
num_op = num_op + 3 * n * m + n
const_1 = np.dot(
np.dot(D_1**2, a.reshape(-1, 1)), np.ones(len(b)).reshape(1, -1)
) # 2 * n * n + n * m
const_2 = np.dot(
np.ones(len(a)).reshape(-1, 1), np.dot(b.reshape(1, -1), (D_2**2).T)
) # 2 * m * m + n * m
num_op = num_op + 2 * n * m + 2 * n * n + 2 * m * m
const = const_1 + const_2
L = const - 2 * np.dot(np.dot(D_1, P), D_2)
res = np.sum(L * P)
# print(res)
end = time.time()
curr_time = end - start
times.append(curr_time)
acc.append(res)
list_num_op.append(num_op)
err = 1
for k in range(I):
if err < delta:
return (
acc[-1],
np.array(acc),
np.array(times),
np.array(list_num_op),
Couplings,
)
P_prev = P
if LSE == False:
u, v, K, count_op_Sin = Sinkhorn(
2 * L, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin
P = u.reshape((-1, 1)) * K * v.reshape((1, -1))
num_op = num_op + 2 * n * m
else:
P, count_op_Sin_LSE = LSE_Sinkhorn(
2 * L, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin_LSE
L = const - 2 * np.dot(np.dot(D_1, P), D_2)
num_op = num_op + n * n * m + n * m * m + 2 * n * m
res = np.sum(L * P)
# print(res)
if np.isnan(res) == True:
return "Error"
else:
acc.append(res)
Couplings.append(P)
err = np.linalg.norm(P - P_prev)
end = time.time()
curr_time = end - start
times.append(curr_time)
list_num_op.append(num_op)
if curr_time > time_out:
return (
acc[-1],
np.array(acc),
np.array(times),
np.array(list_num_op),
Couplings,
)
return acc[-1], np.array(acc), np.array(times), np.array(list_num_op), Couplings
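# A minimal usage sketch for the cubic entropic solver above (not part of the
# original module): the toy point clouds, the regularisation value and the
# iteration budget below are illustrative assumptions only.
def _example_cubic_entropic_gw(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(30, 2), rng.randn(40, 3)
    # Full pairwise squared Euclidean distance matrices (Square_Euclidean_Distance
    # is defined further down in this module; it is resolved at call time).
    D_1, D_2 = Square_Euclidean_Distance(X, X), Square_Euclidean_Distance(Y, Y)
    a, b = np.ones(30) / 30, np.ones(40) / 40  # uniform marginals
    res, accs, times, ops, couplings = GW_entropic_distance(
        D_1, D_2, reg=0.05, a=a, b=b, Init="lower_bound", I=50
    )
    return res, couplings[-1]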
#### QUAD VERSION ####
## Stable version: works for every $\varepsilon$ ##
# Here the costs considered are C = 2 (constant - 2 DPD')
def Quad_GW_entropic_distance(
A_1,
A_2,
B_1,
B_2,
reg,
a,
b,
Init="trivial",
seed_init=49,
I=10,
delta=1e-6,
delta_sin=1e-9,
num_iter_sin=1000,
lam_sin=0,
time_out=50,
LSE=False,
):
start = time.time()
num_op = 0
acc = []
times = []
list_num_op = []
Couplings = []
n, d1 = np.shape(A_1)
m, d2 = np.shape(B_1)
tilde_A_1 = Feature_Map_Poly(A_1)
tilde_A_2_T = Feature_Map_Poly(A_2.T)
tilde_A_2 = tilde_A_2_T.T
tilde_B_1 = Feature_Map_Poly(B_1)
tilde_B_2_T = Feature_Map_Poly(B_2.T)
tilde_B_2 = tilde_B_2_T.T
num_op = num_op + 2 * n * d1 * d1 + 2 * m * d2 * d2
tilde_a = np.dot(tilde_A_1, np.dot(tilde_A_2, a)) # 2 * d1 * d1 * n
tilde_b = np.dot(tilde_B_1, np.dot(tilde_B_2, b)) # 2 * d2 * d2 * m
c = np.dot(tilde_a, a) + np.dot(tilde_b, b) # n + m
const_1 = np.dot(tilde_a.reshape(-1, 1), np.ones(len(b)).reshape(1, -1)) # n * m
const_2 = np.dot(np.ones(len(a)).reshape(-1, 1), tilde_b.reshape(1, -1)) # n * m
const = const_1 + const_2
num_op = num_op + 2 * d1 * d1 * n + 2 * d2 * d2 * m + 3 * n * m
if Init == "trivial":
P = a[:, None] * b[None, :]
Couplings.append(P)
num_op = num_op + n * m
if Init == "lower_bound":
X_new = np.dot(tilde_A_2, a)
X_new = np.sqrt(np.dot(tilde_A_1, X_new).reshape(-1, 1))
Y_new = np.dot(tilde_B_2, b)
Y_new = np.sqrt(np.dot(tilde_B_1, Y_new).reshape(-1, 1))
C_init = Square_Euclidean_Distance(X_new, Y_new)
num_op = num_op + n * m + 2 * d1 * d1 * n + 2 * d2 * d2 * m + n + m
if LSE == False:
u, v, K, count_op_Sin = Sinkhorn(
C_init, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin
P = u[:, None] * K * v[None, :]
num_op = num_op + 2 * n * m
else:
P, count_op_Sin_LSE = LSE_Sinkhorn(
C_init, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin_LSE
Couplings.append(P)
if Init == "random":
np.random.seed(seed_init)
P = np.abs(np.random.randn(n, m))
P = P + 1
P = (P.T * (a / np.sum(P, axis=1))).T
Couplings.append(P)
num_op = num_op + 3 * n * m + n
C_trans = np.dot(np.dot(A_2, P), B_1) # d1 * n * m + d1 * m * d2
num_op = num_op + d1 * n * m + d1 * d2 * m
C_trans_2 = np.dot(np.dot(B_2, P.T), A_1)
C_f = np.dot(C_trans_2, C_trans)
res = c - 2 * np.trace(C_f)
# print(res)
acc.append(res)
end = time.time()
curr_time = end - start
times.append(curr_time)
list_num_op.append(num_op)
L = const - 2 * np.dot(
np.dot(A_1, C_trans), B_2
) # n * m + n * d1 * d2 + n * d2 * m
num_op = num_op + n * m + n * d1 * d2 + n * d2 * m
err = 1
for k in range(I):
if err < delta:
return (
acc[-1],
np.array(acc),
np.array(times),
np.array(list_num_op),
Couplings,
)
P_prev = P
if LSE == False:
u, v, K, count_op_Sin = Sinkhorn(
2 * L, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
P = u.reshape((-1, 1)) * K * v.reshape((1, -1))
num_op = num_op + count_op_Sin + 2 * n * m
else:
P, count_op_Sin_LSE = LSE_Sinkhorn(
2 * L, reg, a, b, delta=delta_sin, num_iter=num_iter_sin, lam=lam_sin
)
num_op = num_op + count_op_Sin_LSE
C_trans = np.dot(np.dot(A_2, P), B_1)
L = const - 2 * np.dot(np.dot(A_1, C_trans), B_2)
num_op = num_op + d1 * n * m + d2 * n * m + d1 * d2 * n + d1 * d2 * m + n * m
C_trans_2 = np.dot(np.dot(B_2, P.T), A_1)
C_f = np.dot(C_trans_2, C_trans)
res = c - 2 * np.trace(C_f)
# print(res)
if np.isnan(res) == True:
return "Error"
else:
acc.append(res)
Couplings.append(P)
err = np.linalg.norm(P - P_prev)
end = time.time()
curr_time = end - start
times.append(curr_time)
list_num_op.append(num_op)
if curr_time > time_out:
return (
acc[-1],
np.array(acc),
np.array(times),
np.array(list_num_op),
Couplings,
)
return acc[-1], np.array(acc), np.array(times), np.array(list_num_op), Couplings
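# A minimal usage sketch for the quadratic (factorized) solver above: the squared
# Euclidean costs D1 = A_1 A_2 and D2 = B_1 B_2 are built with
# factorized_square_Euclidean (defined at the end of this module). The toy data and
# parameter values are illustrative assumptions, not part of the original code.
def _example_quad_entropic_gw(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(30, 2), rng.randn(40, 3)
    A_1, A_2 = factorized_square_Euclidean(X, X)  # D1 = A_1 A_2, never formed explicitly
    B_1, B_2 = factorized_square_Euclidean(Y, Y)  # D2 = B_1 B_2
    a, b = np.ones(30) / 30, np.ones(40) / 40
    res, accs, times, ops, couplings = Quad_GW_entropic_distance(
        A_1, A_2, B_1, B_2, reg=0.05, a=a, b=b, Init="lower_bound", I=50
    )
    return res, couplings[-1]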
####### GROMOV WASSERSTEIN #######
def update_Quad_cost_GW(D1, D2, Q, R, g):
n, m = np.shape(D1)[0], np.shape(D2)[0]
r = np.shape(g)[0]
cost_trans_1 = np.dot(D1, Q)
cost_trans_1 = -4 * cost_trans_1 / g
cost_trans_2 = np.dot(R.T, D2)
num_op = n * n * r + 2 * n * r + r * m * m
return cost_trans_1, cost_trans_2, num_op
# If C_init = True, cost is a tuple of matrices
# If C_init = False, cost is a function
# Init = 'trivial', 'random', 'lower_bound'
def Quad_LGW_MD(
X,
Y,
a,
b,
rank,
cost,
time_out=200,
max_iter=1000,
delta=1e-3,
gamma_0=10,
gamma_init="rescale",
reg=0,
alpha=1e-10,
C_init=False,
Init="kmeans",
seed_init=49,
reg_init=1e-1,
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=True,
):
start = time.time()
num_op = 0
acc = []
times = []
list_num_op = []
Couplings = []
if gamma_0 * reg >= 1:
# display(Latex(f'Choose $\gamma$ and $\epsilon$ such that $\gamma$ x $\epsilon<1$'))
print("gamma et epsilon must be well choosen")
return "Error"
r = rank
n, m = np.shape(a)[0], np.shape(b)[0]
if C_init == True:
if len(cost) != 2:
print("Error: some cost matrices are missing")
return "Error"
else:
D1, D2 = cost
if rescale_cost == True:
D1, D2 = D1 / D1.max(), D2 / D2.max()
else:
D1, D2 = cost(X, X), cost(Y, Y)
if len(D1) != 1:
print("Error: the cost function is not adapted")
return "Error"
else:
if rescale_cost == True:
D1, D2 = D1 / D1.max(), D2 / D2.max()
########### Initialization ###########
if Init == "kmeans":
g = np.ones(rank) / rank
kmeans_X = KMeans(n_clusters=rank, random_state=0).fit(X)
num_iter_kmeans_X = kmeans_X.n_iter_
Z_X = kmeans_X.cluster_centers_
C_trans_X = utils.Square_Euclidean_Distance(X, Z_X)
C_trans_X = C_trans_X / C_trans_X.max()
results = utils.Sinkhorn(
C_trans_X,
reg_init,
a,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_X, arr_times_X, Q, arr_num_op_X = results
# lb_X = preprocessing.LabelBinarizer()
# lb_X.fit(kmeans_X.labels_)
# Q = lb_X.transform(kmeans_X.labels_)
# Q = (Q.T * a).T
kmeans_Y = KMeans(n_clusters=rank, random_state=0).fit(Y)
num_iter_kmeans_Y = kmeans_Y.n_iter_
Z_Y = kmeans_Y.cluster_centers_
C_trans_Y = utils.Square_Euclidean_Distance(Y, Z_Y)
C_trans_Y = C_trans_Y / C_trans_Y.max()
results = utils.Sinkhorn(
C_trans_Y,
reg_init,
b,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_Y, arr_times_Y, R, arr_num_op_Y = results
# lb_Y = preprocessing.LabelBinarizer()
# lb_Y.fit(kmeans_Y.labels_)
# R = lb_Y.transform(kmeans_Y.labels_)
# R = (R.T * b).T
num_op = (
num_op
+ (num_iter_kmeans_X + np.shape(arr_acc_X)[0]) * rank * np.shape(X)[0]
+ (num_iter_kmeans_Y + np.shape(arr_acc_Y)[0]) * rank * np.shape(Y)[0]
)
## Init Lower bound
if Init == "lower_bound":
X_new = np.sqrt(np.dot(D1**2, a).reshape(-1, 1)) # 2 * n * n + n
Y_new = np.sqrt(np.dot(D2**2, b).reshape(-1, 1)) # 2 * m * m + m
num_op = num_op + 2 * n * n + 2 * m * m
cost_factorized_init = lambda X, Y: factorized_square_Euclidean(X, Y)
cost_init = lambda z1, z2: Square_Euclidean_Distance(z1, z2)
results = LinSinkhorn.Lin_LOT_MD(
X_new,
Y_new,
a,
b,
rank,
cost_init,
cost_factorized_init,
reg=0,
alpha=1e-10,
gamma_0=gamma_0,
max_iter=1000,
delta=1e-3,
time_out=5,
Init="kmeans",
seed_init=49,
C_init=False,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=True,
)
res_init, acc_init, times_init, num_op_init, list_criterion, Q, R, g = results
Couplings.append((Q, R, g))
num_op = num_op + num_op_init[-1]
# print('res: '+str(res_init))
## Init random
if Init == "random":
np.random.seed(seed_init)
g = np.abs(np.random.randn(rank))
g = g + 1 # r
g = g / np.sum(g) # r
Q = np.abs(np.random.randn(n, rank))
Q = Q + 1 # n * r
Q = (Q.T * (a / np.sum(Q, axis=1))).T # n + 2 * n * r
R = np.abs(np.random.randn(m, rank))
R = R + 1 # n * r
R = (R.T * (b / np.sum(R, axis=1))).T # m + 2 * m * r
Couplings.append((Q, R, g))
num_op = num_op + 2 * n * r + 2 * m * r + n + m + 2 * r
## Init trivial
if Init == "trivial":
g = np.ones(rank) / rank
lambda_1 = min(np.min(a), np.min(g), np.min(b)) / 2
a1 = np.arange(1, np.shape(a)[0] + 1)
a1 = a1 / np.sum(a1)
a2 = (a - lambda_1 * a1) / (1 - lambda_1)
b1 = np.arange(1, np.shape(b)[0] + 1)
b1 = b1 / np.sum(b1)
b2 = (b - lambda_1 * b1) / (1 - lambda_1)
g1 = np.arange(1, rank + 1)
g1 = g1 / np.sum(g1)
g2 = (g - lambda_1 * g1) / (1 - lambda_1)
Q = lambda_1 * np.dot(a1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
a2[:, None], g2.reshape(1, -1)
)
R = lambda_1 * np.dot(b1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
b2[:, None], g2.reshape(1, -1)
)
Couplings.append((Q, R, g))
num_op = num_op + 4 * n * r + 4 * m * r + 3 * n + 3 * m + 3 * r
#####################################
if gamma_init == "theory":
gamma = 1 # to compute
if gamma_init == "regularization":
gamma = 1 / reg
if gamma_init == "arbitrary":
gamma = gamma_0
c = np.dot(np.dot(D1**2, a), a) + np.dot(
np.dot(D2**2, b), b
) # 2 * n * n + n + 2 * m * m + m
C1, C2, num_op_update = update_Quad_cost_GW(D1, D2, Q, R, g)
num_op = num_op + 2 * n * n + n + 2 * m * m + m + num_op_update
# GW cost
C_trans = np.dot(C2, R)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G) # \langle -4DPD',P\rangle
GW_trans = c + OT_trans / 2
# print(GW_trans)
acc.append(GW_trans)
end = time.time()
time_actual = end - start
times.append(time_actual)
list_num_op.append(num_op)
err = 1
niter = 0
count_escape = 1
while (niter < max_iter) and (time_actual < time_out):
Q_prev = Q
R_prev = R
g_prev = g
# P_prev = np.dot(Q/g,R.T)
if err > delta:
niter = niter + 1
K1_trans_0 = np.dot(C2, R) # r * m * r
K1_trans_0 = np.dot(C1, K1_trans_0) # n * r * r
grad_Q = K1_trans_0 / g
if reg != 0.0:
grad_Q = grad_Q + reg * np.log(Q)
if gamma_init == "rescale":
# norm_1 = np.linalg.norm(grad_Q)**2
norm_1 = np.max(np.abs(grad_Q)) ** 2
K2_trans_0 = np.dot(C1.T, Q) # r * n * r
K2_trans_0 = np.dot(C2.T, K2_trans_0) # m * r * r
grad_R = K2_trans_0 / g
if reg != 0.0:
grad_R = grad_R + reg * np.log(R)
if gamma_init == "rescale":
# norm_2 = np.linalg.norm(grad_R)**2
norm_2 = np.max(np.abs(grad_R)) ** 2
omega = np.diag(np.dot(Q.T, K1_trans_0)) # r * n * r
grad_g = -(omega / (g**2))
if reg != 0.0:
grad_g = grad_g + reg * np.log(g)
if gamma_init == "rescale":
# norm_3 = np.linalg.norm(grad_g)**2
norm_3 = np.max(np.abs(grad_g)) ** 2
if gamma_init == "rescale":
gamma = gamma_0 / max(norm_1, norm_2, norm_3)
C1_trans = grad_Q - (1 / gamma) * np.log(Q) # 3 * n * r
C2_trans = grad_R - (1 / gamma) * np.log(R) # 3 * m * r
C3_trans = grad_g - (1 / gamma) * np.log(g) # 4 * r
num_op = (
num_op + 3 * n * r * r + 2 * m * r * r + 3 * n * r + 3 * m * r + 4 * r
)
# Update the coupling
if method == "IBP":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
Q, R, g = LinSinkhorn.LR_IBP_Sin(
K1,
K2,
K3,
a,
b,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
if method == "Dykstra":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
num_op = num_op + 2 * n * r + 2 * m * r + 2 * r
Q, R, g, count_op_Dysktra, n_iter_Dykstra = LinSinkhorn.LR_Dykstra_Sin(
K1,
K2,
K3,
a,
b,
alpha,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra
if method == "Dykstra_LSE":
Q, R, g, count_op_Dysktra_LSE = LinSinkhorn.LR_Dykstra_LSE_Sin(
C1_trans,
C2_trans,
C3_trans,
a,
b,
alpha,
gamma,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra_LSE
# Update the total cost
C1, C2, num_op_update = update_Quad_cost_GW(D1, D2, Q, R, g)
num_op = num_op + num_op_update
# GW cost
C_trans = np.dot(C2, R)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G) # \langle -4DPD',P\rangle
GW_trans = c + OT_trans / 2
# print(GW_trans)
if np.isnan(GW_trans) == True:
print("Error LR-GW: GW cost", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
## Update the error: Practical error
# err = np.abs(GW_trans - acc[-1]) / acc[-1]
# err = np.abs(GW_trans - acc[-1]) / np.log(num_op - list_num_op[-1])
## Update error: difference between couplings
# P_act = np.dot(Q/g,R.T)
# err = np.linalg.norm(P_act - P_prev)
# print(err)
            ## Update the error: theoretical error
err_1 = ((1 / gamma) ** 2) * (KL(Q, Q_prev) + KL(Q_prev, Q))
err_2 = ((1 / gamma) ** 2) * (KL(R, R_prev) + KL(R_prev, R))
err_3 = ((1 / gamma) ** 2) * (KL(g, g_prev) + KL(g_prev, g))
criterion = err_1 + err_2 + err_3
# print(criterion)
if niter > 1:
if criterion > delta / 1e-1:
err = criterion
else:
count_escape = count_escape + 1
if count_escape != niter:
err = criterion
if np.isnan(criterion) == True:
print("Error LR-GW: stopping criterion", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
acc.append(GW_trans)
Couplings.append((Q, R, g))
time_actual = time.time() - start
times.append(time_actual)
list_num_op.append(num_op)
else:
break
return acc[-1], np.array(acc), np.array(times), np.array(list_num_op), Couplings
def apply_quad_lr_gw(
X, Y, a, b, rank, cost, gamma_0=10, rescale_cost=True, time_out=50
):
if type(cost) == types.FunctionType:
res, arr_acc, arr_times, arr_list_num_op, Couplings = Quad_LGW_MD(
X,
Y,
a,
b,
rank,
cost,
time_out=time_out,
max_iter=1000,
delta=1e-3,
gamma_0=gamma_0,
gamma_init="rescale",
reg=0,
alpha=1e-10,
C_init=False,
Init="kmeans",
seed_init=49,
reg_init=1e-1,
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
else:
res, arr_acc, arr_times, arr_list_num_op, Couplings = Quad_LGW_MD(
X,
Y,
a,
b,
rank,
cost,
time_out=time_out,
max_iter=1000,
delta=1e-3,
gamma_0=gamma_0,
gamma_init="rescale",
reg=0,
alpha=1e-10,
C_init=True,
Init="kmeans",
seed_init=49,
reg_init=1e-1,
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
Q, R, g = Couplings[-1]
return res, Q, R, g
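# Sanity-check sketch for the low-rank output above (illustrative; the tolerance is
# an assumption): the factors (Q, R, g) encode the coupling P = Q diag(1/g) R^T,
# whose marginals should match a and b up to the solver's precision.
def _check_lr_coupling(Q, R, g, a, b, atol=1e-4):
    P = np.dot(Q / g, R.T)
    ok_rows = np.allclose(P.sum(axis=1), a, atol=atol)
    ok_cols = np.allclose(P.sum(axis=0), b, atol=atol)
    return P, ok_rows and ok_cols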
def update_Lin_cost_GW(D11, D12, D21, D22, Q, R, g):
n, d1 = np.shape(D11)
m, d2 = np.shape(D21)
r = np.shape(g)[0]
cost_trans_1 = np.dot(D12, Q) # d1 * n * r
cost_trans_1 = -4 * np.dot(
D11, cost_trans_1 / g
) # n * d1 * r + d1 * r + n * r # size: n * r
cost_trans_2 = np.dot(R.T, D21) # r * m * d2
cost_trans_2 = np.dot(cost_trans_2, D22) # r * d2 * m # size: r * m
num_op = 2 * n * r * d1 + 2 * r * d2 * m + d1 * r + n * r
return cost_trans_1, cost_trans_2, num_op
# If C_init = True, cost_factorized is a tuple of matrices (D11,D12,D21,D22)
# D1 = D11D12, D2 = D21D22
# If C_init = False, cost_factorized is a function
# Init = 'trivial', 'random', 'lower_bound'
def Lin_LGW_MD(
X,
Y,
a,
b,
rank,
cost_factorized,
time_out=50,
max_iter=1000,
delta=1e-3,
gamma_0=10,
gamma_init="rescale",
reg=0,
alpha=1e-10,
C_init=False,
Init="kmeans",
seed_init=49,
reg_init=1e-1,
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=True,
):
start = time.time()
num_op = 0
acc = []
times = []
list_num_op = []
Couplings = []
list_niter_Dykstra = []
if gamma_0 * reg >= 1:
# display(Latex(f'Choose $\gamma$ and $\epsilon$ such that $\gamma$ x $\epsilon<1$'))
print("gamma et epsilon must be well choosen")
return "Error"
if C_init == True:
if len(cost_factorized) != 4:
print("Error: some cost matrices are missing")
return "Error"
else:
D11, D12, D21, D22 = cost_factorized
if rescale_cost == True:
D11, D12, D21, D22 = (
D11 / np.sqrt(np.max(D11)),
D12 / np.sqrt(np.max(D12)),
D21 / np.sqrt(np.max(D21)),
D22 / np.sqrt(np.max(D22)),
)
else:
D1 = cost_factorized(X, X)
if len(D1) != 2:
print("Error: the cost function is not adapted")
return "Error"
else:
D11, D12 = D1
D21, D22 = cost_factorized(Y, Y)
if rescale_cost == True:
D11, D12, D21, D22 = (
D11 / np.sqrt(np.max(D11)),
D12 / np.sqrt(np.max(D12)),
D21 / np.sqrt(np.max(D21)),
D22 / np.sqrt(np.max(D22)),
)
r = rank
n, d1 = np.shape(D11)
m, d2 = np.shape(D21)
########### Initialization ###########
if Init == "kmeans":
g = np.ones(rank) / rank
kmeans_X = KMeans(n_clusters=rank, random_state=0).fit(X)
num_iter_kmeans_X = kmeans_X.n_iter_
Z_X = kmeans_X.cluster_centers_
C_trans_X = utils.Square_Euclidean_Distance(X, Z_X)
C_trans_X = C_trans_X / C_trans_X.max()
results = utils.Sinkhorn(
C_trans_X,
reg_init,
a,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_X, arr_times_X, Q, arr_num_op_X = results
# lb_X = preprocessing.LabelBinarizer()
# lb_X.fit(kmeans_X.labels_)
# Q = lb_X.transform(kmeans_X.labels_)
# Q = (Q.T * a).T
kmeans_Y = KMeans(n_clusters=rank, random_state=0).fit(Y)
num_iter_kmeans_Y = kmeans_Y.n_iter_
Z_Y = kmeans_Y.cluster_centers_
C_trans_Y = utils.Square_Euclidean_Distance(Y, Z_Y)
C_trans_Y = C_trans_Y / C_trans_Y.max()
results = utils.Sinkhorn(
C_trans_Y,
reg_init,
b,
g,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
time_out=1e100,
)
res, arr_acc_Y, arr_times_Y, R, arr_num_op_Y = results
# lb_Y = preprocessing.LabelBinarizer()
# lb_Y.fit(kmeans_Y.labels_)
# R = lb_Y.transform(kmeans_Y.labels_)
# R = (R.T * b).T
num_op = (
num_op
+ (num_iter_kmeans_X + np.shape(arr_acc_X)[0]) * rank * np.shape(X)[0]
+ (num_iter_kmeans_Y + np.shape(arr_acc_Y)[0]) * rank * np.shape(Y)[0]
)
## Init Lower bound
if Init == "lower_bound":
tilde_D11 = Feature_Map_Poly(D11) # n * d1 * d1
tilde_D12_T = Feature_Map_Poly(D12.T) # n * d1 * d1
tilde_D12 = tilde_D12_T.T
tilde_D21 = Feature_Map_Poly(D21) # m * d2 * d2
tilde_D22_T = Feature_Map_Poly(D22.T) # m * d2 * d2
tilde_D22 = tilde_D22_T.T
X_new = np.dot(tilde_D12, a) # d1 * d1 * n
X_new = np.sqrt(np.dot(tilde_D11, X_new).reshape(-1, 1)) # n * d1 * d1 + n
Y_new = np.dot(tilde_D22, b) # d2 * d2 * m
Y_new = np.sqrt(np.dot(tilde_D21, Y_new).reshape(-1, 1)) # m * d2 * d2 + m
num_op = num_op + 4 * n * d1 * d1 + 4 * m * d2 * d2 + 4 * n + 4 * n
cost_factorized_init = lambda X, Y: factorized_square_Euclidean(
X, Y
) # 3 * m + 3 * n
cost_init = lambda z1, z2: Square_Euclidean_Distance(z1, z2)
results = LinSinkhorn.Lin_LOT_MD(
X_new,
Y_new,
a,
b,
rank,
cost_init,
cost_factorized_init,
reg=0,
alpha=1e-10,
gamma_0=gamma_0,
max_iter=1000,
delta=1e-3,
time_out=5,
Init="kmeans",
seed_init=49,
C_init=False,
reg_init=1e-1,
gamma_init="rescale",
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=True,
)
(
res_init,
acc_init,
times_init,
num_op_init,
list_criterion_init,
Q,
R,
g,
) = results
Couplings.append((Q, R, g))
num_op = num_op + num_op_init[-1]
# print('res: '+str(res_init))
## Init random
if Init == "random":
np.random.seed(seed_init)
g = np.abs(np.random.randn(rank))
g = g + 1
g = g / np.sum(g)
n, d = np.shape(X)
m, d = np.shape(Y)
Q = np.abs(np.random.randn(n, rank))
Q = Q + 1
Q = (Q.T * (a / np.sum(Q, axis=1))).T
R = np.abs(np.random.randn(m, rank))
R = R + 1
R = (R.T * (b / np.sum(R, axis=1))).T
Couplings.append((Q, R, g))
num_op = num_op + 2 * n * r + 2 * m * r + n + m + 2 * r
## Init trivial
if Init == "trivial":
g = np.ones(rank) / rank
lambda_1 = min(np.min(a), np.min(g), np.min(b)) / 2
a1 = np.arange(1, np.shape(a)[0] + 1)
a1 = a1 / np.sum(a1)
a2 = (a - lambda_1 * a1) / (1 - lambda_1)
b1 = np.arange(1, np.shape(b)[0] + 1)
b1 = b1 / np.sum(b1)
b2 = (b - lambda_1 * b1) / (1 - lambda_1)
g1 = np.arange(1, rank + 1)
g1 = g1 / np.sum(g1)
g2 = (g - lambda_1 * g1) / (1 - lambda_1)
Q = lambda_1 * np.dot(a1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
a2[:, None], g2.reshape(1, -1)
)
R = lambda_1 * np.dot(b1[:, None], g1.reshape(1, -1)) + (1 - lambda_1) * np.dot(
b2[:, None], g2.reshape(1, -1)
)
Couplings.append((Q, R, g))
num_op = num_op + 4 * n * r + 4 * m * r + 3 * n + 3 * m + 3 * r
#####################################
if gamma_init == "theory":
gamma = 1
if gamma_init == "regularization":
gamma = 1 / reg
if gamma_init == "arbitrary":
gamma = gamma_0
tilde_D11 = Feature_Map_Poly(D11) # n * d1 * d1
tilde_D12_T = Feature_Map_Poly(D12.T) # n * d1 * d1
tilde_D12 = tilde_D12_T.T
tilde_D21 = Feature_Map_Poly(D21) # m * d2 * d2
tilde_D22_T = Feature_Map_Poly(D22.T) # m * d2 * d2
tilde_D22 = tilde_D22_T.T
a_tilde = np.dot(
np.dot(tilde_D12, a), np.dot(np.transpose(tilde_D11), a)
) # 2 * d1 * d1 * n + d1 * d1
b_tilde = np.dot(
np.dot(tilde_D22, b), np.dot(np.transpose(tilde_D21), b)
) # 2 * m * d2 * d2 + d2 * d2
c = a_tilde + b_tilde
num_op = num_op + 4 * n * d1 * d1 + 4 * m * d2 * d2 + d1 * d1 + d2 * d2
C1, C2, num_op_update = update_Lin_cost_GW(D11, D12, D21, D22, Q, R, g)
num_op = num_op + num_op_update
C_trans = np.dot(C2, R)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G) # \langle -4DPD',P\rangle
GW_trans = c + OT_trans / 2
# print(GW_trans)
acc.append(GW_trans)
end = time.time()
time_actual = end - start
times.append(time_actual)
list_num_op.append(num_op)
err = 1
niter = 0
count_escape = 1
while (niter < max_iter) and (time_actual < time_out):
Q_prev = Q
R_prev = R
g_prev = g
# P_prev = np.dot(Q/g,R.T)
if err > delta:
niter = niter + 1
K1_trans_0 = np.dot(C2, R) # d * m * r
K1_trans_0 = np.dot(C1, K1_trans_0) # n * d * r
grad_Q = K1_trans_0 / g
if reg != 0.0:
grad_Q = grad_Q + reg * np.log(Q)
if gamma_init == "rescale":
norm_1 = np.max(np.abs(grad_Q)) ** 2
K2_trans_0 = np.dot(C1.T, Q) # d * n * r
K2_trans_0 = np.dot(C2.T, K2_trans_0) # m * d * r
grad_R = K2_trans_0 / g
if reg != 0.0:
grad_R = grad_R + reg * np.log(R)
if gamma_init == "rescale":
norm_2 = np.max(np.abs(grad_R)) ** 2
omega = np.diag(np.dot(Q.T, K1_trans_0)) # r * n * r
grad_g = -omega / (g**2)
if reg != 0.0:
grad_g = grad_g + reg * np.log(g)
if gamma_init == "rescale":
norm_3 = np.max(np.abs(grad_g)) ** 2
if gamma_init == "rescale":
gamma = gamma_0 / max(norm_1, norm_2, norm_3)
C1_trans = grad_Q - (1 / gamma) * np.log(Q) # 3 * n * r
C2_trans = grad_R - (1 / gamma) * np.log(R) # 3 * m * r
C3_trans = grad_g - (1 / gamma) * np.log(g) # 4 * r
num_op = (
num_op + 3 * n * r * r + 2 * m * r * r + 3 * n * r + 3 * m * r + 4 * r
)
# Update the coupling
if method == "IBP":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
Q, R, g = LinSinkhorn.LR_IBP_Sin(
K1,
K2,
K3,
a,
b,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
if method == "Dykstra":
K1 = np.exp((-gamma) * C1_trans)
K2 = np.exp((-gamma) * C2_trans)
K3 = np.exp((-gamma) * C3_trans)
num_op = num_op + 2 * n * r + 2 * m * r + 2 * r
Q, R, g, count_op_Dysktra, n_iter_Dykstra = LinSinkhorn.LR_Dykstra_Sin(
K1,
K2,
K3,
a,
b,
alpha,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra
list_niter_Dykstra.append(n_iter_Dykstra)
if method == "Dykstra_LSE":
Q, R, g, count_op_Dysktra_LSE = LinSinkhorn.LR_Dykstra_LSE_Sin(
C1_trans,
C2_trans,
C3_trans,
a,
b,
alpha,
gamma,
max_iter=max_iter_IBP,
delta=delta_IBP,
lam=lam_IBP,
)
num_op = num_op + count_op_Dysktra_LSE
# Update the total cost
C1, C2, num_op_update = update_Lin_cost_GW(D11, D12, D21, D22, Q, R, g)
num_op = num_op + num_op_update
# GW cost
C_trans = np.dot(C2, R)
C_trans = np.dot(C1, C_trans)
C_trans = C_trans / g
G = np.dot(Q.T, C_trans)
OT_trans = np.trace(G) # \langle -4DPD',P\rangle
GW_trans = c + OT_trans / 2
# print(GW_trans)
if np.isnan(GW_trans) == True:
print("Error LR-GW: GW cost", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
            ## Update the error: theoretical error
# err_1 = ((1/gamma)**2) * (KL(Q,Q_prev) + KL(Q_prev,Q))
# err_2 = ((1/gamma)**2) * (KL(R,R_prev) + KL(R_prev,R))
# err_3 = ((1/gamma)**2) * (KL(g,g_prev) + KL(g_prev,g))
# err = err_1 + err_2 + err_3
## Update the error: Practical error
# err = np.abs(GW_trans - acc[-1]) / acc[-1]
# err = np.abs(GW_trans - acc[-1]) / np.log(num_op - list_num_op[-1])
## Update error: difference between couplings
# P_act = np.dot(Q/g,R.T)
# err = np.linalg.norm(P_act - P_prev)
# print(err)
err_1 = ((1 / gamma) ** 2) * (KL(Q, Q_prev) + KL(Q_prev, Q))
err_2 = ((1 / gamma) ** 2) * (KL(R, R_prev) + KL(R_prev, R))
err_3 = ((1 / gamma) ** 2) * (KL(g, g_prev) + KL(g_prev, g))
criterion = err_1 + err_2 + err_3
# print(criterion)
if niter > 1:
if criterion > delta / 1e-1:
err = criterion
else:
count_escape = count_escape + 1
if count_escape != niter:
err = criterion
if np.isnan(criterion) == True:
print("Error LR-GW: stopping criterion", niter)
Q = Q_prev
R = R_prev
g = g_prev
break
            # here we could keep the error equal to one at every iteration
            # err = 1
acc.append(GW_trans)
Couplings.append((Q, R, g))
end = time.time()
time_actual = end - start
times.append(time_actual)
list_num_op.append(num_op)
else:
break
return (
acc[-1],
np.array(acc),
np.array(times),
np.array(list_num_op),
Couplings,
np.array(list_niter_Dykstra),
)
def apply_lin_lr_gw(
X, Y, a, b, rank, cost_factorized, gamma_0=10, rescale_cost=True, time_out=50
):
if type(cost_factorized) == types.FunctionType:
(
res,
arr_acc,
arr_times,
arr_list_num_op,
Couplings,
arr_list_niter_Dykstra,
) = Lin_LGW_MD(
X,
Y,
a,
b,
rank,
cost_factorized,
time_out=time_out,
max_iter=1000,
delta=1e-3,
gamma_0=gamma_0,
gamma_init="rescale",
reg=0,
alpha=1e-10,
C_init=False,
Init="kmeans",
seed_init=49,
reg_init=1e-1,
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
else:
(
res,
arr_acc,
arr_times,
arr_list_num_op,
Couplings,
arr_list_niter_Dykstra,
) = Lin_LGW_MD(
X,
Y,
a,
b,
rank,
cost_factorized,
time_out=time_out,
max_iter=1000,
delta=1e-3,
gamma_0=gamma_0,
gamma_init="rescale",
reg=0,
alpha=1e-10,
C_init=True,
Init="kmeans",
seed_init=49,
reg_init=1e-1,
method="Dykstra",
max_iter_IBP=10000,
delta_IBP=1e-3,
lam_IBP=0,
rescale_cost=rescale_cost,
)
Q, R, g = Couplings[-1]
return res, Q, R, g
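# Illustrative alternative to the tuple form used in the demo at the top of this
# file: cost_factorized may also be passed as a function (the C_init=False path),
# e.g. the factorized squared Euclidean cost defined at the end of this module.
def _example_lin_lr_gw_with_cost_fn(X, Y, a, b, rank=10):
    return apply_lin_lr_gw(
        X, Y, a, b, rank, factorized_square_Euclidean,
        gamma_0=10, rescale_cost=True, time_out=50,
    )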
def Sinkhorn(C, reg, a, b, delta=1e-9, num_iter=1000, lam=1e-6):
n, m = np.shape(C)
# K = np.exp(-C/reg)
# Next 3 lines equivalent to K= np.exp(-C/reg), but faster to compute
K = np.empty(C.shape, dtype=C.dtype)
np.divide(C, -reg, out=K) # n * m
np.exp(K, out=K) # n * m
u = np.ones(np.shape(a)[0]) # /np.shape(a)[0]
v = np.ones(np.shape(b)[0]) # /np.shape(b)[0]
v_trans = np.dot(K.T, u) + lam # add regularization to avoid divide 0
err = 1
index = 0
while index < num_iter:
uprev = u
vprev = v
if err > delta:
index = index + 1
v = b / v_trans
u_trans = np.dot(K, v) + lam # add regularization to avoid divide 0
u = a / u_trans
if (
np.any(np.isnan(u))
or np.any(np.isnan(v))
or np.any(np.isinf(u))
or np.any(np.isinf(v))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical errors at iteration", index)
u = uprev
v = vprev
break
v_trans = np.dot(K.T, u) + lam
err = np.sum(np.abs(v * v_trans - b))
else:
num_op = 3 * n * m + (index + 1) * (2 * n * m + n + m)
return u, v, K, num_op
num_op = 3 * n * m + (index + 1) * (2 * n * m + n + m)
return u, v, K, num_op
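# Illustrative sanity check for the Sinkhorn solver above (toy inputs assumed):
# the scalings (u, v) and kernel K define the transport plan P = diag(u) K diag(v),
# whose row sums should be close to a and whose column sums should be close to b.
def _check_sinkhorn_marginals(C, reg, a, b):
    u, v, K, _ = Sinkhorn(C, reg, a, b)
    P = u[:, None] * K * v[None, :]
    return np.abs(P.sum(axis=1) - a).max(), np.abs(P.sum(axis=0) - b).max()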
def LSE_Sinkhorn(C, reg, a, b, num_iter=1000, delta=1e-3, lam=0):
f = np.zeros(np.shape(a)[0])
g = np.zeros(np.shape(b)[0])
n, m = np.shape(C)
C_tilde = f[:, None] + g[None, :] - C # 2 * n * m
C_tilde = C_tilde / reg # n * m
P = np.exp(C_tilde)
err = 1
n_iter = 0
while n_iter < num_iter:
P_prev = P
if err > delta:
n_iter = n_iter + 1
# Update f
f = reg * np.log(a) + f - reg * scipy.special.logsumexp(C_tilde, axis=1)
# Update g
C_tilde = f[:, None] + g[None, :] - C
C_tilde = C_tilde / reg
g = reg * np.log(b) + g - reg * scipy.special.logsumexp(C_tilde, axis=0)
if (
np.any(np.isnan(f))
or np.any(np.isnan(g))
or np.any(np.isinf(f))
or np.any(np.isinf(g))
):
# we have reached the machine precision
# come back to previous solution and quit loop
print("Warning: numerical errors at iteration", n_iter)
P = P_prev
break
# Update the error
C_tilde = f[:, None] + g[None, :] - C
C_tilde = C_tilde / reg
P = np.exp(C_tilde)
err = np.sum(np.abs(np.sum(P, axis=1) - a))
else:
num_op = 4 * n * m + (n_iter + 1) * (8 * n * m + 3 * n + 3 * m) + n * m
return P, num_op
num_op = 4 * n * m + (n_iter + 1) * (8 * n * m + 3 * n + 3 * m) + n * m
return P, num_op
## Feature map of k(x,y) = \langle x,y\rangle ** 2 ##
def Feature_Map_Poly(X):
n, d = np.shape(X)
X_new = np.zeros((n, d**2))
for i in range(n):
x = X[i, :][:, None]
X_new[i, :] = np.dot(x, x.T).reshape(-1)
return X_new
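# Quick check (illustrative) of the identity behind Feature_Map_Poly: for the
# polynomial kernel k(x, y) = <x, y>**2 one has phi(x) = vec(x x^T), hence
# Phi_X Phi_Y^T equals (X Y^T)**2 entrywise (X and Y must share the same dimension).
def _check_poly_feature_map(X, Y):
    lhs = np.dot(Feature_Map_Poly(X), Feature_Map_Poly(Y).T)
    rhs = np.dot(X, Y.T) ** 2
    return np.allclose(lhs, rhs)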
def Square_Euclidean_Distance(X, Y):
"""Returns the matrix of $|x_i-y_j|^2$."""
X_col = X[:, np.newaxis]
Y_lin = Y[np.newaxis, :]
C = np.sum((X_col - Y_lin) ** 2, 2)
# D = (np.sum(X ** 2, 1)[:, np.newaxis] - 2 * np.dot(X, Y.T) + np.sum(Y ** 2, 1))
return C
# shape of xs: num_samples * dimension
def factorized_square_Euclidean(xs, xt):
square_norm_s = np.sum(xs**2, axis=1)
square_norm_t = np.sum(xt**2, axis=1)
A_1 = np.zeros((np.shape(xs)[0], 2 + np.shape(xs)[1]))
A_1[:, 0] = square_norm_s
A_1[:, 1] = np.ones(np.shape(xs)[0])
A_1[:, 2:] = -2 * xs
A_2 = np.zeros((2 + np.shape(xs)[1], np.shape(xt)[0]))
A_2[0, :] = np.ones(np.shape(xt)[0])
A_2[1, :] = square_norm_t
A_2[2:, :] = xt.T
return A_1, A_2
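# Quick check (illustrative) that the factorization above reproduces the full cost:
# A_1 A_2 should equal the squared Euclidean distance matrix between xs and xt.
def _check_factorized_cost(xs, xt):
    A_1, A_2 = factorized_square_Euclidean(xs, xt)
    return np.allclose(np.dot(A_1, A_2), Square_Euclidean_Distance(xs, xt))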
| 44,843 | 29.076459 | 93 |
py
|
evaluate
|
evaluate-main/setup.py
|
# Lint as: python3
""" HuggingFace/Evaluate is an open library for evaluation.
Note:
VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention
(we need to follow this convention to be able to retrieve versioned scripts)
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
0. Prerequisites:
- Dependencies:
- twine: "pip install twine"
- Create an account in (and join the 'evaluate' project):
- PyPI: https://pypi.org/
- Test PyPI: https://test.pypi.org/
1. Change the version in:
- __init__.py
- setup.py
2. Commit these changes: "git commit -m 'Release: VERSION'"
3. Add a tag in git to mark the release: "git tag VERSION -m 'Add tag VERSION for pypi'"
Push the tag to remote: git push --tags origin main
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
First, delete any "build" directory that may exist from previous builds.
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv/notebook by running:
pip install huggingface_hub fsspec aiohttp
pip install -U tqdm
pip install -i https://testpypi.python.org/pypi evaluate
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Fill release notes in the tag in github once everything is looking hunky-dory.
8. Change the version in __init__.py and setup.py to X.X.X+1.dev0 (e.g. VERSION=1.18.3 -> 1.18.4.dev0).
Then push the change with a message 'set dev version'
"""
import os
from setuptools import find_packages, setup
REQUIRED_PKGS = [
# We need datasets as a backend
"datasets>=2.0.0",
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
"numpy>=1.17",
# For smart caching dataset processing
"dill",
# For performance gains with apache arrow
"pandas",
# for downloading datasets over HTTPS
"requests>=2.19.0",
# progress bars in download and scripts
"tqdm>=4.62.1",
# for fast hashing
"xxhash",
# for better multiprocessing
"multiprocess",
# to get metadata of optional dependencies such as torch or tensorflow for Python versions that don't have it
"importlib_metadata;python_version<'3.8'",
# to save datasets locally or on any filesystem
# minimum 2021.05.0 to have the AbstractArchiveFileSystem
"fsspec[http]>=2021.05.0",
# To get datasets from the Datasets Hub on huggingface.co
"huggingface-hub>=0.7.0",
# Utilities from PyPA to e.g., compare versions
"packaging",
"responses<0.19",
]
TEMPLATE_REQUIRE = [
# to populate metric template
"cookiecutter",
# for the gradio widget
"gradio>=3.0.0"
]
EVALUATOR_REQUIRE = [
"transformers",
# for bootstrap computations in Evaluator
"scipy>=1.7.1",
]
TESTS_REQUIRE = [
# test dependencies
"absl-py",
"charcut>=1.1.1", # for charcut_mt
"cer>=1.2.0", # for characTER
"nltk", # for NIST and probably others
"pytest",
"pytest-datadir",
"pytest-xdist",
# optional dependencies
"tensorflow>=2.3,!=2.6.0,!=2.6.1, <=2.10",
"torch",
# metrics dependencies
"accelerate", # for frugalscore (calls transformers' Trainer)
"bert_score>=0.3.6",
"rouge_score>=0.1.2",
"sacrebleu",
"sacremoses",
"scipy",
"seqeval",
"scikit-learn",
"jiwer",
"sentencepiece", # for bleurt
"transformers", # for evaluator
"mauve-text",
"trectools",
# to speed up pip backtracking
"toml>=0.10.1",
"requests_file>=1.5.1",
"tldextract>=3.1.0",
"texttable>=1.6.3",
"unidecode>=1.3.4",
"Werkzeug>=1.0.1",
"six~=1.15.0",
]
QUALITY_REQUIRE = ["black~=22.0", "flake8>=3.8.3", "isort>=5.0.0", "pyyaml>=5.3.1"]
EXTRAS_REQUIRE = {
"tensorflow": ["tensorflow>=2.2.0,!=2.6.0,!=2.6.1"],
"tensorflow_gpu": ["tensorflow-gpu>=2.2.0,!=2.6.0,!=2.6.1"],
"torch": ["torch"],
"dev": TESTS_REQUIRE + QUALITY_REQUIRE,
"tests": TESTS_REQUIRE,
"quality": QUALITY_REQUIRE,
"docs": [
# Might need to add doc-builder and some specific deps in the future
"s3fs",
],
"template": TEMPLATE_REQUIRE,
"evaluator": EVALUATOR_REQUIRE
}
setup(
name="evaluate",
version="0.4.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
description="HuggingFace community-driven open-source library of evaluation",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
author="HuggingFace Inc.",
author_email="[email protected]",
url="https://github.com/huggingface/evaluate",
download_url="https://github.com/huggingface/evaluate/tags",
license="Apache 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
entry_points={"console_scripts": ["evaluate-cli=evaluate.commands.evaluate_cli:main"]},
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.7.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="metrics machine learning evaluate evaluation",
zip_safe=False, # Required for mypy to find the py.typed file
)
| 6,346 | 31.88601 | 116 |
py
|
evaluate
|
evaluate-main/comparisons/mcnemar/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("mcnemar", module_type="comparison")
launch_gradio_widget(module)
| 155 | 21.285714 | 59 |
py
|
evaluate
|
evaluate-main/comparisons/mcnemar/mcnemar.py
|
# Copyright 2022 The HuggingFace Evaluate Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""McNemar test for model comparison."""
import datasets
from scipy.stats import chi2
import evaluate
_DESCRIPTION = """
McNemar's test is a diagnostic test over a contingency table resulting from the predictions of two classifiers. The test compares the sensitivity and specificity of the diagnostic tests on the same group of reference labels. It can be computed with:
McNemar = (SE - SP)**2 / (SE + SP)
Where:
SE: Sensitivity (Test 1 positive; Test 2 negative)
SP: Specificity (Test 1 negative; Test 2 positive)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions1 (`list` of `int`): Predicted labels for model 1.
predictions2 (`list` of `int`): Predicted labels for model 2.
references (`list` of `int`): Ground truth labels.
Returns:
stat (`float`): McNemar test score.
p (`float`): The p value. Minimum possible value is 0. Maximum possible value is 1.0. A lower p value means a more significant difference.
Examples:
>>> mcnemar = evaluate.load("mcnemar")
>>> results = mcnemar.compute(references=[1, 0, 1], predictions1=[1, 1, 1], predictions2=[1, 0, 1])
>>> print(results)
{'stat': 1.0, 'p': 0.31731050786291115}
"""
_CITATION = """
@article{mcnemar1947note,
title={Note on the sampling error of the difference between correlated proportions or percentages},
author={McNemar, Quinn},
journal={Psychometrika},
volume={12},
number={2},
pages={153--157},
year={1947},
publisher={Springer-Verlag}
}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class McNemar(evaluate.Comparison):
def _info(self):
return evaluate.ComparisonInfo(
module_type="comparison",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions1": datasets.Value("int64"),
"predictions2": datasets.Value("int64"),
"references": datasets.Value("int64"),
}
),
)
def _compute(self, predictions1, predictions2, references):
# construct contingency table
tbl = [[0, 0], [0, 0]]
for gt, p1, p2 in zip(references, predictions1, predictions2):
if p1 == gt and p2 == gt:
tbl[0][0] += 1
elif p1 == gt:
tbl[0][1] += 1
elif p2 == gt:
tbl[1][0] += 1
else:
tbl[1][1] += 1
# compute statistic
b, c = tbl[0][1], tbl[1][0]
statistic = abs(b - c) ** 2 / (1.0 * (b + c))
df = 1
pvalue = chi2.sf(statistic, df)
return {"stat": statistic, "p": pvalue}
| 3,343 | 32.777778 | 246 |
py
|
evaluate
|
evaluate-main/comparisons/wilcoxon/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("wilcoxon", module_type="comparison")
launch_gradio_widget(module)
| 156 | 21.428571 | 60 |
py
|
evaluate
|
evaluate-main/comparisons/wilcoxon/wilcoxon.py
|
# Copyright 2022 The HuggingFace Evaluate Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wilcoxon test for model comparison."""
import datasets
from scipy.stats import wilcoxon
import evaluate
_DESCRIPTION = """
Wilcoxon's test is a non-parametric signed-rank test that tests whether the distribution of the differences is symmetric about zero. It can be used to compare the predictions of two models.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions1 (`list` of `float`): Predictions for model 1.
predictions2 (`list` of `float`): Predictions for model 2.
Returns:
stat (`float`): Wilcoxon test score.
p (`float`): The p value. Minimum possible value is 0. Maximum possible value is 1.0. A lower p value means a more significant difference.
Examples:
>>> wilcoxon = evaluate.load("wilcoxon")
>>> results = wilcoxon.compute(predictions1=[-7, 123.45, 43, 4.91, 5], predictions2=[1337.12, -9.74, 1, 2, 3.21])
>>> print(results)
{'stat': 5.0, 'p': 0.625}
"""
_CITATION = """
@incollection{wilcoxon1992individual,
title={Individual comparisons by ranking methods},
author={Wilcoxon, Frank},
booktitle={Breakthroughs in statistics},
pages={196--202},
year={1992},
publisher={Springer}
}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Wilcoxon(evaluate.Comparison):
def _info(self):
return evaluate.ComparisonInfo(
module_type="comparison",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions1": datasets.Value("float"),
"predictions2": datasets.Value("float"),
}
),
)
def _compute(self, predictions1, predictions2):
# calculate difference
d = [p1 - p2 for (p1, p2) in zip(predictions1, predictions2)]
# compute statistic
res = wilcoxon(d)
return {"stat": res.statistic, "p": res.pvalue}
| 2,580 | 31.670886 | 189 |
py
|
evaluate
|
evaluate-main/comparisons/exact_match/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("exact_match", module_type="comparison")
launch_gradio_widget(module)
| 159 | 21.857143 | 63 |
py
|
evaluate
|
evaluate-main/comparisons/exact_match/exact_match.py
|
# Copyright 2022 The HuggingFace Evaluate Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exact match test for model comparison."""
import datasets
import numpy as np
import evaluate
_DESCRIPTION = """
Returns the rate at which the predictions of one model exactly match those of another model.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions1 (`list` of `int`): Predicted labels for model 1.
predictions2 (`list` of `int`): Predicted labels for model 2.
Returns:
exact_match (`float`): Dictionary containing exact_match rate. Possible values are between 0.0 and 1.0, inclusive.
Examples:
>>> exact_match = evaluate.load("exact_match", module_type="comparison")
>>> results = exact_match.compute(predictions1=[1, 1, 1], predictions2=[1, 1, 1])
>>> print(results)
{'exact_match': 1.0}
"""
_CITATION = """
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(evaluate.Comparison):
def _info(self):
return evaluate.ComparisonInfo(
module_type="comparison",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions1": datasets.Value("int64"),
"predictions2": datasets.Value("int64"),
}
),
)
def _compute(self, predictions1, predictions2):
score_list = [p1 == p2 for p1, p2 in zip(predictions1, predictions2)]
return {"exact_match": np.mean(score_list)}
| 2,106 | 30.924242 | 118 |
py
|
evaluate
|
evaluate-main/.github/hub/push_evaluations_to_hub.py
|
from pathlib import Path
from huggingface_hub import create_repo, Repository
import tempfile
import subprocess
import os
import shutil
import logging
import re
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
GIT_UP_TO_DATE = "On branch main\nYour branch is up to date with 'origin/main'.\
\n\nnothing to commit, working tree clean\n"
COMMIT_PLACEHOLDER = "{COMMIT_PLACEHOLDER}"
def get_git_tag(lib_path, commit_hash):
# check if commit has a tag, see: https://stackoverflow.com/questions/1474115/how-to-find-the-tag-associated-with-a-given-git-commit
command = f"git describe --exact-match {commit_hash}"
output = subprocess.run(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
cwd=lib_path,
env=os.environ.copy(),
)
tag = output.stdout.strip()
if re.match(r"v\d*\.\d*\.\d*", tag) is not None:
return tag
else:
return None
def copy_recursive(source_base_path, target_base_path):
"""Copy directory recursively and overwrite existing files."""
for item in source_base_path.iterdir():
target_path = target_base_path / item.name
if item.is_dir():
target_path.mkdir(exist_ok=True)
copy_recursive(item, target_path)
else:
shutil.copy(item, target_path)
def update_evaluate_dependency(requirements_path, commit_hash):
"""Updates the evaluate requirement with the latest commit."""
with open(requirements_path, "r") as f:
file_content = f.read()
file_content = file_content.replace(COMMIT_PLACEHOLDER, commit_hash)
with open(requirements_path, "w") as f:
f.write(file_content)
def push_module_to_hub(module_path, type, token, commit_hash, tag=None):
module_name = module_path.stem
org = f"evaluate-{type}"
repo_url = create_repo(org + "/" + module_name, repo_type="space", space_sdk="gradio", exist_ok=True, token=token)
repo_path = Path(tempfile.mkdtemp())
scheme = urlparse(repo_url).scheme
repo_url = repo_url.replace(f"{scheme}://", f"{scheme}://user:{token}@")
clean_repo_url = re.sub(r"(https?)://.*@", r"\1://", repo_url)
try:
subprocess.run(
f"git clone {repo_url}".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=repo_path,
env=os.environ.copy(),
)
except OSError:
# make sure we don't accidentally expose token
raise OSError(f"Could not clone from '{clean_repo_url}'")
repo = Repository(local_dir=repo_path / module_name, use_auth_token=token)
copy_recursive(module_path, repo_path / module_name)
update_evaluate_dependency(repo_path / module_name / "requirements.txt", commit_hash)
repo.git_add()
try:
repo.git_commit(f"Update Space (evaluate main: {commit_hash[:8]})")
repo.git_push()
logger.info(f"Module '{module_name}' pushed to the hub")
except OSError as error:
if str(error) == GIT_UP_TO_DATE:
logger.info(f"Module '{module_name}' is already up to date.")
else:
raise error
if tag is not None:
repo.add_tag(tag, message="add evaluate tag", remote="origin")
shutil.rmtree(repo_path)
if __name__ == "__main__":
evaluation_paths = ["metrics", "comparisons", "measurements"]
evaluation_types = ["metric", "comparison", "measurement"]
token = os.getenv("HF_TOKEN")
evaluate_lib_path = Path(os.getenv("EVALUATE_LIB_PATH"))
commit_hash = os.getenv("GIT_HASH")
git_tag = get_git_tag(evaluate_lib_path, commit_hash)
if git_tag is not None:
logger.info(f"Found tag: {git_tag}.")
for type, dir in zip(evaluation_types, evaluation_paths):
if (evaluate_lib_path/dir).exists():
for module_path in (evaluate_lib_path/dir).iterdir():
if module_path.is_dir():
logger.info(f"Updating: module {module_path.name}.")
push_module_to_hub(module_path, type, token, commit_hash, tag=git_tag)
else:
logger.warning(f"No folder {str(evaluate_lib_path/dir)} for {type} found.")
| 4,307 | 35.201681 | 136 |
py
|
evaluate
|
evaluate-main/src/evaluate/loading.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Access datasets."""
import filecmp
import importlib
import inspect
import json
import os
import re
import shutil
import time
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
from urllib.parse import urlparse
from datasets import DownloadConfig, DownloadMode
from datasets.builder import DatasetBuilder
from datasets.packaged_modules import _EXTENSION_TO_MODULE, _hash_python_lines
from datasets.utils.filelock import FileLock
from datasets.utils.version import Version
from . import SCRIPTS_VERSION, config
from .module import EvaluationModule
from .utils.file_utils import (
cached_path,
head_hf_s3,
hf_hub_url,
init_hf_modules,
is_relative_path,
relative_to_absolute_path,
url_or_path_join,
)
from .utils.logging import get_logger
logger = get_logger(__name__)
ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + ["zip"]
def init_dynamic_modules(
name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
"""
Create a module with name `name` in which you can add dynamic modules
such as metrics or datasets. The module can be imported using its name.
The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can
    be overridden by specifying a path to another directory in `hf_modules_cache`.
"""
hf_modules_cache = init_hf_modules(hf_modules_cache)
dynamic_modules_path = os.path.join(hf_modules_cache, name)
os.makedirs(dynamic_modules_path, exist_ok=True)
if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
pass
return dynamic_modules_path
def import_main_class(module_path) -> Optional[Union[Type[DatasetBuilder], Type[EvaluationModule]]]:
"""Import a module at module_path and return its main class, a Metric by default"""
module = importlib.import_module(module_path)
main_cls_type = EvaluationModule
# Find the main class in our imported module
module_main_cls = None
for name, obj in module.__dict__.items():
if isinstance(obj, type) and issubclass(obj, main_cls_type):
if inspect.isabstract(obj):
continue
module_main_cls = obj
break
return module_main_cls
def files_to_hash(file_paths: List[str]) -> str:
"""
Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
"""
# List all python files in directories if directories are supplied as part of external imports
to_use_files: List[Union[Path, str]] = []
for file_path in file_paths:
if os.path.isdir(file_path):
to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
else:
to_use_files.append(file_path)
# Get the code from all these files
lines = []
for file_path in to_use_files:
with open(file_path, encoding="utf-8") as f:
lines.extend(f.readlines())
return _hash_python_lines(lines)
def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
"""Convert a link to a file on a github repo in a link to the raw github object."""
parsed = urlparse(url_path)
sub_directory = None
if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
if "blob" in url_path:
if not url_path.endswith(".py"):
raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
url_path = url_path.replace("blob", "raw") # Point to the raw file
else:
# Parse github url to point to zip
github_path = parsed.path[1:]
repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
repo_owner, repo_name = repo_info.split("/")
url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
sub_directory = f"{repo_name}-{branch}"
return url_path, sub_directory
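# Illustrative conversions performed above (USER, REPO and BRANCH are placeholders):
#   https://github.com/USER/REPO/blob/BRANCH/script.py
#       -> ("https://github.com/USER/REPO/raw/BRANCH/script.py", None)
#   https://github.com/USER/REPO/tree/BRANCH
#       -> ("https://github.com/USER/REPO/archive/BRANCH.zip", "REPO-BRANCH")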
def increase_load_count(name: str, resource_type: str):
"""Update the download count of a dataset or metric."""
if not config.HF_EVALUATE_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
try:
head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
except Exception:
pass
def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
"""Find whether we should import or clone additional files for a given processing script.
And list the import.
We allow:
- library dependencies,
- local dependencies and
- external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository.
external dependencies will be downloaded (and extracted if needed in the dataset folder).
We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
Note that only direct import in the dataset processing script will be handled
We don't recursively explore the additional import to download further files.
Example::
import tensorflow
import .c4_utils
import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
"""
lines = []
with open(file_path, encoding="utf-8") as f:
lines.extend(f.readlines())
logger.debug(f"Checking {file_path} for additional imports.")
imports: List[Tuple[str, str, str, Optional[str]]] = []
is_in_docstring = False
for line in lines:
docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
if len(docstr_start_match) == 1:
            # flip True <=> False only if docstring
# starts at line without finishing
is_in_docstring = not is_in_docstring
if is_in_docstring:
            # import statements in docstrings should
# not be added as required dependencies
continue
match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
if match is None:
match = re.match(
r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
line,
flags=re.MULTILINE,
)
if match is None:
continue
if match.group(1):
# The import starts with a '.', we will download the relevant file
if any(imp[1] == match.group(2) for imp in imports):
# We already have this import
continue
if match.group(3):
# The import has a comment with 'From:', we'll retrieve it from the given url
url_path = match.group(3)
url_path, sub_directory = convert_github_url(url_path)
imports.append(("external", match.group(2), url_path, sub_directory))
elif match.group(2):
# The import should be at the same place as the file
imports.append(("internal", match.group(2), match.group(2), None))
else:
if match.group(3):
# The import has a comment with `From: git+https:...`, asks user to pip install from git.
url_path = match.group(3)
imports.append(("library", match.group(2), url_path, None))
else:
imports.append(("library", match.group(2), match.group(2), None))
return imports
def _download_additional_modules(
    name: str, base_path: str, imports: List[Tuple[str, str, str, Optional[str]]], download_config: Optional[DownloadConfig]
) -> List[Tuple[str, str]]:
"""
Download additional module for a module <name>.py at URL (or local path) <base_path>/<name>.py
The imports must have been parsed first using ``get_imports``.
If some modules need to be installed with pip, an error is raised showing how to install them.
    This function returns the list of downloaded modules as tuples (import_name, module_file_path).
The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
"""
local_imports = []
library_imports = []
download_config = download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading extra modules"
for import_type, import_name, import_path, sub_directory in imports:
if import_type == "library":
library_imports.append((import_name, import_path)) # Import from a library
continue
if import_name == name:
raise ValueError(
f"Error in the {name} script, importing relative {import_name} module "
f"but {import_name} is the name of the script. "
f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
f"comment pointing to the original relative import file path."
)
if import_type == "internal":
url_or_filename = url_or_path_join(base_path, import_path + ".py")
elif import_type == "external":
url_or_filename = import_path
else:
raise ValueError("Wrong import_type")
local_import_path = cached_path(
url_or_filename,
download_config=download_config,
)
if sub_directory is not None:
local_import_path = os.path.join(local_import_path, sub_directory)
local_imports.append((import_name, local_import_path))
# Check library imports
needs_to_be_installed = set()
for library_import_name, library_import_path in library_imports:
try:
lib = importlib.import_module(library_import_name) # noqa F841
except ImportError:
needs_to_be_installed.add((library_import_name, library_import_path))
if needs_to_be_installed:
raise ImportError(
f"To be able to use {name}, you need to install the following dependencies"
f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install "
f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance'"
)
return local_imports
def _copy_script_and_other_resources_in_importable_dir(
name: str,
importable_directory_path: str,
subdirectory_name: str,
original_local_path: str,
local_imports: List[Tuple[str, str]],
additional_files: List[Tuple[str, str]],
download_mode: Optional[DownloadMode],
) -> str:
"""Copy a script and its required imports to an importable directory
Args:
name (str): name of the resource to load
importable_directory_path (str): path to the loadable folder in the dynamic modules directory
subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
original_local_path (str): local path to the resource script
local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
download_mode (Optional[DownloadMode]): download mode
Return:
importable_local_file: path to an importable module with importlib.import_module
"""
# Define a directory with a unique name in our dataset or metric folder
# path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
# we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
importable_local_file = os.path.join(importable_subdirectory, name + ".py")
# Prevent parallel disk operations
lock_path = importable_directory_path + ".lock"
with FileLock(lock_path):
# Create main dataset/metrics folder if needed
if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
shutil.rmtree(importable_directory_path)
os.makedirs(importable_directory_path, exist_ok=True)
# add an __init__ file to the main dataset folder if needed
init_file_path = os.path.join(importable_directory_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Create hash dataset folder if needed
os.makedirs(importable_subdirectory, exist_ok=True)
# add an __init__ file to the hash dataset folder if needed
init_file_path = os.path.join(importable_subdirectory, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Copy dataset.py file in hash folder if needed
if not os.path.exists(importable_local_file):
shutil.copyfile(original_local_path, importable_local_file)
# Record metadata associating original dataset path with local unique folder
meta_path = importable_local_file.split(".py")[0] + ".json"
if not os.path.exists(meta_path):
meta = {"original file path": original_local_path, "local file path": importable_local_file}
            # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
# Copy all the additional imports
for import_name, import_path in local_imports:
if os.path.isfile(import_path):
full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
if not os.path.exists(full_path_local_import):
shutil.copyfile(import_path, full_path_local_import)
elif os.path.isdir(import_path):
full_path_local_import = os.path.join(importable_subdirectory, import_name)
if not os.path.exists(full_path_local_import):
shutil.copytree(import_path, full_path_local_import)
else:
raise OSError(f"Error with local import at {import_path}")
        # Copy additional files like the dataset infos file if needed
for file_name, original_path in additional_files:
destination_additional_path = os.path.join(importable_subdirectory, file_name)
if not os.path.exists(destination_additional_path) or not filecmp.cmp(
original_path, destination_additional_path
):
shutil.copyfile(original_path, destination_additional_path)
return importable_local_file
def _create_importable_file(
local_path: str,
local_imports: List[Tuple[str, str]],
additional_files: List[Tuple[str, str]],
dynamic_modules_path: str,
module_namespace: str,
name: str,
download_mode: DownloadMode,
) -> Tuple[str, str]:
importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
(Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
importable_local_file = _copy_script_and_other_resources_in_importable_dir(
name=name.split("/")[-1],
importable_directory_path=importable_directory_path,
subdirectory_name=hash,
original_local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
download_mode=download_mode,
)
logger.debug(f"Created importable dataset file at {importable_local_file}")
module_path = ".".join(
[os.path.basename(dynamic_modules_path), module_namespace, name.replace("/", "--"), hash, name.split("/")[-1]]
)
return module_path, hash
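# Editor's note (illustrative): with the default dynamic modules path, the returned
# module_path for a local script ./accuracy.py typically looks like
# "datasets_modules.metrics.accuracy.<hash>.accuracy", where <hash> comes from
# files_to_hash over the script and its local imports.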
@dataclass
class ImportableModule:
module_path: str
hash: str
class _EvaluationModuleFactory:
def get_module(self) -> ImportableModule:
raise NotImplementedError
class LocalEvaluationModuleFactory(_EvaluationModuleFactory):
"""Get the module of a local metric. The metric script is loaded from a local script."""
def __init__(
self,
path: str,
module_type: str = "metrics",
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
dynamic_modules_path: Optional[str] = None,
):
self.path = path
self.module_type = module_type
self.name = Path(path).stem
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
def get_module(self) -> ImportableModule:
# get script and other files
imports = get_imports(self.path)
local_imports = _download_additional_modules(
name=self.name,
base_path=str(Path(self.path).parent),
imports=imports,
download_config=self.download_config,
)
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
module_path, hash = _create_importable_file(
local_path=self.path,
local_imports=local_imports,
additional_files=[],
dynamic_modules_path=dynamic_modules_path,
module_namespace=self.module_type,
name=self.name,
download_mode=self.download_mode,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
return ImportableModule(module_path, hash)
class HubEvaluationModuleFactory(_EvaluationModuleFactory):
"""Get the module of a metric from a metric repository on the Hub."""
def __init__(
self,
name: str,
module_type: str = "metrics",
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.module_type = module_type
self.revision = revision
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") == 1
increase_load_count(name, resource_type="metric")
def download_loading_script(self, revision) -> str:
file_path = hf_hub_url(path=self.name, name=self.name.split("/")[1] + ".py", revision=revision)
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading builder script"
return cached_path(file_path, download_config=download_config)
def get_module(self) -> ImportableModule:
revision = self.revision or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION)
if re.match(r"\d*\.\d*\.\d*", revision): # revision is version number (three digits separated by full stops)
revision = "v" + revision # tagging convention on evaluate repository starts with v
# get script and other files
try:
local_path = self.download_loading_script(revision)
except FileNotFoundError as err:
# if there is no file found with current revision tag try to load main
if self.revision is None and os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION) != "main":
revision = "main"
local_path = self.download_loading_script(revision)
else:
raise err
imports = get_imports(local_path)
local_imports = _download_additional_modules(
name=self.name,
base_path=hf_hub_url(path=self.name, name="", revision=revision),
imports=imports,
download_config=self.download_config,
)
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
module_path, hash = _create_importable_file(
local_path=local_path,
local_imports=local_imports,
additional_files=[],
dynamic_modules_path=dynamic_modules_path,
module_namespace=self.module_type,
name=self.name,
download_mode=self.download_mode,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
return ImportableModule(module_path, hash)
class CachedEvaluationModuleFactory(_EvaluationModuleFactory):
"""
Get the module of a metric that has been loaded once already and cached.
The script that is loaded from the cache is the most recent one with a matching name.
"""
def __init__(
self,
name: str,
module_type: str = "metrics",
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.module_type = module_type
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") == 0
def get_module(self) -> ImportableModule:
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
importable_directory_path = os.path.join(dynamic_modules_path, self.module_type, self.name)
hashes = (
[h for h in os.listdir(importable_directory_path) if len(h) == 64]
if os.path.isdir(importable_directory_path)
else None
)
if not hashes:
raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}")
# get most recent
def _get_modification_time(module_hash):
return (
(Path(importable_directory_path) / module_hash / (self.name.split("--")[-1] + ".py")).stat().st_mtime
)
hash = sorted(hashes, key=_get_modification_time)[-1]
logger.warning(
f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
)
# make the new module to be noticed by the import system
module_path = ".".join(
[os.path.basename(dynamic_modules_path), self.module_type, self.name, hash, self.name.split("--")[-1]]
)
importlib.invalidate_caches()
return ImportableModule(module_path, hash)
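# Editor's note (illustrative): cached modules live under
# <dynamic_modules_path>/<module_type>/<name>/<64-character hash>/<name>.py,
# and the factory above picks the hash directory with the most recent mtime.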
def evaluation_module_factory(
path: str,
module_type: Optional[str] = None,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
force_local_path: Optional[str] = None,
dynamic_modules_path: Optional[str] = None,
**download_kwargs,
) -> ImportableModule:
"""
Download/extract/cache a metric module.
    Metric scripts are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).
Args:
path (str): Path or name of the metric script.
- if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
-> load the module from the metric script
e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
- if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`)
-> load the module from the metric script in the github repository at huggingface/datasets
e.g. ``'accuracy'`` or ``'rouge'``.
revision (Optional ``Union[str, datasets.Version]``):
If specified, the module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
            - it will also try to load it from the main branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
        force_local_path (Optional str): Optional path to a local folder in which to download and prepare the script.
Used to inspect or modify the script folder.
dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
By default the datasets and metrics are stored inside the `datasets_modules` module.
download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
Returns:
ImportableModule
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
download_config.extract_compressed_file = True
download_config.force_extract = True
filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
if not filename.endswith(".py"):
filename = filename + ".py"
combined_path = os.path.join(path, filename)
# Try locally
if path.endswith(filename):
if os.path.isfile(path):
return LocalEvaluationModuleFactory(
path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
).get_module()
else:
raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
elif os.path.isfile(combined_path):
return LocalEvaluationModuleFactory(
combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
).get_module()
elif is_relative_path(path) and path.count("/") <= 1 and not force_local_path:
try:
# load a canonical evaluation module from hub
if path.count("/") == 0:
# if no type provided look through all possible modules
if module_type is None:
for current_type in ["metric", "comparison", "measurement"]:
try:
return HubEvaluationModuleFactory(
f"evaluate-{current_type}/{path}",
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
).get_module()
except ConnectionError:
pass
raise FileNotFoundError
# if module_type provided load specific module_type
else:
return HubEvaluationModuleFactory(
f"evaluate-{module_type}/{path}",
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
).get_module()
# load community evaluation module from hub
elif path.count("/") == 1:
return HubEvaluationModuleFactory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
dynamic_modules_path=dynamic_modules_path,
).get_module()
except Exception as e1: # noqa: all the attempts failed, before raising the error we should check if the module is already cached.
# if it's a canonical module we need to check if it's any of the types
if path.count("/") == 0:
for current_type in ["metric", "comparison", "measurement"]:
try:
return CachedEvaluationModuleFactory(
f"evaluate-{current_type}--{path}", dynamic_modules_path=dynamic_modules_path
).get_module()
except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist.
pass
# if it's a community module we just need to check on path
elif path.count("/") == 1:
try:
return CachedEvaluationModuleFactory(
path.replace("/", "--"), dynamic_modules_path=dynamic_modules_path
).get_module()
except Exception as e2: # noqa: if it's not in the cache, then it doesn't exist.
pass
if not isinstance(e1, (ConnectionError, FileNotFoundError)):
raise e1 from None
raise FileNotFoundError(
f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}. "
f"Module '{path}' doesn't exist on the Hugging Face Hub either."
) from None
else:
raise FileNotFoundError(f"Couldn't find a module script at {relative_to_absolute_path(combined_path)}.")
def load(
path: str,
config_name: Optional[str] = None,
module_type: Optional[str] = None,
process_id: int = 0,
num_process: int = 1,
cache_dir: Optional[str] = None,
experiment_id: Optional[str] = None,
keep_in_memory: bool = False,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
**init_kwargs,
) -> EvaluationModule:
"""Load a [`~evaluate.EvaluationModule`].
Args:
path (`str`):
Path to the evaluation processing script with the evaluation builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. `'./metrics/rouge'` or `'./metrics/rouge/rouge.py'`
                - an evaluation module identifier on the HuggingFace evaluate repo e.g. `'rouge'` or `'bleu'` that is in either `'metrics/'`,
`'comparisons/'`, or `'measurements/'` depending on the provided `module_type`
config_name (`str`, *optional*):
Selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
module_type (`str`, default `'metric'`):
Type of evaluation module, can be one of `'metric'`, `'comparison'`, or `'measurement'`.
process_id (`int`, *optional*):
For distributed evaluation: id of the process.
num_process (`int`, *optional*):
For distributed evaluation: total number of processes.
cache_dir (`str`, *optional*):
Path to store the temporary predictions and references (default to `~/.cache/huggingface/evaluate/`).
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
keep_in_memory (`bool`):
Whether to store the temporary results in memory (defaults to `False`).
download_config ([`~evaluate.DownloadConfig`], *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`], defaults to `REUSE_DATASET_IF_EXISTS`):
Download/generate mode.
revision (`Union[str, evaluate.Version]`, *optional*):
If specified, the module will be loaded from the datasets repository
at this version. By default it is set to the local version of the lib. Specifying a version that is different from
your local version of the lib might cause compatibility issues.
Returns:
[`evaluate.EvaluationModule`]
Example:
```py
>>> from evaluate import load
>>> accuracy = evaluate.load("accuracy")
```
"""
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
evaluation_module = evaluation_module_factory(
path, module_type=module_type, revision=revision, download_config=download_config, download_mode=download_mode
)
evaluation_cls = import_main_class(evaluation_module.module_path)
evaluation_instance = evaluation_cls(
config_name=config_name,
process_id=process_id,
num_process=num_process,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
experiment_id=experiment_id,
hash=evaluation_module.hash,
**init_kwargs,
)
if module_type and module_type != evaluation_instance.module_type:
raise TypeError(
f"No module of module type '{module_type}' not found for '{path}' locally, or on the Hugging Face Hub. Found module of module type '{evaluation_instance.module_type}' instead."
)
# Download and prepare resources for the metric
evaluation_instance.download_and_prepare(download_config=download_config)
return evaluation_instance
| 35,118 | 44.549935 | 188 |
py
|
evaluate
|
evaluate-main/src/evaluate/visualization.py
|
import textwrap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class ComplexRadar:
"""Create a complex radar chart with different scales for each variable
Args:
fig (`matplotlib.figure`) : A matplotlib figure object to add the axes on.
        variables (`list`): a list of variables to plot
        ranges (`list` of `tuples`): A list of ranges (min, max) for each variable
        n_ring_levels (`int`): Number of ordinate or ring levels to draw.
            Default: 5.
        show_scales (`bool`): Indicates whether the ranges for each variable are plotted.
Default: True.
format_cfg (`dict`): A dictionary with formatting configurations.
Default: None.
Returns:
`matplotlib.figure.Figure`: a radar plot.
"""
def __init__(self, fig, variables, ranges, n_ring_levels=5, show_scales=True, format_cfg=None):
self.format_cfg = format_cfg
# Calculate angles and create for each variable an axes
# Consider here the trick with having the first axes element twice (len+1)
angles = np.arange(0, 360, 360.0 / len(variables))
axes = [
fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True, label="axes{}".format(i), **self.format_cfg["axes_args"])
for i in range(len(variables) + 1)
]
# Ensure clockwise rotation (first variable at the top N)
for ax in axes:
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
ax.set_axisbelow(True)
# Writing the ranges on each axes
for i, ax in enumerate(axes):
# Here we do the trick by repeating the first iteration
j = 0 if (i == 0 or i == 1) else i - 1
ax.set_ylim(*ranges[j])
# Set endpoint to True if you like to have values right before the last circle
grid = np.linspace(*ranges[j], num=n_ring_levels, endpoint=self.format_cfg["incl_endpoint"])
gridlabel = ["{}".format(round(x, 2)) for x in grid]
gridlabel[0] = "" # remove values from the center
lines, labels = ax.set_rgrids(
grid, labels=gridlabel, angle=angles[j], **self.format_cfg["rgrid_tick_lbls_args"]
)
ax.set_ylim(*ranges[j])
ax.spines["polar"].set_visible(False)
ax.grid(visible=False)
if show_scales is False:
ax.set_yticklabels([])
        # Make all axes except the first one invisible
for ax in axes[1:]:
ax.patch.set_visible(False)
ax.xaxis.set_visible(False)
# Setting the attributes
self.angle = np.deg2rad(np.r_[angles, angles[0]])
self.ranges = ranges
self.ax = axes[0]
self.ax1 = axes[1]
self.plot_counter = 0
# Draw (inner) circles and lines
self.ax.yaxis.grid(**self.format_cfg["rad_ln_args"])
# Draw outer circle
self.ax.spines["polar"].set(**self.format_cfg["outer_ring"])
# Draw angle lines
self.ax.xaxis.grid(**self.format_cfg["angle_ln_args"])
# ax1 is the duplicate of axes[0] (self.ax)
# Remove everything from ax1 except the plot itself
self.ax1.axis("off")
self.ax1.set_zorder(9)
# Create the outer labels for each variable
l, text = self.ax.set_thetagrids(angles, labels=variables)
# Beautify them
labels = [t.get_text() for t in self.ax.get_xticklabels()]
labels = [
"\n".join(
textwrap.wrap(
label,
self.format_cfg["theta_tick_lbls_txt_wrap"],
break_long_words=self.format_cfg["theta_tick_lbls_brk_lng_wrds"],
)
)
for label in labels
]
self.ax.set_xticklabels(labels, **self.format_cfg["theta_tick_lbls"])
for t, a in zip(self.ax.get_xticklabels(), angles):
if a == 0:
t.set_ha("center")
elif a > 0 and a < 180:
t.set_ha("left")
elif a == 180:
t.set_ha("center")
else:
t.set_ha("right")
self.ax.tick_params(axis="both", pad=self.format_cfg["theta_tick_lbls_pad"])
def _scale_data(self, data, ranges):
"""Scales data[1:] to ranges[0]"""
for d, (y1, y2) in zip(data[1:], ranges[1:]):
assert (y1 <= d <= y2) or (y2 <= d <= y1)
x1, x2 = ranges[0]
d = data[0]
sdata = [d]
for d, (y1, y2) in zip(data[1:], ranges[1:]):
sdata.append((d - y1) / (y2 - y1) * (x2 - x1) + x1)
return sdata
def plot(self, data, *args, **kwargs):
"""Plots a line"""
sdata = self._scale_data(data, self.ranges)
self.ax1.plot(self.angle, np.r_[sdata, sdata[0]], *args, **kwargs)
self.plot_counter = self.plot_counter + 1
def use_legend(self, *args, **kwargs):
"""Shows a legend"""
self.ax1.legend(*args, **kwargs)
def radar_plot(data, model_names, invert_range=[], config=None, fig=None):
"""Create a complex radar chart with different scales for each variable
Source: https://towardsdatascience.com/how-to-create-and-visualize-complex-radar-charts-f7764d0f3652
Args:
data (`List[dict]`): the results (list of metric + value pairs).
E.g. data = [{"accuracy": 0.9, "precision":0.8},{"accuracy": 0.7, "precision":0.6}]
        model_names (`List[str]`): model names.
            E.g. model_names = ["model1", "model 2", ...]
        invert_range (`List[str]`, optional): the metrics to invert (in cases when smaller is better, e.g. speed)
            E.g. invert_range=["latency_in_seconds"]
        config (`dict`, optional): a specification of the formatting configurations, namely:
- rad_ln_args (`dict`, default `{"visible": True}`): The visibility of the radial (circle) lines.
- outer_ring (`dict`, default `{"visible": True}`): The visibility of the outer ring.
- angle_ln_args (`dict`, default `{"visible": True}`): The visibility of the angle lines.
- rgrid_tick_lbls_args (`dict`, default `{"fontsize": 12}`): The font size of the tick labels on the scales.
- theta_tick_lbls (`dict`, default `{"fontsize": 12}`): The font size of the variable labels on the plot.
- theta_tick_lbls_pad (`int`, default `3`): The padding of the variable labels on the plot.
- theta_tick_lbls_brk_lng_wrds (`bool`, default `True` ): Whether long words in the label are broken up or not.
- theta_tick_lbls_txt_wrap (`int`, default `15`): Text wrap for tick labels
            - incl_endpoint (`bool`, default `False`): Include value endpoints on scale
            - marker (`str`, default `"o"`): the shape of the marker used in the radar plot.
            - markersize (`int`, default `3`): the size of the marker used in the radar plot.
            - legend_loc (`str`, default `"upper right"`): the location of the legend in the radar plot. Must be one of: 'upper left', 'upper right', 'lower left', 'lower right'.
            - bbox_to_anchor (`tuple`, default `(2, 1)`): anchor for the legend.
fig (`matplotlib.figure.Figure`, optional): figure used to plot the radar plot.
Returns:
`matplotlib.figure.Figure`
"""
data = pd.DataFrame(data)
data.index = model_names
variables = data.keys()
if all(x in variables for x in invert_range) is False:
raise ValueError("All of the metrics in `invert_range` should be in the data provided.")
min_max_per_variable = data.describe().T[["min", "max"]]
min_max_per_variable["min"] = min_max_per_variable["min"] - 0.1 * (
min_max_per_variable["max"] - min_max_per_variable["min"]
)
min_max_per_variable["max"] = min_max_per_variable["max"] + 0.1 * (
min_max_per_variable["max"] - min_max_per_variable["min"]
)
ranges = list(min_max_per_variable.itertuples(index=False, name=None))
ranges = [
(max_value, min_value) if var in invert_range else (min_value, max_value)
for var, (min_value, max_value) in zip(variables, ranges)
]
format_cfg = {
"axes_args": {},
"rad_ln_args": {"visible": True},
"outer_ring": {"visible": True},
"angle_ln_args": {"visible": True},
"rgrid_tick_lbls_args": {"fontsize": 12},
"theta_tick_lbls": {"fontsize": 12},
"theta_tick_lbls_pad": 3,
"theta_tick_lbls_brk_lng_wrds": True,
"theta_tick_lbls_txt_wrap": 15,
"incl_endpoint": False,
"marker": "o",
"markersize": 3,
"legend_loc": "upper right",
"bbox_to_anchor": (2, 1),
}
if config is not None:
format_cfg.update(config)
if fig is None:
fig = plt.figure()
radar = ComplexRadar(
fig,
variables,
ranges,
n_ring_levels=3,
show_scales=True,
format_cfg=format_cfg,
)
for g in zip(data.index):
radar.plot(data.loc[g].values, label=g, marker=format_cfg["marker"], markersize=format_cfg["markersize"])
radar.use_legend(**{"loc": format_cfg["legend_loc"], "bbox_to_anchor": format_cfg["bbox_to_anchor"]})
return fig
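# --- Editor's illustrative sketch (not part of the library) -------------------
# Two hypothetical models scored on three metrics; latency is inverted so that
# "smaller is better" still points outwards. Metric names and values are made up.
if __name__ == "__main__":
    results = [
        {"accuracy": 0.90, "f1": 0.85, "latency_in_seconds": 0.40},
        {"accuracy": 0.82, "f1": 0.88, "latency_in_seconds": 0.15},
    ]
    fig = radar_plot(results, model_names=["model_a", "model_b"], invert_range=["latency_in_seconds"])
    fig.savefig("radar_demo.png", bbox_inches="tight")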
| 9,293 | 39.233766 | 178 |
py
|
evaluate
|
evaluate-main/src/evaluate/inspect.py
|
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" List and inspect metrics."""
from typing import Optional
import requests
from datasets import DownloadConfig
from .config import EVALUATION_MODULE_TYPES, HF_LIST_ENDPOINT
from .loading import evaluation_module_factory
from .utils.logging import get_logger
logger = get_logger(__name__)
class SplitsNotFoundError(ValueError):
pass
def list_evaluation_modules(module_type=None, include_community=True, with_details=False):
"""List all evaluation modules available on the Hugging Face Hub.
Args:
module_type (`str`, *optional*, defaults to `None`):
Type of evaluation modules to list. Has to be one of `'metric'`, `'comparison'`, or `'measurement'`. If `None`, all types are listed.
include_community (`bool`, *optional*, defaults to `True`):
Include community modules in the list.
with_details (`bool`, *optional*, defaults to `False`):
Return the full details on the metrics instead of only the ID.
Returns:
`List[Union[str, dict]]`
Example:
```py
>>> from evaluate import list_evaluation_modules
>>> list_evaluation_modules(module_type="metric")
```
"""
if module_type is None:
evaluations_list = []
for module_type in EVALUATION_MODULE_TYPES:
evaluations_list.extend(
_list_evaluation_modules_type(
module_type, include_community=include_community, with_details=with_details
)
)
else:
if module_type not in EVALUATION_MODULE_TYPES:
raise ValueError(f"Invalid module type '{module_type}'. Has to be one of {EVALUATION_MODULE_TYPES}.")
evaluations_list = _list_evaluation_modules_type(
module_type, include_community=include_community, with_details=with_details
)
return evaluations_list
def _list_evaluation_modules_type(module_type, include_community=True, with_details=False):
r = requests.get(HF_LIST_ENDPOINT.format(type=module_type))
r.raise_for_status()
d = r.json()
if not include_community:
d = [element for element in d if element["id"].split("/")[0] == f"evaluate-{module_type}"]
# remove namespace for canonical modules and add community tag
for element in d:
if element["id"].split("/")[0] == f"evaluate-{module_type}":
element["id"] = element["id"].split("/")[1]
element["community"] = False
else:
element["community"] = True
if with_details:
return [
{
"name": element["id"],
"type": module_type,
"community": element["community"],
"likes": element.get("likes", 0),
}
for element in d
]
else:
return [element["id"] for element in d]
def inspect_evaluation_module(
path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs
):
r"""
    Allow inspection/modification of an evaluation script by copying it on local drive at local_path.
Args:
path (``str``): path to the evaluation script. Can be either:
- a local path to script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``evaluate.list_evaluation_modules()``)
e.g. ``'accuracy'``, ``'bleu'`` or ``'word_length'``
        local_path (``str``): path to the local folder to copy the evaluation script to.
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
**download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
"""
evaluation_module = evaluation_module_factory(
path, download_config=download_config, force_local_path=local_path, **download_kwargs
)
print(
f"The processing scripts for metric {path} can be inspected at {local_path}. "
f"The main class is in {evaluation_module.module_path}. "
f"You can modify this processing scripts and use it with `evaluate.load({local_path})`."
)
| 4,969 | 37.230769 | 145 |
py
|
evaluate
|
evaluate-main/src/evaluate/hub.py
|
from typing import Dict
import requests
from huggingface_hub import dataset_info, model_info
from huggingface_hub.repocard import metadata_update
from .config import HF_HUB_ALLOWED_TASKS
from .utils.logging import get_logger
logger = get_logger(__name__)
def push_to_hub(
model_id: str,
task_type: str,
dataset_type: str,
dataset_name: str,
metric_type: str,
metric_name: str,
metric_value: float,
task_name: str = None,
dataset_config: str = None,
dataset_split: str = None,
dataset_revision: str = None,
dataset_args: Dict[str, int] = None,
metric_config: str = None,
metric_args: Dict[str, int] = None,
overwrite: bool = False,
):
r"""
Pushes the result of a metric to the metadata of a model repository in the Hub.
Args:
model_id (`str`):
Model id from https://hf.co/models.
task_type (`str`):
Task id, refer to the [Hub allowed tasks](https://github.com/huggingface/evaluate/blob/main/src/evaluate/config.py#L154) for allowed values.
dataset_type (`str`):
Dataset id from https://hf.co/datasets.
dataset_name (`str`):
Pretty name for the dataset.
metric_type (`str`):
Metric id from https://hf.co/metrics.
metric_name (`str`):
Pretty name for the metric.
metric_value (`float`):
Computed metric value.
task_name (`str`, *optional*):
Pretty name for the task.
dataset_config (`str`, *optional*):
Dataset configuration used in [`~datasets.load_dataset`].
See [`~datasets.load_dataset`] for more info.
dataset_split (`str`, *optional*):
Name of split used for metric computation.
dataset_revision (`str`, *optional*):
Git hash for the specific version of the dataset.
dataset_args (`dict[str, int]`, *optional*):
Additional arguments passed to [`~datasets.load_dataset`].
metric_config (`str`, *optional*):
Configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
metric_args (`dict[str, int]`, *optional*):
Arguments passed during [`~evaluate.EvaluationModule.compute`].
overwrite (`bool`, *optional*, defaults to `False`):
If set to `True` an existing metric field can be overwritten, otherwise
attempting to overwrite any existing fields will cause an error.
Example:
```python
>>> push_to_hub(
... model_id="huggingface/gpt2-wikitext2",
        ...     metric_value=0.5,
... metric_type="bleu",
... metric_name="BLEU",
... dataset_name="WikiText",
... dataset_type="wikitext",
... dataset_split="test",
... task_type="text-generation",
... task_name="Text Generation"
... )
```"""
if task_type not in HF_HUB_ALLOWED_TASKS:
raise ValueError(f"Task type not supported. Task has to be one of {HF_HUB_ALLOWED_TASKS}")
try:
dataset_info(dataset_type)
except requests.exceptions.HTTPError:
logger.warning(f"Dataset {dataset_type} not found on the Hub at hf.co/datasets/{dataset_type}")
try:
model_info(model_id)
except requests.exceptions.HTTPError:
raise ValueError(f"Model {model_id} not found on the Hub at hf.co/{model_id}")
result = {
"task": {
"type": task_type,
},
"dataset": {
"type": dataset_type,
"name": dataset_name,
},
"metrics": [
{
"type": metric_type,
"value": metric_value,
},
],
}
if dataset_config is not None:
result["dataset"]["config"] = dataset_config
if dataset_split is not None:
result["dataset"]["split"] = dataset_split
if dataset_revision is not None:
result["dataset"]["revision"] = dataset_revision
if dataset_args is not None:
result["dataset"]["args"] = dataset_args
if task_name is not None:
result["task"]["name"] = task_name
if metric_name is not None:
result["metrics"][0]["name"] = metric_name
if metric_config is not None:
result["metrics"][0]["config"] = metric_config
if metric_args is not None:
result["metrics"][0]["args"] = metric_args
metadata = {"model-index": [{"results": [result]}]}
return metadata_update(repo_id=model_id, metadata=metadata, overwrite=overwrite)
| 4,550 | 32.962687 | 152 |
py
|
evaluate
|
evaluate-main/src/evaluate/module.py
|
# Copyright 2020 The HuggingFace Datasets Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" EvaluationModule base class."""
import collections
import itertools
import os
import types
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
from datasets import DatasetInfo, DownloadConfig, DownloadManager
from datasets.arrow_dataset import Dataset
from datasets.arrow_reader import ArrowReader
from datasets.arrow_writer import ArrowWriter
from datasets.features import Features, Sequence, Value
from datasets.features.features import _check_non_null_non_empty_recursive
from datasets.utils.filelock import BaseFileLock, FileLock, Timeout
from datasets.utils.py_utils import copyfunc, temp_seed, zip_dict
from . import config
from .info import EvaluationModuleInfo
from .naming import camelcase_to_snakecase
from .utils.logging import get_logger
logger = get_logger(__name__)
class FileFreeLock(BaseFileLock):
"""Thread lock until a file **cannot** be locked"""
def __init__(self, lock_file, *args, **kwargs):
self.filelock = FileLock(lock_file)
super().__init__(lock_file, *args, **kwargs)
def _acquire(self):
try:
self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
except Timeout:
# We couldn't acquire the lock, the file is locked!
self._lock_file_fd = self.filelock.lock_file
else:
# We were able to acquire the lock, the file is not yet locked!
self.filelock.release()
self._lock_file_fd = None
def _release(self):
self._lock_file_fd = None
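# --- Editor's illustrative sketch (not part of the library) -------------------
# FileFreeLock inverts the usual semantics: acquire() only succeeds while some
# other process already holds the underlying FileLock, so probing a file nobody
# has locked times out. The temporary file name below is made up.
if __name__ == "__main__":
    import tempfile
    _unlocked = os.path.join(tempfile.gettempdir(), "nobody-holds-this.arrow.lock")
    try:
        FileFreeLock(_unlocked).acquire(timeout=0.1)
    except Timeout:
        print(f"no process currently holds a lock on {_unlocked}")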
# lists - summarize long lists similarly to NumPy
# arrays/tensors - let the frameworks control formatting
def summarize_if_long_list(obj):
if not type(obj) == list or len(obj) <= 6:
return f"{obj}"
def format_chunk(chunk):
return ", ".join(repr(x) for x in chunk)
return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
class EvaluationModuleInfoMixin:
"""This base class exposes some attributes of EvaluationModuleInfo
at the base level of the EvaluationModule for easy access.
"""
def __init__(self, info: EvaluationModuleInfo):
self._module_info = info
@property
def info(self):
""":class:`evaluate.EvaluationModuleInfo` object containing all the metadata in the evaluation module."""
return self._module_info
@property
def name(self) -> str:
return self._module_info.module_name
@property
def experiment_id(self) -> Optional[str]:
return self._module_info.experiment_id
@property
def description(self) -> str:
return self._module_info.description
@property
def citation(self) -> str:
return self._module_info.citation
@property
def features(self) -> Features:
return self._module_info.features
@property
def inputs_description(self) -> str:
return self._module_info.inputs_description
@property
def homepage(self) -> Optional[str]:
return self._module_info.homepage
@property
def license(self) -> str:
return self._module_info.license
@property
def codebase_urls(self) -> Optional[List[str]]:
return self._module_info.codebase_urls
@property
def reference_urls(self) -> Optional[List[str]]:
return self._module_info.reference_urls
@property
def streamable(self) -> bool:
return self._module_info.streamable
@property
def format(self) -> Optional[str]:
return self._module_info.format
@property
def module_type(self) -> str:
return self._module_info.module_type
class EvaluationModule(EvaluationModuleInfoMixin):
"""A `EvaluationModule` is the base class and common API for metrics, comparisons, and measurements.
Args:
config_name (`str`):
This is used to define a hash specific to a module computation script and prevents the module's data
            from being overridden when the module loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
            Specify the total number of processes in a distributed setting.
            This is useful to compute modules in distributed setups (in particular non-additive modules like F1).
        process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1).
            This is useful to compute modules in distributed setups (in particular non-additive metrics like F1).
seed (`int`, optional):
If specified, this will temporarily set numpy's random seed when [`~evaluate.EvaluationModule.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute module in distributed setups (in particular non-additive metrics like F1).
hash (`str`):
Used to identify the evaluation module according to the hashed file contents.
max_concurrent_cache_files (`int`):
Max number of concurrent module cache files (default `10000`).
timeout (`Union[int, float]`):
Timeout in second for distributed setting synchronization.
"""
def __init__(
self,
config_name: Optional[str] = None,
keep_in_memory: bool = False,
cache_dir: Optional[str] = None,
num_process: int = 1,
process_id: int = 0,
seed: Optional[int] = None,
experiment_id: Optional[str] = None,
hash: str = None,
max_concurrent_cache_files: int = 10000,
timeout: Union[int, float] = 100,
**kwargs,
):
# prepare info
self.config_name = config_name or "default"
info = self._info()
info.module_name = camelcase_to_snakecase(self.__class__.__name__)
info.config_name = self.config_name
info.experiment_id = experiment_id or "default_experiment"
EvaluationModuleInfoMixin.__init__(self, info) # For easy access on low level
# Safety checks on num_process and process_id
if not isinstance(process_id, int) or process_id < 0:
raise ValueError("'process_id' should be a number greater than 0")
if not isinstance(num_process, int) or num_process <= process_id:
raise ValueError("'num_process' should be a number greater than process_id")
if keep_in_memory and num_process != 1:
raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
self.num_process = num_process
self.process_id = process_id
self.max_concurrent_cache_files = max_concurrent_cache_files
self.keep_in_memory = keep_in_memory
self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
self.data_dir = self._build_data_dir()
if seed is None:
_, seed, pos, *_ = np.random.get_state()
self.seed: int = seed[pos] if pos < 624 else seed[0]
else:
self.seed: int = seed
self.timeout: Union[int, float] = timeout
# Update 'compute' and 'add' docstring
# methods need to be copied otherwise it changes the docstrings of every instance
self.compute = types.MethodType(copyfunc(self.compute), self)
self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
self.add = types.MethodType(copyfunc(self.add), self)
self.compute.__func__.__doc__ += self.info.inputs_description
self.add_batch.__func__.__doc__ += self.info.inputs_description
self.add.__func__.__doc__ += self.info.inputs_description
# self.arrow_schema = pa.schema(field for field in self.info.features.type)
self.selected_feature_format = None
self.buf_writer = None
self.writer = None
self.writer_batch_size = None
self.data = None
# This is the cache file we store our predictions/references in
# Keep it None for now so we can (cloud)pickle the object
self.cache_file_name = None
self.filelock = None
self.rendez_vous_lock = None
# This is all the cache files on which we have a lock when we are in a distributed setting
self.file_paths = None
self.filelocks = None
# This fingerprints the evaluation module according to the hashed contents of the module code
self._hash = hash
def __len__(self):
"""Return the number of examples (predictions or predictions/references pair)
currently stored in the evaluation module's cache.
"""
return 0 if self.writer is None else len(self.writer)
def __repr__(self):
return (
f'EvaluationModule(name: "{self.name}", module_type: "{self.module_type}", '
f'features: {self.features}, usage: """{self.inputs_description}""", '
f"stored examples: {len(self)})"
)
def _build_data_dir(self):
"""Path of this evaluation module in cache_dir:
Will be:
self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
        If any of these elements is missing or if ``with_version=False`` the corresponding subfolders are dropped.
"""
builder_data_dir = self._data_dir_root
builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
os.makedirs(builder_data_dir, exist_ok=True)
return builder_data_dir
def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
"""Create a new cache file. If the default cache file is used, we generated a new hash."""
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
filelock = None
for i in range(self.max_concurrent_cache_files):
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=timeout)
except Timeout:
                # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
# We raise an error
if self.num_process != 1:
raise ValueError(
f"Error in _create_cache_file: another evaluation module instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed evaluation module instances."
) from None
if i == self.max_concurrent_cache_files - 1:
raise ValueError(
f"Cannot acquire lock, too many evaluation module instance are operating concurrently on this file system."
f"You should set a larger value of max_concurrent_cache_files when creating the evaluation module "
f"(current value is {self.max_concurrent_cache_files})."
) from None
                # Otherwise (we may pick a new file name and are not yet at the max number of attempts) we sample a new uuid-based file name.
file_uuid = str(uuid.uuid4())
file_path = os.path.join(
self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
)
else:
break
return file_path, filelock
def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
"""Get a lock on all the cache files in a distributed setup.
        We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
"""
if self.num_process == 1:
if self.cache_file_name is None:
raise ValueError(
"Evaluation module cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
"at least once before calling `compute`."
)
file_paths = [self.cache_file_name]
else:
file_paths = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
for process_id in range(self.num_process)
]
# Let's acquire a lock on each process files to be sure they are finished writing
filelocks = []
for process_id, file_path in enumerate(file_paths):
if process_id == 0: # process 0 already has its lock file
filelocks.append(self.filelock)
else:
filelock = FileLock(file_path + ".lock")
try:
filelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Cannot acquire lock on cached file {file_path} for process {process_id}."
) from None
else:
filelocks.append(filelock)
return file_paths, filelocks
def _check_all_processes_locks(self):
expected_lock_file_names = [
os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
for process_id in range(self.num_process)
]
for expected_lock_file_name in expected_lock_file_names:
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
def _check_rendez_vous(self):
expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
nofilelock = FileFreeLock(expected_lock_file_name)
try:
nofilelock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(
f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
) from None
else:
nofilelock.release()
lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
rendez_vous_lock = FileLock(lock_file_name)
try:
rendez_vous_lock.acquire(timeout=self.timeout)
except Timeout:
raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
else:
rendez_vous_lock.release()
def _finalize(self):
"""Close all the writing process and load/gather the data
from all the nodes if main node or all_process is True.
"""
if self.writer is not None:
self.writer.finalize()
self.writer = None
# release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
if self.filelock is not None and self.process_id > 0:
self.filelock.release()
if self.keep_in_memory:
# Read the predictions and references
reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.selected_feature_format))
self.data = Dataset.from_buffer(self.buf_writer.getvalue())
elif self.process_id == 0:
# Let's acquire a lock on each node files to be sure they are finished writing
file_paths, filelocks = self._get_all_cache_files()
# Read the predictions and references
try:
reader = ArrowReader(path="", info=DatasetInfo(features=self.selected_feature_format))
self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
except FileNotFoundError:
raise ValueError(
"Error in finalize: another evaluation module instance is already using the local cache file. "
"Please specify an experiment_id to avoid collision between distributed evaluation module instances."
) from None
# Store file paths and locks and we will release/delete them after the computation.
self.file_paths = file_paths
self.filelocks = filelocks
def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
"""Compute the evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (optional):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 1])
```
"""
all_kwargs = {"predictions": predictions, "references": references, **kwargs}
if predictions is None and references is None:
missing_kwargs = {k: None for k in self._feature_names() if k not in all_kwargs}
all_kwargs.update(missing_kwargs)
else:
missing_inputs = [k for k in self._feature_names() if k not in all_kwargs]
if missing_inputs:
raise ValueError(
f"Evaluation module inputs are missing: {missing_inputs}. All required inputs are {list(self._feature_names())}"
)
inputs = {input_name: all_kwargs[input_name] for input_name in self._feature_names()}
compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self._feature_names()}
if any(v is not None for v in inputs.values()):
self.add_batch(**inputs)
self._finalize()
self.cache_file_name = None
self.filelock = None
self.selected_feature_format = None
if self.process_id == 0:
self.data.set_format(type=self.info.format)
inputs = {input_name: self.data[input_name] for input_name in self._feature_names()}
with temp_seed(self.seed):
output = self._compute(**inputs, **compute_kwargs)
if self.buf_writer is not None:
self.buf_writer = None
del self.data
self.data = None
else:
# Release locks and delete all the cache files. Process 0 is released last.
for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
logger.info(f"Removing {file_path}")
del self.data
self.data = None
del self.writer
self.writer = None
os.remove(file_path)
filelock.release()
return output
else:
return None
def add_batch(self, *, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for the evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
... accuracy.add_batch(references=refs, predictions=preds)
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
if bad_inputs:
raise ValueError(
f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
)
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in self._feature_names()}
if self.writer is None:
self.selected_feature_format = self._infer_feature_from_batch(batch)
self._init_writer()
try:
for key, column in batch.items():
if len(column) > 0:
self._enforce_nested_string_type(self.selected_feature_format[key], column[0])
batch = self.selected_feature_format.encode_batch(batch)
self.writer.write_batch(batch)
except (pa.ArrowInvalid, TypeError):
if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
col0 = next(iter(batch))
bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
error_msg = (
f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
)
elif set(self.selected_feature_format) != {"references", "predictions"}:
error_msg = (
f"Module inputs don't match the expected format.\n"
f"Expected format: {self.selected_feature_format },\n"
)
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(batch[input_name])}"
for input_name in self.selected_feature_format
)
error_msg += error_msg_inputs
else:
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format: {self.selected_feature_format },\n"
f"Input predictions: {summarize_if_long_list(predictions)},\n"
f"Input references: {summarize_if_long_list(references)}"
)
raise ValueError(error_msg) from None
def add(self, *, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for the evaluation module's stack.
Args:
prediction (`list/array/tensor`, *optional*):
Predictions.
reference (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> accuracy.add(references=[0,1], predictions=[1,0])
```
"""
bad_inputs = [input_name for input_name in kwargs if input_name not in self._feature_names()]
if bad_inputs:
raise ValueError(
f"Bad inputs for evaluation module: {bad_inputs}. All required inputs are {list(self._feature_names())}"
)
example = {"predictions": prediction, "references": reference, **kwargs}
example = {input_name: example[input_name] for input_name in self._feature_names()}
if self.writer is None:
self.selected_feature_format = self._infer_feature_from_example(example)
self._init_writer()
try:
self._enforce_nested_string_type(self.selected_feature_format, example)
example = self.selected_feature_format.encode_example(example)
self.writer.write(example)
except (pa.ArrowInvalid, TypeError):
error_msg = (
f"Evaluation module inputs don't match the expected format.\n"
f"Expected format: {self.selected_feature_format},\n"
)
error_msg_inputs = ",\n".join(
f"Input {input_name}: {summarize_if_long_list(example[input_name])}"
for input_name in self.selected_feature_format
)
error_msg += error_msg_inputs
raise ValueError(error_msg) from None
def _infer_feature_from_batch(self, batch):
if isinstance(self.features, Features):
return self.features
else:
example = dict([(k, v[0]) for k, v in batch.items()])
return self._infer_feature_from_example(example)
def _infer_feature_from_example(self, example):
if isinstance(self.features, Features):
return self.features
else:
for features in self.features:
try:
self._enforce_nested_string_type(features, example)
features.encode_example(example)
return features
except (ValueError, TypeError):
continue
feature_strings = "\n".join([f"Feature option {i}: {feature}" for i, feature in enumerate(self.features)])
error_msg = (
f"Predictions and/or references don't match the expected format.\n"
f"Expected format:\n{feature_strings},\n"
f"Input predictions: {summarize_if_long_list(example['predictions'])},\n"
f"Input references: {summarize_if_long_list(example['references'])}"
)
raise ValueError(error_msg) from None
def _feature_names(self):
if isinstance(self.features, list):
feature_names = list(self.features[0].keys())
else:
feature_names = list(self.features.keys())
return feature_names
def _init_writer(self, timeout=1):
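        # In distributed settings, process 0 first acquires a rendez-vous lock; it is only
        # released at the end of this method, once every process has created and locked its
        # own cache file, so that all writers are ready before evaluation proceeds.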
if self.num_process > 1:
if self.process_id == 0:
file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
self.rendez_vous_lock = FileLock(file_path)
try:
self.rendez_vous_lock.acquire(timeout=timeout)
except TimeoutError:
raise ValueError(
f"Error in _init_writer: another evalution module instance is already using the local cache file at {file_path}. "
f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
f"between distributed evaluation module instances."
) from None
if self.keep_in_memory:
self.buf_writer = pa.BufferOutputStream()
self.writer = ArrowWriter(
features=self.selected_feature_format, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
)
else:
self.buf_writer = None
# Get cache file name and lock it
if self.cache_file_name is None or self.filelock is None:
cache_file_name, filelock = self._create_cache_file() # get ready
self.cache_file_name = cache_file_name
self.filelock = filelock
self.writer = ArrowWriter(
features=self.selected_feature_format,
path=self.cache_file_name,
writer_batch_size=self.writer_batch_size,
)
        # Setup rendez-vous here if we are in a distributed setting, so all processes are ready before writing
if self.num_process > 1:
if self.process_id == 0:
self._check_all_processes_locks() # wait for everyone to be ready
self.rendez_vous_lock.release() # let everyone go
else:
self._check_rendez_vous() # wait for master to be ready and to let everyone go
def _info(self) -> EvaluationModuleInfo:
"""Construct the EvaluationModuleInfo object. See `EvaluationModuleInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (EvaluationModuleInfo) The EvaluationModule information
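        Example of a minimal implementation (an illustrative sketch; the description and
        feature names/types are placeholders, not requirements of the base API):
        ```py
        >>> import datasets
        >>> import evaluate
        >>> def _info(self):
        ...     return evaluate.MetricInfo(
        ...         description="A toy exact-match metric",
        ...         citation="",
        ...         features=datasets.Features(
        ...             {"predictions": datasets.Value("int64"), "references": datasets.Value("int64")}
        ...         ),
        ...     )
        ```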
"""
raise NotImplementedError
def download_and_prepare(
self,
download_config: Optional[DownloadConfig] = None,
dl_manager: Optional[DownloadManager] = None,
):
"""Downloads and prepares evaluation module for reading.
Args:
download_config ([`DownloadConfig`], *optional*):
Specific download configuration parameters.
dl_manager ([`DownloadManager`], *optional*):
Specific download manager to use.
Example:
```py
        >>> import evaluate
        >>> accuracy = evaluate.load("accuracy")
        >>> accuracy.download_and_prepare()
```
"""
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig()
download_config.cache_dir = os.path.join(self.data_dir, "downloads")
download_config.force_download = False
dl_manager = DownloadManager(
dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
)
self._download_and_prepare(dl_manager)
def _download_and_prepare(self, dl_manager):
"""Downloads and prepares resources for the evaluation module.
This is the internal implementation to overwrite called when user calls
`download_and_prepare`. It should download all required resources for the evaluation module.
Args:
dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
"""
return None
def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
"""This method defines the common API for all the evaluation module in the library"""
raise NotImplementedError
def __del__(self):
if hasattr(self, "filelock") and self.filelock is not None:
self.filelock.release()
if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
self.rendez_vous_lock.release()
if hasattr(self, "writer"): # in case it was already deleted
del self.writer
if hasattr(self, "data"): # in case it was already deleted
del self.data
def _enforce_nested_string_type(self, schema, obj):
"""
Recursively checks if there is any Value feature of type string and throws TypeError if corresponding object is not a string.
Since any Python object can be cast to string this avoids implicitly casting wrong input types (e.g. lists) to string without error.
"""
# Nested structures: we allow dict, list, tuples, sequences
if isinstance(schema, dict):
return [self._enforce_nested_string_type(sub_schema, o) for k, (sub_schema, o) in zip_dict(schema, obj)]
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
return [self._enforce_nested_string_type(sub_schema, o) for o in obj]
elif isinstance(schema, Sequence):
            # We allow converting a list of dicts to a dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k, dict_tuples in zip_dict(schema.feature, *obj):
for sub_obj in dict_tuples[1:]:
if _check_non_null_non_empty_recursive(sub_obj, dict_tuples[0]):
self._enforce_nested_string_type(dict_tuples[0], sub_obj)
break
return None
else:
# obj is a single dict
for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
for sub_obj in sub_objs:
if _check_non_null_non_empty_recursive(sub_obj, sub_schema):
self._enforce_nested_string_type(sub_schema, sub_obj)
break
return None
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
if not isinstance(first_elmt, list):
return self._enforce_nested_string_type(schema.feature, first_elmt)
elif isinstance(schema, Value):
if pa.types.is_string(schema.pa_type) and not isinstance(obj, str):
raise TypeError(f"Expected type str but got {type(obj)}.")
class Metric(EvaluationModule):
"""A Metric is the base class and common API for all metrics.
Args:
config_name (`str`):
This is used to define a hash specific to a metric computation script and prevents the metric's data
            from being overridden when the metric loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
            Specify the total number of nodes in a distributed setting.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1).
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Metric.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
max_concurrent_cache_files (`int`):
Max number of concurrent metric cache files (default `10000`).
timeout (`Union[int, float]`):
            Timeout in seconds for distributed setting synchronization.
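    Example (an illustrative usage sketch; assumes the `accuracy` metric script is available):
    ```py
    >>> import evaluate
    >>> accuracy = evaluate.load("accuracy")
    >>> accuracy.compute(references=[0, 1], predictions=[0, 1])
    {'accuracy': 1.0}
    ```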
"""
class Comparison(EvaluationModule):
"""A Comparison is the base class and common API for all comparisons.
Args:
config_name (`str`):
This is used to define a hash specific to a comparison computation script and prevents the comparison's data
            from being overridden when the comparison loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
            Specify the total number of nodes in a distributed setting.
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1).
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Comparison.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute comparisons in distributed setups (in particular non-additive comparisons).
max_concurrent_cache_files (`int`):
Max number of concurrent comparison cache files (default `10000`).
timeout (`Union[int, float]`):
            Timeout in seconds for distributed setting synchronization.
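    Example (an illustrative usage sketch; the `mcnemar` module name and its
    `predictions1`/`predictions2`/`references` inputs are assumptions about one particular
    comparison script, not guarantees of this base class):
    ```py
    >>> import evaluate
    >>> mcnemar = evaluate.load("mcnemar")
    >>> results = mcnemar.compute(references=[0, 1], predictions1=[0, 1], predictions2=[1, 0])
    ```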
"""
class Measurement(EvaluationModule):
"""A Measurement is the base class and common API for all measurements.
Args:
config_name (`str`):
This is used to define a hash specific to a measurement computation script and prevents the measurement's data
            from being overridden when the measurement loading script is modified.
keep_in_memory (`bool`):
Keep all predictions and references in memory. Not possible in distributed settings.
cache_dir (`str`):
Path to a directory in which temporary prediction/references data will be stored.
The data directory should be located on a shared file-system in distributed setups.
num_process (`int`):
            Specify the total number of nodes in a distributed setting.
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
process_id (`int`):
            Specify the id of the current process in a distributed setup (between 0 and num_process-1).
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
seed (`int`, *optional*):
If specified, this will temporarily set numpy's random seed when [`~evaluate.Measurement.compute`] is run.
experiment_id (`str`):
A specific experiment id. This is used if several distributed evaluations share the same file system.
This is useful to compute measurements in distributed setups (in particular non-additive measurements).
max_concurrent_cache_files (`int`):
Max number of concurrent measurement cache files (default `10000`).
timeout (`Union[int, float]`):
            Timeout in seconds for distributed setting synchronization.
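    Example (an illustrative usage sketch; the `word_length` module name and its `data` input
    are assumptions about one particular measurement script, not guarantees of this base class):
    ```py
    >>> import evaluate
    >>> word_length = evaluate.load("word_length")
    >>> results = word_length.compute(data=["hello world", "foo bar"])
    ```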
"""
class CombinedEvaluations:
def __init__(self, evaluation_modules, force_prefix=False):
from .loading import load # avoid circular imports
self.evaluation_module_names = None
if isinstance(evaluation_modules, list):
self.evaluation_modules = evaluation_modules
elif isinstance(evaluation_modules, dict):
self.evaluation_modules = list(evaluation_modules.values())
self.evaluation_module_names = list(evaluation_modules.keys())
loaded_modules = []
for module in self.evaluation_modules:
if isinstance(module, str):
module = load(module)
loaded_modules.append(module)
self.evaluation_modules = loaded_modules
if self.evaluation_module_names is None:
self.evaluation_module_names = [module.name for module in self.evaluation_modules]
self.force_prefix = force_prefix
def add(self, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for each evaluation module's stack.
Args:
            prediction (`list/array/tensor`, *optional*):
                Prediction.
            reference (`list/array/tensor`, *optional*):
                Reference.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for ref, pred in zip([0,1,0,1], [1,0,0,1]):
... clf_metrics.add(references=ref, predictions=pred)
```
"""
for evaluation_module in self.evaluation_modules:
batch = {"predictions": prediction, "references": reference, **kwargs}
batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
evaluation_module.add(**batch)
def add_batch(self, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for each evaluation module's stack.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> for refs, preds in zip([[0,1],[0,1]], [[1,0],[0,1]]):
        ...     clf_metrics.add_batch(references=refs, predictions=preds)
```
"""
for evaluation_module in self.evaluation_modules:
batch = {"predictions": predictions, "references": references, **kwargs}
batch = {input_name: batch[input_name] for input_name in evaluation_module._feature_names()}
evaluation_module.add_batch(**batch)
def compute(self, predictions=None, references=None, **kwargs):
"""Compute each evaluation module.
Usage of positional arguments is not allowed to prevent mistakes.
Args:
predictions (`list/array/tensor`, *optional*):
Predictions.
references (`list/array/tensor`, *optional*):
References.
**kwargs (*optional*):
Keyword arguments that will be forwarded to the evaluation module [`~evaluate.EvaluationModule.compute`]
method (see details in the docstring).
Return:
`dict` or `None`
- Dictionary with the results if this evaluation module is run on the main process (`process_id == 0`).
- `None` if the evaluation module is not run on the main process (`process_id != 0`).
Example:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
>>> clf_metrics.compute(predictions=[0,1], references=[1,1])
{'accuracy': 0.5, 'f1': 0.6666666666666666}
```
"""
results = []
for evaluation_module in self.evaluation_modules:
batch = {"predictions": predictions, "references": references, **kwargs}
results.append(evaluation_module.compute(**batch))
return self._merge_results(results)
def _merge_results(self, results):
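        # Score keys that are unique across modules are kept as-is (unless `force_prefix=True`);
        # duplicated keys are prefixed with the module name, plus a running index when the same
        # module name was combined more than once.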
merged_results = {}
results_keys = list(itertools.chain.from_iterable([r.keys() for r in results]))
duplicate_keys = {item for item, count in collections.Counter(results_keys).items() if count > 1}
duplicate_names = [
item for item, count in collections.Counter(self.evaluation_module_names).items() if count > 1
]
duplicate_counter = {name: 0 for name in duplicate_names}
for module_name, result in zip(self.evaluation_module_names, results):
for k, v in result.items():
if k not in duplicate_keys and not self.force_prefix:
merged_results[f"{k}"] = v
elif module_name in duplicate_counter:
merged_results[f"{module_name}_{duplicate_counter[module_name]}_{k}"] = v
else:
merged_results[f"{module_name}_{k}"] = v
if module_name in duplicate_counter:
duplicate_counter[module_name] += 1
return merged_results
def combine(evaluations, force_prefix=False):
"""Combines several metrics, comparisons, or measurements into a single `CombinedEvaluations` object that
can be used like a single evaluation module.
If two scores have the same name, then they are prefixed with their module names.
    If two modules have the same name, use a dictionary to give them different names; otherwise an integer id is appended to the prefix.
Args:
evaluations (`Union[list, dict]`):
A list or dictionary of evaluation modules. The modules can either be passed
as strings or loaded `EvaluationModule`s. If a dictionary is passed its keys are the names used and the values the modules.
The names are used as prefix in case there are name overlaps in the returned results of each module or if `force_prefix=True`.
force_prefix (`bool`, *optional*, defaults to `False`):
If `True` all scores from the modules are prefixed with their name. If
a dictionary is passed the keys are used as name otherwise the module's name.
Examples:
```py
>>> import evaluate
>>> accuracy = evaluate.load("accuracy")
>>> f1 = evaluate.load("f1")
>>> clf_metrics = combine(["accuracy", "f1"])
```
"""
return CombinedEvaluations(evaluations, force_prefix=force_prefix)
| 46,290 | 43.942718 | 147 |
py
|
evaluate
|
evaluate-main/src/evaluate/config.py
|
import importlib
import os
import platform
from pathlib import Path
from packaging import version
from .utils.logging import get_logger
logger = get_logger(__name__)
# Metrics
S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/metrics/{path}/{name}"
REPO_MEASUREMENTS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/measurements/{path}/{name}"
REPO_COMPARISONS_URL = "https://raw.githubusercontent.com/huggingface/evaluate/{revision}/comparisons/{path}/{name}"
# Evaluation module types
EVALUATION_MODULE_TYPES = ["metric", "comparison", "measurement"]
# Hub
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
HF_LIST_ENDPOINT = HF_ENDPOINT + "/api/spaces?filter={type}"
HUB_EVALUATE_URL = HF_ENDPOINT + "/spaces/{path}/resolve/{revision}/{name}"
HUB_DEFAULT_VERSION = "main"
PY_VERSION = version.parse(platform.python_version())
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Accepted values for boolean environment variables
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
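# For example, USE_TORCH=1, USE_TORCH=ON or USE_TORCH=AUTO all count as enabled in the
# framework availability checks below.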
# Imports
PANDAS_VERSION = version.parse(importlib_metadata.version("pandas"))
PYARROW_VERSION = version.parse(importlib_metadata.version("pyarrow"))
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
TORCH_VERSION = "N/A"
TORCH_AVAILABLE = False
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
if TORCH_AVAILABLE:
try:
TORCH_VERSION = version.parse(importlib_metadata.version("torch"))
logger.info(f"PyTorch version {TORCH_VERSION} available.")
except importlib_metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling PyTorch because USE_TF is set")
TF_VERSION = "N/A"
TF_AVAILABLE = False
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
if TF_AVAILABLE:
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for package in [
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"tensorflow-rocm",
"tensorflow-macos",
]:
try:
TF_VERSION = version.parse(importlib_metadata.version(package))
except importlib_metadata.PackageNotFoundError:
continue
else:
break
else:
TF_AVAILABLE = False
if TF_AVAILABLE:
if TF_VERSION.major < 2:
logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
TF_AVAILABLE = False
else:
logger.info(f"TensorFlow version {TF_VERSION} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
JAX_VERSION = "N/A"
JAX_AVAILABLE = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
JAX_AVAILABLE = importlib.util.find_spec("jax") is not None
if JAX_AVAILABLE:
try:
JAX_VERSION = version.parse(importlib_metadata.version("jax"))
logger.info(f"JAX version {JAX_VERSION} available.")
except importlib_metadata.PackageNotFoundError:
pass
else:
logger.info("Disabling JAX because USE_JAX is set to False")
# Cache location
DEFAULT_XDG_CACHE_HOME = "~/.cache"
XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
DEFAULT_HF_EVALUATE_CACHE = os.path.join(HF_CACHE_HOME, "evaluate")
HF_EVALUATE_CACHE = Path(os.getenv("HF_EVALUATE_CACHE", DEFAULT_HF_EVALUATE_CACHE))
DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
DOWNLOADED_DATASETS_DIR = "downloads"
DEFAULT_DOWNLOADED_EVALUATE_PATH = os.path.join(HF_EVALUATE_CACHE, DOWNLOADED_DATASETS_DIR)
DOWNLOADED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_EVALUATE_PATH", DEFAULT_DOWNLOADED_EVALUATE_PATH))
EXTRACTED_EVALUATE_DIR = "extracted"
DEFAULT_EXTRACTED_EVALUATE_PATH = os.path.join(DEFAULT_DOWNLOADED_EVALUATE_PATH, EXTRACTED_EVALUATE_DIR)
EXTRACTED_EVALUATE_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_EVALUATE_PATH", DEFAULT_EXTRACTED_EVALUATE_PATH))
# Download count for the website
HF_UPDATE_DOWNLOAD_COUNTS = (
os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
)
# Offline mode
HF_EVALUATE_OFFLINE = os.environ.get("HF_EVALUATE_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
# File names
LICENSE_FILENAME = "LICENSE"
METRIC_INFO_FILENAME = "metric_info.json"
DATASETDICT_JSON_FILENAME = "dataset_dict.json"
MODULE_NAME_FOR_DYNAMIC_MODULES = "evaluate_modules"
HF_HUB_ALLOWED_TASKS = [
"image-classification",
"translation",
"image-segmentation",
"fill-mask",
"automatic-speech-recognition",
"token-classification",
"sentence-similarity",
"audio-classification",
"question-answering",
"summarization",
"zero-shot-classification",
"table-to-text",
"feature-extraction",
"other",
"multiple-choice",
"text-classification",
"text-to-image",
"text2text-generation",
"zero-shot-image-classification",
"tabular-classification",
"tabular-regression",
"image-to-image",
"tabular-to-text",
"unconditional-image-generation",
"text-retrieval",
"text-to-speech",
"object-detection",
"audio-to-audio",
"text-generation",
"conversational",
"table-question-answering",
"visual-question-answering",
"image-to-text",
"reinforcement-learning",
"voice-activity-detection",
"time-series-forecasting",
"document-question-answering",
]
| 6,648 | 33.450777 | 118 |
py
|
evaluate
|
evaluate-main/src/evaluate/saving.py
|
import json
import os
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from datasets.utils.filelock import FileLock
from . import __version__
def save(path_or_file, **data):
"""
Saves results to a JSON file. Also saves system information such as current time, current commit
hash if inside a repository, and Python system information.
Args:
path_or_file (`str`):
Path or file to store the file. If only a folder is provided
the results file will be saved in the format `"result-%Y_%m_%d-%H_%M_%S.json"`.
Example:
```py
>>> import evaluate
>>> result = {"bleu": 0.7}
>>> params = {"model": "gpt-2"}
>>> evaluate.save("./results/", **result, **params)
```
"""
current_time = datetime.now()
file_path = _setup_path(path_or_file, current_time)
data["_timestamp"] = current_time.isoformat()
data["_git_commit_hash"] = _git_commit_hash()
data["_evaluate_version"] = __version__
data["_python_version"] = sys.version
data["_interpreter_path"] = sys.executable
with FileLock(str(file_path) + ".lock"):
with open(file_path, "w") as f:
json.dump(data, f)
# cleanup lock file
try:
os.remove(str(file_path) + ".lock")
except FileNotFoundError:
pass
return file_path
def _setup_path(path_or_file, current_time):
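    # A path with a file suffix is used as-is; a bare folder gets a timestamped file name,
    # e.g. "results/" -> "results/result-2024_01_01-12_00_00.json" (illustrative timestamp).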
path_or_file = Path(path_or_file)
is_file = len(path_or_file.suffix) > 0
if is_file:
folder = path_or_file.parent
file_name = path_or_file.name
else:
folder = path_or_file
file_name = "result-" + current_time.strftime("%Y_%m_%d-%H_%M_%S") + ".json"
folder.mkdir(parents=True, exist_ok=True)
return folder / file_name
def _git_commit_hash():
res = subprocess.run("git rev-parse --is-inside-work-tree".split(), cwd="./", stdout=subprocess.PIPE)
if res.stdout.decode().strip() == "true":
res = subprocess.run("git rev-parse HEAD".split(), cwd=os.getcwd(), stdout=subprocess.PIPE)
return res.stdout.decode().strip()
else:
return None
| 2,159 | 28.189189 | 105 |
py
|
evaluate
|
evaluate-main/src/evaluate/info.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" EvaluationModuleInfo records information we know about a dataset and a metric.
"""
import dataclasses
import json
import os
from dataclasses import asdict, dataclass, field
from typing import List, Optional, Union
from datasets.features import Features, Value
from . import config
from .utils.logging import get_logger
logger = get_logger(__name__)
@dataclass
class EvaluationModuleInfo:
"""Base class to store information about an evaluation used for `MetricInfo`, `ComparisonInfo`,
and `MeasurementInfo`.
`EvaluationModuleInfo` documents an evaluation, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
# Set in the dataset scripts
description: str
citation: str
features: Union[Features, List[Features]]
inputs_description: str = field(default_factory=str)
homepage: str = field(default_factory=str)
license: str = field(default_factory=str)
codebase_urls: List[str] = field(default_factory=list)
reference_urls: List[str] = field(default_factory=list)
streamable: bool = False
format: Optional[str] = None
module_type: str = "metric" # deprecate this in the future
# Set later by the builder
module_name: Optional[str] = None
config_name: Optional[str] = None
experiment_id: Optional[str] = None
def __post_init__(self):
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
raise ValueError(
f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
f"Here {key} is an instance of {value.__class__.__name__}"
)
def write_to_directory(self, metric_info_dir):
"""Write `EvaluationModuleInfo` as JSON to `metric_info_dir`.
Also save the license separately in LICENSE.
Args:
metric_info_dir (`str`):
The directory to save `metric_info_dir` to.
Example:
```py
>>> my_metric.info.write_to_directory("/path/to/directory/")
```
"""
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
json.dump(asdict(self), f)
with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
f.write(self.license)
@classmethod
def from_directory(cls, metric_info_dir) -> "EvaluationModuleInfo":
"""Create `EvaluationModuleInfo` from the JSON file in `metric_info_dir`.
Args:
metric_info_dir (`str`):
The directory containing the `metric_info` JSON file. This
should be the root directory of a specific metric version.
Example:
```py
>>> my_metric = EvaluationModuleInfo.from_directory("/path/to/directory/")
```
"""
logger.info(f"Loading Metric info from {metric_info_dir}")
if not metric_info_dir:
raise ValueError("Calling EvaluationModuleInfo.from_directory() with undefined metric_info_dir.")
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
metric_info_dict = json.load(f)
return cls.from_dict(metric_info_dict)
@classmethod
def from_dict(cls, metric_info_dict: dict) -> "EvaluationModuleInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
@dataclass
class MetricInfo(EvaluationModuleInfo):
"""Information about a metric.
`EvaluationModuleInfo` documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
module_type: str = "metric"
@dataclass
class ComparisonInfo(EvaluationModuleInfo):
"""Information about a comparison.
`EvaluationModuleInfo` documents a comparison, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
module_type: str = "comparison"
@dataclass
class MeasurementInfo(EvaluationModuleInfo):
"""Information about a measurement.
`EvaluationModuleInfo` documents a measurement, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
"""
module_type: str = "measurement"
| 5,490 | 33.753165 | 109 |
py
|
evaluate
|
evaluate-main/src/evaluate/__init__.py
|
# flake8: noqa
# Copyright 2020 The HuggingFace Evaluate Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "0.4.1.dev0"
from packaging import version
SCRIPTS_VERSION = "main" if version.parse(__version__).is_devrelease else __version__
del version
from .evaluation_suite import EvaluationSuite
from .evaluator import (
AudioClassificationEvaluator,
AutomaticSpeechRecognitionEvaluator,
Evaluator,
ImageClassificationEvaluator,
QuestionAnsweringEvaluator,
SummarizationEvaluator,
Text2TextGenerationEvaluator,
TextClassificationEvaluator,
TextGenerationEvaluator,
TokenClassificationEvaluator,
TranslationEvaluator,
evaluator,
)
from .hub import push_to_hub
from .info import ComparisonInfo, EvaluationModuleInfo, MeasurementInfo, MetricInfo
from .inspect import inspect_evaluation_module, list_evaluation_modules
from .loading import load
from .module import CombinedEvaluations, Comparison, EvaluationModule, Measurement, Metric, combine
from .saving import save
from .utils import *
from .utils import gradio, logging
| 1,759 | 32.846154 | 99 |
py
|
evaluate
|
evaluate-main/src/evaluate/naming.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
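    # e.g. filepattern_for_dataset_split("squad", "train", "/cache", "arrow") -> "/cache/squad-train.arrow*"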
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filename_for_dataset_split(dataset_name, split, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
return prefix
def filepath_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
filename = filename_for_dataset_split(
dataset_name=dataset_name,
split=split,
filetype_suffix=filetype_suffix,
)
filepath = os.path.join(data_dir, filename)
return filepath
| 2,827 | 33.072289 | 90 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/base.py
|
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Union
# Lint as: python3
from datasets import Dataset, load_dataset
from evaluate.evaluator.utils import choose_split
try:
from scipy.stats import bootstrap
SCIPY_AVAILABLE = True
except ImportError:
SCIPY_AVAILABLE = False
try:
import transformers
from transformers import Pipeline, pipeline
TRANSFORMERS_AVAILABLE = True
except ImportError:
TRANSFORMERS_AVAILABLE = False
from time import perf_counter
from typing_extensions import Literal
from ..loading import load
from ..module import EvaluationModule
from ..utils.logging import get_logger
from .utils import DatasetColumn
logger = get_logger(__name__)
EVALUTOR_COMPUTE_START_DOCSTRING = r"""
Compute the metric for a given pipeline and dataset combination.
Args:
model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
            If the argument is not specified, we initialize the default pipeline for the task (in this case
`text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
data (`str` or `Dataset`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Defines which dataset subset to load. If `None` is passed the default subset is loaded.
split (`str`, defaults to `None`):
Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
metric (`str` or `EvaluationModule`, defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
specifies the evaluation strategy. Possible values are:
- `"simple"` - we evaluate the metric and return the scores.
- `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
of the returned metric keys, using `scipy`'s `bootstrap` method
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
confidence_level (`float`, defaults to `0.95`):
The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
n_resamples (`int`, defaults to `9999`):
The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
device (`int`, defaults to `None`):
Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
CUDA:0 used if available, CPU otherwise.
random_state (`int`, *optional*, defaults to `None`):
The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
debugging.
"""
EVALUATOR_COMPUTE_RETURN_DOCSTRING = r"""
Return:
        A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
`"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
containing the score, the confidence interval and the standard error calculated for each metric key.
"""
class Evaluator(ABC):
"""
The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across
different evaluators.
Base class implementing evaluator operations.
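    Example (constructing a task-specific evaluator through the [`evaluator`] factory, as in
    the task-specific docstrings below):
    ```py
    >>> from evaluate import evaluator
    >>> task_evaluator = evaluator("text-classification")
    ```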
"""
PIPELINE_KWARGS = {}
METRIC_KWARGS = {}
def __init__(self, task: str, default_metric_name: str = None):
if not TRANSFORMERS_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`."
)
if not SCIPY_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`."
)
self.task = task
self.default_metric_name = default_metric_name
@staticmethod
def _compute_confidence_interval(
metric,
metric_inputs,
metric_keys: List[str],
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
) -> Dict[str, Any]:
"""
A utility function enabling the confidence interval calculation for metrics computed
by the evaluator based on `scipy`'s `bootstrap` method.
"""
# bootstrap only works with functions that use args and no kwargs
def build_args_metric(metric, key, **kwargs):
def args_metric(*args):
return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key]
return args_metric
bootstrap_dict = {}
for key in metric_keys:
bs = bootstrap(
data=list(metric_inputs.values()),
statistic=build_args_metric(metric, key, **metric_inputs),
paired=True,
vectorized=False,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
bootstrap_dict[key] = {
"confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high),
"standard_error": bs.standard_error,
}
return bootstrap_dict
@staticmethod
def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]:
"""
A utility function computing time performance metrics:
- `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
- `samples_per_second` - pipeline throughput in the number of samples per second.
- `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample,
"""
latency = end_time - start_time
throughput = num_samples / latency
latency_sample = 1.0 / throughput
return {
"total_time_in_seconds": latency,
"samples_per_second": throughput,
"latency_in_seconds": latency_sample,
}
@staticmethod
def _infer_device() -> int:
"""Helper function to check if GPU or CPU is available for inference."""
# try infer with torch first
try:
import torch
if torch.cuda.is_available():
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
# if not available try TF
try:
import tensorflow as tf
if len(tf.config.list_physical_devices("GPU")) > 0:
device = 0 # first GPU
else:
device = -1 # CPU
except ImportError:
device = -1
if device == -1:
logger.info("No GPU found. The default device for pipeline inference is set to CPU.")
else:
logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).")
return device
@abstractmethod
def predictions_processor(self, *args, **kwargs):
"""
A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric.
"""
raise NotImplementedError()
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Dict[str, float]:
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column)
pipe = self.prepare_pipeline(
model_or_pipeline=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, label_mapping)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
# TODO: To clarify why `wer` and `cer` return float
# even though metric.compute contract says that it
# returns Optional[dict].
if type(metric_results) == float:
metric_results = {metric.name: metric_results}
result.update(metric_results)
result.update(perf_results)
return result
@staticmethod
def check_for_mismatch_in_device_setup(device, model_or_pipeline):
if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline):
if model_or_pipeline.device.type == "cpu":
raise ValueError(
"The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an "
"accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during "
"initialization to use an accelerator, or pass `device=None` to `compute`. "
)
elif device != model_or_pipeline.device.index:
raise ValueError(
f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`."
)
def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]):
"""
Ensure the columns required for the evaluation are present in the dataset.
Args:
data (`str` or [`Dataset`]):
Specifies the dataset we will run evaluation on.
            columns_names (`Dict[str, str]`):
                Dictionary of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
while the values are the column names to check.
Example:
```py
>>> from datasets import load_dataset
>>> from evaluate import evaluator
>>> data = load_dataset("rotten_tomatoes', split="train")
>>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
```
"""
for input_name, column_name in columns_names.items():
if column_name not in data.column_names:
raise ValueError(
f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
)
@staticmethod
def get_dataset_split(data, subset=None, split=None):
"""
Infers which split to use if `None` is given.
Args:
data (`str`):
Name of dataset.
subset (`str`):
Name of config for datasets with multiple configurations (e.g. 'glue/cola').
split (`str`, defaults to `None`):
Split to use.
Returns:
`split`: `str` containing which split to use
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
'test'
```
"""
if split is None:
split = choose_split(data, subset)
logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
return split
def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
"""
Load dataset with given subset and split.
Args:
data ([`Dataset`] or `str`, defaults to `None`):
Specifies the dataset we will run evaluation on. If it is of
type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
subset (`str`, defaults to `None`):
Specifies dataset subset to be passed to `name` in `load_dataset`. To be
used with datasets with several configurations (e.g. glue/sst2).
split (`str`, defaults to `None`):
User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
Returns:
data ([`Dataset`]): Loaded dataset which will be used for evaluation.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if isinstance(data, str):
split = self.get_dataset_split(data, subset, split)
data = load_dataset(data, name=subset, split=split)
return data
elif isinstance(data, Dataset):
if split is not None or subset is not None:
logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
return data
else:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
            second_input_column (`str`, *optional*):
The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
Example:
```py
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
```
"""
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
return {"references": data[label_column]}, DatasetColumn(data, input_column)
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
"""
Prepare pipeline.
Args:
model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
                If the argument is not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
argument specifies a pre-initialized pipeline.
preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
this argument.
Returns:
The initialized pipeline.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
```
"""
if device is None:
device = self._infer_device()
if (
isinstance(model_or_pipeline, str)
or isinstance(model_or_pipeline, transformers.PreTrainedModel)
or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
):
pipe = pipeline(
self.task,
model=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
else:
if model_or_pipeline is None:
pipe = pipeline(self.task, device=device)
else:
pipe = model_or_pipeline
if tokenizer is not None and feature_extractor is not None:
logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")
if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
raise ValueError(
f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
)
return pipe
def prepare_metric(self, metric: Union[str, EvaluationModule]):
"""
Prepare metric.
Args:
metric (`str` or [`EvaluationModule`], defaults to `None`):
Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
load it. Otherwise we assume it represents a pre-loaded metric.
Returns:
The loaded metric.
Example:
```py
>>> from evaluate import evaluator
>>> evaluator("text-classification").prepare_metric("accuracy")
```
"""
# Prepare metric.
if metric is None:
if self.default_metric_name is None:
raise ValueError(
"`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument."
)
metric = load(self.default_metric_name)
elif isinstance(metric, str):
metric = load(metric)
return metric
def call_pipeline(self, pipe, *args, **kwargs):
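        # Time the pipeline call so that throughput/latency statistics can be reported
        # alongside the metric scores (see `_compute_time_perf`).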
start_time = perf_counter()
pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
end_time = perf_counter()
return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
def compute_metric(
self,
metric: EvaluationModule,
metric_inputs: Dict,
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
):
"""Compute and return metrics."""
result = metric.compute(**metric_inputs, **self.METRIC_KWARGS)
if strategy == "bootstrap":
metric_keys = result.keys()
bootstrap_dict = self._compute_confidence_interval(
metric,
metric_inputs,
metric_keys,
confidence_level,
n_resamples,
random_state,
)
for key in metric_keys:
bootstrap_dict[key]["score"] = result[key]
return bootstrap_dict
return result
| 22,881 | 40.985321 | 178 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/automatic_speech_recognition.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
from datasets import Dataset
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
if TYPE_CHECKING:
from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("automatic-speech-recognition")
>>> data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="https://huggingface.co/openai/whisper-tiny.en",
>>> data=data,
>>> input_column="path",
>>> label_column="sentence",
>>> metric="wer",
>>> )
```
"""
class AutomaticSpeechRecognitionEvaluator(Evaluator):
"""
Automatic speech recognition evaluator.
This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name
`automatic-speech-recognition`.
Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`].
"""
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="automatic-speech-recognition", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
return {"predictions": [pred["text"] for pred in predictions]}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "path",
label_column: str = "sentence",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"path"`):
the name of the column containing the input audio path in the dataset specified by `data`.
label_column (`str`, defaults to `"sentence"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
"""
if generation_kwargs is not None:
self.PIPELINE_KWARGS.update(generation_kwargs)
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
)
return result
| 4,392 | 37.876106 | 118 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/token_classification.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from datasets import ClassLabel, Dataset, Sequence
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
from .utils import DatasetColumn
if TYPE_CHECKING:
from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION = r"""
The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following the [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings and whose labels are lists of offsets are not supported.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("token-classification")
>>> data = load_dataset("conll2003", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english",
>>> data=data,
>>> metric="seqeval",
>>> )
```
<Tip>
For example, the following dataset format is accepted by the evaluator:
```python
dataset = Dataset.from_dict(
mapping={
"tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]],
"ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]],
},
features=Features({
"tokens": Sequence(feature=Value(dtype="string")),
"ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])),
}),
)
```
</Tip>
<Tip warning={true}>
For example, the following dataset format is **not** accepted by the evaluator:
```python
dataset = Dataset.from_dict(
mapping={
"tokens": [["New York is a city and Felix a person."]],
"starts": [[0, 23]],
"ends": [[7, 27]],
"ner_tags": [["LOC", "PER"]],
},
features=Features({
"tokens": Value(dtype="string"),
"starts": Sequence(feature=Value(dtype="int32")),
"ends": Sequence(feature=Value(dtype="int32")),
"ner_tags": Sequence(feature=Value(dtype="string")),
}),
)
```
</Tip>
"""
class TokenClassificationEvaluator(Evaluator):
"""
Token classification evaluator.
This token classification evaluator can currently be loaded from [`evaluator`] using the default task name
`token-classification`.
Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`].
"""
PIPELINE_KWARGS = {"ignore_labels": []}
def __init__(self, task="token-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str):
"""
Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
Args:
predictions (`List[List[Dict]]`):
List of pipeline predictions, where each token has been labeled.
words (`List[List[str]]`):
Original input data to the pipeline, used to build predicted labels of the same length.
join_by (`str`):
String to use to join two words. In English, it will typically be " ".
Returns:
`dict`: a dictionary holding the predictions
"""
preds = []
# iterate over the data rows
for i, prediction in enumerate(predictions):
pred_processed = []
# get a list of tuples giving the indexes of the start and end character of each word
words_offsets = self.words_to_offsets(words[i], join_by)
token_index = 0
for word_offset in words_offsets:
# for each word, we may keep only the predicted label for the first token, discard the others
while prediction[token_index]["start"] < word_offset[0]:
token_index += 1
if prediction[token_index]["start"] > word_offset[0]: # bad indexing
pred_processed.append("O")
elif prediction[token_index]["start"] == word_offset[0]:
pred_processed.append(prediction[token_index]["entity"])
preds.append(pred_processed)
return {"predictions": preds}
def words_to_offsets(self, words: List[str], join_by: str):
"""
Convert a list of words to a list of offsets, where words are joined by `join_by`.
Args:
words (`List[str]`):
List of words to get offsets from.
join_by (`str`):
String to insert between words.
Returns:
`List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words.
"""
offsets = []
start = 0
for word in words:
end = start + len(word) - 1
offsets.append((start, end))
start = end + len(join_by) + 1
return offsets
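# Worked example (illustrative, not part of the original file): with join_by=" ",
# words_to_offsets(["New", "York"], " ") returns [(0, 2), (4, 7)]; "New" spans
# characters 0-2 and "York" spans characters 4-7 of the joined string "New York",
# matching the character offsets reported by the pipeline.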
def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str):
super().prepare_data(data, input_column, label_column)
if not isinstance(data.features[input_column], Sequence) or not isinstance(
data.features[label_column], Sequence
):
raise ValueError(
"TokenClassificationEvaluator expects the input and label columns to be provided as lists."
)
# If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
# Otherwise, we have to get the list of labels manually.
labels_are_int = isinstance(data.features[label_column].feature, ClassLabel)
if labels_are_int:
label_list = data.features[label_column].feature.names # list of string labels
id_to_label = {i: label for i, label in enumerate(label_list)}
references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]]
elif data.features[label_column].feature.dtype.startswith("int"):
raise NotImplementedError(
"References provided as integers, but the reference column is not a Sequence of ClassLabels."
)
else:
# In the event the labels are not a `Sequence[ClassLabel]`, we have already labels as strings
# An example is labels as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in polyglot_ner dataset
references = data[label_column]
metric_inputs = {"references": references}
data = data.map(lambda x: {input_column: join_by.join(x[input_column])})
pipeline_inputs = DatasetColumn(data, input_column)
return metric_inputs, pipeline_inputs
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device)
# check the pipeline outputs start characters in its predictions
dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS)
if dummy_output[0][0]["start"] is None:
raise ValueError(
"TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). "
"Transformers pipelines with a slow tokenizer will raise this error."
)
return pipe
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: str = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: Optional[int] = None,
random_state: Optional[int] = None,
input_column: str = "tokens",
label_column: str = "ner_tags",
join_by: Optional[str] = " ",
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"tokens"`):
The name of the column containing the tokens feature in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
join_by (`str`, *optional*, defaults to `" "`):
This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
words to generate a string input. This is especially useful for languages that do not separate words by a space.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, label_column=label_column, join_by=join_by
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, data[input_column], join_by)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
| 11,546 | 40.387097 | 289 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/text_classification.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numbers import Number
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
from datasets import Dataset, load_dataset
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
from .utils import DatasetColumnPair
if TYPE_CHECKING:
from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("text-classification")
>>> data = load_dataset("imdb", split="test[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli",
>>> data=data,
>>> metric="accuracy",
>>> label_mapping={"LABEL_0": 0.0, "LABEL_1": 1.0},
>>> strategy="bootstrap",
>>> n_resamples=10,
>>> random_state=0
>>> )
```
"""
class TextClassificationEvaluator(Evaluator):
"""
Text classification evaluator.
This text classification evaluator can currently be loaded from [`evaluator`] using the default task name
`text-classification` or with a `"sentiment-analysis"` alias.
Methods in this class assume a data format compatible with the [`~transformers.TextClassificationPipeline`] - a single textual
feature as input and a categorical label as output.
"""
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="text-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def prepare_data(self, data: Union[str, Dataset], input_column: str, second_input_column: str, label_column: str):
if data is None:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
if second_input_column is not None:
self.check_required_columns(data, {"second_input_column": second_input_column})
data = load_dataset(data) if isinstance(data, str) else data
return {"references": data[label_column]}, DatasetColumnPair(
data, input_column, second_input_column, "text", "text_pair"
)
def predictions_processor(self, predictions, label_mapping):
predictions = [
label_mapping[element["label"]] if label_mapping is not None else element["label"]
for element in predictions
]
return {"predictions": predictions}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
second_input_column: Optional[str] = None,
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, *optional*, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
second_input_column (`str`, *optional*, defaults to `None`):
The name of the second column containing the text features. This may be useful for classification tasks
as MNLI, where two columns are used.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, second_input_column=second_input_column, label_column=label_column
)
pipe = self.prepare_pipeline(
model_or_pipeline=model_or_pipeline,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
device=device,
)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, label_mapping)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
| 6,676 | 40.47205 | 130 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/utils.py
|
from datasets import Dataset, get_dataset_split_names
class DatasetColumn(list):
"""Helper class to avoid loading a dataset column into memory when accessing it."""
def __init__(self, dataset: Dataset, key: str):
self.dataset = dataset
self.key = key
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return self.dataset[i][self.key]
def __iter__(self):
return (self.dataset[i][self.key] for i in range(len(self)))
def choose_split(data, subset=None):
available_splits = get_dataset_split_names(data, subset)
preferred_split_order = [
"test",
"testing",
"eval",
"evaluation",
"validation",
"val",
"valid",
"dev",
"train",
"training",
]
for split in preferred_split_order:
if split in available_splits:
return split
raise ValueError("No dataset split defined! Pass an explicit value to the `split` kwarg.")
class DatasetColumnPair(list):
"""Helper class to avoid loading two dataset columns into memory when accessing it."""
def __init__(
self,
dataset: Dataset,
first_col: str,
second_col: str,
first_key: str,
second_key: str,
):
"""
Args:
dataset (Dataset): dataset to build an iterator on
first_col (str): first column name to use in the dataset
second_col (str): second column name to use in the dataset
first_key (str): key name used for the first column in the returned dictionary
second_key (str): key name used for the second column in the returned dictionary
"""
self.dataset = dataset
self.first_col = first_col
self.second_col = second_col
self.first_key = first_key
self.second_key = second_key
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return {
self.first_key: self.dataset[i][self.first_col],
self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
}
def __iter__(self):
return (
{
self.first_key: self.dataset[i][self.first_col],
self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
}
for i in range(len(self))
)
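# Minimal usage sketch (toy in-memory dataset, not taken from the original file):
# pairs = DatasetColumnPair(
#     Dataset.from_dict({"premise": ["a"], "hypothesis": ["b"]}),
#     "premise", "hypothesis", "text", "text_pair",
# )
# pairs[0] == {"text": "a", "text_pair": "b"}; rows are read lazily, one at a time,
# so neither column is ever materialised as a full list in memory.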
| 2,451 | 27.847059 | 95 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/question_answering.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Lint as: python3
from datasets import Dataset
try:
TRANSFORMERS_AVAILABLE = True
except ImportError:
TRANSFORMERS_AVAILABLE = False
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from ..utils.logging import get_logger
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
from .utils import DatasetColumn
if TYPE_CHECKING:
from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
logger = get_logger(__name__)
TASK_DOCUMENTATION = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("question-answering")
>>> data = load_dataset("squad", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad",
>>> data=data,
>>> metric="squad",
>>> )
```
<Tip>
Datasets where the answer may be missing in the context are supported, for example SQuAD v2 dataset. In this case, it is safer to pass `squad_v2_format=True` to
the compute() call.
</Tip>
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("question-answering")
>>> data = load_dataset("squad_v2", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2",
>>> data=data,
>>> metric="squad_v2",
>>> squad_v2_format=True,
>>> )
```
"""
class QuestionAnsweringEvaluator(Evaluator):
"""
Question answering evaluator. This evaluator handles
[**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering),
where the answer to the question is extracted from a context.
This question answering evaluator can currently be loaded from [`evaluator`] using the default task name
`question-answering`.
Methods in this class assume a data format compatible with the
[`~transformers.QuestionAnsweringPipeline`].
"""
PIPELINE_KWARGS = {}
def __init__(self, task="question-answering", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def prepare_data(
self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str
):
"""Prepare data."""
if data is None:
raise ValueError(
"Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
)
self.check_required_columns(
data,
{
"question_column": question_column,
"context_column": context_column,
"id_column": id_column,
"label_column": label_column,
},
)
metric_inputs = dict()
metric_inputs["references"] = [
{"id": element[id_column], "answers": element[label_column]} for element in data
]
return metric_inputs, {
"question": DatasetColumn(data, question_column),
"context": DatasetColumn(data, context_column),
}
def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"):
"""
Check if the provided dataset follows the SQuAD v2 data schema, i.e. it may contain samples where the answer is not in the context.
In this case, the answer text list should be `[]`.
"""
original_num_rows = data.num_rows
nonempty_num_rows = data.filter(
lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False
).num_rows
if original_num_rows > nonempty_num_rows:
return True
else:
return False
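# Example (sketch, not part of the original file): a SQuAD v2-style row such as
# {"answers": {"text": [], "answer_start": []}} is dropped by the filter, so
# nonempty_num_rows < original_num_rows and the method returns True; on SQuAD v1
# data every row keeps at least one answer text, the row counts match, and the
# method returns False.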
def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List):
result = []
for i in range(len(predictions)):
pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]}
if squad_v2_format:
pred["no_answer_probability"] = predictions[i]["score"]
result.append(pred)
return {"predictions": result}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
question_column: str = "question",
context_column: str = "context",
id_column: str = "id",
label_column: str = "answers",
squad_v2_format: Optional[bool] = None,
) -> Tuple[Dict[str, float], Any]:
"""
question_column (`str`, defaults to `"question"`):
The name of the column containing the question in the dataset specified by `data`.
context_column (`str`, defaults to `"context"`):
The name of the column containing the context in the dataset specified by `data`.
id_column (`str`, defaults to `"id"`):
The name of the column containing the identification field of the question and answer pair in the
dataset specified by `data`.
label_column (`str`, defaults to `"answers"`):
The name of the column containing the answers in the dataset specified by `data`.
squad_v2_format (`bool`, *optional*, defaults to `None`):
Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
has questions where the answer is not in the context, more specifically when the answers appear as
`{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data,
question_column=question_column,
context_column=context_column,
id_column=id_column,
label_column=label_column,
)
if squad_v2_format is None:
squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column)
logger.warning(
f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}."
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
if squad_v2_format and metric.name == "squad":
logger.warning(
"The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric."
)
if not squad_v2_format and metric.name == "squad_v2":
logger.warning(
"The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric."
)
if squad_v2_format:
self.PIPELINE_KWARGS["handle_impossible_answer"] = True
else:
self.PIPELINE_KWARGS["handle_impossible_answer"] = False
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs)
predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column])
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
| 9,566 | 38.8625 | 164 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/audio_classification.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numbers import Number
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
from datasets import Dataset
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
if TYPE_CHECKING:
from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION = r"""
Examples:
<Tip>
Remember that, in order to process audio files, you need ffmpeg installed (https://ffmpeg.org/download.html)
</Tip>
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("audio-classification")
>>> data = load_dataset("superb", 'ks', split="test[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline=""superb/wav2vec2-base-superb-ks"",
>>> data=data,
>>> label_column="label",
>>> input_column="file",
>>> metric="accuracy",
>>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"}
>>> )
```
<Tip>
The evaluator supports raw audio data as well, in the form of a numpy array. However, be aware that calling
the audio column automatically decodes and resamples the audio files, which can be slow for large datasets.
</Tip>
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("audio-classification")
>>> data = load_dataset("superb", 'ks', split="test[:40]")
>>> data = data.map(lambda example: {"audio": example["audio"]["array"]})
>>> results = task_evaluator.compute(
>>> model_or_pipeline=""superb/wav2vec2-base-superb-ks"",
>>> data=data,
>>> label_column="label",
>>> input_column="audio",
>>> metric="accuracy",
>>> label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"}
>>> )
```
"""
class AudioClassificationEvaluator(Evaluator):
"""
Audio classification evaluator.
This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name
`audio-classification`.
Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`].
"""
PIPELINE_KWARGS = {}
def __init__(self, task="audio-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
return {"predictions": pred_label}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "file",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"file"`):
The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
label_mapping=label_mapping,
)
return result
| 5,804 | 37.190789 | 153 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/text2text_generation.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
from datasets import Dataset
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
if TYPE_CHECKING:
from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION_KWARGS = r"""
input_column (`str`, defaults to `"text"`):
the name of the column containing the input text in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
the name of the column containing the labels in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
"""
TEXT2TEXT_TASK_DOCSTRING_EXAMPLE = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("text2text-generation")
>>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="facebook/bart-large-cnn",
>>> data=data,
>>> input_column="article",
>>> label_column="highlights",
>>> metric="rouge",
>>> )
```
"""
SUMMARIZATION_TASK_DOCSTRING_EXAMPLE = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("summarization")
>>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="facebook/bart-large-cnn",
>>> data=data,
>>> input_column="article",
>>> label_column="highlights",
>>> )
```
"""
TRANSLATION_TASK_DOCSTRING_EXAMPLE = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("translation")
>>> data = load_dataset("wmt19", "fr-de", split="validation[:40]")
>>> data = data.map(lambda x: {"text": x["translation"]["de"], "label": x["translation"]["fr"]})
>>> results = task_evaluator.compute(
>>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr",
>>> data=data,
>>> )
```
"""
class Text2TextGenerationEvaluator(Evaluator):
"""
Text2Text generation evaluator.
This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
`text2text-generation`.
Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`].
"""
PREDICTION_PREFIX = "generated"
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="text2text-generation", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]}
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
TEXT2TEXT_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
if generation_kwargs is not None:
self.PIPELINE_KWARGS.update(generation_kwargs)
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
)
return result
class SummarizationEvaluator(Text2TextGenerationEvaluator):
"""
Text summarization evaluator.
This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name
`summarization`.
Methods in this class assume a data format compatible with the [`~transformers.SummarizationPipeline`].
"""
PREDICTION_PREFIX = "summary"
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="summarization", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
SUMMARIZATION_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
generation_kwargs=generation_kwargs,
)
return result
class TranslationEvaluator(Text2TextGenerationEvaluator):
"""
Translation evaluator.
This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name
`translation`.
Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`].
"""
PREDICTION_PREFIX = "translation"
PIPELINE_KWARGS = {"truncation": True}
def __init__(self, task="translation", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
@add_start_docstrings(
EVALUTOR_COMPUTE_START_DOCSTRING,
TASK_DOCUMENTATION_KWARGS,
EVALUATOR_COMPUTE_RETURN_DOCSTRING,
TRANSLATION_TASK_DOCSTRING_EXAMPLE,
)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "text",
label_column: str = "label",
generation_kwargs: dict = None,
) -> Tuple[Dict[str, float], Any]:
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
generation_kwargs=generation_kwargs,
)
return result
| 9,676 | 35.108209 | 113 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/__init__.py
|
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from transformers.pipelines import SUPPORTED_TASKS as SUPPORTED_PIPELINE_TASKS
from transformers.pipelines import TASK_ALIASES
from transformers.pipelines import check_task as check_pipeline_task
TRANSFORMERS_AVAILABLE = True
except ImportError:
TRANSFORMERS_AVAILABLE = False
from typing import Dict, List
from .audio_classification import AudioClassificationEvaluator
from .automatic_speech_recognition import AutomaticSpeechRecognitionEvaluator
from .base import Evaluator
from .image_classification import ImageClassificationEvaluator
from .question_answering import QuestionAnsweringEvaluator
from .text2text_generation import SummarizationEvaluator, Text2TextGenerationEvaluator, TranslationEvaluator
from .text_classification import TextClassificationEvaluator
from .text_generation import TextGenerationEvaluator
from .token_classification import TokenClassificationEvaluator
SUPPORTED_EVALUATOR_TASKS = {
"text-classification": {
"implementation": TextClassificationEvaluator,
"default_metric_name": "accuracy",
},
"image-classification": {
"implementation": ImageClassificationEvaluator,
"default_metric_name": "accuracy",
},
"question-answering": {
"implementation": QuestionAnsweringEvaluator,
"default_metric_name": "squad",
},
"token-classification": {
"implementation": TokenClassificationEvaluator,
"default_metric_name": "seqeval",
},
"text-generation": {
"implementation": TextGenerationEvaluator,
"default_metric_name": "word_count",
},
"text2text-generation": {
"implementation": Text2TextGenerationEvaluator,
"default_metric_name": "bleu",
},
"summarization": {
"implementation": SummarizationEvaluator,
"default_metric_name": "rouge",
},
"translation": {
"implementation": TranslationEvaluator,
"default_metric_name": "bleu",
},
"automatic-speech-recognition": {
"implementation": AutomaticSpeechRecognitionEvaluator,
"default_metric_name": "wer",
},
"audio-classification": {
"implementation": AudioClassificationEvaluator,
"default_metric_name": "accuracy",
},
}
def get_supported_tasks() -> List[str]:
"""
Returns a list of supported task strings.
"""
return list(SUPPORTED_EVALUATOR_TASKS.keys())
def check_task(task: str) -> Dict:
"""
Checks an incoming task string to validate that it is correct and returns the default Evaluator class and default metric
name. It first checks that the string is a valid `Pipeline` task, then checks that it is a
valid `Evaluator` task. `Evaluator` tasks are a subset of `Pipeline` tasks.
Args:
task (`str`):
The task defining which evaluator will be returned. Currently accepted tasks are:
- `"image-classification"`
- `"question-answering"`
- `"text-classification"` (alias `"sentiment-analysis"` available)
- `"token-classification"`
Returns:
task_defaults: `dict`, contains the implementation class of a given Evaluator and the default metric name.
"""
if task in TASK_ALIASES:
task = TASK_ALIASES[task]
if not check_pipeline_task(task):
raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.")
if task in SUPPORTED_EVALUATOR_TASKS.keys() and task in SUPPORTED_PIPELINE_TASKS.keys():
return SUPPORTED_EVALUATOR_TASKS[task]
raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.")
def evaluator(task: str = None) -> Evaluator:
"""
Utility factory method to build an [`Evaluator`].
Evaluators encapsulate a task and a default metric name. They leverage `pipeline` functionality from `transformers`
to simplify the evaluation of multiple combinations of models, datasets and metrics for a given task.
Args:
task (`str`):
The task defining which evaluator will be returned. Currently accepted tasks are:
- `"image-classification"`: will return a [`ImageClassificationEvaluator`].
- `"question-answering"`: will return a [`QuestionAnsweringEvaluator`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationEvaluator`].
- `"token-classification"`: will return a [`TokenClassificationEvaluator`].
Returns:
[`Evaluator`]: An evaluator suitable for the task.
Examples:
```python
>>> from evaluate import evaluator
>>> # Sentiment analysis evaluator
>>> evaluator("sentiment-analysis")
```"""
if not TRANSFORMERS_AVAILABLE:
raise ImportError(
"If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[transformers]`."
)
targeted_task = check_task(task)
evaluator_class = targeted_task["implementation"]
default_metric_name = targeted_task["default_metric_name"]
return evaluator_class(task=task, default_metric_name=default_metric_name)
| 5,788 | 40.056738 | 126 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/text_generation.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple
from datasets import Dataset
from .base import Evaluator
from .utils import DatasetColumn
TASK_DOCUMENTATION_KWARGS = r"""
input_column (`str`, defaults to `"text"`):
the name of the column containing the input text in the dataset specified by `data`.
generation_kwargs (`Dict`, *optional*, defaults to `None`):
The generation kwargs are passed to the pipeline and set the text generation strategy.
"""
class TextGenerationEvaluator(Evaluator):
"""
Text generation evaluator.
This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
`text-generation`.
Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`].
"""
def predictions_processor(self, predictions, *args, **kwargs):
"""
Args:
predictions: A list of lists of dicts
Returns:
`dict`: All the generated texts are flattened and stored under the "data" key.
"""
return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]}
def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"):
super().__init__(task=task, default_metric_name=default_metric_name)
self.predictions_prefix = predictions_prefix
def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]:
"""
Prepare data.
Args:
data ([`Dataset`]):
Specifies the dataset we will run evaluation on.
input_column (`str`, defaults to `"text"`):
The name of the column containing the text feature in the dataset specified by `data`.
Returns:
`dict`: metric inputs.
`list`: pipeline inputs.
"""
self.check_required_columns(data, {"input_column": input_column})
return {}, DatasetColumn(data, input_column)
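# Usage sketch (hedged; the model name is only a placeholder, not taken from this file):
# task_evaluator = evaluator("text-generation")
# results = task_evaluator.compute(model_or_pipeline="gpt2", data=data, input_column="text")
# The generations returned by the pipeline are flattened by predictions_processor and
# handed to the metric (by default "word_count") under the "data" key.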
| 2,679 | 37.285714 | 117 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluator/image_classification.py
|
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numbers import Number
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
from datasets import Dataset
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
if TYPE_CHECKING:
from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION = r"""
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("image-classification")
>>> data = load_dataset("beans", split="test[:40]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="nateraw/vit-base-beans",
>>> data=data,
>>> label_column="labels",
>>> metric="accuracy",
>>> label_mapping={'angular_leaf_spot': 0, 'bean_rust': 1, 'healthy': 2},
>>> strategy="bootstrap"
>>> )
```
"""
class ImageClassificationEvaluator(Evaluator):
"""
Image classification evaluator.
This image classification evaluator can currently be loaded from [`evaluator`] using the default task name
`image-classification`.
Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`].
"""
PIPELINE_KWARGS = {}
def __init__(self, task="image-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
return {"predictions": pred_label}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "image",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"image"`):
The name of the column containing the images as PIL ImageFile in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
label_mapping=label_mapping,
)
return result
| 4,751 | 38.6 | 118 |
py
|
evaluate
|
evaluate-main/src/evaluate/evaluation_suite/__init__.py
|
import importlib
import inspect
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Dict, Optional, Union
from datasets import Dataset, DownloadConfig, DownloadMode, load_dataset
from datasets.utils.version import Version
from ..evaluator import evaluator
from ..loading import evaluation_module_factory
from ..utils.logging import get_logger
logger = get_logger(__name__)
@dataclass
class SubTask:
task_type: str
data: Optional[Union[str, Dataset]] = None
subset: Optional[str] = None
split: Optional[str] = None
data_preprocessor: Optional[Callable] = None
args_for_task: Optional[dict] = None
def __post_init__(self):
if type(self.task_type) is not str:
raise ValueError(f"'task_type' must be type 'str', got {type(self.task_type)}")
if type(self.data) not in [Dataset, str]:
raise ValueError(
f"'data' must be an already-instantiated Dataset object or type 'str', got {type(self.data)}"
)
if self.subset and type(self.subset) is not str:
raise ValueError(f"'subset' must be type 'str', got {type(self.subset)}")
if self.split and type(self.split) is not str:
raise ValueError(f"'split' must be type 'str', got {type(self.split)}")
if self.data_preprocessor and not callable(self.data_preprocessor):
raise ValueError(f"'data_preprocessor' must be a Callable', got {self.data_preprocessor}")
if self.args_for_task and type(self.args_for_task) is not dict:
raise ValueError(f"'args_for_task' must be type 'dict', got {type(self.args_for_task)}")
def import_main_class(module_path):
"""Import a module at module_path and return the EvaluationSuite class"""
module = importlib.import_module(module_path)
module_main_cls = None
for name, obj in module.__dict__.items():
if isinstance(obj, type) and obj.__name__ == "Suite":
if inspect.isabstract(obj):
continue
module_main_cls = obj
break
return module_main_cls
class EvaluationSuite:
"""
This class instantiates an evaluation suite made up of multiple tasks, where each task consists of a dataset and
an associated metric, and runs evaluation on a model or pipeline. An evaluation suite is defined in a Python script
that can either live locally or be uploaded as a Space on the Hugging Face Hub.
Usage:
```python
from evaluate import EvaluationSuite
suite = EvaluationSuite.load("evaluate/evaluation-suite-ci")
results = suite.run("lvwerra/distilbert-imdb")
```
"""
def __init__(self, name):
self.name = name
@staticmethod
def load(
path: str,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
):
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
evaluation_module = evaluation_module_factory(
path, module_type=None, revision=revision, download_config=download_config, download_mode=download_mode
)
name = Path(path).stem
evaluation_cls = import_main_class(evaluation_module.module_path)
evaluation_instance = evaluation_cls(name)
return evaluation_instance
def __repr__(self):
self.tasks = [str(task) for task in self.suite]
return f'EvaluationSuite name: "{self.name}", ' f"Tasks: {self.tasks})"
def assert_suite_nonempty(self):
if not self.suite:
raise ValueError(
"No evaluation tasks found. The EvaluationSuite must include at least one SubTask definition."
)
def run(
self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] # noqa: F821
) -> Dict[str, float]:
self.assert_suite_nonempty()
results_all = []
for task in self.suite:
task_name = task.data
if task.data_preprocessor: # task requires extra preprocessing
ds = load_dataset(task.data, name=task.subset, split=task.split)
task.data = ds.map(task.data_preprocessor)
task_evaluator = evaluator(task.task_type)
args_for_task = task.args_for_task
args_for_task["model_or_pipeline"] = model_or_pipeline
args_for_task["data"] = task.data
args_for_task["subset"] = task.subset
args_for_task["split"] = task.split
results = task_evaluator.compute(**args_for_task)
results["task_name"] = task_name + "/" + task.subset if task.subset else task_name
results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None
results_all.append(results)
return results_all
| 4,941 | 37.310078 | 119 |
py
|
evaluate
|
evaluate-main/src/evaluate/commands/evaluate_cli.py
|
import argparse
import os
import subprocess
from pathlib import Path
from cookiecutter.main import cookiecutter
from huggingface_hub import HfApi, Repository, create_repo
from evaluate.utils.logging import get_logger
logger = get_logger(__name__)
INSTRUCTIONS = """\
A new repository for your module "{module_name}" of type "{module_type}" has been created at {output_dir} and pushed to the Hugging Face Hub: {repo_url}.
Here are the next steps:
- implement the module logic in {module_slug}/{module_slug}.py
- document your module in {module_slug}/README.md
- add test cases for your module in {module_slug}/tests.py
- if your module has any dependencies update them in {module_slug}/requirements.txt
You can test your module's widget locally by running:
```
python {output_dir}/{module_slug}/app.py
```
When you are happy with your changes you can push your changes with the following commands to the Hugging Face Hub:
```
cd {output_dir}/{module_slug}
git add .
git commit -m "Updating module"
git push
```
You should then see the update widget on the Hugging Face Hub: {repo_url}
And you can load your module in Python with the following code:
```
from evaluate import load
module = load("{namespace}/{module_slug}")
```
"""
def main():
parser = argparse.ArgumentParser("HuggingFace Evaluate CLI tool", usage="evaluate-cli <command> [<args>]")
subparsers = parser.add_subparsers()
parser_create = subparsers.add_parser("create", help="Create new evaluation module.")
parser_create.add_argument(
"module_name", type=str, help='Pretty name of new evaluation module, e.g. "Recall" or "Exact Match".'
)
parser_create.add_argument(
"--module_type",
default="metric",
type=str,
help="Type of module, has to be one of [metric|comparison|measurement].",
)
parser_create.add_argument(
"--dataset_name", default="", type=str, help="Name of dataset if evaluation module is dataset specific."
)
parser_create.add_argument("--module_description", type=str, help="Short description of evaluation module.")
parser_create.add_argument("--output_dir", default=Path.cwd(), type=str, help="Path to output directory.")
parser_create.add_argument(
"--organization", default=None, type=str, help="Organization on the Hub to push evaluation module to."
)
parser_create.add_argument("--private", action="store_true", help="Sets evaluation module repository to private.")
args = vars(parser.parse_args())
if args["module_type"] not in ["metric", "comparison", "measurement"]:
raise ValueError("The module_type needs to be one of metric, comparison, or measurement")
if "-" in args["module_name"]:
raise ValueError("Hyphens ('-') are not allowed in module names.")
output_dir = Path(args["output_dir"])
organization = args["organization"]
module_slug = args["module_name"].lower().replace(" ", "_")
if organization is None:
hfapi = HfApi()
namespace = hfapi.whoami()["name"]
else:
namespace = organization
args["namespace"] = namespace
repo_url = f"https://huggingface.co/spaces/{namespace}/{module_slug}"
try:
create_repo(namespace + "/" + module_slug, repo_type="space", space_sdk="gradio", private=args["private"])
except Exception as exception:
logger.error(
f"Could not create Space for module at hf.co/spaces/{namespace}/{module_slug}. Make sure this space does not exist already."
)
raise exception
subprocess.run(
f"git clone {repo_url}".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=output_dir,
env=os.environ.copy(),
)
repo = Repository(
local_dir=output_dir / module_slug,
)
cookiecutter(
"https://github.com/huggingface/evaluate/",
directory="templates",
no_input=True,
extra_context=args,
output_dir=output_dir,
overwrite_if_exists=True,
)
repo.git_add()
repo.git_commit("add module default template")
repo.git_push()
print(
INSTRUCTIONS.format(
module_name=args["module_name"],
module_type=args["module_type"],
module_slug=module_slug,
namespace=namespace,
repo_url=repo_url,
output_dir=output_dir,
)
)
if __name__ == "__main__":
main()
| 4,491 | 31.550725 | 153 |
py
|
evaluate
|
evaluate-main/src/evaluate/commands/__init__.py
| 0 | 0 | 0 |
py
|
|
evaluate
|
evaluate-main/src/evaluate/utils/gradio.py
|
import json
import os
import re
import sys
from pathlib import Path
import numpy as np
from datasets import Value
from .logging import get_logger
logger = get_logger(__name__)
REGEX_YAML_BLOCK = re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]")
def infer_gradio_input_types(feature_types):
"""
    Maps metric feature types to input types for gradio Dataframes:
    - float/int -> "number"
    - string -> "str"
    - any other -> "json"
    Note that json is not a native gradio type but will be treated as a string
    that is then parsed as JSON.
"""
input_types = []
for feature_type in feature_types:
input_type = "json"
if isinstance(feature_type, Value):
if feature_type.dtype.startswith("int") or feature_type.dtype.startswith("float"):
input_type = "number"
elif feature_type.dtype == "string":
input_type = "str"
input_types.append(input_type)
return input_types
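# Illustrative mapping (a sketch assuming `datasets` feature types):
#
#     >>> from datasets import Sequence, Value
#     >>> infer_gradio_input_types([Value("int32"), Value("string"), Sequence(Value("int32"))])
#     ['number', 'str', 'json']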
def json_to_string_type(input_types):
"""Maps json input type to str."""
return ["str" if i == "json" else i for i in input_types]
def parse_readme(filepath):
"""Parses a repositories README and removes"""
if not os.path.exists(filepath):
return "No README.md found."
with open(filepath, "r") as f:
text = f.read()
match = REGEX_YAML_BLOCK.search(text)
if match:
text = text[match.end() :]
return text
def parse_gradio_data(data, input_types):
"""Parses data from gradio Dataframe for use in metric."""
metric_inputs = {}
data.replace("", np.nan, inplace=True)
data.dropna(inplace=True)
for feature_name, input_type in zip(data, input_types):
if input_type == "json":
metric_inputs[feature_name] = [json.loads(d) for d in data[feature_name].to_list()]
elif input_type == "str":
metric_inputs[feature_name] = [d.strip('"') for d in data[feature_name].to_list()]
else:
metric_inputs[feature_name] = data[feature_name]
return metric_inputs
def parse_test_cases(test_cases, feature_names, input_types):
"""
    Parses test cases to be used in gradio Dataframe. Note that quotation marks are added
    to strings to follow the JSON format.
"""
if len(test_cases) == 0:
return None
examples = []
for test_case in test_cases:
parsed_cases = []
for feat, input_type in zip(feature_names, input_types):
if input_type == "json":
parsed_cases.append([str(element) for element in test_case[feat]])
elif input_type == "str":
parsed_cases.append(['"' + element + '"' for element in test_case[feat]])
else:
parsed_cases.append(test_case[feat])
examples.append([list(i) for i in zip(*parsed_cases)])
return examples
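# Sketch of the row-wise reshaping (values below are made up for illustration):
#
#     >>> parse_test_cases(
#     ...     [{"predictions": [0, 1], "references": [1, 1]}],
#     ...     ["predictions", "references"],
#     ...     ["number", "number"],
#     ... )
#     [[[0, 1], [1, 1]]]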
def launch_gradio_widget(metric):
"""Launches `metric` widget with Gradio."""
try:
import gradio as gr
except ImportError as error:
logger.error("To create a metric widget with Gradio make sure gradio is installed.")
raise error
local_path = Path(sys.path[0])
# if there are several input types, use first as default.
if isinstance(metric.features, list):
(feature_names, feature_types) = zip(*metric.features[0].items())
else:
(feature_names, feature_types) = zip(*metric.features.items())
gradio_input_types = infer_gradio_input_types(feature_types)
def compute(data):
return metric.compute(**parse_gradio_data(data, gradio_input_types))
iface = gr.Interface(
fn=compute,
inputs=gr.inputs.Dataframe(
headers=feature_names,
col_count=len(feature_names),
row_count=1,
datatype=json_to_string_type(gradio_input_types),
),
outputs=gr.outputs.Textbox(label=metric.name),
description=(
            metric.info.description + "\nIf this is a text-based metric, make sure to wrap your input in double quotes."
" Alternatively you can use a JSON-formatted list as input."
),
title=f"Metric: {metric.name}",
article=parse_readme(local_path / "README.md"),
# TODO: load test cases and use them to populate examples
# examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)]
)
iface.launch()
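# Typical use from a Space's app.py, mirroring the project template (the metric name is illustrative):
#
#     >>> import evaluate
#     >>> launch_gradio_widget(evaluate.load("accuracy"))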
| 4,434 | 32.598485 | 119 |
py
|
evaluate
|
evaluate-main/src/evaluate/utils/logging.py
|
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities. """
import logging
import os
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import NOTSET # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
"""
    If the EVALUATE_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    If it is not, fall back to ``_default_log_level``.
"""
env_level_str = os.getenv("EVALUATE_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option EVALUATE_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""Return a logger with the specified name."""
if name is None:
name = _get_library_name()
return logging.getLogger(name)
def get_verbosity() -> int:
"""Return the current level for the Hugging Face Evaluate library's root logger.
Returns:
Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`.
<Tip>
Hugging Face Evaluate library has following logging levels:
- `evaluate.logging.CRITICAL`, `evaluate.logging.FATAL`
- `evaluate.logging.ERROR`
- `evaluate.logging.WARNING`, `evaluate.logging.WARN`
- `evaluate.logging.INFO`
- `evaluate.logging.DEBUG`
</Tip>
"""
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""Set the level for the Hugging Face Evaluate library's root logger.
Args:
verbosity:
Logging level, e.g., `evaluate.logging.DEBUG` and `evaluate.logging.INFO`.
"""
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the level for the Hugging Face Evaluate library's root logger to `INFO`.
This will display most of the logging information and tqdm bars.
Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.INFO)`.
"""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the level for the Hugging Face Evaluate library's root logger to `WARNING`.
This will display only the warning and errors logging information and tqdm bars.
Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.WARNING)`.
"""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the level for the Hugging Face Evaluate library's root logger to `DEBUG`.
This will display all the logging information and tqdm bars.
Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.DEBUG)`.
"""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the level for the Hugging Face Evaluate library's root logger to `ERROR`.
This will display only the errors logging information and tqdm bars.
Shortcut to `evaluate.logging.set_verbosity(evaluate.logging.ERROR)`.
"""
return set_verbosity(ERROR)
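# A small usage sketch of the verbosity helpers above:
#
#     >>> from evaluate.utils import logging as evaluate_logging
#     >>> evaluate_logging.set_verbosity_debug()
#     >>> evaluate_logging.get_verbosity() == evaluate_logging.DEBUG
#     True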
def disable_propagation() -> None:
"""Disable propagation of the library log outputs.
Note that log propagation is disabled by default.
"""
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""Enable propagation of the library log outputs.
Please disable the Hugging Face Evaluate library's default handler to prevent double logging if the root logger has
been configured.
"""
_get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
"""Dummy tqdm which doesn't do anything."""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self._iterator = args[0] if args else None
def __iter__(self):
return iter(self._iterator)
def __getattr__(self, _):
"""Return empty function."""
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return
_tqdm_active = True
class _tqdm_cls:
def __call__(self, *args, **kwargs):
if _tqdm_active:
return tqdm_lib.tqdm(*args, **kwargs)
else:
return EmptyTqdm(*args, **kwargs)
def set_lock(self, *args, **kwargs):
self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*args, **kwargs)
def get_lock(self):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
"""Return a boolean indicating whether tqdm progress bars are enabled."""
global _tqdm_active
return bool(_tqdm_active)
def enable_progress_bar():
"""Enable tqdm progress bar."""
global _tqdm_active
_tqdm_active = True
def disable_progress_bar():
"""Enable tqdm progress bar."""
global _tqdm_active
_tqdm_active = False
| 6,698 | 27.506383 | 119 |
py
|
evaluate
|
evaluate-main/src/evaluate/utils/file_utils.py
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import io
import json
import os
import posixpath
import re
import shutil
import sys
import tempfile
import time
import urllib
from contextlib import closing, contextmanager
from functools import partial
from hashlib import sha256
from pathlib import Path
from typing import List, Optional, Type, TypeVar, Union
from urllib.parse import urljoin, urlparse
import requests
from datasets import DownloadConfig
from datasets.utils.extract import ExtractManager
from datasets.utils.filelock import FileLock
from .. import __version__, config
from . import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
INCOMPLETE_SUFFIX = ".incomplete"
T = TypeVar("T", str, Path)
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
"""
Add hf_modules_cache to the python path.
By default hf_modules_cache='~/.cache/huggingface/modules'.
It can also be set with the environment variable HF_MODULES_CACHE.
This is used to add modules such as `datasets_modules`
"""
hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE
hf_modules_cache = str(hf_modules_cache)
if hf_modules_cache not in sys.path:
sys.path.append(hf_modules_cache)
os.makedirs(hf_modules_cache, exist_ok=True)
if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")):
with open(os.path.join(hf_modules_cache, "__init__.py"), "w"):
pass
return hf_modules_cache
def is_remote_url(url_or_filename: str) -> bool:
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3", "gs", "hdfs", "ftp")
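# Quick behaviour sketch:
#
#     >>> is_remote_url("https://huggingface.co/some/file.txt")
#     True
#     >>> is_remote_url("relative/path/file.txt")
#     False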
def is_local_path(url_or_filename: str) -> bool:
# On unix the scheme of a local path is empty (for both absolute and relative),
# while on windows the scheme is the drive name (ex: "c") for absolute paths.
# for details on the windows behavior, see https://bugs.python.org/issue42215
return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename)
def relative_to_absolute_path(path: T) -> T:
"""Convert relative path to absolute path."""
abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path))))
return Path(abs_path_str) if isinstance(path, Path) else abs_path_str
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
if dataset:
endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
else:
endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
return "/".join((endpoint, identifier, filename))
def head_hf_s3(
identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
return http_head(
hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
max_retries=max_retries,
)
def hf_hub_url(path: str, name: str, revision: Optional[str] = None) -> str:
revision = revision or config.HUB_DEFAULT_VERSION
return config.HUB_EVALUATE_URL.format(path=path, name=name, revision=revision)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
if is_remote_url(base_name):
return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames))
else:
return Path(base_name, *pathnames).as_posix()
def url_or_path_parent(url_or_path: str) -> str:
if is_remote_url(url_or_path):
return url_or_path[: url_or_path.rindex("/")]
else:
return os.path.dirname(url_or_path)
def hash_url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
    If the url ends with .py, '.py' is appended to the name
    so that the resulting file can be identified as a Python module.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".py"):
filename += ".py"
return filename
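# Shape of the result (the digests below are placeholders, not real hashes):
#
#     >>> hash_url_to_filename("https://example.com/module.py", etag='"abc"')
#     '<sha256(url)>.<sha256(etag)>.py'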
def cached_path(
url_or_filename,
download_config=None,
**download_kwargs,
) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
ValueError: if it couldn't parse the url or filename correctly
requests.exceptions.ConnectionError: in case of internet connection issue
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
cache_dir = download_config.cache_dir or config.DOWNLOADED_EVALUATE_PATH
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=download_config.force_download,
proxies=download_config.proxies,
resume_download=download_config.resume_download,
user_agent=download_config.user_agent,
local_files_only=download_config.local_files_only,
use_etag=download_config.use_etag,
max_retries=download_config.max_retries,
use_auth_token=download_config.use_auth_token,
ignore_url_params=download_config.ignore_url_params,
download_desc=download_config.download_desc,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif is_local_path(url_or_filename):
# File, but it doesn't exist.
raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if output_path is None:
return output_path
if download_config.extract_compressed_file:
output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
output_path, force_extract=download_config.force_extract
)
return output_path
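# Minimal call sketch (the URL is illustrative):
#
#     >>> local_path = cached_path("https://example.com/data/train.txt")
#     >>> # a second call with the same URL returns the cached copy without re-downloading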
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
ua = f"datasets/{__version__}; python/{config.PY_VERSION}"
ua += f"; pyarrow/{config.PYARROW_VERSION}"
if config.TORCH_AVAILABLE:
ua += f"; torch/{config.TORCH_VERSION}"
if config.TF_AVAILABLE:
ua += f"; tensorflow/{config.TF_VERSION}"
if config.JAX_AVAILABLE:
ua += f"; jax/{config.JAX_VERSION}"
if isinstance(user_agent, dict):
ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}"
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:
"""Handle the HF authentication"""
headers = {}
if url.startswith(config.HF_ENDPOINT):
token = None
if isinstance(use_auth_token, str):
token = use_auth_token
elif bool(use_auth_token):
from huggingface_hub import hf_api
token = hf_api.HfFolder.get_token()
if token:
headers["authorization"] = f"Bearer {token}"
return headers
class OfflineModeIsEnabled(ConnectionError):
pass
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
"""Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_EVALUATE_OFFLINE is True."""
if config.HF_EVALUATE_OFFLINE:
raise OfflineModeIsEnabled(
"Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg)
)
def _retry(
func,
func_args: Optional[tuple] = None,
func_kwargs: Optional[dict] = None,
exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException,
status_codes: Optional[List[int]] = None,
max_retries: int = 0,
base_wait_time: float = 0.5,
max_wait_time: float = 2,
):
func_args = func_args or ()
func_kwargs = func_kwargs or {}
retry = 0
while True:
try:
return func(*func_args, **func_kwargs)
except exceptions as err:
if retry >= max_retries or (status_codes and err.response.status_code not in status_codes):
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry/max_retries}]")
time.sleep(sleep_time)
retry += 1
def _request_with_retry(
method: str,
url: str,
max_retries: int = 0,
base_wait_time: float = 0.5,
max_wait_time: float = 2,
timeout: float = 10.0,
**params,
) -> requests.Response:
"""Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.
    Note that if the environment variable HF_EVALUATE_OFFLINE is set to 1, then an OfflineModeIsEnabled error is raised.
Args:
method (str): HTTP method, such as 'GET' or 'HEAD'.
url (str): The URL of the resource to fetch.
max_retries (int): Maximum number of retries, defaults to 0 (no retries).
base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
retries then grows exponentially, capped by max_wait_time.
max_wait_time (float): Maximum amount of time between two retries, in seconds.
**params: Params to pass to :obj:`requests.request`.
"""
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
tries, success = 0, False
while not success:
tries += 1
try:
response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
success = True
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
if tries > max_retries:
raise err
else:
logger.info(f"{method} request to {url} timed out, retrying... [{tries/max_retries}]")
sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff
time.sleep(sleep_time)
return response
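# Usage sketch (URL and retry budget chosen only for illustration):
#
#     >>> response = _request_with_retry("GET", "https://example.com/file.bin", max_retries=3, stream=True)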
def ftp_head(url, timeout=10.0):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
try:
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
r.read(1)
except Exception:
return False
return True
def ftp_get(url, temp_file, timeout=10.0):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
try:
logger.info(f"Getting through FTP {url} into {temp_file.name}")
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
shutil.copyfileobj(r, temp_file)
except urllib.error.URLError as e:
raise ConnectionError(e) from None
def http_get(
url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
):
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
if resume_size > 0:
headers["Range"] = f"bytes={resume_size:d}-"
response = _request_with_retry(
method="GET",
url=url,
stream=True,
proxies=proxies,
headers=headers,
cookies=cookies,
max_retries=max_retries,
timeout=timeout,
)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
with logging.tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc=desc or "Downloading",
disable=not logging.is_progress_bar_enabled(),
) as progress:
for chunk in response.iter_content(chunk_size=1024):
progress.update(len(chunk))
temp_file.write(chunk)
def http_head(
url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
response = _request_with_retry(
method="HEAD",
url=url,
proxies=proxies,
headers=headers,
cookies=cookies,
allow_redirects=allow_redirects,
timeout=timeout,
max_retries=max_retries,
)
return response
def request_etag(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> Optional[str]:
headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
response = http_head(url, headers=headers, max_retries=3)
response.raise_for_status()
etag = response.headers.get("ETag") if response.ok else None
return etag
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=100,
resume_download=False,
user_agent=None,
local_files_only=False,
use_etag=True,
max_retries=0,
use_auth_token=None,
ignore_url_params=False,
download_desc=None,
) -> str:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
"""
if cache_dir is None:
cache_dir = config.HF_EVALUATE_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if ignore_url_params:
# strip all query parameters and #fragments from the URL
cached_url = urljoin(url, urlparse(url).path)
else:
cached_url = url # additional parameters may be added to the given URL
connected = False
response = None
cookies = None
etag = None
head_error = None
    # Try a first time to find the file on the local file system without eTag (None)
# if we don't ask for 'force_download' then we spare a request
filename = hash_url_to_filename(cached_url, etag=None)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download and not use_etag:
return cache_path
# Prepare headers for authentication
headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
if user_agent is not None:
headers["user-agent"] = user_agent
# We don't have the file locally or we need an eTag
if not local_files_only:
if url.startswith("ftp://"):
connected = ftp_head(url)
try:
response = http_head(
url,
allow_redirects=True,
proxies=proxies,
timeout=etag_timeout,
max_retries=max_retries,
headers=headers,
)
if response.status_code == 200: # ok
etag = response.headers.get("ETag") if use_etag else None
for k, v in response.cookies.items():
# In some edge cases, we need to get a confirmation token
if k.startswith("download_warning") and "drive.google.com" in url:
url += "&confirm=" + v
cookies = response.cookies
connected = True
# Fix Google Drive URL to avoid Virus scan warning
if "drive.google.com" in url and "confirm=" not in url:
url += "&confirm=t"
# In some edge cases, head request returns 400 but the connection is actually ok
elif (
(response.status_code == 400 and "firebasestorage.googleapis.com" in url)
or (response.status_code == 405 and "drive.google.com" in url)
or (
response.status_code == 403
and (
re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
)
)
or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
):
connected = True
logger.info(f"Couldn't get ETag version for url {url}")
elif response.status_code == 401 and config.HF_ENDPOINT in url and use_auth_token is None:
raise ConnectionError(
f"Unauthorized for URL {url}. Please use the parameter ``use_auth_token=True`` after logging in with ``huggingface-cli login``"
)
except (OSError, requests.exceptions.Timeout) as e:
# not connected
head_error = e
pass
    # connected == False means we don't have a connection, or the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if not connected:
if os.path.exists(cache_path) and not force_download:
return cache_path
if local_files_only:
raise FileNotFoundError(
f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
" disabled. To enable file online look-ups, set 'local_files_only' to False."
)
elif response is not None and response.status_code == 404:
raise FileNotFoundError(f"Couldn't find file at {url}")
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
if head_error is not None:
raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
elif response is not None:
raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
else:
raise ConnectionError(f"Couldn't reach {url}")
# Try a second time
filename = hash_url_to_filename(cached_url, etag)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download:
return cache_path
# From now on, connected is True.
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
# GET file object
if url.startswith("ftp://"):
ftp_get(url, temp_file)
else:
http_get(
url,
temp_file,
proxies=proxies,
resume_size=resume_size,
headers=headers,
cookies=cookies,
max_retries=max_retries,
desc=download_desc,
)
logger.info(f"storing {url} in cache at {cache_path}")
shutil.move(temp_file.name, cache_path)
logger.info(f"creating metadata file for {cache_path}")
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
return cache_path
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
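# Effect sketch for the decorator above:
#
#     >>> @add_start_docstrings("Shared intro.")
#     ... def fn():
#     ...     """Specific details."""
#     >>> fn.__doc__
#     'Shared intro.\n\nSpecific details.'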
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr)
return fn
return docstring_decorator
def estimate_dataset_size(paths):
return sum(path.stat().st_size for path in paths)
def readline(f: io.RawIOBase):
# From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
res = bytearray()
while True:
b = f.read(1)
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
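# Behaviour sketch:
#
#     >>> import io
#     >>> readline(io.BytesIO(b"first line\nsecond line"))
#     b'first line\n'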
| 22,602 | 35.515347 | 147 |
py
|
evaluate
|
evaluate-main/src/evaluate/utils/__init__.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"infer_gradio_input_types",
"json_to_string_type",
"parse_readme",
"parse_gradio_data",
"parse_test_cases",
"launch_gradio_widget",
]
from .gradio import (
infer_gradio_input_types,
json_to_string_type,
launch_gradio_widget,
parse_gradio_data,
parse_readme,
parse_test_cases,
)
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
| 1,201 | 29.05 | 87 |
py
|
evaluate
|
evaluate-main/templates/{{ cookiecutter.module_slug }}/tests.py
|
test_cases = [
{
"predictions": [0, 0],
"references": [1, 1],
"result": {"metric_score": 0}
},
{
"predictions": [1, 1],
"references": [1, 1],
"result": {"metric_score": 1}
},
{
"predictions": [1, 0],
"references": [1, 1],
"result": {"metric_score": 0.5}
}
]
| 353 | 19.823529 | 39 |
py
|
evaluate
|
evaluate-main/templates/{{ cookiecutter.module_slug }}/app.py
|
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("{{ cookiecutter.namespace }}/{{ cookiecutter.module_slug }}")
launch_gradio_widget(module)
| 180 | 29.166667 | 85 |
py
|
evaluate
|
evaluate-main/templates/{{ cookiecutter.module_slug }}/{{ cookiecutter.module_slug }}.py
|
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import evaluate
import datasets
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
# TODO: Add description of the module here
_DESCRIPTION = """\
This new module is designed to solve this great ML task and is crafted with a lot of care.
"""
# TODO: Add description of the arguments of the module here
_KWARGS_DESCRIPTION = """
Calculates how good the predictions are given some references, using certain scores
Args:
predictions: list of predictions to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Returns:
accuracy: description of the first score,
another_score: description of the second score,
Examples:
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> my_new_module = evaluate.load("my_new_module")
>>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
>>> print(results)
{'accuracy': 1.0}
"""
# TODO: Define external resources urls if needed
BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class {{ cookiecutter.module_class_name }}(evaluate.{{ cookiecutter.module_type | capitalize}}):
"""TODO: Short description of my evaluation module."""
def _info(self):
# TODO: Specifies the evaluate.EvaluationModuleInfo object
return evaluate.{{ cookiecutter.module_type | capitalize}}Info(
# This is the description that will appear on the modules page.
module_type="{{ cookiecutter.module_type}}",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features({
'predictions': datasets.Value('int64'),
'references': datasets.Value('int64'),
}),
# Homepage of the module for documentation
homepage="http://module.homepage",
# Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"]
)
def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
# TODO: Download external resources if needed
pass
def _compute(self, predictions, references):
"""Returns the scores"""
# TODO: Compute the different scores of the module
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
return {
"accuracy": accuracy,
}
| 3,682 | 37.768421 | 96 |
py
|