| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 class: python) | body_without_docstring (string, 20-98.2k chars) |
|---|---|---|---|---|---|---|---|
def begin_update_patch(self, resource_group_name, service_name, monitoring_setting_resource, **kwargs):
"Update the Monitoring Setting.\n\n :param resource_group_name: The name of the resource group that contains the resource. You can\n obtain this value from the Azure Resource Manager API or the portal.\n :type resource_group_name: str\n :param service_name: The name of the Service resource.\n :type service_name: str\n :param monitoring_setting_resource: Parameters for the update operation.\n :type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the ARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._update_patch_initial(resource_group_name=resource_group_name, service_name=service_name, monitoring_setting_resource=monitoring_setting_resource, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceName': self._serialize.url('service_name', service_name, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| body_hash: -1,511,098,406,233,714,000 | docstring: Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
:raises ~azure.core.exceptions.HttpResponseError:
| path: sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_11_01_preview/operations/_monitoring_settings_operations.py | name: begin_update_patch | repository_name: AriZavala2/azure-sdk-for-python | lang: python | body_without_docstring:
def begin_update_patch(self, resource_group_name, service_name, monitoring_setting_resource, **kwargs):
"Update the Monitoring Setting.\n\n :param resource_group_name: The name of the resource group that contains the resource. You can\n obtain this value from the Azure Resource Manager API or the portal.\n :type resource_group_name: str\n :param service_name: The name of the Service resource.\n :type service_name: str\n :param monitoring_setting_resource: Parameters for the update operation.\n :type monitoring_setting_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the ARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either MonitoringSettingResource or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._update_patch_initial(resource_group_name=resource_group_name, service_name=service_name, monitoring_setting_resource=monitoring_setting_resource, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'serviceName': self._serialize.url('service_name', service_name, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
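The row above documents a long-running update operation that returns an `LROPoller`. A minimal usage sketch, assuming the standard `AppPlatformManagementClient` entry point, an `azure.identity` credential, and a made-up payload (the operations-group attribute name and property values are assumptions, not taken from the row):

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.appplatform import AppPlatformManagementClient  # assumed client class

client = AppPlatformManagementClient(DefaultAzureCredential(), "<subscription-id>")

# `monitoring_settings` is the assumed operations-group attribute for this file.
poller = client.monitoring_settings.begin_update_patch(
    resource_group_name="my-rg",
    service_name="my-spring-service",
    monitoring_setting_resource={"properties": {"trace_enabled": True}},  # hypothetical payload
)

token = poller.continuation_token()  # can be saved and passed back later as continuation_token=...
result = poller.result()             # blocks until the LRO finishes; a MonitoringSettingResource
```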
def compute(self, admat, features):
' Forward Propagation through the layer according to the spectral rule '
self.D = torch.diag(admat.sum(1), diagonal=0)
self.out = torch.empty(admat.size[0], self.op_size)
self.a_hat = (admat + torch.eye(admat.size[0]))
self.D_inv = (self.D ** (- 0.5))
self.a_hat = ((self.D_inv * self.a_hat) * self.D_inv)
self.out = torch.dot(torch.dot(self.a_hat, features), self.weights)
return self.out
| body_hash: 3,169,009,021,672,720,000 | docstring: Forward Propagation through the layer according to the spectral rule | path: gcn/layers.py | name: compute | repository_name: veds12/aihaven | lang: python | body_without_docstring:
def compute(self, admat, features):
' '
self.D = torch.diag(admat.sum(1), diagonal=0)
self.out = torch.empty(admat.size[0], self.op_size)
self.a_hat = (admat + torch.eye(admat.size[0]))
self.D_inv = (self.D ** (- 0.5))
self.a_hat = ((self.D_inv * self.a_hat) * self.D_inv)
self.out = torch.dot(torch.dot(self.a_hat, features), self.weights)
return self.out |
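The `compute` body above applies the GCN spectral rule, out = D^-1/2 (A + I) D^-1/2 · X · W. Below is a standalone sketch of the same rule under common assumptions (degrees taken from A + I, dense tensors, random weights); note that it uses matrix products, whereas `torch.dot` in the stored body only accepts 1-D tensors:

```python
import torch

def spectral_propagate(adj, features, weights):
    """GCN spectral rule: D^-1/2 (A + I) D^-1/2 @ X @ W."""
    n = adj.size(0)
    a_hat = adj + torch.eye(n)                # adjacency with self-loops
    deg = a_hat.sum(dim=1)                    # node degrees of A + I
    d_inv_sqrt = torch.diag(deg.pow(-0.5))    # D^-1/2 as a diagonal matrix
    a_norm = d_inv_sqrt @ a_hat @ d_inv_sqrt  # symmetrically normalized adjacency
    return a_norm @ features @ weights        # propagate features, then transform

# Toy example: 3 nodes, 2 input features, 4 output features (all values assumed).
adj = torch.tensor([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
out = spectral_propagate(adj, torch.randn(3, 2), torch.randn(2, 4))  # shape (3, 4)
```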
def get_train_tfdataset(self) -> tf.data.Dataset:
'\n Returns the training :class:`~tf.data.Dataset`.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
if (self.train_dataset is None):
raise ValueError('Trainer: training requires a train_dataset.')
self.total_train_batch_size = (self.args.train_batch_size * self.args.gradient_accumulation_steps)
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if (self.num_train_examples < 0):
raise ValueError('The training dataset must have an asserted cardinality')
ds = self.train_dataset.repeat().shuffle(self.num_train_examples, seed=self.args.seed).batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last).prefetch(tf.data.experimental.AUTOTUNE)
return self.args.strategy.experimental_distribute_dataset(ds)
| body_hash: -1,645,174,303,493,521,000 | docstring: Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
| path: src/transformers/trainer_tf.py | name: get_train_tfdataset | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def get_train_tfdataset(self) -> tf.data.Dataset:
'\n Returns the training :class:`~tf.data.Dataset`.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
if (self.train_dataset is None):
raise ValueError('Trainer: training requires a train_dataset.')
self.total_train_batch_size = (self.args.train_batch_size * self.args.gradient_accumulation_steps)
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if (self.num_train_examples < 0):
raise ValueError('The training dataset must have an asserted cardinality')
ds = self.train_dataset.repeat().shuffle(self.num_train_examples, seed=self.args.seed).batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last).prefetch(tf.data.experimental.AUTOTUNE)
return self.args.strategy.experimental_distribute_dataset(ds) |
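The method above refuses datasets whose size is unknown ("must have an asserted cardinality"). A small sketch of preparing a compatible `tf.data.Dataset` (feature names and sizes are arbitrary):

```python
import tensorflow as tf

# Assumed toy features/labels; any (dict-of-tensors, labels) pair works.
features = {"input_ids": tf.random.uniform((128, 16), maxval=100, dtype=tf.int32)}
labels = tf.random.uniform((128,), maxval=2, dtype=tf.int32)

train_ds = tf.data.Dataset.from_tensor_slices((features, labels))
print(tf.data.experimental.cardinality(train_ds).numpy())  # 128: a known cardinality

# A generator-backed dataset would report UNKNOWN_CARDINALITY and need, e.g.:
# ds = ds.apply(tf.data.experimental.assert_cardinality(num_examples))
```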
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset]=None) -> tf.data.Dataset:
'\n Returns the evaluation :class:`~tf.data.Dataset`.\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,\n labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``\n is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If\n ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the\n loss is instead calculated by calling ``model(features, **labels)``.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
if ((eval_dataset is None) and (self.eval_dataset is None)):
raise ValueError('Trainer: evaluation requires an eval_dataset.')
eval_dataset = (eval_dataset if (eval_dataset is not None) else self.eval_dataset)
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if (num_examples < 0):
raise ValueError('The training dataset must have an asserted cardinality')
approx = (math.floor if self.args.dataloader_drop_last else math.ceil)
steps = approx((num_examples / self.args.eval_batch_size))
ds = eval_dataset.repeat().batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last).prefetch(tf.data.experimental.AUTOTUNE)
return (self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples)
| body_hash: -8,114,545,385,704,695,000 | docstring: Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
| path: src/transformers/trainer_tf.py | name: get_eval_tfdataset | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset]=None) -> tf.data.Dataset:
'\n Returns the evaluation :class:`~tf.data.Dataset`.\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,\n labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``\n is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If\n ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the\n loss is instead calculated by calling ``model(features, **labels)``.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
if ((eval_dataset is None) and (self.eval_dataset is None)):
raise ValueError('Trainer: evaluation requires an eval_dataset.')
eval_dataset = (eval_dataset if (eval_dataset is not None) else self.eval_dataset)
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if (num_examples < 0):
raise ValueError('The training dataset must have an asserted cardinality')
approx = (math.floor if self.args.dataloader_drop_last else math.ceil)
steps = approx((num_examples / self.args.eval_batch_size))
ds = eval_dataset.repeat().batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last).prefetch(tf.data.experimental.AUTOTUNE)
return (self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples) |
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
'\n Returns a test :class:`~tf.data.Dataset`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`):\n The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is\n a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is\n calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such\n as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated\n by calling ``model(features, **labels)``.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if (num_examples < 0):
raise ValueError('The training dataset must have an asserted cardinality')
approx = (math.floor if self.args.dataloader_drop_last else math.ceil)
steps = approx((num_examples / self.args.eval_batch_size))
ds = test_dataset.repeat().batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last).prefetch(tf.data.experimental.AUTOTUNE)
return (self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples)
| body_hash: -9,158,064,499,687,991,000 | docstring: Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
| path: src/transformers/trainer_tf.py | name: get_test_tfdataset | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
'\n Returns a test :class:`~tf.data.Dataset`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`):\n The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is\n a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is\n calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such\n as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated\n by calling ``model(features, **labels)``.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if (num_examples < 0):
raise ValueError('The training dataset must have an asserted cardinality')
approx = (math.floor if self.args.dataloader_drop_last else math.ceil)
steps = approx((num_examples / self.args.eval_batch_size))
ds = test_dataset.repeat().batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last).prefetch(tf.data.experimental.AUTOTUNE)
return (self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples) |
def create_optimizer_and_scheduler(self, num_training_steps: int):
"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n TFTrainer's init through :obj:`optimizers`, or subclass and override this method.\n "
if ((not self.optimizer) and (not self.lr_scheduler)):
(self.optimizer, self.lr_scheduler) = create_optimizer(self.args.learning_rate, num_training_steps, self.args.warmup_steps, adam_beta1=self.args.adam_beta1, adam_beta2=self.args.adam_beta2, adam_epsilon=self.args.adam_epsilon, weight_decay_rate=self.args.weight_decay, power=self.args.poly_power)
| body_hash: 638,026,942,663,855,200 | docstring: Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
| path: src/transformers/trainer_tf.py | name: create_optimizer_and_scheduler | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def create_optimizer_and_scheduler(self, num_training_steps: int):
"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n TFTrainer's init through :obj:`optimizers`, or subclass and override this method.\n "
if ((not self.optimizer) and (not self.lr_scheduler)):
(self.optimizer, self.lr_scheduler) = create_optimizer(self.args.learning_rate, num_training_steps, self.args.warmup_steps, adam_beta1=self.args.adam_beta1, adam_beta2=self.args.adam_beta2, adam_epsilon=self.args.adam_epsilon, weight_decay_rate=self.args.weight_decay, power=self.args.poly_power) |
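Per the docstring, the default optimizer/schedule pair can be replaced by passing a tuple through `TFTrainer`'s `optimizers` argument. A hedged sketch using the public `create_optimizer` helper (all hyperparameter values are arbitrary):

```python
from transformers import create_optimizer

# Returns (a tf.keras optimizer, its learning-rate schedule).
optimizer, lr_schedule = create_optimizer(
    init_lr=3e-5,
    num_train_steps=1000,
    num_warmup_steps=100,
    weight_decay_rate=0.01,
)
# Hand these to TFTrainer(..., optimizers=(optimizer, lr_schedule)) to bypass
# the defaults built by create_optimizer_and_scheduler above.
```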
def setup_wandb(self):
'\n Setup the optional Weights & Biases (`wandb`) integration.\n\n One can subclass and override this method to customize the setup if needed. Find more information\n `here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:\n\n Environment:\n WANDB_PROJECT:\n (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project\n WANDB_DISABLED:\n (Optional): boolean - defaults to false, set to "true" to disable wandb entirely\n '
if hasattr(self, '_setup_wandb'):
warnings.warn("The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.", FutureWarning)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv('WANDB_PROJECT', 'huggingface'), config=combined_dict, name=self.args.run_name)
| body_hash: -5,292,940,192,721,177,000 | docstring: Setup the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
| path: src/transformers/trainer_tf.py | name: setup_wandb | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def setup_wandb(self):
'\n Setup the optional Weights & Biases (`wandb`) integration.\n\n One can subclass and override this method to customize the setup if needed. Find more information\n `here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:\n\n Environment:\n WANDB_PROJECT:\n (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project\n WANDB_DISABLED:\n (Optional): boolean - defaults to false, set to "true" to disable wandb entirely\n '
if hasattr(self, '_setup_wandb'):
warnings.warn("The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.", FutureWarning)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv('WANDB_PROJECT', 'huggingface'), config=combined_dict, name=self.args.run_name) |
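The W&B integration above is configured through environment variables, which `setup_wandb` reads via `os.getenv`. A minimal sketch of setting them before the trainer is constructed (the project name is an arbitrary example):

```python
import os

os.environ["WANDB_PROJECT"] = "my-tf-experiments"  # overrides the default "huggingface" project
# os.environ["WANDB_DISABLED"] = "true"            # uncomment to turn the wandb integration off
```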
def setup_comet(self):
'\n Setup the optional Comet.ml integration.\n\n Environment:\n COMET_MODE:\n (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"\n COMET_PROJECT_NAME:\n (Optional): str - Comet.ml project name for experiments\n COMET_OFFLINE_DIRECTORY:\n (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"\n\n For a number of configurable items in the environment,\n see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__\n '
comet_mode = os.getenv('COMET_MODE', 'ONLINE').upper()
args = {'project_name': os.getenv('COMET_PROJECT_NAME', 'huggingface')}
experiment = None
if (comet_mode == 'ONLINE'):
experiment = comet_ml.Experiment(**args)
logger.info('Automatic Comet.ml online logging enabled')
elif (comet_mode == 'OFFLINE'):
args['offline_directory'] = os.getenv('COMET_OFFLINE_DIRECTORY', './')
experiment = comet_ml.OfflineExperiment(**args)
logger.info('Automatic Comet.ml offline logging enabled; use `comet upload` when finished')
if (experiment is not None):
experiment._set_model_graph(self.model, framework='transformers')
experiment._log_parameters(self.args, prefix='args/', framework='transformers')
experiment._log_parameters(self.model.config, prefix='config/', framework='transformers')
| body_hash: -6,661,743,849,111,943,000 | docstring: Setup the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
| path: src/transformers/trainer_tf.py | name: setup_comet | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def setup_comet(self):
'\n Setup the optional Comet.ml integration.\n\n Environment:\n COMET_MODE:\n (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"\n COMET_PROJECT_NAME:\n (Optional): str - Comet.ml project name for experiments\n COMET_OFFLINE_DIRECTORY:\n (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"\n\n For a number of configurable items in the environment,\n see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__\n '
comet_mode = os.getenv('COMET_MODE', 'ONLINE').upper()
args = {'project_name': os.getenv('COMET_PROJECT_NAME', 'huggingface')}
experiment = None
if (comet_mode == 'ONLINE'):
experiment = comet_ml.Experiment(**args)
logger.info('Automatic Comet.ml online logging enabled')
elif (comet_mode == 'OFFLINE'):
args['offline_directory'] = os.getenv('COMET_OFFLINE_DIRECTORY', './')
experiment = comet_ml.OfflineExperiment(**args)
logger.info('Automatic Comet.ml offline logging enabled; use `comet upload` when finished')
if (experiment is not None):
experiment._set_model_graph(self.model, framework='transformers')
experiment._log_parameters(self.args, prefix='args/', framework='transformers')
experiment._log_parameters(self.model.config, prefix='config/', framework='transformers') |
def prediction_loop(self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and\n :func:`~transformers.TFTrainer.predict`.\n\n Works both with or without labels.\n '
if hasattr(self, '_prediction_loop'):
warnings.warn("The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.", FutureWarning)
return self._prediction_loop(dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only)
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.args.prediction_loss_only)
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', num_examples)
logger.info(' Batch size = %d', self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
if (self.args.past_index >= 0):
self._past = None
for (step, batch) in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
(_, labels) = batch
if (not prediction_loss_only):
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if (self.args.n_replicas > 1):
for val in logits.values:
if (preds is None):
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if (label_ids is None):
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if (preds is None):
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if (label_ids is None):
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if (step == steps):
break
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics['eval_loss'] = (self.eval_loss.result().numpy() / steps)
for key in list(metrics.keys()):
if (not key.startswith('eval_')):
metrics[f'eval_{key}'] = metrics.pop(key)
if (self.args.past_index and hasattr(self, '_past')):
delattr(self, '_past')
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
| body_hash: -8,581,805,643,994,753,000 | docstring: Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
| path: src/transformers/trainer_tf.py | name: prediction_loop | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def prediction_loop(self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and\n :func:`~transformers.TFTrainer.predict`.\n\n Works both with or without labels.\n '
if hasattr(self, '_prediction_loop'):
warnings.warn("The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.", FutureWarning)
return self._prediction_loop(dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only)
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.args.prediction_loss_only)
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', num_examples)
logger.info(' Batch size = %d', self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
if (self.args.past_index >= 0):
self._past = None
for (step, batch) in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
(_, labels) = batch
if (not prediction_loss_only):
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if (self.args.n_replicas > 1):
for val in logits.values:
if (preds is None):
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if (label_ids is None):
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if (preds is None):
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if (label_ids is None):
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if (step == steps):
break
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics['eval_loss'] = (self.eval_loss.result().numpy() / steps)
for key in list(metrics.keys()):
if (not key.startswith('eval_')):
metrics[f'eval_{key}'] = metrics.pop(key)
if (self.args.past_index and hasattr(self, '_past')):
delattr(self, '_past')
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) |
def log(self, logs: Dict[(str, float)]) -> None:
'\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n '
if hasattr(self, '_log'):
warnings.warn("The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.", FutureWarning)
return self._log(logs)
logs['epoch'] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for (k, v) in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if (experiment is not None):
experiment._log_metrics(logs, step=self.global_step, epoch=self.epoch_logging, framework='transformers')
output = {**logs, **{'step': self.global_step}}
logger.info(output)
| body_hash: -4,662,213,817,398,437,000 | docstring: Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
| path: src/transformers/trainer_tf.py | name: log | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def log(self, logs: Dict[(str, float)]) -> None:
'\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n '
if hasattr(self, '_log'):
warnings.warn("The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.", FutureWarning)
return self._log(logs)
logs['epoch'] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for (k, v) in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if (experiment is not None):
experiment._log_metrics(logs, step=self.global_step, epoch=self.epoch_logging, framework='transformers')
output = {**logs, **{'step': self.global_step}}
logger.info(output) |
def evaluate(self, eval_dataset: Optional[tf.data.Dataset]=None) -> Dict[(str, float)]:
'\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are\n task-dependent (pass it to the init :obj:`compute_metrics` argument).\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of\n ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.\n If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,\n labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with\n multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions.\n '
(eval_ds, steps, num_examples) = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description='Evaluation')
logs = {**output.metrics}
logs['epoch'] = self.epoch_logging
self.log(logs)
return output.metrics
| body_hash: 7,102,218,496,738,634,000 | docstring: Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
| path: src/transformers/trainer_tf.py | name: evaluate | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def evaluate(self, eval_dataset: Optional[tf.data.Dataset]=None) -> Dict[(str, float)]:
'\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are\n task-dependent (pass it to the init :obj:`compute_metrics` argument).\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of\n ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.\n If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,\n labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with\n multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions.\n '
(eval_ds, steps, num_examples) = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description='Evaluation')
logs = {**output.metrics}
logs['epoch'] = self.epoch_logging
self.log(logs)
return output.metrics |
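A hedged sketch of how `evaluate` is typically driven with a `compute_metrics` callback; `model` and `eval_ds` stand for a TF model and a `(features, labels)` `tf.data.Dataset` built elsewhere, and the metric itself is an arbitrary example:

```python
import numpy as np
from transformers import TFTrainer, TFTrainingArguments, EvalPrediction

def compute_metrics(p: EvalPrediction):
    preds = np.argmax(p.predictions, axis=-1)  # predictions arrive as logits
    return {"accuracy": float((preds == p.label_ids).mean())}

args = TFTrainingArguments(output_dir="./out", per_device_eval_batch_size=16)
trainer = TFTrainer(model=model, args=args, eval_dataset=eval_ds, compute_metrics=compute_metrics)

metrics = trainer.evaluate()  # dict such as {"eval_loss": ..., "eval_accuracy": ...}
```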
def prediction_step(self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor) -> tf.Tensor:
'\n Compute the prediction on features and update the loss with labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, logits) = self.run_model(features, labels, False)
scaled_loss = (per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype))
self.eval_loss.update_state(scaled_loss)
return logits
| body_hash: 8,889,516,695,795,166,000 | docstring: Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
| path: src/transformers/trainer_tf.py | name: prediction_step | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def prediction_step(self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor) -> tf.Tensor:
'\n Compute the prediction on features and update the loss with labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, logits) = self.run_model(features, labels, False)
scaled_loss = (per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype))
self.eval_loss.update_state(scaled_loss)
return logits |
def train(self) -> None:
'\n Train method to train the model.\n '
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = (self.num_train_examples / self.total_train_batch_size)
approx = (math.floor if self.args.dataloader_drop_last else math.ceil)
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if (self.args.max_steps > 0):
t_total = self.args.max_steps
epochs = ((self.args.max_steps // self.steps_per_epoch) + int(((self.args.max_steps % self.steps_per_epoch) > 0)))
else:
t_total = (self.steps_per_epoch * self.args.num_train_epochs)
epochs = self.args.num_train_epochs
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info('Checkpoint file %s found and restoring from checkpoint', self.model.ckpt_manager.latest_checkpoint)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = (self.global_step // self.steps_per_epoch)
steps_trained_in_current_epoch = (self.global_step % self.steps_per_epoch)
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', self.global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text('args', self.args.to_json_string())
self.tb_writer.flush()
logger.info('***** Running training *****')
logger.info(' Num examples = %d', self.num_train_examples)
logger.info(' Num Epochs = %d', epochs)
logger.info(' Instantaneous batch size per device = %d', self.args.per_device_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', self.total_train_batch_size)
logger.info(' Gradient Accumulation steps = %d', self.args.gradient_accumulation_steps)
logger.info(' Steps per epoch = %d', self.steps_per_epoch)
logger.info(' Total optimization steps = %d', t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
if (self.args.past_index >= 0):
self._past = None
for (step, batch) in enumerate(train_ds):
if (steps_trained_in_current_epoch > 0):
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = (epoch_iter + ((step + 1) / self.steps_per_epoch))
training_loss = (self.train_loss.result() / (step + 1))
if self.args.debug:
logs = {}
logs['loss'] = training_loss.numpy()
logs['epoch'] = self.epoch_logging
self.log(logs)
if ((self.global_step == 1) and self.args.debug):
with self.tb_writer.as_default():
tf.summary.trace_export(name='training', step=self.global_step, profiler_outdir=self.args.logging_dir)
if ((self.args.eval_steps > 0) and self.args.evaluate_during_training and ((self.global_step % self.args.eval_steps) == 0)):
self.evaluate()
if (((self.args.logging_steps > 0) and ((self.global_step % self.args.logging_steps) == 0)) or ((self.global_step == 1) and self.args.logging_first_step)):
logs = {}
logs['loss'] = training_loss.numpy()
logs['learning_rate'] = self.lr_scheduler(self.global_step).numpy()
logs['epoch'] = self.epoch_logging
self.log(logs)
if ((self.args.save_steps > 0) and ((self.global_step % self.args.save_steps) == 0)):
ckpt_save_path = self.model.ckpt_manager.save()
logger.info('Saving checkpoint for step {} at {}'.format(self.global_step, ckpt_save_path))
if ((self.args.max_steps > 0) and (self.global_step >= t_total)):
break
if ((self.global_step % self.steps_per_epoch) == 0):
break
self.train_loss.reset_states()
if ((self.args.max_steps > 0) and (self.global_step >= self.args.max_steps)):
break
end_time = datetime.datetime.now()
logger.info('Training took: {}'.format(str((end_time - start_time))))
if (self.args.past_index and hasattr(self, '_past')):
delattr(self, '_past')
| body_hash: 2,479,631,024,617,770,500 | docstring: Train method to train the model. | path: src/transformers/trainer_tf.py | name: train | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def train(self) -> None:
'\n \n '
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = (self.num_train_examples / self.total_train_batch_size)
approx = (math.floor if self.args.dataloader_drop_last else math.ceil)
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if (self.args.max_steps > 0):
t_total = self.args.max_steps
epochs = ((self.args.max_steps // self.steps_per_epoch) + int(((self.args.max_steps % self.steps_per_epoch) > 0)))
else:
t_total = (self.steps_per_epoch * self.args.num_train_epochs)
epochs = self.args.num_train_epochs
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info('Checkpoint file %s found and restoring from checkpoint', self.model.ckpt_manager.latest_checkpoint)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = (self.global_step // self.steps_per_epoch)
steps_trained_in_current_epoch = (self.global_step % self.steps_per_epoch)
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', self.global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text('args', self.args.to_json_string())
self.tb_writer.flush()
logger.info('***** Running training *****')
logger.info(' Num examples = %d', self.num_train_examples)
logger.info(' Num Epochs = %d', epochs)
logger.info(' Instantaneous batch size per device = %d', self.args.per_device_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', self.total_train_batch_size)
logger.info(' Gradient Accumulation steps = %d', self.args.gradient_accumulation_steps)
logger.info(' Steps per epoch = %d', self.steps_per_epoch)
logger.info(' Total optimization steps = %d', t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
if (self.args.past_index >= 0):
self._past = None
for (step, batch) in enumerate(train_ds):
if (steps_trained_in_current_epoch > 0):
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = (epoch_iter + ((step + 1) / self.steps_per_epoch))
training_loss = (self.train_loss.result() / (step + 1))
if self.args.debug:
logs = {}
logs['loss'] = training_loss.numpy()
logs['epoch'] = self.epoch_logging
self.log(logs)
if ((self.global_step == 1) and self.args.debug):
with self.tb_writer.as_default():
tf.summary.trace_export(name='training', step=self.global_step, profiler_outdir=self.args.logging_dir)
if ((self.args.eval_steps > 0) and self.args.evaluate_during_training and ((self.global_step % self.args.eval_steps) == 0)):
self.evaluate()
if (((self.args.logging_steps > 0) and ((self.global_step % self.args.logging_steps) == 0)) or ((self.global_step == 1) and self.args.logging_first_step)):
logs = {}
logs['loss'] = training_loss.numpy()
logs['learning_rate'] = self.lr_scheduler(self.global_step).numpy()
logs['epoch'] = self.epoch_logging
self.log(logs)
if ((self.args.save_steps > 0) and ((self.global_step % self.args.save_steps) == 0)):
ckpt_save_path = self.model.ckpt_manager.save()
logger.info('Saving checkpoint for step {} at {}'.format(self.global_step, ckpt_save_path))
if ((self.args.max_steps > 0) and (self.global_step >= t_total)):
break
if ((self.global_step % self.steps_per_epoch) == 0):
break
self.train_loss.reset_states()
if ((self.args.max_steps > 0) and (self.global_step >= self.args.max_steps)):
break
end_time = datetime.datetime.now()
logger.info('Training took: {}'.format(str((end_time - start_time))))
if (self.args.past_index and hasattr(self, '_past')):
delattr(self, '_past') |
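A hedged end-to-end sketch of driving the training loop above (checkpoint name, hyperparameters, and `train_ds` are assumptions; building the model inside the strategy scope follows the commonly recommended pattern for this trainer):

```python
from transformers import TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments

training_args = TFTrainingArguments(
    output_dir="./tf_out",
    num_train_epochs=3,
    per_device_train_batch_size=8,
    logging_steps=50,
    save_steps=500,
)

with training_args.strategy.scope():
    # Variables are created under the same distribution strategy the trainer uses.
    model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

trainer = TFTrainer(model=model, args=training_args, train_dataset=train_ds)  # train_ds: tf.data.Dataset
trainer.train()
trainer.save_model()  # writes the final weights/config to training_args.output_dir
```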
def training_step(self, features, labels, nb_instances_in_global_batch):
'\n Perform a training step on features and labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, _) = self.run_model(features, labels, True)
scaled_loss = (per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype))
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [(g if (g is not None) else tf.zeros_like(v)) for (g, v) in zip(gradients, self.model.trainable_variables)]
if (self.args.gradient_accumulation_steps > 1):
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if (self.args.gradient_accumulation_steps == 1):
return gradients
| body_hash: 2,539,421,775,325,609,500 | docstring: Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
| path: src/transformers/trainer_tf.py | name: training_step | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def training_step(self, features, labels, nb_instances_in_global_batch):
'\n Perform a training step on features and labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, _) = self.run_model(features, labels, True)
scaled_loss = (per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype))
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [(g if (g is not None) else tf.zeros_like(v)) for (g, v) in zip(gradients, self.model.trainable_variables)]
if (self.args.gradient_accumulation_steps > 1):
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if (self.args.gradient_accumulation_steps == 1):
return gradients |
def run_model(self, features, labels, training):
'\n Computes the loss of the given features and labels pair.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n features (:obj:`tf.Tensor`): A batch of input features.\n labels (:obj:`tf.Tensor`): A batch of labels.\n training (:obj:`bool`): Whether or not to run the model in training mode.\n\n Returns:\n A tuple of two :obj:`tf.Tensor`: The loss and logits.\n '
if hasattr(self, '_run_model'):
warnings.warn("The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.", FutureWarning)
return self._run_model(features, labels, training)
if ((self.args.past_index >= 0) and (getattr(self, '_past', None) is not None)):
features['mems'] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
(loss, logits) = outputs[:2]
if (self.args.past_index >= 0):
self._past = outputs[self.args.past_index]
return (loss, logits)
| body_hash: 8,507,172,863,236,026,000 | docstring: Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
| path: src/transformers/trainer_tf.py | name: run_model | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def run_model(self, features, labels, training):
'\n Computes the loss of the given features and labels pair.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n features (:obj:`tf.Tensor`): A batch of input features.\n labels (:obj:`tf.Tensor`): A batch of labels.\n training (:obj:`bool`): Whether or not to run the model in training mode.\n\n Returns:\n A tuple of two :obj:`tf.Tensor`: The loss and logits.\n '
if hasattr(self, '_run_model'):
warnings.warn("The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.", FutureWarning)
return self._run_model(features, labels, training)
if ((self.args.past_index >= 0) and (getattr(self, '_past', None) is not None)):
features['mems'] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
(loss, logits) = outputs[:2]
if (self.args.past_index >= 0):
self._past = outputs[self.args.past_index]
return (loss, logits) |
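The docstring invites subclassing to customize how the loss is computed. A minimal hedged override sketch; the sparse cross-entropy shown is an arbitrary choice, not the library default, and it assumes a classification model whose first output holds the logits:

```python
import tensorflow as tf
from transformers import TFTrainer

class CustomLossTrainer(TFTrainer):
    def run_model(self, features, labels, training):
        outputs = self.model(features, training=training)
        logits = outputs[0]                         # assumed: first output is the logits
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        per_example_loss = loss_fn(labels, logits)  # keep per-example losses (no reduction),
        return per_example_loss, logits             # matching the (loss, logits) contract above
```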
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
'\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels.\n In that case, this method will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`):\n Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where\n ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,\n the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is\n a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead\n calculated by calling ``model(features, **labels)``.\n Returns:\n `NamedTuple`:\n predictions (:obj:`np.ndarray`):\n The predictions on :obj:`test_dataset`.\n label_ids (:obj:`np.ndarray`, `optional`):\n The labels (if the dataset contained some).\n metrics (:obj:`Dict[str, float]`, `optional`):\n The potential dictionary of metrics (if the dataset contained labels).\n '
(test_ds, steps, num_examples) = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description='Prediction')
| body_hash: -7,247,605,620,693,667,000 | docstring: Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
| path: src/transformers/trainer_tf.py | name: predict | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
'\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels.\n In that case, this method will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`):\n Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where\n ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,\n the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is\n a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead\n calculated by calling ``model(features, **labels)``.\n Returns:\n `NamedTuple`:\n predictions (:obj:`np.ndarray`):\n The predictions on :obj:`test_dataset`.\n label_ids (:obj:`np.ndarray`, `optional`):\n The labels (if the dataset contained some).\n metrics (:obj:`Dict[str, float]`, `optional`):\n The potential dictionary of metrics (if the dataset contained labels).\n '
(test_ds, steps, num_examples) = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description='Prediction') |
def save_model(self, output_dir: Optional[str]=None):
'\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n '
output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
logger.info('Saving model in {}'.format(output_dir))
if (not isinstance(self.model, TFPreTrainedModel)):
raise ValueError('Trainer.model appears to not be a PreTrainedModel')
self.model.save_pretrained(output_dir)
| body_hash: -7,942,789,213,139,627,000 | docstring: Will save the model, so you can reload it using :obj:`from_pretrained()`. | path: src/transformers/trainer_tf.py | name: save_model | repository_name: AdrienDS/transformers | lang: python | body_without_docstring:
def save_model(self, output_dir: Optional[str]=None):
'\n \n '
output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
logger.info('Saving model in {}'.format(output_dir))
if (not isinstance(self.model, TFPreTrainedModel)):
raise ValueError('Trainer.model appears to not be a PreTrainedModel')
self.model.save_pretrained(output_dir) |
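A short sketch of the save/reload round trip described by the docstring; `trainer` is a `TFTrainer` built as in the earlier sketches, and the model class and directory are assumptions:

```python
from transformers import TFAutoModelForSequenceClassification

trainer.save_model("./my_finetuned_model")  # thin wrapper over model.save_pretrained(...)

# Later, possibly in another process:
reloaded = TFAutoModelForSequenceClassification.from_pretrained("./my_finetuned_model")
```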
def initialize(self, cfn):
'Initialize the rule'
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec)
| body_hash: 7,519,247,917,375,911,000 | docstring: Initialize the rule | path: src/cfnlint/rules/resources/properties/AllowedValue.py | name: initialize | repository_name: janssenivo/cfn-python-lint | lang: python | body_without_docstring:
def initialize(self, cfn):
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec) |
def check_value(self, value, path, property_name, **kwargs):
'Check Value'
matches = []
allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
if allowed_value_specs:
if (str(value) not in allowed_value_specs):
message = 'You must specify a valid value for {0} ({1}).\nValid values are {2}'
matches.append(RuleMatch(path, message.format(property_name, value, allowed_value_specs)))
return matches | 2,931,616,691,034,806,000 | Check Value | src/cfnlint/rules/resources/properties/AllowedValue.py | check_value | janssenivo/cfn-python-lint | python | def check_value(self, value, path, property_name, **kwargs):
matches = []
allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
if allowed_value_specs:
if (str(value) not in allowed_value_specs):
message = 'You must specify a valid value for {0} ({1}).\nValid values are {2}'
matches.append(RuleMatch(path, message.format(property_name, value, allowed_value_specs)))
return matches |
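A hedged illustration of the matching behaviour documented above; rule stands in for an instance of the enclosing AllowedValue rule class, and the spec fragment is invented for the example.

# Hedged illustration: `rule` is an assumed instance of the enclosing rule class and `specs`
# mimics the value_specs fragment the linter would pass in.
specs = {'AllowedValues': ['t2.micro', 't3.micro']}
path = ['Resources', 'MyInstance', 'Properties', 'InstanceType']
rule.check_value('m5.large', path, 'InstanceType', value_specs=specs)   # -> one RuleMatch flagging the value
rule.check_value('t2.micro', path, 'InstanceType', value_specs=specs)   # -> [] because the value is allowed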
def check(self, cfn, properties, value_specs, property_specs, path):
'Check itself'
matches = list()
for (p_value, p_path) in properties.items_safe(path[:]):
for prop in p_value:
if (prop in value_specs):
value = value_specs.get(prop).get('Value', {})
if value:
value_type = value.get('ValueType', '')
property_type = property_specs.get('Properties').get(prop).get('Type')
matches.extend(cfn.check_value(p_value, prop, p_path, check_value=self.check_value, value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}), cfn=cfn, property_type=property_type, property_name=prop))
return matches | -235,133,823,034,690,980 | Check itself | src/cfnlint/rules/resources/properties/AllowedValue.py | check | janssenivo/cfn-python-lint | python | def check(self, cfn, properties, value_specs, property_specs, path):
matches = list()
for (p_value, p_path) in properties.items_safe(path[:]):
for prop in p_value:
if (prop in value_specs):
value = value_specs.get(prop).get('Value', {})
if value:
                        value_type = value.get('ValueType', '')
property_type = property_specs.get('Properties').get(prop).get('Type')
matches.extend(cfn.check_value(p_value, prop, p_path, check_value=self.check_value, value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}), cfn=cfn, property_type=property_type, property_name=prop))
return matches |
def match_resource_sub_properties(self, properties, property_type, path, cfn):
'Match for sub properties'
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
matches.extend(self.check(cfn, properties, specs, property_specs, path))
return matches | -6,248,846,930,321,518,000 | Match for sub properties | src/cfnlint/rules/resources/properties/AllowedValue.py | match_resource_sub_properties | janssenivo/cfn-python-lint | python | def match_resource_sub_properties(self, properties, property_type, path, cfn):
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
matches.extend(self.check(cfn, properties, specs, property_specs, path))
return matches |
def match_resource_properties(self, properties, resource_type, path, cfn):
'Check CloudFormation Properties'
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
matches.extend(self.check(cfn, properties, specs, resource_specs, path))
return matches | -1,158,974,634,323,442,000 | Check CloudFormation Properties | src/cfnlint/rules/resources/properties/AllowedValue.py | match_resource_properties | janssenivo/cfn-python-lint | python | def match_resource_properties(self, properties, resource_type, path, cfn):
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
matches.extend(self.check(cfn, properties, specs, resource_specs, path))
return matches |
@api_view(['GET'])
def get_public_channel_list(request, version):
' Endpoint: /public/<version>/channels/?=<query params> '
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': ''}}), content_type='application/json')
return HttpResponse(json.dumps(PublicChannelSerializer(channel_list, many=True).data), content_type='application/json') | -6,538,074,424,372,295,000 | Endpoint: /public/<version>/channels/?=<query params> | kolibri/core/public/api.py | get_public_channel_list | MikiasEphrem/kolibri | python | @api_view(['GET'])
def get_public_channel_list(request, version):
' '
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
        return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': ''}}), content_type='application/json')
return HttpResponse(json.dumps(PublicChannelSerializer(channel_list, many=True).data), content_type='application/json') |
@api_view(['GET'])
def get_public_channel_lookup(request, version, identifier):
' Endpoint: /public/<version>/channels/lookup/<identifier> '
try:
channel_list = _get_channel_list(version, request.query_params, identifier=identifier.strip().replace('-', ''))
except LookupError:
return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': ''}}), content_type='application/json')
if (not channel_list.exists()):
return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': ''}}), content_type='application/json')
return HttpResponse(json.dumps(PublicChannelSerializer(channel_list, many=True).data), content_type='application/json') | 1,402,104,657,418,538,200 | Endpoint: /public/<version>/channels/lookup/<identifier> | kolibri/core/public/api.py | get_public_channel_lookup | MikiasEphrem/kolibri | python | @api_view(['GET'])
def get_public_channel_lookup(request, version, identifier):
' '
try:
        channel_list = _get_channel_list(version, request.query_params, identifier=identifier.strip().replace('-', ''))
except LookupError:
        return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': ''}}), content_type='application/json')
if (not channel_list.exists()):
        return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': ''}}), content_type='application/json')
return HttpResponse(json.dumps(PublicChannelSerializer(channel_list, many=True).data), content_type='application/json') |
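A hedged sketch of calling the two public channel endpoints documented above from another machine; the host, port, API version string, and the 'id' field name are assumptions made for illustration.

# Hedged sketch: host, port, version string, and the 'id' field name are assumptions.
import requests

base = 'http://192.168.0.10:8080/public/v1'
channels = requests.get(base + '/channels/').json()                      # list endpoint
if channels:
    detail = requests.get(base + '/channels/lookup/' + channels[0]['id']).json()
# Unknown versions or identifiers come back as a 404 whose JSON body is
# {'id': <NOT_FOUND code>, 'metadata': {'view': ''}} as built by the handlers above.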
def list(self, request):
'Returns metadata information about the device'
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
info = {'application': 'kolibri', 'kolibri_version': kolibri.__version__, 'instance_id': instance_model.id, 'device_name': instance_model.hostname, 'operating_system': platform.system()}
return Response(info) | 203,232,092,821,044,770 | Returns metadata information about the device | kolibri/core/public/api.py | list | MikiasEphrem/kolibri | python | def list(self, request):
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
info = {'application': 'kolibri', 'kolibri_version': kolibri.__version__, 'instance_id': instance_model.id, 'device_name': instance_model.hostname, 'operating_system': platform.system()}
return Response(info) |
def setup_method(self, method):
'Initialize the test problem. '
self.aux_names = [] | -6,245,637,525,381,043,000 | Initialize the test problem. | proteus/tests/HotStart_3P/test_HotStart_rans3p.py | setup_method | burgreen/proteus | python | def setup_method(self, method):
' '
self.aux_names = [] |
def __init__(self, attacker, classifier, invoke_limit=100, average_invoke=False, **kwargs):
'\n :param Attacker attacker: The attacker you use.\n :param Classifier classifier: The classifier you want to attack.\n :param int invoke_limit: Limitation of invoke for each instance.\n :param bool average_invoke: If true, returns "Avg. Victim Model Queries".\n :param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail.\n '
super().__init__(attacker, classifier, **kwargs)
self.classifier = InvokeLimitClassifierWrapper(self.classifier, invoke_limit)
self.attacker = InvokeLimitAttackerWrapper(self.attacker, self.classifier)
self.__attacker = self.attacker
self.__classifier = self.classifier
self.__average_invoke = average_invoke | 7,128,506,053,846,003,000 | :param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
:param int invoke_limit: Limitation of invoke for each instance.
:param bool average_invoke: If true, returns "Avg. Victim Model Queries".
:param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail. | OpenAttack/attack_evals/invoke_limit_eval.py | __init__ | agcopenhaver/OpenAttack | python | def __init__(self, attacker, classifier, invoke_limit=100, average_invoke=False, **kwargs):
'\n :param Attacker attacker: The attacker you use.\n :param Classifier classifier: The classifier you want to attack.\n :param int invoke_limit: Limitation of invoke for each instance.\n :param bool average_invoke: If true, returns "Avg. Victim Model Queries".\n :param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail.\n '
super().__init__(attacker, classifier, **kwargs)
self.classifier = InvokeLimitClassifierWrapper(self.classifier, invoke_limit)
self.attacker = InvokeLimitAttackerWrapper(self.attacker, self.classifier)
self.__attacker = self.attacker
self.__classifier = self.classifier
self.__average_invoke = average_invoke |
def copytree(source, destination, ignore=None, include=None):
'\n Similar to shutil.copytree except that it removes the limitation that the destination directory should\n be present.\n\n :type source: str\n :param source:\n Path to the source folder to copy\n\n :type destination: str\n :param destination:\n Path to destination folder\n\n :type ignore: function\n :param ignore:\n A function that returns a set of file names to ignore, given a list of available file names. Similar to the\n ``ignore`` property of ``shutils.copytree`` method\n\n :type include: Callable[[str], bool]\n :param include:\n A function that will decide whether a file should be copied or skipped it. It accepts file name as parameter\n and return True or False. Returning True will continue copy operation, returning False will skip copy operation\n for that file\n '
if (not os.path.exists(source)):
LOG.warning('Skipping copy operation since source %s does not exist', source)
return
if (not os.path.exists(destination)):
LOG.debug('Creating target folders at %s', destination)
os.makedirs(destination)
try:
LOG.debug('Copying directory metadata from source (%s) to destination (%s)', source, destination)
shutil.copystat(source, destination)
except OSError as ex:
LOG.debug('Unable to copy file access times from %s to %s', source, destination, exc_info=ex)
names = os.listdir(source)
if (ignore is not None):
ignored_names = ignore(source, names)
else:
ignored_names = set()
for name in names:
if (name in ignored_names):
LOG.debug('File (%s) is in ignored set, skipping it', name)
continue
new_source = os.path.join(source, name)
new_destination = os.path.join(destination, name)
if (include and (not os.path.isdir(new_source)) and (not include(name))):
LOG.debug("File (%s) doesn't satisfy the include rule, skipping it", name)
continue
if os.path.isdir(new_source):
copytree(new_source, new_destination, ignore=ignore, include=include)
else:
LOG.debug('Copying source file (%s) to destination (%s)', new_source, new_destination)
shutil.copy2(new_source, new_destination) | -8,982,296,800,963,278,000 | Similar to shutil.copytree except that it removes the limitation that the destination directory should
be present.
:type source: str
:param source:
Path to the source folder to copy
:type destination: str
:param destination:
Path to destination folder
:type ignore: function
:param ignore:
A function that returns a set of file names to ignore, given a list of available file names. Similar to the
``ignore`` property of ``shutils.copytree`` method
:type include: Callable[[str], bool]
:param include:
A function that will decide whether a file should be copied or skipped it. It accepts file name as parameter
and return True or False. Returning True will continue copy operation, returning False will skip copy operation
for that file | aws_lambda_builders/utils.py | copytree | awslabs/aws-lambda-builders | python | def copytree(source, destination, ignore=None, include=None):
'\n Similar to shutil.copytree except that it removes the limitation that the destination directory should\n be present.\n\n :type source: str\n :param source:\n Path to the source folder to copy\n\n :type destination: str\n :param destination:\n Path to destination folder\n\n :type ignore: function\n :param ignore:\n A function that returns a set of file names to ignore, given a list of available file names. Similar to the\n ``ignore`` property of ``shutils.copytree`` method\n\n :type include: Callable[[str], bool]\n :param include:\n A function that will decide whether a file should be copied or skipped it. It accepts file name as parameter\n and return True or False. Returning True will continue copy operation, returning False will skip copy operation\n for that file\n '
if (not os.path.exists(source)):
LOG.warning('Skipping copy operation since source %s does not exist', source)
return
if (not os.path.exists(destination)):
LOG.debug('Creating target folders at %s', destination)
os.makedirs(destination)
try:
LOG.debug('Copying directory metadata from source (%s) to destination (%s)', source, destination)
shutil.copystat(source, destination)
except OSError as ex:
LOG.debug('Unable to copy file access times from %s to %s', source, destination, exc_info=ex)
names = os.listdir(source)
if (ignore is not None):
ignored_names = ignore(source, names)
else:
ignored_names = set()
for name in names:
if (name in ignored_names):
LOG.debug('File (%s) is in ignored set, skipping it', name)
continue
new_source = os.path.join(source, name)
new_destination = os.path.join(destination, name)
if (include and (not os.path.isdir(new_source)) and (not include(name))):
LOG.debug("File (%s) doesn't satisfy the include rule, skipping it", name)
continue
if os.path.isdir(new_source):
copytree(new_source, new_destination, ignore=ignore, include=include)
else:
LOG.debug('Copying source file (%s) to destination (%s)', new_source, new_destination)
shutil.copy2(new_source, new_destination) |
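A small usage sketch for the copytree helper above; the import path follows the record's path column and the directory and file names are invented.

# Usage sketch (directory names invented): copy only *.py files and skip __pycache__.
import os
import shutil
import tempfile

from aws_lambda_builders.utils import copytree   # import path inferred from the path column

src = tempfile.mkdtemp()
dst = os.path.join(tempfile.mkdtemp(), 'build')   # destination does not need to exist yet
open(os.path.join(src, 'app.py'), 'w').close()
open(os.path.join(src, 'notes.txt'), 'w').close()

copytree(src, dst,
         ignore=shutil.ignore_patterns('__pycache__'),   # same shape as shutil.copytree's ignore
         include=lambda name: name.endswith('.py'))      # per-file filter described in the docstring
print(os.listdir(dst))   # -> ['app.py']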
def which(cmd, mode=(os.F_OK | os.X_OK), executable_search_paths=None):
'Given a command, mode, and executable search paths list, return the paths which\n conforms to the given mode on the PATH with the prepended additional search paths,\n or None if there is no such file.\n `mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults\n to the result of os.environ.get("PATH")\n Note: This function was backported from the Python 3 source code.\n\n :type cmd: str\n :param cmd:\n Executable to be looked up in PATH.\n\n :type mode: str\n :param mode:\n Modes of access for the executable.\n\n :type executable_search_paths: list\n :param executable_search_paths:\n List of paths to look for `cmd` in preference order.\n '
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and (not os.path.isdir(fn)))
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
path = os.environ.get('PATH', os.defpath)
if (not path):
return None
path = path.split(os.pathsep)
if executable_search_paths:
path = (executable_search_paths + path)
if (sys.platform == 'win32'):
if (os.curdir not in path):
path.insert(0, os.curdir)
pathext = os.environ.get('PATHEXT', '').split(os.pathsep)
if any((cmd.lower().endswith(ext.lower()) for ext in pathext)):
files = [cmd]
else:
files = [(cmd + ext) for ext in pathext]
else:
files = [cmd]
seen = set()
paths = []
for dir in path:
normdir = os.path.normcase(dir)
if (normdir not in seen):
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
paths.append(name)
return paths | 8,573,287,553,414,485,000 | Given a command, mode, and executable search paths list, return the paths which
conforms to the given mode on the PATH with the prepended additional search paths,
or None if there is no such file.
`mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults
to the result of os.environ.get("PATH")
Note: This function was backported from the Python 3 source code.
:type cmd: str
:param cmd:
Executable to be looked up in PATH.
:type mode: str
:param mode:
Modes of access for the executable.
:type executable_search_paths: list
:param executable_search_paths:
List of paths to look for `cmd` in preference order. | aws_lambda_builders/utils.py | which | awslabs/aws-lambda-builders | python | def which(cmd, mode=(os.F_OK | os.X_OK), executable_search_paths=None):
'Given a command, mode, and executable search paths list, return the paths which\n conforms to the given mode on the PATH with the prepended additional search paths,\n or None if there is no such file.\n `mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults\n to the result of os.environ.get("PATH")\n Note: This function was backported from the Python 3 source code.\n\n :type cmd: str\n :param cmd:\n Executable to be looked up in PATH.\n\n :type mode: str\n :param mode:\n Modes of access for the executable.\n\n :type executable_search_paths: list\n :param executable_search_paths:\n List of paths to look for `cmd` in preference order.\n '
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and (not os.path.isdir(fn)))
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
path = os.environ.get('PATH', os.defpath)
if (not path):
return None
path = path.split(os.pathsep)
if executable_search_paths:
path = (executable_search_paths + path)
if (sys.platform == 'win32'):
if (os.curdir not in path):
path.insert(0, os.curdir)
        pathext = os.environ.get('PATHEXT', '').split(os.pathsep)
if any((cmd.lower().endswith(ext.lower()) for ext in pathext)):
files = [cmd]
else:
files = [(cmd + ext) for ext in pathext]
else:
files = [cmd]
seen = set()
paths = []
for dir in path:
normdir = os.path.normcase(dir)
if (normdir not in seen):
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
paths.append(name)
return paths |
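A short usage sketch for the which helper above; the import path follows the record's path column and the extra search directory is invented.

# Usage sketch: look up an executable, preferring a project-local bin directory first.
from aws_lambda_builders.utils import which   # import path inferred from the path column

candidates = which('python', executable_search_paths=['/opt/myproject/bin'])   # extra path is invented
if not candidates:
    raise RuntimeError('python was not found on PATH')
print(candidates[0])   # first match wins because the extra paths are prepended to PATH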
def get_goarch(architecture):
'\n Parameters\n ----------\n architecture : str\n name of the type of architecture\n\n Returns\n -------\n str\n returns a valid GO Architecture value\n '
return ('arm64' if (architecture == ARM64) else 'amd64') | -6,302,580,191,235,693,000 | Parameters
----------
architecture : str
name of the type of architecture
Returns
-------
str
returns a valid GO Architecture value | aws_lambda_builders/utils.py | get_goarch | awslabs/aws-lambda-builders | python | def get_goarch(architecture):
'\n Parameters\n ----------\n architecture : str\n name of the type of architecture\n\n Returns\n -------\n str\n returns a valid GO Architecture value\n '
return ('arm64' if (architecture == ARM64) else 'amd64') |
def partition_lines(lines, step=1000000):
'Note: line numbers are **1-based**\n '
lo = pd.DataFrame.from_records([dict(start=lo, qty=min(((lines + 1) - lo), step), lines=lines) for lo in range(1, (lines + 1), step)])
return lo | 644,283,970,201,553,900 | Note: line numbers are **1-based** | nb4/slogfiles.py | partition_lines | Agoric/testnet-notes | python | def partition_lines(lines, step=1000000):
'\n '
lo = pd.DataFrame.from_records([dict(start=lo, qty=min(((lines + 1) - lo), step), lines=lines) for lo in range(1, (lines + 1), step)])
return lo |
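A quick worked example of the partitioning above, assuming partition_lines is in scope; it shows the 1-based chunks produced for a 2.5 million line file with the default step.

# Worked example, assuming partition_lines from the record above is in scope.
chunks = partition_lines(2_500_000)
print(chunks)
#      start      qty    lines
# 0        1  1000000  2500000
# 1  1000001  1000000  2500000
# 2  2000001   500000  2500000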
def iter_cranks(path):
'split each slogfile into runs (each beginning with an import-kernel event),\n process each run by finding sequential matching deliver+deliver-result pairs,\n turn each pair into a (crankNum, computrons, wallclock) triple\n '
log.info('iter_cranks: %s', path)
with gzip.open(path) as f:
kernel = None
deliver = None
block = None
syscalls = None
for (ix, line) in enumerate(f):
try:
data = json.loads(line)
except json.JSONDecodeError:
log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))
continue
ty = data['type']
if (ty == 'import-kernel-finish'):
kernel = data
deliver = None
syscalls = None
(yield dict(kernel, slogfile=path.name, line=ix))
elif (ty == 'create-vat'):
(yield dict(slogfile=path.name, line=ix, time=data['time'], type=ty, vatID=data['vatID'], description=data['description'], managerType=data['managerType'], time_kernel=kernel['time']))
elif (ty == 'cosmic-swingset-end-block-start'):
block = data
elif (ty == 'cosmic-swingset-end-block-finish'):
time = data['time']
time_start = block['time']
dur = (time - time_start)
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
(yield dict(slogfile=path.name, line=ix, time=time, type=ty, time_start=time_start, dur=dur, blockHeight=data['blockHeight'], blockTime=data['blockTime'], time_kernel=time_kernel))
block = None
elif (deliver is None):
if (ty == 'deliver'):
deliver = data
syscalls = 0
elif (data['type'] == 'deliver-result'):
time = data['time']
time_start = deliver['time']
dur = (time - time_start)
method = (deliver['kd'][2]['method'] if (deliver['kd'][0] == 'message') else None)
compute = (data['dr'][2]['compute'] if (type(data['dr'][2]) is type({})) else None)
if block:
blockHeight = block['blockHeight']
blockTime = block['blockTime']
else:
log.warning('%s:%d: missing block context', path.name, ix)
blockHeight = blockTime = np.nan
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
(yield dict(slogfile=path.name, line=ix, time=time, type=ty, crankNum=data['crankNum'], deliveryNum=data['deliveryNum'], vatID=data['vatID'], kd=deliver['kd'], method=method, syscalls=syscalls, dr=data['dr'], compute=compute, time_start=time_start, dur=dur, blockHeight=blockHeight, blockTime=blockTime, time_kernel=time_kernel))
deliver = None
elif (ty == 'syscall-result'):
syscalls += 1
elif (ty in ['clist', 'syscall']):
continue
else:
log.warning('%s:%d: expected deliver-result; got: %s', path.name, ix, ty)
deliver = None | -8,181,327,759,624,395,000 | split each slogfile into runs (each beginning with an import-kernel event),
process each run by finding sequential matching deliver+deliver-result pairs,
turn each pair into a (crankNum, computrons, wallclock) triple | nb4/slogfiles.py | iter_cranks | Agoric/testnet-notes | python | def iter_cranks(path):
'split each slogfile into runs (each beginning with an import-kernel event),\n process each run by finding sequential matching deliver+deliver-result pairs,\n turn each pair into a (crankNum, computrons, wallclock) triple\n '
log.info('iter_cranks: %s', path)
with gzip.open(path) as f:
kernel = None
deliver = None
block = None
syscalls = None
for (ix, line) in enumerate(f):
try:
data = json.loads(line)
except json.JSONDecodeError:
log.warning('%s:%d: bad JSON: %s', path.name, ix, repr(line))
continue
ty = data['type']
if (ty == 'import-kernel-finish'):
kernel = data
deliver = None
syscalls = None
(yield dict(kernel, slogfile=path.name, line=ix))
elif (ty == 'create-vat'):
(yield dict(slogfile=path.name, line=ix, time=data['time'], type=ty, vatID=data['vatID'], description=data['description'], managerType=data['managerType'], time_kernel=kernel['time']))
elif (ty == 'cosmic-swingset-end-block-start'):
block = data
elif (ty == 'cosmic-swingset-end-block-finish'):
time = data['time']
time_start = block['time']
dur = (time - time_start)
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
(yield dict(slogfile=path.name, line=ix, time=time, type=ty, time_start=time_start, dur=dur, blockHeight=data['blockHeight'], blockTime=data['blockTime'], time_kernel=time_kernel))
block = None
elif (deliver is None):
if (ty == 'deliver'):
deliver = data
syscalls = 0
elif (data['type'] == 'deliver-result'):
time = data['time']
time_start = deliver['time']
dur = (time - time_start)
method = (deliver['kd'][2]['method'] if (deliver['kd'][0] == 'message') else None)
compute = (data['dr'][2]['compute'] if (type(data['dr'][2]) is type({})) else None)
if block:
blockHeight = block['blockHeight']
blockTime = block['blockTime']
else:
log.warning('%s:%d: missing block context', path.name, ix)
blockHeight = blockTime = np.nan
if kernel:
time_kernel = kernel['time']
else:
log.warning('%s:%d: missing kernel context', path.name, ix)
time_kernel = np.nan
(yield dict(slogfile=path.name, line=ix, time=time, type=ty, crankNum=data['crankNum'], deliveryNum=data['deliveryNum'], vatID=data['vatID'], kd=deliver['kd'], method=method, syscalls=syscalls, dr=data['dr'], compute=compute, time_start=time_start, dur=dur, blockHeight=blockHeight, blockTime=blockTime, time_kernel=time_kernel))
deliver = None
elif (ty == 'syscall-result'):
syscalls += 1
elif (ty in ['clist', 'syscall']):
continue
else:
log.warning('%s:%d: expected deliver-result; got: %s', path.name, ix, ty)
deliver = None |
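A hedged sketch of driving the generator above over a single slogfile and summarising the crank records; the filename is invented, and pandas fills in the uneven record keys.

# Hedged sketch (filename invented): collect yielded events and look at computrons per second.
from pathlib import Path

import pandas as pd

events = pd.DataFrame.from_records(iter_cranks(Path('validator-agorictest.slog.gz')))
cranks = events[events.type == 'deliver-result'].dropna(subset=['compute'])
cranks = cranks.assign(rate=cranks.compute.astype(float) / cranks.dur)
print(cranks.groupby('method').rate.describe())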
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIOBinaryFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.bin.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) | 631,313,947,189,636,700 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIOBinaryFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.bin.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIOPortableASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.odc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) | -7,919,440,319,864,761,000 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIOPortableASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.odc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIONewASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.newc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) | -8,965,766,636,364,369,000 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIONewASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.newc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIONewASCIIFileWithChecksumTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.crc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) | 8,044,887,331,691,074,000 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIONewASCIIFileWithChecksumTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.crc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOPathSpec(location='/syslog', parent=path_spec) |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def registry_record_matches(registry_record_str, registry, repository):
'\n\n :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo\n :param registry: the registry to match against\n :param repository: the repository to match against\n :return: bool true if a match, false if not\n '
return (((registry_record_str[(- 1)] == '*') and '{}/{}'.format(registry, repository).startswith(registry_record_str[:(- 1)])) or (('/' in registry_record_str) and (registry_record_str == '{}/{}'.format(registry, repository))) or (registry_record_str == registry)) | -8,767,196,122,331,420,000 | :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo
:param registry: the registry to match against
:param repository: the repository to match against
:return: bool true if a match, false if not | anchore_engine/auth/common.py | registry_record_matches | Mattlk13/anchore-engine | python | def registry_record_matches(registry_record_str, registry, repository):
'\n\n :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo\n :param registry: the registry to match against\n :param repository: the repository to match against\n :return: bool true if a match, false if not\n '
return (((registry_record_str[(- 1)] == '*') and '{}/{}'.format(registry, repository).startswith(registry_record_str[:(- 1)])) or (('/' in registry_record_str) and (registry_record_str == '{}/{}'.format(registry, repository))) or (registry_record_str == registry)) |
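Worked examples of the three match modes described above; the registry and repository names are invented and the import path follows the record's path column.

# Worked examples (names invented); import path inferred from the path column.
from anchore_engine.auth.common import registry_record_matches

registry_record_matches('docker.io', 'docker.io', 'library/nginx')                 # True: bare registry match
registry_record_matches('docker.io/library/nginx', 'docker.io', 'library/nginx')   # True: exact registry/repo match
registry_record_matches('quay.io/myorg/*', 'quay.io', 'myorg/scanner')             # True: wildcard prefix match
registry_record_matches('quay.io/other/*', 'quay.io', 'myorg/scanner')             # False: prefix does not match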
def _compute_power_transforms(Ys: Dict[(str, List[float])]) -> Dict[(str, PowerTransformer)]:
'Compute power transforms.'
power_transforms = {}
for (k, ys) in Ys.items():
y = np.array(ys)[:, None]
pt = PowerTransformer(method='yeo-johnson').fit(y)
power_transforms[k] = pt
return power_transforms | 6,725,729,149,776,200,000 | Compute power transforms. | ax/modelbridge/transforms/power_transform_y.py | _compute_power_transforms | danielcohenlive/Ax-1 | python | def _compute_power_transforms(Ys: Dict[(str, List[float])]) -> Dict[(str, PowerTransformer)]:
power_transforms = {}
for (k, ys) in Ys.items():
y = np.array(ys)[:, None]
pt = PowerTransformer(method='yeo-johnson').fit(y)
power_transforms[k] = pt
return power_transforms |
def _compute_inverse_bounds(power_transforms: Dict[(str, PowerTransformer)], tol: float=1e-10) -> Dict[(str, Tuple[(float, float)])]:
'Computes the image of the transform so we can clip when we untransform.\n\n The inverse of the Yeo-Johnson transform is given by:\n if X >= 0 and lambda == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda != 0:\n X = (X_trans * lambda + 1) ** (1 / lambda) - 1\n elif X < 0 and lambda != 2:\n X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))\n elif X < 0 and lambda == 2:\n X = 1 - exp(-X_trans)\n\n We can break this down into three cases:\n lambda < 0: X < -1 / lambda\n 0 <= lambda <= 2: X is unbounded\n lambda > 2: X > 1 / (2 - lambda)\n\n Sklearn standardizes the transformed values to have mean zero and standard\n deviation 1, so we also need to account for this when we compute the bounds.\n '
inv_bounds = defaultdict()
for (k, pt) in power_transforms.items():
bounds = [(- np.inf), np.inf]
(mu, sigma) = (pt._scaler.mean_.item(), pt._scaler.scale_.item())
lambda_ = pt.lambdas_.item()
if (lambda_ < ((- 1) * tol)):
bounds[1] = ((((- 1.0) / lambda_) - mu) / sigma)
elif (lambda_ > (2.0 + tol)):
bounds[0] = (((1.0 / (2.0 - lambda_)) - mu) / sigma)
inv_bounds[k] = tuple(checked_cast_list(float, bounds))
return inv_bounds | -6,789,349,316,306,147,000 | Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X < -1 / lambda
0 <= lambda <= 2: X is unbounded
lambda > 2: X > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds. | ax/modelbridge/transforms/power_transform_y.py | _compute_inverse_bounds | danielcohenlive/Ax-1 | python | def _compute_inverse_bounds(power_transforms: Dict[(str, PowerTransformer)], tol: float=1e-10) -> Dict[(str, Tuple[(float, float)])]:
'Computes the image of the transform so we can clip when we untransform.\n\n The inverse of the Yeo-Johnson transform is given by:\n if X >= 0 and lambda == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda != 0:\n X = (X_trans * lambda + 1) ** (1 / lambda) - 1\n elif X < 0 and lambda != 2:\n X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))\n elif X < 0 and lambda == 2:\n X = 1 - exp(-X_trans)\n\n We can break this down into three cases:\n lambda < 0: X < -1 / lambda\n 0 <= lambda <= 2: X is unbounded\n lambda > 2: X > 1 / (2 - lambda)\n\n Sklearn standardizes the transformed values to have mean zero and standard\n deviation 1, so we also need to account for this when we compute the bounds.\n '
inv_bounds = defaultdict()
for (k, pt) in power_transforms.items():
bounds = [(- np.inf), np.inf]
(mu, sigma) = (pt._scaler.mean_.item(), pt._scaler.scale_.item())
lambda_ = pt.lambdas_.item()
if (lambda_ < ((- 1) * tol)):
bounds[1] = ((((- 1.0) / lambda_) - mu) / sigma)
elif (lambda_ > (2.0 + tol)):
bounds[0] = (((1.0 / (2.0 - lambda_)) - mu) / sigma)
inv_bounds[k] = tuple(checked_cast_list(float, bounds))
return inv_bounds |
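A small numerical illustration of why those bounds exist, written against a hand-rolled Yeo-Johnson inverse rather than Ax or sklearn; the lambda value is invented and the mean/std rescaling is left out for simplicity.

# Illustration only (not Ax code): for lambda < 0 the inverse is defined only below -1/lambda.
import numpy as np

lmbda = -0.4                         # invented example; the bound is -1/lambda = 2.5

def yj_inverse_nonneg(y, lmbda):
    # inverse of the X >= 0, lambda != 0 branch quoted in the docstring above
    return np.power(y * lmbda + 1.0, 1.0 / lmbda) - 1.0

print(yj_inverse_nonneg(2.4, lmbda))   # large but finite, just below the bound
print(yj_inverse_nonneg(2.6, lmbda))   # nan: y * lambda + 1 goes negative past the bound
# _compute_inverse_bounds additionally shifts and scales the bound by the fitted
# scaler's mean and std, since sklearn standardizes the transformed values.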
def transform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
'Winsorize observation data in place.'
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names):
transform = self.power_transforms[m].transform
(obsd.means[i], obsd.covariance[(i, i)]) = match_ci_width_truncated(mean=obsd.means[i], variance=obsd.covariance[(i, i)], transform=(lambda y: transform(np.array(y, ndmin=2))), lower_bound=(- np.inf), upper_bound=np.inf)
return observation_data | 4,156,824,596,059,636,700 | Winsorize observation data in place. | ax/modelbridge/transforms/power_transform_y.py | transform_observation_data | danielcohenlive/Ax-1 | python | def transform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names):
transform = self.power_transforms[m].transform
(obsd.means[i], obsd.covariance[(i, i)]) = match_ci_width_truncated(mean=obsd.means[i], variance=obsd.covariance[(i, i)], transform=(lambda y: transform(np.array(y, ndmin=2))), lower_bound=(- np.inf), upper_bound=np.inf)
return observation_data |
def untransform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
'Winsorize observation data in place.'
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names):
(l, u) = self.inv_bounds[m]
transform = self.power_transforms[m].inverse_transform
if ((not self.clip_mean) and ((obsd.means[i] < l) or (obsd.means[i] > u))):
raise ValueError("Can't untransform mean outside the bounds without clipping")
(obsd.means[i], obsd.covariance[(i, i)]) = match_ci_width_truncated(mean=obsd.means[i], variance=obsd.covariance[(i, i)], transform=(lambda y: transform(np.array(y, ndmin=2))), lower_bound=l, upper_bound=u, clip_mean=True)
return observation_data | -321,994,114,304,318,900 | Winsorize observation data in place. | ax/modelbridge/transforms/power_transform_y.py | untransform_observation_data | danielcohenlive/Ax-1 | python | def untransform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names):
(l, u) = self.inv_bounds[m]
transform = self.power_transforms[m].inverse_transform
if ((not self.clip_mean) and ((obsd.means[i] < l) or (obsd.means[i] > u))):
raise ValueError("Can't untransform mean outside the bounds without clipping")
(obsd.means[i], obsd.covariance[(i, i)]) = match_ci_width_truncated(mean=obsd.means[i], variance=obsd.covariance[(i, i)], transform=(lambda y: transform(np.array(y, ndmin=2))), lower_bound=l, upper_bound=u, clip_mean=True)
return observation_data |
def _print_out(inputstring):
'Print the inputstring. To make it compatible with Python2 and Python3.'
sys.stdout.write((inputstring + '\n')) | -1,625,176,883,322,357,000 | Print the inputstring. To make it compatible with Python2 and Python3. | dummy_serial.py | _print_out | edgar-bonet/minimalmodbus | python | def _print_out(inputstring):
sys.stdout.write((inputstring + '\n')) |
def __repr__(self):
'String representation of the dummy_serial object'
return '{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})'.format(self.__module__, self.__class__.__name__, id(self), self._isOpen, self.port, self.timeout, self._waiting_data) | 8,809,353,308,238,116,000 | String representation of the dummy_serial object | dummy_serial.py | __repr__ | edgar-bonet/minimalmodbus | python | def __repr__(self):
return '{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})'.format(self.__module__, self.__class__.__name__, id(self), self._isOpen, self.port, self.timeout, self._waiting_data) |
def open(self):
'Open a (previously initialized) port on dummy_serial.'
if VERBOSE:
_print_out('\nDummy_serial: Opening port\n')
if self._isOpen:
raise IOError('Dummy_serial: The port is already open')
self._isOpen = True
self.port = self._initial_port_name | 3,247,762,950,439,310,000 | Open a (previously initialized) port on dummy_serial. | dummy_serial.py | open | edgar-bonet/minimalmodbus | python | def open(self):
if VERBOSE:
_print_out('\nDummy_serial: Opening port\n')
if self._isOpen:
raise IOError('Dummy_serial: The port is already open')
self._isOpen = True
self.port = self._initial_port_name |
def close(self):
'Close a port on dummy_serial.'
if VERBOSE:
_print_out('\nDummy_serial: Closing port\n')
if (not self._isOpen):
raise IOError('Dummy_serial: The port is already closed')
self._isOpen = False
self.port = None | -4,742,343,131,919,546,000 | Close a port on dummy_serial. | dummy_serial.py | close | edgar-bonet/minimalmodbus | python | def close(self):
if VERBOSE:
_print_out('\nDummy_serial: Closing port\n')
if (not self._isOpen):
raise IOError('Dummy_serial: The port is already closed')
self._isOpen = False
self.port = None |
def write(self, inputdata):
'Write to a port on dummy_serial.\n\n Args:\n inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response\n for subsequent read operations.\n\n Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.\n\n '
if VERBOSE:
_print_out((('\nDummy_serial: Writing to port. Given:' + repr(inputdata)) + '\n'))
if (sys.version_info[0] > 2):
if (not (type(inputdata) == bytes)):
raise TypeError(('The input must be type bytes. Given:' + repr(inputdata)))
inputstring = str(inputdata, encoding='latin1')
else:
inputstring = inputdata
if (not self._isOpen):
raise IOError(('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata)))
try:
response = RESPONSES[inputstring]
except:
response = DEFAULT_RESPONSE
self._waiting_data = response | 5,103,098,685,743,835,000 | Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**. | dummy_serial.py | write | edgar-bonet/minimalmodbus | python | def write(self, inputdata):
'Write to a port on dummy_serial.\n\n Args:\n inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response\n for subsequent read operations.\n\n Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.\n\n '
if VERBOSE:
_print_out((('\nDummy_serial: Writing to port. Given:' + repr(inputdata)) + '\n'))
if (sys.version_info[0] > 2):
if (not (type(inputdata) == bytes)):
raise TypeError(('The input must be type bytes. Given:' + repr(inputdata)))
inputstring = str(inputdata, encoding='latin1')
else:
inputstring = inputdata
if (not self._isOpen):
raise IOError(('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata)))
try:
response = RESPONSES[inputstring]
except:
response = DEFAULT_RESPONSE
self._waiting_data = response |
def read(self, numberOfBytes):
'Read from a port on dummy_serial.\n\n The response is dependent on what was written last to the port on dummy_serial,\n and what is defined in the :data:`RESPONSES` dictionary.\n\n Args:\n numberOfBytes (int): For compability with the real function.\n\n Returns a **string** for Python2 and **bytes** for Python3.\n\n If the response is shorter than numberOfBytes, it will sleep for timeout.\n If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.\n\n '
if VERBOSE:
_print_out('\nDummy_serial: Reading from port (max length {!r} bytes)'.format(numberOfBytes))
if (numberOfBytes < 0):
raise IOError('Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}'.format(numberOfBytes))
if (not self._isOpen):
raise IOError('Dummy_serial: Trying to read, but the port is not open.')
if (self._waiting_data == DEFAULT_RESPONSE):
returnstring = self._waiting_data
elif (numberOfBytes == len(self._waiting_data)):
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
elif (numberOfBytes < len(self._waiting_data)):
if VERBOSE:
_print_out(('Dummy_serial: The numberOfBytes to read is smaller than the available data. ' + 'Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}'.format(self._waiting_data, len(self._waiting_data), numberOfBytes)))
returnstring = self._waiting_data[:numberOfBytes]
self._waiting_data = self._waiting_data[numberOfBytes:]
else:
if VERBOSE:
_print_out(('Dummy_serial: The numberOfBytes to read is larger than the available data. ' + 'Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}'.format(self._waiting_data, len(self._waiting_data), numberOfBytes)))
time.sleep(self.timeout)
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
if VERBOSE:
_print_out('Dummy_serial read return data: {!r} (has length {})\n'.format(returnstring, len(returnstring)))
if (sys.version_info[0] > 2):
return bytes(returnstring, encoding='latin1')
else:
return returnstring | 4,200,138,346,946,070,500 | Read from a port on dummy_serial.
The response is dependent on what was written last to the port on dummy_serial,
and what is defined in the :data:`RESPONSES` dictionary.
Args:
numberOfBytes (int): For compability with the real function.
Returns a **string** for Python2 and **bytes** for Python3.
If the response is shorter than numberOfBytes, it will sleep for timeout.
If the response is longer than numberOfBytes, it will return only numberOfBytes bytes. | dummy_serial.py | read | edgar-bonet/minimalmodbus | python | def read(self, numberOfBytes):
'Read from a port on dummy_serial.\n\n The response is dependent on what was written last to the port on dummy_serial,\n and what is defined in the :data:`RESPONSES` dictionary.\n\n Args:\n numberOfBytes (int): For compability with the real function.\n\n Returns a **string** for Python2 and **bytes** for Python3.\n\n If the response is shorter than numberOfBytes, it will sleep for timeout.\n If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.\n\n '
if VERBOSE:
_print_out('\nDummy_serial: Reading from port (max length {!r} bytes)'.format(numberOfBytes))
if (numberOfBytes < 0):
raise IOError('Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}'.format(numberOfBytes))
if (not self._isOpen):
raise IOError('Dummy_serial: Trying to read, but the port is not open.')
if (self._waiting_data == DEFAULT_RESPONSE):
returnstring = self._waiting_data
elif (numberOfBytes == len(self._waiting_data)):
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
elif (numberOfBytes < len(self._waiting_data)):
if VERBOSE:
_print_out(('Dummy_serial: The numberOfBytes to read is smaller than the available data. ' + 'Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}'.format(self._waiting_data, len(self._waiting_data), numberOfBytes)))
returnstring = self._waiting_data[:numberOfBytes]
self._waiting_data = self._waiting_data[numberOfBytes:]
else:
if VERBOSE:
_print_out(('Dummy_serial: The numberOfBytes to read is larger than the available data. ' + 'Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}'.format(self._waiting_data, len(self._waiting_data), numberOfBytes)))
time.sleep(self.timeout)
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
if VERBOSE:
_print_out('Dummy_serial read return data: {!r} (has length {})\n'.format(returnstring, len(returnstring)))
if (sys.version_info[0] > 2):
return bytes(returnstring, encoding='latin1')
else:
return returnstring |
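A hedged sketch of the write/read contract described in the two docstrings above; port stands for an already-open instance of the enclosing dummy serial class, and RESPONSES and DEFAULT_RESPONSE are the module-level names the docstrings refer to.

# Hedged sketch: `port` is an assumed, already-open instance of the enclosing class.
RESPONSES['PING'] = 'PONG'        # canned reply for a specific written request

port.write(b'PING')               # Python 3 takes bytes; the canned reply becomes waiting data
assert port.read(4) == b'PONG'    # exact length: everything is returned and the buffer resets

port.write(b'PING')
port.read(2)                      # -> b'PO'; the remaining b'NG' is kept for the next read
port.write(b'???')                # unknown request, so the next read yields DEFAULT_RESPONSE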
def _calc_validation_statistics(validation_results):
'\n Calculate summary statistics for the validation results and\n return ``ExpectationStatistics``.\n '
successful_expectations = sum((exp.success for exp in validation_results))
evaluated_expectations = len(validation_results)
unsuccessful_expectations = (evaluated_expectations - successful_expectations)
success = (successful_expectations == evaluated_expectations)
try:
success_percent = ((successful_expectations / evaluated_expectations) * 100)
except ZeroDivisionError:
success_percent = None
return ValidationStatistics(successful_expectations=successful_expectations, evaluated_expectations=evaluated_expectations, unsuccessful_expectations=unsuccessful_expectations, success=success, success_percent=success_percent) | -3,360,372,455,839,803,400 | Calculate summary statistics for the validation results and
return ``ExpectationStatistics``. | great_expectations/data_asset/data_asset.py | _calc_validation_statistics | BSofo/great_expectations | python | def _calc_validation_statistics(validation_results):
'\n Calculate summary statistics for the validation results and\n return ``ExpectationStatistics``.\n '
successful_expectations = sum((exp.success for exp in validation_results))
evaluated_expectations = len(validation_results)
unsuccessful_expectations = (evaluated_expectations - successful_expectations)
success = (successful_expectations == evaluated_expectations)
try:
success_percent = ((successful_expectations / evaluated_expectations) * 100)
except ZeroDivisionError:
success_percent = None
return ValidationStatistics(successful_expectations=successful_expectations, evaluated_expectations=evaluated_expectations, unsuccessful_expectations=unsuccessful_expectations, success=success, success_percent=success_percent) |
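A worked illustration of the statistics computed above; the helper only reads each result's success flag, so simple stand-in objects are enough and no real validation results are needed.

# Worked illustration with stand-in result objects (only `.success` is accessed).
from collections import namedtuple

R = namedtuple('R', 'success')
stats = _calc_validation_statistics([R(True), R(True), R(False), R(True)])
# -> evaluated_expectations=4, successful_expectations=3, unsuccessful_expectations=1,
#    success=False, success_percent=75.0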
def __init__(self, *args, **kwargs):
"\n Initialize the DataAsset.\n\n :param profiler (profiler class) = None: The profiler that should be run on the data_asset to\n build a baseline expectation suite.\n\n Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a\n Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments\n so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of\n *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the\n support for the profiler parameter not obvious from the signature.\n\n "
interactive_evaluation = kwargs.pop('interactive_evaluation', True)
profiler = kwargs.pop('profiler', None)
expectation_suite = kwargs.pop('expectation_suite', None)
expectation_suite_name = kwargs.pop('expectation_suite_name', None)
data_context = kwargs.pop('data_context', None)
batch_kwargs = kwargs.pop('batch_kwargs', BatchKwargs(ge_batch_id=str(uuid.uuid1())))
batch_parameters = kwargs.pop('batch_parameters', {})
batch_markers = kwargs.pop('batch_markers', {})
if ('autoinspect_func' in kwargs):
warnings.warn('Autoinspect_func is no longer supported; use a profiler instead (migration is easy!).', category=DeprecationWarning)
super().__init__(*args, **kwargs)
self._config = {'interactive_evaluation': interactive_evaluation}
self._initialize_expectations(expectation_suite=expectation_suite, expectation_suite_name=expectation_suite_name)
self._data_context = data_context
self._batch_kwargs = BatchKwargs(batch_kwargs)
self._batch_markers = batch_markers
self._batch_parameters = batch_parameters
self._active_validation = False
if (profiler is not None):
profiler.profile(self)
if (data_context and hasattr(data_context, '_expectation_explorer_manager')):
self.set_default_expectation_argument('include_config', True) | -3,279,447,003,300,268,500 | Initialize the DataAsset.
:param profiler (profiler class) = None: The profiler that should be run on the data_asset to
build a baseline expectation suite.
Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a
Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments
so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of
*args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the
support for the profiler parameter not obvious from the signature. | great_expectations/data_asset/data_asset.py | __init__ | BSofo/great_expectations | python | def __init__(self, *args, **kwargs):
"\n Initialize the DataAsset.\n\n :param profiler (profiler class) = None: The profiler that should be run on the data_asset to\n build a baseline expectation suite.\n\n Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a\n Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments\n so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of\n *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the\n support for the profiler parameter not obvious from the signature.\n\n "
interactive_evaluation = kwargs.pop('interactive_evaluation', True)
profiler = kwargs.pop('profiler', None)
expectation_suite = kwargs.pop('expectation_suite', None)
expectation_suite_name = kwargs.pop('expectation_suite_name', None)
data_context = kwargs.pop('data_context', None)
batch_kwargs = kwargs.pop('batch_kwargs', BatchKwargs(ge_batch_id=str(uuid.uuid1())))
batch_parameters = kwargs.pop('batch_parameters', {})
batch_markers = kwargs.pop('batch_markers', {})
if ('autoinspect_func' in kwargs):
warnings.warn('Autoinspect_func is no longer supported; use a profiler instead (migration is easy!).', category=DeprecationWarning)
super().__init__(*args, **kwargs)
self._config = {'interactive_evaluation': interactive_evaluation}
self._initialize_expectations(expectation_suite=expectation_suite, expectation_suite_name=expectation_suite_name)
self._data_context = data_context
self._batch_kwargs = BatchKwargs(batch_kwargs)
self._batch_markers = batch_markers
self._batch_parameters = batch_parameters
self._active_validation = False
if (profiler is not None):
profiler.profile(self)
if (data_context and hasattr(data_context, '_expectation_explorer_manager')):
self.set_default_expectation_argument('include_config', True) |
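To make the kwarg handling above concrete, here is a minimal construction sketch. It assumes the legacy great_expectations.dataset.PandasDataset entry point mentioned in the docstring (a DataAsset that is also a pandas DataFrame); the column and suite names are invented for illustration.
import pandas as pd
from great_expectations.dataset import PandasDataset  # assumed legacy import path

df = pd.DataFrame({"price": [1.5, 2.0, 3.25]})

# expectation_suite_name and interactive_evaluation are among the kwargs popped
# by DataAsset.__init__ before the remainder is forwarded to the DataFrame parent.
asset = PandasDataset(df, expectation_suite_name="price_checks", interactive_evaluation=True)

suite = asset.get_expectation_suite(suppress_logging=True)
print(suite.expectation_suite_name)  # price_checks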
def autoinspect(self, profiler):
'Deprecated: use profile instead.\n\n Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n\n Returns:\n tuple(expectation_suite, validation_results)\n '
warnings.warn("The term autoinspect is deprecated and will be removed in a future release. Please use 'profile' instead.")
(expectation_suite, validation_results) = profiler.profile(self)
return (expectation_suite, validation_results) | 6,257,769,304,988,778,000 | Deprecated: use profile instead.
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
Returns:
tuple(expectation_suite, validation_results) | great_expectations/data_asset/data_asset.py | autoinspect | BSofo/great_expectations | python | def autoinspect(self, profiler):
'Deprecated: use profile instead.\n\n Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n\n Returns:\n tuple(expectation_suite, validation_results)\n '
warnings.warn("The term autoinspect is deprecated and will be removed in a future release. Please use 'profile' instead.")
(expectation_suite, validation_results) = profiler.profile(self)
return (expectation_suite, validation_results) |
def profile(self, profiler, profiler_configuration=None):
'Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n profiler_configuration: Optional profiler configuration dict\n\n Returns:\n tuple(expectation_suite, validation_results)\n\n '
(expectation_suite, validation_results) = profiler.profile(self, profiler_configuration)
return (expectation_suite, validation_results) | -7,188,949,196,076,871,000 | Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
profiler_configuration: Optional profiler configuration dict
Returns:
tuple(expectation_suite, validation_results) | great_expectations/data_asset/data_asset.py | profile | BSofo/great_expectations | python | def profile(self, profiler, profiler_configuration=None):
'Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n profiler_configuration: Optional profiler configuration dict\n\n Returns:\n tuple(expectation_suite, validation_results)\n\n '
(expectation_suite, validation_results) = profiler.profile(self, profiler_configuration)
return (expectation_suite, validation_results) |
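The profiler argument is duck-typed: anything exposing profile(data_asset, configuration=None) and returning an (expectation_suite, validation_results) pair satisfies the contract. ToyProfiler below is a made-up illustration of that interface, not a profiler shipped with the library, and the PandasDataset import path is the same assumption as in the earlier sketch.
import pandas as pd
from great_expectations.dataset import PandasDataset  # assumed legacy import path

class ToyProfiler:
    """Illustrative stand-in satisfying the implicit profiler interface."""

    @classmethod
    def profile(cls, data_asset, configuration=None):
        # Record a couple of expectations against the asset...
        data_asset.expect_column_to_exist("price")
        data_asset.expect_column_values_to_not_be_null("price")
        # ...then hand back the suite plus a validation run, as profile() expects.
        suite = data_asset.get_expectation_suite(suppress_logging=True)
        return suite, data_asset.validate(expectation_suite=suite)

asset = PandasDataset(pd.DataFrame({"price": [1.5, 2.0, 3.25]}))
suite, results = asset.profile(ToyProfiler)
print(results.success)  # True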
@classmethod
def expectation(cls, method_arg_names):
"Manages configuration and running of expectation objects.\n\n Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.\n\n Args:\n method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation (typically the result of inspection). Positional arguments are explicitly mapped to keyword arguments when the expectation is run.\n\n Notes:\n Intermediate decorators that call the core @expectation decorator will most likely need to pass their decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the signature from the implementing method.\n\n @expectation intercepts and takes action based on the following parameters:\n * include_config (boolean or None) : If True, then include the generated expectation config as part of the result object. For more detail, see :ref:`include_config`.\n * catch_exceptions (boolean or None) : If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.\n * result_format (str or None) : Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.\n For more detail, see :ref:`result_format <result_format>`.\n * meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`.\n "
def outer_wrapper(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
method_name = func.__name__
all_args = dict(zip(method_arg_names, args))
all_args.update(kwargs)
if ('include_config' in kwargs):
include_config = kwargs['include_config']
del all_args['include_config']
else:
include_config = self.default_expectation_args['include_config']
if ('catch_exceptions' in kwargs):
catch_exceptions = kwargs['catch_exceptions']
del all_args['catch_exceptions']
else:
catch_exceptions = self.default_expectation_args['catch_exceptions']
if ('result_format' in kwargs):
result_format = kwargs['result_format']
else:
result_format = self.default_expectation_args['result_format']
if ('meta' in kwargs):
meta = kwargs['meta']
del all_args['meta']
else:
meta = None
argspec = inspect.getfullargspec(func)[0][1:]
if ('result_format' in argspec):
all_args['result_format'] = result_format
elif ('result_format' in all_args):
del all_args['result_format']
all_args = recursively_convert_to_json_serializable(all_args)
expectation_args = copy.deepcopy(all_args)
if self._expectation_suite.evaluation_parameters:
(evaluation_args, substituted_parameters) = build_evaluation_parameters(expectation_args, self._expectation_suite.evaluation_parameters, self._config.get('interactive_evaluation', True), self._data_context)
else:
(evaluation_args, substituted_parameters) = build_evaluation_parameters(expectation_args, None, self._config.get('interactive_evaluation', True), self._data_context)
expectation_config = ExpectationConfiguration(expectation_type=method_name, kwargs=expectation_args, meta=meta)
raised_exception = False
exception_traceback = None
exception_message = None
if (self._config.get('interactive_evaluation', True) or self._active_validation):
try:
return_obj = func(self, **evaluation_args)
if isinstance(return_obj, dict):
return_obj = ExpectationValidationResult(**return_obj)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
exception_message = '{}: {}'.format(type(err).__name__, str(err))
return_obj = ExpectationValidationResult(success=False)
else:
raise err
else:
return_obj = ExpectationValidationResult(expectation_config=copy.deepcopy(expectation_config))
if (self._active_validation is True):
stored_config = expectation_config
else:
stored_config = self._expectation_suite.add_expectation(expectation_config)
if include_config:
return_obj.expectation_config = copy.deepcopy(stored_config)
if (return_obj.success is not None):
stored_config.success_on_last_run = return_obj.success
if catch_exceptions:
return_obj.exception_info = {'raised_exception': raised_exception, 'exception_message': exception_message, 'exception_traceback': exception_traceback}
if (len(substituted_parameters) > 0):
if (meta is None):
meta = dict()
meta['substituted_parameters'] = substituted_parameters
if (meta is not None):
return_obj.meta = meta
return_obj = recursively_convert_to_json_serializable(return_obj)
if (self._data_context is not None):
return_obj = self._data_context.update_return_obj(self, return_obj)
return return_obj
return wrapper
return outer_wrapper | 4,132,695,184,668,145,700 | Manages configuration and running of expectation objects.
Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.
Args:
method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation (typically the result of inspection). Positional arguments are explicitly mapped to keyword arguments when the expectation is run.
Notes:
Intermediate decorators that call the core @expectation decorator will most likely need to pass their decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the signature from the implementing method.
@expectation intercepts and takes action based on the following parameters:
* include_config (boolean or None) : If True, then include the generated expectation config as part of the result object. For more detail, see :ref:`include_config`.
* catch_exceptions (boolean or None) : If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.
* result_format (str or None) : Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
* meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. | great_expectations/data_asset/data_asset.py | expectation | BSofo/great_expectations | python | @classmethod
def expectation(cls, method_arg_names):
"Manages configuration and running of expectation objects.\n\n Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.\n\n Args:\n method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation (typically the result of inspection). Positional arguments are explicitly mapped to keyword arguments when the expectation is run.\n\n Notes:\n Intermediate decorators that call the core @expectation decorator will most likely need to pass their decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the signature from the implementing method.\n\n @expectation intercepts and takes action based on the following parameters:\n * include_config (boolean or None) : If True, then include the generated expectation config as part of the result object. For more detail, see :ref:`include_config`.\n * catch_exceptions (boolean or None) : If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.\n * result_format (str or None) : Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.\n For more detail, see :ref:`result_format <result_format>`.\n * meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`.\n "
def outer_wrapper(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
method_name = func.__name__
all_args = dict(zip(method_arg_names, args))
all_args.update(kwargs)
if ('include_config' in kwargs):
include_config = kwargs['include_config']
del all_args['include_config']
else:
include_config = self.default_expectation_args['include_config']
if ('catch_exceptions' in kwargs):
catch_exceptions = kwargs['catch_exceptions']
del all_args['catch_exceptions']
else:
catch_exceptions = self.default_expectation_args['catch_exceptions']
if ('result_format' in kwargs):
result_format = kwargs['result_format']
else:
result_format = self.default_expectation_args['result_format']
if ('meta' in kwargs):
meta = kwargs['meta']
del all_args['meta']
else:
meta = None
argspec = inspect.getfullargspec(func)[0][1:]
if ('result_format' in argspec):
all_args['result_format'] = result_format
elif ('result_format' in all_args):
del all_args['result_format']
all_args = recursively_convert_to_json_serializable(all_args)
expectation_args = copy.deepcopy(all_args)
if self._expectation_suite.evaluation_parameters:
(evaluation_args, substituted_parameters) = build_evaluation_parameters(expectation_args, self._expectation_suite.evaluation_parameters, self._config.get('interactive_evaluation', True), self._data_context)
else:
(evaluation_args, substituted_parameters) = build_evaluation_parameters(expectation_args, None, self._config.get('interactive_evaluation', True), self._data_context)
expectation_config = ExpectationConfiguration(expectation_type=method_name, kwargs=expectation_args, meta=meta)
raised_exception = False
exception_traceback = None
exception_message = None
if (self._config.get('interactive_evaluation', True) or self._active_validation):
try:
return_obj = func(self, **evaluation_args)
if isinstance(return_obj, dict):
return_obj = ExpectationValidationResult(**return_obj)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
exception_message = '{}: {}'.format(type(err).__name__, str(err))
return_obj = ExpectationValidationResult(success=False)
else:
raise err
else:
return_obj = ExpectationValidationResult(expectation_config=copy.deepcopy(expectation_config))
if (self._active_validation is True):
stored_config = expectation_config
else:
stored_config = self._expectation_suite.add_expectation(expectation_config)
if include_config:
return_obj.expectation_config = copy.deepcopy(stored_config)
if (return_obj.success is not None):
stored_config.success_on_last_run = return_obj.success
if catch_exceptions:
return_obj.exception_info = {'raised_exception': raised_exception, 'exception_message': exception_message, 'exception_traceback': exception_traceback}
if (len(substituted_parameters) > 0):
if (meta is None):
meta = dict()
meta['substituted_parameters'] = substituted_parameters
if (meta is not None):
return_obj.meta = meta
return_obj = recursively_convert_to_json_serializable(return_obj)
if (self._data_context is not None):
return_obj = self._data_context.update_return_obj(self, return_obj)
return return_obj
return wrapper
return outer_wrapper |
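As the notes above indicate, this decorator is what subclasses use to register new expectation types. A minimal sketch, again assuming the legacy import paths; the class, method, and column names are invented. Returning a plain dict is enough because the wrapper converts it to an ExpectationValidationResult.
import pandas as pd
from great_expectations.data_asset import DataAsset   # assumed import path
from great_expectations.dataset import PandasDataset  # assumed legacy import path

class CustomPandasDataset(PandasDataset):

    @DataAsset.expectation(["column", "minimum"])
    def expect_column_sum_to_be_at_least(self, column, minimum):
        observed = float(self[column].sum())
        # A dict with at least a "success" key is coerced by the wrapper.
        return {"success": observed >= minimum,
                "result": {"observed_value": observed}}

asset = CustomPandasDataset(pd.DataFrame({"price": [1.5, 2.0, 3.25]}))
print(asset.expect_column_sum_to_be_at_least("price", 5.0).success)  # True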
def _initialize_expectations(self, expectation_suite=None, expectation_suite_name=None):
"Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.\n In addition, this always sets the `default_expectation_args` to:\n `include_config`: False,\n `catch_exceptions`: False,\n `output_format`: 'BASIC'\n\n By default, initializes data_asset_type to the name of the implementing class, but subclasses\n that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their\n interoperability.\n\n Args:\n expectation_suite (json): A json-serializable expectation config. If None, creates default `_expectation_suite` with an empty list of expectations and key value `data_asset_name` as `data_asset_name`.\n\n expectation_suite_name (string): The name to assign to the `expectation_suite.expectation_suite_name`\n\n Returns:\n None\n "
if (expectation_suite is not None):
if isinstance(expectation_suite, dict):
expectation_suite = expectationSuiteSchema.load(expectation_suite)
else:
expectation_suite = copy.deepcopy(expectation_suite)
self._expectation_suite = expectation_suite
if (expectation_suite_name is not None):
if (self._expectation_suite.expectation_suite_name != expectation_suite_name):
logger.warning('Overriding existing expectation_suite_name {n1} with new name {n2}'.format(n1=self._expectation_suite.expectation_suite_name, n2=expectation_suite_name))
self._expectation_suite.expectation_suite_name = expectation_suite_name
else:
if (expectation_suite_name is None):
expectation_suite_name = 'default'
self._expectation_suite = ExpectationSuite(expectation_suite_name=expectation_suite_name)
self._expectation_suite.data_asset_type = self._data_asset_type
self.default_expectation_args = {'include_config': True, 'catch_exceptions': False, 'result_format': 'BASIC'} | -3,262,480,724,169,079,000 | Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
In addition, this always sets the `default_expectation_args` to:
`include_config`: True,
`catch_exceptions`: False,
`result_format`: 'BASIC'
By default, initializes data_asset_type to the name of the implementing class, but subclasses
that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their
interoperability.
Args:
expectation_suite (json): A json-serializable expectation config. If None, creates default `_expectation_suite` with an empty list of expectations and key value `data_asset_name` as `data_asset_name`.
expectation_suite_name (string): The name to assign to the `expectation_suite.expectation_suite_name`
Returns:
None | great_expectations/data_asset/data_asset.py | _initialize_expectations | BSofo/great_expectations | python | def _initialize_expectations(self, expectation_suite=None, expectation_suite_name=None):
"Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.\n In addition, this always sets the `default_expectation_args` to:\n `include_config`: False,\n `catch_exceptions`: False,\n `output_format`: 'BASIC'\n\n By default, initializes data_asset_type to the name of the implementing class, but subclasses\n that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their\n interoperability.\n\n Args:\n expectation_suite (json): A json-serializable expectation config. If None, creates default `_expectation_suite` with an empty list of expectations and key value `data_asset_name` as `data_asset_name`.\n\n expectation_suite_name (string): The name to assign to the `expectation_suite.expectation_suite_name`\n\n Returns:\n None\n "
if (expectation_suite is not None):
if isinstance(expectation_suite, dict):
expectation_suite = expectationSuiteSchema.load(expectation_suite)
else:
expectation_suite = copy.deepcopy(expectation_suite)
self._expectation_suite = expectation_suite
if (expectation_suite_name is not None):
if (self._expectation_suite.expectation_suite_name != expectation_suite_name):
logger.warning('Overriding existing expectation_suite_name {n1} with new name {n2}'.format(n1=self._expectation_suite.expectation_suite_name, n2=expectation_suite_name))
self._expectation_suite.expectation_suite_name = expectation_suite_name
else:
if (expectation_suite_name is None):
expectation_suite_name = 'default'
self._expectation_suite = ExpectationSuite(expectation_suite_name=expectation_suite_name)
self._expectation_suite.data_asset_type = self._data_asset_type
self.default_expectation_args = {'include_config': True, 'catch_exceptions': False, 'result_format': 'BASIC'} |
def append_expectation(self, expectation_config):
'This method is a thin wrapper for ExpectationSuite.append_expectation'
warnings.warn(('append_expectation is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.add_expectation instead.'), DeprecationWarning)
self._expectation_suite.append_expectation(expectation_config) | 3,051,038,348,989,836,300 | This method is a thin wrapper for ExpectationSuite.append_expectation | great_expectations/data_asset/data_asset.py | append_expectation | BSofo/great_expectations | python | def append_expectation(self, expectation_config):
warnings.warn(('append_expectation is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.add_expectation instead.'), DeprecationWarning)
self._expectation_suite.append_expectation(expectation_config) |
def find_expectation_indexes(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[int]:
'This method is a thin wrapper for ExpectationSuite.find_expectation_indexes'
warnings.warn(('find_expectation_indexes is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.find_expectation_indexes instead.'), DeprecationWarning)
return self._expectation_suite.find_expectation_indexes(expectation_configuration=expectation_configuration, match_type=match_type) | -6,067,033,181,278,181,000 | This method is a thin wrapper for ExpectationSuite.find_expectation_indexes | great_expectations/data_asset/data_asset.py | find_expectation_indexes | BSofo/great_expectations | python | def find_expectation_indexes(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[int]:
warnings.warn(('find_expectation_indexes is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.find_expectation_indexes instead.'), DeprecationWarning)
return self._expectation_suite.find_expectation_indexes(expectation_configuration=expectation_configuration, match_type=match_type) |
def find_expectations(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[ExpectationConfiguration]:
'This method is a thin wrapper for ExpectationSuite.find_expectations()'
warnings.warn(('find_expectations is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.find_expectation_indexes instead.'), DeprecationWarning)
return self._expectation_suite.find_expectations(expectation_configuration=expectation_configuration, match_type=match_type) | -3,740,664,149,477,724,000 | This method is a thin wrapper for ExpectationSuite.find_expectations() | great_expectations/data_asset/data_asset.py | find_expectations | BSofo/great_expectations | python | def find_expectations(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[ExpectationConfiguration]:
warnings.warn(('find_expectations is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.find_expectation_indexes instead.'), DeprecationWarning)
return self._expectation_suite.find_expectations(expectation_configuration=expectation_configuration, match_type=match_type) |
def remove_expectation(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain', remove_multiple_matches: bool=False) -> List[ExpectationConfiguration]:
'This method is a thin wrapper for ExpectationSuite.remove()'
warnings.warn(('DataAsset.remove_expectations is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.remove_expectation instead.'), DeprecationWarning)
return self._expectation_suite.remove_expectation(expectation_configuration=expectation_configuration, match_type=match_type, remove_multiple_matches=remove_multiple_matches) | 5,106,026,801,216,445,000 | This method is a thin wrapper for ExpectationSuite.remove() | great_expectations/data_asset/data_asset.py | remove_expectation | BSofo/great_expectations | python | def remove_expectation(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain', remove_multiple_matches: bool=False) -> List[ExpectationConfiguration]:
warnings.warn(('DataAsset.remove_expectations is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.remove_expectation instead.'), DeprecationWarning)
return self._expectation_suite.remove_expectation(expectation_configuration=expectation_configuration, match_type=match_type, remove_multiple_matches=remove_multiple_matches) |
def get_default_expectation_arguments(self):
'Fetch default expectation arguments for this data_asset\n\n Returns:\n A dictionary containing all the current default expectation arguments for a data_asset\n\n Ex::\n\n {\n "include_config" : True,\n "catch_exceptions" : False,\n "result_format" : \'BASIC\'\n }\n\n See also:\n set_default_expectation_arguments\n '
return self.default_expectation_args | -6,200,447,437,952,300,000 | Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"include_config" : True,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments | great_expectations/data_asset/data_asset.py | get_default_expectation_arguments | BSofo/great_expectations | python | def get_default_expectation_arguments(self):
'Fetch default expectation arguments for this data_asset\n\n Returns:\n A dictionary containing all the current default expectation arguments for a data_asset\n\n Ex::\n\n {\n "include_config" : True,\n "catch_exceptions" : False,\n "result_format" : \'BASIC\'\n }\n\n See also:\n set_default_expectation_arguments\n '
return self.default_expectation_args |
def set_default_expectation_argument(self, argument, value):
'Set a default expectation argument for this data_asset\n\n Args:\n argument (string): The argument to be replaced\n value : The New argument to use for replacement\n\n Returns:\n None\n\n See also:\n get_default_expectation_arguments\n '
self.default_expectation_args[argument] = value | 1,081,044,049,782,888,400 | Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments | great_expectations/data_asset/data_asset.py | set_default_expectation_argument | BSofo/great_expectations | python | def set_default_expectation_argument(self, argument, value):
'Set a default expectation argument for this data_asset\n\n Args:\n argument (string): The argument to be replaced\n value : The New argument to use for replacement\n\n Returns:\n None\n\n See also:\n get_default_expectation_arguments\n '
self.default_expectation_args[argument] = value |
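Reading and overriding the defaults looks like the following; the PandasDataset construction is the same assumption as in the earlier sketches.
import pandas as pd
from great_expectations.dataset import PandasDataset  # assumed legacy import path

asset = PandasDataset(pd.DataFrame({"price": [1.5, 2.0, 3.25]}))
print(asset.get_default_expectation_arguments())
# {'include_config': True, 'catch_exceptions': False, 'result_format': 'BASIC'}

# Every expectation run after this call returns the richer COMPLETE payload
# unless result_format is overridden per call.
asset.set_default_expectation_argument("result_format", "COMPLETE")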
def get_expectation_suite(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False):
'Returns _expectation_config as a JSON object, and perform some cleaning along the way.\n\n Args:\n discard_failed_expectations (boolean): Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.\n discard_result_format_kwargs (boolean): In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.\n discard_include_config_kwargs (boolean): In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.\n discard_catch_exceptions_kwargs (boolean): In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.\n suppress_warnings (boolean): If true, do not include warnings in logging information about the operation.\n suppress_logging (boolean): If true, do not create a log entry (useful when using get_expectation_suite programmatically)\n\n Returns:\n An expectation suite.\n\n Note:\n get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a copy of _expectation_suite, not the original object.\n '
expectation_suite = copy.deepcopy(self._expectation_suite)
expectations = expectation_suite.expectations
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
if (expectation.success_on_last_run is False):
discards['failed_expectations'] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
message = ('\t%d expectation(s) included in expectation_suite.' % len(expectations))
if ((discards['failed_expectations'] > 0) and (not suppress_warnings)):
message += (' Omitting %d expectation(s) that failed when last run; set discard_failed_expectations=False to include them.' % discards['failed_expectations'])
for expectation in expectations:
expectation.success_on_last_run = None
if discard_result_format_kwargs:
if ('result_format' in expectation.kwargs):
del expectation.kwargs['result_format']
discards['result_format'] += 1
if discard_include_config_kwargs:
if ('include_config' in expectation.kwargs):
del expectation.kwargs['include_config']
discards['include_config'] += 1
if discard_catch_exceptions_kwargs:
if ('catch_exceptions' in expectation.kwargs):
del expectation.kwargs['catch_exceptions']
discards['catch_exceptions'] += 1
settings_message = ''
if ((discards['result_format'] > 0) and (not suppress_warnings)):
settings_message += ' result_format'
if ((discards['include_config'] > 0) and (not suppress_warnings)):
settings_message += ' include_config'
if ((discards['catch_exceptions'] > 0) and (not suppress_warnings)):
settings_message += ' catch_exceptions'
if (len(settings_message) > 1):
settings_message += ' settings filtered.'
expectation_suite.expectations = expectations
if (not suppress_logging):
logger.info((message + settings_message))
return expectation_suite | 3,306,050,817,204,586,500 | Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_config_kwargs (boolean): In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
suppress_warnings (boolean): If true, do not include warnings in logging information about the operation.
suppress_logging (boolean): If true, do not create a log entry (useful when using get_expectation_suite programmatically)
Returns:
An expectation suite.
Note:
get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a copy of _expectation_suite, not the original object. | great_expectations/data_asset/data_asset.py | get_expectation_suite | BSofo/great_expectations | python | def get_expectation_suite(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False):
'Returns _expectation_config as a JSON object, and perform some cleaning along the way.\n\n Args:\n discard_failed_expectations (boolean): Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.\n discard_result_format_kwargs (boolean): In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.\n discard_include_config_kwargs (boolean): In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.\n discard_catch_exceptions_kwargs (boolean): In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.\n suppress_warnings (boolean): If true, do not include warnings in logging information about the operation.\n suppress_logging (boolean): If true, do not create a log entry (useful when using get_expectation_suite programmatically)\n\n Returns:\n An expectation suite.\n\n Note:\n get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a copy of _expectation_suite, not the original object.\n '
expectation_suite = copy.deepcopy(self._expectation_suite)
expectations = expectation_suite.expectations
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
if (expectation.success_on_last_run is False):
discards['failed_expectations'] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
message = ('\t%d expectation(s) included in expectation_suite.' % len(expectations))
if ((discards['failed_expectations'] > 0) and (not suppress_warnings)):
message += (' Omitting %d expectation(s) that failed when last run; set discard_failed_expectations=False to include them.' % discards['failed_expectations'])
for expectation in expectations:
expectation.success_on_last_run = None
if discard_result_format_kwargs:
if ('result_format' in expectation.kwargs):
del expectation.kwargs['result_format']
discards['result_format'] += 1
if discard_include_config_kwargs:
if ('include_config' in expectation.kwargs):
del expectation.kwargs['include_config']
discards['include_config'] += 1
if discard_catch_exceptions_kwargs:
if ('catch_exceptions' in expectation.kwargs):
del expectation.kwargs['catch_exceptions']
discards['catch_exceptions'] += 1
settings_message = ''
if ((discards['result_format'] > 0) and (not suppress_warnings)):
settings_message += ' result_format'
if ((discards['include_config'] > 0) and (not suppress_warnings)):
settings_message += ' include_config'
if ((discards['catch_exceptions'] > 0) and (not suppress_warnings)):
settings_message += ' catch_exceptions'
if (len(settings_message) > 1):
settings_message += ' settings filtered.'
expectation_suite.expectations = expectations
if (not suppress_logging):
logger.info((message + settings_message))
return expectation_suite |
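Putting the discard flags together: the sketch below records one passing and one failing expectation, then exports the suite twice to show the effect of discard_failed_expectations. The PandasDataset path is assumed as before.
import pandas as pd
from great_expectations.dataset import PandasDataset  # assumed legacy import path

asset = PandasDataset(pd.DataFrame({"price": [1.5, 2.0, 3.25]}))
asset.expect_column_values_to_not_be_null("price")        # passes
asset.expect_column_values_to_be_between("price", 0, 2)   # fails for 3.25

kept = asset.get_expectation_suite(discard_failed_expectations=False,
                                   suppress_logging=True)
trimmed = asset.get_expectation_suite(suppress_logging=True)  # drops the failure
print(len(kept.expectations), len(trimmed.expectations))      # 2 1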
def save_expectation_suite(self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):
"Writes ``_expectation_config`` to a JSON file.\n\n Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from the JSON expectations config.\n\n Args:\n filepath (string): The location and name to write the JSON config file to.\n discard_failed_expectations (boolean): If True, excludes expectations that do not return ``success = True``. If False, all expectations are written to the JSON config file.\n discard_result_format_kwargs (boolean): If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config file.\n discard_include_config_kwargs (boolean): If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config file.\n discard_catch_exceptions_kwargs (boolean): If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON config file.\n suppress_warnings (boolean): It True, all warnings raised by Great Expectations, as a result of dropped expectations, are suppressed.\n\n "
expectation_suite = self.get_expectation_suite(discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings)
if ((filepath is None) and (self._data_context is not None)):
self._data_context.save_expectation_suite(expectation_suite)
elif (filepath is not None):
with open(filepath, 'w') as outfile:
json.dump(expectationSuiteSchema.dump(expectation_suite), outfile, indent=2, sort_keys=True)
else:
raise ValueError('Unable to save config: filepath or data_context must be available.') | 6,608,268,073,604,393,000 | Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from the JSON expectations config.
Args:
filepath (string): The location and name to write the JSON config file to.
discard_failed_expectations (boolean): If True, excludes expectations that do not return ``success = True``. If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config file.
discard_include_config_kwargs (boolean): If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config file.
discard_catch_exceptions_kwargs (boolean): If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON config file.
suppress_warnings (boolean): If True, all warnings raised by Great Expectations, as a result of dropped expectations, are suppressed. | great_expectations/data_asset/data_asset.py | save_expectation_suite | BSofo/great_expectations | python | def save_expectation_suite(self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):
"Writes ``_expectation_config`` to a JSON file.\n\n Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from the JSON expectations config.\n\n Args:\n filepath (string): The location and name to write the JSON config file to.\n discard_failed_expectations (boolean): If True, excludes expectations that do not return ``success = True``. If False, all expectations are written to the JSON config file.\n discard_result_format_kwargs (boolean): If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config file.\n discard_include_config_kwargs (boolean): If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config file.\n discard_catch_exceptions_kwargs (boolean): If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON config file.\n suppress_warnings (boolean): It True, all warnings raised by Great Expectations, as a result of dropped expectations, are suppressed.\n\n "
expectation_suite = self.get_expectation_suite(discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings)
if ((filepath is None) and (self._data_context is not None)):
self._data_context.save_expectation_suite(expectation_suite)
elif (filepath is not None):
with open(filepath, 'w') as outfile:
json.dump(expectationSuiteSchema.dump(expectation_suite), outfile, indent=2, sort_keys=True)
else:
raise ValueError('Unable to save config: filepath or data_context must be available.') |
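A round trip through a plain JSON file, plus a validation run against the saved suite (validate, defined next, accepts a filepath and reloads the suite from disk). The file name and column are illustrative, the PandasDataset path is assumed as before, and the printed key assumes the serialized schema stores the suite name under expectation_suite_name.
import json
import pandas as pd
from great_expectations.dataset import PandasDataset  # assumed legacy import path

asset = PandasDataset(pd.DataFrame({"price": [1.5, 2.0, 3.25]}))
asset.expect_column_values_to_not_be_null("price")

asset.save_expectation_suite("price_checks.json")      # serialized via json.dump
with open("price_checks.json") as f:
    print(json.load(f)["expectation_suite_name"])       # "default" (assumed key name)

# validate() also accepts a filepath and reloads the suite before running it.
results = asset.validate(expectation_suite="price_checks.json")
print(results.success)  # True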
def validate(self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None):
'Generates a JSON-formatted report describing the outcome of all expectations.\n\n Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.\n\n Args:\n expectation_suite (json or None): If None, uses the expectations config generated with the DataAsset during the current session. If a JSON file, validates those expectations.\n run_name (str): Used to identify this validation result as part of a collection of validations. See DataContext for more information.\n data_context (DataContext): A datacontext object to use as part of validation for binding evaluation parameters and registering validation results.\n evaluation_parameters (dict or None): If None, uses the evaluation_paramters from the expectation_suite provided or as part of the data_asset. If a dict, uses the evaluation parameters in the dictionary.\n catch_exceptions (boolean): If True, exceptions raised by tests will not end validation and will be described in the returned report.\n result_format (string or None): If None, uses the default value (\'BASIC\' or as specified). If string, the returned expectation output follows the specified format (\'BOOLEAN_ONLY\',\'BASIC\', etc.).\n only_return_failures (boolean): If True, expectation results are only returned when ``success = False`` \n Returns:\n A JSON-formatted dictionary containing a list of the validation results. An example of the returned format::\n\n {\n "results": [\n {\n "unexpected_list": [unexpected_value_1, unexpected_value_2],\n "expectation_type": "expect_*",\n "kwargs": {\n "column": "Column_Name",\n "output_format": "SUMMARY"\n },\n "success": true,\n "raised_exception: false.\n "exception_traceback": null\n },\n {\n ... (Second expectation results)\n },\n ... (More expectations results)\n ],\n "success": true,\n "statistics": {\n "evaluated_expectations": n,\n "successful_expectations": m,\n "unsuccessful_expectations": n - m,\n "success_percent": m / n\n }\n }\n\n Notes:\n If the configuration object was built with a different version of great expectations then the current environment. If no version was found in the configuration file.\n\n Raises:\n AttributeError - if \'catch_exceptions\'=None and an expectation throws an AttributeError\n '
try:
validation_time = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%S.%fZ')
assert ((not (run_id and run_name)) and (not (run_id and run_time))), 'Please provide either a run_id or run_name and/or run_time.'
if (isinstance(run_id, str) and (not run_name)):
warnings.warn('String run_ids will be deprecated in the future. Please provide a run_id of type RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name and run_time (both optional). Instead of providing a run_id, you may also provide run_name and run_time separately.', DeprecationWarning)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif (not isinstance(run_id, RunIdentifier)):
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
self._active_validation = True
validate__data_context = self._data_context
if ((data_context is None) and (self._data_context is not None)):
data_context = self._data_context
elif (data_context is not None):
self._data_context = data_context
results = []
if (expectation_suite is None):
expectation_suite = self.get_expectation_suite(discard_failed_expectations=False, discard_result_format_kwargs=False, discard_include_config_kwargs=False, discard_catch_exceptions_kwargs=False)
elif isinstance(expectation_suite, str):
try:
with open(expectation_suite) as infile:
expectation_suite = expectationSuiteSchema.loads(infile.read())
except ValidationError:
raise
except OSError:
raise GreatExpectationsError(('Unable to load expectation suite: IO error while reading %s' % expectation_suite))
elif (not isinstance(expectation_suite, ExpectationSuite)):
logger.error('Unable to validate using the provided value for expectation suite; does it need to be loaded from a dictionary?')
if getattr(data_context, '_usage_statistics_handler', None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(event='data_asset.validate', event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False)
return ExpectationValidationResult(success=False)
if (data_context is not None):
runtime_evaluation_parameters = data_context.evaluation_parameter_store.get_bind_params(run_id)
else:
runtime_evaluation_parameters = {}
if expectation_suite.evaluation_parameters:
runtime_evaluation_parameters.update(expectation_suite.evaluation_parameters)
if (evaluation_parameters is not None):
runtime_evaluation_parameters.update(evaluation_parameters)
runtime_evaluation_parameters = recursively_convert_to_json_serializable(runtime_evaluation_parameters)
suite_ge_version = (expectation_suite.meta.get('great_expectations_version') or expectation_suite.meta.get('great_expectations.__version__'))
if suite_ge_version:
if (suite_ge_version != ge_version):
warnings.warn(('WARNING: This configuration object was built using version %s of great_expectations, but is currently being validated by version %s.' % (suite_ge_version, ge_version)))
else:
warnings.warn('WARNING: No great_expectations version found in configuration object.')
columns = {}
for expectation in expectation_suite.expectations:
if (('column' in expectation.kwargs) and isinstance(expectation.kwargs['column'], Hashable)):
column = expectation.kwargs['column']
else:
column = '_nocolumn'
if (column not in columns):
columns[column] = []
columns[column].append(expectation)
expectations_to_evaluate = []
for col in columns:
expectations_to_evaluate.extend(columns[col])
for expectation in expectations_to_evaluate:
try:
expectation = copy.deepcopy(expectation)
expectation_method = getattr(self, expectation.expectation_type)
if (result_format is not None):
expectation.kwargs.update({'result_format': result_format})
(evaluation_args, substituted_parameters) = build_evaluation_parameters(expectation.kwargs, runtime_evaluation_parameters, self._config.get('interactive_evaluation', True), self._data_context)
result = expectation_method(catch_exceptions=catch_exceptions, include_config=True, **evaluation_args)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
result = ExpectationValidationResult(success=False, exception_info={'raised_exception': raised_exception, 'exception_traceback': exception_traceback, 'exception_message': str(err)})
else:
raise err
result.expectation_config = expectation
if (catch_exceptions and (result.exception_info is None)):
result.exception_info = {'raised_exception': False, 'exception_traceback': None, 'exception_message': None}
results.append(result)
statistics = _calc_validation_statistics(results)
if only_return_failures:
abbrev_results = []
for exp in results:
if (not exp.success):
abbrev_results.append(exp)
results = abbrev_results
expectation_suite_name = expectation_suite.expectation_suite_name
result = ExpectationSuiteValidationResult(results=results, success=statistics.success, statistics={'evaluated_expectations': statistics.evaluated_expectations, 'successful_expectations': statistics.successful_expectations, 'unsuccessful_expectations': statistics.unsuccessful_expectations, 'success_percent': statistics.success_percent}, evaluation_parameters=runtime_evaluation_parameters, meta={'great_expectations_version': ge_version, 'expectation_suite_name': expectation_suite_name, 'run_id': run_id, 'batch_kwargs': self.batch_kwargs, 'batch_markers': self.batch_markers, 'batch_parameters': self.batch_parameters, 'validation_time': validation_time})
self._data_context = validate__data_context
except Exception:
if getattr(data_context, '_usage_statistics_handler', None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(event='data_asset.validate', event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False)
raise
finally:
self._active_validation = False
if getattr(data_context, '_usage_statistics_handler', None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(event='data_asset.validate', event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=True)
return result | -2,197,131,871,844,403,700 | Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.
Args:
expectation_suite (json or None): If None, uses the expectations config generated with the DataAsset during the current session. If a JSON file, validates those expectations.
run_name (str): Used to identify this validation result as part of a collection of validations. See DataContext for more information.
data_context (DataContext): A datacontext object to use as part of validation for binding evaluation parameters and registering validation results.
evaluation_parameters (dict or None): If None, uses the evaluation_parameters from the expectation_suite provided or as part of the data_asset. If a dict, uses the evaluation parameters in the dictionary.
catch_exceptions (boolean): If True, exceptions raised by tests will not end validation and will be described in the returned report.
result_format (string or None): If None, uses the default value ('BASIC' or as specified). If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
only_return_failures (boolean): If True, expectation results are only returned when ``success = False``
Returns:
A JSON-formatted dictionary containing a list of the validation results. An example of the returned format::
{
"results": [
{
"unexpected_list": [unexpected_value_1, unexpected_value_2],
"expectation_type": "expect_*",
"kwargs": {
"column": "Column_Name",
"output_format": "SUMMARY"
},
"success": true,
"raised_exception: false.
"exception_traceback": null
},
{
... (Second expectation results)
},
... (More expectations results)
],
"success": true,
"statistics": {
"evaluated_expectations": n,
"successful_expectations": m,
"unsuccessful_expectations": n - m,
"success_percent": m / n
}
}
Notes:
A warning is issued if the configuration object was built with a different version of great_expectations than the current environment, or if no version was found in the configuration object.
Raises:
AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError | great_expectations/data_asset/data_asset.py | validate | BSofo/great_expectations | python | def validate(self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None):
'Generates a JSON-formatted report describing the outcome of all expectations.\n\n Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.\n\n Args:\n expectation_suite (json or None): If None, uses the expectations config generated with the DataAsset during the current session. If a JSON file, validates those expectations.\n run_name (str): Used to identify this validation result as part of a collection of validations. See DataContext for more information.\n data_context (DataContext): A datacontext object to use as part of validation for binding evaluation parameters and registering validation results.\n evaluation_parameters (dict or None): If None, uses the evaluation_paramters from the expectation_suite provided or as part of the data_asset. If a dict, uses the evaluation parameters in the dictionary.\n catch_exceptions (boolean): If True, exceptions raised by tests will not end validation and will be described in the returned report.\n result_format (string or None): If None, uses the default value (\'BASIC\' or as specified). If string, the returned expectation output follows the specified format (\'BOOLEAN_ONLY\',\'BASIC\', etc.).\n only_return_failures (boolean): If True, expectation results are only returned when ``success = False`` \n Returns:\n A JSON-formatted dictionary containing a list of the validation results. An example of the returned format::\n\n {\n "results": [\n {\n "unexpected_list": [unexpected_value_1, unexpected_value_2],\n "expectation_type": "expect_*",\n "kwargs": {\n "column": "Column_Name",\n "output_format": "SUMMARY"\n },\n "success": true,\n "raised_exception: false.\n "exception_traceback": null\n },\n {\n ... (Second expectation results)\n },\n ... (More expectations results)\n ],\n "success": true,\n "statistics": {\n "evaluated_expectations": n,\n "successful_expectations": m,\n "unsuccessful_expectations": n - m,\n "success_percent": m / n\n }\n }\n\n Notes:\n If the configuration object was built with a different version of great expectations then the current environment. If no version was found in the configuration file.\n\n Raises:\n AttributeError - if \'catch_exceptions\'=None and an expectation throws an AttributeError\n '
try:
validation_time = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%S.%fZ')
assert ((not (run_id and run_name)) and (not (run_id and run_time))), 'Please provide either a run_id or run_name and/or run_time.'
if (isinstance(run_id, str) and (not run_name)):
warnings.warn('String run_ids will be deprecated in the future. Please provide a run_id of type RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name and run_time (both optional). Instead of providing a run_id, you may also provide run_name and run_time separately.', DeprecationWarning)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif (not isinstance(run_id, RunIdentifier)):
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
self._active_validation = True
validate__data_context = self._data_context
if ((data_context is None) and (self._data_context is not None)):
data_context = self._data_context
elif (data_context is not None):
self._data_context = data_context
results = []
if (expectation_suite is None):
expectation_suite = self.get_expectation_suite(discard_failed_expectations=False, discard_result_format_kwargs=False, discard_include_config_kwargs=False, discard_catch_exceptions_kwargs=False)
elif isinstance(expectation_suite, str):
try:
with open(expectation_suite) as infile:
expectation_suite = expectationSuiteSchema.loads(infile.read())
except ValidationError:
raise
except OSError:
raise GreatExpectationsError(('Unable to load expectation suite: IO error while reading %s' % expectation_suite))
elif (not isinstance(expectation_suite, ExpectationSuite)):
logger.error('Unable to validate using the provided value for expectation suite; does it need to be loaded from a dictionary?')
if getattr(data_context, '_usage_statistics_handler', None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(event='data_asset.validate', event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False)
return ExpectationValidationResult(success=False)
if (data_context is not None):
runtime_evaluation_parameters = data_context.evaluation_parameter_store.get_bind_params(run_id)
else:
runtime_evaluation_parameters = {}
if expectation_suite.evaluation_parameters:
runtime_evaluation_parameters.update(expectation_suite.evaluation_parameters)
if (evaluation_parameters is not None):
runtime_evaluation_parameters.update(evaluation_parameters)
runtime_evaluation_parameters = recursively_convert_to_json_serializable(runtime_evaluation_parameters)
suite_ge_version = (expectation_suite.meta.get('great_expectations_version') or expectation_suite.meta.get('great_expectations.__version__'))
if suite_ge_version:
if (suite_ge_version != ge_version):
warnings.warn(('WARNING: This configuration object was built using version %s of great_expectations, but is currently being validated by version %s.' % (suite_ge_version, ge_version)))
else:
warnings.warn('WARNING: No great_expectations version found in configuration object.')
columns = {}
for expectation in expectation_suite.expectations:
if (('column' in expectation.kwargs) and isinstance(expectation.kwargs['column'], Hashable)):
column = expectation.kwargs['column']
else:
column = '_nocolumn'
if (column not in columns):
columns[column] = []
columns[column].append(expectation)
expectations_to_evaluate = []
for col in columns:
expectations_to_evaluate.extend(columns[col])
for expectation in expectations_to_evaluate:
try:
expectation = copy.deepcopy(expectation)
expectation_method = getattr(self, expectation.expectation_type)
if (result_format is not None):
expectation.kwargs.update({'result_format': result_format})
(evaluation_args, substituted_parameters) = build_evaluation_parameters(expectation.kwargs, runtime_evaluation_parameters, self._config.get('interactive_evaluation', True), self._data_context)
result = expectation_method(catch_exceptions=catch_exceptions, include_config=True, **evaluation_args)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
result = ExpectationValidationResult(success=False, exception_info={'raised_exception': raised_exception, 'exception_traceback': exception_traceback, 'exception_message': str(err)})
else:
raise err
result.expectation_config = expectation
if (catch_exceptions and (result.exception_info is None)):
result.exception_info = {'raised_exception': False, 'exception_traceback': None, 'exception_message': None}
results.append(result)
statistics = _calc_validation_statistics(results)
if only_return_failures:
abbrev_results = []
for exp in results:
if (not exp.success):
abbrev_results.append(exp)
results = abbrev_results
expectation_suite_name = expectation_suite.expectation_suite_name
result = ExpectationSuiteValidationResult(results=results, success=statistics.success, statistics={'evaluated_expectations': statistics.evaluated_expectations, 'successful_expectations': statistics.successful_expectations, 'unsuccessful_expectations': statistics.unsuccessful_expectations, 'success_percent': statistics.success_percent}, evaluation_parameters=runtime_evaluation_parameters, meta={'great_expectations_version': ge_version, 'expectation_suite_name': expectation_suite_name, 'run_id': run_id, 'batch_kwargs': self.batch_kwargs, 'batch_markers': self.batch_markers, 'batch_parameters': self.batch_parameters, 'validation_time': validation_time})
self._data_context = validate__data_context
except Exception:
if getattr(data_context, '_usage_statistics_handler', None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(event='data_asset.validate', event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False)
raise
finally:
self._active_validation = False
if getattr(data_context, '_usage_statistics_handler', None):
handler = data_context._usage_statistics_handler
handler.send_usage_message(event='data_asset.validate', event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=True)
return result |
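The validate() body above builds the full report. The following is a hypothetical usage sketch, not taken from the repository: it assumes the legacy Pandas-backed data asset API (great_expectations.read_csv), an illustrative file and column name, and relies only on parameters and return attributes visible in the code above.

```python
import great_expectations as ge

# Assumed: a local CSV and the legacy PandasDataset-style API; names are illustrative.
df = ge.read_csv("events.csv")
df.expect_column_values_to_not_be_null("event_id")

report = df.validate(result_format="SUMMARY", catch_exceptions=True, only_return_failures=False)
print(report.success)                        # overall pass/fail
print(report.statistics["success_percent"])  # from the statistics block assembled above
for r in report.results:
    print(r.expectation_config.expectation_type, r.success)
```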
def get_evaluation_parameter(self, parameter_name, default_value=None):
'Get an evaluation parameter value that has been stored in meta.\n\n Args:\n parameter_name (string): The name of the parameter to retrieve.\n default_value (any): The default value to be returned if the parameter is not found.\n\n Returns:\n The current value of the evaluation parameter.\n '
if (parameter_name in self._expectation_suite.evaluation_parameters):
return self._expectation_suite.evaluation_parameters[parameter_name]
else:
return default_value | 5,703,724,371,110,034,000 | Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to retrieve.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter. | great_expectations/data_asset/data_asset.py | get_evaluation_parameter | BSofo/great_expectations | python | def get_evaluation_parameter(self, parameter_name, default_value=None):
'Get an evaluation parameter value that has been stored in meta.\n\n Args:\n parameter_name (string): The name of the parameter to retrieve.\n default_value (any): The default value to be returned if the parameter is not found.\n\n Returns:\n The current value of the evaluation parameter.\n '
if (parameter_name in self._expectation_suite.evaluation_parameters):
return self._expectation_suite.evaluation_parameters[parameter_name]
else:
return default_value |
def set_evaluation_parameter(self, parameter_name, parameter_value):
'Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\n parameterized expectations.\n\n Args:\n parameter_name (string): The name of the kwarg to be replaced at evaluation time\n parameter_value (any): The value to be used\n '
self._expectation_suite.evaluation_parameters.update({parameter_name: parameter_value}) | -1,699,529,156,743,561,200 | Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used | great_expectations/data_asset/data_asset.py | set_evaluation_parameter | BSofo/great_expectations | python | def set_evaluation_parameter(self, parameter_name, parameter_value):
'Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\n parameterized expectations.\n\n Args:\n parameter_name (string): The name of the kwarg to be replaced at evaluation time\n parameter_value (any): The value to be used\n '
self._expectation_suite.evaluation_parameters.update({parameter_name: parameter_value}) |
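A short usage sketch for the evaluation-parameter getter/setter pair above; `df` stands in for any data asset instance and the parameter names are made up.

```python
# Hypothetical data asset instance `df`; parameter names are illustrative.
df.set_evaluation_parameter("upstream_row_count", 1000)

print(df.get_evaluation_parameter("upstream_row_count"))             # -> 1000
print(df.get_evaluation_parameter("not_set_yet", default_value=-1))  # falls back to -1
```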
@property
def expectation_suite_name(self):
'Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.'
return self._expectation_suite.expectation_suite_name | -395,357,649,999,344,200 | Gets the current expectation_suite name of this data_asset as stored in the expectations configuration. | great_expectations/data_asset/data_asset.py | expectation_suite_name | BSofo/great_expectations | python | @property
def expectation_suite_name(self):
return self._expectation_suite.expectation_suite_name |
@expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name):
'Sets the expectation_suite name of this data_asset as stored in the expectations configuration.'
self._expectation_suite.expectation_suite_name = expectation_suite_name | 2,893,231,240,521,078,000 | Sets the expectation_suite name of this data_asset as stored in the expectations configuration. | great_expectations/data_asset/data_asset.py | expectation_suite_name | BSofo/great_expectations | python | @expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name):
self._expectation_suite.expectation_suite_name = expectation_suite_name |
def _format_map_output(self, result_format, success, element_count, nonnull_count, unexpected_count, unexpected_list, unexpected_index_list):
'Helper function to construct expectation result objects for map_expectations (such as column_map_expectation\n and file_lines_map_expectation).\n\n Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.\n In each case, the object returned has a different set of populated fields.\n See :ref:`result_format` for more information.\n\n This function handles the logic for mapping those fields for column_map_expectations.\n '
result_format = parse_result_format(result_format)
return_obj = {'success': success}
if (result_format['result_format'] == 'BOOLEAN_ONLY'):
return return_obj
missing_count = (element_count - nonnull_count)
if (element_count > 0):
missing_percent = ((missing_count / element_count) * 100)
if (nonnull_count > 0):
unexpected_percent_total = ((unexpected_count / element_count) * 100)
unexpected_percent_nonmissing = ((unexpected_count / nonnull_count) * 100)
else:
unexpected_percent_total = None
unexpected_percent_nonmissing = None
else:
missing_percent = None
unexpected_percent_total = None
unexpected_percent_nonmissing = None
return_obj['result'] = {'element_count': element_count, 'missing_count': missing_count, 'missing_percent': missing_percent, 'unexpected_count': unexpected_count, 'unexpected_percent': unexpected_percent_nonmissing, 'unexpected_percent_total': unexpected_percent_total, 'unexpected_percent_nonmissing': unexpected_percent_nonmissing, 'partial_unexpected_list': unexpected_list[:result_format['partial_unexpected_count']]}
if (result_format['result_format'] == 'BASIC'):
return return_obj
if (0 < result_format.get('partial_unexpected_count')):
try:
partial_unexpected_counts = [{'value': key, 'count': value} for (key, value) in sorted(Counter(unexpected_list).most_common(result_format['partial_unexpected_count']), key=(lambda x: ((- x[1]), str(x[0]))))]
except TypeError:
partial_unexpected_counts = []
if ('details' not in return_obj['result']):
return_obj['result']['details'] = {}
return_obj['result']['details']['partial_unexpected_counts_error'] = 'partial_unexpected_counts requested, but requires a hashable type'
finally:
return_obj['result'].update({'partial_unexpected_index_list': (unexpected_index_list[:result_format['partial_unexpected_count']] if (unexpected_index_list is not None) else None), 'partial_unexpected_counts': partial_unexpected_counts})
if (result_format['result_format'] == 'SUMMARY'):
return return_obj
return_obj['result'].update({'unexpected_list': unexpected_list, 'unexpected_index_list': unexpected_index_list})
if (result_format['result_format'] == 'COMPLETE'):
return return_obj
raise ValueError('Unknown result_format {}.'.format(result_format['result_format'])) | 2,999,667,954,995,944,000 | Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations. | great_expectations/data_asset/data_asset.py | _format_map_output | BSofo/great_expectations | python | def _format_map_output(self, result_format, success, element_count, nonnull_count, unexpected_count, unexpected_list, unexpected_index_list):
'Helper function to construct expectation result objects for map_expectations (such as column_map_expectation\n and file_lines_map_expectation).\n\n Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.\n In each case, the object returned has a different set of populated fields.\n See :ref:`result_format` for more information.\n\n This function handles the logic for mapping those fields for column_map_expectations.\n '
result_format = parse_result_format(result_format)
return_obj = {'success': success}
if (result_format['result_format'] == 'BOOLEAN_ONLY'):
return return_obj
missing_count = (element_count - nonnull_count)
if (element_count > 0):
missing_percent = ((missing_count / element_count) * 100)
if (nonnull_count > 0):
unexpected_percent_total = ((unexpected_count / element_count) * 100)
unexpected_percent_nonmissing = ((unexpected_count / nonnull_count) * 100)
else:
unexpected_percent_total = None
unexpected_percent_nonmissing = None
else:
missing_percent = None
unexpected_percent_total = None
unexpected_percent_nonmissing = None
return_obj['result'] = {'element_count': element_count, 'missing_count': missing_count, 'missing_percent': missing_percent, 'unexpected_count': unexpected_count, 'unexpected_percent': unexpected_percent_nonmissing, 'unexpected_percent_total': unexpected_percent_total, 'unexpected_percent_nonmissing': unexpected_percent_nonmissing, 'partial_unexpected_list': unexpected_list[:result_format['partial_unexpected_count']]}
if (result_format['result_format'] == 'BASIC'):
return return_obj
if (0 < result_format.get('partial_unexpected_count')):
try:
partial_unexpected_counts = [{'value': key, 'count': value} for (key, value) in sorted(Counter(unexpected_list).most_common(result_format['partial_unexpected_count']), key=(lambda x: ((- x[1]), str(x[0]))))]
except TypeError:
partial_unexpected_counts = []
if ('details' not in return_obj['result']):
return_obj['result']['details'] = {}
return_obj['result']['details']['partial_unexpected_counts_error'] = 'partial_unexpected_counts requested, but requires a hashable type'
finally:
return_obj['result'].update({'partial_unexpected_index_list': (unexpected_index_list[:result_format['partial_unexpected_count']] if (unexpected_index_list is not None) else None), 'partial_unexpected_counts': partial_unexpected_counts})
if (result_format['result_format'] == 'SUMMARY'):
return return_obj
return_obj['result'].update({'unexpected_list': unexpected_list, 'unexpected_index_list': unexpected_index_list})
if (result_format['result_format'] == 'COMPLETE'):
return return_obj
raise ValueError('Unknown result_format {}.'.format(result_format['result_format'])) |
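To make the result_format branches above concrete, here is the approximate shape of the dictionary returned for result_format="SUMMARY", reconstructed from the code; all values are invented for illustration.

```python
# Example with element_count=100, nonnull_count=90, unexpected_count=3 (made-up numbers).
{
    "success": False,
    "result": {
        "element_count": 100,
        "missing_count": 10,
        "missing_percent": 10.0,
        "unexpected_count": 3,
        "unexpected_percent": 3.3333333333333335,        # same value as unexpected_percent_nonmissing
        "unexpected_percent_total": 3.0,                  # relative to all rows
        "unexpected_percent_nonmissing": 3.3333333333333335,
        "partial_unexpected_list": [-1, -7, 42],
        "partial_unexpected_index_list": [4, 17, 88],
        "partial_unexpected_counts": [
            {"value": -1, "count": 1},
            {"value": -7, "count": 1},
            {"value": 42, "count": 1},
        ],
    },
}
# "BOOLEAN_ONLY" stops at {"success": ...}; "BASIC" stops before partial_unexpected_index_list
# and partial_unexpected_counts; "COMPLETE" additionally includes unexpected_list and unexpected_index_list.
```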
def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
'Calculate success and percent_success for column_map_expectations\n\n Args:\n success_count (int): The number of successful values in the column\n nonnull_count (int): The number of nonnull values in the column\n mostly (float or None): A value between 0 and 1 (or None), indicating the fraction of successes required to pass the expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as a whole to succeed.\n\n Returns:\n success (boolean), percent_success (float)\n '
if isinstance(success_count, decimal.Decimal):
raise ValueError('success_count must not be a decimal; check your db configuration')
if isinstance(nonnull_count, decimal.Decimal):
raise ValueError('nonnull_count must not be a decimal; check your db configuration')
if (nonnull_count > 0):
percent_success = (success_count / nonnull_count)
if (mostly is not None):
success = bool((percent_success >= mostly))
else:
success = bool(((nonnull_count - success_count) == 0))
else:
success = True
percent_success = None
return (success, percent_success) | 1,063,625,033,990,201,300 | Calculate success and percent_success for column_map_expectations
Args:
success_count (int): The number of successful values in the column
nonnull_count (int): The number of nonnull values in the column
mostly (float or None): A value between 0 and 1 (or None), indicating the fraction of successes required to pass the expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as a whole to succeed.
Returns:
success (boolean), percent_success (float) | great_expectations/data_asset/data_asset.py | _calc_map_expectation_success | BSofo/great_expectations | python | def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
'Calculate success and percent_success for column_map_expectations\n\n Args:\n success_count (int): The number of successful values in the column\n nonnull_count (int): The number of nonnull values in the column\n mostly (float or None): A value between 0 and 1 (or None), indicating the fraction of successes required to pass the expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as a whole to succeed.\n\n Returns:\n success (boolean), percent_success (float)\n '
if isinstance(success_count, decimal.Decimal):
raise ValueError('success_count must not be a decimal; check your db configuration')
if isinstance(nonnull_count, decimal.Decimal):
raise ValueError('nonnull_count must not be a decimal; check your db configuration')
if (nonnull_count > 0):
percent_success = (success_count / nonnull_count)
if (mostly is not None):
success = bool((percent_success >= mostly))
else:
success = bool(((nonnull_count - success_count) == 0))
else:
success = True
percent_success = None
return (success, percent_success) |
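A standalone mirror of the success logic above, useful for sanity-checking the `mostly` semantics. This is an illustrative re-implementation, not the library function.

```python
def calc_success(success_count, nonnull_count, mostly):
    """Illustrative copy of _calc_map_expectation_success without the Decimal guards."""
    if nonnull_count > 0:
        percent_success = success_count / nonnull_count
        if mostly is not None:
            success = percent_success >= mostly                 # fraction of non-null values that passed
        else:
            success = (nonnull_count - success_count) == 0      # every non-null value must pass
    else:
        success, percent_success = True, None                   # vacuously true on an all-null column
    return success, percent_success

assert calc_success(95, 100, None) == (False, 0.95)
assert calc_success(95, 100, 0.9) == (True, 0.95)
assert calc_success(0, 0, 0.5) == (True, None)
```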
def test_expectation_function(self, function, *args, **kwargs):
'Test a generic expectation function\n\n Args:\n function (func): The function to be tested. (Must be a valid expectation function.)\n *args : Positional arguments to be passed to the function\n **kwargs : Keyword arguments to be passed to the function\n\n Returns:\n A JSON-serializable expectation result object.\n\n Notes:\n This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc. To use developed expectations from the command-line tool, you will still need to define custom classes, etc.\n\n Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.\n '
argspec = inspect.getfullargspec(function)[0][1:]
new_function = self.expectation(argspec)(function)
return new_function(self, *args, **kwargs) | 7,143,974,565,756,083,000 | Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc. To use developed expectations from the command-line tool, you will still need to define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information. | great_expectations/data_asset/data_asset.py | test_expectation_function | BSofo/great_expectations | python | def test_expectation_function(self, function, *args, **kwargs):
'Test a generic expectation function\n\n Args:\n function (func): The function to be tested. (Must be a valid expectation function.)\n *args : Positional arguments to be passed to the function\n **kwargs : Keyword arguments to be passed to the function\n\n Returns:\n A JSON-serializable expectation result object.\n\n Notes:\n This function is a thin layer to allow quick testing of new expectation functions, without having to define custom classes, etc. To use developed expectations from the command-line tool, you will still need to define custom classes, etc.\n\n Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.\n '
argspec = inspect.getfullargspec(function)[0][1:]
new_function = self.expectation(argspec)(function)
return new_function(self, *args, **kwargs) |
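A hedged sketch of how a throwaway expectation function might be tried out via test_expectation_function on a Pandas-backed data asset. The column name and the return-dict shape are assumptions for illustration, not taken from the repository.

```python
# Assumes `df` is a Pandas-backed great_expectations data asset; "revenue" is illustrative.
def expect_column_mean_to_be_positive(self, column):
    observed = self[column].mean()
    return {"success": bool(observed > 0), "result": {"observed_value": float(observed)}}

result = df.test_expectation_function(expect_column_mean_to_be_positive, "revenue")
print(result.success, result.result)
```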
def run(self):
' \n Build the command and run.\n Return list of file(s)\n '
contigs = self.data.contigfiles
reads = self.data.readsets
if (len(contigs) > 1):
raise Exception('Reapr: multiple contig files!')
if (len(reads) > 1):
self.out_module.write('WARNING: Reapr will use only one read library')
read_pair = reads[0].files
bamfile = os.path.join(self.outpath, 'out.bam')
cmd_args = [self.executable, 'smaltmap', contigs[0], read_pair[0], read_pair[1], bamfile]
self.arast_popen(cmd_args)
if (not os.path.exists(bamfile)):
raise ArastDataOutputError('REAPR: Unable to create alignment')
rpr_outpath = os.path.join(self.outpath, 'output')
cmd_args = [self.executable, 'pipeline', contigs[0], bamfile, rpr_outpath]
self.arast_popen(cmd_args)
for f in os.listdir(rpr_outpath):
old = os.path.join(rpr_outpath, f)
new = os.path.join(self.outpath, f)
os.rename(old, new)
broken = os.path.join(self.outpath, '04.break.broken_assembly.fa')
if os.path.exists(broken):
return {'contigs': [broken]} | -2,533,049,675,244,112,400 | Build the command and run.
Return list of file(s) | lib/assembly/plugins/reapr.py | run | levinas/assembly | python | def run(self):
' \n Build the command and run.\n Return list of file(s)\n '
contigs = self.data.contigfiles
reads = self.data.readsets
if (len(contigs) > 1):
raise Exception('Reapr: multiple contig files!')
if (len(reads) > 1):
self.out_module.write('WARNING: Reapr will use only one read library')
read_pair = reads[0].files
bamfile = os.path.join(self.outpath, 'out.bam')
cmd_args = [self.executable, 'smaltmap', contigs[0], read_pair[0], read_pair[1], bamfile]
self.arast_popen(cmd_args)
if (not os.path.exists(bamfile)):
raise ArastDataOutputError('REAPR: Unable to create alignment')
rpr_outpath = os.path.join(self.outpath, 'output')
cmd_args = [self.executable, 'pipeline', contigs[0], bamfile, rpr_outpath]
self.arast_popen(cmd_args)
for f in os.listdir(rpr_outpath):
old = os.path.join(rpr_outpath, f)
new = os.path.join(self.outpath, f)
os.rename(old, new)
broken = os.path.join(self.outpath, '04.break.broken_assembly.fa')
if os.path.exists(broken):
return {'contigs': [broken]} |
def setup(set_prefix=True):
'\n Configure the settings (this happens as a side effect of accessing the\n first setting), configure logging and populate the app registry.\n Set the thread-local urlresolvers script prefix if `set_prefix` is True.\n '
from django.apps import apps
from django.conf import settings
from django.urls import set_script_prefix
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
if set_prefix:
set_script_prefix(('/' if (settings.FORCE_SCRIPT_NAME is None) else settings.FORCE_SCRIPT_NAME))
apps.populate(settings.INSTALLED_APPS) | 3,197,155,097,738,839,000 | Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True. | [email protected]/Lib/site-packages/django/__init__.py | setup | nverbois/TFE21-232 | python | def setup(set_prefix=True):
'\n Configure the settings (this happens as a side effect of accessing the\n first setting), configure logging and populate the app registry.\n Set the thread-local urlresolvers script prefix if `set_prefix` is True.\n '
from django.apps import apps
from django.conf import settings
from django.urls import set_script_prefix
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
if set_prefix:
set_script_prefix(('/' if (settings.FORCE_SCRIPT_NAME is None) else settings.FORCE_SCRIPT_NAME))
apps.populate(settings.INSTALLED_APPS) |
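django.setup() above is what makes standalone scripts work; a typical invocation looks like this, where "mysite.settings" is a placeholder for your own settings module.

```python
import os

import django

# Point Django at a settings module before touching models or the app registry.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")  # placeholder module path
django.setup()

from django.apps import apps
print([app.label for app in apps.get_app_configs()])  # app registry is now populated
```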
def on_trial_complete(self, trial_id: str, result: Optional[Dict]=None, error: bool=False):
'Notification for the completion of trial.\n\n If a trial fails, it will be reported as a failed Observation, telling\n the optimizer that the Suggestion led to a metric failure, which\n updates the feasible region and improves parameter recommendation.\n\n Creates SigOpt Observation object for trial.\n '
if result:
payload = dict(suggestion=self._live_trial_mapping[trial_id], values=self.serialize_result(result))
self.conn.experiments(self.experiment.id).observations().create(**payload)
self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error:
self.conn.experiments(self.experiment.id).observations().create(failed=True, suggestion=self._live_trial_mapping[trial_id])
del self._live_trial_mapping[trial_id] | 5,290,662,398,531,859,000 | Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial. | python/ray/tune/suggest/sigopt.py | on_trial_complete | Actexpler/ray | python | def on_trial_complete(self, trial_id: str, result: Optional[Dict]=None, error: bool=False):
'Notification for the completion of trial.\n\n If a trial fails, it will be reported as a failed Observation, telling\n the optimizer that the Suggestion led to a metric failure, which\n updates the feasible region and improves parameter recommendation.\n\n Creates SigOpt Observation object for trial.\n '
if result:
payload = dict(suggestion=self._live_trial_mapping[trial_id], values=self.serialize_result(result))
self.conn.experiments(self.experiment.id).observations().create(**payload)
self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error:
self.conn.experiments(self.experiment.id).observations().create(failed=True, suggestion=self._live_trial_mapping[trial_id])
del self._live_trial_mapping[trial_id] |
@staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
'\n Converts metrics to https://app.sigopt.com/docs/objects/metric\n '
serialized_metric = []
for (metric, mode) in zip(metrics, modes):
serialized_metric.append(dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mode].copy()))
return serialized_metric | -1,739,824,465,971,723,000 | Converts metrics to https://app.sigopt.com/docs/objects/metric | python/ray/tune/suggest/sigopt.py | serialize_metric | Actexpler/ray | python | @staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
'\n \n '
serialized_metric = []
for (metric, mode) in zip(metrics, modes):
serialized_metric.append(dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mode].copy()))
return serialized_metric |
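serialize_metric above simply re-keys Tune metric/mode pairs into SigOpt metric specs. A hedged call sketch follows; the exact objective/strategy fields come from SigOptSearch.OBJECTIVE_MAP, which is defined elsewhere in the class and not shown here.

```python
# Metric names are illustrative; modes must be keys of OBJECTIVE_MAP (Tune's "min"/"max").
specs = SigOptSearch.serialize_metric(metrics=["mean_loss", "train_time"], modes=["min", "min"])
# Each entry is {"name": <metric>, ...objective settings from OBJECTIVE_MAP...},
# matching https://app.sigopt.com/docs/objects/metric
print(specs)
```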
def serialize_result(self, result: Dict):
'\n Converts experiments results to\n https://app.sigopt.com/docs/objects/metric_evaluation\n '
missing_scores = [metric for metric in self._metric if (metric not in result)]
if missing_scores:
raise ValueError(f'Some metrics specified during initialization are missing. Missing metrics: {missing_scores}, provided result {result}')
values = []
for metric in self._metric:
value = dict(name=metric, value=result[metric])
values.append(value)
return values | -4,923,203,362,273,260,000 | Converts experiments results to
https://app.sigopt.com/docs/objects/metric_evaluation | python/ray/tune/suggest/sigopt.py | serialize_result | Actexpler/ray | python | def serialize_result(self, result: Dict):
'\n Converts experiments results to\n https://app.sigopt.com/docs/objects/metric_evaluation\n '
missing_scores = [metric for metric in self._metric if (metric not in result)]
if missing_scores:
raise ValueError(f'Some metrics specified during initialization are missing. Missing metrics: {missing_scores}, provided result {result}')
values = []
for metric in self._metric:
value = dict(name=metric, value=result[metric])
values.append(value)
return values |
def replacing_yield(o, attr, val):
'Context manager to temporarily replace an attribute'
old = getattr(o, attr)
try:
(yield setattr(o, attr, val))
finally:
setattr(o, attr, old) | 7,309,409,930,689,528,000 | Context manager to temporarily replace an attribute | fastai2/learner.py | replacing_yield | akashpalrecha/fastai2 | python | def replacing_yield(o, attr, val):
old = getattr(o, attr)
try:
(yield setattr(o, attr, val))
finally:
setattr(o, attr, old) |
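replacing_yield is meant to be driven through contextlib; a small illustrative wrapper (class and names invented) shows the temporary swap-and-restore behaviour.

```python
from contextlib import contextmanager

class Reporter:
    logger = print          # class-level default

@contextmanager
def silenced(obj):
    # Delegate to replacing_yield (defined above) to swap `logger` for a no-op.
    yield from replacing_yield(obj, "logger", lambda *a, **k: None)

r = Reporter()
with silenced(r):
    r.logger("suppressed")   # no output: logger is temporarily replaced
r.logger("back to normal")   # prints: original logger restored on exit
```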
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
return (m if isinstance(m, Metric) else AvgMetric(m)) | 3,190,265,609,295,447,600 | Convert `m` to an `AvgMetric`, unless it's already a `Metric` | fastai2/learner.py | mk_metric | akashpalrecha/fastai2 | python | def mk_metric(m):
return (m if isinstance(m, Metric) else AvgMetric(m)) |
def save_model(file, model, opt, with_opt=True):
'Save `model` to `file` along with `opt` (if available, and if `with_opt`)'
if (opt is None):
with_opt = False
state = get_model(model).state_dict()
if with_opt:
state = {'model': state, 'opt': opt.state_dict()}
torch.save(state, file) | 815,293,515,183,298,400 | Save `model` to `file` along with `opt` (if available, and if `with_opt`) | fastai2/learner.py | save_model | akashpalrecha/fastai2 | python | def save_model(file, model, opt, with_opt=True):
if (opt is None):
with_opt = False
state = get_model(model).state_dict()
if with_opt:
state = {'model': state, 'opt': opt.state_dict()}
torch.save(state, file) |
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
'Load `model` from `file` along with `opt` (if available, and if `with_opt`)'
if isinstance(device, int):
device = torch.device('cuda', device)
elif (device is None):
device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = (set(state) == {'model', 'opt'})
model_state = (state['model'] if hasopt else state)
get_model(model).load_state_dict(model_state, strict=strict)
if (hasopt and ifnone(with_opt, True)):
try:
opt.load_state_dict(state['opt'])
except:
if with_opt:
warn('Could not load the optimizer state.')
elif with_opt:
warn("Saved file doesn't contain an optimizer state.")
if isinstance(device, int):
device = torch.device('cuda', device)
elif (device is None):
device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = (set(state) == {'model', 'opt'})
model_state = (state['model'] if hasopt else state)
get_model(model).load_state_dict(model_state, strict=strict)
if (hasopt and ifnone(with_opt, True)):
try:
opt.load_state_dict(state['opt'])
except:
if with_opt:
warn('Could not load the optimizer state.')
elif with_opt:
warn("Saved file doesn't contain an optimizer state.")
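A minimal round-trip sketch for save_model/load_model using plain torch objects. This assumes the fastai environment where these helpers (and get_model) are available; the file name is arbitrary.

```python
import torch
from torch import nn

model = nn.Linear(10, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

save_model("tmp_model.pth", model, opt, with_opt=True)            # stores {'model': ..., 'opt': ...}
load_model("tmp_model.pth", model, opt, with_opt=True, device="cpu")
```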
@patch
def export(self: Learner, fname='export.pkl'):
'Export the content of `self` without the items and the optimizer state for inference'
if rank_distrib():
return
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.catch_warnings():
warnings.simplefilter('ignore')
torch.save(self, (self.path / fname))
self.create_opt()
self.opt.load_state_dict(state)
self.dls = old_dbunch | -5,186,329,875,146,697,000 | Export the content of `self` without the items and the optimizer state for inference | fastai2/learner.py | export | akashpalrecha/fastai2 | python | @patch
def export(self: Learner, fname='export.pkl'):
if rank_distrib():
return
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.catch_warnings():
warnings.simplefilter('ignore')
torch.save(self, (self.path / fname))
self.create_opt()
self.opt.load_state_dict(state)
self.dls = old_dbunch |
def load_learner(fname, cpu=True):
'Load a `Learner` object in `fname`, optionally putting it on the `cpu`'
res = torch.load(fname, map_location=('cpu' if cpu else None))
if hasattr(res, 'to_fp32'):
res = res.to_fp32()
if cpu:
res.dls.cpu()
return res | -7,052,218,109,212,532,000 | Load a `Learner` object in `fname`, optionally putting it on the `cpu` | fastai2/learner.py | load_learner | akashpalrecha/fastai2 | python | def load_learner(fname, cpu=True):
res = torch.load(fname, map_location=('cpu' if cpu else None))
if hasattr(res, 'to_fp32'):
res = res.to_fp32()
if cpu:
res.dls.cpu()
return res |
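export() and load_learner() above form the usual train-then-serve pair. An illustrative workflow follows; the file name and `item` are placeholders, and a classification learner is assumed.

```python
learn.export("export.pkl")                          # strips data and optimizer state for inference
inf_learn = load_learner(learn.path / "export.pkl", cpu=True)
pred, pred_idx, probs = inf_learn.predict(item)     # `item` is whatever the learner was trained on
```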
@patch
def tta(self: Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
'Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation'
if (dl is None):
dl = self.dls[ds_idx]
if ((item_tfms is not None) or (batch_tfms is not None)):
dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self, 'progress'):
self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in (self.progress.mbar if hasattr(self, 'progress') else range(n)):
self.epoch = i
aug_preds.append(self.get_preds(ds_idx, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = (aug_preds.max(0)[0] if use_max else aug_preds.mean(0))
self.epoch = n
with dl.dataset.set_split_idx(1):
(preds, targs) = self.get_preds(ds_idx, inner=True)
if use_max:
return (torch.stack([preds, aug_preds], 0).max(0)[0], targs)
preds = ((aug_preds, preds) if (beta is None) else torch.lerp(aug_preds, preds, beta))
return (preds, targs) | 623,691,656,397,558,400 | Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation | fastai2/learner.py | tta | akashpalrecha/fastai2 | python | @patch
def tta(self: Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
if (dl is None):
dl = self.dls[ds_idx]
if ((item_tfms is not None) or (batch_tfms is not None)):
dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self, 'progress'):
self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in (self.progress.mbar if hasattr(self, 'progress') else range(n)):
self.epoch = i
aug_preds.append(self.get_preds(ds_idx, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = (aug_preds.max(0)[0] if use_max else aug_preds.mean(0))
self.epoch = n
with dl.dataset.set_split_idx(1):
(preds, targs) = self.get_preds(ds_idx, inner=True)
if use_max:
return (torch.stack([preds, aug_preds], 0).max(0)[0], targs)
preds = ((aug_preds, preds) if (beta is None) else torch.lerp(aug_preds, preds, beta))
return (preds, targs) |
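A brief, hedged usage sketch for tta(): `learn` is assumed to be a fitted vision Learner whose validation pipeline includes augmenting transforms; the arguments mirror the signature above.

```python
preds, targs = learn.tta(ds_idx=1, n=4, beta=0.25)       # blend augmented and plain predictions
acc = (preds.argmax(dim=-1) == targs).float().mean()     # quick accuracy check on the blend
print(acc.item())
```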
def begin_fit(self):
'Prepare state for training'
(self.lrs, self.iters, self.losses, self.values) = ([], [], [], [])
names = self.metrics.attrgot('name')
if (self.train_metrics and self.valid_metrics):
names = (L('loss') + names)
names = (names.map('train_{}') + names.map('valid_{}'))
elif self.valid_metrics:
names = (L('train_loss', 'valid_loss') + names)
else:
names = (L('train_loss') + names)
if self.add_time:
names.append('time')
self.metric_names = ('epoch' + names)
self.smooth_loss.reset() | 8,912,982,815,727,020,000 | Prepare state for training | fastai2/learner.py | begin_fit | akashpalrecha/fastai2 | python | def begin_fit(self):
(self.lrs, self.iters, self.losses, self.values) = ([], [], [], [])
names = self.metrics.attrgot('name')
if (self.train_metrics and self.valid_metrics):
names = (L('loss') + names)
names = (names.map('train_{}') + names.map('valid_{}'))
elif self.valid_metrics:
names = (L('train_loss', 'valid_loss') + names)
else:
names = (L('train_loss') + names)
if self.add_time:
names.append('time')
self.metric_names = ('epoch' + names)
self.smooth_loss.reset() |
def after_batch(self):
'Update all metrics and records lr and smooth loss in training'
if (len(self.yb) == 0):
return
mets = (self._train_mets if self.training else self._valid_mets)
for met in mets:
met.accumulate(self.learn)
if (not self.training):
return
self.lrs.append(self.opt.hypers[(- 1)]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value | 5,701,394,699,218,141,000 | Update all metrics and records lr and smooth loss in training | fastai2/learner.py | after_batch | akashpalrecha/fastai2 | python | def after_batch(self):
if (len(self.yb) == 0):
return
mets = (self._train_mets if self.training else self._valid_mets)
for met in mets:
met.accumulate(self.learn)
if (not self.training):
return
self.lrs.append(self.opt.hypers[(- 1)]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value |
def begin_epoch(self):
'Set timer if `self.add_time=True`'
(self.cancel_train, self.cancel_valid) = (False, False)
if self.add_time:
self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0)) | 7,413,627,441,664,656,000 | Set timer if `self.add_time=True` | fastai2/learner.py | begin_epoch | akashpalrecha/fastai2 | python | def begin_epoch(self):
(self.cancel_train, self.cancel_valid) = (False, False)
if self.add_time:
self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0)) |
def after_epoch(self):
'Store and log the loss/metric values'
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time:
self.log.append(format_time((time.time() - self.start_epoch)))
self.logger(self.log)
self.iters.append(self.smooth_loss.count) | 2,613,439,967,057,962,000 | Store and log the loss/metric values | fastai2/learner.py | after_epoch | akashpalrecha/fastai2 | python | def after_epoch(self):
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time:
self.log.append(format_time((time.time() - self.start_epoch)))
self.logger(self.log)
self.iters.append(self.smooth_loss.count) |
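After training, the Recorder state built by the callbacks above can be inspected directly. A short sketch (assumes a fitted fastai Learner; matplotlib is used only for the optional plot):

```python
import matplotlib.pyplot as plt

print(learn.recorder.metric_names)   # e.g. ['epoch', 'train_loss', 'valid_loss', ..., 'time']
print(learn.recorder.values[-1])     # last epoch's losses/metrics, as logged in after_epoch
plt.plot(learn.recorder.losses)      # smoothed training loss recorded per batch in after_batch
plt.show()
```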
def FromStruct(self, PointList):
' FromStruct(self: dotnetPointList_t,PointList: PointList) '
pass | -6,544,352,734,837,394,000 | FromStruct(self: dotnetPointList_t,PointList: PointList) | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | FromStruct | YKato521/ironpython-stubs | python | def FromStruct(self, PointList):
' '
pass |
def ToStruct(self, PointList):
' ToStruct(self: dotnetPointList_t,PointList: PointList) '
pass | 1,758,093,045,537,316,400 | ToStruct(self: dotnetPointList_t,PointList: PointList) | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | ToStruct | YKato521/ironpython-stubs | python | def ToStruct(self, PointList):
' '
pass |
@staticmethod
def __new__(self, Size):
'\n __new__[dotnetPointList_t]() -> dotnetPointList_t\n\n \n\n __new__(cls: type,Size: int)\n '
pass | 3,034,896,310,408,620,500 | __new__[dotnetPointList_t]() -> dotnetPointList_t
__new__(cls: type,Size: int) | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | __new__ | YKato521/ironpython-stubs | python | @staticmethod
def __new__(self, Size):
'\n __new__[dotnetPointList_t]() -> dotnetPointList_t\n\n \n\n __new__(cls: type,Size: int)\n '
pass |