body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
'Sets the reject_on_error of this ExtendedBoolValueTest.\n\n\n :param reject_on_error: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501\n :type: bool\n '
self._reject_on_error = reject_on_error | 6,733,980,712,168,993,000 | Sets the reject_on_error of this ExtendedBoolValueTest.
:param reject_on_error: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501
:type: bool | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | reject_on_error | Telestream/telestream-cloud-python-sdk | python | @reject_on_error.setter
def reject_on_error(self, reject_on_error):
'Sets the reject_on_error of this ExtendedBoolValueTest.\n\n\n :param reject_on_error: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501\n :type: bool\n '
self._reject_on_error = reject_on_error |
@property
def checked(self):
'Gets the checked of this ExtendedBoolValueTest. # noqa: E501\n\n\n :return: The checked of this ExtendedBoolValueTest. # noqa: E501\n :rtype: bool\n '
return self._checked | -3,276,358,111,662,453,000 | Gets the checked of this ExtendedBoolValueTest. # noqa: E501
:return: The checked of this ExtendedBoolValueTest. # noqa: E501
:rtype: bool | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | checked | Telestream/telestream-cloud-python-sdk | python | @property
def checked(self):
'Gets the checked of this ExtendedBoolValueTest. # noqa: E501\n\n\n :return: The checked of this ExtendedBoolValueTest. # noqa: E501\n :rtype: bool\n '
return self._checked |
@checked.setter
def checked(self, checked):
'Sets the checked of this ExtendedBoolValueTest.\n\n\n :param checked: The checked of this ExtendedBoolValueTest. # noqa: E501\n :type: bool\n '
self._checked = checked | -5,146,549,918,617,549,000 | Sets the checked of this ExtendedBoolValueTest.
:param checked: The checked of this ExtendedBoolValueTest. # noqa: E501
:type: bool | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | checked | Telestream/telestream-cloud-python-sdk | python | @checked.setter
def checked(self, checked):
'Sets the checked of this ExtendedBoolValueTest.\n\n\n :param checked: The checked of this ExtendedBoolValueTest. # noqa: E501\n :type: bool\n '
self._checked = checked |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | 8,442,519,487,048,767,000 | Returns the model properties as a dict | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | to_dict | Telestream/telestream-cloud-python-sdk | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result |
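A minimal usage sketch for the generated model methods in these rows (the property setters above, `to_dict` here, and `to_str`/`__repr__` in the rows that follow). The import path is inferred from this row's `path` column and the constructor keyword arguments are an assumption based on typical openapi-generator Python clients, not confirmed by the source.

```python
# Sketch only: import path inferred from the `path` column; the constructor is
# assumed to accept the model fields as optional keyword arguments.
from telestream_cloud_qc.models.extended_bool_value_test import ExtendedBoolValueTest

test = ExtendedBoolValueTest(checked=True, reject_on_error=False)
test.reject_on_error = True   # uses the property setter shown above
print(test.to_dict())         # e.g. {'checked': True, 'reject_on_error': True, ...}
print(test)                   # __repr__ delegates to to_str() -> pprint of to_dict()
```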
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | to_str | Telestream/telestream-cloud-python-sdk | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | __repr__ | Telestream/telestream-cloud-python-sdk | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ExtendedBoolValueTest)):
return False
return (self.to_dict() == other.to_dict()) | 487,001,221,569,480,700 | Returns true if both objects are equal | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | __eq__ | Telestream/telestream-cloud-python-sdk | python | def __eq__(self, other):
if (not isinstance(other, ExtendedBoolValueTest)):
return False
return (self.to_dict() == other.to_dict()) |
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, ExtendedBoolValueTest)):
return True
return (self.to_dict() != other.to_dict()) | 3,255,979,270,629,175,000 | Returns true if both objects are not equal | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | __ne__ | Telestream/telestream-cloud-python-sdk | python | def __ne__(self, other):
if (not isinstance(other, ExtendedBoolValueTest)):
return True
return (self.to_dict() != other.to_dict()) |
def train(data: Dict[(str, np.ndarray)], model_name: str, dest_path: str, sample_size: int, n_classes: int, lr: float, batch_size: int, epochs: int, verbose: int, shuffle: bool, patience: int, seed: int):
'\n Function for running experiments on various unmixing models,\n given a set of hyper parameters.\n\n :param data: The data dictionary containing\n the subsets for training and validation.\n First dimension of the datasets should be the number of samples.\n :param model_name: Name of the model, it serves as a key in the\n dictionary holding all functions returning models.\n :param dest_path: Path to where all experiment runs will be saved as\n subdirectories in this given directory.\n :param sample_size: Size of the input sample.\n :param n_classes: Number of classes.\n :param lr: Learning rate for the model, i.e., regulates\n the size of the step in the gradient descent process.\n :param batch_size: Size of the batch used in training phase,\n it is the size of samples per gradient step.\n :param epochs: Number of epochs for model to train.\n :param verbose: Verbosity mode used in training, (0, 1 or 2).\n :param shuffle: Boolean indicating whether to shuffle datasets.\n :param patience: Number of epochs without improvement in order to\n stop the training phase.\n :param seed: Seed for training reproducibility.\n '
np.random.seed(seed=seed)
model = _get_model(model_key=model_name, **{'input_size': sample_size, 'n_classes': n_classes})
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(lr=lr), loss=UNMIXING_LOSSES[model_name], metrics=UNMIXING_TRAIN_METRICS[model_name])
time_history = time_metrics.TimeHistory()
mcp_save = tf.keras.callbacks.ModelCheckpoint(os.path.join(dest_path, 'model.h5'), save_best_only=True, monitor='val_loss', mode='min')
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min')
callbacks = [time_history, mcp_save, early_stopping]
train_dict = data[enums.Dataset.TRAIN].copy()
val_dict = data[enums.Dataset.VAL].copy()
(min_, max_) = (data[enums.DataStats.MIN], data[enums.DataStats.MAX])
transformations = [transforms.MinMaxNormalize(min_=min_, max_=max_)]
transformations += [t() for t in UNMIXING_TRANSFORMS[model_name]]
train_dict = transforms.apply_transformations(train_dict, transformations)
val_dict = transforms.apply_transformations(val_dict, transformations)
history = model.fit(x=train_dict[enums.Dataset.DATA], y=train_dict[enums.Dataset.LABELS], epochs=epochs, verbose=verbose, shuffle=shuffle, validation_data=(val_dict[enums.Dataset.DATA], val_dict[enums.Dataset.LABELS]), callbacks=callbacks, batch_size=batch_size)
np.savetxt(os.path.join(dest_path, 'min-max.csv'), np.array([min_, max_]), delimiter=',', fmt='%f')
history.history[time_metrics.TimeHistory.__name__] = time_history.average
io.save_metrics(dest_path=dest_path, file_name='training_metrics.csv', metrics=history.history) | -1,414,845,563,647,005,400 | Function for running experiments on various unmixing models,
given a set of hyper parameters.
:param data: The data dictionary containing
the subsets for training and validation.
First dimension of the datasets should be the number of samples.
:param model_name: Name of the model, it serves as a key in the
dictionary holding all functions returning models.
:param dest_path: Path to where all experiment runs will be saved as
subdirectories in this given directory.
:param sample_size: Size of the input sample.
:param n_classes: Number of classes.
:param lr: Learning rate for the model, i.e., regulates
the size of the step in the gradient descent process.
:param batch_size: Size of the batch used in training phase,
it is the size of samples per gradient step.
:param epochs: Number of epochs for model to train.
:param verbose: Verbosity mode used in training, (0, 1 or 2).
:param shuffle: Boolean indicating whether to shuffle datasets.
:param patience: Number of epochs without improvement in order to
stop the training phase.
:param seed: Seed for training reproducibility. | src/model/train_unmixing.py | train | laugh12321/DACN | python | def train(data: Dict[(str, np.ndarray)], model_name: str, dest_path: str, sample_size: int, n_classes: int, lr: float, batch_size: int, epochs: int, verbose: int, shuffle: bool, patience: int, seed: int):
'\n Function for running experiments on various unmixing models,\n given a set of hyper parameters.\n\n :param data: The data dictionary containing\n the subsets for training and validation.\n First dimension of the datasets should be the number of samples.\n :param model_name: Name of the model, it serves as a key in the\n dictionary holding all functions returning models.\n :param dest_path: Path to where all experiment runs will be saved as\n subdirectories in this given directory.\n :param sample_size: Size of the input sample.\n :param n_classes: Number of classes.\n :param lr: Learning rate for the model, i.e., regulates\n the size of the step in the gradient descent process.\n :param batch_size: Size of the batch used in training phase,\n it is the size of samples per gradient step.\n :param epochs: Number of epochs for model to train.\n :param verbose: Verbosity mode used in training, (0, 1 or 2).\n :param shuffle: Boolean indicating whether to shuffle datasets.\n :param patience: Number of epochs without improvement in order to\n stop the training phase.\n :param seed: Seed for training reproducibility.\n '
np.random.seed(seed=seed)
model = _get_model(model_key=model_name, **{'input_size': sample_size, 'n_classes': n_classes})
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(lr=lr), loss=UNMIXING_LOSSES[model_name], metrics=UNMIXING_TRAIN_METRICS[model_name])
time_history = time_metrics.TimeHistory()
mcp_save = tf.keras.callbacks.ModelCheckpoint(os.path.join(dest_path, 'model.h5'), save_best_only=True, monitor='val_loss', mode='min')
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min')
callbacks = [time_history, mcp_save, early_stopping]
train_dict = data[enums.Dataset.TRAIN].copy()
val_dict = data[enums.Dataset.VAL].copy()
(min_, max_) = (data[enums.DataStats.MIN], data[enums.DataStats.MAX])
transformations = [transforms.MinMaxNormalize(min_=min_, max_=max_)]
transformations += [t() for t in UNMIXING_TRANSFORMS[model_name]]
train_dict = transforms.apply_transformations(train_dict, transformations)
val_dict = transforms.apply_transformations(val_dict, transformations)
history = model.fit(x=train_dict[enums.Dataset.DATA], y=train_dict[enums.Dataset.LABELS], epochs=epochs, verbose=verbose, shuffle=shuffle, validation_data=(val_dict[enums.Dataset.DATA], val_dict[enums.Dataset.LABELS]), callbacks=callbacks, batch_size=batch_size)
np.savetxt(os.path.join(dest_path, 'min-max.csv'), np.array([min_, max_]), delimiter=',', fmt='%f')
history.history[time_metrics.TimeHistory.__name__] = time_history.average
io.save_metrics(dest_path=dest_path, file_name='training_metrics.csv', metrics=history.history) |
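The `train` docstring describes the expected `data` dictionary only in prose; the sketch below reconstructs its layout from the keys the function actually reads. Everything here is illustrative: the import paths are inferred from this row's `path` column, and the model key, array shapes and hyperparameter values are placeholders not confirmed by the source.

```python
# Illustrative sketch of the data dictionary train() expects.
# from src.model.train_unmixing import train   # path per this row's `path` column
# `enums` is the repository's own constants module referenced inside train();
# its exact import path is not shown in this row and is assumed to be available.
import numpy as np

n_train, n_val, sample_size, n_classes = 256, 64, 103, 5   # placeholder shapes
data = {
    enums.Dataset.TRAIN: {
        enums.Dataset.DATA: np.random.rand(n_train, sample_size).astype(np.float32),
        enums.Dataset.LABELS: np.random.rand(n_train, n_classes).astype(np.float32),
    },
    enums.Dataset.VAL: {
        enums.Dataset.DATA: np.random.rand(n_val, sample_size).astype(np.float32),
        enums.Dataset.LABELS: np.random.rand(n_val, n_classes).astype(np.float32),
    },
    enums.DataStats.MIN: 0.0,
    enums.DataStats.MAX: 1.0,
}

train(data=data, model_name='unmixing_pixel_based_cnn',   # placeholder model key
      dest_path='experiments/run_0', sample_size=sample_size, n_classes=n_classes,
      lr=0.001, batch_size=64, epochs=100, verbose=1, shuffle=True,
      patience=15, seed=0)
```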
def _style(message: str, **kwargs: Any) -> str:
'Wrapper around mypy.util for fancy formatting.'
kwargs.setdefault('color', 'none')
return _formatter.style(message, **kwargs) | 7,824,578,596,113,823,000 | Wrapper around mypy.util for fancy formatting. | venv/Lib/site-packages/mypy/stubtest.py | _style | HarisHijazi/mojarnik-server | python | def _style(message: str, **kwargs: Any) -> str:
kwargs.setdefault('color', 'none')
return _formatter.style(message, **kwargs) |
def test_module(module_name: str) -> Iterator[Error]:
"Tests a given module's stub against introspecting it at runtime.\n\n Requires the stub to have been built already, accomplished by a call to ``build_stubs``.\n\n :param module_name: The module to test\n\n "
stub = get_stub(module_name)
if (stub is None):
(yield Error([module_name], 'failed to find stubs', MISSING, None))
return
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
runtime = importlib.import_module(module_name)
except Exception as e:
(yield Error([module_name], 'failed to import: {}'.format(e), stub, MISSING))
return
with warnings.catch_warnings():
warnings.simplefilter('ignore')
(yield from verify(stub, runtime, [module_name])) | 4,199,037,603,568,104,000 | Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test | venv/Lib/site-packages/mypy/stubtest.py | test_module | HarisHijazi/mojarnik-server | python | def test_module(module_name: str) -> Iterator[Error]:
"Tests a given module's stub against introspecting it at runtime.\n\n Requires the stub to have been built already, accomplished by a call to ``build_stubs``.\n\n :param module_name: The module to test\n\n "
stub = get_stub(module_name)
if (stub is None):
(yield Error([module_name], 'failed to find stubs', MISSING, None))
return
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
runtime = importlib.import_module(module_name)
except Exception as e:
(yield Error([module_name], 'failed to import: {}'.format(e), stub, MISSING))
return
with warnings.catch_warnings():
warnings.simplefilter('ignore')
(yield from verify(stub, runtime, [module_name])) |
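As the docstring notes, `test_module` requires `build_stubs` to have run first. A small driver sketch using the functions shown in this file follows; the module name is illustrative.

```python
# Minimal driver: build stubs once, then iterate the errors yielded by test_module().
from mypy.options import Options
from mypy.stubtest import build_stubs, test_module

options = Options()
options.incremental = False
build_stubs(['json'], options)                 # must precede test_module()
for error in test_module('json'):
    print(error.get_description(concise=True))
```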
@singledispatch
def verify(stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]) -> Iterator[Error]:
'Entry point for comparing a stub to a runtime object.\n\n We use single dispatch based on the type of ``stub``.\n\n :param stub: The mypy node representing a part of the stub\n :param runtime: The runtime object corresponding to ``stub``\n\n '
(yield Error(object_path, 'is an unknown mypy node', stub, runtime)) | -1,455,489,771,263,504,100 | Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub`` | venv/Lib/site-packages/mypy/stubtest.py | verify | HarisHijazi/mojarnik-server | python | @singledispatch
def verify(stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]) -> Iterator[Error]:
'Entry point for comparing a stub to a runtime object.\n\n We use single dispatch based on the type of ``stub``.\n\n :param stub: The mypy node representing a part of the stub\n :param runtime: The runtime object corresponding to ``stub``\n\n '
(yield Error(object_path, 'is an unknown mypy node', stub, runtime)) |
def _verify_arg_name(stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str) -> Iterator[str]:
'Checks whether argument names match.'
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return (s[len(prefix):] if s.startswith(prefix) else s)
if (strip_prefix(stub_arg.variable.name, '__') == runtime_arg.name):
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip('_')
b = b.strip('_')
return (a.startswith(b) or b.startswith(a) or (len(a) == 1) or (len(b) == 1))
if ((runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY) and names_approx_match(stub_arg.variable.name, runtime_arg.name)):
return
if (stub_arg.variable.name == '_self'):
return
(yield 'stub argument "{}" differs from runtime argument "{}"'.format(stub_arg.variable.name, runtime_arg.name)) | 1,372,644,029,172,474,400 | Checks whether argument names match. | venv/Lib/site-packages/mypy/stubtest.py | _verify_arg_name | HarisHijazi/mojarnik-server | python | def _verify_arg_name(stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str) -> Iterator[str]:
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return (s[len(prefix):] if s.startswith(prefix) else s)
if (strip_prefix(stub_arg.variable.name, '__') == runtime_arg.name):
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip('_')
b = b.strip('_')
return (a.startswith(b) or b.startswith(a) or (len(a) == 1) or (len(b) == 1))
if ((runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY) and names_approx_match(stub_arg.variable.name, runtime_arg.name)):
return
if (stub_arg.variable.name == '_self'):
return
(yield 'stub argument "{}" differs from runtime argument "{}"'.format(stub_arg.variable.name, runtime_arg.name)) |
def _verify_arg_default_value(stub_arg: nodes.Argument, runtime_arg: inspect.Parameter) -> Iterator[str]:
'Checks whether argument default values are compatible.'
if (runtime_arg.default != inspect.Parameter.empty):
if (stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)):
(yield 'runtime argument "{}" has a default value but stub argument does not'.format(runtime_arg.name))
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
stub_type = (stub_arg.variable.type or stub_arg.type_annotation)
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if ((runtime_type is not None) and (stub_type is not None) and (type(runtime_arg.default) != object) and (not is_subtype_helper(runtime_type, stub_type))):
(yield 'runtime argument "{}" has a default value of type {}, which is incompatible with stub argument type {}'.format(runtime_arg.name, runtime_type, stub_type))
elif (stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)):
(yield 'stub argument "{}" has a default value but runtime argument does not'.format(stub_arg.variable.name)) | 7,913,220,526,749,710,000 | Checks whether argument default values are compatible. | venv/Lib/site-packages/mypy/stubtest.py | _verify_arg_default_value | HarisHijazi/mojarnik-server | python | def _verify_arg_default_value(stub_arg: nodes.Argument, runtime_arg: inspect.Parameter) -> Iterator[str]:
if (runtime_arg.default != inspect.Parameter.empty):
if (stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)):
(yield 'runtime argument "{}" has a default value but stub argument does not'.format(runtime_arg.name))
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
stub_type = (stub_arg.variable.type or stub_arg.type_annotation)
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if ((runtime_type is not None) and (stub_type is not None) and (type(runtime_arg.default) != object) and (not is_subtype_helper(runtime_type, stub_type))):
(yield 'runtime argument "{}" has a default value of type {}, which is incompatible with stub argument type {}'.format(runtime_arg.name, runtime_type, stub_type))
elif (stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)):
(yield 'stub argument "{}" has a default value but runtime argument does not'.format(stub_arg.variable.name)) |
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"Returns a FuncItem that corresponds to the output of the decorator.\n\n Returns None if we can't figure out what that would be. For convenience, this function also\n accepts FuncItems.\n\n "
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(decorator: nodes.Expression, func: nodes.FuncItem) -> Optional[nodes.FuncItem]:
if (not isinstance(decorator, nodes.RefExpr)):
return None
if (decorator.fullname is None):
return None
if (decorator.fullname in ('builtins.staticmethod', 'typing.overload', 'abc.abstractmethod')):
return func
if (decorator.fullname == 'builtins.classmethod'):
assert (func.arguments[0].variable.name in ('cls', 'metacls'))
ret = copy.copy(func)
ret.arguments = ret.arguments[1:]
return ret
return None
func = dec.func
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if (resulting_func is None):
return None
func = resulting_func
return func | -1,845,176,756,709,411,300 | Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems. | venv/Lib/site-packages/mypy/stubtest.py | _resolve_funcitem_from_decorator | HarisHijazi/mojarnik-server | python | def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"Returns a FuncItem that corresponds to the output of the decorator.\n\n Returns None if we can't figure out what that would be. For convenience, this function also\n accepts FuncItems.\n\n "
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(decorator: nodes.Expression, func: nodes.FuncItem) -> Optional[nodes.FuncItem]:
if (not isinstance(decorator, nodes.RefExpr)):
return None
if (decorator.fullname is None):
return None
if (decorator.fullname in ('builtins.staticmethod', 'typing.overload', 'abc.abstractmethod')):
return func
if (decorator.fullname == 'builtins.classmethod'):
assert (func.arguments[0].variable.name in ('cls', 'metacls'))
ret = copy.copy(func)
ret.arguments = ret.arguments[1:]
return ret
return None
func = dec.func
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if (resulting_func is None):
return None
func = resulting_func
return func |
def is_dunder(name: str, exclude_special: bool=False) -> bool:
'Returns whether name is a dunder name.\n\n :param exclude_special: Whether to return False for a couple special dunder methods.\n\n '
if (exclude_special and (name in SPECIAL_DUNDERS)):
return False
return (name.startswith('__') and name.endswith('__')) | 8,043,481,766,942,279,000 | Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods. | venv/Lib/site-packages/mypy/stubtest.py | is_dunder | HarisHijazi/mojarnik-server | python | def is_dunder(name: str, exclude_special: bool=False) -> bool:
'Returns whether name is a dunder name.\n\n :param exclude_special: Whether to return False for a couple special dunder methods.\n\n '
if (exclude_special and (name in SPECIAL_DUNDERS)):
return False
return (name.startswith('__') and name.endswith('__')) |
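A quick illustration of the two behaviours of `is_dunder`. Whether a name counts as "special" depends on the `SPECIAL_DUNDERS` constant defined elsewhere in this module, assumed here to include `__init__`.

```python
from mypy.stubtest import is_dunder

print(is_dunder('__add__'))                          # True
print(is_dunder('add'))                              # False
print(is_dunder('__init__', exclude_special=True))   # False, assuming __init__ is in SPECIAL_DUNDERS
```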
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
'Checks whether ``left`` is a subtype of ``right``.'
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (isinstance(left, mypy.types.LiteralType) and isinstance(left.value, int) and (left.value in (0, 1)) and isinstance(right, mypy.types.Instance) and (right.type.fullname == 'builtins.bool')):
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right) | -4,968,396,397,563,760,000 | Checks whether ``left`` is a subtype of ``right``. | venv/Lib/site-packages/mypy/stubtest.py | is_subtype_helper | HarisHijazi/mojarnik-server | python | def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (isinstance(left, mypy.types.LiteralType) and isinstance(left.value, int) and (left.value in (0, 1)) and isinstance(right, mypy.types.Instance) and (right.type.fullname == 'builtins.bool')):
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right) |
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"Returns a mypy type object representing the type of ``runtime``.\n\n Returns None if we can't find something that works.\n\n "
if (runtime is None):
return mypy.types.NoneType()
if isinstance(runtime, property):
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType)):
builtins = get_stub('builtins')
assert (builtins is not None)
type_info = builtins.names['function'].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append((None if (arg.kind == inspect.Parameter.POSITIONAL_ONLY) else arg.name))
has_default = (arg.default == inspect.Parameter.empty)
if (arg.kind == inspect.Parameter.POSITIONAL_ONLY):
arg_kinds.append((nodes.ARG_POS if has_default else nodes.ARG_OPT))
elif (arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD):
arg_kinds.append((nodes.ARG_POS if has_default else nodes.ARG_OPT))
elif (arg.kind == inspect.Parameter.KEYWORD_ONLY):
arg_kinds.append((nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT))
elif (arg.kind == inspect.Parameter.VAR_POSITIONAL):
arg_kinds.append(nodes.ARG_STAR)
elif (arg.kind == inspect.Parameter.VAR_KEYWORD):
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(arg_types, arg_kinds, arg_names, ret_type=anytype(), fallback=fallback, is_ellipsis_args=True)
stub = get_stub(type(runtime).__module__)
if (stub is None):
return None
type_name = type(runtime).__name__
if (type_name not in stub.names):
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if (not isinstance(type_info, nodes.TypeInfo)):
return None
if isinstance(runtime, tuple):
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if (i is not None) else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
return mypy.types.LiteralType(value=runtime, fallback=fallback)
except TypeError:
return fallback | 2,015,463,356,520,767,200 | Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works. | venv/Lib/site-packages/mypy/stubtest.py | get_mypy_type_of_runtime_value | HarisHijazi/mojarnik-server | python | def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"Returns a mypy type object representing the type of ``runtime``.\n\n Returns None if we can't find something that works.\n\n "
if (runtime is None):
return mypy.types.NoneType()
if isinstance(runtime, property):
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType)):
builtins = get_stub('builtins')
assert (builtins is not None)
type_info = builtins.names['function'].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append((None if (arg.kind == inspect.Parameter.POSITIONAL_ONLY) else arg.name))
has_default = (arg.default == inspect.Parameter.empty)
if (arg.kind == inspect.Parameter.POSITIONAL_ONLY):
arg_kinds.append((nodes.ARG_POS if has_default else nodes.ARG_OPT))
elif (arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD):
arg_kinds.append((nodes.ARG_POS if has_default else nodes.ARG_OPT))
elif (arg.kind == inspect.Parameter.KEYWORD_ONLY):
arg_kinds.append((nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT))
elif (arg.kind == inspect.Parameter.VAR_POSITIONAL):
arg_kinds.append(nodes.ARG_STAR)
elif (arg.kind == inspect.Parameter.VAR_KEYWORD):
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(arg_types, arg_kinds, arg_names, ret_type=anytype(), fallback=fallback, is_ellipsis_args=True)
stub = get_stub(type(runtime).__module__)
if (stub is None):
return None
type_name = type(runtime).__name__
if (type_name not in stub.names):
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if (not isinstance(type_info, nodes.TypeInfo)):
return None
if isinstance(runtime, tuple):
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if (i is not None) else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
return mypy.types.LiteralType(value=runtime, fallback=fallback)
except TypeError:
return fallback |
def build_stubs(modules: List[str], options: Options, find_submodules: bool=False) -> List[str]:
'Uses mypy to construct stub objects for the given modules.\n\n This sets global state that ``get_stub`` can access.\n\n Returns all modules we might want to check. If ``find_submodules`` is False, this is equal\n to ``modules``.\n\n :param modules: List of modules to build stubs for.\n :param options: Mypy options for finding and building stubs.\n :param find_submodules: Whether to attempt to find submodules of the given modules as well.\n\n '
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(search_path, fscache=None, options=options)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if (not find_submodules):
module_path = find_module_cache.find_module(module)
if (not isinstance(module_path, str)):
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend((s.module for s in found_sources if (s.module not in all_modules)))
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [_style('error: ', color='red', bold=True), 'not checking stubs due to failed mypy compile:\n', str(e)]
print(''.join(output))
raise RuntimeError from e
if res.errors:
output = [_style('error: ', color='red', bold=True), 'not checking stubs due to mypy build errors:\n']
print((''.join(output) + '\n'.join(res.errors)))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules | 379,680,852,265,002,900 | Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well. | venv/Lib/site-packages/mypy/stubtest.py | build_stubs | HarisHijazi/mojarnik-server | python | def build_stubs(modules: List[str], options: Options, find_submodules: bool=False) -> List[str]:
'Uses mypy to construct stub objects for the given modules.\n\n This sets global state that ``get_stub`` can access.\n\n Returns all modules we might want to check. If ``find_submodules`` is False, this is equal\n to ``modules``.\n\n :param modules: List of modules to build stubs for.\n :param options: Mypy options for finding and building stubs.\n :param find_submodules: Whether to attempt to find submodules of the given modules as well.\n\n '
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(search_path, fscache=None, options=options)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if (not find_submodules):
module_path = find_module_cache.find_module(module)
if (not isinstance(module_path, str)):
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend((s.module for s in found_sources if (s.module not in all_modules)))
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [_style('error: ', color='red', bold=True), 'not checking stubs due to failed mypy compile:\n', str(e)]
print(''.join(output))
raise RuntimeError from e
if res.errors:
output = [_style('error: ', color='red', bold=True), 'not checking stubs due to mypy build errors:\n']
print((''.join(output) + '\n'.join(res.errors)))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules |
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"Returns a stub object for the given module, if we've built one."
return _all_stubs.get(module) | 718,094,875,160,185,500 | Returns a stub object for the given module, if we've built one. | venv/Lib/site-packages/mypy/stubtest.py | get_stub | HarisHijazi/mojarnik-server | python | def get_stub(module: str) -> Optional[nodes.MypyFile]:
return _all_stubs.get(module) |
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
'Returns a list of stdlib modules in typeshed (for current Python version).'
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
if (sys.version_info < (3, 6)):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for (module, versions) in stdlib_py_versions.items():
(minver, maxver) = versions
if ((version_info >= minver) and ((maxver is None) or (version_info <= maxver))):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = (Path(mypy.build.default_data_dir()) / 'typeshed')
stdlib_dir = (typeshed_dir / 'stdlib')
modules = []
for path in stdlib_dir.rglob('*.pyi'):
if (path.stem == '__init__'):
path = path.parent
module = '.'.join((path.relative_to(stdlib_dir).parts[:(- 1)] + (path.stem,)))
if (module.split('.')[0] in packages):
modules.append(module)
return sorted(modules) | -7,716,510,822,172,239,000 | Returns a list of stdlib modules in typeshed (for current Python version). | venv/Lib/site-packages/mypy/stubtest.py | get_typeshed_stdlib_modules | HarisHijazi/mojarnik-server | python | def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
if (sys.version_info < (3, 6)):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for (module, versions) in stdlib_py_versions.items():
(minver, maxver) = versions
if ((version_info >= minver) and ((maxver is None) or (version_info <= maxver))):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = (Path(mypy.build.default_data_dir()) / 'typeshed')
stdlib_dir = (typeshed_dir / 'stdlib')
modules = []
for path in stdlib_dir.rglob('*.pyi'):
if (path.stem == '__init__'):
path = path.parent
module = '.'.join((path.relative_to(stdlib_dir).parts[:(- 1)] + (path.stem,)))
if (module.split('.')[0] in packages):
modules.append(module)
return sorted(modules) |
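A short usage sketch; passing `None` falls back to the typeshed bundled with the installed mypy.

```python
from mypy.stubtest import get_typeshed_stdlib_modules

modules = get_typeshed_stdlib_modules(None)   # None -> mypy's bundled typeshed
print(len(modules), 'os' in modules)          # e.g. several hundred modules, True
```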
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool=False) -> int:
"This is stubtest! It's time to test the stubs!"
allowlist = {entry: False for allowlist_file in args.allowlist for entry in get_allowlist_entries(allowlist_file)}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert (not args.modules), 'Cannot pass both --check-typeshed and a list of modules'
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {'antigravity', 'this'}
modules = [m for m in modules if (m not in annoying_modules)]
assert modules, 'No modules to check'
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None:
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=(not args.check_typeshed))
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
if (args.ignore_missing_stub and error.is_missing_stub()):
continue
if (args.ignore_positional_only and error.is_positional_only_related()):
continue
if (error.object_desc in allowlist):
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
if (not args.ignore_unused_allowlist):
for w in allowlist:
if ((not allowlist[w]) and (not allowlist_regexes[w].fullmatch(''))):
exit_code = 1
print('note: unused allowlist entry {}'.format(w))
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code | 8,016,859,559,546,443,000 | This is stubtest! It's time to test the stubs! | venv/Lib/site-packages/mypy/stubtest.py | test_stubs | HarisHijazi/mojarnik-server | python | def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool=False) -> int:
allowlist = {entry: False for allowlist_file in args.allowlist for entry in get_allowlist_entries(allowlist_file)}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert (not args.modules), 'Cannot pass both --check-typeshed and a list of modules'
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {'antigravity', 'this'}
modules = [m for m in modules if (m not in annoying_modules)]
assert modules, 'No modules to check'
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None:
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=(not args.check_typeshed))
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
if (args.ignore_missing_stub and error.is_missing_stub()):
continue
if (args.ignore_positional_only and error.is_positional_only_related()):
continue
if (error.object_desc in allowlist):
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
if (not args.ignore_unused_allowlist):
for w in allowlist:
if ((not allowlist[w]) and (not allowlist_regexes[w].fullmatch(''))):
exit_code = 1
print('note: unused allowlist entry {}'.format(w))
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code |
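`test_stubs` reads its options from an `argparse.Namespace`; the sketch below builds one by hand with the attributes the function accesses above. In normal use these come from stubtest's own argument parser, and the chosen values here are illustrative.

```python
# Hand-built Namespace mirroring the attributes test_stubs() reads.
import argparse
from mypy.stubtest import test_stubs

args = argparse.Namespace(
    modules=['collections'],        # illustrative module
    check_typeshed=False,
    custom_typeshed_dir=None,
    mypy_config_file=None,
    allowlist=[],
    generate_allowlist=False,
    ignore_unused_allowlist=False,
    ignore_missing_stub=False,
    ignore_positional_only=False,
    concise=False,
)
exit_code = test_stubs(args)
print(exit_code)                    # 0 when no discrepancies are reported
```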
def __init__(self, object_path: List[str], message: str, stub_object: MaybeMissing[nodes.Node], runtime_object: MaybeMissing[Any], *, stub_desc: Optional[str]=None, runtime_desc: Optional[str]=None) -> None:
'Represents an error found by stubtest.\n\n :param object_path: Location of the object with the error,\n e.g. ``["module", "Class", "method"]``\n :param message: Error message\n :param stub_object: The mypy node representing the stub\n :param runtime_object: Actual object obtained from the runtime\n :param stub_desc: Specialised description for the stub object, should you wish\n :param runtime_desc: Specialised description for the runtime object, should you wish\n\n '
self.object_desc = '.'.join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = (stub_desc or str(getattr(stub_object, 'type', stub_object)))
self.runtime_desc = (runtime_desc or str(runtime_object)) | -7,149,678,860,484,340,000 | Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish | venv/Lib/site-packages/mypy/stubtest.py | __init__ | HarisHijazi/mojarnik-server | python | def __init__(self, object_path: List[str], message: str, stub_object: MaybeMissing[nodes.Node], runtime_object: MaybeMissing[Any], *, stub_desc: Optional[str]=None, runtime_desc: Optional[str]=None) -> None:
'Represents an error found by stubtest.\n\n :param object_path: Location of the object with the error,\n e.g. ``["module", "Class", "method"]``\n :param message: Error message\n :param stub_object: The mypy node representing the stub\n :param runtime_object: Actual object obtained from the runtime\n :param stub_desc: Specialised description for the stub object, should you wish\n :param runtime_desc: Specialised description for the runtime object, should you wish\n\n '
self.object_desc = '.'.join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = (stub_desc or str(getattr(stub_object, 'type', stub_object)))
self.runtime_desc = (runtime_desc or str(runtime_object)) |
def is_missing_stub(self) -> bool:
'Whether or not the error is for something missing from the stub.'
return isinstance(self.stub_object, Missing) | 5,390,748,104,280,314,000 | Whether or not the error is for something missing from the stub. | venv/Lib/site-packages/mypy/stubtest.py | is_missing_stub | HarisHijazi/mojarnik-server | python | def is_missing_stub(self) -> bool:
return isinstance(self.stub_object, Missing) |
def is_positional_only_related(self) -> bool:
'Whether or not the error is for something being (or not being) positional-only.'
return ('leading double underscore' in self.message) | -4,917,370,307,703,007,000 | Whether or not the error is for something being (or not being) positional-only. | venv/Lib/site-packages/mypy/stubtest.py | is_positional_only_related | HarisHijazi/mojarnik-server | python | def is_positional_only_related(self) -> bool:
return ('leading double underscore' in self.message) |
def get_description(self, concise: bool=False) -> str:
'Returns a description of the error.\n\n :param concise: Whether to return a concise, one-line description\n\n '
if concise:
return ((_style(self.object_desc, bold=True) + ' ') + self.message)
stub_line = None
stub_file = None
if (not isinstance(self.stub_object, Missing)):
stub_line = self.stub_object.line
stub_loc_str = ''
if stub_line:
stub_loc_str += ' at line {}'.format(stub_line)
if stub_file:
stub_loc_str += ' in file {}'.format(Path(stub_file))
runtime_line = None
runtime_file = None
if (not isinstance(self.runtime_object, Missing)):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ''
if runtime_line:
runtime_loc_str += ' at line {}'.format(runtime_line)
if runtime_file:
runtime_loc_str += ' in file {}'.format(Path(runtime_file))
output = [_style('error: ', color='red', bold=True), _style(self.object_desc, bold=True), ' ', self.message, '\n', 'Stub:', _style(stub_loc_str, dim=True), '\n', _style((self.stub_desc + '\n'), color='blue', dim=True), 'Runtime:', _style(runtime_loc_str, dim=True), '\n', _style((self.runtime_desc + '\n'), color='blue', dim=True)]
return ''.join(output) | 7,574,251,078,733,622,000 | Returns a description of the error.
:param concise: Whether to return a concise, one-line description | venv/Lib/site-packages/mypy/stubtest.py | get_description | HarisHijazi/mojarnik-server | python | def get_description(self, concise: bool=False) -> str:
'Returns a description of the error.\n\n :param concise: Whether to return a concise, one-line description\n\n '
if concise:
return ((_style(self.object_desc, bold=True) + ' ') + self.message)
stub_line = None
stub_file = None
if (not isinstance(self.stub_object, Missing)):
stub_line = self.stub_object.line
stub_loc_str = ''
if stub_line:
stub_loc_str += ' at line {}'.format(stub_line)
if stub_file:
stub_loc_str += ' in file {}'.format(Path(stub_file))
runtime_line = None
runtime_file = None
if (not isinstance(self.runtime_object, Missing)):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ''
if runtime_line:
runtime_loc_str += ' at line {}'.format(runtime_line)
if runtime_file:
runtime_loc_str += ' in file {}'.format(Path(runtime_file))
output = [_style('error: ', color='red', bold=True), _style(self.object_desc, bold=True), ' ', self.message, '\n', 'Stub:', _style(stub_loc_str, dim=True), '\n', _style((self.stub_desc + '\n'), color='blue', dim=True), 'Runtime:', _style(runtime_loc_str, dim=True), '\n', _style((self.runtime_desc + '\n'), color='blue', dim=True)]
return ''.join(output)
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> 'Signature[nodes.Argument]':
"Returns a Signature from an OverloadedFuncDef.\n\n If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its\n items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we\n try and combine the overload's items into a single signature that is compatible with any\n lies it might try to tell.\n\n "
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {}
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert (func is not None)
args = maybe_strip_cls(stub.name, func.arguments)
for (index, arg) in enumerate(args):
name = ('__{}'.format(index) if (arg.variable.name.startswith('__') or assume_positional_only) else arg.variable.name)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
return max((index for (_, index) in all_args[arg_name]))
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [(arg.variable.type or arg.type_annotation) for (arg, _) in all_args[arg_name]]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for (arg, _) in all_args[arg_name]}
if (nodes.ARG_STAR in kinds):
return nodes.ARG_STAR
if (nodes.ARG_STAR2 in kinds):
return nodes.ARG_STAR2
is_opt = ((len(all_args[arg_name]) < len(stub.items)) or (nodes.ARG_OPT in kinds) or (nodes.ARG_NAMED_OPT in kinds))
is_pos = ((nodes.ARG_OPT in kinds) or (nodes.ARG_POS in kinds))
if is_opt:
return (nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT)
return (nodes.ARG_POS if is_pos else nodes.ARG_NAMED)
sig = Signature()
for arg_name in sorted(all_args, key=get_position):
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(nodes.Var(example_arg_name, get_type(arg_name)), type_annotation=None, initializer=None, kind=get_kind(arg_name))
if (arg.kind in (nodes.ARG_POS, nodes.ARG_OPT)):
sig.pos.append(arg)
elif (arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT)):
sig.kwonly[arg.variable.name] = arg
elif (arg.kind == nodes.ARG_STAR):
sig.varpos = arg
elif (arg.kind == nodes.ARG_STAR2):
sig.varkw = arg
else:
raise AssertionError
return sig | 1,645,200,278,387,473,000 | Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell. | venv/Lib/site-packages/mypy/stubtest.py | from_overloadedfuncdef | HarisHijazi/mojarnik-server | python | @staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> 'Signature[nodes.Argument]':
"Returns a Signature from an OverloadedFuncDef.\n\n If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its\n items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we\n try and combine the overload's items into a single signature that is compatible with any\n lies it might try to tell.\n\n "
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {}
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert (func is not None)
args = maybe_strip_cls(stub.name, func.arguments)
for (index, arg) in enumerate(args):
name = ('__{}'.format(index) if (arg.variable.name.startswith('__') or assume_positional_only) else arg.variable.name)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
return max((index for (_, index) in all_args[arg_name]))
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [(arg.variable.type or arg.type_annotation) for (arg, _) in all_args[arg_name]]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for (arg, _) in all_args[arg_name]}
if (nodes.ARG_STAR in kinds):
return nodes.ARG_STAR
if (nodes.ARG_STAR2 in kinds):
return nodes.ARG_STAR2
is_opt = ((len(all_args[arg_name]) < len(stub.items)) or (nodes.ARG_OPT in kinds) or (nodes.ARG_NAMED_OPT in kinds))
is_pos = ((nodes.ARG_OPT in kinds) or (nodes.ARG_POS in kinds))
if is_opt:
return (nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT)
return (nodes.ARG_POS if is_pos else nodes.ARG_NAMED)
sig = Signature()
for arg_name in sorted(all_args, key=get_position):
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(nodes.Var(example_arg_name, get_type(arg_name)), type_annotation=None, initializer=None, kind=get_kind(arg_name))
if (arg.kind in (nodes.ARG_POS, nodes.ARG_OPT)):
sig.pos.append(arg)
elif (arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT)):
sig.kwonly[arg.variable.name] = arg
elif (arg.kind == nodes.ARG_STAR):
sig.varpos = arg
elif (arg.kind == nodes.ARG_STAR2):
sig.varkw = arg
else:
raise AssertionError
return sig |
def period_range(start=None, end=None, periods: (int | None)=None, freq=None, name=None) -> PeriodIndex:
'\n Return a fixed frequency PeriodIndex.\n\n The day (calendar) is the default frequency.\n\n Parameters\n ----------\n start : str or period-like, default None\n Left bound for generating periods.\n end : str or period-like, default None\n Right bound for generating periods.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, optional\n Frequency alias. By default the freq is taken from `start` or `end`\n if those are Period objects. Otherwise, the default is ``"D"`` for\n daily frequency.\n name : str, default None\n Name of the resulting PeriodIndex.\n\n Returns\n -------\n PeriodIndex\n\n Notes\n -----\n Of the three parameters: ``start``, ``end``, and ``periods``, exactly two\n must be specified.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.period_range(start=\'2017-01-01\', end=\'2018-01-01\', freq=\'M\')\n PeriodIndex([\'2017-01\', \'2017-02\', \'2017-03\', \'2017-04\', \'2017-05\', \'2017-06\',\n \'2017-07\', \'2017-08\', \'2017-09\', \'2017-10\', \'2017-11\', \'2017-12\',\n \'2018-01\'],\n dtype=\'period[M]\')\n\n If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor\n endpoints for a ``PeriodIndex`` with frequency matching that of the\n ``period_range`` constructor.\n\n >>> pd.period_range(start=pd.Period(\'2017Q1\', freq=\'Q\'),\n ... end=pd.Period(\'2017Q2\', freq=\'Q\'), freq=\'M\')\n PeriodIndex([\'2017-03\', \'2017-04\', \'2017-05\', \'2017-06\'],\n dtype=\'period[M]\')\n '
if (com.count_not_none(start, end, periods) != 2):
raise ValueError('Of the three parameters: start, end, and periods, exactly two must be specified')
if ((freq is None) and ((not isinstance(start, Period)) and (not isinstance(end, Period)))):
freq = 'D'
(data, freq) = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name) | -1,241,766,003,733,699,300 | Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]') | env/Lib/site-packages/pandas/core/indexes/period.py | period_range | ATJWen/weather-app | python | def period_range(start=None, end=None, periods: (int | None)=None, freq=None, name=None) -> PeriodIndex:
'\n Return a fixed frequency PeriodIndex.\n\n The day (calendar) is the default frequency.\n\n Parameters\n ----------\n start : str or period-like, default None\n Left bound for generating periods.\n end : str or period-like, default None\n Right bound for generating periods.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, optional\n Frequency alias. By default the freq is taken from `start` or `end`\n if those are Period objects. Otherwise, the default is ``"D"`` for\n daily frequency.\n name : str, default None\n Name of the resulting PeriodIndex.\n\n Returns\n -------\n PeriodIndex\n\n Notes\n -----\n Of the three parameters: ``start``, ``end``, and ``periods``, exactly two\n must be specified.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.period_range(start=\'2017-01-01\', end=\'2018-01-01\', freq=\'M\')\n PeriodIndex([\'2017-01\', \'2017-02\', \'2017-03\', \'2017-04\', \'2017-05\', \'2017-06\',\n \'2017-07\', \'2017-08\', \'2017-09\', \'2017-10\', \'2017-11\', \'2017-12\',\n \'2018-01\'],\n dtype=\'period[M]\')\n\n If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor\n endpoints for a ``PeriodIndex`` with frequency matching that of the\n ``period_range`` constructor.\n\n >>> pd.period_range(start=pd.Period(\'2017Q1\', freq=\'Q\'),\n ... end=pd.Period(\'2017Q2\', freq=\'Q\'), freq=\'M\')\n PeriodIndex([\'2017-03\', \'2017-04\', \'2017-05\', \'2017-06\'],\n dtype=\'period[M]\')\n '
if (com.count_not_none(start, end, periods) != 2):
raise ValueError('Of the three parameters: start, end, and periods, exactly two must be specified')
if ((freq is None) and ((not isinstance(start, Period)) and (not isinstance(end, Period)))):
freq = 'D'
(data, freq) = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name) |
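A minimal usage sketch of the constraint enforced above (exactly two of start, end, and periods must be given); the dates are illustrative and assume a standard pandas install.

```python
import pandas as pd

# Exactly two of start / end / periods are supplied; here start + periods.
idx = pd.period_range(start="2017-01", periods=3, freq="M")
print(idx)  # PeriodIndex(['2017-01', '2017-02', '2017-03'], dtype='period[M]')

# Supplying all three (or only one) trips the ValueError raised above.
```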
def _maybe_convert_timedelta(self, other):
'\n Convert timedelta-like input to an integer multiple of self.freq\n\n Parameters\n ----------\n other : timedelta, np.timedelta64, DateOffset, int, np.ndarray\n\n Returns\n -------\n converted : int, np.ndarray[int64]\n\n Raises\n ------\n IncompatibleFrequency : if the input cannot be written as a multiple\n of self.freq. Note IncompatibleFrequency subclasses ValueError.\n '
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if (other.base == self.freq.base):
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
return other
raise raise_on_incompatible(self, None) | -2,410,665,731,165,831,700 | Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError. | env/Lib/site-packages/pandas/core/indexes/period.py | _maybe_convert_timedelta | ATJWen/weather-app | python | def _maybe_convert_timedelta(self, other):
'\n Convert timedelta-like input to an integer multiple of self.freq\n\n Parameters\n ----------\n other : timedelta, np.timedelta64, DateOffset, int, np.ndarray\n\n Returns\n -------\n converted : int, np.ndarray[int64]\n\n Raises\n ------\n IncompatibleFrequency : if the input cannot be written as a multiple\n of self.freq. Note IncompatibleFrequency subclasses ValueError.\n '
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if (other.base == self.freq.base):
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
return other
raise raise_on_incompatible(self, None) |
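The conversion above is what makes timedelta arithmetic on a tick-frequency PeriodIndex work; a small sketch of the user-facing behaviour, assuming a standard pandas install (the exact error message may vary by version).

```python
import pandas as pd

idx = pd.period_range("2020-01-01", periods=3, freq="D")

# A timedelta that is a whole multiple of the frequency becomes an integer shift.
print(idx + pd.Timedelta(days=2))  # every period moves forward by two days

# A non-multiple such as pd.Timedelta(hours=3) raises IncompatibleFrequency,
# which subclasses ValueError as the docstring notes.
```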
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
'\n Can we compare values of the given dtype to our own?\n '
if (not isinstance(dtype, PeriodDtype)):
return False
return (dtype.freq == self.freq) | 2,929,216,423,391,983,600 | Can we compare values of the given dtype to our own? | env/Lib/site-packages/pandas/core/indexes/period.py | _is_comparable_dtype | ATJWen/weather-app | python | def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
'\n \n '
if (not isinstance(dtype, PeriodDtype)):
return False
return (dtype.freq == self.freq) |
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
'\n where : array of timestamps\n mask : np.ndarray[bool]\n Array of booleans where data is not NA.\n '
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif (not isinstance(where, PeriodIndex)):
raise TypeError('asof_locs `where` must be DatetimeIndex or PeriodIndex')
return super().asof_locs(where, mask) | -2,531,526,199,883,752,400 | where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA. | env/Lib/site-packages/pandas/core/indexes/period.py | asof_locs | ATJWen/weather-app | python | def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
'\n where : array of timestamps\n mask : np.ndarray[bool]\n Array of booleans where data is not NA.\n '
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif (not isinstance(where, PeriodIndex)):
raise TypeError('asof_locs `where` must be DatetimeIndex or PeriodIndex')
return super().asof_locs(where, mask) |
@property
def is_full(self) -> bool:
'\n Returns True if this PeriodIndex is range-like in that all Periods\n between start and end are present, in order.\n '
if (len(self) == 0):
return True
if (not self.is_monotonic_increasing):
raise ValueError('Index is not monotonic')
values = self.asi8
return ((values[1:] - values[:(- 1)]) < 2).all() | -6,990,255,511,362,442,000 | Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order. | env/Lib/site-packages/pandas/core/indexes/period.py | is_full | ATJWen/weather-app | python | @property
def is_full(self) -> bool:
'\n Returns True if this PeriodIndex is range-like in that all Periods\n between start and end are present, in order.\n '
if (len(self) == 0):
return True
if (not self.is_monotonic_increasing):
raise ValueError('Index is not monotonic')
values = self.asi8
return ((values[1:] - values[:(- 1)]) < 2).all() |
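A short illustration of the property above; the gapped index is a made-up example and assumes a standard pandas install.

```python
import pandas as pd

full = pd.period_range("2020-01", periods=4, freq="M")       # consecutive months
gapped = pd.PeriodIndex(["2020-01", "2020-03"], freq="M")    # 2020-02 is missing

print(full.is_full)    # True: neighbouring ordinals never differ by 2 or more
print(gapped.is_full)  # False: the Jan -> Mar jump leaves a hole
```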
def get_loc(self, key, method=None, tolerance=None):
'\n Get integer location for requested label.\n\n Parameters\n ----------\n key : Period, NaT, str, or datetime\n String or datetime key must be parsable as Period.\n\n Returns\n -------\n loc : int or ndarray[int64]\n\n Raises\n ------\n KeyError\n Key is not present in the index.\n TypeError\n If key is listlike or otherwise not hashable.\n '
orig_key = key
if (not is_scalar(key)):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
(asdt, reso_str) = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
assert (grp >= freqn)
if ((grp == freqn) or ((reso == Resolution.RESO_DAY) and (self.dtype.freq.name == 'B'))):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif (method is None):
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if (not ((sfreq.n == kfreq.n) and (sfreq._period_dtype_code == kfreq._period_dtype_code))):
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
raise KeyError(orig_key) from err
else:
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err | -5,329,255,313,596,644,000 | Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable. | env/Lib/site-packages/pandas/core/indexes/period.py | get_loc | ATJWen/weather-app | python | def get_loc(self, key, method=None, tolerance=None):
'\n Get integer location for requested label.\n\n Parameters\n ----------\n key : Period, NaT, str, or datetime\n String or datetime key must be parsable as Period.\n\n Returns\n -------\n loc : int or ndarray[int64]\n\n Raises\n ------\n KeyError\n Key is not present in the index.\n TypeError\n If key is listlike or otherwise not hashable.\n '
orig_key = key
if (not is_scalar(key)):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
(asdt, reso_str) = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
assert (grp >= freqn)
if ((grp == freqn) or ((reso == Resolution.RESO_DAY) and (self.dtype.freq.name == 'B'))):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif (method is None):
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if (not ((sfreq.n == kfreq.n) and (sfreq._period_dtype_code == kfreq._period_dtype_code))):
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
raise KeyError(orig_key) from err
else:
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err |
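A brief sketch of the lookup paths above (exact Period key versus string key), with illustrative values on a monthly index; assumes a standard pandas install.

```python
import pandas as pd

idx = pd.period_range("2017-01", periods=3, freq="M")

print(idx.get_loc(pd.Period("2017-02", freq="M")))  # 1, exact Period key
print(idx.get_loc("2017-02"))                       # 1, string parsed as a Period
# A coarser string such as "2017" is handled by the string-slice branch above
# and matches every month of that year rather than a single position.
```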
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"\n If label is a string or a datetime, cast it to Period.ordinal according\n to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'}, or None\n\n Returns\n -------\n bound : Period or object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n "
assert (kind in ['loc', 'getitem', None, lib.no_default])
self._deprecated_arg(kind, 'kind', '_maybe_cast_slice_bound')
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
(parsed, reso_str) = parse_time_string(label, self.freq)
except ValueError as err:
raise self._invalid_indexer('slice', label) from err
reso = Resolution.from_attrname(reso_str)
(lower, upper) = self._parsed_string_to_bounds(reso, parsed)
return (lower if (side == 'left') else upper)
elif (not isinstance(label, self._data._recognized_scalars)):
raise self._invalid_indexer('slice', label)
return label | -8,794,501,317,859,449,000 | If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller. | env/Lib/site-packages/pandas/core/indexes/period.py | _maybe_cast_slice_bound | ATJWen/weather-app | python | def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"\n If label is a string or a datetime, cast it to Period.ordinal according\n to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'}, or None\n\n Returns\n -------\n bound : Period or object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n "
assert (kind in ['loc', 'getitem', None, lib.no_default])
self._deprecated_arg(kind, 'kind', '_maybe_cast_slice_bound')
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
(parsed, reso_str) = parse_time_string(label, self.freq)
except ValueError as err:
raise self._invalid_indexer('slice', label) from err
reso = Resolution.from_attrname(reso_str)
(lower, upper) = self._parsed_string_to_bounds(reso, parsed)
return (lower if (side == 'left') else upper)
elif (not isinstance(label, self._data._recognized_scalars)):
raise self._invalid_indexer('slice', label)
return label |
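The bound-casting above is what partial-string slicing relies on: a string bound is widened to the edge of its own resolution. A small sketch with illustrative dates, assuming a standard pandas install.

```python
import pandas as pd

idx = pd.period_range("2019-01", "2021-12", freq="M")
s = pd.Series(range(len(idx)), index=idx)

sub = s["2020-03":"2020-06"]
print(sub.index.min(), sub.index.max())  # 2020-03 2020-06 (both bounds included)
print(len(s["2020"]))                    # 12: "2020" widens to the whole year
```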
def fill(self):
'Intelligently sets any non-specific parameters.'
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int((self.feature_bagging_fraction * self.num_features))
self.bagged_features = None
if (self.feature_bagging_fraction < 1.0):
self.bagged_features = [random.sample(range(self.num_features), self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
self.num_outputs = (self.num_classes if self.regression else 1)
self.num_output_columns = (self.num_classes + 1)
self.max_depth = (self.max_depth or int((2 * math.ceil(math.log(self.max_nodes, 2)))))
self.num_splits_to_consider = (self.num_splits_to_consider or max(10, int(math.ceil(math.sqrt(self.num_features)))))
num_fertile = int(math.ceil((self.max_nodes / self.num_splits_to_consider)))
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = (self.max_fertile_nodes or num_fertile)
self.max_fertile_nodes = min(self.max_fertile_nodes, int(math.ceil((self.max_nodes / 2.0))))
num_split_initializiations_per_input = max(1, int(math.floor((self.num_splits_to_consider / self.split_after_samples))))
self.split_initializations_per_input = getattr(self, 'split_initializations_per_input', num_split_initializiations_per_input)
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self | 3,822,639,199,110,041,600 | Intelligently sets any non-specific parameters. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | fill | AdityaPai2398/tensorflow | python | def fill(self):
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int((self.feature_bagging_fraction * self.num_features))
self.bagged_features = None
if (self.feature_bagging_fraction < 1.0):
self.bagged_features = [random.sample(range(self.num_features), self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
self.num_outputs = (self.num_classes if self.regression else 1)
self.num_output_columns = (self.num_classes + 1)
self.max_depth = (self.max_depth or int((2 * math.ceil(math.log(self.max_nodes, 2)))))
self.num_splits_to_consider = (self.num_splits_to_consider or max(10, int(math.ceil(math.sqrt(self.num_features)))))
num_fertile = int(math.ceil((self.max_nodes / self.num_splits_to_consider)))
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = (self.max_fertile_nodes or num_fertile)
self.max_fertile_nodes = min(self.max_fertile_nodes, int(math.ceil((self.max_nodes / 2.0))))
num_split_initializiations_per_input = max(1, int(math.floor((self.num_splits_to_consider / self.split_after_samples))))
self.split_initializations_per_input = getattr(self, 'split_initializations_per_input', num_split_initializiations_per_input)
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self |
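The derived defaults in fill() are plain arithmetic; a standalone replay with made-up sizes (100 features, 10 000 max nodes) shows what they work out to.

```python
import math

num_features, max_nodes = 100, 10_000  # hypothetical sizes

max_depth = int(2 * math.ceil(math.log(max_nodes, 2)))                     # 28
num_splits_to_consider = max(10, int(math.ceil(math.sqrt(num_features))))  # 10
num_fertile = max(int(math.ceil(max_nodes / num_splits_to_consider)), 1000)
max_fertile_nodes = min(num_fertile, int(math.ceil(max_nodes / 2.0)))      # 1000

print(max_depth, num_splits_to_consider, max_fertile_nodes)
```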
def __init__(self, tree_stats, params):
'A simple container for stats about a forest.'
self.tree_stats = tree_stats
self.params = params | 3,002,426,196,251,461,600 | A simple container for stats about a forest. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | __init__ | AdityaPai2398/tensorflow | python | def __init__(self, tree_stats, params):
self.tree_stats = tree_stats
self.params = params |
def training_graph(self, input_data, input_labels, data_spec=None, epoch=None, **tree_kwargs):
"Constructs a TF graph for training a random forest.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n input_labels: A tensor or placeholder for labels associated with\n input_data.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n epoch: A tensor or placeholder for the epoch the training data comes from.\n **tree_kwargs: Keyword arguments passed to each tree's training_graph.\n\n Returns:\n The last op in the random forest training graph.\n "
data_spec = (([constants.DATA_FLOAT] * self.params.num_features) if (data_spec is None) else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if (seed != 0):
seed += i
tree_data = input_data
tree_labels = input_labels
if (self.params.bagging_fraction < 1.0):
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(r, (array_ops.ones_like(r) * self.params.bagging_fraction))
gather_indices = array_ops.squeeze(array_ops.where(mask), squeeze_dims=[1])
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(self.trees[i].training_graph(tree_data, tree_labels, seed, data_spec=data_spec, epoch=([0] if (epoch is None) else epoch), **tree_kwargs))
return control_flow_ops.group(*tree_graphs) | -2,788,288,756,385,881,600 | Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | training_graph | AdityaPai2398/tensorflow | python | def training_graph(self, input_data, input_labels, data_spec=None, epoch=None, **tree_kwargs):
"Constructs a TF graph for training a random forest.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n input_labels: A tensor or placeholder for labels associated with\n input_data.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n epoch: A tensor or placeholder for the epoch the training data comes from.\n **tree_kwargs: Keyword arguments passed to each tree's training_graph.\n\n Returns:\n The last op in the random forest training graph.\n "
data_spec = (([constants.DATA_FLOAT] * self.params.num_features) if (data_spec is None) else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if (seed != 0):
seed += i
tree_data = input_data
tree_labels = input_labels
if (self.params.bagging_fraction < 1.0):
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(r, (array_ops.ones_like(r) * self.params.bagging_fraction))
gather_indices = array_ops.squeeze(array_ops.where(mask), squeeze_dims=[1])
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(self.trees[i].training_graph(tree_data, tree_labels, seed, data_spec=data_spec, epoch=([0] if (epoch is None) else epoch), **tree_kwargs))
return control_flow_ops.group(*tree_graphs) |
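The per-tree bagging in the graph above keeps a row when its uniform draw falls below bagging_fraction; a NumPy re-statement of that idea (not the TensorFlow graph itself), with made-up sizes.

```python
import numpy as np

rng = np.random.default_rng(seed=0)
batch_size, bagging_fraction = 8, 0.5

# Mirrors the random_uniform / less / where / gather chain built per tree.
r = rng.uniform(size=batch_size)
keep = np.flatnonzero(r < bagging_fraction)
print(keep)  # row indices this particular tree would train on
```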
def inference_graph(self, input_data, data_spec=None):
'Constructs a TF graph for evaluating a random forest.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n\n Returns:\n The last op in the random forest inference graph.\n '
data_spec = (([constants.DATA_FLOAT] * self.params.num_features) if (data_spec is None) else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data, data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return (math_ops.reduce_sum(all_predict, 0) / self.params.num_trees) | 7,747,370,123,409,987,000 | Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | inference_graph | AdityaPai2398/tensorflow | python | def inference_graph(self, input_data, data_spec=None):
'Constructs a TF graph for evaluating a random forest.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n\n Returns:\n The last op in the random forest inference graph.\n '
data_spec = (([constants.DATA_FLOAT] * self.params.num_features) if (data_spec is None) else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data, data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return (math_ops.reduce_sum(all_predict, 0) / self.params.num_trees) |
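The final pack-and-divide step above is just an average over trees; a NumPy sketch with hypothetical per-tree class probabilities.

```python
import numpy as np

# 3 trees, 2 examples, 2 classes -- made-up probabilities.
per_tree = np.array([
    [[0.9, 0.1], [0.2, 0.8]],
    [[0.7, 0.3], [0.4, 0.6]],
    [[0.8, 0.2], [0.3, 0.7]],
])
forest = per_tree.sum(axis=0) / per_tree.shape[0]
print(forest)  # [[0.8 0.2] [0.3 0.7]]: the stacked-and-averaged prediction
```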
def average_size(self):
'Constructs a TF graph for evaluating the average size of a forest.\n\n Returns:\n The average number of nodes over the trees.\n '
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes)) | 5,671,812,050,120,021,000 | Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | average_size | AdityaPai2398/tensorflow | python | def average_size(self):
'Constructs a TF graph for evaluating the average size of a forest.\n\n Returns:\n The average number of nodes over the trees.\n '
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes)) |
def average_impurity(self):
'Constructs a TF graph for evaluating the leaf impurity of a forest.\n\n Returns:\n The last op in the graph.\n '
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities)) | -7,324,765,734,865,910,000 | Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | average_impurity | AdityaPai2398/tensorflow | python | def average_impurity(self):
'Constructs a TF graph for evaluating the leaf impurity of a forest.\n\n Returns:\n The last op in the graph.\n '
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities)) |
def _gini(self, class_counts):
'Calculate the Gini impurity.\n\n If c(i) denotes the i-th class count and c = sum_i c(i) then\n score = 1 - sum_i ( c(i) / c )^2\n\n Args:\n class_counts: A 2-D tensor of per-class counts, usually a slice or\n gather from variables.node_sums.\n\n Returns:\n A 1-D tensor of the Gini impurities for each row in the input.\n '
smoothed = (1.0 + array_ops.slice(class_counts, [0, 1], [(- 1), (- 1)]))
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return (1.0 - (sum_squares / (sums * sums))) | 7,108,791,516,632,742,000 | Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | _gini | AdityaPai2398/tensorflow | python | def _gini(self, class_counts):
'Calculate the Gini impurity.\n\n If c(i) denotes the i-th class count and c = sum_i c(i) then\n score = 1 - sum_i ( c(i) / c )^2\n\n Args:\n class_counts: A 2-D tensor of per-class counts, usually a slice or\n gather from variables.node_sums.\n\n Returns:\n A 1-D tensor of the Gini impurities for each row in the input.\n '
smoothed = (1.0 + array_ops.slice(class_counts, [0, 1], [(- 1), (- 1)]))
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return (1.0 - (sum_squares / (sums * sums))) |
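A worked replay of the smoothing and Gini formula above, using a single node with made-up counts laid out as [total, class_0, class_1].

```python
import numpy as np

counts = np.array([[10.0, 6.0, 4.0]])   # [total, class_0, class_1]

smoothed = 1.0 + counts[:, 1:]          # drop the total column, add +1 -> [7, 5]
sums = smoothed.sum(axis=1)             # 12
gini = 1.0 - (smoothed ** 2).sum(axis=1) / (sums * sums)
print(gini)                             # [0.4861...]: 1 - 74/144
```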
def _weighted_gini(self, class_counts):
'Our split score is the Gini impurity times the number of examples.\n\n If c(i) denotes the i-th class count and c = sum_i c(i) then\n score = c * (1 - sum_i ( c(i) / c )^2 )\n = c - sum_i c(i)^2 / c\n Args:\n class_counts: A 2-D tensor of per-class counts, usually a slice or\n gather from variables.node_sums.\n\n Returns:\n A 1-D tensor of the Gini impurities for each row in the input.\n '
smoothed = (1.0 + array_ops.slice(class_counts, [0, 1], [(- 1), (- 1)]))
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return (sums - (sum_squares / sums)) | 6,267,550,326,469,067,000 | Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | _weighted_gini | AdityaPai2398/tensorflow | python | def _weighted_gini(self, class_counts):
'Our split score is the Gini impurity times the number of examples.\n\n If c(i) denotes the i-th class count and c = sum_i c(i) then\n score = c * (1 - sum_i ( c(i) / c )^2 )\n = c - sum_i c(i)^2 / c\n Args:\n class_counts: A 2-D tensor of per-class counts, usually a slice or\n gather from variables.node_sums.\n\n Returns:\n A 1-D tensor of the Gini impurities for each row in the input.\n '
smoothed = (1.0 + array_ops.slice(class_counts, [0, 1], [(- 1), (- 1)]))
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return (sums - (sum_squares / sums)) |
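The same made-up counts run through the weighted form above; note the result is the example count times the plain Gini impurity.

```python
import numpy as np

counts = np.array([[10.0, 6.0, 4.0]])   # [total, class_0, class_1]
smoothed = 1.0 + counts[:, 1:]          # [7, 5]
sums = smoothed.sum(axis=1)             # 12
score = sums - (smoothed ** 2).sum(axis=1) / sums
print(score)                            # [5.8333...]: 12 - 74/12 = 12 * 0.4861
```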
def _variance(self, sums, squares):
'Calculate the variance for each row of the input tensors.\n\n Variance is V = E[x^2] - (E[x])^2.\n\n Args:\n sums: A tensor containing output sums, usually a slice from\n variables.node_sums. Should contain the number of examples seen\n in index 0 so we can calculate expected value.\n squares: Same as sums, but sums of squares.\n\n Returns:\n A 1-D tensor of the variances for each row in the input.\n '
total_count = array_ops.slice(sums, [0, 0], [(- 1), 1])
e_x = (sums / total_count)
e_x2 = (squares / total_count)
return math_ops.reduce_sum((e_x2 - math_ops.square(e_x)), 1) | -4,835,720,901,682,458,000 | Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | _variance | AdityaPai2398/tensorflow | python | def _variance(self, sums, squares):
'Calculate the variance for each row of the input tensors.\n\n Variance is V = E[x^2] - (E[x])^2.\n\n Args:\n sums: A tensor containing output sums, usually a slice from\n variables.node_sums. Should contain the number of examples seen\n in index 0 so we can calculate expected value.\n squares: Same as sums, but sums of squares.\n\n Returns:\n A 1-D tensor of the variances for each row in the input.\n '
total_count = array_ops.slice(sums, [0, 0], [(- 1), 1])
e_x = (sums / total_count)
e_x2 = (squares / total_count)
return math_ops.reduce_sum((e_x2 - math_ops.square(e_x)), 1) |
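A worked replay of the variance formula above: four examples whose single output is [1, 2, 3, 4], with the example count stored in column 0 as the docstring describes.

```python
import numpy as np

sums = np.array([[4.0, 10.0]])     # [count, sum(x)]
squares = np.array([[4.0, 30.0]])  # [count, sum(x^2)]

total = sums[:, :1]
e_x, e_x2 = sums / total, squares / total
variance = (e_x2 - e_x ** 2).sum(axis=1)
print(variance)  # [1.25]: the count column contributes 0, the output 7.5 - 2.5**2
```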
def training_graph(self, input_data, input_labels, random_seed, data_spec, epoch=None):
'Constructs a TF graph for training a random tree.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n input_labels: A tensor or placeholder for labels associated with\n input_data.\n random_seed: The random number generator seed to use for this tree. 0\n means use the current time as the seed.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n epoch: A tensor or placeholder for the epoch the training data comes from.\n\n Returns:\n The last op in the random tree training graph.\n '
epoch = ([0] if (epoch is None) else epoch)
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
(node_sums, node_squares, splits_indices, splits_sums, splits_squares, totals_indices, totals_sums, totals_squares, input_leaves) = self.training_ops.count_extremely_random_stats(input_data, sparse_indices, sparse_values, sparse_shape, data_spec, input_labels, self.variables.tree, self.variables.tree_thresholds, self.variables.node_to_accumulator_map, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, self.variables.start_epoch, epoch, num_classes=self.params.num_output_columns, regression=self.params.regression)
node_update_ops = []
node_update_ops.append(state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.candidate_split_sums, splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.accumulator_sums, totals_indices, totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares, node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.candidate_split_squares, splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.accumulator_squares, totals_indices, totals_squares))
(update_indices, feature_updates, threshold_updates) = self.training_ops.sample_inputs(input_data, sparse_indices, sparse_values, sparse_shape, self.variables.node_to_accumulator_map, input_leaves, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, split_initializations_per_input=self.params.split_initializations_per_input, split_sampling_random_seed=random_seed)
update_features_op = state_ops.scatter_update(self.variables.candidate_split_features, update_indices, feature_updates)
update_thresholds_op = state_ops.scatter_update(self.variables.candidate_split_thresholds, update_indices, threshold_updates)
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(self.variables.tree, [0, 0], [(- 1), 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf), squeeze_dims=[1]))
(finished, stale) = self.training_ops.finished_nodes(leaves, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, self.variables.start_epoch, epoch, num_split_after_samples=self.params.split_after_samples, min_split_samples=self.params.min_split_samples)
non_fertile_leaves = array_ops.boolean_mask(leaves, math_ops.less(array_ops.gather(self.variables.node_to_accumulator_map, leaves), 0))
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares, non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(finished, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, regression=self.params.regression)
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates, tree_threshold_updates, tree_depth_updates, new_eot) = self.training_ops.grow_tree(self.variables.end_of_tree, self.variables.tree_depths, self.variables.node_to_accumulator_map, finished, split_indices, self.variables.candidate_split_features, self.variables.candidate_split_thresholds)
tree_update_op = state_ops.scatter_update(self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(self.variables.tree_thresholds, tree_update_indices, tree_threshold_updates)
depth_update_op = state_ops.scatter_update(self.variables.tree_depths, tree_update_indices, tree_depth_updates)
new_epoch_updates = (epoch * array_ops.ones_like(tree_depth_updates))
epoch_update_op = state_ops.scatter_update(self.variables.start_epoch, tree_update_indices, new_epoch_updates)
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = self.training_ops.update_fertile_slots(finished, non_fertile_leaves, non_fertile_leaf_scores, self.variables.end_of_tree, self.variables.tree_depths, self.variables.accumulator_sums, self.variables.node_to_accumulator_map, stale, max_depth=self.params.max_depth, regression=self.params.regression)
(gated_new_eot,) = control_flow_ops.tuple([new_eot], control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(self.variables.node_to_accumulator_map, array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, (- 1)]), squeeze_dims=[0]), array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, (- 1)]), squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(0, [accumulators_cleared, accumulators_allocated])
split_values = array_ops.tile(array_ops.expand_dims(array_ops.expand_dims(array_ops.zeros_like(cleared_and_allocated_accumulators, dtype=dtypes.float32), 1), 2), [1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(self.variables.candidate_split_sums, cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(self.variables.candidate_split_squares, cleared_and_allocated_accumulators, split_values))
total_cleared = array_ops.tile(array_ops.expand_dims(math_ops.neg(array_ops.ones_like(accumulators_cleared, dtype=dtypes.float32)), 1), [1, self.params.num_output_columns])
total_reset = array_ops.tile(array_ops.expand_dims(array_ops.zeros_like(accumulators_allocated, dtype=dtypes.float32), 1), [1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(self.variables.accumulator_sums, cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(self.variables.accumulator_squares, cleared_and_allocated_accumulators, accumulator_updates))
split_features_updates = array_ops.tile(array_ops.expand_dims(math_ops.neg(array_ops.ones_like(cleared_and_allocated_accumulators)), 1), [1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(self.variables.candidate_split_features, cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates) | -5,841,729,855,553,593,000 | Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | training_graph | AdityaPai2398/tensorflow | python | def training_graph(self, input_data, input_labels, random_seed, data_spec, epoch=None):
'Constructs a TF graph for training a random tree.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n input_labels: A tensor or placeholder for labels associated with\n input_data.\n random_seed: The random number generator seed to use for this tree. 0\n means use the current time as the seed.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n epoch: A tensor or placeholder for the epoch the training data comes from.\n\n Returns:\n The last op in the random tree training graph.\n '
epoch = ([0] if (epoch is None) else epoch)
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
(node_sums, node_squares, splits_indices, splits_sums, splits_squares, totals_indices, totals_sums, totals_squares, input_leaves) = self.training_ops.count_extremely_random_stats(input_data, sparse_indices, sparse_values, sparse_shape, data_spec, input_labels, self.variables.tree, self.variables.tree_thresholds, self.variables.node_to_accumulator_map, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, self.variables.start_epoch, epoch, num_classes=self.params.num_output_columns, regression=self.params.regression)
node_update_ops = []
node_update_ops.append(state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.candidate_split_sums, splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.accumulator_sums, totals_indices, totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares, node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.candidate_split_squares, splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(self.variables.accumulator_squares, totals_indices, totals_squares))
(update_indices, feature_updates, threshold_updates) = self.training_ops.sample_inputs(input_data, sparse_indices, sparse_values, sparse_shape, self.variables.node_to_accumulator_map, input_leaves, self.variables.candidate_split_features, self.variables.candidate_split_thresholds, split_initializations_per_input=self.params.split_initializations_per_input, split_sampling_random_seed=random_seed)
update_features_op = state_ops.scatter_update(self.variables.candidate_split_features, update_indices, feature_updates)
update_thresholds_op = state_ops.scatter_update(self.variables.candidate_split_thresholds, update_indices, threshold_updates)
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(self.variables.tree, [0, 0], [(- 1), 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf), squeeze_dims=[1]))
(finished, stale) = self.training_ops.finished_nodes(leaves, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, self.variables.start_epoch, epoch, num_split_after_samples=self.params.split_after_samples, min_split_samples=self.params.min_split_samples)
non_fertile_leaves = array_ops.boolean_mask(leaves, math_ops.less(array_ops.gather(self.variables.node_to_accumulator_map, leaves), 0))
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares, non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(finished, self.variables.node_to_accumulator_map, self.variables.candidate_split_sums, self.variables.candidate_split_squares, self.variables.accumulator_sums, self.variables.accumulator_squares, regression=self.params.regression)
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates, tree_threshold_updates, tree_depth_updates, new_eot) = self.training_ops.grow_tree(self.variables.end_of_tree, self.variables.tree_depths, self.variables.node_to_accumulator_map, finished, split_indices, self.variables.candidate_split_features, self.variables.candidate_split_thresholds)
tree_update_op = state_ops.scatter_update(self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(self.variables.tree_thresholds, tree_update_indices, tree_threshold_updates)
depth_update_op = state_ops.scatter_update(self.variables.tree_depths, tree_update_indices, tree_depth_updates)
new_epoch_updates = (epoch * array_ops.ones_like(tree_depth_updates))
epoch_update_op = state_ops.scatter_update(self.variables.start_epoch, tree_update_indices, new_epoch_updates)
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = self.training_ops.update_fertile_slots(finished, non_fertile_leaves, non_fertile_leaf_scores, self.variables.end_of_tree, self.variables.tree_depths, self.variables.accumulator_sums, self.variables.node_to_accumulator_map, stale, max_depth=self.params.max_depth, regression=self.params.regression)
(gated_new_eot,) = control_flow_ops.tuple([new_eot], control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(self.variables.node_to_accumulator_map, array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, (- 1)]), squeeze_dims=[0]), array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, (- 1)]), squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(0, [accumulators_cleared, accumulators_allocated])
split_values = array_ops.tile(array_ops.expand_dims(array_ops.expand_dims(array_ops.zeros_like(cleared_and_allocated_accumulators, dtype=dtypes.float32), 1), 2), [1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(self.variables.candidate_split_sums, cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(self.variables.candidate_split_squares, cleared_and_allocated_accumulators, split_values))
total_cleared = array_ops.tile(array_ops.expand_dims(math_ops.neg(array_ops.ones_like(accumulators_cleared, dtype=dtypes.float32)), 1), [1, self.params.num_output_columns])
total_reset = array_ops.tile(array_ops.expand_dims(array_ops.zeros_like(accumulators_allocated, dtype=dtypes.float32), 1), [1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(self.variables.accumulator_sums, cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(self.variables.accumulator_squares, cleared_and_allocated_accumulators, accumulator_updates))
split_features_updates = array_ops.tile(array_ops.expand_dims(math_ops.neg(array_ops.ones_like(cleared_and_allocated_accumulators)), 1), [1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(self.variables.candidate_split_features, cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates) |
def finish_iteration(self):
'Perform any operations that should be done at the end of an iteration.\n\n This is mostly useful for subclasses that need to reset variables after\n an iteration, such as ones that are used to finish nodes.\n\n Returns:\n A list of operations.\n '
return [] | -114,024,798,016,085,220 | Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | finish_iteration | AdityaPai2398/tensorflow | python | def finish_iteration(self):
'Perform any operations that should be done at the end of an iteration.\n\n This is mostly useful for subclasses that need to reset variables after\n an iteration, such as ones that are used to finish nodes.\n\n Returns:\n A list of operations.\n '
return [] |
def inference_graph(self, input_data, data_spec):
'Constructs a TF graph for evaluating a random tree.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n\n Returns:\n The last op in the random tree inference graph.\n '
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(input_data, sparse_indices, sparse_values, sparse_shape, data_spec, self.variables.tree, self.variables.tree_thresholds, self.variables.node_sums, valid_leaf_threshold=self.params.valid_leaf_threshold) | -1,317,678,232,807,222,500 | Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | inference_graph | AdityaPai2398/tensorflow | python | def inference_graph(self, input_data, data_spec):
'Constructs a TF graph for evaluating a random tree.\n\n Args:\n input_data: A tensor or SparseTensor or placeholder for input data.\n data_spec: A list of tf.dtype values specifying the original types of\n each column.\n\n Returns:\n The last op in the random tree inference graph.\n '
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(input_data, sparse_indices, sparse_values, sparse_shape, data_spec, self.variables.tree, self.variables.tree_thresholds, self.variables.node_sums, valid_leaf_threshold=self.params.valid_leaf_threshold) |
def average_impurity(self):
'Constructs a TF graph for evaluating the average leaf impurity of a tree.\n\n If in regression mode, this is the leaf variance. If in classification mode,\n this is the gini impurity.\n\n Returns:\n The last op in the graph.\n '
children = array_ops.squeeze(array_ops.slice(self.variables.tree, [0, 0], [(- 1), 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf), squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
def impurity():
return gini
def big():
return (array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.0)
return control_flow_ops.cond(math_ops.greater(array_ops.shape(leaves)[0], 0), impurity, big) | 2,271,007,417,708,949,200 | Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | average_impurity | AdityaPai2398/tensorflow | python | def average_impurity(self):
'Constructs a TF graph for evaluating the average leaf impurity of a tree.\n\n If in regression mode, this is the leaf variance. If in classification mode,\n this is the gini impurity.\n\n Returns:\n The last op in the graph.\n '
children = array_ops.squeeze(array_ops.slice(self.variables.tree, [0, 0], [(- 1), 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf), squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
def impurity():
return gini
def big():
return (array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.0)
return control_flow_ops.cond(math_ops.greater(array_ops.shape(leaves)[0], 0), impurity, big) |
def size(self):
'Constructs a TF graph for evaluating the current number of nodes.\n\n Returns:\n The current number of nodes in the tree.\n '
return (self.variables.end_of_tree - 1) | 4,745,050,360,644,350,000 | Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree. | tensorflow/contrib/tensor_forest/python/tensor_forest.py | size | AdityaPai2398/tensorflow | python | def size(self):
'Constructs a TF graph for evaluating the current number of nodes.\n\n Returns:\n The current number of nodes in the tree.\n '
return (self.variables.end_of_tree - 1) |
def __init__(self, floatingip=None):
'NeutronCreateFloatingIpRequestBody - a model defined in huaweicloud sdk'
self._floatingip = None
self.discriminator = None
self.floatingip = floatingip | -8,986,675,368,031,841,000 | NeutronCreateFloatingIpRequestBody - a model defined in huaweicloud sdk | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | __init__ | huaweicloud/huaweicloud-sdk-python-v3 | python | def __init__(self, floatingip=None):
self._floatingip = None
self.discriminator = None
self.floatingip = floatingip |
@property
def floatingip(self):
'Gets the floatingip of this NeutronCreateFloatingIpRequestBody.\n\n\n :return: The floatingip of this NeutronCreateFloatingIpRequestBody.\n :rtype: CreateFloatingIpOption\n '
return self._floatingip | 1,985,792,057,117,326,000 | Gets the floatingip of this NeutronCreateFloatingIpRequestBody.
:return: The floatingip of this NeutronCreateFloatingIpRequestBody.
:rtype: CreateFloatingIpOption | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | floatingip | huaweicloud/huaweicloud-sdk-python-v3 | python | @property
def floatingip(self):
'Gets the floatingip of this NeutronCreateFloatingIpRequestBody.\n\n\n :return: The floatingip of this NeutronCreateFloatingIpRequestBody.\n :rtype: CreateFloatingIpOption\n '
return self._floatingip |
@floatingip.setter
def floatingip(self, floatingip):
'Sets the floatingip of this NeutronCreateFloatingIpRequestBody.\n\n\n :param floatingip: The floatingip of this NeutronCreateFloatingIpRequestBody.\n :type: CreateFloatingIpOption\n '
self._floatingip = floatingip | -5,082,099,477,760,268,000 | Sets the floatingip of this NeutronCreateFloatingIpRequestBody.
:param floatingip: The floatingip of this NeutronCreateFloatingIpRequestBody.
:type: CreateFloatingIpOption | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | floatingip | huaweicloud/huaweicloud-sdk-python-v3 | python | @floatingip.setter
def floatingip(self, floatingip):
'Sets the floatingip of this NeutronCreateFloatingIpRequestBody.\n\n\n :param floatingip: The floatingip of this NeutronCreateFloatingIpRequestBody.\n :type: CreateFloatingIpOption\n '
self._floatingip = floatingip |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | 2,594,216,033,120,720,000 | Returns the model properties as a dict | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | to_dict | huaweicloud/huaweicloud-sdk-python-v3 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result |
def to_str(self):
'Returns the string representation of the model'
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) | -6,095,553,759,700,562,000 | Returns the string representation of the model | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | to_str | huaweicloud/huaweicloud-sdk-python-v3 | python | def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False) |
def __repr__(self):
'For `print`'
return self.to_str() | -1,581,176,371,750,213,000 | For `print` | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | __repr__ | huaweicloud/huaweicloud-sdk-python-v3 | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, NeutronCreateFloatingIpRequestBody)):
return False
return (self.__dict__ == other.__dict__) | 1,684,303,059,840,454,000 | Returns true if both objects are equal | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | __eq__ | huaweicloud/huaweicloud-sdk-python-v3 | python | def __eq__(self, other):
if (not isinstance(other, NeutronCreateFloatingIpRequestBody)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | __ne__ | huaweicloud/huaweicloud-sdk-python-v3 | python | def __ne__(self, other):
return (not (self == other)) |
@pytest.mark.parametrize('SearchCV', [HalvingRandomSearchCV, HalvingGridSearchCV])
def test_min_resources_null(SearchCV):
'Check that we raise an error if the minimum resources is set to 0.'
base_estimator = FastClassifier()
param_grid = {'a': [1]}
X = np.empty(0).reshape(0, 3)
search = SearchCV(base_estimator, param_grid, min_resources='smallest')
err_msg = 'min_resources_=0: you might have passed an empty dataset X.'
with pytest.raises(ValueError, match=err_msg):
search.fit(X, []) | -706,482,965,388,153,000 | Check that we raise an error if the minimum resources is set to 0. | sklearn/model_selection/tests/test_successive_halving.py | test_min_resources_null | 3021104750/scikit-learn | python | @pytest.mark.parametrize('SearchCV', [HalvingRandomSearchCV, HalvingGridSearchCV])
def test_min_resources_null(SearchCV):
base_estimator = FastClassifier()
param_grid = {'a': [1]}
X = np.empty(0).reshape(0, 3)
search = SearchCV(base_estimator, param_grid, min_resources='smallest')
err_msg = 'min_resources_=0: you might have passed an empty dataset X.'
with pytest.raises(ValueError, match=err_msg):
search.fit(X, []) |
@pytest.mark.parametrize('SearchCV', [HalvingGridSearchCV, HalvingRandomSearchCV])
def test_select_best_index(SearchCV):
'Check the selection strategy of the halving search.'
results = {'iter': np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]), 'mean_test_score': np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]), 'params': np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])}
best_index = SearchCV._select_best_index(None, None, results)
assert (best_index == 8) | -8,218,927,456,292,474,000 | Check the selection strategy of the halving search. | sklearn/model_selection/tests/test_successive_halving.py | test_select_best_index | 3021104750/scikit-learn | python | @pytest.mark.parametrize('SearchCV', [HalvingGridSearchCV, HalvingRandomSearchCV])
def test_select_best_index(SearchCV):
results = {'iter': np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]), 'mean_test_score': np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]), 'params': np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])}
best_index = SearchCV._select_best_index(None, None, results)
assert (best_index == 8) |
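The assertion above hinges on how successive halving picks its winner: only candidates that survived to the last iteration are eligible, so 'i' (score 9 at iter 2) beats 'e' (score 11, but eliminated after iter 1). Below is a standalone sketch of that selection rule, written here for illustration rather than copied from scikit-learn's internals:

import numpy as np

def select_best_index(results):
    # Restrict to the final halving iteration, then take the best mean test score.
    last_iter = results['iter'].max()
    eligible = results['iter'] == last_iter
    scores = np.where(eligible, results['mean_test_score'], -np.inf)
    return int(np.argmax(scores))

results = {'iter': np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
           'mean_test_score': np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
           'params': np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])}
assert select_best_index(results) == 8   # 'i' wins among the iter == 2 survivors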
def drawline():
"Tracé d'une ligne dans le canevas can1"
global x1, y1, x2, y2, coul
can1.create_line(x1, y1, x2, y2, width=2, fill=coul)
(y2, y1) = ((y2 + 10), (y1 - 10)) | 3,233,638,542,157,701,600 | Draws a line in the can1 canvas | Exemples cours 4/TK_Line.py | drawline | geocot/coursPython | python | def drawline():
global x1, y1, x2, y2, coul
can1.create_line(x1, y1, x2, y2, width=2, fill=coul)
(y2, y1) = ((y2 + 10), (y1 - 10)) |
def changecolor():
'Randomly changes the drawing color'
global coul
pal = ['purple', 'cyan', 'maroon', 'green', 'red', 'blue', 'orange', 'yellow']
c = randrange(8)
coul = pal[c] | -6,397,451,742,445,943,000 | Randomly changes the drawing color | Exemples cours 4/TK_Line.py | changecolor | geocot/coursPython | python | def changecolor():
global coul
pal = ['purple', 'cyan', 'maroon', 'green', 'red', 'blue', 'orange', 'yellow']
c = randrange(8)
coul = pal[c] |
@tf.function
def mse_loss(static, moving):
'Computes the mean squared error (MSE) loss.\n\n Currently, only 4-D inputs are supported.\n\n Parameters\n ----------\n static : tf.Tensor, shape (N, H, W, C)\n The static image to which the moving image is aligned.\n moving : tf.Tensor, shape (N, H, W, C)\n The moving image, the same shape as the static image.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n Mean squared error between the static and the moving images,\n averaged over the batch.\n '
loss = tf.reduce_mean(tf.square((moving - static)))
return loss | -8,802,986,864,010,985,000 | Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch. | register_basics.py | mse_loss | jerinka/voxelmorph_demo | python | @tf.function
def mse_loss(static, moving):
'Computes the mean squared error (MSE) loss.\n\n Currently, only 4-D inputs are supported.\n\n Parameters\n ----------\n static : tf.Tensor, shape (N, H, W, C)\n The static image to which the moving image is aligned.\n moving : tf.Tensor, shape (N, H, W, C)\n The moving image, the same shape as the static image.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n Mean squared error between the static and the moving images,\n averaged over the batch.\n '
loss = tf.reduce_mean(tf.square((moving - static)))
return loss |
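A quick sanity check of this loss: identical images give 0, and a constant intensity offset gives its square. The sketch assumes TensorFlow 2.x eager execution:

import tensorflow as tf

static = tf.zeros((1, 32, 32, 1))               # blank target image
moving = tf.fill((1, 32, 32, 1), 0.5)           # uniform gray image
print(mse_loss(static, static).numpy())         # 0.0, perfect alignment
print(mse_loss(static, moving).numpy())         # 0.25, i.e. 0.5 squared at every pixel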
@tf.function
def ncc_loss(static, moving):
'Computes the normalized cross-correlation (NCC) loss.\n\n Currently, only 4-D inputs are supported.\n\n Parameters\n ----------\n static : tf.Tensor, shape (N, H, W, C)\n The static image to which the moving image is aligned.\n moving : tf.Tensor, shape (N, H, W, C)\n The moving image, the same shape as the static image.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n Normalized cross-correlation loss between the static and the\n moving images, averaged over the batch. Range is [-1.0, 1.0].\n The best value is -1 (perfect match) and the worst is 1.\n\n References\n ----------\n .. [1] `Wikipedia entry for the Cross-correlation\n <https://en.wikipedia.org/wiki/Cross-correlation>`_\n '
eps = tf.constant(1e-09, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
static_hat = ((static - static_mean) / (static_std + eps))
moving_hat = ((moving - moving_mean) / (moving_std + eps))
ncc = tf.reduce_mean((static_hat * moving_hat))
loss = (- ncc)
return loss | -1,974,962,980,259,870,200 | Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_ | register_basics.py | ncc_loss | jerinka/voxelmorph_demo | python | @tf.function
def ncc_loss(static, moving):
'Computes the normalized cross-correlation (NCC) loss.\n\n Currently, only 4-D inputs are supported.\n\n Parameters\n ----------\n static : tf.Tensor, shape (N, H, W, C)\n The static image to which the moving image is aligned.\n moving : tf.Tensor, shape (N, H, W, C)\n The moving image, the same shape as the static image.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n Normalized cross-correlation loss between the static and the\n moving images, averaged over the batch. Range is [-1.0, 1.0].\n The best value is -1 (perfect match) and the worst is 1.\n\n References\n ----------\n .. [1] `Wikipedia entry for the Cross-correlation\n <https://en.wikipedia.org/wiki/Cross-correlation>`_\n '
eps = tf.constant(1e-09, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
static_hat = ((static - static_mean) / (static_std + eps))
moving_hat = ((moving - moving_mean) / (moving_std + eps))
ncc = tf.reduce_mean((static_hat * moving_hat))
loss = (- ncc)
return loss |
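Because each image is standardized with its own mean and standard deviation before the correlation, the loss ignores global brightness and contrast changes; only structural agreement matters. A small check under the same TensorFlow 2.x assumption:

import tensorflow as tf

static = tf.random.uniform((1, 32, 32, 1))
brighter = static * 3.0 + 0.7                    # same structure, different intensity scale
print(ncc_loss(static, brighter).numpy())        # close to -1.0, the best possible value
print(ncc_loss(static, -static).numpy())         # close to +1.0, perfectly anti-correlated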
def simple_cnn(input_shape=(32, 32, 2)):
"Creates a 2-D convolutional encoder-decoder network.\n\n Parameters\n ----------\n input_shape : sequence of ints, optional\n Input data shape of the form (H, W, C). Default is (32, 32, 2).\n\n Returns\n -------\n model\n An instance of Keras' Model class.\n\n Notes\n -----\n Given a concatenated pair of static and moving images as input, the\n CNN computes a dense displacement field that is used to warp the\n moving image to match with the static image.\n\n The number of channels in the output (displacement field) is equal\n to the dimensionality of the input data. For 3-D volumes, it is 3,\n and for 2-D images, it is 2. The first channel comprises\n displacement in the x-direction and the second comprises\n displacement in the y-direction.\n "
out_channels = 2
inputs = layers.Input(shape=input_shape)
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same', activation='relu')(inputs)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPool2D(pool_size=2)(x)
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPool2D(pool_size=2)(x)
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2, padding='same')(x)
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2, padding='same')(x)
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2, padding='same')(x)
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(out_channels, kernel_size=1, strides=1, padding='same')(x)
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
'\n Differentiable image sampling\n References:\n 1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py\n 2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial\n transformer networks." Advances in neural information processing\n systems. 2015. https://arxiv.org/pdf/1506.02025.pdf\n 3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5\n ' | 4,992,043,161,819,919,000 | Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction. | register_basics.py | simple_cnn | jerinka/voxelmorph_demo | python | def simple_cnn(input_shape=(32, 32, 2)):
"Creates a 2-D convolutional encoder-decoder network.\n\n Parameters\n ----------\n input_shape : sequence of ints, optional\n Input data shape of the form (H, W, C). Default is (32, 32, 2).\n\n Returns\n -------\n model\n An instance of Keras' Model class.\n\n Notes\n -----\n Given a concatenated pair of static and moving images as input, the\n CNN computes a dense displacement field that is used to warp the\n moving image to match with the static image.\n\n The number of channels in the output (displacement field) is equal\n to the dimensionality of the input data. For 3-D volumes, it is 3,\n and for 2-D images, it is 2. The first channel comprises\n displacement in the x-direction and the second comprises\n displacement in the y-direction.\n "
out_channels = 2
inputs = layers.Input(shape=input_shape)
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same', activation='relu')(inputs)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPool2D(pool_size=2)(x)
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPool2D(pool_size=2)(x)
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2, padding='same')(x)
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2, padding='same')(x)
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2, padding='same')(x)
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same', activation='relu')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(out_channels, kernel_size=1, strides=1, padding='same')(x)
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
'\n Differentiable image sampling\n References:\n 1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py\n 2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial\n transformer networks." Advances in neural information processing\n systems. 2015. https://arxiv.org/pdf/1506.02025.pdf\n 3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5\n '
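The encoder halves the 32x32 input three times (one strided convolution plus two max-pools) down to 4x4, and the three transposed convolutions bring it back to full resolution with two channels, one per displacement direction. A quick shape check, again assuming TensorFlow 2.x:

import tensorflow as tf

net = simple_cnn(input_shape=(32, 32, 2))
pair = tf.random.uniform((4, 32, 32, 2))    # batch of concatenated (moving, static) pairs
flow = net(pair)                            # dense displacement field
print(flow.shape)                           # (4, 32, 32, 2): dx in channel 0, dy in channel 1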
@tf.function
def grid_sample(moving, grid):
'Given a moving image and a sampling grid as input, computes the\n transformed image by sampling the moving image at locations given by\n the grid.\n\n Currently, only 2-D images, i.e., 4-D inputs are supported.\n\n Parameters\n ----------\n moving : tf.Tensor, shape (N, H, W, C)\n The moving image.\n grid : tf.Tensor, shape (N, H, W, C)\n A tensor of sampling points (x, y). The x and y values should be\n normalized to [-1.0, 1.0] range.\n\n Returns\n -------\n moved : tf.Tensor, shape (N, H, W, C)\n The transformed image.\n\n Notes\n -----\n Let M be the moving image of shape (H, W, C), T be the transformed\n image of the same shape and G be the 2-D sampling grid of shape\n (H, W, 2). The value of T at a location (x, y) is T[y, x, :] =\n M[y\', x\', :] where [x\', y\'] = G[y, x, :].\n\n Further, [x\', y\'] = [x + dx, y + dy] where [dx, dy] are the\n displacements outputted by the CNN. When dx and dy are 0, the\n sampling grid G is a regular grid and the transformed image is the\n same as the moving image.\n\n Since the sampling point (x + dx, y + dy) can be non-integral, the\n value M[y\', x\'] is calculated using bi-linear interpolation.\n\n References\n ----------\n .. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial\n transformer networks." Advances in neural information processing\n systems. 2015. <https://arxiv.org/abs/1506.02025>`_\n .. [2] `TensorFlow implementation of spatial transformer networks.\n <https://github.com/tensorflow/models/tree/master/research/transformer>`_\n .. [3] `Spatial Transformer Networks by Kushagra Bhatnagar\n <https://link.medium.com/0b2OrmqVO5>`_\n '
(nb, nh, nw, nc) = moving.shape
x = grid[(..., 0)]
y = grid[(..., 1)]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
x = (((x + 1.0) * 0.5) * tf.cast(nw, 'float32'))
y = (((y + 1.0) * 0.5) * tf.cast(nh, 'float32'))
y_max = tf.cast((nh - 1), 'int32')
x_max = tf.cast((nw - 1), 'int32')
zero = tf.constant(0, 'int32')
x0 = tf.cast(tf.floor(x), 'int32')
x1 = (x0 + 1)
y0 = tf.cast(tf.floor(y), 'int32')
y1 = (y0 + 1)
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
b = (tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1]))
idx_a = tf.stack([b, y0, x0], axis=(- 1))
idx_b = tf.stack([b, y1, x0], axis=(- 1))
idx_c = tf.stack([b, y0, x1], axis=(- 1))
idx_d = tf.stack([b, y1, x1], axis=(- 1))
moving_a = tf.gather_nd(moving, idx_a)
moving_b = tf.gather_nd(moving, idx_b)
moving_c = tf.gather_nd(moving, idx_c)
moving_d = tf.gather_nd(moving, idx_d)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), axis=(- 1))
wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), axis=(- 1))
wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), axis=(- 1))
wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), axis=(- 1))
moved = tf.add_n([(wa * moving_a), (wb * moving_b), (wc * moving_c), (wd * moving_d)])
return moved | -8,025,276,344,341,063,000 | Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, C)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_ | register_basics.py | grid_sample | jerinka/voxelmorph_demo | python | @tf.function
def grid_sample(moving, grid):
'Given a moving image and a sampling grid as input, computes the\n transformed image by sampling the moving image at locations given by\n the grid.\n\n Currently, only 2-D images, i.e., 4-D inputs are supported.\n\n Parameters\n ----------\n moving : tf.Tensor, shape (N, H, W, C)\n The moving image.\n grid : tf.Tensor, shape (N, H, W, C)\n A tensor of sampling points (x, y). The x and y values should be\n normalized to [-1.0, 1.0] range.\n\n Returns\n -------\n moved : tf.Tensor, shape (N, H, W, C)\n The transformed image.\n\n Notes\n -----\n Let M be the moving image of shape (H, W, C), T be the transformed\n image of the same shape and G be the 2-D sampling grid of shape\n (H, W, 2). The value of T at a location (x, y) is T[y, x, :] =\n M[y\', x\', :] where [x\', y\'] = G[y, x, :].\n\n Further, [x\', y\'] = [x + dx, y + dy] where [dx, dy] are the\n displacements outputted by the CNN. When dx and dy are 0, the\n sampling grid G is a regular grid and the transformed image is the\n same as the moving image.\n\n Since the sampling point (x + dx, y + dy) can be non-integral, the\n value M[y\', x\'] is calculated using bi-linear interpolation.\n\n References\n ----------\n .. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial\n transformer networks." Advances in neural information processing\n systems. 2015. <https://arxiv.org/abs/1506.02025>`_\n .. [2] `TensorFlow implementation of spatial transformer networks.\n <https://github.com/tensorflow/models/tree/master/research/transformer>`_\n .. [3] `Spatial Transformer Networks by Kushagra Bhatnagar\n <https://link.medium.com/0b2OrmqVO5>`_\n '
(nb, nh, nw, nc) = moving.shape
x = grid[(..., 0)]
y = grid[(..., 1)]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
x = (((x + 1.0) * 0.5) * tf.cast(nw, 'float32'))
y = (((y + 1.0) * 0.5) * tf.cast(nh, 'float32'))
y_max = tf.cast((nh - 1), 'int32')
x_max = tf.cast((nw - 1), 'int32')
zero = tf.constant(0, 'int32')
x0 = tf.cast(tf.floor(x), 'int32')
x1 = (x0 + 1)
y0 = tf.cast(tf.floor(y), 'int32')
y1 = (y0 + 1)
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
b = (tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1]))
idx_a = tf.stack([b, y0, x0], axis=(- 1))
idx_b = tf.stack([b, y1, x0], axis=(- 1))
idx_c = tf.stack([b, y0, x1], axis=(- 1))
idx_d = tf.stack([b, y1, x1], axis=(- 1))
moving_a = tf.gather_nd(moving, idx_a)
moving_b = tf.gather_nd(moving, idx_b)
moving_c = tf.gather_nd(moving, idx_c)
moving_d = tf.gather_nd(moving, idx_d)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), axis=(- 1))
wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), axis=(- 1))
wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), axis=(- 1))
wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), axis=(- 1))
moved = tf.add_n([(wa * moving_a), (wb * moving_b), (wc * moving_c), (wd * moving_d)])
return moved |
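The sampler expects grid coordinates normalized to [-1, 1]; an unmodified grid reproduces the input (up to boundary interpolation), while a constant offset translates it. A minimal usage sketch that builds the normalized grid by hand, with arbitrary batch and image sizes:

import tensorflow as tf

moving = tf.random.uniform((2, 32, 32, 1))            # batch of two single-channel images

x = tf.linspace(-1.0, 1.0, 32)                        # identity sampling grid in [-1, 1]
y = tf.linspace(-1.0, 1.0, 32)
X, Y = tf.meshgrid(x, y)
grid = tf.tile(tf.stack([X, Y], axis=-1)[None], [2, 1, 1, 1])

shifted = tf.clip_by_value(grid + tf.constant([0.1, 0.0]), -1.0, 1.0)
moved = grid_sample(moving, shifted)                  # sample to the right, so content shifts left
print(moved.shape)                                    # (2, 32, 32, 1), same shape as the input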
@tf.function
def regular_grid(shape):
'Returns a batch of 2-D regular grids.\n\n Currently, only 2-D regular grids are supported.\n\n Parameters\n ----------\n shape : sequence of ints, shape (3, )\n The desired regular grid shape of the form (N, H, W).\n\n Returns\n -------\n grid : tf.Tensor, shape (N, H, W, 2)\n A batch of 2-D regular grids, values normalized to [-1.0, 1.0]\n range.\n\n Notes\n -----\n Sampling using the regular grid is an identity transformation, i.e.,\n it results in the same input and output images.\n\n References\n ----------\n .. [1] `NumPy, "numpy.meshgrid"\n <https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_\n .. [2] `NumPy, "numpy.indices"\n <https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_\n '
(nb, nh, nw) = shape
x = tf.linspace((- 1.0), 1.0, nw)
y = tf.linspace((- 1.0), 1.0, nh)
(X, Y) = tf.meshgrid(x, y)
grid = tf.stack([X, Y], axis=(- 1))
grid = tf.expand_dims(grid, axis=0)
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid | -4,218,321,770,434,875,400 | Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_ | register_basics.py | regular_grid | jerinka/voxelmorph_demo | python | @tf.function
def regular_grid(shape):
'Returns a batch of 2-D regular grids.\n\n Currently, only 2-D regular grids are supported.\n\n Parameters\n ----------\n shape : sequence of ints, shape (3, )\n The desired regular grid shape of the form (N, H, W).\n\n Returns\n -------\n grid : tf.Tensor, shape (N, H, W, 2)\n A batch of 2-D regular grids, values normalized to [-1.0, 1.0]\n range.\n\n Notes\n -----\n Sampling using the regular grid is an identity transformation, i.e.,\n it results in the same input and output images.\n\n References\n ----------\n .. [1] `NumPy, "numpy.meshgrid"\n <https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_\n .. [2] `NumPy, "numpy.indices"\n <https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_\n '
(nb, nh, nw) = shape
x = tf.linspace((- 1.0), 1.0, nw)
y = tf.linspace((- 1.0), 1.0, nh)
(X, Y) = tf.meshgrid(x, y)
grid = tf.stack([X, Y], axis=(- 1))
grid = tf.expand_dims(grid, axis=0)
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid |
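This helper is the starting point for the warps in the training and testing steps below: the predicted displacement field is added to it before calling grid_sample. A short usage sketch with arbitrary sizes:

import tensorflow as tf

moving = tf.random.uniform((3, 32, 32, 1))
grid = regular_grid([3, 32, 32])            # (3, 32, 32, 2), x and y in [-1, 1]
moved = grid_sample(moving, grid)           # (approximately) identity warp of each image
print(grid.shape, moved.shape)              # (3, 32, 32, 2) (3, 32, 32, 1)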
@tf.function
def train_step(model, moving, static, criterion, optimizer):
'A generic training procedure for one iteration.\n\n Parameters\n ----------\n model\n A convolutional encoder-decoder network.\n moving : tf.Tensor, shape (N, H, W, C)\n A batch of moving images.\n static : tf.Tensor, shape (1, H, W, C)\n The static image.\n criterion\n The loss function.\n optimizer\n An optimizer.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n The average loss for the batch.\n '
(nb, nh, nw, nc) = moving.shape
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
with tf.GradientTape() as tape:
inputs = tf.concat([moving, static], axis=(- 1))
deformation = model(inputs)
grid = regular_grid([nb, nh, nw])
grid_new = (grid + deformation)
grid_new = tf.clip_by_value(grid_new, (- 1), 1)
moved = grid_sample(moving, grid_new)
loss = criterion(moved, static)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss | -1,444,017,728,608,054,500 | A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch. | register_basics.py | train_step | jerinka/voxelmorph_demo | python | @tf.function
def train_step(model, moving, static, criterion, optimizer):
'A generic training procedure for one iteration.\n\n Parameters\n ----------\n model\n A convolutional encoder-decoder network.\n moving : tf.Tensor, shape (N, H, W, C)\n A batch of moving images.\n static : tf.Tensor, shape (1, H, W, C)\n The static image.\n criterion\n The loss function.\n optimizer\n An optimizer.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n The average loss for the batch.\n '
(nb, nh, nw, nc) = moving.shape
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
with tf.GradientTape() as tape:
inputs = tf.concat([moving, static], axis=(- 1))
deformation = model(inputs)
grid = regular_grid([nb, nh, nw])
grid_new = (grid + deformation)
grid_new = tf.clip_by_value(grid_new, (- 1), 1)
moved = grid_sample(moving, grid_new)
loss = criterion(moved, static)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss |
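One call performs a single gradient update of the registration network; the only shape requirement is that static keeps a batch dimension of 1 so it can be tiled across the batch. A toy invocation with random tensors (the learning rate and sizes are arbitrary choices here, not values from the original script):

import tensorflow as tf

model = simple_cnn(input_shape=(32, 32, 2))
optimizer = tf.keras.optimizers.Adam(1e-3)

moving = tf.random.uniform((8, 32, 32, 1))     # batch of images to be warped
static = tf.random.uniform((1, 32, 32, 1))     # single fixed target
loss = train_step(model, moving, static, mse_loss, optimizer)
print(float(loss))                             # scalar loss for this batch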
@tf.function
def test_step(model, moving, static, criterion):
'A generic testing procedure.\n\n Parameters\n ----------\n model\n A convolutional encoder-decoder network.\n moving : tf.Tensor, shape (N, H, W, C)\n A batch of moving images.\n static : tf.Tensor, shape (1, H, W, C)\n The static image.\n criterion\n The loss function.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n The average loss for the batch.\n '
(nb, nh, nw, nc) = moving.shape
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
inputs = tf.concat([moving, static], axis=(- 1))
deformation = model(inputs, training=False)
grid = regular_grid([nb, nh, nw])
grid_new = (grid + deformation)
grid_new = tf.clip_by_value(grid_new, (- 1), 1)
moved = grid_sample(moving, grid_new)
loss = criterion(moved, static)
return loss | -7,464,719,366,714,921,000 | A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch. | register_basics.py | test_step | jerinka/voxelmorph_demo | python | @tf.function
def test_step(model, moving, static, criterion):
'A generic testing procedure.\n\n Parameters\n ----------\n model\n A convolutional encoder-decoder network.\n moving : tf.Tensor, shape (N, H, W, C)\n A batch of moving images.\n static : tf.Tensor, shape (1, H, W, C)\n The static image.\n criterion\n The loss function.\n\n Returns\n -------\n loss : tf.Tensor, shape ()\n The average loss for the batch.\n '
(nb, nh, nw, nc) = moving.shape
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
inputs = tf.concat([moving, static], axis=(- 1))
deformation = model(inputs, training=False)
grid = regular_grid([nb, nh, nw])
grid_new = (grid + deformation)
grid_new = tf.clip_by_value(grid_new, (- 1), 1)
moved = grid_sample(moving, grid_new)
loss = criterion(moved, static)
return loss |
def load_data(label=2):
'Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]\n range, resizes the images from (28, 28) to (32, 32) and filters the\n dataset to keep images of just one class.\n\n Parameters\n ----------\n label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2\n The class of images to train and test on.\n\n Returns\n -------\n (x_train, x_test) : tuple of ndarrays\n NumPy arrays of training and testing images.\n '
((x_train, y_train), (x_test, y_test)) = tf.keras.datasets.mnist.load_data()
ids_train = np.where((y_train == label))
ids_test = np.where((y_test == label))
x_train = x_train[ids_train]
x_test = x_test[ids_test]
x_train = (x_train.astype(np.float32) / 255.0)
x_test = (x_test.astype(np.float32) / 255.0)
x_train = x_train[(..., None)]
x_test = x_test[(..., None)]
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return (x_train, x_test) | 7,456,557,386,423,309,000 | Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images. | register_basics.py | load_data | jerinka/voxelmorph_demo | python | def load_data(label=2):
'Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]\n range, resizes the images from (28, 28) to (32, 32) and filters the\n dataset to keep images of just one class.\n\n Parameters\n ----------\n label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2\n The class of images to train and test on.\n\n Returns\n -------\n (x_train, x_test) : tuple of ndarrays\n NumPy arrays of training and testing images.\n '
((x_train, y_train), (x_test, y_test)) = tf.keras.datasets.mnist.load_data()
ids_train = np.where((y_train == label))
ids_test = np.where((y_test == label))
x_train = x_train[ids_train]
x_test = x_test[ids_test]
x_train = (x_train.astype(np.float32) / 255.0)
x_test = (x_test.astype(np.float32) / 255.0)
x_train = x_train[(..., None)]
x_test = x_test[(..., None)]
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return (x_train, x_test) |
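Putting the loader together with the training step gives a minimal end-to-end loop. The batch size, learning rate, loss choice and step count below are illustrative assumptions, not values taken from the original script:

import tensorflow as tf

x_train, x_test = load_data(label=2)                 # MNIST "2"s, scaled to [0, 1] and resized to 32x32
static = x_train[:1]                                 # fixed target image, shape (1, 32, 32, 1)
model = simple_cnn(input_shape=(32, 32, 2))
optimizer = tf.keras.optimizers.Adam(1e-3)

dataset = (tf.data.Dataset.from_tensor_slices(x_train[1:])
           .shuffle(1024)
           .batch(32, drop_remainder=True))          # fixed batch size avoids tf.function retracing
for step, moving in enumerate(dataset.take(200)):
    loss = train_step(model, moving, static, ncc_loss, optimizer)
    if step % 50 == 0:
        print(f'step {step:3d}  loss {loss.numpy():.4f}')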
def plot_images(model, moving, static):
'Visualize some images after training.\n\n Parameters\n ----------\n model\n The trained model.\n moving : tf.Tensor, shape (N, H, W, C)\n A batch of moving images.\n static : tf.Tensor, shape (1, H, W, C)\n The static image.\n '
(nb, nh, nw, nc) = moving.shape
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
inputs = tf.concat([moving, static], axis=(- 1))
deformation = model(inputs, training=False)
grid = regular_grid([nb, nh, nw])
grid_new = (grid + deformation)
grid_new = tf.clip_by_value(grid_new, (- 1), 1)
moved = grid_sample(moving, grid_new)
moved = (moved.numpy().squeeze(axis=(- 1)) * 255.0)
moved = moved.astype(np.uint8)
moving = (moving.numpy().squeeze(axis=(- 1)) * 255.0)
moving = moving.astype(np.uint8)
static = (static.numpy().squeeze(axis=(- 1)) * 255.0)
static = static.astype(np.uint8)
fig = plt.figure(figsize=((3 * 1.7), (nb * 1.7)))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, (((i * 3) + j) + 1))
if (i == 0):
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show() | -2,103,651,409,913,373,200 | Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image. | register_basics.py | plot_images | jerinka/voxelmorph_demo | python | def plot_images(model, moving, static):
'Visualize some images after training.\n\n Parameters\n ----------\n model\n The trained model.\n moving : tf.Tensor, shape (N, H, W, C)\n A batch of moving images.\n static : tf.Tensor, shape (1, H, W, C)\n The static image.\n '
(nb, nh, nw, nc) = moving.shape
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
inputs = tf.concat([moving, static], axis=(- 1))
deformation = model(inputs, training=False)
grid = regular_grid([nb, nh, nw])
grid_new = (grid + deformation)
grid_new = tf.clip_by_value(grid_new, (- 1), 1)
moved = grid_sample(moving, grid_new)
moved = (moved.numpy().squeeze(axis=(- 1)) * 255.0)
moved = moved.astype(np.uint8)
moving = (moving.numpy().squeeze(axis=(- 1)) * 255.0)
moving = moving.astype(np.uint8)
static = (static.numpy().squeeze(axis=(- 1)) * 255.0)
static = static.astype(np.uint8)
fig = plt.figure(figsize=((3 * 1.7), (nb * 1.7)))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, (((i * 3) + j) + 1))
if (i == 0):
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show() |
def egg(num_eggs: int) -> None:
'prints the number of eggs.\n\n Arguments:\n num_eggs {int} -- The number of eggs\n\n Returns:\n None.\n '
print(f'We have {num_eggs} eggs') | -3,256,755,077,250,168,300 | prints the number of eggs.
Arguments:
num_eggs {int} -- The number of eggs
Returns:
None. | src/moonshine/__main__.py | egg | CatchemAl/moonshine | python | def egg(num_eggs: int) -> None:
'prints the number of eggs.\n\n Arguments:\n num_eggs {int} -- The number of eggs\n\n Returns:\n None.\n '
print(f'We have {num_eggs} eggs') |
def get(self, request):
'提供订单结算页面'
user = request.user
try:
addresses = Address.objects.filter(user=user, is_deleted=False)
except Address.DoesNotExist:
addresses = None
redis_conn = get_redis_connection('carts')
item_dict = redis_conn.hgetall(('carts_%s' % user.id))
cart_selected = redis_conn.smembers(('selected_%s' % user.id))
cart = {}
for sku_id in cart_selected:
cart[int(sku_id)] = int(item_dict[sku_id])
total_count = 0
total_amount = Decimal(0.0)
skus = SKU.objects.filter(id__in=cart.keys())
for sku in skus:
sku.count = cart[sku.id]
sku.amount = (sku.count * sku.price)
total_count += sku.count
total_amount += sku.amount
freight = Decimal('10.00')
context = {'addresses': addresses, 'skus': skus, 'total_count': total_count, 'total_amount': total_amount, 'freight': freight, 'payment_amount': (total_amount + freight)}
return render(request, 'place_order.html', context) | 221,095,081,085,981,470 | Provide the order settlement page | meiduo_mall/meiduo_mall/apps/orders/views.py | get | Gdavid123/md_project | python | def get(self, request):
user = request.user
try:
addresses = Address.objects.filter(user=user, is_deleted=False)
except Address.DoesNotExist:
addresses = None
redis_conn = get_redis_connection('carts')
item_dict = redis_conn.hgetall(('carts_%s' % user.id))
cart_selected = redis_conn.smembers(('selected_%s' % user.id))
cart = {}
for sku_id in cart_selected:
cart[int(sku_id)] = int(item_dict[sku_id])
total_count = 0
total_amount = Decimal(0.0)
skus = SKU.objects.filter(id__in=cart.keys())
for sku in skus:
sku.count = cart[sku.id]
sku.amount = (sku.count * sku.price)
total_count += sku.count
total_amount += sku.amount
freight = Decimal('10.00')
context = {'addresses': addresses, 'skus': skus, 'total_count': total_count, 'total_amount': total_amount, 'freight': freight, 'payment_amount': (total_amount + freight)}
return render(request, 'place_order.html', context) |
def post(self, request):
'Save the order and its order items'
json_dict = json.loads(request.body)
address_id = json_dict.get('address_id')
pay_method = json_dict.get('pay_method')
if (not all([address_id, pay_method])):
return HttpResponseForbidden('缺少必传参数')
try:
address = Address.objects.get(id=address_id)
except Exception:
return HttpResponseForbidden('参数address_id错误')
if (pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]):
return HttpResponseForbidden('参数pay_method错误')
user = request.user
order_id = (timezone.localtime().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id))
with transaction.atomic():
save_id = transaction.savepoint()
try:
order = OrderInfo.objects.create(order_id=order_id, user=user, address=address, total_count=0, total_amount=Decimal('0'), freight=Decimal('10.00'), pay_method=pay_method, status=(OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if (pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY']) else OrderInfo.ORDER_STATUS_ENUM['UNSEND']))
redis_conn = get_redis_connection('carts')
item_dict = redis_conn.hgetall(('carts_%s' % user.id))
cart_selected = redis_conn.smembers(('selected_%s' % user.id))
carts = {}
for sku_id in cart_selected:
carts[int(sku_id)] = int(item_dict[sku_id])
sku_ids = carts.keys()
for sku_id in sku_ids:
while True:
sku = SKU.objects.get(id=sku_id)
origin_stock = sku.stock
origin_sales = sku.sales
sku_count = carts[sku_id]
if (sku_count > origin_stock):
transaction.savepoint_rollback(save_id)
return JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
new_stock = (origin_stock - sku_count)
new_sales = (origin_sales + sku_count)
result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)
if (result == 0):
continue
sku.goods.sales += sku_count
sku.goods.save()
OrderGoods.objects.create(order=order, sku=sku, count=sku_count, price=sku.price)
order.total_count += sku_count
order.total_amount += (sku_count * sku.price)
break
order.total_amount += order.freight
order.save()
except Exception as e:
logger.error(e)
transaction.savepoint_rollback(save_id)
return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '下单失败'})
transaction.savepoint_commit(save_id)
pl = redis_conn.pipeline()
pl.hdel(('carts_%s' % user.id), *cart_selected)
pl.srem(('selected_%s' % user.id), *cart_selected)
pl.execute()
return JsonResponse({'code': RETCODE.OK, 'errmsg': '下单成功', 'order_id': order.order_id}) | 6,315,316,786,832,754,000 | Save the order and its order items | meiduo_mall/meiduo_mall/apps/orders/views.py | post | Gdavid123/md_project | python | def post(self, request):
json_dict = json.loads(request.body)
address_id = json_dict.get('address_id')
pay_method = json_dict.get('pay_method')
if (not all([address_id, pay_method])):
return HttpResponseForbidden('缺少必传参数')
try:
address = Address.objects.get(id=address_id)
except Exception:
return HttpResponseForbidden('参数address_id错误')
if (pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]):
return HttpResponseForbidden('参数pay_method错误')
user = request.user
order_id = (timezone.localtime().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id))
with transaction.atomic():
save_id = transaction.savepoint()
try:
order = OrderInfo.objects.create(order_id=order_id, user=user, address=address, total_count=0, total_amount=Decimal('0'), freight=Decimal('10.00'), pay_method=pay_method, status=(OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if (pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY']) else OrderInfo.ORDER_STATUS_ENUM['UNSEND']))
redis_conn = get_redis_connection('carts')
item_dict = redis_conn.hgetall(('carts_%s' % user.id))
cart_selected = redis_conn.smembers(('selected_%s' % user.id))
carts = {}
for sku_id in cart_selected:
carts[int(sku_id)] = int(item_dict[sku_id])
sku_ids = carts.keys()
for sku_id in sku_ids:
while True:
sku = SKU.objects.get(id=sku_id)
origin_stock = sku.stock
origin_sales = sku.sales
sku_count = carts[sku_id]
if (sku_count > origin_stock):
transaction.savepoint_rollback(save_id)
return JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
new_stock = (origin_stock - sku_count)
new_sales = (origin_sales + sku_count)
result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)
if (result == 0):
continue
sku.goods.sales += sku_count
sku.goods.save()
OrderGoods.objects.create(order=order, sku=sku, count=sku_count, price=sku.price)
order.total_count += sku_count
order.total_amount += (sku_count * sku.price)
break
order.total_amount += order.freight
order.save()
except Exception as e:
logger.error(e)
transaction.savepoint_rollback(save_id)
return JsonResponse({'code': RETCODE.DBERR, 'errmsg': '下单失败'})
transaction.savepoint_commit(save_id)
pl = redis_conn.pipeline()
pl.hdel(('carts_%s' % user.id), *cart_selected)
pl.srem(('selected_%s' % user.id), *cart_selected)
pl.execute()
return JsonResponse({'code': RETCODE.OK, 'errmsg': '下单成功', 'order_id': order.order_id}) |
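The stock update in the loop above is an optimistic lock: the row is re-read, then updated through a conditional filter on the stock value that was just read, and the step is retried whenever the filtered update reports zero affected rows because another transaction won the race. Stripped of the order bookkeeping, the pattern reduces to the hypothetical helper below; it reuses the SKU model from this view and is meant as an illustration, not a drop-in replacement:

def decrease_stock(sku_id, quantity):
    """Atomically decrement stock, retrying when a concurrent writer interferes (illustrative sketch)."""
    while True:
        sku = SKU.objects.get(id=sku_id)
        if sku.stock < quantity:
            return False                              # not enough stock, caller should roll back
        updated = SKU.objects.filter(
            id=sku_id, stock=sku.stock                # matches only if nobody changed stock meanwhile
        ).update(stock=sku.stock - quantity,
                 sales=sku.sales + quantity)
        if updated:                                   # one row affected: this transaction won
            return True
        # zero rows affected: another writer got there first, re-read and retry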
@commands.command()
@commands.guild_only()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason: str=None):
'Kicks a user from the server.'
if (user == ctx.author):
return (await ctx.send('Kicking yourself? smh.'))
if (user == self.bot.user):
return (await ctx.send("I can't kick myself."))
res = (f', for reason: `{reason}`' if reason else '')
try:
(await user.kick(reason=reason))
(await ctx.send(f'Kicked {user}{res}'))
except discord.Forbidden:
(await ctx.send("I don't have permissions to kick that user."))
except Exception as e:
raise e | 3,890,303,692,033,552,400 | Kicks a user from the server. | cogs/mod.py | kick | bananaboy21/LadyBug-Bot | python | @commands.command()
@commands.guild_only()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason: str=None):
if (user == ctx.author):
return (await ctx.send('Kicking yourself? smh.'))
if (user == self.bot.user):
return (await ctx.send("I can't kick myself."))
res = (f', for reason: `{reason}`' if reason else '')
try:
(await user.kick(reason=reason))
(await ctx.send(f'Kicked {user}{res}'))
except discord.Forbidden:
(await ctx.send("I don't have permissions to kick that user."))
except Exception as e:
raise e |
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount):
'Purges X amount of messages from a channel'
try:
amount = int(amount)
except ValueError:
return (await ctx.send('Enter a number only!'))
try:
(await ctx.channel.purge(limit=(amount + 1)))
(await ctx.send(f'Purged **{amount}** messages', delete_after=3))
except discord.Forbidden:
(await ctx.send(f'I need the `Manage Messages` permission to do this.')) | -5,009,195,797,135,292,000 | Purges X amount of messages from a channel | cogs/mod.py | purge | bananaboy21/LadyBug-Bot | python | @commands.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount):
try:
amount = int(amount)
except ValueError:
return (await ctx.send('Enter a number only!'))
try:
(await ctx.channel.purge(limit=(amount + 1)))
(await ctx.send(f'Purged **{amount}** messages', delete_after=3))
except discord.Forbidden:
(await ctx.send(f'I need the `Manage Messages` permission to do this.')) |
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
'Continuously collect data from the audio stream, into the buffer.'
self._buff.put(in_data)
return (None, paContinue) | 8,279,764,556,543,421,000 | Continuously collect data from the audio stream, into the buffer. | googlesr.py | _fill_buffer | kwea123/Unity_live_caption | python | def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
self._buff.put(in_data)
return (None, paContinue) |
def create_process_chain_entry(input_name):
'Create an Actinia process description that queries metadata for the given input layer\n (r.info, v.info or t.info, depending on the layer datatype).\n\n :param input_name: The input layer name\n :return: An Actinia process chain description\n '
(location, mapset, datatype, layer_name) = ActiniaInterface.layer_def_to_components(input_name)
input_name = layer_name
if (mapset is not None):
input_name = ((layer_name + '@') + mapset)
rn = randint(0, 1000000)
pc = {}
if (datatype == 'raster'):
pc = {'id': ('r_info_%i' % rn), 'module': 'r.info', 'inputs': [{'param': 'map', 'value': input_name}], 'flags': 'g'}
elif (datatype == 'vector'):
pc = {'id': ('v_info_%i' % rn), 'module': 'v.info', 'inputs': [{'param': 'map', 'value': input_name}], 'flags': 'g'}
elif (datatype == 'strds'):
pc = {'id': ('t_info_%i' % rn), 'module': 't.info', 'inputs': [{'param': 'input', 'value': input_name}], 'flags': 'g'}
else:
raise Exception('Unsupported datatype')
return pc | -4,390,559,835,525,533,000 | Create an Actinia process description that queries metadata for the given
input layer (r.info, v.info or t.info, depending on the layer datatype).
:param input_name: The input layer name
:return: An Actinia process chain description | src/openeo_grass_gis_driver/actinia_processing/get_data_process.py | create_process_chain_entry | AnikaBettge/openeo-grassgis-driver | python | def create_process_chain_entry(input_name):
'Create an Actinia process description that queries metadata for the given input layer\n (r.info, v.info or t.info, depending on the layer datatype).\n\n :param input_name: The input layer name\n :return: An Actinia process chain description\n '
(location, mapset, datatype, layer_name) = ActiniaInterface.layer_def_to_components(input_name)
input_name = layer_name
if (mapset is not None):
input_name = ((layer_name + '@') + mapset)
rn = randint(0, 1000000)
pc = {}
if (datatype == 'raster'):
pc = {'id': ('r_info_%i' % rn), 'module': 'r.info', 'inputs': [{'param': 'map', 'value': input_name}], 'flags': 'g'}
elif (datatype == 'vector'):
pc = {'id': ('v_info_%i' % rn), 'module': 'v.info', 'inputs': [{'param': 'map', 'value': input_name}], 'flags': 'g'}
elif (datatype == 'strds'):
pc = {'id': ('t_info_%i' % rn), 'module': 't.info', 'inputs': [{'param': 'input', 'value': input_name}], 'flags': 'g'}
else:
raise Exception('Unsupported datatype')
return pc |
def get_process_list(process):
'Analyse the process description and return the Actinia process chain and the name of the processing result\n\n :param process: The process description\n :return: (output_names, actinia_process_list)\n '
(input_names, process_list) = analyse_process_graph(process)
output_names = []
if ('data_id' not in process):
raise Exception(('Process %s requires parameter <data_id>' % PROCESS_NAME))
output_names.append(process['data_id'])
pc = create_process_chain_entry(input_name=process['data_id'])
process_list.append(pc)
for input_name in input_names:
output_name = input_name
output_names.append(output_name)
return (output_names, process_list) | -8,158,080,401,428,951,000 | Analyse the process description and return the Actinia process chain and the name of the processing result
:param process: The process description
:return: (output_names, actinia_process_list) | src/openeo_grass_gis_driver/actinia_processing/get_data_process.py | get_process_list | AnikaBettge/openeo-grassgis-driver | python | def get_process_list(process):
'Analyse the process description and return the Actinia process chain and the name of the processing result\n\n :param process: The process description\n :return: (output_names, actinia_process_list)\n '
(input_names, process_list) = analyse_process_graph(process)
output_names = []
if ('data_id' not in process):
raise Exception(('Process %s requires parameter <data_id>' % PROCESS_NAME))
output_names.append(process['data_id'])
pc = create_process_chain_entry(input_name=process['data_id'])
process_list.append(pc)
for input_name in input_names:
output_name = input_name
output_names.append(output_name)
return (output_names, process_list) |
def test_ooo_ns(self):
' Check that ooo exists in namespace declarations '
calcdoc = OpenDocumentSpreadsheet()
table = odf.table.Table(name='Costs')
forms = odf.office.Forms()
form = odf.form.Form(controlimplementation='ooo:com.sun.star.form.component.Form')
lb = odf.form.Listbox(controlimplementation='ooo:com.sun.star.form.component.ListBox', dropdown='true', id='control1')
form.addElement(lb)
forms.addElement(form)
table.addElement(forms)
tr = odf.table.TableRow()
table.addElement(tr)
tr = odf.table.TableRow()
cell = odf.table.TableCell()
tr.addElement(cell)
cell = odf.table.TableCell()
draw = odf.draw.Control(control='control1', height='0.1126in', width='0.798in', x='0.0303in', y='0.0205in', endcelladdress='Costs.B2', endx='0.8283in', endy='0.1331in')
cell.addElement(draw)
tr.addElement(cell)
table.addElement(tr)
calcdoc.spreadsheet.addElement(table)
result = calcdoc.contentxml()
self.assertNotEqual((- 1), result.find(b'xmlns:ooo="http://openoffice.org/2004/office"')) | -4,638,254,260,209,595,000 | Check that ooo exists in namespace declarations | desktop/core/ext-py/odfpy-1.4.1/tests/testform.py | test_ooo_ns | 10088/hue | python | def test_ooo_ns(self):
' '
calcdoc = OpenDocumentSpreadsheet()
table = odf.table.Table(name='Costs')
forms = odf.office.Forms()
form = odf.form.Form(controlimplementation='ooo:com.sun.star.form.component.Form')
lb = odf.form.Listbox(controlimplementation='ooo:com.sun.star.form.component.ListBox', dropdown='true', id='control1')
form.addElement(lb)
forms.addElement(form)
table.addElement(forms)
tr = odf.table.TableRow()
table.addElement(tr)
tr = odf.table.TableRow()
cell = odf.table.TableCell()
tr.addElement(cell)
cell = odf.table.TableCell()
draw = odf.draw.Control(control='control1', height='0.1126in', width='0.798in', x='0.0303in', y='0.0205in', endcelladdress='Costs.B2', endx='0.8283in', endy='0.1331in')
cell.addElement(draw)
tr.addElement(cell)
table.addElement(tr)
calcdoc.spreadsheet.addElement(table)
result = calcdoc.contentxml()
self.assertNotEqual((- 1), result.find(b'xmlns:ooo="http://openoffice.org/2004/office"')) |
def acked(err, msg):
'Delivery report callback called (from flush()) on successful or failed delivery of the message.'
if (err is not None):
print('failed to deliver message: {0}'.format(err.str()))
else:
print('produced to: {0} [{1}] @ {2}'.format(msg.topic(), msg.partition(), msg.offset())) | -5,767,730,579,330,683,000 | Delivery report callback called (from flush()) on successful or failed delivery of the message. | examples/confluent_cloud.py | acked | RasmusWL/confluent-kafka-python | python | def acked(err, msg):
if (err is not None):
print('failed to deliver message: {0}'.format(err.str()))
else:
print('produced to: {0} [{1}] @ {2}'.format(msg.topic(), msg.partition(), msg.offset())) |
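The delivery report is only invoked when the client serves its event queue, which is why the docstring points at flush(); a common pattern is to pass the callback per message, poll between sends, and flush before exiting. A minimal producer sketch, where the broker address and topic name are placeholders rather than values from this example:

from confluent_kafka import Producer

producer = Producer({'bootstrap.servers': 'localhost:9092'})   # placeholder broker

for i in range(3):
    producer.produce('test-topic', value=f'message {i}'.encode(), callback=acked)
    producer.poll(0)          # serve earlier delivery reports without blocking

producer.flush()              # block until every pending report (acked) has fired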
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'cluster': (ClusterInfoSummary,)} | -2,487,247,778,736,868,400 | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type. | api/client/src/pcluster_client/model/delete_cluster_response_content.py | openapi_types | Chen188/aws-parallelcluster | python | @cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'cluster': (ClusterInfoSummary,)} |
@convert_js_args_to_python_args
def __init__(self, cluster, *args, **kwargs):
'DeleteClusterResponseContent - a model defined in OpenAPI\n\n Args:\n cluster (ClusterInfoSummary):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.cluster = cluster
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value) | 560,588,246,799,685,570 | DeleteClusterResponseContent - a model defined in OpenAPI
Args:
cluster (ClusterInfoSummary):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,) | api/client/src/pcluster_client/model/delete_cluster_response_content.py | __init__ | Chen188/aws-parallelcluster | python | @convert_js_args_to_python_args
def __init__(self, cluster, *args, **kwargs):
'DeleteClusterResponseContent - a model defined in OpenAPI\n\n Args:\n cluster (ClusterInfoSummary):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.cluster = cluster
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value) |
def on_status_update(self, channel, callback):
'\n Callback to execute on status of update of channel\n '
if (not (channel in self._callbacks)):
self._callbacks[channel] = []
self._callbacks[channel].append(callback) | -786,942,491,258,360,300 | Callback to execute on status of update of channel | velbus/modules/vmbbl.py | on_status_update | ddanssaert/python-velbus | python | def on_status_update(self, channel, callback):
'\n \n '
if (not (channel in self._callbacks)):
self._callbacks[channel] = []
self._callbacks[channel].append(callback) |
def clean_path(self, path):
'\n Helper to clean issues path from remote tasks\n '
if path.startswith(WORKER_CHECKOUT):
path = path[len(WORKER_CHECKOUT):]
if path.startswith('/'):
path = path[1:]
return path | 77,414,928,993,778,610 | Helper to clean issues path from remote tasks | src/staticanalysis/bot/static_analysis_bot/task.py | clean_path | Mozilla-GitHub-Standards/7a0517c85b685752ad36ce0e8246040e3de8d842fb0f2696540dfc0c54da847b | python | def clean_path(self, path):
'\n \n '
if path.startswith(WORKER_CHECKOUT):
path = path[len(WORKER_CHECKOUT):]
if path.startswith('/'):
path = path[1:]
return path |
def __init__(self, model_dir, every_n_steps=1):
'Create a FeatureImportanceSummarySaver Hook.\n\n This hook creates scalar summaries representing feature importance\n for each feature column during training.\n\n Args:\n model_dir: model base output directory.\n every_n_steps: frequency, in number of steps, for logging summaries.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n '
if (model_dir is None):
raise ValueError('model dir must be specified.')
self._model_dir = model_dir
self._every_n_steps = every_n_steps
self._last_triggered_step = None | -6,315,023,366,711,679,000 | Create a FeatureImportanceSummarySaver Hook.
This hook creates scalar summaries representing feature importance
for each feature column during training.
Args:
model_dir: model base output directory.
every_n_steps: frequency, in number of steps, for logging summaries.
Raises:
ValueError: If one of the arguments is invalid. | tensorflow/contrib/boosted_trees/estimator_batch/trainer_hooks.py | __init__ | 252125889/tensorflow | python | def __init__(self, model_dir, every_n_steps=1):
'Create a FeatureImportanceSummarySaver Hook.\n\n This hook creates scalar summaries representing feature importance\n for each feature column during training.\n\n Args:\n model_dir: model base output directory.\n every_n_steps: frequency, in number of steps, for logging summaries.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n '
if (model_dir is None):
raise ValueError('model dir must be specified.')
self._model_dir = model_dir
self._every_n_steps = every_n_steps
self._last_triggered_step = None |
def __init__(self, rolling_update=None, type=None, local_vars_configuration=None):
'V1beta2DeploymentStrategy - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rolling_update = None
self._type = None
self.discriminator = None
if (rolling_update is not None):
self.rolling_update = rolling_update
if (type is not None):
self.type = type | 1,758,358,165,594,836,200 | V1beta2DeploymentStrategy - a model defined in OpenAPI | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | __init__ | playground-julia/kubernetes_asyncio | python | def __init__(self, rolling_update=None, type=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rolling_update = None
self._type = None
self.discriminator = None
if (rolling_update is not None):
self.rolling_update = rolling_update
if (type is not None):
self.type = type |
@property
def rolling_update(self):
'Gets the rolling_update of this V1beta2DeploymentStrategy. # noqa: E501\n\n\n :return: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501\n :rtype: V1beta2RollingUpdateDeployment\n '
return self._rolling_update | 2,836,691,819,272,422,400 | Gets the rolling_update of this V1beta2DeploymentStrategy. # noqa: E501
:return: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501
:rtype: V1beta2RollingUpdateDeployment | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | rolling_update | playground-julia/kubernetes_asyncio | python | @property
def rolling_update(self):
'Gets the rolling_update of this V1beta2DeploymentStrategy. # noqa: E501\n\n\n :return: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501\n :rtype: V1beta2RollingUpdateDeployment\n '
return self._rolling_update |
@rolling_update.setter
def rolling_update(self, rolling_update):
'Sets the rolling_update of this V1beta2DeploymentStrategy.\n\n\n :param rolling_update: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501\n :type: V1beta2RollingUpdateDeployment\n '
self._rolling_update = rolling_update | -6,238,375,914,927,697,000 | Sets the rolling_update of this V1beta2DeploymentStrategy.
:param rolling_update: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501
:type: V1beta2RollingUpdateDeployment | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | rolling_update | playground-julia/kubernetes_asyncio | python | @rolling_update.setter
def rolling_update(self, rolling_update):
'Sets the rolling_update of this V1beta2DeploymentStrategy.\n\n\n :param rolling_update: The rolling_update of this V1beta2DeploymentStrategy. # noqa: E501\n :type: V1beta2RollingUpdateDeployment\n '
self._rolling_update = rolling_update |
@property
def type(self):
'Gets the type of this V1beta2DeploymentStrategy. # noqa: E501\n\n Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. # noqa: E501\n\n :return: The type of this V1beta2DeploymentStrategy. # noqa: E501\n :rtype: str\n '
return self._type | -5,930,811,531,650,901,000 | Gets the type of this V1beta2DeploymentStrategy. # noqa: E501
Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. # noqa: E501
:return: The type of this V1beta2DeploymentStrategy. # noqa: E501
:rtype: str | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | type | playground-julia/kubernetes_asyncio | python | @property
def type(self):
'Gets the type of this V1beta2DeploymentStrategy. # noqa: E501\n\n Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. # noqa: E501\n\n :return: The type of this V1beta2DeploymentStrategy. # noqa: E501\n :rtype: str\n '
return self._type |
@type.setter
def type(self, type):
'Sets the type of this V1beta2DeploymentStrategy.\n\n Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. # noqa: E501\n\n :param type: The type of this V1beta2DeploymentStrategy. # noqa: E501\n :type: str\n '
self._type = type | -6,357,622,358,049,090,000 | Sets the type of this V1beta2DeploymentStrategy.
Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. # noqa: E501
:param type: The type of this V1beta2DeploymentStrategy. # noqa: E501
:type: str | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | type | playground-julia/kubernetes_asyncio | python | @type.setter
def type(self, type):
'Sets the type of this V1beta2DeploymentStrategy.\n\n Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. # noqa: E501\n\n :param type: The type of this V1beta2DeploymentStrategy. # noqa: E501\n :type: str\n '
self._type = type |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | 8,442,519,487,048,767,000 | Returns the model properties as a dict | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | to_dict | playground-julia/kubernetes_asyncio | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | to_str | playground-julia/kubernetes_asyncio | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | __repr__ | playground-julia/kubernetes_asyncio | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, V1beta2DeploymentStrategy)):
return False
return (self.to_dict() == other.to_dict()) | 6,809,897,058,905,253,000 | Returns true if both objects are equal | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | __eq__ | playground-julia/kubernetes_asyncio | python | def __eq__(self, other):
if (not isinstance(other, V1beta2DeploymentStrategy)):
return False
return (self.to_dict() == other.to_dict()) |
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, V1beta2DeploymentStrategy)):
return True
return (self.to_dict() != other.to_dict()) | 4,985,561,881,093,274,000 | Returns true if both objects are not equal | kubernetes_asyncio/client/models/v1beta2_deployment_strategy.py | __ne__ | playground-julia/kubernetes_asyncio | python | def __ne__(self, other):
if (not isinstance(other, V1beta2DeploymentStrategy)):
return True
return (self.to_dict() != other.to_dict()) |
@register_make_test_function()
def make_transpose_conv_tests(options):
'Make a set of tests to do transpose_conv.'
test_parameters = [{'input_shape': [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]], 'filter_size': [[1, 1], [1, 2], [3, 3]], 'strides': [[1, 1, 1, 1], [1, 3, 3, 1]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'channel_multiplier': [1, 2], 'output_shape': [[]], 'fully_quantize': [False]}, {'input_shape': [[1, 3, 3, 1]], 'filter_size': [[3, 3, 2, 1]], 'strides': [[1, 1, 1, 1]], 'padding': ['SAME'], 'data_format': ['NHWC'], 'channel_multiplier': [1], 'output_shape': [[1, 3, 3, 2]], 'fully_quantize': [True]}, {'input_shape': [[1, 3, 3, 1]], 'filter_size': [[3, 3, 2, 1]], 'strides': [[1, 2, 2, 1]], 'padding': ['SAME'], 'data_format': ['NHWC'], 'channel_multiplier': [1], 'output_shape': [[1, 6, 6, 2]], 'fully_quantize': [True]}, {'input_shape': [[1, 4, 3, 1]], 'filter_size': [[3, 3, 2, 1]], 'strides': [[1, 2, 2, 1]], 'padding': ['SAME'], 'data_format': ['NHWC'], 'channel_multiplier': [1], 'output_shape': [[1, 8, 6, 2]], 'fully_quantize': [True]}]
def get_tensor_shapes(parameters):
input_shape = parameters['input_shape']
filter_size = parameters['filter_size']
if (not parameters['fully_quantize']):
filter_shape = (filter_size + [input_shape[3], parameters['channel_multiplier']])
return [input_shape, filter_shape]
return [input_shape, filter_size]
def build_graph(parameters):
'Build a transpose_conv graph given `parameters`.'
(input_shape, filter_shape) = get_tensor_shapes(parameters)
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=input_shape)
filter_input = tf.compat.v1.placeholder(dtype=tf.float32, name='filter', shape=filter_shape)
if (not parameters['fully_quantize']):
input_tensors = [input_tensor, filter_input]
conv_outputs = tf.nn.conv2d(input_tensor, filter_input, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
out = tf.compat.v1.nn.conv2d_backprop_input(input_shape, filter_input, conv_outputs, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
else:
input_tensors = [input_tensor]
filter_input = create_tensor_data(np.float32, filter_shape, min_value=(- 1), max_value=1)
out = tf.nn.conv2d_transpose(input_tensor, filter_input, parameters['output_shape'], strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
return (input_tensors, [out])
def build_inputs(parameters, sess, inputs, outputs):
(input_shape, filter_shape) = get_tensor_shapes(parameters)
if (not parameters['fully_quantize']):
values = [create_tensor_data(np.float32, input_shape), create_tensor_data(np.float32, filter_shape)]
else:
values = [create_tensor_data(np.float32, input_shape, min_value=(- 1), max_value=1)]
return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values))))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs) | 6,016,943,675,267,754,000 | Make a set of tests to do transpose_conv. | tensorflow/lite/testing/op_tests/transpose_conv.py | make_transpose_conv_tests | 1250281649/tensorflow | python | @register_make_test_function()
def make_transpose_conv_tests(options):
test_parameters = [{'input_shape': [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]], 'filter_size': [[1, 1], [1, 2], [3, 3]], 'strides': [[1, 1, 1, 1], [1, 3, 3, 1]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'channel_multiplier': [1, 2], 'output_shape': [[]], 'fully_quantize': [False]}, {'input_shape': [[1, 3, 3, 1]], 'filter_size': [[3, 3, 2, 1]], 'strides': [[1, 1, 1, 1]], 'padding': ['SAME'], 'data_format': ['NHWC'], 'channel_multiplier': [1], 'output_shape': [[1, 3, 3, 2]], 'fully_quantize': [True]}, {'input_shape': [[1, 3, 3, 1]], 'filter_size': [[3, 3, 2, 1]], 'strides': [[1, 2, 2, 1]], 'padding': ['SAME'], 'data_format': ['NHWC'], 'channel_multiplier': [1], 'output_shape': [[1, 6, 6, 2]], 'fully_quantize': [True]}, {'input_shape': [[1, 4, 3, 1]], 'filter_size': [[3, 3, 2, 1]], 'strides': [[1, 2, 2, 1]], 'padding': ['SAME'], 'data_format': ['NHWC'], 'channel_multiplier': [1], 'output_shape': [[1, 8, 6, 2]], 'fully_quantize': [True]}]
def get_tensor_shapes(parameters):
input_shape = parameters['input_shape']
filter_size = parameters['filter_size']
if (not parameters['fully_quantize']):
filter_shape = (filter_size + [input_shape[3], parameters['channel_multiplier']])
return [input_shape, filter_shape]
return [input_shape, filter_size]
def build_graph(parameters):
'Build a transpose_conv graph given `parameters`.'
(input_shape, filter_shape) = get_tensor_shapes(parameters)
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=input_shape)
filter_input = tf.compat.v1.placeholder(dtype=tf.float32, name='filter', shape=filter_shape)
if (not parameters['fully_quantize']):
input_tensors = [input_tensor, filter_input]
conv_outputs = tf.nn.conv2d(input_tensor, filter_input, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
out = tf.compat.v1.nn.conv2d_backprop_input(input_shape, filter_input, conv_outputs, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
else:
input_tensors = [input_tensor]
filter_input = create_tensor_data(np.float32, filter_shape, min_value=(- 1), max_value=1)
out = tf.nn.conv2d_transpose(input_tensor, filter_input, parameters['output_shape'], strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
return (input_tensors, [out])
def build_inputs(parameters, sess, inputs, outputs):
(input_shape, filter_shape) = get_tensor_shapes(parameters)
if (not parameters['fully_quantize']):
values = [create_tensor_data(np.float32, input_shape), create_tensor_data(np.float32, filter_shape)]
else:
values = [create_tensor_data(np.float32, input_shape, min_value=(- 1), max_value=1)]
return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values))))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs) |
def build_graph(parameters):
'Build a transpose_conv graph given `parameters`.'
(input_shape, filter_shape) = get_tensor_shapes(parameters)
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=input_shape)
filter_input = tf.compat.v1.placeholder(dtype=tf.float32, name='filter', shape=filter_shape)
if (not parameters['fully_quantize']):
input_tensors = [input_tensor, filter_input]
conv_outputs = tf.nn.conv2d(input_tensor, filter_input, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
out = tf.compat.v1.nn.conv2d_backprop_input(input_shape, filter_input, conv_outputs, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
else:
input_tensors = [input_tensor]
filter_input = create_tensor_data(np.float32, filter_shape, min_value=(- 1), max_value=1)
out = tf.nn.conv2d_transpose(input_tensor, filter_input, parameters['output_shape'], strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
return (input_tensors, [out]) | -8,626,366,598,057,815,000 | Build a transpose_conv graph given `parameters`. | tensorflow/lite/testing/op_tests/transpose_conv.py | build_graph | 1250281649/tensorflow | python | def build_graph(parameters):
(input_shape, filter_shape) = get_tensor_shapes(parameters)
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=input_shape)
filter_input = tf.compat.v1.placeholder(dtype=tf.float32, name='filter', shape=filter_shape)
if (not parameters['fully_quantize']):
input_tensors = [input_tensor, filter_input]
conv_outputs = tf.nn.conv2d(input_tensor, filter_input, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
out = tf.compat.v1.nn.conv2d_backprop_input(input_shape, filter_input, conv_outputs, strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
else:
input_tensors = [input_tensor]
filter_input = create_tensor_data(np.float32, filter_shape, min_value=(- 1), max_value=1)
out = tf.nn.conv2d_transpose(input_tensor, filter_input, parameters['output_shape'], strides=parameters['strides'], padding=parameters['padding'], data_format=parameters['data_format'])
return (input_tensors, [out]) |
def _adapt_clause(self, clause, as_filter, orm_only):
'Adapt incoming clauses to transformations which\n have been applied within this query.'
adapters = []
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if (as_filter and self._filter_aliases):
for fa in self._filter_aliases._visitor_iterator:
adapters.append((orm_only, fa.replace))
if self._from_obj_alias:
adapters.append((getattr(self, '_orm_only_from_obj_alias', orm_only), self._from_obj_alias.replace))
if self._polymorphic_adapters:
adapters.append((orm_only, self._adapt_polymorphic_element))
if (not adapters):
return clause
def replace(elem):
for (_orm_only, adapter) in adapters:
if ((not _orm_only) or ('_orm_adapt' in elem._annotations) or ('parententity' in elem._annotations)):
e = adapter(elem)
if (e is not None):
return e
return visitors.replacement_traverse(clause, {}, replace) | 179,562,849,315,056,350 | Adapt incoming clauses to transformations which
have been applied within this query. | lib/sqlalchemy/orm/query.py | _adapt_clause | slafs/sqlalchemy | python | def _adapt_clause(self, clause, as_filter, orm_only):
'Adapt incoming clauses to transformations which\n have been applied within this query.'
adapters = []
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if (as_filter and self._filter_aliases):
for fa in self._filter_aliases._visitor_iterator:
adapters.append((orm_only, fa.replace))
if self._from_obj_alias:
adapters.append((getattr(self, '_orm_only_from_obj_alias', orm_only), self._from_obj_alias.replace))
if self._polymorphic_adapters:
adapters.append((orm_only, self._adapt_polymorphic_element))
if (not adapters):
return clause
def replace(elem):
for (_orm_only, adapter) in adapters:
if ((not _orm_only) or ('_orm_adapt' in elem._annotations) or ('parententity' in elem._annotations)):
e = adapter(elem)
if (e is not None):
return e
return visitors.replacement_traverse(clause, {}, replace) |
@property
def statement(self):
'The full SELECT statement represented by this Query.\n\n The statement by default will not have disambiguating labels\n applied to the construct unless with_labels(True) is called\n first.\n\n '
stmt = self._compile_context(labels=self._with_labels).statement
if self._params:
stmt = stmt.params(self._params)
return stmt._annotate({'no_replacement_traverse': True}) | 8,025,505,478,787,422,000 | The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first. | lib/sqlalchemy/orm/query.py | statement | slafs/sqlalchemy | python | @property
def statement(self):
'The full SELECT statement represented by this Query.\n\n The statement by default will not have disambiguating labels\n applied to the construct unless with_labels(True) is called\n first.\n\n '
stmt = self._compile_context(labels=self._with_labels).statement
if self._params:
stmt = stmt.params(self._params)
return stmt._annotate({'no_replacement_traverse': True}) |
def subquery(self, name=None, with_labels=False, reduce_columns=False):
'return the full SELECT statement represented by\n this :class:`.Query`, embedded within an :class:`.Alias`.\n\n Eager JOIN generation within the query is disabled.\n\n :param name: string name to be assigned as the alias;\n this is passed through to :meth:`.FromClause.alias`.\n If ``None``, a name will be deterministically generated\n at compile time.\n\n :param with_labels: if True, :meth:`.with_labels` will be called\n on the :class:`.Query` first to apply table-qualified labels\n to all columns.\n\n :param reduce_columns: if True, :meth:`.Select.reduce_columns` will\n be called on the resulting :func:`.select` construct,\n to remove same-named columns where one also refers to the other\n via foreign key or WHERE clause equivalence.\n\n .. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``\n keyword arguments were added.\n\n '
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name) | 9,211,129,501,899,320,000 | return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added. | lib/sqlalchemy/orm/query.py | subquery | slafs/sqlalchemy | python | def subquery(self, name=None, with_labels=False, reduce_columns=False):
'return the full SELECT statement represented by\n this :class:`.Query`, embedded within an :class:`.Alias`.\n\n Eager JOIN generation within the query is disabled.\n\n :param name: string name to be assigned as the alias;\n this is passed through to :meth:`.FromClause.alias`.\n If ``None``, a name will be deterministically generated\n at compile time.\n\n :param with_labels: if True, :meth:`.with_labels` will be called\n on the :class:`.Query` first to apply table-qualified labels\n to all columns.\n\n :param reduce_columns: if True, :meth:`.Select.reduce_columns` will\n be called on the resulting :func:`.select` construct,\n to remove same-named columns where one also refers to the other\n via foreign key or WHERE clause equivalence.\n\n .. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``\n keyword arguments were added.\n\n '
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name) |
def cte(self, name=None, recursive=False):
'Return the full SELECT statement represented by this\n :class:`.Query` represented as a common table expression (CTE).\n\n .. versionadded:: 0.7.6\n\n Parameters and usage are the same as those of the\n :meth:`.SelectBase.cte` method; see that method for\n further details.\n\n Here is the `Postgresql WITH\n RECURSIVE example\n <http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.\n Note that, in this example, the ``included_parts`` cte and the\n ``incl_alias`` alias of it are Core selectables, which\n means the columns are accessed via the ``.c.`` attribute. The\n ``parts_alias`` object is an :func:`.orm.aliased` instance of the\n ``Part`` entity, so column-mapped attributes are available\n directly::\n\n from sqlalchemy.orm import aliased\n\n class Part(Base):\n __tablename__ = \'part\'\n part = Column(String, primary_key=True)\n sub_part = Column(String, primary_key=True)\n quantity = Column(Integer)\n\n included_parts = session.query(\n Part.sub_part,\n Part.part,\n Part.quantity).\\\n filter(Part.part=="our part").\\\n cte(name="included_parts", recursive=True)\n\n incl_alias = aliased(included_parts, name="pr")\n parts_alias = aliased(Part, name="p")\n included_parts = included_parts.union_all(\n session.query(\n parts_alias.sub_part,\n parts_alias.part,\n parts_alias.quantity).\\\n filter(parts_alias.part==incl_alias.c.sub_part)\n )\n\n q = session.query(\n included_parts.c.sub_part,\n func.sum(included_parts.c.quantity).\n label(\'total_quantity\')\n ).\\\n group_by(included_parts.c.sub_part)\n\n .. seealso::\n\n :meth:`.SelectBase.cte`\n\n '
return self.enable_eagerloads(False).statement.cte(name=name, recursive=recursive) | 6,680,600,726,794,780,000 | Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\
filter(Part.part=="our part").\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte` | lib/sqlalchemy/orm/query.py | cte | slafs/sqlalchemy | python | def cte(self, name=None, recursive=False):
'Return the full SELECT statement represented by this\n :class:`.Query` represented as a common table expression (CTE).\n\n .. versionadded:: 0.7.6\n\n Parameters and usage are the same as those of the\n :meth:`.SelectBase.cte` method; see that method for\n further details.\n\n Here is the `Postgresql WITH\n RECURSIVE example\n <http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.\n Note that, in this example, the ``included_parts`` cte and the\n ``incl_alias`` alias of it are Core selectables, which\n means the columns are accessed via the ``.c.`` attribute. The\n ``parts_alias`` object is an :func:`.orm.aliased` instance of the\n ``Part`` entity, so column-mapped attributes are available\n directly::\n\n from sqlalchemy.orm import aliased\n\n class Part(Base):\n __tablename__ = \'part\'\n part = Column(String, primary_key=True)\n sub_part = Column(String, primary_key=True)\n quantity = Column(Integer)\n\n included_parts = session.query(\n Part.sub_part,\n Part.part,\n Part.quantity).\\\n filter(Part.part=="our part").\\\n cte(name="included_parts", recursive=True)\n\n incl_alias = aliased(included_parts, name="pr")\n parts_alias = aliased(Part, name="p")\n included_parts = included_parts.union_all(\n session.query(\n parts_alias.sub_part,\n parts_alias.part,\n parts_alias.quantity).\\\n filter(parts_alias.part==incl_alias.c.sub_part)\n )\n\n q = session.query(\n included_parts.c.sub_part,\n func.sum(included_parts.c.quantity).\n label(\'total_quantity\')\n ).\\\n group_by(included_parts.c.sub_part)\n\n .. seealso::\n\n :meth:`.SelectBase.cte`\n\n '
return self.enable_eagerloads(False).statement.cte(name=name, recursive=recursive) |