body (string, 26 to 98.2k chars) | body_hash (int64, approx. -9.22e18 to 9.22e18) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (1 class: python) | body_without_docstring (string, 20 to 98.2k chars)
---|---|---|---|---|---|---|---|
@message.setter
def message(self, message):
'Sets the message of this WikiCommit.\n\n\n :param message: The message of this WikiCommit. # noqa: E501\n :type: str\n '
self._message = message | -7,870,938,731,241,469,000 | Sets the message of this WikiCommit.
:param message: The message of this WikiCommit. # noqa: E501
:type: str | gitea_api/models/wiki_commit.py | message | r7l/python-gitea-api | python | @message.setter
def message(self, message):
'Sets the message of this WikiCommit.\n\n\n :param message: The message of this WikiCommit. # noqa: E501\n :type: str\n '
self._message = message |
@property
def sha(self):
'Gets the sha of this WikiCommit. # noqa: E501\n\n\n :return: The sha of this WikiCommit. # noqa: E501\n :rtype: str\n '
return self._sha | 2,141,027,612,509,132,000 | Gets the sha of this WikiCommit. # noqa: E501
:return: The sha of this WikiCommit. # noqa: E501
:rtype: str | gitea_api/models/wiki_commit.py | sha | r7l/python-gitea-api | python | @property
def sha(self):
'Gets the sha of this WikiCommit. # noqa: E501\n\n\n :return: The sha of this WikiCommit. # noqa: E501\n :rtype: str\n '
return self._sha |
@sha.setter
def sha(self, sha):
'Sets the sha of this WikiCommit.\n\n\n :param sha: The sha of this WikiCommit. # noqa: E501\n :type: str\n '
self._sha = sha | 6,294,440,623,741,654,000 | Sets the sha of this WikiCommit.
:param sha: The sha of this WikiCommit. # noqa: E501
:type: str | gitea_api/models/wiki_commit.py | sha | r7l/python-gitea-api | python | @sha.setter
def sha(self, sha):
'Sets the sha of this WikiCommit.\n\n\n :param sha: The sha of this WikiCommit. # noqa: E501\n :type: str\n '
self._sha = sha |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(WikiCommit, dict):
for (key, value) in self.items():
result[key] = value
return result | -3,813,903,353,230,840,000 | Returns the model properties as a dict | gitea_api/models/wiki_commit.py | to_dict | r7l/python-gitea-api | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(WikiCommit, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | gitea_api/models/wiki_commit.py | to_str | r7l/python-gitea-api | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | gitea_api/models/wiki_commit.py | __repr__ | r7l/python-gitea-api | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, WikiCommit)):
return False
return (self.__dict__ == other.__dict__) | 7,727,040,178,788,317,000 | Returns true if both objects are equal | gitea_api/models/wiki_commit.py | __eq__ | r7l/python-gitea-api | python | def __eq__(self, other):
if (not isinstance(other, WikiCommit)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | gitea_api/models/wiki_commit.py | __ne__ | r7l/python-gitea-api | python | def __ne__(self, other):
return (not (self == other)) |
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, data_collection_rule_name: Optional[pulumi.Input[str]]=None, data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]]=None, data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]]=None, description: Optional[pulumi.Input[str]]=None, destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]]=None, location: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None):
"\n Definition of ARM tracked top level resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.\n :param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources. \n This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.\n :param pulumi.Input[str] description: Description of the data collection rule.\n :param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.\n :param pulumi.Input[str] location: The geo-location where the resource lives.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n "
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_collection_rule_name'] = data_collection_rule_name
if ((data_flows is None) and (not opts.urn)):
raise TypeError("Missing required property 'data_flows'")
__props__['data_flows'] = data_flows
__props__['data_sources'] = data_sources
__props__['description'] = description
if ((destinations is None) and (not opts.urn)):
raise TypeError("Missing required property 'destinations'")
__props__['destinations'] = destinations
__props__['location'] = location
if ((resource_group_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:insights/v20191101preview:DataCollectionRule'), pulumi.Alias(type_='azure-native:insights:DataCollectionRule'), pulumi.Alias(type_='azure-nextgen:insights:DataCollectionRule')])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRule, __self__).__init__('azure-native:insights/v20191101preview:DataCollectionRule', resource_name, __props__, opts) | -8,366,357,137,412,955,000 | Definition of ARM tracked top level resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param pulumi.Input[str] description: Description of the data collection rule.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.
:param pulumi.Input[str] location: The geo-location where the resource lives.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | __init__ | pulumi-bot/pulumi-azure-native | python | def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, data_collection_rule_name: Optional[pulumi.Input[str]]=None, data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]]=None, data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]]=None, description: Optional[pulumi.Input[str]]=None, destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]]=None, location: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None):
"\n Definition of ARM tracked top level resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.\n :param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources. \n This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.\n :param pulumi.Input[str] description: Description of the data collection rule.\n :param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.\n :param pulumi.Input[str] location: The geo-location where the resource lives.\n :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n "
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_collection_rule_name'] = data_collection_rule_name
if ((data_flows is None) and (not opts.urn)):
raise TypeError("Missing required property 'data_flows'")
__props__['data_flows'] = data_flows
__props__['data_sources'] = data_sources
__props__['description'] = description
if ((destinations is None) and (not opts.urn)):
raise TypeError("Missing required property 'destinations'")
__props__['destinations'] = destinations
__props__['location'] = location
if ((resource_group_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:insights/v20191101preview:DataCollectionRule'), pulumi.Alias(type_='azure-native:insights:DataCollectionRule'), pulumi.Alias(type_='azure-nextgen:insights:DataCollectionRule')])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRule, __self__).__init__('azure-native:insights/v20191101preview:DataCollectionRule', resource_name, __props__, opts) |
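A minimal usage sketch for the constructor documented above, assuming the pulumi_azure_native package is installed; the resource-group name, stream name, and workspace resource ID below are illustrative placeholders, not values taken from this file.
from pulumi_azure_native.insights import v20191101preview as insights_preview
# Hypothetical names throughout: 'example-rg', 'Microsoft-Perf', and the workspace resource ID are placeholders.
rule = insights_preview.DataCollectionRule(
    'exampleRule',
    resource_group_name='example-rg',
    location='eastus',
    data_flows=[insights_preview.DataFlowArgs(
        streams=['Microsoft-Perf'],
        destinations=['centralWorkspace'])],
    destinations=insights_preview.DataCollectionRuleDestinationsArgs(
        log_analytics=[insights_preview.LogAnalyticsDestinationArgs(
            name='centralWorkspace',
            workspace_resource_id='<log-analytics-workspace-resource-id>')]))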
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'DataCollectionRule':
"\n Get an existing DataCollectionRule resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['data_flows'] = None
__props__['data_sources'] = None
__props__['description'] = None
__props__['destinations'] = None
__props__['etag'] = None
__props__['location'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['tags'] = None
__props__['type'] = None
return DataCollectionRule(resource_name, opts=opts, __props__=__props__) | -3,088,457,438,230,935,600 | Get an existing DataCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | get | pulumi-bot/pulumi-azure-native | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'DataCollectionRule':
"\n Get an existing DataCollectionRule resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['data_flows'] = None
__props__['data_sources'] = None
__props__['description'] = None
__props__['destinations'] = None
__props__['etag'] = None
__props__['location'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['tags'] = None
__props__['type'] = None
return DataCollectionRule(resource_name, opts=opts, __props__=__props__) |
@property
@pulumi.getter(name='dataFlows')
def data_flows(self) -> pulumi.Output[Sequence['outputs.DataFlowResponse']]:
'\n The specification of data flows.\n '
return pulumi.get(self, 'data_flows') | 998,013,760,708,920,400 | The specification of data flows. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | data_flows | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter(name='dataFlows')
def data_flows(self) -> pulumi.Output[Sequence['outputs.DataFlowResponse']]:
'\n \n '
return pulumi.get(self, 'data_flows') |
@property
@pulumi.getter(name='dataSources')
def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]:
'\n The specification of data sources. \n This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.\n '
return pulumi.get(self, 'data_sources') | 6,409,329,648,646,107,000 | The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | data_sources | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter(name='dataSources')
def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]:
'\n The specification of data sources. \n This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.\n '
return pulumi.get(self, 'data_sources') |
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
'\n Description of the data collection rule.\n '
return pulumi.get(self, 'description') | -765,155,414,852,401,200 | Description of the data collection rule. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | description | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'description') |
@property
@pulumi.getter
def destinations(self) -> pulumi.Output['outputs.DataCollectionRuleResponseDestinations']:
'\n The specification of destinations.\n '
return pulumi.get(self, 'destinations') | 2,759,344,770,410,032,600 | The specification of destinations. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | destinations | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def destinations(self) -> pulumi.Output['outputs.DataCollectionRuleResponseDestinations']:
'\n \n '
return pulumi.get(self, 'destinations') |
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n Resource entity tag (ETag).\n '
return pulumi.get(self, 'etag') | 1,359,688,913,322,792,700 | Resource entity tag (ETag). | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | etag | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'etag') |
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
'\n The geo-location where the resource lives.\n '
return pulumi.get(self, 'location') | 7,682,718,716,494,702,000 | The geo-location where the resource lives. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | location | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n The name of the resource.\n '
return pulumi.get(self, 'name') | 7,945,008,266,317,837,000 | The name of the resource. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | name | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name') |
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[str]:
'\n The resource provisioning state.\n '
return pulumi.get(self, 'provisioning_state') | -3,707,423,413,488,761,300 | The resource provisioning state. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | provisioning_state | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'provisioning_state') |
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n Resource tags.\n '
return pulumi.get(self, 'tags') | -2,929,197,049,816,896,000 | Resource tags. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | tags | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n \n '
return pulumi.get(self, 'tags') |
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n The type of the resource.\n '
return pulumi.get(self, 'type') | 3,589,901,220,239,403,500 | The type of the resource. | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | type | pulumi-bot/pulumi-azure-native | python | @property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type') |
def test_corner_case_for_power_at_1(metric_class=TweedieDevianceScore):
'Test that corner case for power=1.0 produces a valid result.'
metric = TweedieDevianceScore()
targets = torch.tensor([0, 1, 0, 1])
preds = torch.tensor([0.1, 0.1, 0.1, 0.1])
val = metric(preds, targets)
assert (val != 0.0)
assert (not torch.isnan(val)) | -4,181,891,001,919,652,000 | Test that corner case for power=1.0 produces a valid result. | tests/regression/test_tweedie_deviance.py | test_corner_case_for_power_at_1 | Abdelrhman-Hosny/metrics | python | def test_corner_case_for_power_at_1(metric_class=TweedieDevianceScore):
metric = TweedieDevianceScore()
targets = torch.tensor([0, 1, 0, 1])
preds = torch.tensor([0.1, 0.1, 0.1, 0.1])
val = metric(preds, targets)
assert (val != 0.0)
assert (not torch.isnan(val)) |
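For reference, a hedged illustration of exercising the same corner case with the power set explicitly to 1.0; the `power` constructor argument is assumed from torchmetrics' public interface rather than from this file.
import torch
from torchmetrics import TweedieDevianceScore
# power=1.0 selects Poisson deviance; zero-valued targets are the edge case being tested.
metric = TweedieDevianceScore(power=1.0)
preds = torch.tensor([0.1, 0.1, 0.1, 0.1])
targets = torch.tensor([0, 1, 0, 1])
print(metric(preds, targets))  # expected: a finite, non-zero tensor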
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[(_INPUT_TYPE, Tuple[(_K, _INPUT_TYPE)])])
@beam.typehints.with_output_types(Union[(_OUTPUT_TYPE, Tuple[(_K, _OUTPUT_TYPE)])])
def RunInference(examples: beam.pvalue.PCollection, inference_spec_type: model_spec_pb2.InferenceSpecType) -> beam.pvalue.PCollection:
'Run inference with a model.\n\n There are two types of inference you can perform using this PTransform:\n 1. In-process inference from a SavedModel instance. Used when\n `saved_model_spec` field is set in `inference_spec_type`.\n 2. Remote inference by using a service endpoint. Used when\n `ai_platform_prediction_model_spec` field is set in\n `inference_spec_type`.\n\n TODO(b/131873699): Add support for the following features:\n 1. tf.train.SequenceExample as Input for RemotePredict.\n 2. beam.Shared() initialization via Fingerprint for models CSE.\n 3. Models as SideInput.\n 4. TPU models.\n\n Args:\n examples: A PCollection containing examples of the following possible kinds,\n each with their corresponding return type.\n - PCollection[Example] -> PCollection[PredictionLog]\n * Works with Classify, Regress, MultiInference, Predict and\n RemotePredict.\n\n - PCollection[SequenceExample] -> PCollection[PredictionLog]\n * Works with Predict and (serialized) RemotePredict.\n\n - PCollection[bytes] -> PCollection[PredictionLog]\n * For serialized Example: Works with Classify, Regress,\n MultiInference, Predict and RemotePredict.\n * For everything else: Works with Predict and RemotePredict.\n\n - PCollection[Tuple[K, Example]] -> PCollection[\n Tuple[K, PredictionLog]]\n * Works with Classify, Regress, MultiInference, Predict and\n RemotePredict.\n\n - PCollection[Tuple[K, SequenceExample]] -> PCollection[\n Tuple[K, PredictionLog]]\n * Works with Predict and (serialized) RemotePredict.\n\n - PCollection[Tuple[K, bytes]] -> PCollection[\n Tuple[K, PredictionLog]]\n * For serialized Example: Works with Classify, Regress,\n MultiInference, Predict and RemotePredict.\n * For everything else: Works with Predict and RemotePredict.\n\n inference_spec_type: Model inference endpoint.\n\n Returns:\n A PCollection (possibly keyed) containing prediction logs.\n '
return (examples | ('RunInferenceImpl' >> run_inference.RunInferenceImpl(inference_spec_type))) | 2,901,633,151,631,102,000 | Run inference with a model.
There are two types of inference you can perform using this PTransform:
1. In-process inference from a SavedModel instance. Used when
`saved_model_spec` field is set in `inference_spec_type`.
2. Remote inference by using a service endpoint. Used when
`ai_platform_prediction_model_spec` field is set in
`inference_spec_type`.
TODO(b/131873699): Add support for the following features:
1. tf.train.SequenceExample as Input for RemotePredict.
2. beam.Shared() initialization via Fingerprint for models CSE.
3. Models as SideInput.
4. TPU models.
Args:
examples: A PCollection containing examples of the following possible kinds,
each with their corresponding return type.
- PCollection[Example] -> PCollection[PredictionLog]
* Works with Classify, Regress, MultiInference, Predict and
RemotePredict.
- PCollection[SequenceExample] -> PCollection[PredictionLog]
* Works with Predict and (serialized) RemotePredict.
- PCollection[bytes] -> PCollection[PredictionLog]
* For serialized Example: Works with Classify, Regress,
MultiInference, Predict and RemotePredict.
* For everything else: Works with Predict and RemotePredict.
- PCollection[Tuple[K, Example]] -> PCollection[
Tuple[K, PredictionLog]]
* Works with Classify, Regress, MultiInference, Predict and
RemotePredict.
- PCollection[Tuple[K, SequenceExample]] -> PCollection[
Tuple[K, PredictionLog]]
* Works with Predict and (serialized) RemotePredict.
- PCollection[Tuple[K, bytes]] -> PCollection[
Tuple[K, PredictionLog]]
* For serialized Example: Works with Classify, Regress,
MultiInference, Predict and RemotePredict.
* For everything else: Works with Predict and RemotePredict.
inference_spec_type: Model inference endpoint.
Returns:
A PCollection (possibly keyed) containing prediction logs. | tfx_bsl/public/beam/run_inference.py | RunInference | RossKohler/tfx-bsl | python | @beam.ptransform_fn
@beam.typehints.with_input_types(Union[(_INPUT_TYPE, Tuple[(_K, _INPUT_TYPE)])])
@beam.typehints.with_output_types(Union[(_OUTPUT_TYPE, Tuple[(_K, _OUTPUT_TYPE)])])
def RunInference(examples: beam.pvalue.PCollection, inference_spec_type: model_spec_pb2.InferenceSpecType) -> beam.pvalue.PCollection:
'Run inference with a model.\n\n There are two types of inference you can perform using this PTransform:\n 1. In-process inference from a SavedModel instance. Used when\n `saved_model_spec` field is set in `inference_spec_type`.\n 2. Remote inference by using a service endpoint. Used when\n `ai_platform_prediction_model_spec` field is set in\n `inference_spec_type`.\n\n TODO(b/131873699): Add support for the following features:\n 1. tf.train.SequenceExample as Input for RemotePredict.\n 2. beam.Shared() initialization via Fingerprint for models CSE.\n 3. Models as SideInput.\n 4. TPU models.\n\n Args:\n examples: A PCollection containing examples of the following possible kinds,\n each with their corresponding return type.\n - PCollection[Example] -> PCollection[PredictionLog]\n * Works with Classify, Regress, MultiInference, Predict and\n RemotePredict.\n\n - PCollection[SequenceExample] -> PCollection[PredictionLog]\n * Works with Predict and (serialized) RemotePredict.\n\n - PCollection[bytes] -> PCollection[PredictionLog]\n * For serialized Example: Works with Classify, Regress,\n MultiInference, Predict and RemotePredict.\n * For everything else: Works with Predict and RemotePredict.\n\n - PCollection[Tuple[K, Example]] -> PCollection[\n Tuple[K, PredictionLog]]\n * Works with Classify, Regress, MultiInference, Predict and\n RemotePredict.\n\n - PCollection[Tuple[K, SequenceExample]] -> PCollection[\n Tuple[K, PredictionLog]]\n * Works with Predict and (serialized) RemotePredict.\n\n - PCollection[Tuple[K, bytes]] -> PCollection[\n Tuple[K, PredictionLog]]\n * For serialized Example: Works with Classify, Regress,\n MultiInference, Predict and RemotePredict.\n * For everything else: Works with Predict and RemotePredict.\n\n inference_spec_type: Model inference endpoint.\n\n Returns:\n A PCollection (possibly keyed) containing prediction logs.\n '
return (examples | ('RunInferenceImpl' >> run_inference.RunInferenceImpl(inference_spec_type))) |
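A sketch of wiring this PTransform into a Beam pipeline for in-process inference from a SavedModel; the model path and the pre-serialized tf.Example inputs are assumptions made for illustration, not values from this file.
import apache_beam as beam
from tfx_bsl.public.beam.run_inference import RunInference
from tfx_bsl.public.proto import model_spec_pb2
# Hypothetical input: a list of tf.Example protos already serialized to bytes.
serialized_examples = [b'...']
inference_spec = model_spec_pb2.InferenceSpecType(
    saved_model_spec=model_spec_pb2.SavedModelSpec(model_path='/tmp/my_saved_model'))
with beam.Pipeline() as pipeline:
    _ = (pipeline
         | 'CreateExamples' >> beam.Create(serialized_examples)
         | 'RunInference' >> RunInference(inference_spec)
         | 'PrintPredictionLogs' >> beam.Map(print))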
def hapi(trange=None, server=None, dataset=None, parameters='', suffix='', catalog=False):
"\n Loads data from a HAPI server into pytplot variables\n\n Parameters\n -----------\n trange: list of str or list of float\n Time range to load the data for\n\n server: str\n HAPI server to load the data from\n\n dataset: str\n HAPI dataset to load\n\n parameters: str or list of str\n Parameters in the dataset to load; default\n is to load them all\n\n suffix: str\n Suffix to append to the tplot variables\n\n catalog: bool\n If True, returns the server's catalog of datasets\n\n Returns\n -------\n List of tplot variables created.\n "
if (server is None):
print('Error, no server specified; example servers include:')
print('- https://cdaweb.gsfc.nasa.gov/hapi')
print('- https://pds-ppi.igpp.ucla.edu/hapi')
print('- http://planet.physics.uiowa.edu/das/das2Server/hapi')
print('- https://iswa.gsfc.nasa.gov/IswaSystemWebApp/hapi')
print('- http://lasp.colorado.edu/lisird/hapi')
return
if catalog:
catalog = load_hapi(server)
items = []
if ('catalog' in catalog.keys()):
items = catalog['catalog']
print('Available datasets: ')
for item in items:
if ('title' in item.keys()):
print(((item['id'] + ': ') + item['title']))
else:
print(item['id'])
return
if (dataset is None):
print('Error, no dataset specified; please see the catalog for a list of available data sets.')
return
if (trange is None):
print('Error, no trange specified')
return
if isinstance(parameters, list):
parameters = ','.join(parameters)
opts = {'logging': False}
(data, hapi_metadata) = load_hapi(server, dataset, parameters, trange[0], trange[1], **opts)
out_vars = []
params = hapi_metadata['parameters']
for param in params[1:]:
spec = False
param_name = param.get('name')
print(('Loading ' + param_name))
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=ResourceWarning)
(data, hapi_metadata) = load_hapi(server, dataset, param_name, trange[0], trange[1], **opts)
except:
breakpoint()
print('Error! 95')
continue
timestamps = [datapoint[0] for datapoint in data]
unixtimes = [time_double(timestamp.decode('utf-8')) for timestamp in timestamps]
param_type = hapi_metadata['parameters'][1].get('type')
if (param_type is None):
param_type = 'double'
data_size = hapi_metadata['parameters'][1].get('size')
if (data_size is None):
single_line = True
try:
if (param_type == 'double'):
single_line = isinstance(data[0][1], np.float64)
elif (param_type == 'integer'):
single_line = isinstance(data[0][1], np.int32)
except IndexError:
breakpoint()
print('Error! 103')
continue
if single_line:
data_out = np.zeros(len(data))
else:
try:
data_out = np.zeros((len(data), len(data[0][1])))
except TypeError:
print('Error! 112')
breakpoint()
continue
for (idx, datapoint) in enumerate(data):
if single_line:
data_out[idx] = datapoint[1]
else:
data_out[idx, :] = datapoint[1]
data_out = data_out.squeeze()
fill_value = hapi_metadata['parameters'][1].get('fill')
if (fill_value is not None):
if (param_type == 'double'):
fill_value = float(fill_value)
data_out[(data_out == fill_value)] = np.nan
elif (param_type == 'integer'):
fill_value = int(fill_value)
data_out[(data_out == fill_value)] = 0
bins = param.get('bins')
if (bins is not None):
centers = bins[0].get('centers')
if (centers is not None):
spec = True
data_table = {'x': unixtimes, 'y': data_out}
if spec:
data_table['v'] = centers
saved = store_data((param_name + suffix), data=data_table)
metadata = get_data((param_name + suffix), metadata=True)
metadata['HAPI'] = hapi_metadata
if spec:
options((param_name + suffix), 'spec', True)
if saved:
out_vars.append((param_name + suffix))
sleep(1)
return out_vars | -1,940,759,919,022,476,800 | Loads data from a HAPI server into pytplot variables
Parameters
-----------
trange: list of str or list of float
Time range to load the data for
server: str
HAPI server to load the data from
dataset: str
HAPI dataset to load
parameters: str or list of str
Parameters in the dataset to load; default
is to load them all
suffix: str
Suffix to append to the tplot variables
catalog: bool
If True, returns the server's catalog of datasets
Returns
-------
List of tplot variables created. | pyspedas/hapi/hapi.py | hapi | pulupa/pyspedas | python | def hapi(trange=None, server=None, dataset=None, parameters=, suffix=, catalog=False):
"\n Loads data from a HAPI server into pytplot variables\n\n Parameters\n -----------\n trange: list of str or list of float\n Time range to load the data for\n\n server: str\n HAPI server to load the data from\n\n dataset: str\n HAPI dataset to load\n\n parameters: str or list of str\n Parameters in the dataset to load; default\n is to load them all\n\n suffix: str\n Suffix to append to the tplot variables\n\n catalog: bool\n If True, returns the server's catalog of datasets\n\n Returns\n -------\n List of tplot variables created.\n "
if (server is None):
print('Error, no server specified; example servers include:')
print('- https://cdaweb.gsfc.nasa.gov/hapi')
print('- https://pds-ppi.igpp.ucla.edu/hapi')
print('- http://planet.physics.uiowa.edu/das/das2Server/hapi')
print('- https://iswa.gsfc.nasa.gov/IswaSystemWebApp/hapi')
print('- http://lasp.colorado.edu/lisird/hapi')
return
if catalog:
catalog = load_hapi(server)
items = []
if ('catalog' in catalog.keys()):
items = catalog['catalog']
print('Available datasets: ')
for item in items:
if ('title' in item.keys()):
print(((item['id'] + ': ') + item['title']))
else:
print(item['id'])
return
if (dataset is None):
print('Error, no dataset specified; please see the catalog for a list of available data sets.')
return
if (trange is None):
print('Error, no trange specified')
return
if isinstance(parameters, list):
parameters = ','.join(parameters)
opts = {'logging': False}
(data, hapi_metadata) = load_hapi(server, dataset, parameters, trange[0], trange[1], **opts)
out_vars = []
params = hapi_metadata['parameters']
for param in params[1:]:
spec = False
param_name = param.get('name')
print(('Loading ' + param_name))
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=ResourceWarning)
(data, hapi_metadata) = load_hapi(server, dataset, param_name, trange[0], trange[1], **opts)
except:
breakpoint()
print('Error! 95')
continue
timestamps = [datapoint[0] for datapoint in data]
unixtimes = [time_double(timestamp.decode('utf-8')) for timestamp in timestamps]
param_type = hapi_metadata['parameters'][1].get('type')
if (param_type is None):
param_type = 'double'
data_size = hapi_metadata['parameters'][1].get('size')
if (data_size is None):
single_line = True
try:
if (param_type == 'double'):
single_line = isinstance(data[0][1], np.float64)
elif (param_type == 'integer'):
single_line = isinstance(data[0][1], np.int32)
except IndexError:
breakpoint()
print('Error! 103')
continue
if single_line:
data_out = np.zeros(len(data))
else:
try:
data_out = np.zeros((len(data), len(data[0][1])))
except TypeError:
print('Error! 112')
breakpoint()
continue
for (idx, datapoint) in enumerate(data):
if single_line:
data_out[idx] = datapoint[1]
else:
data_out[idx, :] = datapoint[1]
data_out = data_out.squeeze()
fill_value = hapi_metadata['parameters'][1].get('fill')
if (fill_value is not None):
if (param_type == 'double'):
fill_value = float(fill_value)
data_out[(data_out == fill_value)] = np.nan
elif (param_type == 'integer'):
fill_value = int(fill_value)
data_out[(data_out == fill_value)] = 0
bins = param.get('bins')
if (bins is not None):
centers = bins[0].get('centers')
if (centers is not None):
spec = True
data_table = {'x': unixtimes, 'y': data_out}
if spec:
data_table['v'] = centers
saved = store_data((param_name + suffix), data=data_table)
metadata = get_data((param_name + suffix), metadata=True)
metadata['HAPI'] = hapi_metadata
if spec:
options((param_name + suffix), 'spec', True)
if saved:
out_vars.append((param_name + suffix))
sleep(1)
return out_vars |
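An illustrative call of the loader above; the server URL comes from the hint list printed by the function, while the dataset and parameter names are examples of CDAWeb HAPI identifiers chosen for demonstration.
from pyspedas.hapi.hapi import hapi
loaded_vars = hapi(trange=['2020-01-01', '2020-01-02'],
                   server='https://cdaweb.gsfc.nasa.gov/hapi',
                   dataset='OMNI_HRO2_1MIN',
                   parameters=['BX_GSE', 'BY_GSE'])
print(loaded_vars)  # names of the tplot variables that were created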
def wrap_socket(sock, server_hostname, ssl_context=None, force_proto=None):
"\n A vastly simplified SSL wrapping function. We'll probably extend this to\n do more things later.\n "
global _context
if ssl_context:
_ssl_context = ssl_context
else:
if (_context is None):
_context = init_context()
_ssl_context = _context
ssl_sock = _ssl_context.wrap_socket(sock, server_hostname=server_hostname)
if _ssl_context.check_hostname:
try:
ssl.match_hostname(ssl_sock.getpeercert(), server_hostname)
except AttributeError:
ssl.verify_hostname(ssl_sock, server_hostname)
proto = force_proto
with ignore_missing():
if (proto is None):
proto = ssl_sock.selected_alpn_protocol()
with ignore_missing():
if (proto is None):
proto = ssl_sock.selected_npn_protocol()
return (ssl_sock, proto) | -732,052,899,502,781,700 | A vastly simplified SSL wrapping function. We'll probably extend this to
do more things later. | hyper/tls.py | wrap_socket | qtacore/hyper | python | def wrap_socket(sock, server_hostname, ssl_context=None, force_proto=None):
"\n A vastly simplified SSL wrapping function. We'll probably extend this to\n do more things later.\n "
global _context
if ssl_context:
_ssl_context = ssl_context
else:
if (_context is None):
_context = init_context()
_ssl_context = _context
ssl_sock = _ssl_context.wrap_socket(sock, server_hostname=server_hostname)
if _ssl_context.check_hostname:
try:
ssl.match_hostname(ssl_sock.getpeercert(), server_hostname)
except AttributeError:
ssl.verify_hostname(ssl_sock, server_hostname)
proto = force_proto
with ignore_missing():
if (proto is None):
proto = ssl_sock.selected_alpn_protocol()
with ignore_missing():
if (proto is None):
proto = ssl_sock.selected_npn_protocol()
return (ssl_sock, proto) |
def init_context(cert_path=None, cert=None, cert_password=None):
"\n Create a new ``SSLContext`` that is correctly set up for an HTTP/2\n connection. This SSL context object can be customized and passed as a\n parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class.\n Provide your own certificate file in case you don’t want to use hyper’s\n default certificate. The path to the certificate can be absolute or\n relative to your working directory.\n\n :param cert_path: (optional) The path to the certificate file of\n “certification authority” (CA) certificates\n :param cert: (optional) if string, path to ssl client cert file (.pem).\n If tuple, ('cert', 'key') pair.\n The certfile string must be the path to a single file in PEM format\n containing the certificate as well as any number of CA certificates\n needed to establish the certificate’s authenticity. The keyfile string,\n if present, must point to a file containing the private key in.\n Otherwise the private key will be taken from certfile as well.\n :param cert_password: (optional) The password argument may be a function to\n call to get the password for decrypting the private key. It will only\n be called if the private key is encrypted and a password is necessary.\n It will be called with no arguments, and it should return a string,\n bytes, or bytearray. If the return value is a string it will be\n encoded as UTF-8 before using it to decrypt the key. Alternatively a\n string, bytes, or bytearray value may be supplied directly as the\n password argument. It will be ignored if the private key is not\n encrypted and no password is needed.\n :returns: An ``SSLContext`` correctly set up for HTTP/2.\n "
cafile = (cert_path or cert_loc)
if ((not cafile) or (not path.exists(cafile))):
err_msg = ((((('No certificate found at ' + str(cafile)) + '. Either ') + 'ensure the default cert.pem file is included in the ') + 'distribution or provide a custom certificate when ') + 'creating the connection.')
raise MissingCertFile(err_msg)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.set_default_verify_paths()
context.load_verify_locations(cafile=cafile)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
with ignore_missing():
context.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS)
with ignore_missing():
context.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS)
context.options |= ssl.OP_NO_COMPRESSION
if (cert is not None):
if (not isinstance(cert, six.string_types)):
context.load_cert_chain(cert[0], cert[1], cert_password)
else:
context.load_cert_chain(cert, password=cert_password)
return context | -1,115,899,053,343,230,600 | Create a new ``SSLContext`` that is correctly set up for an HTTP/2
connection. This SSL context object can be customized and passed as a
parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class.
Provide your own certificate file in case you don’t want to use hyper’s
default certificate. The path to the certificate can be absolute or
relative to your working directory.
:param cert_path: (optional) The path to the certificate file of
“certification authority” (CA) certificates
:param cert: (optional) if string, path to ssl client cert file (.pem).
If tuple, ('cert', 'key') pair.
The certfile string must be the path to a single file in PEM format
containing the certificate as well as any number of CA certificates
needed to establish the certificate’s authenticity. The keyfile string,
if present, must point to a file containing the private key in.
Otherwise the private key will be taken from certfile as well.
:param cert_password: (optional) The password argument may be a function to
call to get the password for decrypting the private key. It will only
be called if the private key is encrypted and a password is necessary.
It will be called with no arguments, and it should return a string,
bytes, or bytearray. If the return value is a string it will be
encoded as UTF-8 before using it to decrypt the key. Alternatively a
string, bytes, or bytearray value may be supplied directly as the
password argument. It will be ignored if the private key is not
encrypted and no password is needed.
:returns: An ``SSLContext`` correctly set up for HTTP/2. | hyper/tls.py | init_context | qtacore/hyper | python | def init_context(cert_path=None, cert=None, cert_password=None):
"\n Create a new ``SSLContext`` that is correctly set up for an HTTP/2\n connection. This SSL context object can be customized and passed as a\n parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class.\n Provide your own certificate file in case you don’t want to use hyper’s\n default certificate. The path to the certificate can be absolute or\n relative to your working directory.\n\n :param cert_path: (optional) The path to the certificate file of\n “certification authority” (CA) certificates\n :param cert: (optional) if string, path to ssl client cert file (.pem).\n If tuple, ('cert', 'key') pair.\n The certfile string must be the path to a single file in PEM format\n containing the certificate as well as any number of CA certificates\n needed to establish the certificate’s authenticity. The keyfile string,\n if present, must point to a file containing the private key in.\n Otherwise the private key will be taken from certfile as well.\n :param cert_password: (optional) The password argument may be a function to\n call to get the password for decrypting the private key. It will only\n be called if the private key is encrypted and a password is necessary.\n It will be called with no arguments, and it should return a string,\n bytes, or bytearray. If the return value is a string it will be\n encoded as UTF-8 before using it to decrypt the key. Alternatively a\n string, bytes, or bytearray value may be supplied directly as the\n password argument. It will be ignored if the private key is not\n encrypted and no password is needed.\n :returns: An ``SSLContext`` correctly set up for HTTP/2.\n "
cafile = (cert_path or cert_loc)
if ((not cafile) or (not path.exists(cafile))):
err_msg = ((((('No certificate found at ' + str(cafile)) + '. Either ') + 'ensure the default cert.pem file is included in the ') + 'distribution or provide a custom certificate when ') + 'creating the connection.')
raise MissingCertFile(err_msg)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.set_default_verify_paths()
context.load_verify_locations(cafile=cafile)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
with ignore_missing():
context.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS)
with ignore_missing():
context.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS)
context.options |= ssl.OP_NO_COMPRESSION
if (cert is not None):
if (not isinstance(cert, six.string_types)):
context.load_cert_chain(cert[0], cert[1], cert_password)
else:
context.load_cert_chain(cert, password=cert_password)
return context |
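A brief sketch of how init_context and wrap_socket compose over a plain blocking socket; the host name is a placeholder and the negotiated protocol depends on the server.
import socket
from hyper.tls import init_context, wrap_socket
ctx = init_context()                                   # uses the bundled cert.pem by default
raw_sock = socket.create_connection(('example.com', 443))
ssl_sock, proto = wrap_socket(raw_sock, 'example.com', ssl_context=ctx)
print(proto)                                           # e.g. 'h2' if the server negotiated HTTP/2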
def _configure_randomizer(self):
'configure domain randomizer\n '
for obstacle_names in self.obstacle_names:
RandomizerManager.get_instance().add(ModelVisualRandomizer(model_name=obstacle_names, model_randomizer_type=ModelRandomizerType.MODEL)) | 6,835,405,223,600,537,000 | configure domain randomizer | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/agent_ctrl/obstacles_agent_ctrl.py | _configure_randomizer | LastRemote/amazon-sagemaker-examples | python | def _configure_randomizer(self):
'\n '
for obstacle_names in self.obstacle_names:
RandomizerManager.get_instance().add(ModelVisualRandomizer(model_name=obstacle_names, model_randomizer_type=ModelRandomizerType.MODEL)) |
def aws_exception_handler(e):
'AWS specific exception handler.\n Args:\n e: the exception that was raised by the underlying API call that just failed.\n Returns:\n True if this exception can be retried, False otherwise.\n '
return ('Request limit exceeded' in str(e)) | -6,550,005,376,189,701,000 | AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise. | managed/devops/opscli/ybops/cloud/aws/utils.py | aws_exception_handler | bhavin192/yugabyte-db | python | def aws_exception_handler(e):
'AWS specific exception handler.\n Args:\n e: the exception that was raised by the underlying API call that just failed.\n Returns:\n True if this exception can be retried, False otherwise.\n '
return ('Request limit exceeded' in str(e)) |
def aws_request_limit_retry(fn):
'A decorator for retrying an AWS operation after exceeding request limit. Does retries with\n randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries\n internally, but as of May 2017 there does not seem to be a good way of doing that.\n\n Initially not adding this decorator to all functions in this module. This should be done\n gradually as we encounter rate limiting errors.\n\n Relevant boto issues:\n\n https://github.com/boto/boto3/issues/770\n https://github.com/boto/botocore/issues/882\n '
return request_retry_decorator(fn, aws_exception_handler) | -5,006,045,213,595,636,000 | A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882 | managed/devops/opscli/ybops/cloud/aws/utils.py | aws_request_limit_retry | bhavin192/yugabyte-db | python | def aws_request_limit_retry(fn):
'A decorator for retrying an AWS operation after exceeding request limit. Does retries with\n randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries\n internally, but as of May 2017 there does not seem to be a good way of doing that.\n\n Initially not adding this decorator to all functions in this module. This should be done\n gradually as we encounter rate limiting errors.\n\n Relevant boto issues:\n\n https://github.com/boto/boto3/issues/770\n https://github.com/boto/botocore/issues/882\n '
return request_retry_decorator(fn, aws_exception_handler) |
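A short sketch of the intended use of the decorator above: wrap a call that can hit AWS request limits so it is retried with jitter; the EC2 operation shown is an arbitrary example.
@aws_request_limit_retry
def describe_instance(client, instance_id):
    # client: a low-level boto3 EC2 client, e.g. boto3.client('ec2', region_name=region).
    # Retried when the underlying call raises an error containing 'Request limit exceeded'.
    return client.describe_instances(InstanceIds=[instance_id])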
def get_client(region):
'Method to get boto3 ec2 resource for given region\n Args:\n region (str): Region name\n Returns:\n boto3 resource\n '
return boto3.resource('ec2', region_name=region) | 5,647,238,591,775,809,000 | Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource | managed/devops/opscli/ybops/cloud/aws/utils.py | get_client | bhavin192/yugabyte-db | python | def get_client(region):
'Method to get boto3 ec2 resource for given region\n Args:\n region (str): Region name\n Returns:\n boto3 resource\n '
return boto3.resource('ec2', region_name=region) |
def get_clients(regions):
'Method to get boto3 clients for given region or all the regions if none specified.\n Args:\n regions (list): List of regions to return clients for\n Returns:\n clients(obj): Map of region to boto3 resource\n '
return {region: get_client(region) for region in regions} | -2,862,654,079,155,418,600 | Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource | managed/devops/opscli/ybops/cloud/aws/utils.py | get_clients | bhavin192/yugabyte-db | python | def get_clients(regions):
'Method to get boto3 clients for given region or all the regions if none specified.\n Args:\n regions (list): List of regions to return clients for\n Returns:\n clients(obj): Map of region to boto3 resource\n '
return {region: get_client(region) for region in regions} |
def get_zones(region, dest_vpc_id=None):
'Method to fetch zones for given region or all the regions if none specified.\n Args:\n region (str): Name of region to get zones of.\n Returns:\n zones (obj): Map of zone -> subnet\n '
result = {}
filters = get_filters('state', 'available')
client = boto3.client('ec2', region_name=region)
zones = client.describe_availability_zones(Filters=filters).get('AvailabilityZones', [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z['ZoneName']
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if (subnet is None):
subnet = next(iter([s for s in region_vpc.subnets.all() if (s.availability_zone == zone_name)]), None)
zone_mapping[zone_name] = (subnet.id if (subnet is not None) else None)
return zone_mapping | 4,495,920,307,806,312,400 | Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet | managed/devops/opscli/ybops/cloud/aws/utils.py | get_zones | bhavin192/yugabyte-db | python | def get_zones(region, dest_vpc_id=None):
'Method to fetch zones for given region or all the regions if none specified.\n Args:\n region (str): Name of region to get zones of.\n Returns:\n zones (obj): Map of zone -> subnet\n '
result = {}
filters = get_filters('state', 'available')
client = boto3.client('ec2', region_name=region)
zones = client.describe_availability_zones(Filters=filters).get('AvailabilityZones', [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z['ZoneName']
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if (subnet is None):
subnet = next(iter([s for s in region_vpc.subnets.all() if (s.availability_zone == zone_name)]), None)
zone_mapping[zone_name] = (subnet.id if (subnet is not None) else None)
return zone_mapping |
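For illustration, assuming AWS credentials are configured and the region's tagged VPC and subnets already exist, the zone lookup can be exercised as follows (the region and VPC ID are placeholders).
zone_to_subnet = get_zones('us-west-2')                # or get_zones('us-west-2', dest_vpc_id='vpc-0123456789abcdef0')
for zone, subnet_id in zone_to_subnet.items():
    print(zone, subnet_id)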
def get_vpc(client, tag_name, **kwargs):
'Method to fetch vpc based on the tag_name.\n Args:\n client (boto client): Boto Client for the region to query.\n tag_name (str): VPC tag name.\n Returns:\n VPC obj: VPC object or None.\n '
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None) | 3,140,496,971,922,859,000 | Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None. | managed/devops/opscli/ybops/cloud/aws/utils.py | get_vpc | bhavin192/yugabyte-db | python | def get_vpc(client, tag_name, **kwargs):
'Method to fetch vpc based on the tag_name.\n Args:\n client (boto client): Boto Client for the region to query.\n tag_name (str): VPC tag name.\n Returns:\n VPC obj: VPC object or None.\n '
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None) |
def fetch_subnets(vpc, tag_name):
'Method to fetch subnets based on the tag_name.\n Args:\n vpc (vpc obj): VPC object to search for subnets\n tag_name (str): subnet tag name.\n Returns:\n subnets (list): list of aws subnets for given vpc.\n '
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters) | 7,710,063,374,320,810,000 | Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc. | managed/devops/opscli/ybops/cloud/aws/utils.py | fetch_subnets | bhavin192/yugabyte-db | python | def fetch_subnets(vpc, tag_name):
'Method to fetch subnets based on the tag_name.\n Args:\n vpc (vpc obj): VPC object to search for subnets\n tag_name (str): subnet tag name.\n Returns:\n subnets (list): list of aws subnets for given vpc.\n '
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters) |
def create_subnet(client, vpc, zone, cidr, tag_name):
'Method to create subnet based on cidr and tag name.\n Args:\n client (boto client): Region specific boto client\n vpc (VPC object): VPC object to create subnet.\n zone (str): Availability zone name\n cidr (str): CIDR string\n tag_name (str): Tag name for subnet.\n Returns:\n subnet: Newly created subnet object.\n '
subnet = next((s for s in fetch_subnets(vpc, tag_name) if (s.cidr_block == cidr)), None)
if (subnet is None):
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
client.meta.client.get_waiter('subnet_available').wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet | -29,046,076,793,090,160 | Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object. | managed/devops/opscli/ybops/cloud/aws/utils.py | create_subnet | bhavin192/yugabyte-db | python | def create_subnet(client, vpc, zone, cidr, tag_name):
'Method to create subnet based on cidr and tag name.\n Args:\n client (boto client): Region specific boto client\n vpc (VPC object): VPC object to create subnet.\n zone (str): Availability zone name\n cidr (str): CIDR string\n tag_name (str): Tag name for subnet.\n Returns:\n subnet: Newly created subnet object.\n '
subnet = next((s for s in fetch_subnets(vpc, tag_name) if (s.cidr_block == cidr)), None)
if (subnet is None):
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
client.meta.client.get_waiter('subnet_available').wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet |
def get_security_group(client, group_name, vpc, **kwargs):
'Method to fetch security group based on the group_name.\n Args:\n client (boto client): Region specific boto client\n group_name (str): Security Group name\n vpc (VPC object): The VPC in which to check for the SG\n Returns:\n SecurityGroup: Matching security group.\n '
filters = (get_filters('group-name', group_name) + get_filters('vpc-id', vpc.id))
return next(iter(client.security_groups.filter(Filters=filters)), None) | -2,037,505,821,622,541,000 | Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group. | managed/devops/opscli/ybops/cloud/aws/utils.py | get_security_group | bhavin192/yugabyte-db | python | def get_security_group(client, group_name, vpc, **kwargs):
'Method to fetch security group based on the group_name.\n Args:\n client (boto client): Region specific boto client\n group_name (str): Security Group name\n vpc (VPC object): The VPC in which to check for the SG\n Returns:\n SecurityGroup: Matching security group.\n '
filters = (get_filters('group-name', group_name) + get_filters('vpc-id', vpc.id))
return next(iter(client.security_groups.filter(Filters=filters)), None) |
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
'Method to create a security group based on the group_name and authorize ingress with\n the rules provided.\n Args:\n client (boto client): Region specific boto client\n group_name (str): security group name\n description (str): description of the security group\n vpc (VPC Object): VPC object to create the security group\n rules (dict): List of rules to add to security group.\n '
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule['ip_protocol'], CidrIp=rule['cidr_ip'], FromPort=rule['from_port'], ToPort=rule['to_port'])
except Exception as e:
logging.error('Authorize Security Group Ingress failed: {}'.format(e))
sg.delete()
raise YBOpsRuntimeError('Security Group creation failed.')
return sg | -3,877,660,530,089,913,000 | Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group. | managed/devops/opscli/ybops/cloud/aws/utils.py | create_security_group | bhavin192/yugabyte-db | python | @get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
'Method to create a security group based on the group_name and authorize ingress with\n the rules provided.\n Args:\n client (boto client): Region specific boto client\n group_name (str): security group name\n description (str): description of the security group\n vpc (VPC Object): VPC object to create the security group\n rules (dict): List of rules to add to security group.\n '
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule['ip_protocol'], CidrIp=rule['cidr_ip'], FromPort=rule['from_port'], ToPort=rule['to_port'])
except Exception as e:
logging.error('Authorize Security Group Ingress failed: {}'.format(e))
sg.delete()
raise YBOpsRuntimeError('Security Group creation failed.')
return sg |
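For illustration only, here is a hypothetical `rules` list in the shape create_security_group consumes; the key names are inferred from the authorize_ingress call above, while the ports and CIDR are invented values.

example_rules = [
    {'ip_protocol': 'tcp', 'from_port': 22, 'to_port': 22, 'cidr_ip': '10.0.0.0/16'},      # SSH
    {'ip_protocol': 'tcp', 'from_port': 7100, 'to_port': 7100, 'cidr_ip': '10.0.0.0/16'},  # example service port
]
# sg = create_security_group(client=client, group_name='demo-sg', vpc=vpc,
#                            description='demo', rules=example_rules)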
def get_igw(client, tag_name, **kwargs):
'Method to fetch Internet Gateway based on tag_name.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Internet Gateway tag name.\n Returns:\n internet_gateway: internet gateway object.\n '
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None) | 5,410,119,642,960,169,000 | Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object. | managed/devops/opscli/ybops/cloud/aws/utils.py | get_igw | bhavin192/yugabyte-db | python | def get_igw(client, tag_name, **kwargs):
'Method to fetch Internet Gateway based on tag_name.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Internet Gateway tag name.\n Returns:\n internet_gateway: internet gateway object.\n '
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None) |
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"Method to create Internet Gateway based on tag_name in given VPC. If the gateway\n already exists, it would return that object. If the object doesn't have a tag, we\n would tag it accordingly.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Tag name for internet gateway.\n vpc (VPC object): VPC object to create Internet Gateway\n Returns:\n internet gateway: newly internet gateway object.\n "
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if (existing_igw is not None):
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw | 2,810,598,964,722,193,000 | Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly created internet gateway object. | managed/devops/opscli/ybops/cloud/aws/utils.py | create_igw | bhavin192/yugabyte-db | python | @get_or_create(get_igw)

def create_igw(client, tag_name, vpc):
"Method to create Internet Gateway based on tag_name in given VPC. If the gateway\n already exists, it would return that object. If the object doesn't have a tag, we\n would tag it accordingly.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Tag name for internet gateway.\n vpc (VPC object): VPC object to create Internet Gateway\n Returns:\n internet gateway: newly internet gateway object.\n "
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if (existing_igw is not None):
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw |
def get_route_table(client, tag_name, **kwargs):
'Method to fetch route table based on tag_name\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Route table tag name to search for.\n Returns:\n RouteTable (obj): Matching route table object or None.\n '
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None) | 1,407,385,284,823,409,400 | Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None. | managed/devops/opscli/ybops/cloud/aws/utils.py | get_route_table | bhavin192/yugabyte-db | python | def get_route_table(client, tag_name, **kwargs):
'Method to fetch route table based on tag_name\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Route table tag name to search for.\n Returns:\n RouteTable (obj): Matching route table object or None.\n '
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None) |
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
'Method to create route table based on tag_name in given VPC. It will first\n query for the tag name to see if the route table already exists or if one is already\n attached to the VPC, if so it will return that route table.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Route table tag name\n vpc (vpc object): VPC object to create the route table against\n Returns:\n RouteTable (obj): newly created RouteTable object.\n '
existing_route_table = next(iter(vpc.route_tables.all()), None)
if (existing_route_table is not None):
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table | -9,139,993,306,136,857,000 | Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object. | managed/devops/opscli/ybops/cloud/aws/utils.py | create_route_table | bhavin192/yugabyte-db | python | @get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
'Method to create route table based on tag_name in given VPC. It will first\n query for the tag name to see if the route table already exists or if one is already\n attached to the VPC, if so it will return that route table.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): Route table tag name\n vpc (vpc object): VPC object to create the route table against\n Returns:\n RouteTable (obj): newly created RouteTable object.\n '
existing_route_table = next(iter(vpc.route_tables.all()), None)
if (existing_route_table is not None):
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table |
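The get_or_create decorator applied to the create_* helpers above is not defined in this excerpt; the following is only a guess at its behaviour (run the paired getter first, create only on a miss), not the actual ybops implementation.

import functools

def get_or_create(getter):
    # Hypothetical sketch: wrap a create function so the paired getter runs first.
    def decorator(create_func):
        @functools.wraps(create_func)
        def wrapper(**kwargs):
            existing = getter(**kwargs)   # the getters accept **kwargs, so extra args are ignored
            if existing:
                return existing           # reuse the already-tagged resource
            return create_func(**kwargs)  # otherwise create (and tag) a new one
        return wrapper
    return decorator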
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
'Method to cleanup security group for the matching group_name.\n Args:\n sg: Instance of security group matching the group_name.\n '
sg.delete() | 4,922,713,416,734,661,000 | Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name. | managed/devops/opscli/ybops/cloud/aws/utils.py | cleanup_security_group | bhavin192/yugabyte-db | python | @get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
'Method to cleanup security group for the matching group_name.\n Args:\n sg: Instance of security group matching the group_name.\n '
sg.delete() |
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
'Method to cleanup Internet Gateway matching the tag name. And also remove any vpc\n that is attached to the Internet Gateway.\n Args:\n igw: Instance of Internet Gateway matching tag_name.\n '
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete() | -7,607,356,858,449,717,000 | Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name. | managed/devops/opscli/ybops/cloud/aws/utils.py | cleanup_igw | bhavin192/yugabyte-db | python | @get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
'Method to cleanup Internet Gateway matching the tag name. And also remove any vpc\n that is attached to the Internet Gateway.\n Args:\n igw: Instance of Internet Gateway matching tag_name.\n '
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete() |
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
'Method to cleanup the Route Table matching the tag name.\n Args:\n rt: Instance of Route Table matching tag_name.\n '
rt.delete() | 6,731,431,572,599,774,000 | Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name. | managed/devops/opscli/ybops/cloud/aws/utils.py | cleanup_route_table | bhavin192/yugabyte-db | python | @get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
'Method to cleanup the Route Table matching the tag name.\n Args:\n rt: Instance of Route Table matching tag_name.\n '
rt.delete() |
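Similarly, get_and_cleanup is not shown in this excerpt; a plausible sketch is a decorator that resolves the resource via the getter and only invokes the cleanup body when something was found. The real implementation may differ.

import functools

def get_and_cleanup(getter):
    # Hypothetical sketch only: look the resource up, then hand it to the cleanup function.
    def decorator(cleanup_func):
        @functools.wraps(cleanup_func)
        def wrapper(**kwargs):
            resource = getter(**kwargs)
            if resource:
                return cleanup_func(resource, **kwargs)
        return wrapper
    return decorator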
def get_route_by_cidr(route_table, cidr):
'Method to check if given CIDR already attached to route table.\n Args:\n RouteTable (obj): Route Table object.\n cidr (str): CIDR string to check in route table.\n Returns:\n Route: the route for this CIDR or None if not found\n '
return dict(((r.destination_cidr_block, r) for r in route_table.routes)).get(cidr) | -4,798,626,644,708,583,000 | Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found | managed/devops/opscli/ybops/cloud/aws/utils.py | get_route_by_cidr | bhavin192/yugabyte-db | python | def get_route_by_cidr(route_table, cidr):
'Method to check if given CIDR already attached to route table.\n Args:\n RouteTable (obj): Route Table object.\n cidr (str): CIDR string to check in route table.\n Returns:\n Route: the route for this CIDR or None if not found\n '
return dict(((r.destination_cidr_block, r) for r in route_table.routes)).get(cidr) |
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
'Method to create vpc based on the cidr and tag with tag_name.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): VPC tag name\n cidr (str): CIDR string.\n Returns:\n VPC(Object): Newly created VPC object.\n '
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc | 5,086,123,577,382,439,000 | Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object. | managed/devops/opscli/ybops/cloud/aws/utils.py | create_vpc | bhavin192/yugabyte-db | python | @get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
'Method to create vpc based on the cidr and tag with tag_name.\n Args:\n client (boto client): Region specific boto client\n tag_name (str): VPC tag name\n cidr (str): CIDR string.\n Returns:\n VPC(Object): Newly created VPC object.\n '
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc |
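A hypothetical end-to-end wiring of the helpers above for one region; the region, CIDRs and tag names are invented, and get_client is assumed to return the region-scoped EC2 resource it is used as elsewhere in this module.

client = get_client('us-west-2')
vpc = create_vpc(client=client, tag_name='yb-demo-vpc', cidr='10.9.0.0/16')
subnet = create_subnet(client, vpc, 'us-west-2a', '10.9.1.0/24', 'yb-demo-subnet-2a')
igw = create_igw(client=client, tag_name='yb-demo-igw', vpc=vpc)
route_table = create_route_table(client=client, tag_name='yb-demo-rt', vpc=vpc)
if get_route_by_cidr(route_table, '0.0.0.0/0') is None:
    route_table.create_route(DestinationCidrBlock='0.0.0.0/0', GatewayId=igw.id)  # boto3 RouteTable API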
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
'Method to bootstrap vpc and security group, and enable vpc peering\n with the host_instance vpc.\n Args:\n metadata (obj): Cloud metadata object with cidr prefix and other metadata.\n region (str): Region name to create the vpc in.\n dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.\n Returns:\n vpc_info (json): return vpc, subnet and security group as json.\n '
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata['sg_rules']
for r in rules:
r.update({'cidr_ip': IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc, description='YugaByte SG', rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets) | 6,087,758,651,699,521,000 | Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json. | managed/devops/opscli/ybops/cloud/aws/utils.py | set_yb_sg_and_fetch_vpc | bhavin192/yugabyte-db | python | def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
'Method to bootstrap vpc and security group, and enable vpc peering\n with the host_instance vpc.\n Args:\n metadata (obj): Cloud metadata object with cidr prefix and other metadata.\n region (str): Region name to create the vpc in.\n dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.\n Returns:\n vpc_info (json): return vpc, subnet and security group as json.\n '
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata['sg_rules']
for r in rules:
r.update({'cidr_ip': IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc, description='YugaByte SG', rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets) |
def query_vpc(region):
'Method to query VPC against given region and respective subnets.\n Args:\n region (str): Region name to query the VPC.\n Returns:\n vpc and subnet info (obj): Object with region and zone subnet id.\n '
per_vpc_info = {}
raw_client = boto3.client('ec2', region_name=region)
zones = [z['ZoneName'] for z in raw_client.describe_availability_zones(Filters=get_filters('state', 'available')).get('AvailabilityZones', [])]
subnets_by_zone = {z: [] for z in zones}
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({'sg_id': sg.group_id, 'sg_name': sg.group_name})
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
subnets = vpc.subnets.filter(Filters=get_filters('state', 'available'))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({'subnet_id': s.subnet_id, 'name': _get_name_from_tags(s.tags), 'public': s.map_public_ip_on_launch})
vpc_info = {'subnets_by_zone': subnets_by_zone, 'security_groups': per_vpc_sgs.get(vpc.id, [])}
per_vpc_info[vpc.id] = vpc_info
region_json = {'per_vpc_info': per_vpc_info}
return region_json | 1,682,906,710,431,175,700 | Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id. | managed/devops/opscli/ybops/cloud/aws/utils.py | query_vpc | bhavin192/yugabyte-db | python | def query_vpc(region):
'Method to query VPC against given region and respective subnets.\n Args:\n region (str): Region name to query the VPC.\n Returns:\n vpc and subnet info (obj): Object with region and zone subnet id.\n '
per_vpc_info = {}
raw_client = boto3.client('ec2', region_name=region)
zones = [z['ZoneName'] for z in raw_client.describe_availability_zones(Filters=get_filters('state', 'available')).get('AvailabilityZones', [])]
subnets_by_zone = {z: [] for z in zones}
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({'sg_id': sg.group_id, 'sg_name': sg.group_name})
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
subnets = vpc.subnets.filter(Filters=get_filters('state', 'available'))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({'subnet_id': s.subnet_id, 'name': _get_name_from_tags(s.tags), 'public': s.map_public_ip_on_launch})
vpc_info = {'subnets_by_zone': subnets_by_zone, 'security_groups': per_vpc_sgs.get(vpc.id, [])}
per_vpc_info[vpc.id] = vpc_info
region_json = {'per_vpc_info': per_vpc_info}
return region_json |
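For reference, the structure query_vpc builds looks like the following (reconstructed from the code above; the ids and names are placeholders).

example_region_json = {
    'per_vpc_info': {
        'vpc-0abc123': {
            'subnets_by_zone': {
                'us-west-2a': [{'subnet_id': 'subnet-01', 'name': 'demo-subnet-a', 'public': True}],
                'us-west-2b': [],
            },
            'security_groups': [{'sg_id': 'sg-0def456', 'sg_name': 'demo-sg'}],
        },
    },
}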
def vpc_components_as_json(vpc, sgs, subnets):
'Method takes VPC, Security Group and Subnets and returns a json data format with ids.\n Args:\n vpc (VPC Object): Region specific VPC object\n sgs (List of Security Group Object): Region specific Security Group object\n subnets (subnet object map): Map of Subnet objects keyed of zone.\n Retuns:\n json (str): A Json string for yugaware to consume with necessary ids.\n '
result = {}
result['vpc_id'] = vpc.id
result['security_group'] = [{'id': sg.group_id, 'name': sg.group_name} for sg in sgs]
result['zones'] = {}
for (zone, subnet) in subnets.iteritems():
result['zones'][zone] = subnet.id
return result | 3,918,596,843,332,259,300 | Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed by zone.
Returns:
json (str): A Json string for yugaware to consume with necessary ids. | managed/devops/opscli/ybops/cloud/aws/utils.py | vpc_components_as_json | bhavin192/yugabyte-db | python | def vpc_components_as_json(vpc, sgs, subnets):
'Method takes VPC, Security Group and Subnets and returns a json data format with ids.\n Args:\n vpc (VPC Object): Region specific VPC object\n sgs (List of Security Group Object): Region specific Security Group object\n subnets (subnet object map): Map of Subnet objects keyed of zone.\n Retuns:\n json (str): A Json string for yugaware to consume with necessary ids.\n '
result = {}
result['vpc_id'] = vpc.id
result['security_group'] = [{'id': sg.group_id, 'name': sg.group_name} for sg in sgs]
result['zones'] = {}
for (zone, subnet) in subnets.iteritems():
result['zones'][zone] = subnet.id
return result |
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
'Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.\n Args:\n region (str): Region name to query the VPC.\n '
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if (region_vpc is None):
raise YBOpsRuntimeError('VPC not setup.')
zones = get_zones(region)
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
for (zone, subnet_id) in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if (subnet_id is not None):
client.Subnet(subnet_id).delete()
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
host_vpc = None
if ((host_vpc_id is not None) and (host_vpc_region is not None)):
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
region_vpc.delete()
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {'success': 'VPC deleted.'} | 4,416,998,199,135,841,300 | Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC. | managed/devops/opscli/ybops/cloud/aws/utils.py | delete_vpc | bhavin192/yugabyte-db | python | def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
'Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.\n Args:\n region (str): Region name to query the VPC.\n '
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if (region_vpc is None):
raise YBOpsRuntimeError('VPC not setup.')
zones = get_zones(region)
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
for (zone, subnet_id) in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if (subnet_id is not None):
client.Subnet(subnet_id).delete()
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
host_vpc = None
if ((host_vpc_id is not None) and (host_vpc_region is not None)):
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
region_vpc.delete()
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {'success': 'VPC deleted.'} |
def tag_resource_name(client, resource_id, tag_name):
'Method to create name tag for given resource.\n Args:\n client (boto3 client): Region specific boto client\n resource_id (str): EC2 resource id to tag\n tag_name (str): Tag name.\n '
tag_resource(client, resource_id, 'Name', tag_name) | 1,643,940,847,136,764,200 | Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name. | managed/devops/opscli/ybops/cloud/aws/utils.py | tag_resource_name | bhavin192/yugabyte-db | python | def tag_resource_name(client, resource_id, tag_name):
'Method to create name tag for given resource.\n Args:\n client (boto3 client): Region specific boto client\n resource_id (str): EC2 resource id to tag\n tag_name (str): Tag name.\n '
tag_resource(client, resource_id, 'Name', tag_name) |
def tag_resource(client, resource_id, tag_key, tag_value):
'Method to attach arbitrary key-value tags to resources.\n Args:\n client (boto3 client): Region specific boto client\n resource_id (str): EC2 resource id to tag\n tag_key: Tag key\n tag_value: Tag value\n '
tags = [{'Key': tag_key, 'Value': tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags) | -7,063,555,688,052,461,000 | Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value | managed/devops/opscli/ybops/cloud/aws/utils.py | tag_resource | bhavin192/yugabyte-db | python | def tag_resource(client, resource_id, tag_key, tag_value):
'Method to attach arbitrary key-value tags to resources.\n Args:\n client (boto3 client): Region specific boto client\n resource_id (str): EC2 resource id to tag\n tag_key: Tag key\n tag_value: Tag value\n '
tags = [{'Key': tag_key, 'Value': tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags) |
def get_vpc_peerings(vpc, host_vpc, **kwargs):
'Method to fetch all the VPC peerings against given VPC. If host_vpc is provided\n it will check if there is a peering against that vpc.\n Args:\n vpc(VPC object): VPC Object to search for peerings\n host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc\n peering is done.\n Returns:\n VPC peering (array): Array list of vpc peerings.\n '
output = []
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings if ((vp.status.get('Code') == 'active') and ((host_vpc is None) or (vp.requester_vpc == host_vpc)))])
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings if ((vp.status.get('Code') == 'active') and ((host_vpc is None) or (vp.accepter_vpc == host_vpc)))])
return output | 6,595,738,698,098,746,000 | Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings. | managed/devops/opscli/ybops/cloud/aws/utils.py | get_vpc_peerings | bhavin192/yugabyte-db | python | def get_vpc_peerings(vpc, host_vpc, **kwargs):
'Method to fetch all the VPC peerings against given VPC. If host_vpc is provided\n it will check if there is a peering against that vpc.\n Args:\n vpc(VPC object): VPC Object to search for peerings\n host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc\n peering is done.\n Returns:\n VPC peering (array): Array list of vpc peerings.\n '
output = []
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings if ((vp.status.get('Code') == 'active') and ((host_vpc is None) or (vp.requester_vpc == host_vpc)))])
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings if ((vp.status.get('Code') == 'active') and ((host_vpc is None) or (vp.accepter_vpc == host_vpc)))])
return output |
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"Method would create a vpc peering between the newly created VPC and caller's VPC\n Also makes sure, if they aren't the same, then there is no need for vpc peering.\n Args:\n client (boto client): Region specific boto client\n vpc (VPC object): Newly created VPC object\n host_vpc(Host VPC object): Host VPC to peer with.\n target_region (region name): Region name in which peering is being created.\n Returns:\n VPC peering (array): Array list of vpc peerings.\n "
try:
peer_conn = client.create_vpc_peering_connection(VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError('Unable to create VPC peering.') | -4,914,788,249,579,337,000 | Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings. | managed/devops/opscli/ybops/cloud/aws/utils.py | create_vpc_peering | bhavin192/yugabyte-db | python | @get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"Method would create a vpc peering between the newly created VPC and caller's VPC\n Also makes sure, if they aren't the same, then there is no need for vpc peering.\n Args:\n client (boto client): Region specific boto client\n vpc (VPC object): Newly created VPC object\n host_vpc(Host VPC object): Host VPC to peer with.\n target_region (region name): Region name in which peering is being created.\n Returns:\n VPC peering (array): Array list of vpc peerings.\n "
try:
peer_conn = client.create_vpc_peering_connection(VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError('Unable to create VPC peering.') |
def init(name):
'\n ## Init\n\n [ID]\n Init adalah perintah inisiasi oleh metric untuk membuat sebuah project dengan pondasi yang telah di setup, cara\n penggunaan ini bisa dengan 2 cara, membuat project dari direktori saat ini (CWD) atau dengan direktori baru.\n [EN]\n Init is the command initiation by metric to create a project with the foundation that has been setup, there are\n 2 ways to work with it, either you can create from current working directory (CWD) or new directory.\n\n @param name: project name\n '
project_path = os.getcwd()
if (name != '.'):
project_path = os.path.join(os.getcwd(), name)
Package.make_directory(project_path)
_init(Base.base_configuration(project_path), project_path)
packages_build = {'apps': ('resources',), 'models': tuple()}
for (k, v) in packages_build.items():
Package.make_package(os.path.join(project_path, k))
for i in v:
Package.make_package(os.path.join(project_path, k, i))
dir_build = {'apps': ('views',), 'models': ('fields',), '.': ('scripts',)}
for (k, v) in dir_build.items():
for i in v:
Package.make_directory(os.path.join(project_path, k, i))
file_remove = ['script.py.mako']
[os.remove(os.path.join(project_path, i)) for i in file_remove]
scripts = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../scripts')
[copy(file, os.path.join(project_path, 'scripts')) for file in glob.glob(os.path.join(scripts, '*.mako'))]
os.rename(os.path.join(project_path, 'env.py'), os.path.join(project_path, 'scripts', 'env.py'))
copy_tree(os.path.join(scripts, 'setup'), project_path)
for file in glob.glob(os.path.join(os.path.join(scripts, 'setup'), '*.py')):
copy(file, project_path)
Conf.reset(project_path) | 4,756,611,065,565,339,000 | ## Init
[ID]
Init adalah perintah inisiasi oleh metric untuk membuat sebuah project dengan pondasi yang telah di setup, cara
penggunaan ini bisa dengan 2 cara, membuat project dari direktori saat ini (CWD) atau dengan direktori baru.
[EN]
Init is the command used by metric to initialise a project on a foundation that has already been set up; there are
2 ways to work with it: you can create the project in the current working directory (CWD) or in a new directory.
@param name: project name | metric/cli/__init__.py | init | kzulfazriawan/metric | python | def init(name):
'\n ## Init\n\n [ID]\n Init adalah perintah inisiasi oleh metric untuk membuat sebuah project dengan pondasi yang telah di setup, cara\n penggunaan ini bisa dengan 2 cara, membuat project dari direktori saat ini (CWD) atau dengan direktori baru.\n [EN]\n Init is the command initiation by metric to create a project with the foundation that has been setup, there are\n 2 ways to work with it, either you can create from current working directory (CWD) or new directory.\n\n @param name: project name\n '
project_path = os.getcwd()
if (name != '.'):
project_path = os.path.join(os.getcwd(), name)
Package.make_directory(project_path)
_init(Base.base_configuration(project_path), project_path)
packages_build = {'apps': ('resources',), 'models': tuple()}
for (k, v) in packages_build.items():
Package.make_package(os.path.join(project_path, k))
for i in v:
Package.make_package(os.path.join(project_path, k, i))
dir_build = {'apps': ('views',), 'models': ('fields',), '.': ('scripts',)}
for (k, v) in dir_build.items():
for i in v:
Package.make_directory(os.path.join(project_path, k, i))
file_remove = ['script.py.mako']
[os.remove(os.path.join(project_path, i)) for i in file_remove]
scripts = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../scripts')
[copy(file, os.path.join(project_path, 'scripts')) for file in glob.glob(os.path.join(scripts, '*.mako'))]
os.rename(os.path.join(project_path, 'env.py'), os.path.join(project_path, 'scripts', 'env.py'))
copy_tree(os.path.join(scripts, 'setup'), project_path)
for file in glob.glob(os.path.join(os.path.join(scripts, 'setup'), '*.py')):
copy(file, project_path)
Conf.reset(project_path) |
def make_resource(name):
'\n ## Make resource\n\n [ID]\n Perintah ini adalah suatu perintah yang digunakan untuk membuat "resource" baru dari "script" yang telah di\n sediakan.\n [EN]\n This is a command that used to create new "resource" based from the existing "script" provided.\n\n @param name: resource name\n '
t = Template()
t.template_type = 'resource'
t.make(name) | 5,630,455,689,173,030,000 | ## Make resource
[ID]
Perintah ini adalah suatu perintah yang digunakan untuk membuat "resource" baru dari "script" yang telah di
sediakan.
[EN]
This is a command that is used to create a new "resource" based on the existing "script" provided.
@param name: resource name | metric/cli/__init__.py | make_resource | kzulfazriawan/metric | python | def make_resource(name):
'\n ## Make resource\n\n [ID]\n Perintah ini adalah suatu perintah yang digunakan untuk membuat "resource" baru dari "script" yang telah di\n sediakan.\n [EN]\n This is a command that used to create new "resource" based from the existing "script" provided.\n\n @param name: resource name\n '
t = Template()
t.template_type = 'resource'
t.make(name) |
def __init__(self, transmitted_bytes_per_sec=None, received_bytes_per_sec=None):
'\n Keyword args:\n transmitted_bytes_per_sec (float): Total bytes transmitted per second.\n received_bytes_per_sec (float): Total bytes received per second.\n '
if (transmitted_bytes_per_sec is not None):
self.transmitted_bytes_per_sec = transmitted_bytes_per_sec
if (received_bytes_per_sec is not None):
self.received_bytes_per_sec = received_bytes_per_sec | 6,875,175,826,083,959,000 | Keyword args:
transmitted_bytes_per_sec (float): Total bytes transmitted per second.
received_bytes_per_sec (float): Total bytes received per second. | pypureclient/flashblade/FB_2_3/models/replication_performance.py | __init__ | Flav-STOR-WL/py-pure-client | python | def __init__(self, transmitted_bytes_per_sec=None, received_bytes_per_sec=None):
'\n Keyword args:\n transmitted_bytes_per_sec (float): Total bytes transmitted per second.\n received_bytes_per_sec (float): Total bytes received per second.\n '
if (transmitted_bytes_per_sec is not None):
self.transmitted_bytes_per_sec = transmitted_bytes_per_sec
if (received_bytes_per_sec is not None):
self.received_bytes_per_sec = received_bytes_per_sec |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(ReplicationPerformance, dict):
for (key, value) in self.items():
result[key] = value
return result | 3,724,535,437,965,489,000 | Returns the model properties as a dict | pypureclient/flashblade/FB_2_3/models/replication_performance.py | to_dict | Flav-STOR-WL/py-pure-client | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(ReplicationPerformance, dict):
for (key, value) in self.items():
result[key] = value
return result |
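A minimal usage sketch for the model above; the import path is inferred from the file path shown for this record and may differ in the installed package, and the printed dict is only the expected shape.

from pypureclient.flashblade.FB_2_3.models.replication_performance import ReplicationPerformance

perf = ReplicationPerformance(transmitted_bytes_per_sec=1.5e6, received_bytes_per_sec=2.0e6)
print(perf.to_dict())  # expected to contain the two *_bytes_per_sec keys set above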
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | pypureclient/flashblade/FB_2_3/models/replication_performance.py | to_str | Flav-STOR-WL/py-pure-client | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | pypureclient/flashblade/FB_2_3/models/replication_performance.py | __repr__ | Flav-STOR-WL/py-pure-client | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ReplicationPerformance)):
return False
return (self.__dict__ == other.__dict__) | 6,495,413,359,304,010,000 | Returns true if both objects are equal | pypureclient/flashblade/FB_2_3/models/replication_performance.py | __eq__ | Flav-STOR-WL/py-pure-client | python | def __eq__(self, other):
if (not isinstance(other, ReplicationPerformance)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | pypureclient/flashblade/FB_2_3/models/replication_performance.py | __ne__ | Flav-STOR-WL/py-pure-client | python | def __ne__(self, other):
return (not (self == other)) |
def home(request):
'\n View function for simply rendering the Ionic Angular\n index.html\n '
return render(request, 'www/index.html') | 5,884,574,123,324,748,000 | View function for simply rendering the Ionic Angular
index.html | practicality/frontend/views.py | home | broden-wanner/practicality | python | def home(request):
'\n View function for simply rendering the Ionic Angular\n index.html\n '
return render(request, 'www/index.html') |
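A hypothetical URL configuration for the view above (the project's real urls.py is not part of this record, and Django >= 2.0 is assumed for django.urls.path).

from django.urls import path
from frontend import views  # assumed app/module layout, based on the path shown above

urlpatterns = [
    path('', views.home, name='home'),  # serve the Ionic index page at the site root
]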
def main():
'The entry point for this script.'
usage = 'usage: %prog [dir] [-b basedir] [-o jsfile]\n example:\n %prog\n %prog assets -o js/jsfy_res.js\n '
parser = optparse.OptionParser(usage)
parser.add_option('-b', '--base', dest='basedir', help='base dir')
parser.add_option('-o', '--output', dest='outputpath', help='export js file path')
(options, args) = parser.parse_args()
if isinstance(options.basedir, str):
basedir = options.basedir
else:
basedir = '.'
basedir = abspath(basedir)
if isinstance(options.outputpath, str):
outputpath = options.outputpath
else:
outputpath = './jsfy_res.js'
fout = open(outputpath, 'w')
fout.write('// generated with jsfy.py, v0.1 (https://github.com/floatinghotpot/jsfy)\n\n')
fout.write('var jsfy_res = jsfy_res || {};\n\n')
if (not basedir.endswith('/')):
basedir = (basedir + '/')
for f in args:
f = abspath(f)
if isfile(f):
jsfy_file(f, basedir, fout)
elif isdir(f):
jsfy_dir(f, basedir, fout)
fout.close() | -8,927,664,434,250,232,000 | The entry point for this script. | tools/jsfy.py | main | floatinghotpot/ajax-local | python | def main():
usage = 'usage: %prog [dir] [-b basedir] [-o jsfile]\n example:\n %prog\n %prog assets -o js/jsfy_res.js\n '
parser = optparse.OptionParser(usage)
parser.add_option('-b', '--base', dest='basedir', help='base dir')
parser.add_option('-o', '--output', dest='outputpath', help='export js file path')
(options, args) = parser.parse_args()
if isinstance(options.basedir, str):
basedir = options.basedir
else:
basedir = '.'
basedir = abspath(basedir)
if isinstance(options.outputpath, str):
outputpath = options.outputpath
else:
outputpath = './jsfy_res.js'
fout = open(outputpath, 'w')
fout.write('// generated with jsfy.py, v0.1 (https://github.com/floatinghotpot/jsfy)\n\n')
fout.write('var jsfy_res = jsfy_res || {};\n\n')
if (not basedir.endswith('/')):
basedir = (basedir + '/')
for f in args:
f = abspath(f)
if isfile(f):
jsfy_file(f, basedir, fout)
elif isdir(f):
jsfy_dir(f, basedir, fout)
fout.close() |
def to_representation(self, instance):
'Return an ordered dictionary of HAL-style links.'
request = self.context.get('request')
ret = OrderedDict()
for link in self.links:
name = link[0]
ret[name] = self.to_link(request, instance, *link[1:])
return ret | 3,141,942,805,471,202,300 | Return an ordered dictionary of HAL-style links. | django_hal/fields.py | to_representation | jacktrades/django-hal | python | def to_representation(self, instance):
request = self.context.get('request')
ret = OrderedDict()
for link in self.links:
name = link[0]
ret[name] = self.to_link(request, instance, *link[1:])
return ret |
def get_attribute(self, instance, *args, **kwargs):
'Return the whole instance, instead of looking up an attribute value.\n\n Implementation note: We do this because `Serializer.to_representation`\n builds the list of serializer fields with something like:\n\n for field in serializer_fields:\n field.to_representation(field.get_attribute(instance))\n\n Since we need the instance in `to_representation` so we can query arbitrary\n attributes on it to build urls, we simply have to return the instance here.\n '
return instance | 3,648,155,090,417,222,000 | Return the whole instance, instead of looking up an attribute value.
Implementation note: We do this because `Serializer.to_representation`
builds the list of serializer fields with something like:
for field in serializer_fields:
field.to_representation(field.get_attribute(instance))
Since we need the instance in `to_representation` so we can query arbitrary
attributes on it to build urls, we simply have to return the instance here. | django_hal/fields.py | get_attribute | jacktrades/django-hal | python | def get_attribute(self, instance, *args, **kwargs):
'Return the whole instance, instead of looking up an attribute value.\n\n Implementation note: We do this because `Serializer.to_representation`\n builds the list of serializer fields with something like:\n\n for field in serializer_fields:\n field.to_representation(field.get_attribute(instance))\n\n Since we need the instance in `to_representation` so we can query arbitrary\n attributes on it to build urls, we simply have to return the instance here.\n '
return instance |
def to_link(self, request, instance, urlpattern, kwargs=None, query_kwargs=None):
'Return an absolute url for the given urlpattern.'
if query_kwargs:
query_kwargs = {k: getattr(instance, v) for (k, v) in query_kwargs.items()}
if (not kwargs):
url = reverse(urlpattern, request=request)
if (not query_kwargs):
return {'href': url}
return {'href': ('%s?%s' % (url, urlencode(query_kwargs)))}
if isinstance(kwargs, basestring):
url = reverse(urlpattern, kwargs={kwargs: getattr(instance, kwargs)}, request=request)
if (not query_kwargs):
return {'href': url}
return {'href': ('%s?%s' % (url, urlencode(query_kwargs)))}
reverse_kwargs = {}
if kwargs:
for (k, v) in kwargs.items():
reverse_kwargs[k] = getattr(instance, v)
try:
url = reverse(urlpattern, kwargs=reverse_kwargs, request=request)
if (not query_kwargs):
return {'href': url}
return {'href': ('%s?%s' % (url, urlencode(query_kwargs)))}
except NoReverseMatch:
return None | 3,290,308,599,027,952,600 | Return an absolute url for the given urlpattern. | django_hal/fields.py | to_link | jacktrades/django-hal | python | def to_link(self, request, instance, urlpattern, kwargs=None, query_kwargs=None):
if query_kwargs:
query_kwargs = {k: getattr(instance, v) for (k, v) in query_kwargs.items()}
if (not kwargs):
url = reverse(urlpattern, request=request)
if (not query_kwargs):
return {'href': url}
return {'href': ('%s?%s' % (url, urlencode(query_kwargs)))}
if isinstance(kwargs, basestring):
url = reverse(urlpattern, kwargs={kwargs: getattr(instance, kwargs)}, request=request)
if (not query_kwargs):
return {'href': url}
return {'href': ('%s?%s' % (url, urlencode(query_kwargs)))}
reverse_kwargs = {}
if kwargs:
for (k, v) in kwargs.items():
reverse_kwargs[k] = getattr(instance, v)
try:
url = reverse(urlpattern, kwargs=reverse_kwargs, request=request)
if (not query_kwargs):
return {'href': url}
return {'href': ('%s?%s' % (url, urlencode(query_kwargs)))}
except NoReverseMatch:
return None |
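To make the link-tuple format concrete, here is a hypothetical `links` declaration matching the (name, urlpattern, kwargs, query_kwargs) positions consumed above; the URL names and attributes are invented.

links = (
    ('self', 'user-detail', 'pk'),                   # kwargs given as a single attribute name
    ('groups', 'group-list', None, {'user': 'pk'}),  # query params resolved from instance attributes
)
# to_representation(...) would then yield something like:
# {'self':   {'href': 'https://api.example.com/users/42/'},
#  'groups': {'href': 'https://api.example.com/groups/?user=42'}}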
@staticmethod
def parse_input_params(size=None, error=None):
'\n Check if input params are valid and return sample size.\n\n :param size: an int not smaller than 16, which we would use to estimate\n number of unique values.\n :param error: max estimation error, which is a float between 0.01 and 0.50.\n If error is given, sample size will be calculated from error with\n _get_sample_size_from_est_error function.\n :return: sample size\n :raises:\n ValueError: If both size and error are given, or neither is given, or\n values are out of range.\n '
if (None not in (size, error)):
raise ValueError((ApproximateUnique._MULTI_VALUE_ERR_MSG % (size, error)))
elif ((size is None) and (error is None)):
raise ValueError(ApproximateUnique._NO_VALUE_ERR_MSG)
elif (size is not None):
if ((not isinstance(size, int)) or (size < 16)):
raise ValueError((ApproximateUnique._INPUT_SIZE_ERR_MSG % size))
else:
return size
elif ((error < 0.01) or (error > 0.5)):
raise ValueError((ApproximateUnique._INPUT_ERROR_ERR_MSG % error))
else:
return ApproximateUnique._get_sample_size_from_est_error(error) | -8,460,358,820,266,177,000 | Check if input params are valid and return sample size.
:param size: an int not smaller than 16, which we would use to estimate
number of unique values.
:param error: max estimation error, which is a float between 0.01 and 0.50.
If error is given, sample size will be calculated from error with
_get_sample_size_from_est_error function.
:return: sample size
:raises:
ValueError: If both size and error are given, or neither is given, or
values are out of range. | sdks/python/apache_beam/transforms/stats.py | parse_input_params | TimvdLippe/beam | python | @staticmethod
def parse_input_params(size=None, error=None):
'\n Check if input params are valid and return sample size.\n\n :param size: an int not smaller than 16, which we would use to estimate\n number of unique values.\n :param error: max estimation error, which is a float between 0.01 and 0.50.\n If error is given, sample size will be calculated from error with\n _get_sample_size_from_est_error function.\n :return: sample size\n :raises:\n ValueError: If both size and error are given, or neither is given, or\n values are out of range.\n '
if (None not in (size, error)):
raise ValueError((ApproximateUnique._MULTI_VALUE_ERR_MSG % (size, error)))
elif ((size is None) and (error is None)):
raise ValueError(ApproximateUnique._NO_VALUE_ERR_MSG)
elif (size is not None):
if ((not isinstance(size, int)) or (size < 16)):
raise ValueError((ApproximateUnique._INPUT_SIZE_ERR_MSG % size))
else:
return size
elif ((error < 0.01) or (error > 0.5)):
raise ValueError((ApproximateUnique._INPUT_ERROR_ERR_MSG % error))
else:
return ApproximateUnique._get_sample_size_from_est_error(error) |
@staticmethod
def _get_sample_size_from_est_error(est_err):
'\n :return: sample size\n\n Calculate sample size from estimation error\n '
return int(math.ceil((4.0 / math.pow(est_err, 2.0)))) | -1,706,389,842,662,100,500 | :return: sample size
Calculate sample size from estimation error | sdks/python/apache_beam/transforms/stats.py | _get_sample_size_from_est_error | TimvdLippe/beam | python | @staticmethod
def _get_sample_size_from_est_error(est_err):
'\n :return: sample size\n\n Calculate sample size from estimation error\n '
return int(math.ceil((4.0 / math.pow(est_err, 2.0)))) |
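A few worked values of the error-to-sample-size mapping implemented above (sample_size = ceil(4 / error**2)).

import math

for err in (0.5, 0.05, 0.01):
    print(err, int(math.ceil(4.0 / math.pow(err, 2.0))))
# 0.5  -> 16     (the smallest sample size the transform accepts)
# 0.05 -> 1600
# 0.01 -> 40000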
def add(self, element):
'\n :param an element from pcoll.\n :return: boolean type whether the value is in the heap\n\n Adds a value to the heap, returning whether the value is (large enough to\n be) in the heap.\n '
if ((len(self._sample_heap) >= self._sample_size) and (element < self._min_hash)):
return False
if (element not in self._sample_set):
self._sample_set.add(element)
heapq.heappush(self._sample_heap, element)
if (len(self._sample_heap) > self._sample_size):
temp = heapq.heappop(self._sample_heap)
self._sample_set.remove(temp)
self._min_hash = self._sample_heap[0]
elif (element < self._min_hash):
self._min_hash = element
return True | 2,688,914,121,887,976,000 | :param an element from pcoll.
:return: boolean type whether the value is in the heap
Adds a value to the heap, returning whether the value is (large enough to
be) in the heap. | sdks/python/apache_beam/transforms/stats.py | add | TimvdLippe/beam | python | def add(self, element):
'\n :param an element from pcoll.\n :return: boolean type whether the value is in the heap\n\n Adds a value to the heap, returning whether the value is (large enough to\n be) in the heap.\n '
if ((len(self._sample_heap) >= self._sample_size) and (element < self._min_hash)):
return False
if (element not in self._sample_set):
self._sample_set.add(element)
heapq.heappush(self._sample_heap, element)
if (len(self._sample_heap) > self._sample_size):
temp = heapq.heappop(self._sample_heap)
self._sample_set.remove(temp)
self._min_hash = self._sample_heap[0]
elif (element < self._min_hash):
self._min_hash = element
return True |
def get_estimate(self):
'\n :return: estimation count of unique values\n\n If heap size is smaller than sample size, just return heap size.\n Otherwise, takes into account the possibility of hash collisions,\n which become more likely than not for 2^32 distinct elements.\n Note that log(1+x) ~ x for small x, so for sampleSize << maxHash\n log(1 - sample_size/sample_space) / log(1 - 1/sample_space) ~ sample_size\n and hence estimate ~ sample_size * hash_space / sample_space\n as one would expect.\n\n Given sample_size / sample_space = est / hash_space\n est = sample_size * hash_space / sample_space\n\n Given above sample_size approximate,\n est = log1p(-sample_size/sample_space) / log1p(-1/sample_space)\n * hash_space / sample_space\n '
if (len(self._sample_heap) < self._sample_size):
return len(self._sample_heap)
else:
sample_space_size = (sys.maxsize - (1.0 * self._min_hash))
est = (((math.log1p(((- self._sample_size) / sample_space_size)) / math.log1p(((- 1) / sample_space_size))) * self._HASH_SPACE_SIZE) / sample_space_size)
return round(est) | -6,950,131,626,907,893,000 | :return: estimation count of unique values
If heap size is smaller than sample size, just return heap size.
Otherwise, takes into account the possibility of hash collisions,
which become more likely than not for 2^32 distinct elements.
Note that log(1+x) ~ x for small x, so for sampleSize << maxHash
log(1 - sample_size/sample_space) / log(1 - 1/sample_space) ~ sample_size
and hence estimate ~ sample_size * hash_space / sample_space
as one would expect.
Given sample_size / sample_space = est / hash_space
est = sample_size * hash_space / sample_space
Given above sample_size approximate,
est = log1p(-sample_size/sample_space) / log1p(-1/sample_space)
* hash_space / sample_space | sdks/python/apache_beam/transforms/stats.py | get_estimate | TimvdLippe/beam | python | def get_estimate(self):
'\n :return: estimation count of unique values\n\n If heap size is smaller than sample size, just return heap size.\n Otherwise, takes into account the possibility of hash collisions,\n which become more likely than not for 2^32 distinct elements.\n Note that log(1+x) ~ x for small x, so for sampleSize << maxHash\n log(1 - sample_size/sample_space) / log(1 - 1/sample_space) ~ sample_size\n and hence estimate ~ sample_size * hash_space / sample_space\n as one would expect.\n\n Given sample_size / sample_space = est / hash_space\n est = sample_size * hash_space / sample_space\n\n Given above sample_size approximate,\n est = log1p(-sample_size/sample_space) / log1p(-1/sample_space)\n * hash_space / sample_space\n '
if (len(self._sample_heap) < self._sample_size):
return len(self._sample_heap)
else:
sample_space_size = (sys.maxsize - (1.0 * self._min_hash))
est = (((math.log1p(((- self._sample_size) / sample_space_size)) / math.log1p(((- 1) / sample_space_size))) * self._HASH_SPACE_SIZE) / sample_space_size)
return round(est) |
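A small numerical sanity check (with made-up sizes, since the class constants are not shown here) that the log1p form above reduces to sample_size * hash_space / sample_space when the sample is much smaller than the sample space.

import math

k, S, H = 1024.0, 2.0 ** 48, 2.0 ** 63   # sample size, sample space, hash space (illustrative values)
exact = math.log1p(-k / S) / math.log1p(-1.0 / S) * H / S
approx = k * H / S
print(exact, approx)  # the two agree to many significant digits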
def build_graph(self):
'\n Creates the computation graph\n '
' Create Variables '
with tf.variable_scope(self.name):
self.step_sizes = self._create_step_size_vars()
' --- Build inner update graph for adapting the policy and sampling trajectories --- '
(self.adapted_policies_params, self.adapt_input_ph_dict) = self._build_inner_adaption()
' ----- Build graph for the meta-update ----- '
self.meta_op_phs_dict = OrderedDict()
(obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict) = self._make_input_placeholders('step0')
self.meta_op_phs_dict.update(all_phs_dict)
(distribution_info_vars, current_policy_params) = ([], [])
(all_surr_objs, all_inner_kls) = ([], [])
for i in range(self.meta_batch_size):
dist_info_sym = self.policy.distribution_info_sym(obs_phs[i], params=None)
distribution_info_vars.append(dist_info_sym)
current_policy_params.append(self.policy.policy_params)
initial_distribution_info_vars = distribution_info_vars
initial_action_phs = action_phs
with tf.variable_scope(self.name):
' Inner updates'
for step_id in range(1, (self.num_inner_grad_steps + 1)):
(surr_objs, adapted_policy_params) = ([], [])
for i in range(self.meta_batch_size):
surr_loss = self._adapt_objective_sym(action_phs[i], adv_phs[i], dist_info_old_phs[i], distribution_info_vars[i])
adapted_params_var = self._adapt_sym(surr_loss, current_policy_params[i])
adapted_policy_params.append(adapted_params_var)
surr_objs.append(surr_loss)
all_surr_objs.append(surr_objs)
(obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict) = self._make_input_placeholders(('step%i' % step_id))
self.meta_op_phs_dict.update(all_phs_dict)
distribution_info_vars = [self.policy.distribution_info_sym(obs_phs[i], params=adapted_policy_params[i]) for i in range(self.meta_batch_size)]
current_policy_params = adapted_policy_params
' Outer objective '
(surr_objs, outer_kls) = ([], [])
for i in range(self.meta_batch_size):
likelihood_ratio = self.policy.distribution.likelihood_ratio_sym(action_phs[i], dist_info_old_phs[i], distribution_info_vars[i])
outer_kl = tf.reduce_mean(self.policy.distribution.kl_sym(dist_info_old_phs[i], distribution_info_vars[i]))
surr_obj = (- tf.reduce_mean((likelihood_ratio * adv_phs[i])))
if self.exploration:
adj_avg_rewards = tf.placeholder(dtype=tf.float32, shape=[None], name=(((('adj_avg_rewards' + '_') + str(self.num_inner_grad_steps)) + '_') + str(i)))
self.meta_op_phs_dict[('step%i_task%i_%s' % (self.num_inner_grad_steps, i, 'adj_avg_rewards'))] = adj_avg_rewards
log_likelihood_inital = self.policy.distribution.log_likelihood_sym(initial_action_phs[i], initial_distribution_info_vars[i])
surr_obj += ((- tf.reduce_mean(adj_avg_rewards)) * tf.reduce_mean(log_likelihood_inital))
surr_objs.append(surr_obj)
outer_kls.append(outer_kl)
mean_outer_kl = tf.reduce_mean(tf.stack(outer_kls))
' Mean over meta tasks '
meta_objective = tf.reduce_mean(tf.stack(surr_objs, 0))
self.optimizer.build_graph(loss=meta_objective, target=self.policy, input_ph_dict=self.meta_op_phs_dict, leq_constraint=(mean_outer_kl, self.step_size)) | -1,550,013,902,086,852,400 | Creates the computation graph | meta_policy_search/meta_algos/trpo_maml.py | build_graph | Manifold-Computing/MMAML-rl | python | def build_graph(self):
'\n \n '
' Create Variables '
with tf.variable_scope(self.name):
self.step_sizes = self._create_step_size_vars()
' --- Build inner update graph for adapting the policy and sampling trajectories --- '
(self.adapted_policies_params, self.adapt_input_ph_dict) = self._build_inner_adaption()
' ----- Build graph for the meta-update ----- '
self.meta_op_phs_dict = OrderedDict()
(obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict) = self._make_input_placeholders('step0')
self.meta_op_phs_dict.update(all_phs_dict)
(distribution_info_vars, current_policy_params) = ([], [])
(all_surr_objs, all_inner_kls) = ([], [])
for i in range(self.meta_batch_size):
dist_info_sym = self.policy.distribution_info_sym(obs_phs[i], params=None)
distribution_info_vars.append(dist_info_sym)
current_policy_params.append(self.policy.policy_params)
initial_distribution_info_vars = distribution_info_vars
initial_action_phs = action_phs
with tf.variable_scope(self.name):
' Inner updates'
for step_id in range(1, (self.num_inner_grad_steps + 1)):
(surr_objs, adapted_policy_params) = ([], [])
for i in range(self.meta_batch_size):
surr_loss = self._adapt_objective_sym(action_phs[i], adv_phs[i], dist_info_old_phs[i], distribution_info_vars[i])
adapted_params_var = self._adapt_sym(surr_loss, current_policy_params[i])
adapted_policy_params.append(adapted_params_var)
surr_objs.append(surr_loss)
all_surr_objs.append(surr_objs)
(obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict) = self._make_input_placeholders(('step%i' % step_id))
self.meta_op_phs_dict.update(all_phs_dict)
distribution_info_vars = [self.policy.distribution_info_sym(obs_phs[i], params=adapted_policy_params[i]) for i in range(self.meta_batch_size)]
current_policy_params = adapted_policy_params
' Outer objective '
(surr_objs, outer_kls) = ([], [])
for i in range(self.meta_batch_size):
likelihood_ratio = self.policy.distribution.likelihood_ratio_sym(action_phs[i], dist_info_old_phs[i], distribution_info_vars[i])
outer_kl = tf.reduce_mean(self.policy.distribution.kl_sym(dist_info_old_phs[i], distribution_info_vars[i]))
surr_obj = (- tf.reduce_mean((likelihood_ratio * adv_phs[i])))
if self.exploration:
adj_avg_rewards = tf.placeholder(dtype=tf.float32, shape=[None], name=(((('adj_avg_rewards' + '_') + str(self.num_inner_grad_steps)) + '_') + str(i)))
self.meta_op_phs_dict[('step%i_task%i_%s' % (self.num_inner_grad_steps, i, 'adj_avg_rewards'))] = adj_avg_rewards
log_likelihood_inital = self.policy.distribution.log_likelihood_sym(initial_action_phs[i], initial_distribution_info_vars[i])
surr_obj += ((- tf.reduce_mean(adj_avg_rewards)) * tf.reduce_mean(log_likelihood_inital))
surr_objs.append(surr_obj)
outer_kls.append(outer_kl)
mean_outer_kl = tf.reduce_mean(tf.stack(outer_kls))
' Mean over meta tasks '
meta_objective = tf.reduce_mean(tf.stack(surr_objs, 0))
self.optimizer.build_graph(loss=meta_objective, target=self.policy, input_ph_dict=self.meta_op_phs_dict, leq_constraint=(mean_outer_kl, self.step_size)) |
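The inner update loop above is plain MAML-style adaptation: one gradient step per task on the surrogate loss, using the learned step sizes. A rough NumPy sketch of that pattern, with placeholder names rather than the repository's TensorFlow symbols:

import numpy as np

def inner_adapt(params, grad_fn, step_size=0.1):
    # One gradient step on a task-specific surrogate loss, mirroring the inner update loop.
    grads = grad_fn(params)
    return {name: value - step_size * grads[name] for name, value in params.items()}

# Toy check: adapting w against the gradient of ||w||^2 shrinks it toward zero.
params = {"w": np.ones(3)}
adapted = inner_adapt(params, lambda p: {"w": 2.0 * p["w"]})
print(adapted["w"])  # [0.8 0.8 0.8]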
def optimize_policy(self, all_samples_data, log=True):
'\n Performs MAML outer step\n\n Args:\n all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and\n meta task\n log (bool) : whether to log statistics\n\n Returns:\n None\n '
meta_op_input_dict = self._extract_input_dict_meta_op(all_samples_data, self._optimization_keys)
logger.log('Computing KL before')
mean_kl_before = self.optimizer.constraint_val(meta_op_input_dict)
logger.log('Computing loss before')
loss_before = self.optimizer.loss(meta_op_input_dict)
logger.log('Optimizing')
self.optimizer.optimize(meta_op_input_dict)
logger.log('Computing loss after')
loss_after = self.optimizer.loss(meta_op_input_dict)
logger.log('Computing KL after')
mean_kl = self.optimizer.constraint_val(meta_op_input_dict)
if log:
logger.logkv('MeanKLBefore', mean_kl_before)
logger.logkv('MeanKL', mean_kl)
logger.logkv('LossBefore', loss_before)
logger.logkv('LossAfter', loss_after)
logger.logkv('dLoss', (loss_before - loss_after)) | 7,132,963,398,032,266,000 | Performs MAML outer step
Args:
all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and
meta task
log (bool) : whether to log statistics
Returns:
None | meta_policy_search/meta_algos/trpo_maml.py | optimize_policy | Manifold-Computing/MMAML-rl | python | def optimize_policy(self, all_samples_data, log=True):
'\n Performs MAML outer step\n\n Args:\n all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and\n meta task\n log (bool) : whether to log statistics\n\n Returns:\n None\n '
meta_op_input_dict = self._extract_input_dict_meta_op(all_samples_data, self._optimization_keys)
logger.log('Computing KL before')
mean_kl_before = self.optimizer.constraint_val(meta_op_input_dict)
logger.log('Computing loss before')
loss_before = self.optimizer.loss(meta_op_input_dict)
logger.log('Optimizing')
self.optimizer.optimize(meta_op_input_dict)
logger.log('Computing loss after')
loss_after = self.optimizer.loss(meta_op_input_dict)
logger.log('Computing KL after')
mean_kl = self.optimizer.constraint_val(meta_op_input_dict)
if log:
logger.logkv('MeanKLBefore', mean_kl_before)
logger.logkv('MeanKL', mean_kl)
logger.logkv('LossBefore', loss_before)
logger.logkv('LossAfter', loss_after)
logger.logkv('dLoss', (loss_before - loss_after)) |
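The outer step above is a constrained optimize call bracketed by before/after diagnostics. A self-contained outline of that control flow (the stub optimizer exists only so the sketch runs; the real object is the KL-constrained optimizer built in build_graph):

class _StubOptimizer:
    # Stand-in so the sketch runs on its own.
    def constraint_val(self, inputs): return 0.0
    def loss(self, inputs): return sum(inputs)
    def optimize(self, inputs): inputs[:] = [x * 0.9 for x in inputs]

def meta_step(optimizer, inputs, log=print):
    # Same bracketing as optimize_policy: diagnostics before, optimize, diagnostics after.
    kl_before = optimizer.constraint_val(inputs)
    loss_before = optimizer.loss(inputs)
    optimizer.optimize(inputs)
    loss_after = optimizer.loss(inputs)
    kl_after = optimizer.constraint_val(inputs)
    log({"MeanKLBefore": kl_before, "MeanKL": kl_after,
         "LossBefore": loss_before, "LossAfter": loss_after,
         "dLoss": loss_before - loss_after})

meta_step(_StubOptimizer(), [1.0, 2.0, 3.0])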
def __init__(self, filename):
'\n\t\tClass initialization.\n\t\t:param filename: name of the file to store the data, str\n\t\t'
self.filename = filename
self.content = {} | 5,144,989,418,529,070,000 | Class initialization.
:param filename: name of the file to store the data, str | scripts/writer.py | __init__ | STASYA00/CityMorph | python | def __init__(self, filename):
'\n\t\tClass initialization.\n\t\t:param filename: name of the file to store the data, str\n\t\t'
self.filename = filename
self.content = {} |
def add(self, instance, result):
'\n\t\tFunction that adds an instance with its smart labels to the collection\n\t\t:param instance: name of instance, str\n\t\t:param result: smart labels, dict {label_name: label_value}\n\t\t:return:\n\t\t'
self.content[instance] = result | 1,213,584,165,480,076,500 | Function that adds an instance with its smart labels to the collection
:param instance: name of instance, str
:param result: smart labels, dict {label_name: label_value}
:return: | scripts/writer.py | add | STASYA00/CityMorph | python | def add(self, instance, result):
'\n\t\tFunction that adds an instance with its smart labels to the collection\n\t\t:param instance: name of instance, str\n\t\t:param result: smart labels, dict {label_name: label_value}\n\t\t:return:\n\t\t'
self.content[instance] = result |
def get_instances(self) -> list:
'\n\t\tFunction that gets the instances that already exist in the file\n\t\t:return: existing instances, list\n\t\t'
return list(self.content.keys()) | -477,849,477,581,249,000 | Function that gets the instances that already exist in the file
:return: existing instances, list | scripts/writer.py | get_instances | STASYA00/CityMorph | python | def get_instances(self) -> list:
'\n\t\tFunction that gets the instances that already exist in the file\n\t\t:return: existing instances, list\n\t\t'
return list(self.content.keys()) |
def reset(self):
'\n\t\tFunction that resets the file to an empty state.\n\t\t:return:\n\t\t'
del self.content
self.content = {} | -6,866,196,078,460,596,000 | Function that resets the file to an empty state.
:return: | scripts/writer.py | reset | STASYA00/CityMorph | python | def reset(self):
'\n\t\tFunction that resets the file to an empty state.\n\t\t:return:\n\t\t'
del self.content
self.content = {} |
def save(self):
'\n\t\tFunction that saves all the smart labels in the class to a local file\n\t\tTODO: add saving to AWS based on AWS_SAVE in config\n\t\t:return:\n\t\t'
with open(self.filename, 'w') as f:
json.dump(self.content, f) | -2,173,692,445,259,486,500 | Function that saves all the smart labels in the class to a local file
TODO: add saving to AWS based on AWS_SAVE in config
:return: | scripts/writer.py | save | STASYA00/CityMorph | python | def save(self):
'\n\t\tFunction that saves all the smart labels in the class to a local file\n\t\tTODO: add saving to AWS based on AWS_SAVE in config\n\t\t:return:\n\t\t'
with open(self.filename, 'w') as f:
json.dump(self.content, f) |
def save(self):
"\n\t\tFunction that saves the writer's content to local system in json format.\n\t\t:return:\n\t\t"
with open(self.filename, 'a') as json_file:
json.dump(self.content, json_file) | 466,244,485,614,108,900 | Function that saves the writer's content to local system in json format.
:return: | scripts/writer.py | save | STASYA00/CityMorph | python | def save(self):
"\n\t\tFunction that saves the writer's content to local system in json format.\n\t\t:return:\n\t\t"
with open(self.filename, 'a') as json_file:
json.dump(self.content, json_file) |
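A hedged usage example for the writer methods above (the class name Writer, the import path, and the label values are assumptions made for illustration):

from scripts.writer import Writer  # assumed module layout

writer = Writer("labels.json")
writer.add("building_001", {"height": 12.5, "roof": "flat"})
writer.add("building_002", {"height": 8.0, "roof": "gabled"})
print(writer.get_instances())  # ['building_001', 'building_002']
writer.save()                  # writes the collected smart labels to labels.json
writer.reset()                 # clears the in-memory collection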
@tf_export('copy')
def copy(input, tensor_name='', debug_ops_spec=[], name=None):
'Copy Op.\n\n Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the\n device on which the tensor is allocated.\n N.B.: If the all downstream attached debug ops are disabled given the current\n gRPC gating status, the output will simply forward the input tensor without\n deep-copying. See the documentation of Debug* ops for more details.\n\n Unlike the CopyHost Op, this op does not have HostMemory constraint on its\n input or output.\n\n Args:\n input: A `Tensor`. Input tensor.\n tensor_name: An optional `string`. Defaults to `""`.\n The name of the input tensor.\n debug_ops_spec: An optional list of `strings`. Defaults to `[]`.\n A list of debug op spec (op, url, gated_grpc) for attached debug\n ops. Each element of the list has the format\n <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented\n as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",\n "DebugIdentity;file:///tmp/tfdbg_1;0".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Output tensor, deep-copied from input.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_, _, _op) = _op_def_lib._apply_op_helper('Copy', input=input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_ops_spec', _op.get_attr('debug_ops_spec'))
_execute.record_gradient('Copy', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'Copy', name, _ctx._post_execution_callbacks, input, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) | 500,006,722,487,040,260 | Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
<debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input. | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | copy | Soum-Soum/Tensorflow_Face_Finder | python | @tf_export('copy')
def copy(input, tensor_name='', debug_ops_spec=[], name=None):
'Copy Op.\n\n Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the\n device on which the tensor is allocated.\n N.B.: If the all downstream attached debug ops are disabled given the current\n gRPC gating status, the output will simply forward the input tensor without\n deep-copying. See the documentation of Debug* ops for more details.\n\n Unlike the CopyHost Op, this op does not have HostMemory constraint on its\n input or output.\n\n Args:\n input: A `Tensor`. Input tensor.\n tensor_name: An optional `string`. Defaults to ``.\n The name of the input tensor.\n debug_ops_spec: An optional list of `strings`. Defaults to `[]`.\n A list of debug op spec (op, url, gated_grpc) for attached debug\n ops. Each element of the list has the format\n <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented\n as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",\n "DebugIdentity;file:///tmp/tfdbg_1;0".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Output tensor, deep-copied from input.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_, _, _op) = _op_def_lib._apply_op_helper('Copy', input=input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_ops_spec', _op.get_attr('debug_ops_spec'))
_execute.record_gradient('Copy', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'Copy', name, _ctx._post_execution_callbacks, input, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) |
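The debug_ops_spec format documented above is a semicolon-separated triple per attached debug op. A hedged TF 1.x graph-mode sketch of calling the generated op directly (normally the tfdbg wrappers insert these nodes rather than user code):

import tensorflow as tf
from tensorflow.python.debug.ops import gen_debug_ops

x = tf.constant([1.0, 2.0, 3.0])
# Each spec string follows "<debug_op>;<url>;<gated_grpc>".
specs = ["DebugIdentity;file:///tmp/tfdbg_1;0",
         "DebugIdentity;grpc://localhost:11011;1"]
y = gen_debug_ops.copy(x, tensor_name="x:0", debug_ops_spec=specs)

with tf.Session() as sess:
    print(sess.run(y))  # deep copy of x; the attached debug ops are a side effect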
def copy_eager_fallback(input, tensor_name='', debug_ops_spec=[], name=None):
'This is the slowpath function for Eager mode.\n This is for function copy\n '
_ctx = _context.context()
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
_result = _execute.execute(b'Copy', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('Copy', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result | -8,941,354,909,665,893,000 | This is the slowpath function for Eager mode.
This is for function copy | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | copy_eager_fallback | Soum-Soum/Tensorflow_Face_Finder | python | def copy_eager_fallback(input, tensor_name='', debug_ops_spec=[], name=None):
'This is the slowpath function for Eager mode.\n This is for function copy\n '
_ctx = _context.context()
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
_result = _execute.execute(b'Copy', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('Copy', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result |
@tf_export('copy_host')
def copy_host(input, tensor_name='', debug_ops_spec=[], name=None):
'Copy Host Op.\n\n Performs CPU-to-CPU deep-copying of tensor.\n N.B.: If the all downstream attached debug ops are disabled given the current\n gRPC gating status, the output will simply forward the input tensor without\n deep-copying. See the documentation of Debug* ops for more details.\n\n Unlike the Copy Op, this op has HostMemory constraint on its input or output.\n\n Args:\n input: A `Tensor`. Input tensor.\n tensor_name: An optional `string`. Defaults to `""`.\n The name of the input tensor.\n debug_ops_spec: An optional list of `strings`. Defaults to `[]`.\n A list of debug op spec (op, url, gated_grpc) for attached debug\n ops. Each element of the list has the format\n <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented\n as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",\n "DebugIdentity;file:///tmp/tfdbg_1;0".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Output tensor, deep-copied from input.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy_host' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_, _, _op) = _op_def_lib._apply_op_helper('CopyHost', input=input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_ops_spec', _op.get_attr('debug_ops_spec'))
_execute.record_gradient('CopyHost', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'CopyHost', name, _ctx._post_execution_callbacks, input, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) | 8,536,506,984,682,582,000 | Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
N.B.: If all the downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
<debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input. | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | copy_host | Soum-Soum/Tensorflow_Face_Finder | python | @tf_export('copy_host')
def copy_host(input, tensor_name='', debug_ops_spec=[], name=None):
'Copy Host Op.\n\n Performs CPU-to-CPU deep-copying of tensor.\n N.B.: If the all downstream attached debug ops are disabled given the current\n gRPC gating status, the output will simply forward the input tensor without\n deep-copying. See the documentation of Debug* ops for more details.\n\n Unlike the Copy Op, this op has HostMemory constraint on its input or output.\n\n Args:\n input: A `Tensor`. Input tensor.\n tensor_name: An optional `string`. Defaults to ``.\n The name of the input tensor.\n debug_ops_spec: An optional list of `strings`. Defaults to `[]`.\n A list of debug op spec (op, url, gated_grpc) for attached debug\n ops. Each element of the list has the format\n <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented\n as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",\n "DebugIdentity;file:///tmp/tfdbg_1;0".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Output tensor, deep-copied from input.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy_host' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_, _, _op) = _op_def_lib._apply_op_helper('CopyHost', input=input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_ops_spec', _op.get_attr('debug_ops_spec'))
_execute.record_gradient('CopyHost', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'CopyHost', name, _ctx._post_execution_callbacks, input, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) |
def copy_host_eager_fallback(input, tensor_name='', debug_ops_spec=[], name=None):
'This is the slowpath function for Eager mode.\n This is for function copy_host\n '
_ctx = _context.context()
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy_host' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
_result = _execute.execute(b'CopyHost', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('CopyHost', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result | -8,671,730,748,628,526,000 | This is the slowpath function for Eager mode.
This is for function copy_host | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | copy_host_eager_fallback | Soum-Soum/Tensorflow_Face_Finder | python | def copy_host_eager_fallback(input, tensor_name='', debug_ops_spec=[], name=None):
'This is the slowpath function for Eager mode.\n This is for function copy_host\n '
_ctx = _context.context()
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_ops_spec is None):
debug_ops_spec = []
if (not isinstance(debug_ops_spec, (list, tuple))):
raise TypeError(("Expected list for 'debug_ops_spec' argument to 'copy_host' Op, not %r." % debug_ops_spec))
debug_ops_spec = [_execute.make_str(_s, 'debug_ops_spec') for _s in debug_ops_spec]
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'tensor_name', tensor_name, 'debug_ops_spec', debug_ops_spec)
_result = _execute.execute(b'CopyHost', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('CopyHost', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result |
@tf_export('debug_identity')
def debug_identity(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'Debug Identity Op.\n\n Provides an identity mapping of the non-Ref type input tensor for debugging.\n\n Args:\n input: A `Tensor`. Input tensor, non-Reference type.\n device_name: An optional `string`. Defaults to `""`.\n tensor_name: An optional `string`. Defaults to `""`.\n Name of the input tensor.\n debug_urls: An optional list of `strings`. Defaults to `[]`.\n List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011\n gated_grpc: An optional `bool`. Defaults to `False`.\n Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Output tensor that equals the input tensor.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_identity' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_, _, _op) = _op_def_lib._apply_op_helper('DebugIdentity', input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'device_name', _op.get_attr('device_name'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_urls', _op.get_attr('debug_urls'), 'gated_grpc', _op.get_attr('gated_grpc'))
_execute.record_gradient('DebugIdentity', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'DebugIdentity', name, _ctx._post_execution_callbacks, input, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) | -7,136,687,543,313,901,000 | Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor. | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | debug_identity | Soum-Soum/Tensorflow_Face_Finder | python | @tf_export('debug_identity')
def debug_identity(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'Debug Identity Op.\n\n Provides an identity mapping of the non-Ref type input tensor for debugging.\n\n Args:\n input: A `Tensor`. Input tensor, non-Reference type.\n device_name: An optional `string`. Defaults to ``.\n tensor_name: An optional `string`. Defaults to ``.\n Name of the input tensor.\n debug_urls: An optional list of `strings`. Defaults to `[]`.\n List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011\n gated_grpc: An optional `bool`. Defaults to `False`.\n Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Output tensor that equals the input tensor.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_identity' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_, _, _op) = _op_def_lib._apply_op_helper('DebugIdentity', input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'device_name', _op.get_attr('device_name'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_urls', _op.get_attr('debug_urls'), 'gated_grpc', _op.get_attr('gated_grpc'))
_execute.record_gradient('DebugIdentity', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'DebugIdentity', name, _ctx._post_execution_callbacks, input, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) |
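For DebugIdentity the output simply forwards the input; the interesting part is the debug_urls list. Another hedged TF 1.x sketch (again, in normal use the tfdbg session wrappers add these nodes rather than user code):

import numpy as np
import tensorflow as tf
from tensorflow.python.debug.ops import gen_debug_ops

x = tf.constant([[1.0, np.nan], [2.0, 3.0]])
# With a file:// target the dump is written unconditionally (gated_grpc stays False).
y = gen_debug_ops.debug_identity(x, tensor_name="x:0",
                                 debug_urls=["file:///tmp/tfdbg_identity"],
                                 gated_grpc=False)
with tf.Session() as sess:
    print(sess.run(y))  # identical values to x; the dump is a side effect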
def debug_identity_eager_fallback(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'This is the slowpath function for Eager mode.\n This is for function debug_identity\n '
_ctx = _context.context()
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_identity' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
_result = _execute.execute(b'DebugIdentity', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('DebugIdentity', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result | -1,054,784,189,007,809,000 | This is the slowpath function for Eager mode.
This is for function debug_identity | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | debug_identity_eager_fallback | Soum-Soum/Tensorflow_Face_Finder | python | def debug_identity_eager_fallback(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'This is the slowpath function for Eager mode.\n This is for function debug_identity\n '
_ctx = _context.context()
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_identity' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
_result = _execute.execute(b'DebugIdentity', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('DebugIdentity', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result |
@tf_export('debug_nan_count')
def debug_nan_count(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'Debug NaN Value Counter Op\n\n Counts number of NaNs in the input tensor, for debugging.\n\n Args:\n input: A `Tensor`. Input tensor, non-Reference type.\n device_name: An optional `string`. Defaults to `""`.\n tensor_name: An optional `string`. Defaults to `""`.\n Name of the input tensor.\n debug_urls: An optional list of `strings`. Defaults to `[]`.\n List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011.\n gated_grpc: An optional `bool`. Defaults to `False`.\n Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int64`.\n An integer output tensor that is the number of NaNs in the input.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_nan_count' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_, _, _op) = _op_def_lib._apply_op_helper('DebugNanCount', input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'device_name', _op.get_attr('device_name'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_urls', _op.get_attr('debug_urls'), 'gated_grpc', _op.get_attr('gated_grpc'))
_execute.record_gradient('DebugNanCount', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'DebugNanCount', name, _ctx._post_execution_callbacks, input, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) | 6,683,522,879,572,854,000 | Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input. | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | debug_nan_count | Soum-Soum/Tensorflow_Face_Finder | python | @tf_export('debug_nan_count')
def debug_nan_count(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'Debug NaN Value Counter Op\n\n Counts number of NaNs in the input tensor, for debugging.\n\n Args:\n input: A `Tensor`. Input tensor, non-Reference type.\n device_name: An optional `string`. Defaults to ``.\n tensor_name: An optional `string`. Defaults to ``.\n Name of the input tensor.\n debug_urls: An optional list of `strings`. Defaults to `[]`.\n List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011.\n gated_grpc: An optional `bool`. Defaults to `False`.\n Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int64`.\n An integer output tensor that is the number of NaNs in the input.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_nan_count' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_, _, _op) = _op_def_lib._apply_op_helper('DebugNanCount', input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'device_name', _op.get_attr('device_name'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_urls', _op.get_attr('debug_urls'), 'gated_grpc', _op.get_attr('gated_grpc'))
_execute.record_gradient('DebugNanCount', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'DebugNanCount', name, _ctx._post_execution_callbacks, input, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) |
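Unlike the identity-style ops, DebugNanCount returns a value of its own: the number of NaNs in the input. A short hedged check (TF 1.x graph mode assumed):

import numpy as np
import tensorflow as tf
from tensorflow.python.debug.ops import gen_debug_ops

x = tf.constant([[np.nan, 1.0], [np.nan, 2.0]])
count = gen_debug_ops.debug_nan_count(x, tensor_name="x:0")
with tf.Session() as sess:
    print(sess.run(count))  # number of NaNs in x: 2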
def debug_nan_count_eager_fallback(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'This is the slowpath function for Eager mode.\n This is for function debug_nan_count\n '
_ctx = _context.context()
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_nan_count' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
_result = _execute.execute(b'DebugNanCount', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('DebugNanCount', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result | 5,057,920,901,955,037,000 | This is the slowpath function for Eager mode.
This is for function debug_nan_count | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | debug_nan_count_eager_fallback | Soum-Soum/Tensorflow_Face_Finder | python | def debug_nan_count_eager_fallback(input, device_name='', tensor_name='', debug_urls=[], gated_grpc=False, name=None):
'This is the slowpath function for Eager mode.\n This is for function debug_nan_count\n '
_ctx = _context.context()
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_nan_count' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'gated_grpc', gated_grpc)
_result = _execute.execute(b'DebugNanCount', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('DebugNanCount', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result |
@tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name='', tensor_name='', debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
'Debug Numeric Summary Op.\n\n Provide a basic summary of numeric value types, range and distribution.\n\n Args:\n input: A `Tensor`. Input tensor, non-Reference type, float or double.\n device_name: An optional `string`. Defaults to `""`.\n tensor_name: An optional `string`. Defaults to `""`.\n Name of the input tensor.\n debug_urls: An optional list of `strings`. Defaults to `[]`.\n List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011\n lower_bound: An optional `float`. Defaults to `float(\'-inf\')`.\n (float) The lower bound <= which values will be included in the\n generalized -inf count. Default: -inf.\n upper_bound: An optional `float`. Defaults to `float(\'inf\')`.\n (float) The upper bound >= which values will be included in the\n generalized +inf count. Default: +inf.\n mute_if_healthy: An optional `bool`. Defaults to `False`.\n (bool) Do not send data to the debug URLs unless at least one\n of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\n inf counts) is non-zero.\n gated_grpc: An optional `bool`. Defaults to `False`.\n Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float64`.\n A double tensor of shape [14 + nDimensions], where nDimensions is the\n the number of dimensions of the tensor\'s shape. The elements of output are:\n [0]: is initialized (1.0) or not (0.0).\n [1]: total number of elements\n [2]: NaN element count\n [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\n default.\n [4]: negative element count (excluding -inf), if lower_bound is the default\n -inf. Otherwise, this is the count of elements > lower_bound and < 0.\n [5]: zero element count\n [6]: positive element count (excluding +inf), if upper_bound is the default\n -inf. Otherwise, this is the count of elements < upper_bound and > 0.\n [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by\n default.\n Output elements [1:8] are all zero, if the tensor is uninitialized.\n [8]: minimum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: +inf.\n [9]: maximum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: -inf.\n [10]: mean of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [11]: variance of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [12]: Data type of the tensor encoded as an enum integer. See the DataType\n proto for more details.\n [13]: Number of dimensions of the tensor (ndims).\n [14+]: Sizes of the dimensions.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_numeric_summary' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (lower_bound is None):
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, 'lower_bound')
if (upper_bound is None):
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, 'upper_bound')
if (mute_if_healthy is None):
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, 'mute_if_healthy')
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_, _, _op) = _op_def_lib._apply_op_helper('DebugNumericSummary', input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'device_name', _op.get_attr('device_name'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_urls', _op.get_attr('debug_urls'), 'lower_bound', _op.get_attr('lower_bound'), 'upper_bound', _op.get_attr('upper_bound'), 'mute_if_healthy', _op.get_attr('mute_if_healthy'), 'gated_grpc', _op.get_attr('gated_grpc'))
_execute.record_gradient('DebugNumericSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'DebugNumericSummary', name, _ctx._post_execution_callbacks, input, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'lower_bound', lower_bound, 'upper_bound', upper_bound, 'mute_if_healthy', mute_if_healthy, 'gated_grpc', gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) | -8,895,079,336,447,695,000 | Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
lower_bound: An optional `float`. Defaults to `float('-inf')`.
(float) The lower bound <= which values will be included in the
generalized -inf count. Default: -inf.
upper_bound: An optional `float`. Defaults to `float('inf')`.
(float) The upper bound >= which values will be included in the
generalized +inf count. Default: +inf.
mute_if_healthy: An optional `bool`. Defaults to `False`.
(bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
inf counts) is non-zero.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
A double tensor of shape [14 + nDimensions], where nDimensions is the
number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
default.
[4]: negative element count (excluding -inf), if lower_bound is the default
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
[6]: positive element count (excluding +inf), if upper_bound is the default
+inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType
proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions. | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | debug_numeric_summary | Soum-Soum/Tensorflow_Face_Finder | python | @tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name='', tensor_name='', debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
'Debug Numeric Summary Op.\n\n Provide a basic summary of numeric value types, range and distribution.\n\n Args:\n input: A `Tensor`. Input tensor, non-Reference type, float or double.\n device_name: An optional `string`. Defaults to ``.\n tensor_name: An optional `string`. Defaults to ``.\n Name of the input tensor.\n debug_urls: An optional list of `strings`. Defaults to `[]`.\n List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011\n lower_bound: An optional `float`. Defaults to `float(\'-inf\')`.\n (float) The lower bound <= which values will be included in the\n generalized -inf count. Default: -inf.\n upper_bound: An optional `float`. Defaults to `float(\'inf\')`.\n (float) The upper bound >= which values will be included in the\n generalized +inf count. Default: +inf.\n mute_if_healthy: An optional `bool`. Defaults to `False`.\n (bool) Do not send data to the debug URLs unless at least one\n of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\n inf counts) is non-zero.\n gated_grpc: An optional `bool`. Defaults to `False`.\n Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float64`.\n A double tensor of shape [14 + nDimensions], where nDimensions is the\n the number of dimensions of the tensor\'s shape. The elements of output are:\n [0]: is initialized (1.0) or not (0.0).\n [1]: total number of elements\n [2]: NaN element count\n [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\n default.\n [4]: negative element count (excluding -inf), if lower_bound is the default\n -inf. Otherwise, this is the count of elements > lower_bound and < 0.\n [5]: zero element count\n [6]: positive element count (excluding +inf), if upper_bound is the default\n -inf. Otherwise, this is the count of elements < upper_bound and > 0.\n [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by\n default.\n Output elements [1:8] are all zero, if the tensor is uninitialized.\n [8]: minimum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: +inf.\n [9]: maximum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: -inf.\n [10]: mean of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [11]: variance of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [12]: Data type of the tensor encoded as an enum integer. See the DataType\n proto for more details.\n [13]: Number of dimensions of the tensor (ndims).\n [14+]: Sizes of the dimensions.\n '
_ctx = _context.context()
if (not _ctx.executing_eagerly()):
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_numeric_summary' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (lower_bound is None):
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, 'lower_bound')
if (upper_bound is None):
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, 'upper_bound')
if (mute_if_healthy is None):
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, 'mute_if_healthy')
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_, _, _op) = _op_def_lib._apply_op_helper('DebugNumericSummary', input=input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ('T', _op.get_attr('T'), 'device_name', _op.get_attr('device_name'), 'tensor_name', _op.get_attr('tensor_name'), 'debug_urls', _op.get_attr('debug_urls'), 'lower_bound', _op.get_attr('lower_bound'), 'upper_bound', _op.get_attr('upper_bound'), 'mute_if_healthy', _op.get_attr('mute_if_healthy'), 'gated_grpc', _op.get_attr('gated_grpc'))
_execute.record_gradient('DebugNumericSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(_ctx._handle, _ctx.device_name, 'DebugNumericSummary', name, _ctx._post_execution_callbacks, input, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'lower_bound', lower_bound, 'upper_bound', upper_bound, 'mute_if_healthy', mute_if_healthy, 'gated_grpc', gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(input, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if (name is not None):
message = ((e.message + ' name: ') + name)
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None) |
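The 14-element output layout described in the docstring above maps each slot to one statistic. A minimal sketch of how such a summary vector could be unpacked into named fields follows; the helper and its field names are illustrative and are not part of the TensorFlow API.
# Illustrative helper (not from gen_debug_ops): label the entries of a
# DebugNumericSummary output vector following the layout documented above.
SUMMARY_FIELDS = [
    'is_initialized', 'element_count', 'nan_count', 'neg_inf_count',
    'negative_count', 'zero_count', 'positive_count', 'pos_inf_count',
    'min', 'max', 'mean', 'variance', 'dtype_enum', 'ndims',
]

def unpack_numeric_summary(vec):
    # vec is the [14 + ndims] output; entries 14 and onwards hold the
    # sizes of the summarized tensor's dimensions.
    values = list(vec)
    named = dict(zip(SUMMARY_FIELDS, values[:14]))
    named['dim_sizes'] = values[14:]
    return named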
def debug_numeric_summary_eager_fallback(input, device_name='', tensor_name='', debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
'This is the slowpath function for Eager mode.\n This is for function debug_numeric_summary\n '
_ctx = _context.context()
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_numeric_summary' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (lower_bound is None):
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, 'lower_bound')
if (upper_bound is None):
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, 'upper_bound')
if (mute_if_healthy is None):
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, 'mute_if_healthy')
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'lower_bound', lower_bound, 'upper_bound', upper_bound, 'mute_if_healthy', mute_if_healthy, 'gated_grpc', gated_grpc)
_result = _execute.execute(b'DebugNumericSummary', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('DebugNumericSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result | 2,770,299,211,202,803,700 | This is the slowpath function for Eager mode.
This is for function debug_numeric_summary | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | debug_numeric_summary_eager_fallback | Soum-Soum/Tensorflow_Face_Finder | python | def debug_numeric_summary_eager_fallback(input, device_name='', tensor_name='', debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
'This is the slowpath function for Eager mode.\n This is for function debug_numeric_summary\n '
_ctx = _context.context()
if (device_name is None):
device_name = ''
device_name = _execute.make_str(device_name, 'device_name')
if (tensor_name is None):
tensor_name = ''
tensor_name = _execute.make_str(tensor_name, 'tensor_name')
if (debug_urls is None):
debug_urls = []
if (not isinstance(debug_urls, (list, tuple))):
raise TypeError(("Expected list for 'debug_urls' argument to 'debug_numeric_summary' Op, not %r." % debug_urls))
debug_urls = [_execute.make_str(_s, 'debug_urls') for _s in debug_urls]
if (lower_bound is None):
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, 'lower_bound')
if (upper_bound is None):
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, 'upper_bound')
if (mute_if_healthy is None):
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, 'mute_if_healthy')
if (gated_grpc is None):
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, 'gated_grpc')
(_attr_T, (input,)) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ('T', _attr_T, 'device_name', device_name, 'tensor_name', tensor_name, 'debug_urls', debug_urls, 'lower_bound', lower_bound, 'upper_bound', upper_bound, 'mute_if_healthy', mute_if_healthy, 'gated_grpc', gated_grpc)
_result = _execute.execute(b'DebugNumericSummary', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient('DebugNumericSummary', _inputs_flat, _attrs, _result, name)
(_result,) = _result
return _result |
@click.command('exons', short_help='Load exons')
@click.option('-e', '--exons-file', type=click.Path(exists=True), help='Path to file with ensembl exons')
@click.option('-b', '--build', type=click.Choice(['37', '38']), default='37', show_default=True)
@with_appcontext
def exons(build, exons_file):
'Load exons into the scout database. If no file, fetch exons from ensembl biomart'
adapter = store
LOG.info('Running scout load exons')
start = datetime.now()
existing_exon = adapter.exon(build=build)
if existing_exon:
LOG.warning('Dropping all exons ')
adapter.drop_exons(build=build)
LOG.info('Exons dropped')
nr_exons = 0
if exons_file:
ensembl_exons = get_file_handle(exons_file)
for (nr_exons, line) in enumerate(ensembl_exons, 1):
pass
ensembl_exons = get_file_handle(exons_file)
else:
ensembl_exons = fetch_ensembl_exons(build=build)
nr_exons = 1360000
try:
load_exons(adapter, ensembl_exons, build, nr_exons=nr_exons)
except Exception as err:
LOG.warning('Something went wrong with ensembl biomart')
LOG.info('Please download a mart dump manually, see instructions in user guide for admins')
return
LOG.info('Time to load exons: {0}'.format((datetime.now() - start))) | -8,420,868,486,457,255,000 | Load exons into the scout database. If no file, fetch exons from ensembl biomart | scout/commands/load/exons.py | exons | Clinical-Genomics/scout | python | @click.command('exons', short_help='Load exons')
@click.option('-e', '--exons-file', type=click.Path(exists=True), help='Path to file with ensembl exons')
@click.option('-b', '--build', type=click.Choice(['37', '38']), default='37', show_default=True)
@with_appcontext
def exons(build, exons_file):
adapter = store
LOG.info('Running scout load exons')
start = datetime.now()
existing_exon = adapter.exon(build=build)
if existing_exon:
LOG.warning('Dropping all exons ')
adapter.drop_exons(build=build)
LOG.info('Exons dropped')
nr_exons = 0
if exons_file:
ensembl_exons = get_file_handle(exons_file)
for (nr_exons, line) in enumerate(ensembl_exons, 1):
pass
ensembl_exons = get_file_handle(exons_file)
else:
ensembl_exons = fetch_ensembl_exons(build=build)
nr_exons = 1360000
try:
load_exons(adapter, ensembl_exons, build, nr_exons=nr_exons)
except Exception as err:
LOG.warning('Something went wrong with ensembl biomart')
LOG.info('Please download a mart dump manually, see instructions in user guide for admins')
return
LOG.info('Time to load exons: {0}'.format((datetime.now() - start))) |
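The command above learns the number of exons by exhausting the file handle once with enumerate and then reopening it for the actual load. A small sketch of that counting idiom, assuming a plain text file and the built-in open() in place of get_file_handle, is:
# Sketch of the counting idiom used by the exons command (hypothetical path).
def count_records(path):
    nr_records = 0
    with open(path) as handle:
        for nr_records, _line in enumerate(handle, 1):
            pass  # only the final counter value is needed
    return nr_records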
def dictkeys(dct):
'\n Returns a list of keys of dictionary\n\n dict.keys returns a view that works like .keys in Python 2\n *except* any modifications in the dictionary will be visible\n (and will cause errors if the view is being iterated over while\n it is modified).\n '
return list(dct.keys()) | -3,805,923,842,563,118,600 | Returns a list of keys of dictionary
dict.keys returns a view that works like .keys in Python 2
*except* any modifications in the dictionary will be visible
(and will cause errors if the view is being iterated over while
it is modified). | pika/compat.py | dictkeys | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def dictkeys(dct):
'\n Returns a list of keys of dictionary\n\n dict.keys returns a view that works like .keys in Python 2\n *except* any modifications in the dictionary will be visible\n (and will cause errors if the view is being iterated over while\n it is modified).\n '
return list(dct.keys()) |
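The point of returning a list rather than the live view, which also applies to dictvalues and dict_iteritems below, is that the snapshot stays valid while the dictionary is mutated. A short illustration:
# Illustration: mutating a dict while iterating its live view raises
# RuntimeError in Python 3; iterating the list snapshot is safe.
d = {'a': 1, 'b': 2, 'c': 1}
for key in dictkeys(d):
    if d[key] == 1:
        del d[key]  # fine, the snapshot is independent of the dict
assert d == {'b': 2}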
def dictvalues(dct):
'\n Returns a list of values of a dictionary\n\n dict.values returns a view that works like .values in Python 2\n *except* any modifications in the dictionary will be visible\n (and will cause errors if the view is being iterated over while\n it is modified).\n '
return list(dct.values()) | 2,355,709,757,336,019,500 | Returns a list of values of a dictionary
dict.values returns a view that works like .values in Python 2
*except* any modifications in the dictionary will be visible
(and will cause errors if the view is being iterated over while
it is modified). | pika/compat.py | dictvalues | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def dictvalues(dct):
'\n Returns a list of values of a dictionary\n\n dict.values returns a view that works like .values in Python 2\n *except* any modifications in the dictionary will be visible\n (and will cause errors if the view is being iterated over while\n it is modified).\n '
return list(dct.values()) |
def dict_iteritems(dct):
'\n Returns an iterator of items (key/value pairs) of a dictionary\n\n dict.items returns a view that works like .items in Python 2\n *except* any modifications in the dictionary will be visible\n (and will cause errors if the view is being iterated over while\n it is modified).\n '
return dct.items() | -837,165,073,974,448,100 | Returns an iterator of items (key/value pairs) of a dictionary
dict.items returns a view that works like .items in Python 2
*except* any modifications in the dictionary will be visible
(and will cause errors if the view is being iterated over while
it is modified). | pika/compat.py | dict_iteritems | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def dict_iteritems(dct):
'\n Returns an iterator of items (key/value pairs) of a dictionary\n\n dict.items returns a view that works like .items in Python 2\n *except* any modifications in the dictionary will be visible\n (and will cause errors if the view is being iterated over while\n it is modified).\n '
return dct.items() |
def dict_itervalues(dct):
'\n :param dict dct:\n :returns: an iterator of the values of a dictionary\n '
return dct.values() | 5,895,645,862,643,673,000 | :param dict dct:
:returns: an iterator of the values of a dictionary | pika/compat.py | dict_itervalues | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def dict_itervalues(dct):
'\n :param dict dct:\n :returns: an iterator of the values of a dictionary\n '
return dct.values() |
def byte(*args):
'\n This is the same as Python 2 `chr(n)` for bytes in Python 3\n\n Returns a single byte `bytes` for the given int argument (we\n optimize it a bit here by passing the positional argument tuple\n directly to the bytes constructor.\n '
return bytes(args) | -8,906,836,667,376,551,000 | This is the same as Python 2 `chr(n)` for bytes in Python 3
Returns a single byte `bytes` for the given int argument (we
optimize it a bit here by passing the positional argument tuple
directly to the bytes constructor. | pika/compat.py | byte | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def byte(*args):
'\n This is the same as Python 2 `chr(n)` for bytes in Python 3\n\n Returns a single byte `bytes` for the given int argument (we\n optimize it a bit here by passing the positional argument tuple\n directly to the bytes constructor.\n '
return bytes(args) |
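Because bytes(args) receives the whole positional tuple, byte accepts one or several integer code points; a brief illustration:
# Illustration of byte(): the Python 3 counterpart of Python 2's chr(n).
assert byte(65) == b'A'
assert byte(10) == b'\n'
assert byte(72, 105) == b'Hi'  # several ints form a multi-byte value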
def canonical_str(value):
'\n Return the canonical str value for the string.\n In both Python 3 and Python 2 this is str.\n '
return str(value) | -6,477,917,416,949,840,000 | Return the canonical str value for the string.
In both Python 3 and Python 2 this is str. | pika/compat.py | canonical_str | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def canonical_str(value):
'\n Return the canonical str value for the string.\n In both Python 3 and Python 2 this is str.\n '
return str(value) |
def canonical_str(value):
'\n Returns the canonical string value of the given string.\n In Python 2 this is the value unchanged if it is an str, otherwise\n it is the unicode value encoded as UTF-8.\n '
try:
return str(value)
except UnicodeEncodeError:
return str(value.encode('utf-8')) | -7,015,359,506,289,830,000 | Returns the canonical string value of the given string.
In Python 2 this is the value unchanged if it is an str, otherwise
it is the unicode value encoded as UTF-8. | pika/compat.py | canonical_str | EnjoyLifeFund/macHighSierra-py36-pkgs | python | def canonical_str(value):
'\n Returns the canonical string value of the given string.\n In Python 2 this is the value unchanged if it is an str, otherwise\n it is the unicode value encoded as UTF-8.\n '
try:
return str(value)
except UnicodeEncodeError:
return str(value.encode('utf-8')) |
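Under Python 2 this fallback means ASCII text passes through str() unchanged while non-ASCII unicode comes back as UTF-8 bytes; hypothetical inputs and results:
# Hypothetical Python 2 behaviour of the fallback path:
# canonical_str(u'abc')      -> 'abc'            # str() succeeds for ASCII
# canonical_str(u'sm\xf6r')  -> 'sm\xc3\xb6r'    # UnicodeEncodeError, so the
#                                                # UTF-8 encoding is returned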
def __init__(self, value=None, reject_on_error=None, checked=None, local_vars_configuration=None):
'ExtendedBoolValueTest - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._value = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if (value is not None):
self.value = value
if (reject_on_error is not None):
self.reject_on_error = reject_on_error
if (checked is not None):
self.checked = checked | 5,704,099,914,382,409,000 | ExtendedBoolValueTest - a model defined in OpenAPI | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | __init__ | Telestream/telestream-cloud-python-sdk | python | def __init__(self, value=None, reject_on_error=None, checked=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._value = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if (value is not None):
self.value = value
if (reject_on_error is not None):
self.reject_on_error = reject_on_error
if (checked is not None):
self.checked = checked |
@property
def value(self):
'Gets the value of this ExtendedBoolValueTest. # noqa: E501\n\n\n :return: The value of this ExtendedBoolValueTest. # noqa: E501\n :rtype: ExtendedBool\n '
return self._value | -7,428,640,341,322,616,000 | Gets the value of this ExtendedBoolValueTest. # noqa: E501
:return: The value of this ExtendedBoolValueTest. # noqa: E501
:rtype: ExtendedBool | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | value | Telestream/telestream-cloud-python-sdk | python | @property
def value(self):
'Gets the value of this ExtendedBoolValueTest. # noqa: E501\n\n\n :return: The value of this ExtendedBoolValueTest. # noqa: E501\n :rtype: ExtendedBool\n '
return self._value |
@value.setter
def value(self, value):
'Sets the value of this ExtendedBoolValueTest.\n\n\n :param value: The value of this ExtendedBoolValueTest. # noqa: E501\n :type: ExtendedBool\n '
self._value = value | -8,696,274,499,143,007,000 | Sets the value of this ExtendedBoolValueTest.
:param value: The value of this ExtendedBoolValueTest. # noqa: E501
:type: ExtendedBool | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | value | Telestream/telestream-cloud-python-sdk | python | @value.setter
def value(self, value):
'Sets the value of this ExtendedBoolValueTest.\n\n\n :param value: The value of this ExtendedBoolValueTest. # noqa: E501\n :type: ExtendedBool\n '
self._value = value |
@property
def reject_on_error(self):
'Gets the reject_on_error of this ExtendedBoolValueTest. # noqa: E501\n\n\n :return: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501\n :rtype: bool\n '
return self._reject_on_error | 5,830,948,398,873,198,000 | Gets the reject_on_error of this ExtendedBoolValueTest. # noqa: E501
:return: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501
:rtype: bool | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | reject_on_error | Telestream/telestream-cloud-python-sdk | python | @property
def reject_on_error(self):
'Gets the reject_on_error of this ExtendedBoolValueTest. # noqa: E501\n\n\n :return: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501\n :rtype: bool\n '
return self._reject_on_error |