Columns:
body: string, length 26 to 98.2k
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, length 1 to 16.8k
path: string, length 5 to 230
name: string, length 1 to 96
repository_name: string, length 7 to 89
lang: string, 1 distinct value
body_without_docstring: string, length 20 to 98.2k
def __init__(__self__, *, group_id: Optional[pulumi.Input[str]]=None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None): '\n Input properties used for looking up and filtering GroupMemberships resources.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n ' if (group_id is not None): pulumi.set(__self__, 'group_id', group_id) if (users is not None): pulumi.set(__self__, 'users', users)
-1,124,800,230,950,680,700
Input properties used for looking up and filtering GroupMemberships resources. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.
sdk/python/pulumi_okta/group_memberships.py
__init__
pulumi/pulumi-okta
python
def __init__(__self__, *, group_id: Optional[pulumi.Input[str]]=None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None): '\n Input properties used for looking up and filtering GroupMemberships resources.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n ' if (group_id is not None): pulumi.set(__self__, 'group_id', group_id) if (users is not None): pulumi.set(__self__, 'users', users)
@property @pulumi.getter(name='groupId') def group_id(self) -> Optional[pulumi.Input[str]]: '\n ID of a Okta group.\n ' return pulumi.get(self, 'group_id')
-3,177,421,181,971,841,500
ID of a Okta group.
sdk/python/pulumi_okta/group_memberships.py
group_id
pulumi/pulumi-okta
python
@property @pulumi.getter(name='groupId') def group_id(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'group_id')
@property @pulumi.getter def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: '\n The list of Okta user IDs which the group should have membership managed for.\n ' return pulumi.get(self, 'users')
99,600,952,663,328,530
The list of Okta user IDs which the group should have membership managed for.
sdk/python/pulumi_okta/group_memberships.py
users
pulumi/pulumi-okta
python
@property @pulumi.getter def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: '\n \n ' return pulumi.get(self, 'users')
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, group_id: Optional[pulumi.Input[str]]=None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, __props__=None): '\n Resource to manage a set of memberships for a specific group.\n\n This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource.\n\n When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_okta as okta\n\n test_group = okta.group.Group("testGroup", description="testing, testing")\n test_group_memberships = okta.GroupMemberships("testGroupMemberships",\n group_id=test_group.id,\n users=[\n okta_user["test1"]["id"],\n okta_user["test2"]["id"],\n ])\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n ' ...
-5,755,180,085,078,006,000
Resource to manage a set of memberships for a specific group. This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource. When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state. ## Example Usage ```python import pulumi import pulumi_okta as okta test_group = okta.group.Group("testGroup", description="testing, testing") test_group_memberships = okta.GroupMemberships("testGroupMemberships", group_id=test_group.id, users=[ okta_user["test1"]["id"], okta_user["test2"]["id"], ]) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.
sdk/python/pulumi_okta/group_memberships.py
__init__
pulumi/pulumi-okta
python
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, group_id: Optional[pulumi.Input[str]]=None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, __props__=None): '\n Resource to manage a set of memberships for a specific group.\n\n This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource.\n\n When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_okta as okta\n\n test_group = okta.group.Group("testGroup", description="testing, testing")\n test_group_memberships = okta.GroupMemberships("testGroupMemberships",\n group_id=test_group.id,\n users=[\n okta_user["test1"]["id"],\n okta_user["test2"]["id"],\n ])\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n ' ...
@overload def __init__(__self__, resource_name: str, args: GroupMembershipsArgs, opts: Optional[pulumi.ResourceOptions]=None): '\n Resource to manage a set of memberships for a specific group.\n\n This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource.\n\n When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_okta as okta\n\n test_group = okta.group.Group("testGroup", description="testing, testing")\n test_group_memberships = okta.GroupMemberships("testGroupMemberships",\n group_id=test_group.id,\n users=[\n okta_user["test1"]["id"],\n okta_user["test2"]["id"],\n ])\n ```\n\n :param str resource_name: The name of the resource.\n :param GroupMembershipsArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...
-7,360,302,168,923,495,000
Resource to manage a set of memberships for a specific group. This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource. When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state. ## Example Usage ```python import pulumi import pulumi_okta as okta test_group = okta.group.Group("testGroup", description="testing, testing") test_group_memberships = okta.GroupMemberships("testGroupMemberships", group_id=test_group.id, users=[ okta_user["test1"]["id"], okta_user["test2"]["id"], ]) ``` :param str resource_name: The name of the resource. :param GroupMembershipsArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_okta/group_memberships.py
__init__
pulumi/pulumi-okta
python
@overload def __init__(__self__, resource_name: str, args: GroupMembershipsArgs, opts: Optional[pulumi.ResourceOptions]=None): '\n Resource to manage a set of memberships for a specific group.\n\n This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource.\n\n When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_okta as okta\n\n test_group = okta.group.Group("testGroup", description="testing, testing")\n test_group_memberships = okta.GroupMemberships("testGroupMemberships",\n group_id=test_group.id,\n users=[\n okta_user["test1"]["id"],\n okta_user["test2"]["id"],\n ])\n ```\n\n :param str resource_name: The name of the resource.\n :param GroupMembershipsArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, group_id: Optional[pulumi.Input[str]]=None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None) -> 'GroupMemberships': "\n Get an existing GroupMemberships resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _GroupMembershipsState.__new__(_GroupMembershipsState) __props__.__dict__['group_id'] = group_id __props__.__dict__['users'] = users return GroupMemberships(resource_name, opts=opts, __props__=__props__)
-1,442,683,605,509,781,800
Get an existing GroupMemberships resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.
sdk/python/pulumi_okta/group_memberships.py
get
pulumi/pulumi-okta
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, group_id: Optional[pulumi.Input[str]]=None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None) -> 'GroupMemberships': "\n Get an existing GroupMemberships resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _GroupMembershipsState.__new__(_GroupMembershipsState) __props__.__dict__['group_id'] = group_id __props__.__dict__['users'] = users return GroupMemberships(resource_name, opts=opts, __props__=__props__)
@property @pulumi.getter(name='groupId') def group_id(self) -> pulumi.Output[str]: '\n ID of a Okta group.\n ' return pulumi.get(self, 'group_id')
-3,855,980,625,715,913,000
ID of a Okta group.
sdk/python/pulumi_okta/group_memberships.py
group_id
pulumi/pulumi-okta
python
@property @pulumi.getter(name='groupId') def group_id(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'group_id')
@property @pulumi.getter def users(self) -> pulumi.Output[Sequence[str]]: '\n The list of Okta user IDs which the group should have membership managed for.\n ' return pulumi.get(self, 'users')
-9,122,935,689,198,096,000
The list of Okta user IDs which the group should have membership managed for.
sdk/python/pulumi_okta/group_memberships.py
users
pulumi/pulumi-okta
python
@property @pulumi.getter def users(self) -> pulumi.Output[Sequence[str]]: '\n \n ' return pulumi.get(self, 'users')
def build_grid(hyperparameters): 'Build a grid represented as a list of parameter dictionaries.' parameter_dicts = [] for parameters in product(*hyperparameters.values()): parameter_tuples = zip(hyperparameters.keys(), parameters) parameter_dict = dict(parameter_tuples) parameter_dicts.append(parameter_dict) return parameter_dicts
5,178,821,570,779,621,000
Build a grid represented as a list of parameter dictionaries.
autotabular/metalearning/optimizers/optimizer_base.py
build_grid
Fanxingye/Autotabular
python
def build_grid(hyperparameters): parameter_dicts = [] for parameters in product(*hyperparameters.values()): parameter_tuples = zip(hyperparameters.keys(), parameters) parameter_dict = dict(parameter_tuples) parameter_dicts.append(parameter_dict) return parameter_dicts
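A minimal usage sketch of the `build_grid` helper recorded above. The flattened body does not show its imports; `product` is assumed to come from `itertools`, as the call pattern suggests.

```python
# Sketch, assuming `product` is itertools.product as in the original optimizer module.
from itertools import product

def build_grid(hyperparameters):
    'Build a grid represented as a list of parameter dictionaries.'
    parameter_dicts = []
    for parameters in product(*hyperparameters.values()):
        parameter_dicts.append(dict(zip(hyperparameters.keys(), parameters)))
    return parameter_dicts

print(build_grid({'lr': [0.01, 0.1], 'depth': [3, 5]}))
# [{'lr': 0.01, 'depth': 3}, {'lr': 0.01, 'depth': 5},
#  {'lr': 0.1, 'depth': 3}, {'lr': 0.1, 'depth': 5}]
```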
def test_invalid_unicode_in_build_file(self): 'Demonstrate that unicode characters causing parse errors raise real parse errors.' self.add_to_build_file('BUILD', dedent("\n jvm_binary(name = ‘hello’, # Parse error due to smart quotes (non ascii characters)\n source = 'HelloWorld.java'\n main = 'foo.HelloWorld',\n )\n ")) build_file = self.create_buildfile('BUILD') self.assert_parser_error(build_file, ('invalid character' if PY3 else 'invalid syntax'))
-6,823,476,443,962,574,000
Demonstrate that unicode characters causing parse errors raise real parse errors.
tests/python/pants_test/build_graph/test_build_file_parser.py
test_invalid_unicode_in_build_file
omerzach/pants
python
def test_invalid_unicode_in_build_file(self): self.add_to_build_file('BUILD', dedent("\n jvm_binary(name = ‘hello’, # Parse error due to smart quotes (non ascii characters)\n source = 'HelloWorld.java'\n main = 'foo.HelloWorld',\n )\n ")) build_file = self.create_buildfile('BUILD') self.assert_parser_error(build_file, ('invalid character' if PY3 else 'invalid syntax'))
def test_unicode_string_in_build_file(self): 'Demonstrates that a string containing unicode should work in a BUILD file.' self.add_to_build_file('BUILD', dedent("\n java_library(\n name='foo',\n sources=['א.java']\n )\n ")) build_file = self.create_buildfile('BUILD') self.build_file_parser.parse_build_file(build_file)
-2,613,146,341,876,287,000
Demonstrates that a string containing unicode should work in a BUILD file.
tests/python/pants_test/build_graph/test_build_file_parser.py
test_unicode_string_in_build_file
omerzach/pants
python
def test_unicode_string_in_build_file(self): self.add_to_build_file('BUILD', dedent("\n java_library(\n name='foo',\n sources=['א.java']\n )\n ")) build_file = self.create_buildfile('BUILD') self.build_file_parser.parse_build_file(build_file)
def test_build_file_parser_error_hierarcy(self): 'Exception handling code depends on the fact that all explicit exceptions from BuildFileParser\n are subclassed from the BuildFileParserError base class.\n ' def assert_build_file_parser_error(e): self.assertIsInstance(e, BuildFileParser.BuildFileParserError) assert_build_file_parser_error(BuildFileParser.BuildFileScanError()) assert_build_file_parser_error(BuildFileParser.AddressableConflictException()) assert_build_file_parser_error(BuildFileParser.SiblingConflictException()) assert_build_file_parser_error(BuildFileParser.ParseError()) assert_build_file_parser_error(BuildFileParser.ExecuteError())
1,864,469,725,580,416,000
Exception handling code depends on the fact that all explicit exceptions from BuildFileParser are subclassed from the BuildFileParserError base class.
tests/python/pants_test/build_graph/test_build_file_parser.py
test_build_file_parser_error_hierarcy
omerzach/pants
python
def test_build_file_parser_error_hierarcy(self): 'Exception handling code depends on the fact that all explicit exceptions from BuildFileParser\n are subclassed from the BuildFileParserError base class.\n ' def assert_build_file_parser_error(e): self.assertIsInstance(e, BuildFileParser.BuildFileParserError) assert_build_file_parser_error(BuildFileParser.BuildFileScanError()) assert_build_file_parser_error(BuildFileParser.AddressableConflictException()) assert_build_file_parser_error(BuildFileParser.SiblingConflictException()) assert_build_file_parser_error(BuildFileParser.ParseError()) assert_build_file_parser_error(BuildFileParser.ExecuteError())
def gen_attributes(rp_attributes): "Generate list of attributes for the API request.\n\n Example of input list:\n ['tag_name:tag_value1', 'tag_value2']\n Output of the function for the given input list:\n [{'key': 'tag_name', 'value': 'tag_value1'}, {'value': 'tag_value2'}]\n\n :param rp_attributes: List of attributes(tags)\n :return: Correctly created list of dictionaries\n to be passed to RP\n " attrs = [] for rp_attr in rp_attributes: try: (key, value) = rp_attr.split(':') attr_dict = {'key': key, 'value': value} except ValueError as exc: logger.debug(str(exc)) attr_dict = {'value': rp_attr} if all(attr_dict.values()): attrs.append(attr_dict) continue logger.debug('Failed to process "{0}" attribute, attribute value should not be empty.'.format(rp_attr)) return attrs
-4,259,652,237,405,757,000
Generate list of attributes for the API request. Example of input list: ['tag_name:tag_value1', 'tag_value2'] Output of the function for the given input list: [{'key': 'tag_name', 'value': 'tag_value1'}, {'value': 'tag_value2'}] :param rp_attributes: List of attributes(tags) :return: Correctly created list of dictionaries to be passed to RP
reportportal_client/helpers.py
gen_attributes
jyejare/client-Python
python
def gen_attributes(rp_attributes): "Generate list of attributes for the API request.\n\n Example of input list:\n ['tag_name:tag_value1', 'tag_value2']\n Output of the function for the given input list:\n [{'key': 'tag_name', 'value': 'tag_value1'}, {'value': 'tag_value2'}]\n\n :param rp_attributes: List of attributes(tags)\n :return: Correctly created list of dictionaries\n to be passed to RP\n " attrs = [] for rp_attr in rp_attributes: try: (key, value) = rp_attr.split(':') attr_dict = {'key': key, 'value': value} except ValueError as exc: logger.debug(str(exc)) attr_dict = {'value': rp_attr} if all(attr_dict.values()): attrs.append(attr_dict) continue logger.debug('Failed to process "{0}" attribute, attribute value should not be empty.'.format(rp_attr)) return attrs
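A short call illustrating the input/output contract documented for `gen_attributes`; the import path below follows the recorded module path and is an assumption, and the function's `logger` is a module-level logger not shown in the flattened body.

```python
# Hypothetical call mirroring the docstring's example plus one malformed attribute.
from reportportal_client.helpers import gen_attributes

print(gen_attributes(['tag_name:tag_value1', 'tag_value2', ':no_key']))
# [{'key': 'tag_name', 'value': 'tag_value1'}, {'value': 'tag_value2'}]
# ':no_key' is dropped: its parsed key is the empty string, so the dict fails
# the all(attr_dict.values()) check and only a debug message is logged.
```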
def get_launch_sys_attrs(): "Generate attributes for the launch containing system information.\n\n :return: dict {'os': 'Windows',\n 'cpu': 'AMD',\n 'machine': 'Windows10_pc'}\n " return {'os': system(), 'cpu': (processor() or 'unknown'), 'machine': machine(), 'system': True}
8,550,479,848,873,520,000
Generate attributes for the launch containing system information. :return: dict {'os': 'Windows', 'cpu': 'AMD', 'machine': 'Windows10_pc'}
reportportal_client/helpers.py
get_launch_sys_attrs
jyejare/client-Python
python
def get_launch_sys_attrs(): "Generate attributes for the launch containing system information.\n\n :return: dict {'os': 'Windows',\n 'cpu': 'AMD',\n 'machine': 'Windows10_pc'}\n " return {'os': system(), 'cpu': (processor() or 'unknown'), 'machine': machine(), 'system': True}
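For context, the `system()`, `processor()`, and `machine()` calls inside `get_launch_sys_attrs` are assumed to come from the standard-library `platform` module; those imports are not visible in the flattened body. A standalone sketch of the returned dictionary:

```python
# Output varies by host; processor() can return an empty string on some platforms.
from platform import machine, processor, system

launch_attrs = {
    'os': system(),                   # e.g. 'Linux' or 'Windows'
    'cpu': processor() or 'unknown',  # fall back when processor() is empty
    'machine': machine(),             # e.g. 'x86_64'
    'system': True,                   # marks these as system attributes
}
print(launch_attrs)
```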
def get_package_version(package_name): 'Get version of the given package.\n\n :param package_name: Name of the package\n :return: Version of the package\n ' try: package_version = get_distribution(package_name).version except DistributionNotFound: package_version = 'not found' return package_version
1,597,655,982,996,706,800
Get version of the given package. :param package_name: Name of the package :return: Version of the package
reportportal_client/helpers.py
get_package_version
jyejare/client-Python
python
def get_package_version(package_name): 'Get version of the given package.\n\n :param package_name: Name of the package\n :return: Version of the package\n ' try: package_version = get_distribution(package_name).version except DistributionNotFound: package_version = 'not found' return package_version
def neighbord_analysis(x_as, column=0): '\n\tGiven an array xas this function compute the distance between the elements the mean distance and the variance\n\t\n\tAuthor: Michele Monti\n\t\n\tArgs:\n\t\t\tx_as: the name of the list or data set that you want:\n\t\n\tKwargs:\n\t\tcolumn: is the column of the data set that you need to analyze\n\n\tReturns:\n\t\tmean_distance: the mean distance between neighbords, \n\t\tstd_dev: stdeviation of the distances between neighbords.\n\t\tdiff_neighbor: the difference between the first-neighbours in a list \n\t' x_as = np.array(x_as) correct_axis = x_as if (shape(x_as) > 1): correct_axis = x_as[:, column] diff_neighbor = [(itm - correct_axis[(idx - 1)]) for (idx, itm) in enumerate(correct_axis)][1:] mean_distance = np.mean(diff_neighbor) std_dev = np.std(diff_neighbor) return (diff_neighbor, mean_distance, std_dev)
3,318,488,411,711,542,300
Given an array xas this function compute the distance between the elements the mean distance and the variance Author: Michele Monti Args: x_as: the name of the list or data set that you want: Kwargs: column: is the column of the data set that you need to analyze Returns: mean_distance: the mean distance between neighbords, std_dev: stdeviation of the distances between neighbords. diff_neighbor: the difference between the first-neighbours in a list
amolf/numerical_data_analysis/NeighbourAnalysis.py
neighbord_analysis
Repythory/Libraries
python
def neighbord_analysis(x_as, column=0): '\n\tGiven an array xas this function compute the distance between the elements the mean distance and the variance\n\t\n\tAuthor: Michele Monti\n\t\n\tArgs:\n\t\t\tx_as: the name of the list or data set that you want:\n\t\n\tKwargs:\n\t\tcolumn: is the column of the data set that you need to analyze\n\n\tReturns:\n\t\tmean_distance: the mean distance between neighbords, \n\t\tstd_dev: stdeviation of the distances between neighbords.\n\t\tdiff_neighbor: the difference between the first-neighbours in a list \n\t' x_as = np.array(x_as) correct_axis = x_as if (shape(x_as) > 1): correct_axis = x_as[:, column] diff_neighbor = [(itm - correct_axis[(idx - 1)]) for (idx, itm) in enumerate(correct_axis)][1:] mean_distance = np.mean(diff_neighbor) std_dev = np.std(diff_neighbor) return (diff_neighbor, mean_distance, std_dev)
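The body of `neighbord_analysis` relies on `np` (NumPy) and a bare `shape` call imported elsewhere in the module, and its `shape(x_as) > 1` guard compares a shape tuple to an integer, which raises a TypeError on Python 3. A minimal NumPy sketch of the intended neighbour statistics:

```python
# Sketch of the intended computation for a 1-D series, not a call into the original function.
import numpy as np

x = np.array([0.0, 1.0, 3.0, 6.0, 10.0])
diff_neighbor = np.diff(x)            # distances between consecutive elements
mean_distance = diff_neighbor.mean()  # 2.5
std_dev = diff_neighbor.std()         # ~1.118
print(diff_neighbor, mean_distance, std_dev)
```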
def _setup(self): 'Sets up and resets flags before each test.' tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if (KerasBenchmark.local_flags is None): for flag_method in self.flag_methods: flag_method() flags.FLAGS(['foo']) for (k, v) in self.default_flags.items(): setattr(FLAGS, k, v) saved_flag_values = flagsaver.save_flag_values() KerasBenchmark.local_flags = saved_flag_values else: flagsaver.restore_flag_values(KerasBenchmark.local_flags)
2,129,715,044,460,102,000
Sets up and resets flags before each test.
official/resnet/keras/keras_benchmark.py
_setup
LinMiaoShuSheng/models
python
def _setup(self): tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if (KerasBenchmark.local_flags is None): for flag_method in self.flag_methods: flag_method() flags.FLAGS(['foo']) for (k, v) in self.default_flags.items(): setattr(FLAGS, k, v) saved_flag_values = flagsaver.save_flag_values() KerasBenchmark.local_flags = saved_flag_values else: flagsaver.restore_flag_values(KerasBenchmark.local_flags)
def _report_benchmark(self, stats, wall_time_sec, top_1_max=None, top_1_min=None, log_steps=None, total_batch_size=None, warmup=1): "Report benchmark results by writing to local protobuf file.\n\n Args:\n stats: dict returned from keras models with known entries.\n wall_time_sec: the during of the benchmark execution in seconds\n top_1_max: highest passing level for top_1 accuracy.\n top_1_min: lowest passing level for top_1 accuracy.\n log_steps: How often the log was created for stats['step_timestamp_log'].\n total_batch_size: Global batch-size.\n warmup: number of entries in stats['step_timestamp_log'] to ignore.\n " metrics = [] if ('accuracy_top_1' in stats): metrics.append({'name': 'accuracy_top_1', 'value': stats['accuracy_top_1'], 'min_value': top_1_min, 'max_value': top_1_max}) metrics.append({'name': 'top_1_train_accuracy', 'value': stats['training_accuracy_top_1']}) if (warmup and ('step_timestamp_log' in stats) and (len(stats['step_timestamp_log']) > warmup)): time_log = stats['step_timestamp_log'] elapsed = (time_log[(- 1)].timestamp - time_log[warmup].timestamp) num_examples = ((total_batch_size * log_steps) * ((len(time_log) - warmup) - 1)) examples_per_sec = (num_examples / elapsed) metrics.append({'name': 'exp_per_second', 'value': examples_per_sec}) if ('avg_exp_per_second' in stats): metrics.append({'name': 'avg_exp_per_second', 'value': stats['avg_exp_per_second']}) self.report_benchmark(iters=(- 1), wall_time=wall_time_sec, metrics=metrics)
1,068,746,809,112,859,500
Report benchmark results by writing to local protobuf file. Args: stats: dict returned from keras models with known entries. wall_time_sec: the during of the benchmark execution in seconds top_1_max: highest passing level for top_1 accuracy. top_1_min: lowest passing level for top_1 accuracy. log_steps: How often the log was created for stats['step_timestamp_log']. total_batch_size: Global batch-size. warmup: number of entries in stats['step_timestamp_log'] to ignore.
official/resnet/keras/keras_benchmark.py
_report_benchmark
LinMiaoShuSheng/models
python
def _report_benchmark(self, stats, wall_time_sec, top_1_max=None, top_1_min=None, log_steps=None, total_batch_size=None, warmup=1): "Report benchmark results by writing to local protobuf file.\n\n Args:\n stats: dict returned from keras models with known entries.\n wall_time_sec: the during of the benchmark execution in seconds\n top_1_max: highest passing level for top_1 accuracy.\n top_1_min: lowest passing level for top_1 accuracy.\n log_steps: How often the log was created for stats['step_timestamp_log'].\n total_batch_size: Global batch-size.\n warmup: number of entries in stats['step_timestamp_log'] to ignore.\n " metrics = [] if ('accuracy_top_1' in stats): metrics.append({'name': 'accuracy_top_1', 'value': stats['accuracy_top_1'], 'min_value': top_1_min, 'max_value': top_1_max}) metrics.append({'name': 'top_1_train_accuracy', 'value': stats['training_accuracy_top_1']}) if (warmup and ('step_timestamp_log' in stats) and (len(stats['step_timestamp_log']) > warmup)): time_log = stats['step_timestamp_log'] elapsed = (time_log[(- 1)].timestamp - time_log[warmup].timestamp) num_examples = ((total_batch_size * log_steps) * ((len(time_log) - warmup) - 1)) examples_per_sec = (num_examples / elapsed) metrics.append({'name': 'exp_per_second', 'value': examples_per_sec}) if ('avg_exp_per_second' in stats): metrics.append({'name': 'avg_exp_per_second', 'value': stats['avg_exp_per_second']}) self.report_benchmark(iters=(- 1), wall_time=wall_time_sec, metrics=metrics)
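As a worked example of the throughput arithmetic in `_report_benchmark` above, with hypothetical numbers: for a global batch size of 128, `log_steps=100`, `warmup=1`, and a `step_timestamp_log` with 5 entries whose last entry is 40 seconds after the warmup entry, `num_examples = 128 * 100 * (5 - 1 - 1) = 38,400` and `exp_per_second = 38,400 / 40 = 960`.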
def setup_package(): 'Run on testing package.' oauth2client.util.positional_parameters_enforcement = 'EXCEPTION'
7,842,196,517,937,913,000
Run on testing package.
tests/__init__.py
setup_package
1ap/google-api-python-client
python
def setup_package(): oauth2client.util.positional_parameters_enforcement = 'EXCEPTION'
def initialize_flow(self, img): ' Flow is represented as difference between two coordinate grids flow = coords1 - coords0' (N, C, H, W) = img.shape coords0 = coords_grid(N, (H // 8), (W // 8)).to(img.device) coords1 = coords_grid(N, (H // 8), (W // 8)).to(img.device) return (coords0, coords1)
5,953,690,645,597,147,000
Flow is represented as difference between two coordinate grids flow = coords1 - coords0
nets/raft_core/backraft.py
initialize_flow
aharley/track_check_repeat
python
def initialize_flow(self, img): ' ' (N, C, H, W) = img.shape coords0 = coords_grid(N, (H // 8), (W // 8)).to(img.device) coords1 = coords_grid(N, (H // 8), (W // 8)).to(img.device) return (coords0, coords1)
def upsample_flow(self, flow, mask): ' Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination ' (N, _, H, W) = flow.shape mask = mask.view(N, 1, 9, 8, 8, H, W) mask = torch.softmax(mask, dim=2) up_flow = F.unfold((8 * flow), [3, 3], padding=1) up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) up_flow = torch.sum((mask * up_flow), dim=2) up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) return up_flow.reshape(N, 2, (8 * H), (8 * W))
5,758,413,022,218,375,000
Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination
nets/raft_core/backraft.py
upsample_flow
aharley/track_check_repeat
python
def upsample_flow(self, flow, mask): ' ' (N, _, H, W) = flow.shape mask = mask.view(N, 1, 9, 8, 8, H, W) mask = torch.softmax(mask, dim=2) up_flow = F.unfold((8 * flow), [3, 3], padding=1) up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) up_flow = torch.sum((mask * up_flow), dim=2) up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) return up_flow.reshape(N, 2, (8 * H), (8 * W))
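A small shape check of the convex-combination upsampling performed by `upsample_flow`; this sketch assumes PyTorch and uses random placeholder tensors rather than real RAFT outputs.

```python
# Each fine pixel is a softmax-weighted (convex) combination of its 3x3 coarse neighbourhood.
import torch
import torch.nn.functional as F

N, H, W = 1, 6, 8                        # coarse grid, 1/8 of full resolution
flow = torch.randn(N, 2, H, W)           # coarse flow field
mask = torch.randn(N, 9 * 8 * 8, H, W)   # raw (pre-softmax) combination weights

mask = mask.view(N, 1, 9, 8, 8, H, W).softmax(dim=2)
up = F.unfold(8 * flow, [3, 3], padding=1).view(N, 2, 9, 1, 1, H, W)
up = (mask * up).sum(dim=2)              # N, 2, 8, 8, H, W
up = up.permute(0, 1, 4, 2, 5, 3)        # N, 2, H, 8, W, 8
print(up.reshape(N, 2, 8 * H, 8 * W).shape)  # torch.Size([1, 2, 48, 64])
```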
def forward(self, image1): ' get featmap for one frame ' image1 = ((2 * (image1 / 255.0)) - 1.0) image1 = image1.contiguous() hdim = self.hidden_dim cdim = self.context_dim with autocast(enabled=self.args.mixed_precision): fmap1 = self.fnet(image1) fmap1 = fmap1.float() return fmap1
9,102,215,985,324,929,000
get featmap for one frame
nets/raft_core/backraft.py
forward
aharley/track_check_repeat
python
def forward(self, image1): ' ' image1 = ((2 * (image1 / 255.0)) - 1.0) image1 = image1.contiguous() hdim = self.hidden_dim cdim = self.context_dim with autocast(enabled=self.args.mixed_precision): fmap1 = self.fnet(image1) fmap1 = fmap1.float() return fmap1
def old_forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False): ' Estimate optical flow between pair of frames ' image1 = ((2 * (image1 / 255.0)) - 1.0) image2 = ((2 * (image2 / 255.0)) - 1.0) image1 = image1.contiguous() image2 = image2.contiguous() hdim = self.hidden_dim cdim = self.context_dim with autocast(enabled=self.args.mixed_precision): (fmap1, fmap2) = self.fnet([image1, image2]) fmap1 = fmap1.float() fmap2 = fmap2.float() if self.args.alternate_corr: corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) else: corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) with autocast(enabled=self.args.mixed_precision): cnet = self.cnet(image1) (net, inp) = torch.split(cnet, [hdim, cdim], dim=1) net = torch.tanh(net) inp = torch.relu(inp) (coords0, coords1) = self.initialize_flow(image1) if (flow_init is not None): coords1 = (coords1 + flow_init) flow_predictions = [] for itr in range(iters): coords1 = coords1.detach() corr = corr_fn(coords1) flow = (coords1 - coords0) with autocast(enabled=self.args.mixed_precision): (net, up_mask, delta_flow) = self.update_block(net, inp, corr, flow) coords1 = (coords1 + delta_flow) if (up_mask is None): flow_up = upflow8((coords1 - coords0)) else: flow_up = self.upsample_flow((coords1 - coords0), up_mask) flow_predictions.append(flow_up) if test_mode: corr = corr_fn(coords1) feat = inp return ((coords1 - coords0), flow_up, (feat, fmap1, fmap2)) return flow_predictions
-4,838,898,532,510,094,000
Estimate optical flow between pair of frames
nets/raft_core/backraft.py
old_forward
aharley/track_check_repeat
python
def old_forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False): ' ' image1 = ((2 * (image1 / 255.0)) - 1.0) image2 = ((2 * (image2 / 255.0)) - 1.0) image1 = image1.contiguous() image2 = image2.contiguous() hdim = self.hidden_dim cdim = self.context_dim with autocast(enabled=self.args.mixed_precision): (fmap1, fmap2) = self.fnet([image1, image2]) fmap1 = fmap1.float() fmap2 = fmap2.float() if self.args.alternate_corr: corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) else: corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) with autocast(enabled=self.args.mixed_precision): cnet = self.cnet(image1) (net, inp) = torch.split(cnet, [hdim, cdim], dim=1) net = torch.tanh(net) inp = torch.relu(inp) (coords0, coords1) = self.initialize_flow(image1) if (flow_init is not None): coords1 = (coords1 + flow_init) flow_predictions = [] for itr in range(iters): coords1 = coords1.detach() corr = corr_fn(coords1) flow = (coords1 - coords0) with autocast(enabled=self.args.mixed_precision): (net, up_mask, delta_flow) = self.update_block(net, inp, corr, flow) coords1 = (coords1 + delta_flow) if (up_mask is None): flow_up = upflow8((coords1 - coords0)) else: flow_up = self.upsample_flow((coords1 - coords0), up_mask) flow_predictions.append(flow_up) if test_mode: corr = corr_fn(coords1) feat = inp return ((coords1 - coords0), flow_up, (feat, fmap1, fmap2)) return flow_predictions
def start(self, initialization_hook: Optional[Callable[([], None)]]=None): 'Starts the worker group.' self.worker_group = WorkerGroup(self._num_workers, self._num_cpus_per_worker, self._num_gpus_per_worker) if initialization_hook: self.worker_group.execute(initialization_hook) self._backend.on_start(self.worker_group, self._backend_config)
4,632,527,791,434,311,000
Starts the worker group.
python/ray/util/sgd/v2/backends/backend.py
start
cuongnvan/ray
python
def start(self, initialization_hook: Optional[Callable[([], None)]]=None): self.worker_group = WorkerGroup(self._num_workers, self._num_cpus_per_worker, self._num_gpus_per_worker) if initialization_hook: self.worker_group.execute(initialization_hook) self._backend.on_start(self.worker_group, self._backend_config)
def start_training(self, train_func: Callable[([], T)]) -> None: 'Executes a training function on all workers in a separate thread.\n\n ``finish_training`` should be called after this.\n\n Args:\n train_func (Callable): The training function to run on each worker.\n ' def initialize_session(world_rank, train_func): try: init_session(training_func=train_func, world_rank=world_rank) except ValueError: raise SGDBackendError('Attempting to start training but a previous training run is still ongoing. You must call `finish_training` before calling `start_training` again.') futures = [] for world_rank in range(len(self.worker_group)): futures.append(self.worker_group.execute_single_async(world_rank, initialize_session, world_rank=world_rank, train_func=train_func)) ray.get(futures) def train_async(): session = get_session() session.start() self.worker_group.execute_async(train_async)
-328,435,917,548,396,860
Executes a training function on all workers in a separate thread. ``finish_training`` should be called after this. Args: train_func (Callable): The training function to run on each worker.
python/ray/util/sgd/v2/backends/backend.py
start_training
cuongnvan/ray
python
def start_training(self, train_func: Callable[([], T)]) -> None: 'Executes a training function on all workers in a separate thread.\n\n ``finish_training`` should be called after this.\n\n Args:\n train_func (Callable): The training function to run on each worker.\n ' def initialize_session(world_rank, train_func): try: init_session(training_func=train_func, world_rank=world_rank) except ValueError: raise SGDBackendError('Attempting to start training but a previous training run is still ongoing. You must call `finish_training` before calling `start_training` again.') futures = [] for world_rank in range(len(self.worker_group)): futures.append(self.worker_group.execute_single_async(world_rank, initialize_session, world_rank=world_rank, train_func=train_func)) ray.get(futures) def train_async(): session = get_session() session.start() self.worker_group.execute_async(train_async)
def fetch_next_result(self) -> Optional[List[Dict]]: 'Fetch next results produced by ``sgd.report()`` from each worker.\n\n Assumes ``start_training`` has already been called.\n\n Returns:\n A list of dictionaries of values passed to ``sgd.report()`` from\n each worker. Each item corresponds to an intermediate result\n a single worker. If there are no more items to fetch,\n returns None.\n ' def get_next(): try: session = get_session() except ValueError: raise SGDBackendError('`fetch_next_result` has been called before `start_training`. Please call `start_training` before `fetch_next_result`.') try: result = session.get_next() except RuntimeError: raise SGDBackendError('`fetch_next_result` has been called before `start_training`. Please call `start_training` before `fetch_next_result`.') return result futures = self.worker_group.execute_async(get_next) results = self.get_with_failure_handling(futures) if any(((r is None) for r in results)): if (not all(((r is None) for r in results))): raise RuntimeError("Some workers returned results while others didn't. Make sure that `sgd.report()` is called the same number of times on all workers.") else: results = None return results
9,198,031,502,204,319,000
Fetch next results produced by ``sgd.report()`` from each worker. Assumes ``start_training`` has already been called. Returns: A list of dictionaries of values passed to ``sgd.report()`` from each worker. Each item corresponds to an intermediate result a single worker. If there are no more items to fetch, returns None.
python/ray/util/sgd/v2/backends/backend.py
fetch_next_result
cuongnvan/ray
python
def fetch_next_result(self) -> Optional[List[Dict]]: 'Fetch next results produced by ``sgd.report()`` from each worker.\n\n Assumes ``start_training`` has already been called.\n\n Returns:\n A list of dictionaries of values passed to ``sgd.report()`` from\n each worker. Each item corresponds to an intermediate result\n a single worker. If there are no more items to fetch,\n returns None.\n ' def get_next(): try: session = get_session() except ValueError: raise SGDBackendError('`fetch_next_result` has been called before `start_training`. Please call `start_training` before `fetch_next_result`.') try: result = session.get_next() except RuntimeError: raise SGDBackendError('`fetch_next_result` has been called before `start_training`. Please call `start_training` before `fetch_next_result`.') return result futures = self.worker_group.execute_async(get_next) results = self.get_with_failure_handling(futures) if any(((r is None) for r in results)): if (not all(((r is None) for r in results))): raise RuntimeError("Some workers returned results while others didn't. Make sure that `sgd.report()` is called the same number of times on all workers.") else: results = None return results
def finish_training(self) -> List[T]: 'Finish training and return final results. Propagate any exceptions.\n\n Blocks until training is finished on all workers.\n\n Assumes `start_training` has already been called.\n\n Returns:\n A list of return values from calling ``train_func`` on each worker.\n Each item corresponds to the return value from a single worker.\n ' def end_training(): try: session = get_session() except ValueError: raise SGDBackendError('`finish_training` has been called before `start_training`. Please call `start_training` before `finish_training`.') try: output = session.finish() finally: shutdown_session() return output futures = self.worker_group.execute_async(end_training) return self.get_with_failure_handling(futures)
-105,494,462,415,802,850
Finish training and return final results. Propagate any exceptions. Blocks until training is finished on all workers. Assumes `start_training` has already been called. Returns: A list of return values from calling ``train_func`` on each worker. Each item corresponds to the return value from a single worker.
python/ray/util/sgd/v2/backends/backend.py
finish_training
cuongnvan/ray
python
def finish_training(self) -> List[T]: 'Finish training and return final results. Propagate any exceptions.\n\n Blocks until training is finished on all workers.\n\n Assumes `start_training` has already been called.\n\n Returns:\n A list of return values from calling ``train_func`` on each worker.\n Each item corresponds to the return value from a single worker.\n ' def end_training(): try: session = get_session() except ValueError: raise SGDBackendError('`finish_training` has been called before `start_training`. Please call `start_training` before `finish_training`.') try: output = session.finish() finally: shutdown_session() return output futures = self.worker_group.execute_async(end_training) return self.get_with_failure_handling(futures)
def get_with_failure_handling(self, remote_values): 'Gets the remote values while handling for worker failures.\n\n Args:\n remote_values (list): List of object refs representing functions\n that may fail in the middle of execution. For example, running\n a SGD training loop in multiple parallel actor calls.\n\n Returns:\n The resolved objects represented by the passed in ObjectRefs.\n ' unfinished = remote_values try: while (len(unfinished) > 0): (finished, unfinished) = ray.wait(unfinished) ray.get(finished) except RayActorError as exc: logger.exception(str(exc)) self.handle_failure() return return ray.get(remote_values)
-3,861,662,312,918,846,500
Gets the remote values while handling for worker failures. Args: remote_values (list): List of object refs representing functions that may fail in the middle of execution. For example, running a SGD training loop in multiple parallel actor calls. Returns: The resolved objects represented by the passed in ObjectRefs.
python/ray/util/sgd/v2/backends/backend.py
get_with_failure_handling
cuongnvan/ray
python
def get_with_failure_handling(self, remote_values): 'Gets the remote values while handling for worker failures.\n\n Args:\n remote_values (list): List of object refs representing functions\n that may fail in the middle of execution. For example, running\n a SGD training loop in multiple parallel actor calls.\n\n Returns:\n The resolved objects represented by the passed in ObjectRefs.\n ' unfinished = remote_values try: while (len(unfinished) > 0): (finished, unfinished) = ray.wait(unfinished) ray.get(finished) except RayActorError as exc: logger.exception(str(exc)) self.handle_failure() return return ray.get(remote_values)
def shutdown(self): 'Shuts down the workers in the worker group.' try: self._backend.on_shutdown(self.worker_group, self._backend_config) except RayActorError: logger.warning('Graceful shutdown of backend failed. This is expected if one of the workers has crashed.') self.worker_group.shutdown() self.worker_group = InactiveWorkerGroup()
7,046,521,673,996,014,000
Shuts down the workers in the worker group.
python/ray/util/sgd/v2/backends/backend.py
shutdown
cuongnvan/ray
python
def shutdown(self): try: self._backend.on_shutdown(self.worker_group, self._backend_config) except RayActorError: logger.warning('Graceful shutdown of backend failed. This is expected if one of the workers has crashed.') self.worker_group.shutdown() self.worker_group = InactiveWorkerGroup()
def test_create_user(self): '\n Test User model can create a user successfully\n ' self.assertIsInstance(User.objects.create_user(username='username', email='[email protected]', password='password'), User)
-8,015,587,724,313,469,000
Test User model can create a user successfully
authors/apps/authentication/tests/test_create_user.py
test_create_user
andela/ah-bird-box
python
def test_create_user(self): '\n \n ' self.assertIsInstance(User.objects.create_user(username='username', email='[email protected]', password='password'), User)
def test_random_color_py(degrees=(0.1, 1.9), plot=False): '\n Test Python RandomColor\n ' logger.info('Test RandomColor') data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.ToTensor()]) ds_original = data.map(operations=transforms_original, input_columns='image') ds_original = ds_original.batch(512) for (idx, (image, _)) in enumerate(ds_original): if (idx == 0): images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_original = np.append(images_original, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_random_color = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.RandomColor(degrees=degrees), F.ToTensor()]) ds_random_color = data.map(operations=transforms_random_color, input_columns='image') ds_random_color = ds_random_color.batch(512) for (idx, (image, _)) in enumerate(ds_random_color): if (idx == 0): images_random_color = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_random_color = np.append(images_random_color, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): mse[i] = diff_mse(images_random_color[i], images_original[i]) logger.info('MSE= {}'.format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_random_color)
-2,311,255,942,487,829,500
Test Python RandomColor
tests/ut/python/dataset/test_random_color.py
test_random_color_py
king4arabs/mindspore
python
def test_random_color_py(degrees=(0.1, 1.9), plot=False): '\n \n ' logger.info('Test RandomColor') data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.ToTensor()]) ds_original = data.map(operations=transforms_original, input_columns='image') ds_original = ds_original.batch(512) for (idx, (image, _)) in enumerate(ds_original): if (idx == 0): images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_original = np.append(images_original, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_random_color = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.RandomColor(degrees=degrees), F.ToTensor()]) ds_random_color = data.map(operations=transforms_random_color, input_columns='image') ds_random_color = ds_random_color.batch(512) for (idx, (image, _)) in enumerate(ds_random_color): if (idx == 0): images_random_color = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_random_color = np.append(images_random_color, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): mse[i] = diff_mse(images_random_color[i], images_original[i]) logger.info('MSE= {}'.format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_random_color)
def test_random_color_c(degrees=(0.1, 1.9), plot=False, run_golden=True): '\n Test Cpp RandomColor\n ' logger.info('test_random_color_op') original_seed = config_get_set_seed(10) original_num_parallel_workers = config_get_set_num_parallel_workers(1) data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) if (degrees is None): c_op = vision.RandomColor() else: c_op = vision.RandomColor(degrees) data1 = data1.map(operations=[vision.Decode()], input_columns=['image']) data2 = data2.map(operations=[vision.Decode(), c_op], input_columns=['image']) image_random_color_op = [] image = [] for (item1, item2) in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), data2.create_dict_iterator(num_epochs=1, output_numpy=True)): actual = item1['image'] expected = item2['image'] image.append(actual) image_random_color_op.append(expected) if run_golden: filename = 'random_color_op_02_result.npz' save_and_check_md5(data2, filename, generate_golden=GENERATE_GOLDEN) if plot: visualize_list(image, image_random_color_op) ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers)
8,684,165,931,818,608,000
Test Cpp RandomColor
tests/ut/python/dataset/test_random_color.py
test_random_color_c
king4arabs/mindspore
python
def test_random_color_c(degrees=(0.1, 1.9), plot=False, run_golden=True): '\n \n ' logger.info('test_random_color_op') original_seed = config_get_set_seed(10) original_num_parallel_workers = config_get_set_num_parallel_workers(1) data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) if (degrees is None): c_op = vision.RandomColor() else: c_op = vision.RandomColor(degrees) data1 = data1.map(operations=[vision.Decode()], input_columns=['image']) data2 = data2.map(operations=[vision.Decode(), c_op], input_columns=['image']) image_random_color_op = [] image = [] for (item1, item2) in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), data2.create_dict_iterator(num_epochs=1, output_numpy=True)): actual = item1['image'] expected = item2['image'] image.append(actual) image_random_color_op.append(expected) if run_golden: filename = 'random_color_op_02_result.npz' save_and_check_md5(data2, filename, generate_golden=GENERATE_GOLDEN) if plot: visualize_list(image, image_random_color_op) ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_color_py_md5(): '\n Test Python RandomColor with md5 check\n ' logger.info('Test RandomColor with md5 check') original_seed = config_get_set_seed(10) original_num_parallel_workers = config_get_set_num_parallel_workers(1) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.RandomColor((2.0, 2.5)), F.ToTensor()]) data = data.map(operations=transforms, input_columns='image') filename = 'random_color_01_result.npz' save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers)
8,487,769,337,350,655,000
Test Python RandomColor with md5 check
tests/ut/python/dataset/test_random_color.py
test_random_color_py_md5
king4arabs/mindspore
python
def test_random_color_py_md5(): '\n \n ' logger.info('Test RandomColor with md5 check') original_seed = config_get_set_seed(10) original_num_parallel_workers = config_get_set_num_parallel_workers(1) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.RandomColor((2.0, 2.5)), F.ToTensor()]) data = data.map(operations=transforms, input_columns='image') filename = 'random_color_01_result.npz' save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_compare_random_color_op(degrees=None, plot=False): '\n Compare Random Color op in Python and Cpp\n ' logger.info('test_random_color_op') original_seed = config_get_set_seed(5) original_num_parallel_workers = config_get_set_num_parallel_workers(1) data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) if (degrees is None): c_op = vision.RandomColor() p_op = F.RandomColor() else: c_op = vision.RandomColor(degrees) p_op = F.RandomColor(degrees) transforms_random_color_py = mindspore.dataset.transforms.py_transforms.Compose([(lambda img: img.astype(np.uint8)), F.ToPIL(), p_op, np.array]) data1 = data1.map(operations=[vision.Decode(), c_op], input_columns=['image']) data2 = data2.map(operations=[vision.Decode()], input_columns=['image']) data2 = data2.map(operations=transforms_random_color_py, input_columns=['image']) image_random_color_op = [] image = [] for (item1, item2) in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), data2.create_dict_iterator(num_epochs=1, output_numpy=True)): actual = item1['image'] expected = item2['image'] image_random_color_op.append(actual) image.append(expected) assert (actual.shape == expected.shape) mse = diff_mse(actual, expected) logger.info('MSE= {}'.format(str(np.mean(mse)))) ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers) if plot: visualize_list(image, image_random_color_op)
-6,266,641,206,013,360,000
Compare Random Color op in Python and Cpp
tests/ut/python/dataset/test_random_color.py
test_compare_random_color_op
king4arabs/mindspore
python
def test_compare_random_color_op(degrees=None, plot=False): '\n \n ' logger.info('test_random_color_op') original_seed = config_get_set_seed(5) original_num_parallel_workers = config_get_set_num_parallel_workers(1) data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=['image'], shuffle=False) if (degrees is None): c_op = vision.RandomColor() p_op = F.RandomColor() else: c_op = vision.RandomColor(degrees) p_op = F.RandomColor(degrees) transforms_random_color_py = mindspore.dataset.transforms.py_transforms.Compose([(lambda img: img.astype(np.uint8)), F.ToPIL(), p_op, np.array]) data1 = data1.map(operations=[vision.Decode(), c_op], input_columns=['image']) data2 = data2.map(operations=[vision.Decode()], input_columns=['image']) data2 = data2.map(operations=transforms_random_color_py, input_columns=['image']) image_random_color_op = [] image = [] for (item1, item2) in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), data2.create_dict_iterator(num_epochs=1, output_numpy=True)): actual = item1['image'] expected = item2['image'] image_random_color_op.append(actual) image.append(expected) assert (actual.shape == expected.shape) mse = diff_mse(actual, expected) logger.info('MSE= {}'.format(str(np.mean(mse)))) ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers) if plot: visualize_list(image, image_random_color_op)
def test_random_color_c_errors(): '\n Test that Cpp RandomColor errors with bad input\n ' with pytest.raises(TypeError) as error_info: vision.RandomColor(12) assert ('degrees must be either a tuple or a list.' in str(error_info.value)) with pytest.raises(TypeError) as error_info: vision.RandomColor(('col', 3)) assert ("Argument degrees[0] with value col is not of type (<class 'int'>, <class 'float'>)." in str(error_info.value)) with pytest.raises(ValueError) as error_info: vision.RandomColor((0.9, 0.1)) assert ('degrees should be in (min,max) format. Got (max,min).' in str(error_info.value)) with pytest.raises(ValueError) as error_info: vision.RandomColor((0.9,)) assert ('degrees must be a sequence with length 2.' in str(error_info.value)) mnist_ds = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False) mnist_ds = mnist_ds.map(operations=vision.RandomColor(), input_columns='image') with pytest.raises(RuntimeError) as error_info: for _ in enumerate(mnist_ds): pass assert ('image shape is not <H,W,C> or channel is not 3' in str(error_info.value))
-8,695,921,805,919,751,000
Test that Cpp RandomColor errors with bad input
tests/ut/python/dataset/test_random_color.py
test_random_color_c_errors
king4arabs/mindspore
python
def test_random_color_c_errors(): '\n \n ' with pytest.raises(TypeError) as error_info: vision.RandomColor(12) assert ('degrees must be either a tuple or a list.' in str(error_info.value)) with pytest.raises(TypeError) as error_info: vision.RandomColor(('col', 3)) assert ("Argument degrees[0] with value col is not of type (<class 'int'>, <class 'float'>)." in str(error_info.value)) with pytest.raises(ValueError) as error_info: vision.RandomColor((0.9, 0.1)) assert ('degrees should be in (min,max) format. Got (max,min).' in str(error_info.value)) with pytest.raises(ValueError) as error_info: vision.RandomColor((0.9,)) assert ('degrees must be a sequence with length 2.' in str(error_info.value)) mnist_ds = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False) mnist_ds = mnist_ds.map(operations=vision.RandomColor(), input_columns='image') with pytest.raises(RuntimeError) as error_info: for _ in enumerate(mnist_ds): pass assert ('image shape is not <H,W,C> or channel is not 3' in str(error_info.value))
def __init__(self, resource_handle, create_op, name): 'Creates a _TreeEnsembleSavable object.\n\n Args:\n resource_handle: handle to the decision tree ensemble variable.\n create_op: the op to initialize the variable.\n name: the name to save the tree ensemble variable under.\n ' (stamp_token, serialized) = gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle) slice_spec = '' specs = [saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, (name + '_stamp')), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, (name + '_serialized'))] super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name) self._resource_handle = resource_handle self._create_op = create_op
-7,466,408,169,068,972,000
Creates a _TreeEnsembleSavable object. Args: resource_handle: handle to the decision tree ensemble variable. create_op: the op to initialize the variable. name: the name to save the tree ensemble variable under.
tensorflow/python/ops/boosted_trees_ops.py
__init__
AnyaTracy/tensorflow
python
def __init__(self, resource_handle, create_op, name): 'Creates a _TreeEnsembleSavable object.\n\n Args:\n resource_handle: handle to the decision tree ensemble variable.\n create_op: the op to initialize the variable.\n name: the name to save the tree ensemble variable under.\n ' (stamp_token, serialized) = gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle) slice_spec = specs = [saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, (name + '_stamp')), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, (name + '_serialized'))] super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name) self._resource_handle = resource_handle self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes): "Restores the associated tree ensemble from 'restored_tensors'.\n\n Args:\n restored_tensors: the tensors that were loaded from a checkpoint.\n unused_restored_shapes: the shapes this object should conform to after\n restore. Not meaningful for trees.\n\n Returns:\n The operation that restores the state of the tree ensemble variable.\n " with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self._resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])
4,755,641,117,312,512,000
Restores the associated tree ensemble from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree ensemble variable.
tensorflow/python/ops/boosted_trees_ops.py
restore
AnyaTracy/tensorflow
python
def restore(self, restored_tensors, unused_restored_shapes): "Restores the associated tree ensemble from 'restored_tensors'.\n\n Args:\n restored_tensors: the tensors that were loaded from a checkpoint.\n unused_restored_shapes: the shapes this object should conform to after\n restore. Not meaningful for trees.\n\n Returns:\n The operation that restores the state of the tree ensemble variable.\n " with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self._resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])
def get_stamp_token(self): 'Returns the current stamp token of the resource.' (stamp_token, _, _, _, _) = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle) return stamp_token
-9,195,282,269,080,987,000
Returns the current stamp token of the resource.
tensorflow/python/ops/boosted_trees_ops.py
get_stamp_token
AnyaTracy/tensorflow
python
def get_stamp_token(self): (stamp_token, _, _, _, _) = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle) return stamp_token
def get_states(self): 'Returns states of the tree ensemble.\n\n Returns:\n stamp_token, num_trees, num_finalized_trees, num_attempted_layers and\n range of the nodes in the latest layer.\n ' (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range) = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle) return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity(num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range'))
4,252,967,200,812,865,500
Returns states of the tree ensemble. Returns: stamp_token, num_trees, num_finalized_trees, num_attempted_layers and range of the nodes in the latest layer.
tensorflow/python/ops/boosted_trees_ops.py
get_states
AnyaTracy/tensorflow
python
def get_states(self): 'Returns states of the tree ensemble.\n\n Returns:\n stamp_token, num_trees, num_finalized_trees, num_attempted_layers and\n range of the nodes in the latest layer.\n ' (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range) = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle) return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity(num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range'))
def serialize(self): 'Serializes the ensemble into proto and returns the serialized proto.\n\n Returns:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n ' return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(self.resource_handle)
1,912,311,882,728,369
Serializes the ensemble into proto and returns the serialized proto. Returns: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto.
tensorflow/python/ops/boosted_trees_ops.py
serialize
AnyaTracy/tensorflow
python
def serialize(self): 'Serializes the ensemble into proto and returns the serialized proto.\n\n Returns:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n ' return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(self.resource_handle)
def deserialize(self, stamp_token, serialized_proto): 'Deserialize the input proto and resets the ensemble from it.\n\n Args:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n\n Returns:\n Operation (for dependencies).\n ' return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token, serialized_proto)
660,778,015,599,044,900
Deserialize the input proto and resets the ensemble from it. Args: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. Returns: Operation (for dependencies).
tensorflow/python/ops/boosted_trees_ops.py
deserialize
AnyaTracy/tensorflow
python
def deserialize(self, stamp_token, serialized_proto): 'Deserialize the input proto and resets the ensemble from it.\n\n Args:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n\n Returns:\n Operation (for dependencies).\n ' return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token, serialized_proto)
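A short sketch of how the serialize/deserialize pair above is typically combined to copy ensemble state; src_ensemble and dst_ensemble are assumed placeholders for already-constructed tree ensemble resources exposing the two methods shown:

# Assumed: src_ensemble and dst_ensemble expose serialize()/deserialize().
stamp_token, serialized_proto = src_ensemble.serialize()
restore_op = dst_ensemble.deserialize(stamp_token, serialized_proto)
# In graph mode the returned op must be run (e.g. sess.run(restore_op));
# under eager execution the deserialization takes effect immediately.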
def ZZZ(self): 'hardcoded/mock instance of the class' return CookieContainer()
7,398,721,178,004,803,000
hardcoded/mock instance of the class
release/stubs.min/System/Net/__init___parts/CookieContainer.py
ZZZ
tranconbv/ironpython-stubs
python
def ZZZ(self): return CookieContainer()
def Add(self, *__args): '\n Add(self: CookieContainer,cookie: Cookie)\n\n Adds a System.Net.Cookie to a System.Net.CookieContainer. This method uses the domain from the System.Net.Cookie to determine which domain collection to associate the \n\n System.Net.Cookie with.\n\n \n\n \n\n cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer.\n\n Add(self: CookieContainer,cookies: CookieCollection)\n\n Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer.\n\n \n\n cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer.\n\n Add(self: CookieContainer,uri: Uri,cookie: Cookie)\n\n Adds a System.Net.Cookie to the System.Net.CookieContainer for a particular URI.\n\n \n\n uri: The URI of the System.Net.Cookie to be added to the System.Net.CookieContainer.\n\n cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer.\n\n Add(self: CookieContainer,uri: Uri,cookies: CookieCollection)\n\n Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer for a particular URI.\n\n \n\n uri: The URI of the System.Net.CookieCollection to be added to the System.Net.CookieContainer.\n\n cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer.\n ' pass
-8,225,875,563,953,717,000
Add(self: CookieContainer,cookie: Cookie) Adds a System.Net.Cookie to a System.Net.CookieContainer. This method uses the domain from the System.Net.Cookie to determine which domain collection to associate the System.Net.Cookie with. cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer. Add(self: CookieContainer,cookies: CookieCollection) Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer. cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer. Add(self: CookieContainer,uri: Uri,cookie: Cookie) Adds a System.Net.Cookie to the System.Net.CookieContainer for a particular URI. uri: The URI of the System.Net.Cookie to be added to the System.Net.CookieContainer. cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer. Add(self: CookieContainer,uri: Uri,cookies: CookieCollection) Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer for a particular URI. uri: The URI of the System.Net.CookieCollection to be added to the System.Net.CookieContainer. cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer.
release/stubs.min/System/Net/__init___parts/CookieContainer.py
Add
tranconbv/ironpython-stubs
python
def Add(self, *__args): '\n Add(self: CookieContainer,cookie: Cookie)\n\n Adds a System.Net.Cookie to a System.Net.CookieContainer. This method uses the domain from the System.Net.Cookie to determine which domain collection to associate the \n\n System.Net.Cookie with.\n\n \n\n \n\n cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer.\n\n Add(self: CookieContainer,cookies: CookieCollection)\n\n Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer.\n\n \n\n cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer.\n\n Add(self: CookieContainer,uri: Uri,cookie: Cookie)\n\n Adds a System.Net.Cookie to the System.Net.CookieContainer for a particular URI.\n\n \n\n uri: The URI of the System.Net.Cookie to be added to the System.Net.CookieContainer.\n\n cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer.\n\n Add(self: CookieContainer,uri: Uri,cookies: CookieCollection)\n\n Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer for a particular URI.\n\n \n\n uri: The URI of the System.Net.CookieCollection to be added to the System.Net.CookieContainer.\n\n cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer.\n ' pass
def GetCookieHeader(self, uri): '\n GetCookieHeader(self: CookieContainer,uri: Uri) -> str\n\n \n\n Gets the HTTP cookie header that contains the HTTP cookies that represent the System.Net.Cookie instances that are associated with a specific URI.\n\n \n\n uri: The URI of the System.Net.Cookie instances desired.\n\n Returns: An HTTP cookie header,with strings representing System.Net.Cookie instances delimited by semicolons.\n ' pass
5,028,364,629,411,337,000
GetCookieHeader(self: CookieContainer,uri: Uri) -> str Gets the HTTP cookie header that contains the HTTP cookies that represent the System.Net.Cookie instances that are associated with a specific URI. uri: The URI of the System.Net.Cookie instances desired. Returns: An HTTP cookie header,with strings representing System.Net.Cookie instances delimited by semicolons.
release/stubs.min/System/Net/__init___parts/CookieContainer.py
GetCookieHeader
tranconbv/ironpython-stubs
python
def GetCookieHeader(self, uri): '\n GetCookieHeader(self: CookieContainer,uri: Uri) -> str\n\n \n\n Gets the HTTP cookie header that contains the HTTP cookies that represent the System.Net.Cookie instances that are associated with a specific URI.\n\n \n\n uri: The URI of the System.Net.Cookie instances desired.\n\n Returns: An HTTP cookie header,with strings representing System.Net.Cookie instances delimited by semicolons.\n ' pass
def GetCookies(self, uri): '\n GetCookies(self: CookieContainer,uri: Uri) -> CookieCollection\n\n \n\n Gets a System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI.\n\n \n\n uri: The URI of the System.Net.Cookie instances desired.\n\n Returns: A System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI.\n ' pass
-6,849,928,071,652,846,000
GetCookies(self: CookieContainer,uri: Uri) -> CookieCollection Gets a System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI. uri: The URI of the System.Net.Cookie instances desired. Returns: A System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI.
release/stubs.min/System/Net/__init___parts/CookieContainer.py
GetCookies
tranconbv/ironpython-stubs
python
def GetCookies(self, uri): '\n GetCookies(self: CookieContainer,uri: Uri) -> CookieCollection\n\n \n\n Gets a System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI.\n\n \n\n uri: The URI of the System.Net.Cookie instances desired.\n\n Returns: A System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI.\n ' pass
def SetCookies(self, uri, cookieHeader): '\n SetCookies(self: CookieContainer,uri: Uri,cookieHeader: str)\n\n Adds System.Net.Cookie instances for one or more cookies from an HTTP cookie header to the System.Net.CookieContainer for a specific URI.\n\n \n\n uri: The URI of the System.Net.CookieCollection.\n\n cookieHeader: The contents of an HTTP set-cookie header as returned by a HTTP server,with System.Net.Cookie instances delimited by commas.\n ' pass
767,938,559,518,995,000
SetCookies(self: CookieContainer,uri: Uri,cookieHeader: str) Adds System.Net.Cookie instances for one or more cookies from an HTTP cookie header to the System.Net.CookieContainer for a specific URI. uri: The URI of the System.Net.CookieCollection. cookieHeader: The contents of an HTTP set-cookie header as returned by a HTTP server,with System.Net.Cookie instances delimited by commas.
release/stubs.min/System/Net/__init___parts/CookieContainer.py
SetCookies
tranconbv/ironpython-stubs
python
def SetCookies(self, uri, cookieHeader): '\n SetCookies(self: CookieContainer,uri: Uri,cookieHeader: str)\n\n Adds System.Net.Cookie instances for one or more cookies from an HTTP cookie header to the System.Net.CookieContainer for a specific URI.\n\n \n\n uri: The URI of the System.Net.CookieCollection.\n\n cookieHeader: The contents of an HTTP set-cookie header as returned by a HTTP server,with System.Net.Cookie instances delimited by commas.\n ' pass
def __add__(self, *args): ' x.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+y ' pass
-6,471,137,132,733,238,000
x.__add__(y) <==> x+y
release/stubs.min/System/Net/__init___parts/CookieContainer.py
__add__
tranconbv/ironpython-stubs
python
def __add__(self, *args): ' ' pass
@staticmethod def __new__(self, capacity=None, perDomainCapacity=None, maxCookieSize=None): '\n __new__(cls: type)\n\n __new__(cls: type,capacity: int)\n\n __new__(cls: type,capacity: int,perDomainCapacity: int,maxCookieSize: int)\n ' pass
2,610,757,587,463,797,000
__new__(cls: type) __new__(cls: type,capacity: int) __new__(cls: type,capacity: int,perDomainCapacity: int,maxCookieSize: int)
release/stubs.min/System/Net/__init___parts/CookieContainer.py
__new__
tranconbv/ironpython-stubs
python
@staticmethod def __new__(self, capacity=None, perDomainCapacity=None, maxCookieSize=None): '\n __new__(cls: type)\n\n __new__(cls: type,capacity: int)\n\n __new__(cls: type,capacity: int,perDomainCapacity: int,maxCookieSize: int)\n ' pass
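Since these are IronPython stubs, a hedged usage sketch of the CookieContainer methods documented above might look as follows; it assumes the System assembly can be referenced by the interpreter, and the URL and cookie values are placeholders:

import clr
clr.AddReference('System')
from System import Uri
from System.Net import Cookie, CookieContainer

container = CookieContainer()                    # or CookieContainer(capacity)
uri = Uri('http://example.com/')
container.Add(uri, Cookie('session', 'abc123'))  # add a cookie for a URI
header = container.GetCookieHeader(uri)          # e.g. 'session=abc123'
cookies = container.GetCookies(uri)              # CookieCollection for the URI
container.SetCookies(uri, 'theme=dark; Path=/')  # parse a set-cookie style header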
@cache_json('cbsa_lookup.json') def cbsa_lookup(): '\n Construct a County->CBSA Lookup table from NBER data\n Returns: dict\n each key is a (State Code, County FIPS code) tuple\n each value is a (CBSA FIPS code, CBSA Name) tuple\n ' logging.info('Beginning CBSA lookup') cbsa_lookup = defaultdict(dict) download = requests.get(URL) decoded_content = download.content.decode('latin-1').encode('utf-8') reader = csv.reader(decoded_content.splitlines(), delimiter=',') next(reader) for row in reader: state_code = row[1] fipscounty = row[3][(- 3):] cbsa = row[4] cbsaname = row[5] cbsa_lookup[state_code][fipscounty] = (cbsa, cbsaname) return cbsa_lookup
-8,489,966,963,965,884,000
Construct a County->CBSA Lookup table from NBER data Returns: dict each key is a (State Code, County FIPS code) tuple each value is a (CBSA FIPS code, CBSA Name) tuple
datasets/nber_county_cbsa.py
cbsa_lookup
squatter1/skills-ml
python
@cache_json('cbsa_lookup.json') def cbsa_lookup(): '\n Construct a County->CBSA Lookup table from NBER data\n Returns: dict\n each key is a (State Code, County FIPS code) tuple\n each value is a (CBSA FIPS code, CBSA Name) tuple\n ' logging.info('Beginning CBSA lookup') cbsa_lookup = defaultdict(dict) download = requests.get(URL) decoded_content = download.content.decode('latin-1').encode('utf-8') reader = csv.reader(decoded_content.splitlines(), delimiter=',') next(reader) for row in reader: state_code = row[1] fipscounty = row[3][(- 3):] cbsa = row[4] cbsaname = row[5] cbsa_lookup[state_code][fipscounty] = (cbsa, cbsaname) return cbsa_lookup
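A hedged usage sketch of the returned structure: despite the tuple wording in the docstring, the code builds a nested dict keyed first by state code and then by the 3-digit county FIPS suffix (the specific keys below are hypothetical):

lookup = cbsa_lookup()
cbsa_code, cbsa_name = lookup['06']['075']   # hypothetical state/county pair
print(cbsa_code, cbsa_name)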
def gelu(x): "Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n " return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))
1,420,372,343,885,535,700
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415
models/SketchTransformer/models/networks.py
gelu
avalonstrel/SketchBERT
python
def gelu(x): "Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n " return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))
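The docstring mentions the tanh-based approximation used by OpenAI GPT; written out as code it would be:

import math
import torch

def gelu_tanh_approx(x):
    # Approximation quoted in the docstring above; it deviates slightly
    # from the exact erf-based form implemented by gelu().
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))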
def __init__(self, hidden_size, eps=1e-12): '\n Construct a layernorm module in the TF style (epsilon inside the square root).\n ' super(SketchLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps
-8,357,680,159,207,132,000
Construct a layernorm module in the TF style (epsilon inside the square root).
models/SketchTransformer/models/networks.py
__init__
avalonstrel/SketchBERT
python
def __init__(self, hidden_size, eps=1e-12): '\n \n ' super(SketchLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps
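The record only shows the constructor; a minimal sketch of the forward pass that usually accompanies this TF-style layer norm (epsilon added inside the square root), assuming the weight/bias parameters created above, is:

import torch

def forward(self, x):
    # Normalize over the last dimension, then apply the learned scale and shift.
    mean = x.mean(-1, keepdim=True)
    variance = (x - mean).pow(2).mean(-1, keepdim=True)
    x = (x - mean) / torch.sqrt(variance + self.variance_epsilon)
    return self.weight * x + self.bias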
def transpose_(self, x): '\n Transpose Function for simplicity.\n ' new_x_shape = (x.size()[:(- 1)] + (self.num_heads, self.head_dim)) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3)
-6,336,780,184,453,579,000
Transpose Function for simplicity.
models/SketchTransformer/models/networks.py
transpose_
avalonstrel/SketchBERT
python
def transpose_(self, x): '\n \n ' new_x_shape = (x.size()[:(- 1)] + (self.num_heads, self.head_dim)) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3)
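A small shape walk-through of the reshaping done by transpose_, with assumed values num_heads=8 and head_dim=64:

import torch

x = torch.randn(2, 16, 512)            # [batch, seq_len, hidden_dim]
new_shape = x.size()[:-1] + (8, 64)    # split hidden_dim into (num_heads, head_dim)
y = x.view(*new_shape).permute(0, 2, 1, 3)
print(y.shape)                         # torch.Size([2, 8, 16, 64])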
def forward(self, hidden_states, attention_mask, head_mask=None, output_attentions=False, keep_multihead_output=False): '\n Input:\n hidden_states[batch, seq_len, hidden_dim]\n attention_mask[batch, 1, 1, seq_len]\n Output:\n context_states[batch, seq_len, hidden_dim]\n attention_probs[seq_len, hidden_dim]\n ' query = self.query(hidden_states) key = self.key(hidden_states) value = self.value(hidden_states) multi_query = self.transpose_(query) multi_key = self.transpose_(key) multi_value = self.transpose_(value) attention_scores = torch.matmul(multi_query, multi_key.transpose((- 1), (- 2))) attention_scores = (attention_scores / self.scale_factor) attention_scores = (attention_scores + attention_mask) attention_probs = F.softmax(attention_scores, dim=(- 1)) attention_probs = self.dropout(attention_probs) if (head_mask is not None): attention_probs = (attention_probs * head_mask) context_states = torch.matmul(attention_probs, multi_value) if keep_multihead_output: self.multihead_output = context_states self.multihead_output.retain_grad() context_states = context_states.permute(0, 2, 1, 3) context_states = context_states.contiguous().view((context_states.size()[:(- 2)] + ((- 1),))) if output_attentions: return (context_states, attention_probs) return context_states
-3,967,524,633,107,120,000
Input: hidden_states[batch, seq_len, hidden_dim] attention_mask[batch, 1, 1, seq_len] Output: context_states[batch, seq_len, hidden_dim] attention_probs[seq_len, hidden_dim]
models/SketchTransformer/models/networks.py
forward
avalonstrel/SketchBERT
python
def forward(self, hidden_states, attention_mask, head_mask=None, output_attentions=False, keep_multihead_output=False): '\n Input:\n hidden_states[batch, seq_len, hidden_dim]\n attention_mask[batch, 1, 1, seq_len]\n Output:\n context_states[batch, seq_len, hidden_dim]\n attention_probs[seq_len, hidden_dim]\n ' query = self.query(hidden_states) key = self.key(hidden_states) value = self.value(hidden_states) multi_query = self.transpose_(query) multi_key = self.transpose_(key) multi_value = self.transpose_(value) attention_scores = torch.matmul(multi_query, multi_key.transpose((- 1), (- 2))) attention_scores = (attention_scores / self.scale_factor) attention_scores = (attention_scores + attention_mask) attention_probs = F.softmax(attention_scores, dim=(- 1)) attention_probs = self.dropout(attention_probs) if (head_mask is not None): attention_probs = (attention_probs * head_mask) context_states = torch.matmul(attention_probs, multi_value) if keep_multihead_output: self.multihead_output = context_states self.multihead_output.retain_grad() context_states = context_states.permute(0, 2, 1, 3) context_states = context_states.contiguous().view((context_states.size()[:(- 2)] + ((- 1),))) if output_attentions: return (context_states, attention_probs) return context_states
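The same score/mask/softmax/context pattern in isolation, with assumed shapes (batch=2, heads=8, seq_len=16, head_dim=64) and an additive mask where 0 keeps a position and -10000 hides it:

import math
import torch
import torch.nn.functional as F

q = torch.randn(2, 8, 16, 64)
k = torch.randn(2, 8, 16, 64)
v = torch.randn(2, 8, 16, 64)
mask = torch.zeros(2, 1, 1, 16)                       # nothing masked here
scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
probs = F.softmax(scores + mask, dim=-1)
context = torch.matmul(probs, v)                      # [2, 8, 16, 64]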
def get_seg_states(self, hidden_states, segment_index): '\n Input:\n hidden_states[batch, seq_len, hidden_dim]\n segment_index[batch, seq_len]\n ' seg_states = torch.zeros(hidden_states.size(0), self.max_segment, hidden_states.size(2)).to(hidden_states.device) length = (segment_index == 0).sum(dim=1) length_mask = length_to_mask(length, max_len=self.max_segment, dtype=torch.float) seg_states[(length_mask == 1), :] = hidden_states[(segment_index == 0), :] return (seg_states, length_mask)
-7,235,732,249,509,121,000
Input: hidden_states[batch, seq_len, hidden_dim] segment_index[batch, seq_len]
models/SketchTransformer/models/networks.py
get_seg_states
avalonstrel/SketchBERT
python
def get_seg_states(self, hidden_states, segment_index): '\n Input:\n hidden_states[batch, seq_len, hidden_dim]\n segment_index[batch, seq_len]\n ' seg_states = torch.zeros(hidden_states.size(0), self.max_segment, hidden_states.size(2)).to(hidden_states.device) length = (segment_index == 0).sum(dim=1) length_mask = length_to_mask(length, max_len=self.max_segment, dtype=torch.float) seg_states[(length_mask == 1), :] = hidden_states[(segment_index == 0), :] return (seg_states, length_mask)
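length_to_mask is used above but not defined in this record; a hedged stand-in with the assumed semantics (1.0 for valid positions, 0.0 for padding) would be:

import torch

def length_to_mask(length, max_len=None, dtype=torch.float):
    # length: 1-D tensor of per-sample lengths; result: [batch, max_len] mask.
    max_len = max_len or int(length.max())
    mask = torch.arange(max_len, device=length.device)[None, :] < length[:, None]
    return mask.to(dtype)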
def forward(self, hidden_states, attention_mask, segments, segment_index, head_mask=None, output_attentions=False): '\n Input:\n hidden_states[batch, seg_len, hidden_dim]:\n attention_mask[batch, seg_len](segment-based)\n segments[batch, seg_len]:\n segment_index[batch, seq_len]\n\n ' local_states = self.local_attention(hidden_states, attention_mask, head_mask) if output_attentions: (local_states, attention_probs) = local_states input_prefix = (hidden_states.size(1) - segment_index.size(1)) (seg_states, seg_atten_mask) = self.get_seg_states(local_states[:, input_prefix:, :], segment_index) if (self.segment_atten_type == 'multi'): seg_states = self.segment_attention(seg_states, seg_atten_mask.unsqueeze(1).unsqueeze(2), head_mask) if output_attentions: (seg_states, attention_probs) = seg_states local_inter_states = self.local_inter_layer(local_states) seg_inter_states = self.seg_inter_layer(seg_states) aug_seg_inter_states = torch.gather(seg_inter_states, 1, (segments[:, input_prefix:] - 2).view(segments.size(0), (- 1), 1).repeat(1, 1, seg_inter_states.size(2))) inter_states = torch.zeros(local_inter_states.size(0), local_inter_states.size(1), self.inter_dim).to(local_inter_states.device) inter_states[:, :, :(self.inter_dim // 2)] = local_inter_states inter_states[:, input_prefix:, (self.inter_dim // 2):] = aug_seg_inter_states inter_states[:, :input_prefix, (self.inter_dim // 2):] = seg_inter_states.sum(dim=1, keepdim=True) output_states = self.output(inter_states, hidden_states) if output_attentions: return (output_states, attention_probs) return output_states
6,916,960,397,091,090,000
Input: hidden_states[batch, seg_len, hidden_dim]: attention_mask[batch, seg_len](segment-based) segments[batch, seg_len]: segment_index[batch, seq_len]
models/SketchTransformer/models/networks.py
forward
avalonstrel/SketchBERT
python
def forward(self, hidden_states, attention_mask, segments, segment_index, head_mask=None, output_attentions=False): '\n Input:\n hidden_states[batch, seg_len, hidden_dim]:\n attention_mask[batch, seg_len](segment-based)\n segments[batch, seg_len]:\n segment_index[batch, seq_len]\n\n ' local_states = self.local_attention(hidden_states, attention_mask, head_mask) if output_attentions: (local_states, attention_probs) = local_states input_prefix = (hidden_states.size(1) - segment_index.size(1)) (seg_states, seg_atten_mask) = self.get_seg_states(local_states[:, input_prefix:, :], segment_index) if (self.segment_atten_type == 'multi'): seg_states = self.segment_attention(seg_states, seg_atten_mask.unsqueeze(1).unsqueeze(2), head_mask) if output_attentions: (seg_states, attention_probs) = seg_states local_inter_states = self.local_inter_layer(local_states) seg_inter_states = self.seg_inter_layer(seg_states) aug_seg_inter_states = torch.gather(seg_inter_states, 1, (segments[:, input_prefix:] - 2).view(segments.size(0), (- 1), 1).repeat(1, 1, seg_inter_states.size(2))) inter_states = torch.zeros(local_inter_states.size(0), local_inter_states.size(1), self.inter_dim).to(local_inter_states.device) inter_states[:, :, :(self.inter_dim // 2)] = local_inter_states inter_states[:, input_prefix:, (self.inter_dim // 2):] = aug_seg_inter_states inter_states[:, :input_prefix, (self.inter_dim // 2):] = seg_inter_states.sum(dim=1, keepdim=True) output_states = self.output(inter_states, hidden_states) if output_attentions: return (output_states, attention_probs) return output_states
def forward(self, input_states, attention_mask, targets=None, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): '\n Input:\n input_states[batch, seq_len, 5],\n zs[batch, latent_dim]\n ' if (attention_mask is None): attention_mask = torch.ones(input_states.size(0), input_states.size(1)) if (len(attention_mask.size()) == 3): extended_attention_mask = attention_mask.unsqueeze(1) elif (len(attention_mask.size()) == 2): extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) extended_attention_mask = ((1.0 - extended_attention_mask) * (- 10000.0)) attention_mask = extended_attention_mask if (head_mask is not None): if (head_mask.dim() == 1): head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.expand_as(self.num_hidden_layers, (- 1), (- 1), (- 1), (- 1)) elif (head_mask.dim() == 2): head_mask = head_mask.unsqueeze(1).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) else: head_mask = None input_states = self.embedding(input_states) if (self.pos_embedding is not None): pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = (input_states + pos_states) if ((self.segment_embedding is not None) and (segments is not None)): segment_states = self.segment_embedding(segments) input_states = (input_states + segment_states) if ((self.cls_embedding is not None) and (targets is not None)): cls_states = self.cls_embedding(targets) cls_states = cls_states.unsqueeze(1).repeat(1, input_states.size(1), 1) input_states = (input_states + cls_states) input_states = self.embed_refine_net(input_states) output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: (output_states, attention_probs) = output_states return (output_states[(- 1)], attention_probs) return output_states[(- 1)]
-1,672,195,904,779,026,700
Input: input_states[batch, seq_len, 5]
models/SketchTransformer/models/networks.py
forward
avalonstrel/SketchBERT
python
def forward(self, input_states, attention_mask, targets=None, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): '\n Input:\n input_states[batch, seq_len, 5],\n zs[batch, latent_dim]\n ' if (attention_mask is None): attention_mask = torch.ones(input_states.size(0), input_states.size(1)) if (len(attention_mask.size()) == 3): extended_attention_mask = attention_mask.unsqueeze(1) elif (len(attention_mask.size()) == 2): extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) extended_attention_mask = ((1.0 - extended_attention_mask) * (- 10000.0)) attention_mask = extended_attention_mask if (head_mask is not None): if (head_mask.dim() == 1): head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.expand_as(self.num_hidden_layers, (- 1), (- 1), (- 1), (- 1)) elif (head_mask.dim() == 2): head_mask = head_mask.unsqueeze(1).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) else: head_mask = None input_states = self.embedding(input_states) if (self.pos_embedding is not None): pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = (input_states + pos_states) if ((self.segment_embedding is not None) and (segments is not None)): segment_states = self.segment_embedding(segments) input_states = (input_states + segment_states) if ((self.cls_embedding is not None) and (targets is not None)): cls_states = self.cls_embedding(targets) cls_states = cls_states.unsqueeze(1).repeat(1, input_states.size(1), 1) input_states = (input_states + cls_states) input_states = self.embed_refine_net(input_states) output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: (output_states, attention_probs) = output_states return (output_states[(- 1)], attention_probs) return output_states[(- 1)]
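A small numeric illustration of the additive mask construction used above, starting from a [batch, seq_len] padding mask:

import torch

attention_mask = torch.tensor([[1.0, 1.0, 1.0, 0.0]])
extended = attention_mask.unsqueeze(1).unsqueeze(2)   # [batch, 1, 1, seq_len]
extended = (1.0 - extended) * -10000.0
print(extended)   # the padded position gets -10000 and vanishes after softmax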
def forward(self, input_states, zs, attention_mask, targets=None, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): '\n Input:\n input_states[batch, seq_len, 5],\n zs[batch, latent_dim]\n ' if (attention_mask is None): attention_mask = torch.ones(input_states.size(0), input_states.size(1)) if (len(attention_mask.size()) == 3): extended_attention_mask = attention_mask.unsqueeze(1) elif (len(attention_mask.size()) == 2): extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) extended_attention_mask = ((1.0 - extended_attention_mask) * (- 10000.0)) attention_mask = extended_attention_mask if (head_mask is not None): if (head_mask.dim() == 1): head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.expand_as(self.num_hidden_layers, (- 1), (- 1), (- 1), (- 1)) elif (head_mask.dim() == 2): head_mask = head_mask.unsqueeze(1).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) else: head_mask = None input_states = self.embedding(input_states) if (self.pos_embedding is not None): pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = (input_states + pos_states) if ((self.segment_embedding is not None) and (segments is not None)): segment_states = self.segment_embedding(segments) input_states = (input_states + segment_states) if ((self.cls_embedding is not None) and (targets is not None)): cls_states = self.cls_embedding(targets) cls_states = cls_states.unsqueeze(1).repeat(1, input_states.size(1), 1) input_states = (input_states + cls_states) input_states = self.embed_refine_net(input_states) input_states = torch.cat([input_states, zs.unsqueeze(1).repeat(1, input_states.size(1), 1)], dim=2) input_states = self.latent_fusion(input_states) output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: (output_states, attention_probs) = output_states output_states = output_states[(- 1)] for re_fc in self.re_fcs: output_states = re_fc(output_states) return (output_states, attention_probs) output_states = output_states[(- 1)] for re_fc in self.re_fcs: output_states = re_fc(output_states) return output_states
-4,880,509,655,281,427,000
Input: input_states[batch, seq_len, 5], zs[batch, latent_dim]
models/SketchTransformer/models/networks.py
forward
avalonstrel/SketchBERT
python
def forward(self, input_states, zs, attention_mask, targets=None, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): '\n Input:\n input_states[batch, seq_len, 5],\n zs[batch, latent_dim]\n ' if (attention_mask is None): attention_mask = torch.ones(input_states.size(0), input_states.size(1)) if (len(attention_mask.size()) == 3): extended_attention_mask = attention_mask.unsqueeze(1) elif (len(attention_mask.size()) == 2): extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) extended_attention_mask = ((1.0 - extended_attention_mask) * (- 10000.0)) attention_mask = extended_attention_mask if (head_mask is not None): if (head_mask.dim() == 1): head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.expand_as(self.num_hidden_layers, (- 1), (- 1), (- 1), (- 1)) elif (head_mask.dim() == 2): head_mask = head_mask.unsqueeze(1).unsqueeze((- 1)).unsqueeze((- 1)) head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) else: head_mask = None input_states = self.embedding(input_states) if (self.pos_embedding is not None): pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = (input_states + pos_states) if ((self.segment_embedding is not None) and (segments is not None)): segment_states = self.segment_embedding(segments) input_states = (input_states + segment_states) if ((self.cls_embedding is not None) and (targets is not None)): cls_states = self.cls_embedding(targets) cls_states = cls_states.unsqueeze(1).repeat(1, input_states.size(1), 1) input_states = (input_states + cls_states) input_states = self.embed_refine_net(input_states) input_states = torch.cat([input_states, zs.unsqueeze(1).repeat(1, input_states.size(1), 1)], dim=2) input_states = self.latent_fusion(input_states) output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: (output_states, attention_probs) = output_states output_states = output_states[(- 1)] for re_fc in self.re_fcs: output_states = re_fc(output_states) return (output_states, attention_probs) output_states = output_states[(- 1)] for re_fc in self.re_fcs: output_states = re_fc(output_states) return output_states
def forward(self, hidden_states): '\n Input:\n hidden_states[batch, seq_len+cls_dim, hidden_dim](0 dim is cls)\n Output:\n x_pred[batch, seq_len+cls_dim, 2*max_size[0]+1]\n y_pred[batch, seq_len+cls_dim, 2*max_size[1]+1]\n type_pred[batch, seq_len+cls_dim, type_size]\n ' hidden_states = hidden_states[:, (self.cls_in_input + self.rel_in_input):, :] x_pred = self.x_pooling(hidden_states) y_pred = self.y_pooling(hidden_states) type_pred = self.type_pooling(hidden_states) return (x_pred, y_pred, type_pred)
5,573,380,538,648,980,000
Input: hidden_states[batch, seq_len+cls_dim, hidden_dim](0 dim is cls) Output: x_pred[batch, seq_len+cls_dim, 2*max_size[0]+1] y_pred[batch, seq_len+cls_dim, 2*max_size[1]+1] type_pred[batch, seq_len+cls_dim, type_size]
models/SketchTransformer/models/networks.py
forward
avalonstrel/SketchBERT
python
def forward(self, hidden_states): '\n Input:\n hidden_states[batch, seq_len+cls_dim, hidden_dim](0 dim is cls)\n Output:\n x_pred[batch, seq_len+cls_dim, 2*max_size[0]+1]\n y_pred[batch, seq_len+cls_dim, 2*max_size[1]+1]\n type_pred[batch, seq_len+cls_dim, type_size]\n ' hidden_states = hidden_states[:, (self.cls_in_input + self.rel_in_input):, :] x_pred = self.x_pooling(hidden_states) y_pred = self.y_pooling(hidden_states) type_pred = self.type_pooling(hidden_states) return (x_pred, y_pred, type_pred)
def forward(self, hidden_states, segment_index): '\n Input:\n hidden_states[batch, seg_len, hidden_dim]\n segment_index[batch, seq_len]\n ' seg_states = hidden_states[:, (self.cls_in_input + self.rel_in_input):, :][(segment_index == 0), :] return self.sg_fc(seg_states)
1,118,064,636,496,338,400
Input: hidden_states[batch, seg_len, hidden_dim] segment_index[batch, seq_len]
models/SketchTransformer/models/networks.py
forward
avalonstrel/SketchBERT
python
def forward(self, hidden_states, segment_index): '\n Input:\n hidden_states[batch, seg_len, hidden_dim]\n segment_index[batch, seq_len]\n ' seg_states = hidden_states[:, (self.cls_in_input + self.rel_in_input):, :][(segment_index == 0), :] return self.sg_fc(seg_states)
@property def function_object(self): 'get the generated function object' return self._function
-4,883,460,870,703,305,000
get the generated function object
mlrun/runtimes/function_reference.py
function_object
AlonMaor14/mlrun
python
@property def function_object(self): return self._function
def to_function(self, default_kind=None): 'generate a function object from the ref definitions' if (self.url and ('://' not in self.url)): if (not os.path.isfile(self.url)): raise OSError(f'{self.url} not found') kind = (self.kind or default_kind) if self.url: if (self.url.endswith('.yaml') or self.url.startswith('db://') or self.url.startswith('hub://')): func = mlrun.import_function(self.url) if self.image: func.spec.image = self.image elif self.url.endswith('.ipynb'): func = mlrun.code_to_function(self.name, filename=self.url, image=self.image, kind=kind) elif self.url.endswith('.py'): if (not self.image): raise ValueError('image must be provided with py code files, use function object for more control/settings') func = mlrun.code_to_function(self.name, filename=self.url, image=self.image, kind=kind) else: raise ValueError(f'unsupported function url {self.url} or no spec') if self.spec: func = enrich_function_from_dict(func, self.spec) elif (self.code is not None): code = self.code if (kind == mlrun.runtimes.RuntimeKinds.serving): code = (code + mlrun_footer.format(mlrun.runtimes.serving.serving_subkind)) func = mlrun.new_function(self.name, kind=kind, image=self.image) data = b64encode(code.encode('utf-8')).decode('utf-8') func.spec.build.functionSourceCode = data if (kind not in mlrun.runtimes.RuntimeKinds.nuclio_runtimes()): func.spec.default_handler = 'handler' if self.spec: func = enrich_function_from_dict(func, self.spec) elif self.spec: func = mlrun.new_function(self.name, runtime=self.spec) else: raise ValueError('url or spec or code must be specified') if self.requirements: func.with_requirements(self.requirements) self._function = func return func
-1,610,808,312,545,578,200
generate a function object from the ref definitions
mlrun/runtimes/function_reference.py
to_function
AlonMaor14/mlrun
python
def to_function(self, default_kind=None): if (self.url and ('://' not in self.url)): if (not os.path.isfile(self.url)): raise OSError(f'{self.url} not found') kind = (self.kind or default_kind) if self.url: if (self.url.endswith('.yaml') or self.url.startswith('db://') or self.url.startswith('hub://')): func = mlrun.import_function(self.url) if self.image: func.spec.image = self.image elif self.url.endswith('.ipynb'): func = mlrun.code_to_function(self.name, filename=self.url, image=self.image, kind=kind) elif self.url.endswith('.py'): if (not self.image): raise ValueError('image must be provided with py code files, use function object for more control/settings') func = mlrun.code_to_function(self.name, filename=self.url, image=self.image, kind=kind) else: raise ValueError(f'unsupported function url {self.url} or no spec') if self.spec: func = enrich_function_from_dict(func, self.spec) elif (self.code is not None): code = self.code if (kind == mlrun.runtimes.RuntimeKinds.serving): code = (code + mlrun_footer.format(mlrun.runtimes.serving.serving_subkind)) func = mlrun.new_function(self.name, kind=kind, image=self.image) data = b64encode(code.encode('utf-8')).decode('utf-8') func.spec.build.functionSourceCode = data if (kind not in mlrun.runtimes.RuntimeKinds.nuclio_runtimes()): func.spec.default_handler = 'handler' if self.spec: func = enrich_function_from_dict(func, self.spec) elif self.spec: func = mlrun.new_function(self.name, runtime=self.spec) else: raise ValueError('url or spec or code must be specified') if self.requirements: func.with_requirements(self.requirements) self._function = func return func
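For the ".py" branch handled above, the reference ultimately resolves to mlrun.code_to_function; a hedged stand-alone sketch (the function name, file name and image below are placeholders) would be:

import mlrun

func = mlrun.code_to_function('my-func', filename='./handler.py',
                              image='mlrun/mlrun', kind='job')
func.with_requirements(['pandas'])   # mirrors the `requirements` handling above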
def deploy(self, **kwargs): 'deploy the function' self._address = self._function.deploy(**kwargs) return self._address
3,107,861,624,235,756,500
deploy the function
mlrun/runtimes/function_reference.py
deploy
AlonMaor14/mlrun
python
def deploy(self, **kwargs): self._address = self._function.deploy(**kwargs) return self._address
def __init__(self, allow_crash_consistent_snapshot=None): 'Constructor for the HypervBackupEnvParams class' self.allow_crash_consistent_snapshot = allow_crash_consistent_snapshot
2,498,918,717,880,499,700
Constructor for the HypervBackupEnvParams class
cohesity_management_sdk/models/hyperv_backup_env_params.py
__init__
anoopbhat/management-sdk-python
python
def __init__(self, allow_crash_consistent_snapshot=None): self.allow_crash_consistent_snapshot = allow_crash_consistent_snapshot
@classmethod def from_dictionary(cls, dictionary): "Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n " if (dictionary is None): return None allow_crash_consistent_snapshot = dictionary.get('allowCrashConsistentSnapshot') return cls(allow_crash_consistent_snapshot)
-3,474,178,223,966,733,300
Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.
cohesity_management_sdk/models/hyperv_backup_env_params.py
from_dictionary
anoopbhat/management-sdk-python
python
@classmethod def from_dictionary(cls, dictionary): "Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n " if (dictionary is None): return None allow_crash_consistent_snapshot = dictionary.get('allowCrashConsistentSnapshot') return cls(allow_crash_consistent_snapshot)
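Usage sketch for the deserialization helper above; the dictionary key uses the API's camelCase name:

params = HypervBackupEnvParams.from_dictionary(
    {'allowCrashConsistentSnapshot': True})
print(params.allow_crash_consistent_snapshot)   # True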
@staticmethod def promote_ase_atoms(obj, symmetry=None): ' Convert ASE Atoms object to the one usable by SPRKKR.\n For the case of the usability it is a bit ugly hack: The __class__ attribute\n is replaced so the extra methods and properties of the objects will\n be available.\n\n Parameters\n ----------\n obj: ase.Atoms\n The atoms object to be promoted to be used for SPRKKR calculations\n\n symmetry: boolean or None\n The sites property of the resulting object will consider the symmetry of the structure.\n I.e., the by-symmetry-equal atomic sites will share the same sites object.\n Default None is the same as True, however it does not change the symmetry\n of the already promoted obj passed into the routine.\n ' if (obj and (not isinstance(obj, SPRKKRAtoms))): if (obj.__class__ is Atoms): obj.__class__ = SPRKKRAtoms else: if (not isinstance(obj, Atoms)): raise f'Can not promote class {obj} of class {obj.__class__} to {SPRKKRAtoms}' class SprKKrAtomsEx(obj.__class__, SPRKKRAtoms): pass obj.__class__ = SprKKrAtomsEx obj._init((True if (symmetry is None) else symmetry)) elif (symmetry is not None): obj.symmetry = symmetry return obj
-8,266,258,733,905,201,000
Convert an ASE Atoms object to one usable by SPRKKR. For the sake of usability this is a bit of an ugly hack: the __class__ attribute is replaced so that the extra methods and properties of the object become available. Parameters ---------- obj: ase.Atoms The atoms object to be promoted for use in SPRKKR calculations symmetry: boolean or None The sites property of the resulting object will consider the symmetry of the structure, i.e. the by-symmetry-equal atomic sites will share the same sites object. Default None is the same as True, except that it does not change the symmetry of an already promoted obj passed into the routine.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
promote_ase_atoms
ase2sprkkr/ase2sprkkr
python
@staticmethod def promote_ase_atoms(obj, symmetry=None): ' Convert ASE Atoms object to the one usable by SPRKKR.\n For the case of the usability it is a bit ugly hack: The __class__ attribute\n is replaced so the extra methods and properties of the objects will\n be available.\n\n Parameters\n ----------\n obj: ase.Atoms\n The atoms object to be promoted to be used for SPRKKR calculations\n\n symmetry: boolean or None\n The sites property of the resulting object will consider the symmetry of the structure.\n I.e., the by-symmetry-equal atomic sites will share the same sites object.\n Default None is the same as True, however it does not change the symmetry\n of the already promoted obj passed into the routine.\n ' if (obj and (not isinstance(obj, SPRKKRAtoms))): if (obj.__class__ is Atoms): obj.__class__ = SPRKKRAtoms else: if (not isinstance(obj, Atoms)): raise f'Can not promote class {obj} of class {obj.__class__} to {SPRKKRAtoms}' class SprKKrAtomsEx(obj.__class__, SPRKKRAtoms): pass obj.__class__ = SprKKrAtomsEx obj._init((True if (symmetry is None) else symmetry)) elif (symmetry is not None): obj.symmetry = symmetry return obj
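A hedged usage sketch: promoting an ordinary ase.Atoms object so that the SPRKKR extras (sites, symmetry handling) become available on it; the bulk copper cell is just an illustrative structure:

from ase.build import bulk

atoms = bulk('Cu', 'fcc', a=3.6)              # plain ase.Atoms
atoms = SPRKKRAtoms.promote_ase_atoms(atoms)  # now behaves as SPRKKRAtoms
print(atoms.symmetry)                         # True by default after promotion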
def __init__(self, *args, symmetry=True, potential=None, **kwargs): '\n Creates SPRKKRAtoms atoms\n\n Parameters\n ----------\n *args: list\n The positionals arguments of ase.Atoms.__init__\n symmetry: boolean\n The symmetry will be computed when the sites property will be initialized.\n I.e., the by-symmetry-equal atomic sites will share the same sites object.\n **kwargs: dict\n The named arguments of ase.Atoms.__init__\n ' self._init(symmetry, potential) super().__init__(*args, **kwargs)
5,208,751,560,967,905,000
Creates SPRKKRAtoms atoms Parameters ---------- *args: list The positionals arguments of ase.Atoms.__init__ symmetry: boolean The symmetry will be computed when the sites property will be initialized. I.e., the by-symmetry-equal atomic sites will share the same sites object. **kwargs: dict The named arguments of ase.Atoms.__init__
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
__init__
ase2sprkkr/ase2sprkkr
python
def __init__(self, *args, symmetry=True, potential=None, **kwargs): '\n Creates SPRKKRAtoms atoms\n\n Parameters\n ----------\n *args: list\n The positionals arguments of ase.Atoms.__init__\n symmetry: boolean\n The symmetry will be computed when the sites property will be initialized.\n I.e., the by-symmetry-equal atomic sites will share the same sites object.\n **kwargs: dict\n The named arguments of ase.Atoms.__init__\n ' self._init(symmetry, potential) super().__init__(*args, **kwargs)
def _init(self, symmetry=True, potential=None): ' The initialization of the additional (not-in-ASE) properties. To be used\n by constructor and by promote_ase_atoms' self._unique_sites = None self._potential = potential self._symmetry = symmetry
7,673,537,083,973,724,000
The initialization of the additional (not-in-ASE) properties. To be used by constructor and by promote_ase_atoms
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
_init
ase2sprkkr/ase2sprkkr
python
def _init(self, symmetry=True, potential=None): ' The initialization of the additional (not-in-ASE) properties. To be used\n by constructor and by promote_ase_atoms' self._unique_sites = None self._potential = potential self._symmetry = symmetry
@property def symmetry(self): '\n Whether the sites property is/will be generated using symmetry, i.e.\n whether the Sites objects in the sites property will be shared among\n symmetric atomic sites.\n ' return self._symmetry
-7,475,728,776,709,522,000
Whether the sites property is/will be generated using symmetry, i.e. whether the Sites objects in the sites property will be shared among symmetric atomic sites.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
symmetry
ase2sprkkr/ase2sprkkr
python
@property def symmetry(self): '\n Whether the sites property is/will be generated using symmetry, i.e.\n whether the Sites objects in the sites property will be shared among\n symmetric atomic sites.\n ' return self._symmetry
@symmetry.setter def symmetry(self, value): '\n Recomputes the sites with enabled/disabled symmetry if the value of the property\n has changed.\n ' if (self._symmetry == value): return self._symmetry = value if (self._unique_sites is not None): if value: self._compute_sites_symmetry() else: self._cancel_sites_symmetry()
4,876,355,944,345,147,000
Recomputes the sites with enabled/disabled symmetry if the value of the property has changed.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
symmetry
ase2sprkkr/ase2sprkkr
python
@symmetry.setter def symmetry(self, value): '\n Recomputes the sites with enabled/disabled symmetry if the value of the property\n has changed.\n ' if (self._symmetry == value): return self._symmetry = value if (self._unique_sites is not None): if value: self._compute_sites_symmetry() else: self._cancel_sites_symmetry()
def compute_spacegroup_for_atomic_numbers(self, atomic_numbers=None, symprec=1e-05): " Return spacegroup that suits to the atoms' cell structure and to the given\n atomic_numbers (not necessary the real ones, they can be just ''labels'').\n " atomic_numbers = (atomic_numbers if (atomic_numbers is not None) else self.get_atomic_numbers()) sg = spglib.get_spacegroup((self.get_cell(), self.get_scaled_positions(), atomic_numbers), symprec=symprec) if (sg is None): return None sg_no = int(sg[(sg.find('(') + 1):sg.find(')')]) spacegroup = Spacegroup(sg_no) return spacegroup
254,699,370,758,858,100
Return the spacegroup that fits the atoms' cell structure and the given atomic_numbers (not necessarily the real ones; they can be just 'labels').
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
compute_spacegroup_for_atomic_numbers
ase2sprkkr/ase2sprkkr
python
def compute_spacegroup_for_atomic_numbers(self, atomic_numbers=None, symprec=1e-05): " Return spacegroup that suits to the atoms' cell structure and to the given\n atomic_numbers (not necessary the real ones, they can be just labels).\n " atomic_numbers = (atomic_numbers if (atomic_numbers is not None) else self.get_atomic_numbers()) sg = spglib.get_spacegroup((self.get_cell(), self.get_scaled_positions(), atomic_numbers), symprec=symprec) if (sg is None): return None sg_no = int(sg[(sg.find('(') + 1):sg.find(')')]) spacegroup = Spacegroup(sg_no) return spacegroup
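A hedged stand-alone illustration of the spglib call wrapped above, using a one-atom simple cubic cell:

import numpy as np
import spglib

cell = (np.eye(3) * 4.0,      # lattice vectors
        [[0.0, 0.0, 0.0]],    # scaled positions
        [29])                 # atomic numbers (Cu)
print(spglib.get_spacegroup(cell, symprec=1e-5))   # e.g. 'Pm-3m (221)'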
def compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-05): ' SPRKKR has some properties shared by all by-symmetry-equal sites.\n This method initializes _sites property, that holds these properties:\n makes identical all the atoms on the "symmetry identical positions" with\n the same atomic number.\n\n The method is called automatically when the sites property is firstly accessed.\n The effect of the method is the nearly same as setting the symmetry property.\n However, setting the symmetry property on an \'already symmetrized\' object has\n no effect, while this methods always recompute the sites property.\n\n Parameters\n ----------\n spacegroup: Spacegroup\n If not None, the given spacegroup is used for determining the symmetry,\n instead of the one determined by cell geometry.\n\n atomic_numbers: [ int ]\n Atomic numbers used to determine the spacegroup (if it is not given) to compute\n the symmetry. The atomic numbers can be \'\'virtual\'\', just to denote the equivalence\n of the sites.\n The array should have the same length as the number of atoms in the unit cell.\n If None, self.symbols are used.\n\n consider_old: bool\n If True, and _unique_sites is not None, the non-symmetry-equivalent sites won\'t\n be equivalent in the newly computed symmetry.\n\n symprec: float\n A threshold for spatial error for the symmetry computing. See spglib.get_spacegroup\n\n ' self._symmetry = True SPRKKRAtoms._compute_sites_symmetry(**locals())
8,060,052,139,602,305,000
SPRKKR has some properties shared by all symmetry-equivalent sites. This method initializes the _sites property, which holds these properties: it makes identical all the atoms on "symmetry identical positions" that have the same atomic number. The method is called automatically when the sites property is first accessed. The effect of the method is nearly the same as setting the symmetry property. However, setting the symmetry property on an 'already symmetrized' object has no effect, while this method always recomputes the sites property. Parameters ---------- spacegroup: Spacegroup If not None, the given spacegroup is used for determining the symmetry, instead of the one determined by the cell geometry. atomic_numbers: [ int ] Atomic numbers used to determine the spacegroup (if it is not given) to compute the symmetry. The atomic numbers can be ''virtual'', just to denote the equivalence of the sites. The array should have the same length as the number of atoms in the unit cell. If None, self.symbols are used. consider_old: bool If True, and _unique_sites is not None, sites that are currently non-equivalent will not become equivalent in the newly computed symmetry. symprec: float A threshold for the spatial error in the symmetry computation. See spglib.get_spacegroup
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
compute_sites_symmetry
ase2sprkkr/ase2sprkkr
python
def compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-05): ' SPRKKR has some properties shared by all by-symmetry-equal sites.\n This method initializes _sites property, that holds these properties:\n makes identical all the atoms on the "symmetry identical positions" with\n the same atomic number.\n\n The method is called automatically when the sites property is firstly accessed.\n The effect of the method is the nearly same as setting the symmetry property.\n However, setting the symmetry property on an \'already symmetrized\' object has\n no effect, while this methods always recompute the sites property.\n\n Parameters\n ----------\n spacegroup: Spacegroup\n If not None, the given spacegroup is used for determining the symmetry,\n instead of the one determined by cell geometry.\n\n atomic_numbers: [ int ]\n Atomic numbers used to determine the spacegroup (if it is not given) to compute\n the symmetry. The atomic numbers can be \'\'virtual\'\', just to denote the equivalence\n of the sites.\n The array should have the same length as the number of atoms in the unit cell.\n If None, self.symbols are used.\n\n consider_old: bool\n If True, and _unique_sites is not None, the non-symmetry-equivalent sites won\'t\n be equivalent in the newly computed symmetry.\n\n symprec: float\n A threshold for spatial error for the symmetry computing. See spglib.get_spacegroup\n\n ' self._symmetry = True SPRKKRAtoms._compute_sites_symmetry(**locals())
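A hedged usage sketch of the call documented above: `atoms` is assumed to be an already-constructed SPRKKRAtoms instance for a four-atom cell (its construction is not shown in these rows), and the ''virtual'' numbers only encode which sites should be treated as equivalent.

```python
# 'atoms' is assumed to exist; the virtual labels are illustrative only.
virtual_labels = [1, 1, 2, 2]        # first two sites equivalent, last two equivalent
atoms.compute_sites_symmetry(atomic_numbers=virtual_labels, symprec=1e-5)
print(len(set(atoms.sites)))         # number of distinct, shared Site objects

atoms.cancel_sites_symmetry()        # back to one independent Site per atom
```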
def _compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-05): ' See compute_sites_symmetry - this metod does just the same, but it does not set the symmetry property.' occupation = self.info.get('occupancy', {}) if ((not spacegroup) and self._symmetry): if atomic_numbers: mapping = UniqueValuesMapping(atomic_numbers) else: mapping = UniqueValuesMapping(self.get_atomic_numbers()) if (consider_old and self._unique_sites): mapping = mapping.merge(self._unique_sites) if occupation: def gen_occ(): for i in range(len(mapping)): val = occupation.get(i, None) if (val is None): (yield val) else: (yield tuple(((k, val[k]) for k in val))) mapping = mapping.merge(gen_occ()) spacegroup = self.compute_spacegroup_for_atomic_numbers(mapping.mapping, symprec=symprec) self.info['spacegroup'] = spacegroup if (not spacegroup): return self.cancel_sites_symmetry() tags = spacegroup.tag_sites(self.get_scaled_positions()) mapping = mapping.merge(tags) tags = mapping.mapping sites = np.empty(len(tags), dtype=object) (uniq, umap) = np.unique(tags, return_inverse=True) used = set() for i in range(len(uniq)): index = (umap == i) if (self._unique_sites is not None): possible = (i for i in self._unique_sites[index]) site = next(filter(None, possible), None) if (site in used): site = site.copy() else: used.add(site) else: site = None if (not site): symbol = self.symbols[numpy_index(umap, i)] for ai in np.where(index)[0]: if ((ai in occupation) and occupation[ai]): symbol = occupation[ai] site = Site(self, symbol) sites[index] = site self.sites = sites
-7,394,917,638,340,702,000
See compute_sites_symmetry - this method does just the same, but it does not set the symmetry property.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
_compute_sites_symmetry
ase2sprkkr/ase2sprkkr
python
def _compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-05): ' ' occupation = self.info.get('occupancy', {}) if ((not spacegroup) and self._symmetry): if atomic_numbers: mapping = UniqueValuesMapping(atomic_numbers) else: mapping = UniqueValuesMapping(self.get_atomic_numbers()) if (consider_old and self._unique_sites): mapping = mapping.merge(self._unique_sites) if occupation: def gen_occ(): for i in range(len(mapping)): val = occupation.get(i, None) if (val is None): (yield val) else: (yield tuple(((k, val[k]) for k in val))) mapping = mapping.merge(gen_occ()) spacegroup = self.compute_spacegroup_for_atomic_numbers(mapping.mapping, symprec=symprec) self.info['spacegroup'] = spacegroup if (not spacegroup): return self.cancel_sites_symmetry() tags = spacegroup.tag_sites(self.get_scaled_positions()) mapping = mapping.merge(tags) tags = mapping.mapping sites = np.empty(len(tags), dtype=object) (uniq, umap) = np.unique(tags, return_inverse=True) used = set() for i in range(len(uniq)): index = (umap == i) if (self._unique_sites is not None): possible = (i for i in self._unique_sites[index]) site = next(filter(None, possible), None) if (site in used): site = site.copy() else: used.add(site) else: site = None if (not site): symbol = self.symbols[numpy_index(umap, i)] for ai in np.where(index)[0]: if ((ai in occupation) and occupation[ai]): symbol = occupation[ai] site = Site(self, symbol) sites[index] = site self.sites = sites
def cancel_sites_symmetry(self): ' Cancel the use of symmetry in the structure, i.e., makes the Site object\n uniqe (not shared) for each atomic site.\n\n Calling this method is nearly equivalent to the setting the symmetry property\n to False, however, this method always recompute the sites object, while\n setting symmetry=False recomputes the sites property only if it was previously\n set to False.\n ' self._symmetry = False self._cancel_sites_symmetry()
-1,303,938,581,556,577,000
Cancel the use of symmetry in the structure, i.e., makes the Site object unique (not shared) for each atomic site. Calling this method is nearly equivalent to setting the symmetry property to False; however, this method always recomputes the sites object, while setting symmetry=False recomputes the sites property only if it was previously True.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
cancel_sites_symmetry
ase2sprkkr/ase2sprkkr
python
def cancel_sites_symmetry(self): ' Cancel the use of symmetry in the structure, i.e., makes the Site object\n uniqe (not shared) for each atomic site.\n\n Calling this method is nearly equivalent to the setting the symmetry property\n to False, however, this method always recompute the sites object, while\n setting symmetry=False recomputes the sites property only if it was previously\n set to False.\n ' self._symmetry = False self._cancel_sites_symmetry()
def _cancel_sites_symmetry(self): ' See cancel_sites_symmetry - this metod does just the same, but it does not set the symmetry property.' sites = np.empty(len(self), dtype=object) used = set() occupation = self.info.get('occupancy', {}) for i in range(len(self)): if (self._unique_sites is not None): site = self._unique_sites[i] if (site in used): site = site.copy() else: used.add(site) else: symbol = (occupation[i] if ((i in occupation) and occupation[i]) else self.symbols[i]) site = Site(self, symbol) sites[i] = site self.sites = sites
7,062,645,137,594,079,000
See cancel_sites_symmetry - this method does just the same, but it does not set the symmetry property.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
_cancel_sites_symmetry
ase2sprkkr/ase2sprkkr
python
def _cancel_sites_symmetry(self): ' ' sites = np.empty(len(self), dtype=object) used = set() occupation = self.info.get('occupancy', {}) for i in range(len(self)): if (self._unique_sites is not None): site = self._unique_sites[i] if (site in used): site = site.copy() else: used.add(site) else: symbol = (occupation[i] if ((i in occupation) and occupation[i]) else self.symbols[i]) site = Site(self, symbol) sites[i] = site self.sites = sites
@property def sites(self): ' The sites property holds all the information for the SPR-KKR package:\n atomic types (including number of semicore and valence electrons),\n occupancy, symmetries, meshes...\n Some of the properties are stored in the ASE atoms properties\n (e.g. occupancy, atomic symbol), however, ASE is not able to hold them\n all and/or to describe fully the SPR-KKR options; thus, these properties\n are hold in this array.\n\n The changes made on this array are reflected (as is possible)\n to the ASE properties, but the opposite does not hold - to reflect the changes\n in these properties please create a new Atoms object with given properties.\n ' if (self._unique_sites is None): self._compute_sites_symmetry() return self._unique_sites
-5,191,896,333,846,247,000
The sites property holds all the information for the SPR-KKR package: atomic types (including the number of semicore and valence electrons), occupancy, symmetries, meshes... Some of the properties are stored in the ASE atoms properties (e.g. occupancy, atomic symbol); however, ASE is not able to hold them all and/or to fully describe the SPR-KKR options, thus these properties are held in this array. The changes made on this array are reflected (as far as possible) to the ASE properties, but the opposite does not hold - to reflect changes in the ASE properties, please create a new Atoms object with the given properties.
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
sites
ase2sprkkr/ase2sprkkr
python
@property def sites(self): ' The sites property holds all the information for the SPR-KKR package:\n atomic types (including number of semicore and valence electrons),\n occupancy, symmetries, meshes...\n Some of the properties are stored in the ASE atoms properties\n (e.g. occupancy, atomic symbol), however, ASE is not able to hold them\n all and/or to describe fully the SPR-KKR options; thus, these properties\n are hold in this array.\n\n The changes made on this array are reflected (as is possible)\n to the ASE properties, but the opposite does not hold - to reflect the changes\n in these properties please create a new Atoms object with given properties.\n ' if (self._unique_sites is None): self._compute_sites_symmetry() return self._unique_sites
@sites.setter def sites(self, v): ' Set the sites property and update all other dependent\n properties (symbols, occupancy) according to the sites ' an = np.zeros(len(v), dtype=int) occ = {} for (i, j) in enumerate(v): occ[i] = j.occupation.as_dict an[i] = j.occupation.primary_atomic_number self.set_atomic_numbers(an) self.info['occupancy'] = occ self._unique_sites = v
2,869,206,069,894,840,000
Set the sites property and update all other dependent properties (symbols, occupancy) according to the sites
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
sites
ase2sprkkr/ase2sprkkr
python
@sites.setter def sites(self, v): ' Set the sites property and update all other dependent\n properties (symbols, occupancy) according to the sites ' an = np.zeros(len(v), dtype=int) occ = {} for (i, j) in enumerate(v): occ[i] = j.occupation.as_dict an[i] = j.occupation.primary_atomic_number self.set_atomic_numbers(an) self.info['occupancy'] = occ self._unique_sites = v
def upload_file_to_shock(self, file_path, token): '\n Use HTTP multi-part POST to save a file to a SHOCK instance.\n ' if (token is None): raise Exception('Authentication token required!') header = {'Authorization': 'Oauth {0}'.format(token)} if (file_path is None): raise Exception('No file given for upload to SHOCK!') with open(os.path.abspath(file_path), 'rb') as data_file: files = {'upload': data_file} response = requests.post((self.shockURL + '/node'), headers=header, files=files, stream=True, allow_redirects=True) self.check_shock_response(response, 'Error trying to upload contig FASTA file {} to Shock: '.format(file_path)) return response.json()['data']
-7,824,875,898,772,669,000
Use HTTP multi-part POST to save a file to a SHOCK instance.
lib/kb_SPAdes/kb_SPAdesImpl.py
upload_file_to_shock
mclark58/kb_SPAdes
python
def upload_file_to_shock(self, file_path, token): '\n \n ' if (token is None): raise Exception('Authentication token required!') header = {'Authorization': 'Oauth {0}'.format(token)} if (file_path is None): raise Exception('No file given for upload to SHOCK!') with open(os.path.abspath(file_path), 'rb') as data_file: files = {'upload': data_file} response = requests.post((self.shockURL + '/node'), headers=header, files=files, stream=True, allow_redirects=True) self.check_shock_response(response, 'Error trying to upload contig FASTA file {} to Shock: '.format(file_path)) return response.json()['data']
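The multipart upload above boils down to a single requests.post call. A stand-alone sketch, assuming a placeholder Shock URL and token (neither is a real endpoint or credential):

```python
import os
import requests

shock_url = 'https://example.org/services/shock-api'   # placeholder endpoint
token = '<auth-token>'                                  # placeholder credential
file_path = 'contigs.fasta'

header = {'Authorization': 'Oauth {0}'.format(token)}
with open(os.path.abspath(file_path), 'rb') as data_file:
    response = requests.post(shock_url + '/node',
                             headers=header,
                             files={'upload': data_file},
                             stream=True,
                             allow_redirects=True)
response.raise_for_status()
node = response.json()['data']    # Shock describes the newly created node here
```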
def run_SPAdes(self, ctx, params): '\n Run SPAdes on paired end libraries\n :param params: instance of type "SPAdesParams" (Input parameters for\n running SPAdes. workspace_name - the name of the workspace from\n which to take input and store output. output_contigset_name - the\n name of the output contigset read_libraries - a list of Illumina\n PairedEndLibrary files in FASTQ or BAM format. dna_source -\n (optional) the source of the DNA used for sequencing\n \'single_cell\': DNA amplified from a single cell via MDA anything\n else: Standard DNA sample from multiple cells. Default value is\n None. min_contig_length - (optional) integer to filter out contigs\n with length < min_contig_length from the SPAdes output. Default\n value is 0 implying no filter. kmer_sizes - (optional) K-mer\n sizes, Default values: 33, 55, 77, 99, 127 (all values must be\n odd, less than 128 and listed in ascending order) In the absence\n of these values, K values are automatically selected.\n skip_error_correction - (optional) Assembly only (No error\n correction). By default this is disabled.) -> structure: parameter\n "workspace_name" of String, parameter "output_contigset_name" of\n String, parameter "read_libraries" of list of type\n "paired_end_lib" (The workspace object name of a PairedEndLibrary\n file, whether of the KBaseAssembly or KBaseFile type.), parameter\n "dna_source" of String, parameter "min_contig_length" of Long,\n parameter "kmer_sizes" of list of Long, parameter\n "skip_error_correction" of type "bool" (A boolean. 0 = false,\n anything else = true.)\n :returns: instance of type "SPAdesOutput" (Output parameters for\n SPAdes run. report_name - the name of the KBaseReport.Report\n workspace object. report_ref - the workspace reference of the\n report.) -> structure: parameter "report_name" of String,\n parameter "report_ref" of String\n ' self.log(('Running run_SPAdes with params:\n' + pformat(params))) token = ctx['token'] self.process_params(params) wsname = params[self.PARAM_IN_WS] obj_ids = [] for r in params[self.PARAM_IN_LIB]: obj_ids.append({'ref': (r if ('/' in r) else ((wsname + '/') + r))}) ws = Workspace(self.workspaceURL, token=token) ws_info = ws.get_object_info_new({'objects': obj_ids}) reads_params = [] reftoname = {} for (wsi, oid) in zip(ws_info, obj_ids): ref = oid['ref'] reads_params.append(ref) obj_name = wsi[1] reftoname[ref] = ((wsi[7] + '/') + obj_name) readcli = ReadsUtils(self.callbackURL, token=ctx['token']) typeerr = ((('Supported types: KBaseFile.SingleEndLibrary ' + 'KBaseFile.PairedEndLibrary ') + 'KBaseAssembly.SingleEndLibrary ') + 'KBaseAssembly.PairedEndLibrary') try: reads = readcli.download_reads({'read_libraries': reads_params, 'interleaved': 'false', 'gzipped': None})['files'] except ServerError as se: self.log('logging stacktrace from dynamic client error') self.log(se.data) if (typeerr in se.message): prefix = se.message.split('.')[0] raise ValueError((((prefix + '. 
Only the types ') + 'KBaseAssembly.PairedEndLibrary ') + 'and KBaseFile.PairedEndLibrary are supported')) else: raise self.log(('Got reads data from converter:\n' + pformat(reads))) phred_type = self.check_reads(params, reads, reftoname) reads_data = [] for ref in reads: reads_name = reftoname[ref] f = reads[ref]['files'] seq_tech = reads[ref]['sequencing_tech'] if (f['type'] == 'interleaved'): reads_data.append({'fwd_file': f['fwd'], 'type': 'paired', 'seq_tech': seq_tech}) elif (f['type'] == 'paired'): reads_data.append({'fwd_file': f['fwd'], 'rev_file': f['rev'], 'type': 'paired', 'seq_tech': seq_tech}) elif (f['type'] == 'single'): reads_data.append({'fwd_file': f['fwd'], 'type': 'single', 'seq_tech': seq_tech}) else: raise ValueError(('Something is very wrong with read lib' + reads_name)) kmer_sizes = None if ((self.PARAM_IN_KMER_SIZES in params) and (params[self.PARAM_IN_KMER_SIZES] is not None)): if (len(params[self.PARAM_IN_KMER_SIZES]) > 0): kmer_sizes = ','.join((str(num) for num in params[self.PARAM_IN_KMER_SIZES])) skip_error_correction = 0 if ((self.PARAM_IN_SKIP_ERR_CORRECT in params) and (params[self.PARAM_IN_SKIP_ERR_CORRECT] is not None)): if (params[self.PARAM_IN_SKIP_ERR_CORRECT] == 1): skip_error_correction = 1 spades_out = self.exec_spades(params[self.PARAM_IN_DNA_SOURCE], reads_data, phred_type, kmer_sizes, skip_error_correction) self.log(('SPAdes output dir: ' + spades_out)) output_contigs = os.path.join(spades_out, 'scaffolds.fasta') self.log('Uploading FASTA file to Assembly') assemblyUtil = AssemblyUtil(self.callbackURL, token=ctx['token'], service_ver='release') if (params.get('min_contig_length', 0) > 0): assemblyUtil.save_assembly_from_fasta({'file': {'path': output_contigs}, 'workspace_name': wsname, 'assembly_name': params[self.PARAM_IN_CS_NAME], 'min_contig_length': params['min_contig_length']}) (report_name, report_ref) = self.load_report((output_contigs + '.filtered.fa'), params, wsname) else: assemblyUtil.save_assembly_from_fasta({'file': {'path': output_contigs}, 'workspace_name': wsname, 'assembly_name': params[self.PARAM_IN_CS_NAME]}) (report_name, report_ref) = self.load_report(output_contigs, params, wsname) output = {'report_name': report_name, 'report_ref': report_ref} if (not isinstance(output, dict)): raise ValueError(('Method run_SPAdes return value ' + 'output is not type dict as required.')) return [output]
-7,648,011,864,929,356,000
Run SPAdes on paired end libraries :param params: instance of type "SPAdesParams" (Input parameters for running SPAdes. workspace_name - the name of the workspace from which to take input and store output. output_contigset_name - the name of the output contigset read_libraries - a list of Illumina PairedEndLibrary files in FASTQ or BAM format. dna_source - (optional) the source of the DNA used for sequencing 'single_cell': DNA amplified from a single cell via MDA anything else: Standard DNA sample from multiple cells. Default value is None. min_contig_length - (optional) integer to filter out contigs with length < min_contig_length from the SPAdes output. Default value is 0 implying no filter. kmer_sizes - (optional) K-mer sizes, Default values: 33, 55, 77, 99, 127 (all values must be odd, less than 128 and listed in ascending order) In the absence of these values, K values are automatically selected. skip_error_correction - (optional) Assembly only (No error correction). By default this is disabled.) -> structure: parameter "workspace_name" of String, parameter "output_contigset_name" of String, parameter "read_libraries" of list of type "paired_end_lib" (The workspace object name of a PairedEndLibrary file, whether of the KBaseAssembly or KBaseFile type.), parameter "dna_source" of String, parameter "min_contig_length" of Long, parameter "kmer_sizes" of list of Long, parameter "skip_error_correction" of type "bool" (A boolean. 0 = false, anything else = true.) :returns: instance of type "SPAdesOutput" (Output parameters for SPAdes run. report_name - the name of the KBaseReport.Report workspace object. report_ref - the workspace reference of the report.) -> structure: parameter "report_name" of String, parameter "report_ref" of String
lib/kb_SPAdes/kb_SPAdesImpl.py
run_SPAdes
mclark58/kb_SPAdes
python
def run_SPAdes(self, ctx, params): '\n Run SPAdes on paired end libraries\n :param params: instance of type "SPAdesParams" (Input parameters for\n running SPAdes. workspace_name - the name of the workspace from\n which to take input and store output. output_contigset_name - the\n name of the output contigset read_libraries - a list of Illumina\n PairedEndLibrary files in FASTQ or BAM format. dna_source -\n (optional) the source of the DNA used for sequencing\n \'single_cell\': DNA amplified from a single cell via MDA anything\n else: Standard DNA sample from multiple cells. Default value is\n None. min_contig_length - (optional) integer to filter out contigs\n with length < min_contig_length from the SPAdes output. Default\n value is 0 implying no filter. kmer_sizes - (optional) K-mer\n sizes, Default values: 33, 55, 77, 99, 127 (all values must be\n odd, less than 128 and listed in ascending order) In the absence\n of these values, K values are automatically selected.\n skip_error_correction - (optional) Assembly only (No error\n correction). By default this is disabled.) -> structure: parameter\n "workspace_name" of String, parameter "output_contigset_name" of\n String, parameter "read_libraries" of list of type\n "paired_end_lib" (The workspace object name of a PairedEndLibrary\n file, whether of the KBaseAssembly or KBaseFile type.), parameter\n "dna_source" of String, parameter "min_contig_length" of Long,\n parameter "kmer_sizes" of list of Long, parameter\n "skip_error_correction" of type "bool" (A boolean. 0 = false,\n anything else = true.)\n :returns: instance of type "SPAdesOutput" (Output parameters for\n SPAdes run. report_name - the name of the KBaseReport.Report\n workspace object. report_ref - the workspace reference of the\n report.) -> structure: parameter "report_name" of String,\n parameter "report_ref" of String\n ' self.log(('Running run_SPAdes with params:\n' + pformat(params))) token = ctx['token'] self.process_params(params) wsname = params[self.PARAM_IN_WS] obj_ids = [] for r in params[self.PARAM_IN_LIB]: obj_ids.append({'ref': (r if ('/' in r) else ((wsname + '/') + r))}) ws = Workspace(self.workspaceURL, token=token) ws_info = ws.get_object_info_new({'objects': obj_ids}) reads_params = [] reftoname = {} for (wsi, oid) in zip(ws_info, obj_ids): ref = oid['ref'] reads_params.append(ref) obj_name = wsi[1] reftoname[ref] = ((wsi[7] + '/') + obj_name) readcli = ReadsUtils(self.callbackURL, token=ctx['token']) typeerr = ((('Supported types: KBaseFile.SingleEndLibrary ' + 'KBaseFile.PairedEndLibrary ') + 'KBaseAssembly.SingleEndLibrary ') + 'KBaseAssembly.PairedEndLibrary') try: reads = readcli.download_reads({'read_libraries': reads_params, 'interleaved': 'false', 'gzipped': None})['files'] except ServerError as se: self.log('logging stacktrace from dynamic client error') self.log(se.data) if (typeerr in se.message): prefix = se.message.split('.')[0] raise ValueError((((prefix + '. 
Only the types ') + 'KBaseAssembly.PairedEndLibrary ') + 'and KBaseFile.PairedEndLibrary are supported')) else: raise self.log(('Got reads data from converter:\n' + pformat(reads))) phred_type = self.check_reads(params, reads, reftoname) reads_data = [] for ref in reads: reads_name = reftoname[ref] f = reads[ref]['files'] seq_tech = reads[ref]['sequencing_tech'] if (f['type'] == 'interleaved'): reads_data.append({'fwd_file': f['fwd'], 'type': 'paired', 'seq_tech': seq_tech}) elif (f['type'] == 'paired'): reads_data.append({'fwd_file': f['fwd'], 'rev_file': f['rev'], 'type': 'paired', 'seq_tech': seq_tech}) elif (f['type'] == 'single'): reads_data.append({'fwd_file': f['fwd'], 'type': 'single', 'seq_tech': seq_tech}) else: raise ValueError(('Something is very wrong with read lib' + reads_name)) kmer_sizes = None if ((self.PARAM_IN_KMER_SIZES in params) and (params[self.PARAM_IN_KMER_SIZES] is not None)): if (len(params[self.PARAM_IN_KMER_SIZES]) > 0): kmer_sizes = ','.join((str(num) for num in params[self.PARAM_IN_KMER_SIZES])) skip_error_correction = 0 if ((self.PARAM_IN_SKIP_ERR_CORRECT in params) and (params[self.PARAM_IN_SKIP_ERR_CORRECT] is not None)): if (params[self.PARAM_IN_SKIP_ERR_CORRECT] == 1): skip_error_correction = 1 spades_out = self.exec_spades(params[self.PARAM_IN_DNA_SOURCE], reads_data, phred_type, kmer_sizes, skip_error_correction) self.log(('SPAdes output dir: ' + spades_out)) output_contigs = os.path.join(spades_out, 'scaffolds.fasta') self.log('Uploading FASTA file to Assembly') assemblyUtil = AssemblyUtil(self.callbackURL, token=ctx['token'], service_ver='release') if (params.get('min_contig_length', 0) > 0): assemblyUtil.save_assembly_from_fasta({'file': {'path': output_contigs}, 'workspace_name': wsname, 'assembly_name': params[self.PARAM_IN_CS_NAME], 'min_contig_length': params['min_contig_length']}) (report_name, report_ref) = self.load_report((output_contigs + '.filtered.fa'), params, wsname) else: assemblyUtil.save_assembly_from_fasta({'file': {'path': output_contigs}, 'workspace_name': wsname, 'assembly_name': params[self.PARAM_IN_CS_NAME]}) (report_name, report_ref) = self.load_report(output_contigs, params, wsname) output = {'report_name': report_name, 'report_ref': report_ref} if (not isinstance(output, dict)): raise ValueError(('Method run_SPAdes return value ' + 'output is not type dict as required.')) return [output]
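The SPAdesParams structure documented above can be illustrated with a plain dict; the workspace and object names below are placeholders, not real KBase objects.

```python
# Hedged illustration of SPAdesParams; values follow the docstring's constraints.
params = {
    'workspace_name': 'my_workspace',               # placeholder workspace
    'output_contigset_name': 'my_assembly.contigs',
    'read_libraries': ['my_workspace/pe_lib_1'],    # PairedEndLibrary object name(s)
    'dna_source': None,                             # or 'single_cell' for MDA data
    'min_contig_length': 500,                       # 0 would mean no filtering
    'kmer_sizes': [33, 55, 77, 99, 127],            # odd, < 128, ascending order
    'skip_error_correction': 0,                     # 0 = false, anything else = true
}
```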
def run_HybridSPAdes(self, ctx, params): '\n Run HybridSPAdes on paired end libraries with PacBio CLR and Oxford Nanopore reads\n :param params: instance of type "HybridSPAdesParams" (------To run\n HybridSPAdes 3.13.0 you need at least one library of the following\n types:------ 1) Illumina paired-end/high-quality\n mate-pairs/unpaired reads 2) IonTorrent paired-end/high-quality\n mate-pairs/unpaired reads 3) PacBio CCS reads Version 3.13.0 of\n SPAdes supports paired-end reads, mate-pairs and unpaired reads.\n SPAdes can take as input several paired-end and mate-pair\n libraries simultaneously. workspace_name - the name of the\n workspace from which to take input and store output.\n output_contigset_name - the name of the output contigset\n read_libraries - a list of Illumina or IonTorrent\n paired-end/high-quality mate-pairs/unpaired reads\n long_reads_libraries - a list of PacBio, Oxford Nanopore Sanger\n reads and/or additional contigs dna_source - the source of the DNA\n used for sequencing \'single_cell\': DNA amplified from a single\n cell via MDA anything else: Standard DNA sample from multiple\n cells. Default value is None. pipeline_options - a list of string\n specifying how the SPAdes pipeline should be run kmer_sizes -\n (optional) K-mer sizes, Default values: 21, 33, 55, 77, 99, 127\n (all values must be odd, less than 128 and listed in ascending\n order) In the absence of these values, K values are automatically\n selected. min_contig_length - integer to filter out contigs with\n length < min_contig_length from the HybridSPAdes output. Default\n value is 0 implying no filter. @optional dna_source @optional\n pipeline_options @optional kmer_sizes @optional min_contig_length)\n -> structure: parameter "workspace_name" of String, parameter\n "output_contigset_name" of String, parameter "reads_libraries" of\n list of type "ReadsParams" (parameter groups--define attributes\n for specifying inputs with YAML data set file (advanced) The\n following attributes are available: - orientation ("fr", "rf",\n "ff") - type ("paired-end", "mate-pairs", "hq-mate-pairs",\n "single", "pacbio", "nanopore", "sanger", "trusted-contigs",\n "untrusted-contigs") - interlaced reads (comma-separated list of\n files with interlaced reads) - left reads (comma-separated list of\n files with left reads) - right reads (comma-separated list of\n files with right reads) - single reads (comma-separated list of\n files with single reads or unpaired reads from paired library) -\n merged reads (comma-separated list of files with merged reads)) ->\n structure: parameter "lib_ref" of type "obj_ref" (An X/Y/Z style\n KBase object reference), parameter "orientation" of String,\n parameter "lib_type" of String, parameter "long_reads_libraries"\n of list of type "LongReadsParams" -> structure: parameter\n "long_reads_ref" of type "obj_ref" (An X/Y/Z style KBase object\n reference), parameter "long_reads_type" of String, parameter\n "dna_source" of String, parameter "pipeline_options" of list of\n String, parameter "kmer_sizes" of list of Long, parameter\n "min_contig_length" of Long, parameter "create_report" of type\n "bool" (A boolean. 0 = false, anything else = true.)\n :returns: instance of type "SPAdesOutput" (Output parameters for\n SPAdes run. report_name - the name of the KBaseReport.Report\n workspace object. report_ref - the workspace reference of the\n report.) 
-> structure: parameter "report_name" of String,\n parameter "report_ref" of String\n ' self.log('Running run_HybridSPAdes with params:\n{}'.format(json.dumps(params, indent=1))) spades_assembler = SPAdesAssembler(self.cfg, ctx.provenance()) output = spades_assembler.run_hybrid_spades(params) if (not isinstance(output, dict)): raise ValueError(('Method run_HybridSPAdes return value ' + 'output is not type dict as required.')) return [output]
-3,089,188,004,854,902,000
Run HybridSPAdes on paired end libraries with PacBio CLR and Oxford Nanopore reads :param params: instance of type "HybridSPAdesParams" (------To run HybridSPAdes 3.13.0 you need at least one library of the following types:------ 1) Illumina paired-end/high-quality mate-pairs/unpaired reads 2) IonTorrent paired-end/high-quality mate-pairs/unpaired reads 3) PacBio CCS reads Version 3.13.0 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. workspace_name - the name of the workspace from which to take input and store output. output_contigset_name - the name of the output contigset read_libraries - a list of Illumina or IonTorrent paired-end/high-quality mate-pairs/unpaired reads long_reads_libraries - a list of PacBio, Oxford Nanopore Sanger reads and/or additional contigs dna_source - the source of the DNA used for sequencing 'single_cell': DNA amplified from a single cell via MDA anything else: Standard DNA sample from multiple cells. Default value is None. pipeline_options - a list of string specifying how the SPAdes pipeline should be run kmer_sizes - (optional) K-mer sizes, Default values: 21, 33, 55, 77, 99, 127 (all values must be odd, less than 128 and listed in ascending order) In the absence of these values, K values are automatically selected. min_contig_length - integer to filter out contigs with length < min_contig_length from the HybridSPAdes output. Default value is 0 implying no filter. @optional dna_source @optional pipeline_options @optional kmer_sizes @optional min_contig_length) -> structure: parameter "workspace_name" of String, parameter "output_contigset_name" of String, parameter "reads_libraries" of list of type "ReadsParams" (parameter groups--define attributes for specifying inputs with YAML data set file (advanced) The following attributes are available: - orientation ("fr", "rf", "ff") - type ("paired-end", "mate-pairs", "hq-mate-pairs", "single", "pacbio", "nanopore", "sanger", "trusted-contigs", "untrusted-contigs") - interlaced reads (comma-separated list of files with interlaced reads) - left reads (comma-separated list of files with left reads) - right reads (comma-separated list of files with right reads) - single reads (comma-separated list of files with single reads or unpaired reads from paired library) - merged reads (comma-separated list of files with merged reads)) -> structure: parameter "lib_ref" of type "obj_ref" (An X/Y/Z style KBase object reference), parameter "orientation" of String, parameter "lib_type" of String, parameter "long_reads_libraries" of list of type "LongReadsParams" -> structure: parameter "long_reads_ref" of type "obj_ref" (An X/Y/Z style KBase object reference), parameter "long_reads_type" of String, parameter "dna_source" of String, parameter "pipeline_options" of list of String, parameter "kmer_sizes" of list of Long, parameter "min_contig_length" of Long, parameter "create_report" of type "bool" (A boolean. 0 = false, anything else = true.) :returns: instance of type "SPAdesOutput" (Output parameters for SPAdes run. report_name - the name of the KBaseReport.Report workspace object. report_ref - the workspace reference of the report.) -> structure: parameter "report_name" of String, parameter "report_ref" of String
lib/kb_SPAdes/kb_SPAdesImpl.py
run_HybridSPAdes
mclark58/kb_SPAdes
python
def run_HybridSPAdes(self, ctx, params): '\n Run HybridSPAdes on paired end libraries with PacBio CLR and Oxford Nanopore reads\n :param params: instance of type "HybridSPAdesParams" (------To run\n HybridSPAdes 3.13.0 you need at least one library of the following\n types:------ 1) Illumina paired-end/high-quality\n mate-pairs/unpaired reads 2) IonTorrent paired-end/high-quality\n mate-pairs/unpaired reads 3) PacBio CCS reads Version 3.13.0 of\n SPAdes supports paired-end reads, mate-pairs and unpaired reads.\n SPAdes can take as input several paired-end and mate-pair\n libraries simultaneously. workspace_name - the name of the\n workspace from which to take input and store output.\n output_contigset_name - the name of the output contigset\n read_libraries - a list of Illumina or IonTorrent\n paired-end/high-quality mate-pairs/unpaired reads\n long_reads_libraries - a list of PacBio, Oxford Nanopore Sanger\n reads and/or additional contigs dna_source - the source of the DNA\n used for sequencing \'single_cell\': DNA amplified from a single\n cell via MDA anything else: Standard DNA sample from multiple\n cells. Default value is None. pipeline_options - a list of string\n specifying how the SPAdes pipeline should be run kmer_sizes -\n (optional) K-mer sizes, Default values: 21, 33, 55, 77, 99, 127\n (all values must be odd, less than 128 and listed in ascending\n order) In the absence of these values, K values are automatically\n selected. min_contig_length - integer to filter out contigs with\n length < min_contig_length from the HybridSPAdes output. Default\n value is 0 implying no filter. @optional dna_source @optional\n pipeline_options @optional kmer_sizes @optional min_contig_length)\n -> structure: parameter "workspace_name" of String, parameter\n "output_contigset_name" of String, parameter "reads_libraries" of\n list of type "ReadsParams" (parameter groups--define attributes\n for specifying inputs with YAML data set file (advanced) The\n following attributes are available: - orientation ("fr", "rf",\n "ff") - type ("paired-end", "mate-pairs", "hq-mate-pairs",\n "single", "pacbio", "nanopore", "sanger", "trusted-contigs",\n "untrusted-contigs") - interlaced reads (comma-separated list of\n files with interlaced reads) - left reads (comma-separated list of\n files with left reads) - right reads (comma-separated list of\n files with right reads) - single reads (comma-separated list of\n files with single reads or unpaired reads from paired library) -\n merged reads (comma-separated list of files with merged reads)) ->\n structure: parameter "lib_ref" of type "obj_ref" (An X/Y/Z style\n KBase object reference), parameter "orientation" of String,\n parameter "lib_type" of String, parameter "long_reads_libraries"\n of list of type "LongReadsParams" -> structure: parameter\n "long_reads_ref" of type "obj_ref" (An X/Y/Z style KBase object\n reference), parameter "long_reads_type" of String, parameter\n "dna_source" of String, parameter "pipeline_options" of list of\n String, parameter "kmer_sizes" of list of Long, parameter\n "min_contig_length" of Long, parameter "create_report" of type\n "bool" (A boolean. 0 = false, anything else = true.)\n :returns: instance of type "SPAdesOutput" (Output parameters for\n SPAdes run. report_name - the name of the KBaseReport.Report\n workspace object. report_ref - the workspace reference of the\n report.) 
-> structure: parameter "report_name" of String,\n parameter "report_ref" of String\n ' self.log('Running run_HybridSPAdes with params:\n{}'.format(json.dumps(params, indent=1))) spades_assembler = SPAdesAssembler(self.cfg, ctx.provenance()) output = spades_assembler.run_hybrid_spades(params) if (not isinstance(output, dict)): raise ValueError(('Method run_HybridSPAdes return value ' + 'output is not type dict as required.')) return [output]
def run_metaSPAdes(self, ctx, params): '\n Run SPAdes on paired end libraries for metagenomes\n :param params: instance of type "SPAdesParams" (Input parameters for\n running SPAdes. workspace_name - the name of the workspace from\n which to take input and store output. output_contigset_name - the\n name of the output contigset read_libraries - a list of Illumina\n PairedEndLibrary files in FASTQ or BAM format. dna_source -\n (optional) the source of the DNA used for sequencing\n \'single_cell\': DNA amplified from a single cell via MDA anything\n else: Standard DNA sample from multiple cells. Default value is\n None. min_contig_length - (optional) integer to filter out contigs\n with length < min_contig_length from the SPAdes output. Default\n value is 0 implying no filter. kmer_sizes - (optional) K-mer\n sizes, Default values: 33, 55, 77, 99, 127 (all values must be\n odd, less than 128 and listed in ascending order) In the absence\n of these values, K values are automatically selected.\n skip_error_correction - (optional) Assembly only (No error\n correction). By default this is disabled.) -> structure: parameter\n "workspace_name" of String, parameter "output_contigset_name" of\n String, parameter "read_libraries" of list of type\n "paired_end_lib" (The workspace object name of a PairedEndLibrary\n file, whether of the KBaseAssembly or KBaseFile type.), parameter\n "dna_source" of String, parameter "min_contig_length" of Long,\n parameter "kmer_sizes" of list of Long, parameter\n "skip_error_correction" of type "bool" (A boolean. 0 = false,\n anything else = true.)\n :returns: instance of type "SPAdesOutput" (Output parameters for\n SPAdes run. report_name - the name of the KBaseReport.Report\n workspace object. report_ref - the workspace reference of the\n report.) -> structure: parameter "report_name" of String,\n parameter "report_ref" of String\n ' output = self.run_SPAdes(ctx, params)[0] if (not isinstance(output, dict)): raise ValueError(('Method run_metaSPAdes return value ' + 'output is not type dict as required.')) return [output]
1,842,031,156,406,198,000
Run SPAdes on paired end libraries for metagenomes :param params: instance of type "SPAdesParams" (Input parameters for running SPAdes. workspace_name - the name of the workspace from which to take input and store output. output_contigset_name - the name of the output contigset read_libraries - a list of Illumina PairedEndLibrary files in FASTQ or BAM format. dna_source - (optional) the source of the DNA used for sequencing 'single_cell': DNA amplified from a single cell via MDA anything else: Standard DNA sample from multiple cells. Default value is None. min_contig_length - (optional) integer to filter out contigs with length < min_contig_length from the SPAdes output. Default value is 0 implying no filter. kmer_sizes - (optional) K-mer sizes, Default values: 33, 55, 77, 99, 127 (all values must be odd, less than 128 and listed in ascending order) In the absence of these values, K values are automatically selected. skip_error_correction - (optional) Assembly only (No error correction). By default this is disabled.) -> structure: parameter "workspace_name" of String, parameter "output_contigset_name" of String, parameter "read_libraries" of list of type "paired_end_lib" (The workspace object name of a PairedEndLibrary file, whether of the KBaseAssembly or KBaseFile type.), parameter "dna_source" of String, parameter "min_contig_length" of Long, parameter "kmer_sizes" of list of Long, parameter "skip_error_correction" of type "bool" (A boolean. 0 = false, anything else = true.) :returns: instance of type "SPAdesOutput" (Output parameters for SPAdes run. report_name - the name of the KBaseReport.Report workspace object. report_ref - the workspace reference of the report.) -> structure: parameter "report_name" of String, parameter "report_ref" of String
lib/kb_SPAdes/kb_SPAdesImpl.py
run_metaSPAdes
mclark58/kb_SPAdes
python
def run_metaSPAdes(self, ctx, params): '\n Run SPAdes on paired end libraries for metagenomes\n :param params: instance of type "SPAdesParams" (Input parameters for\n running SPAdes. workspace_name - the name of the workspace from\n which to take input and store output. output_contigset_name - the\n name of the output contigset read_libraries - a list of Illumina\n PairedEndLibrary files in FASTQ or BAM format. dna_source -\n (optional) the source of the DNA used for sequencing\n \'single_cell\': DNA amplified from a single cell via MDA anything\n else: Standard DNA sample from multiple cells. Default value is\n None. min_contig_length - (optional) integer to filter out contigs\n with length < min_contig_length from the SPAdes output. Default\n value is 0 implying no filter. kmer_sizes - (optional) K-mer\n sizes, Default values: 33, 55, 77, 99, 127 (all values must be\n odd, less than 128 and listed in ascending order) In the absence\n of these values, K values are automatically selected.\n skip_error_correction - (optional) Assembly only (No error\n correction). By default this is disabled.) -> structure: parameter\n "workspace_name" of String, parameter "output_contigset_name" of\n String, parameter "read_libraries" of list of type\n "paired_end_lib" (The workspace object name of a PairedEndLibrary\n file, whether of the KBaseAssembly or KBaseFile type.), parameter\n "dna_source" of String, parameter "min_contig_length" of Long,\n parameter "kmer_sizes" of list of Long, parameter\n "skip_error_correction" of type "bool" (A boolean. 0 = false,\n anything else = true.)\n :returns: instance of type "SPAdesOutput" (Output parameters for\n SPAdes run. report_name - the name of the KBaseReport.Report\n workspace object. report_ref - the workspace reference of the\n report.) -> structure: parameter "report_name" of String,\n parameter "report_ref" of String\n ' output = self.run_SPAdes(ctx, params)[0] if (not isinstance(output, dict)): raise ValueError(('Method run_metaSPAdes return value ' + 'output is not type dict as required.')) return [output]
def init(self, var_list=None, ckpt_dir=None, ckpt_file=None, optimistic=False): '\n :param var_list: vars for restore\n :param ckpt_dir: prefix of model files.\n :param ckpt_file: exact name of model file, priority is higher than `ckpt_dir`\n :param optimistic: only restore weights of same names with model.\n :return:\n ' assert ((var_list is None) or (len(var_list) > 0)), 'invalid var_list: {}'.format(var_list) assert ((ckpt_dir is not None) or (ckpt_file is not None)), 'ckpt_dir and ckpt_file are both None' self._var_list = var_list self._restore_optimistic = optimistic if (ckpt_file is None): assert os.path.exists(ckpt_dir), ('invalid checkpoint dir: %s' % ckpt_dir) self.restore_ckpt_file = tf.train.latest_checkpoint(os.path.dirname((ckpt_dir + os.sep))) else: self.restore_ckpt_file = ckpt_file self._inited = True return self
7,315,703,408,418,164,000
:param var_list: vars to restore :param ckpt_dir: prefix (directory) of the model files. :param ckpt_file: exact name of a model file; takes priority over `ckpt_dir` :param optimistic: only restore weights whose names match the model. :return:
tensorkit/restore.py
init
nonu116/HDR-GAN
python
def init(self, var_list=None, ckpt_dir=None, ckpt_file=None, optimistic=False): '\n :param var_list: vars for restore\n :param ckpt_dir: prefix of model files.\n :param ckpt_file: exact name of model file, priority is higher than `ckpt_dir`\n :param optimistic: only restore weights of same names with model.\n :return:\n ' assert ((var_list is None) or (len(var_list) > 0)), 'invalid var_list: {}'.format(var_list) assert ((ckpt_dir is not None) or (ckpt_file is not None)), 'ckpt_dir and ckpt_file are both None' self._var_list = var_list self._restore_optimistic = optimistic if (ckpt_file is None): assert os.path.exists(ckpt_dir), ('invalid checkpoint dir: %s' % ckpt_dir) self.restore_ckpt_file = tf.train.latest_checkpoint(os.path.dirname((ckpt_dir + os.sep))) else: self.restore_ckpt_file = ckpt_file self._inited = True return self
def _restore_vars(self, sess): '\n :param sess:\n :return: boolean for successful or not\n ' if (not self._restore_optimistic): if (self.restore_ckpt_file is None): logger.warn(Color.yellow('No checkpoint file for restore vars, checkpoint file is None', bold=True)) return False self._restore_saver = tf.train.Saver(self._var_list, name='tk_restore') self._restore_saver.restore(sess, self.restore_ckpt_file) return True else: return self._optimistic_restore_model(sess)
-6,708,701,434,879,647,000
:param sess: :return: boolean indicating whether the restore succeeded
tensorkit/restore.py
_restore_vars
nonu116/HDR-GAN
python
def _restore_vars(self, sess): '\n :param sess:\n :return: boolean for successful or not\n ' if (not self._restore_optimistic): if (self.restore_ckpt_file is None): logger.warn(Color.yellow('No checkpoint file for restore vars, checkpoint file is None', bold=True)) return False self._restore_saver = tf.train.Saver(self._var_list, name='tk_restore') self._restore_saver.restore(sess, self.restore_ckpt_file) return True else: return self._optimistic_restore_model(sess)
def _optimistic_restore_model(self, sess): '\n restore weights of same names with model.\n :param sess:\n :return:\n ' if (self.restore_ckpt_file is None): logger.warn(Color.yellow('No ckpt file for restore vars, ckpt file is None')) return False reader = tf.train.NewCheckpointReader(self.restore_ckpt_file) saved_shapes = reader.get_variable_to_shape_map() if (self._var_list is None): restore_key2vars = {var.name.split(':')[0]: var for var in tf.global_variables()} elif isinstance(self._var_list, list): restore_key2vars = {var.name.split(':')[0]: var for var in self._var_list} elif isinstance(self._var_list, dict): restore_key2vars = self._var_list else: raise RuntimeError('type error {}'.format(self._var_list)) assert (len(restore_key2vars) > 0) restore_key2vars = sorted([(k, v) for (k, v) in restore_key2vars.items() if (k in saved_shapes)]) msg = [] var_list = dict() with tf.variable_scope('', reuse=True): for (key, var) in restore_key2vars: var_shape = var.get_shape().as_list() if (var_shape == saved_shapes[key]): var_list[key] = var var_name = var.name[:var.name.index(':')] msg.append(('- restoring variable: {}'.format(var_name) if (var_name == key) else '- restoring variable {} from {}'.format(var_name, key))) else: msg.append(Color.yellow('- variable({}) with inconsistent shape: {}(graph) != {}(ckpt)'.format(key, var_shape, saved_shapes[key]))) if (len(var_list) != 0): msg += ['- total variable count: {}'.format(len(var_list))] logger.info('\n'.join(msg)) saver = tf.train.Saver(var_list, name='tk_restore') saver.restore(sess, self.restore_ckpt_file) return True else: logger.warn(Color.yellow('No vars need to restore from file: {}'.format(self.restore_ckpt_file))) return False
-3,370,335,051,060,885,500
restore only the weights whose names match the model. :param sess: :return:
tensorkit/restore.py
_optimistic_restore_model
nonu116/HDR-GAN
python
def _optimistic_restore_model(self, sess): '\n restore weights of same names with model.\n :param sess:\n :return:\n ' if (self.restore_ckpt_file is None): logger.warn(Color.yellow('No ckpt file for restore vars, ckpt file is None')) return False reader = tf.train.NewCheckpointReader(self.restore_ckpt_file) saved_shapes = reader.get_variable_to_shape_map() if (self._var_list is None): restore_key2vars = {var.name.split(':')[0]: var for var in tf.global_variables()} elif isinstance(self._var_list, list): restore_key2vars = {var.name.split(':')[0]: var for var in self._var_list} elif isinstance(self._var_list, dict): restore_key2vars = self._var_list else: raise RuntimeError('type error {}'.format(self._var_list)) assert (len(restore_key2vars) > 0) restore_key2vars = sorted([(k, v) for (k, v) in restore_key2vars.items() if (k in saved_shapes)]) msg = [] var_list = dict() with tf.variable_scope('', reuse=True): for (key, var) in restore_key2vars: var_shape = var.get_shape().as_list() if (var_shape == saved_shapes[key]): var_list[key] = var var_name = var.name[:var.name.index(':')] msg.append(('- restoring variable: {}'.format(var_name) if (var_name == key) else '- restoring variable {} from {}'.format(var_name, key))) else: msg.append(Color.yellow('- variable({}) with inconsistent shape: {}(graph) != {}(ckpt)'.format(key, var_shape, saved_shapes[key]))) if (len(var_list) != 0): msg += ['- total variable count: {}'.format(len(var_list))] logger.info('\n'.join(msg)) saver = tf.train.Saver(var_list, name='tk_restore') saver.restore(sess, self.restore_ckpt_file) return True else: logger.warn(Color.yellow('No vars need to restore from file: {}'.format(self.restore_ckpt_file))) return False
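The ''optimistic'' restore above reduces to: read the checkpoint's name-to-shape map, keep only the graph variables whose name and shape both match, and restore just those. A condensed sketch assuming TensorFlow 1.x (as in the code above); the checkpoint directory is a placeholder.

```python
import tensorflow as tf  # assumes TF 1.x, matching the code above

ckpt_file = tf.train.latest_checkpoint('./checkpoints')   # placeholder directory
reader = tf.train.NewCheckpointReader(ckpt_file)
saved_shapes = reader.get_variable_to_shape_map()

# Keep only variables whose name and shape both exist in the checkpoint.
restorable = {
    var.name.split(':')[0]: var
    for var in tf.global_variables()
    if var.name.split(':')[0] in saved_shapes
    and var.get_shape().as_list() == saved_shapes[var.name.split(':')[0]]
}

saver = tf.train.Saver(restorable)
with tf.Session() as sess:
    saver.restore(sess, ckpt_file)
```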
def _type_repr(obj): 'Return the repr() of an object, special-casing types (internal helper).\n If obj is a type, we return a shorter version than the default\n type.__repr__, based on the module and qualified name, which is\n typically enough to uniquely identify a type. For everything\n else, we fall back on repr(obj).\n ' if (isinstance(obj, type) and (obj.__module__ != 'typing')): if (obj.__module__ == 'builtins'): return obj.__qualname__ return f'{obj.__module__}.{obj.__qualname__}' if (obj is ...): return '...' if isinstance(obj, types.FunctionType): return obj.__name__ return repr(obj)
7,618,330,322,038,824,000
Return the repr() of an object, special-casing types (internal helper). If obj is a type, we return a shorter version than the default type.__repr__, based on the module and qualified name, which is typically enough to uniquely identify a type. For everything else, we fall back on repr(obj).
venv/Lib/site-packages/torch/fx/node.py
_type_repr
Westlanderz/AI-Plat1
python
def _type_repr(obj): 'Return the repr() of an object, special-casing types (internal helper).\n If obj is a type, we return a shorter version than the default\n type.__repr__, based on the module and qualified name, which is\n typically enough to uniquely identify a type. For everything\n else, we fall back on repr(obj).\n ' if (isinstance(obj, type) and (obj.__module__ != 'typing')): if (obj.__module__ == 'builtins'): return obj.__qualname__ return f'{obj.__module__}.{obj.__qualname__}' if (obj is ...): return '...' if isinstance(obj, types.FunctionType): return obj.__name__ return repr(obj)
@compatibility(is_backward_compatible=True) def map_arg(a: Argument, fn: Callable[([Node], Argument)]) -> Argument: '\n Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.\n ' assert callable(fn), 'torch.fx.map_arg(a, fn): fn must be a callable' return map_aggregate(a, (lambda x: (fn(x) if isinstance(x, Node) else x)))
-5,129,645,273,626,015,000
Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
venv/Lib/site-packages/torch/fx/node.py
map_arg
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def map_arg(a: Argument, fn: Callable[([Node], Argument)]) -> Argument: '\n \n ' assert callable(fn), 'torch.fx.map_arg(a, fn): fn must be a callable' return map_aggregate(a, (lambda x: (fn(x) if isinstance(x, Node) else x)))
@compatibility(is_backward_compatible=True) def map_aggregate(a: Argument, fn: Callable[([Argument], Argument)]) -> Argument: '\n Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.\n ' if isinstance(a, tuple): return tuple((map_aggregate(elem, fn) for elem in a)) elif isinstance(a, list): return immutable_list((map_aggregate(elem, fn) for elem in a)) elif isinstance(a, dict): return immutable_dict(((k, map_aggregate(v, fn)) for (k, v) in a.items())) elif isinstance(a, slice): return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn)) else: return fn(a)
-8,735,130,783,526,883,000
Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
venv/Lib/site-packages/torch/fx/node.py
map_aggregate
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def map_aggregate(a: Argument, fn: Callable[([Argument], Argument)]) -> Argument: '\n \n ' if isinstance(a, tuple): return tuple((map_aggregate(elem, fn) for elem in a)) elif isinstance(a, list): return immutable_list((map_aggregate(elem, fn) for elem in a)) elif isinstance(a, dict): return immutable_dict(((k, map_aggregate(v, fn)) for (k, v) in a.items())) elif isinstance(a, slice): return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn)) else: return fn(a)
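For intuition, here is a plain-Python rendering of the map_aggregate recursion above, using ordinary list/dict instead of torch.fx's immutable containers; it is illustration only, not the torch.fx implementation.

```python
def map_aggregate_demo(a, fn):
    # Recurse into tuples, lists, dicts and slices; apply fn to every leaf.
    if isinstance(a, tuple):
        return tuple(map_aggregate_demo(e, fn) for e in a)
    if isinstance(a, list):
        return [map_aggregate_demo(e, fn) for e in a]
    if isinstance(a, dict):
        return {k: map_aggregate_demo(v, fn) for k, v in a.items()}
    if isinstance(a, slice):
        return slice(map_aggregate_demo(a.start, fn),
                     map_aggregate_demo(a.stop, fn),
                     map_aggregate_demo(a.step, fn))
    return fn(a)

print(map_aggregate_demo((1, [2, 3], {'k': 4}), lambda x: x * 2))
# -> (2, [4, 6], {'k': 8})
```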
@compatibility(is_backward_compatible=True) def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target', args: Tuple[('Argument', ...)], kwargs: Dict[(str, 'Argument')], return_type: Optional[Any]=None) -> None: "\n Instantiate an instance of ``Node``. Note: most often, you want to use the\n Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather\n than instantiating a ``Node`` directly.\n\n Args:\n graph (Graph): The ``Graph`` to which this ``Node`` should belong.\n\n name (str): The name to which the output of this ``Node`` should be assigned\n\n op (str): The opcode for this ``Node``. Can be one of 'placeholder',\n 'call_method', 'call_module', 'call_function', 'get_attr',\n 'output'\n\n target ('Target'): The target this op should call. See the broader\n ``Node`` docstring for more details.\n\n args (Tuple['Argument']): The args to be passed to ``target``\n\n kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``\n\n return_type (Optional[Any]): The python type expression representing the\n type of the output of this node. This field can be used for\n annotation of values in the generated code or for other types\n of analyses.\n " self.graph = graph self.name = name assert (op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']) self.op = op if (op == 'call_function'): if (not callable(target)): raise ValueError(f"Node [graph = {graph}, name = '{name}'] target {target} has type {torch.typename(target)} but a Callable is expected") elif (not isinstance(target, str)): raise ValueError(f"Node [graph = {graph}, name = '{name}'] target {target} has type {torch.typename(target)} but a str is expected") self.target = target self._input_nodes: Dict[(Node, None)] = {} self.__update_args_kwargs(map_arg(args, (lambda x: x)), map_arg(kwargs, (lambda x: x))) self.users: Dict[('Node', None)] = {} self.type: Optional[Any] = return_type self._prev = self self._next = self self._erased = False self._repr_fn: Optional[Callable[([Node], str)]] = None self._stack_trace: Optional[str] = None self.meta: Dict[(str, Any)] = {}
-269,260,086,734,934,900
Instantiate an instance of ``Node``. Note: most often, you want to use the Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather than instantiating a ``Node`` directly. Args: graph (Graph): The ``Graph`` to which this ``Node`` should belong. name (str): The name to which the output of this ``Node`` should be assigned op (str): The opcode for this ``Node``. Can be one of 'placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output' target ('Target'): The target this op should call. See the broader ``Node`` docstring for more details. args (Tuple['Argument']): The args to be passed to ``target`` kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target`` return_type (Optional[Any]): The python type expression representing the type of the output of this node. This field can be used for annotation of values in the generated code or for other types of analyses.
venv/Lib/site-packages/torch/fx/node.py
__init__
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target', args: Tuple[('Argument', ...)], kwargs: Dict[(str, 'Argument')], return_type: Optional[Any]=None) -> None: "\n Instantiate an instance of ``Node``. Note: most often, you want to use the\n Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather\n than instantiating a ``Node`` directly.\n\n Args:\n graph (Graph): The ``Graph`` to which this ``Node`` should belong.\n\n name (str): The name to which the output of this ``Node`` should be assigned\n\n op (str): The opcode for this ``Node``. Can be one of 'placeholder',\n 'call_method', 'call_module', 'call_function', 'get_attr',\n 'output'\n\n target ('Target'): The target this op should call. See the broader\n ``Node`` docstring for more details.\n\n args (Tuple['Argument']): The args to be passed to ``target``\n\n kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``\n\n return_type (Optional[Any]): The python type expression representing the\n type of the output of this node. This field can be used for\n annotation of values in the generated code or for other types\n of analyses.\n " self.graph = graph self.name = name assert (op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']) self.op = op if (op == 'call_function'): if (not callable(target)): raise ValueError(f"Node [graph = {graph}, name = '{name}'] target {target} has type {torch.typename(target)} but a Callable is expected") elif (not isinstance(target, str)): raise ValueError(f"Node [graph = {graph}, name = '{name}'] target {target} has type {torch.typename(target)} but a str is expected") self.target = target self._input_nodes: Dict[(Node, None)] = {} self.__update_args_kwargs(map_arg(args, (lambda x: x)), map_arg(kwargs, (lambda x: x))) self.users: Dict[('Node', None)] = {} self.type: Optional[Any] = return_type self._prev = self self._next = self self._erased = False self._repr_fn: Optional[Callable[([Node], str)]] = None self._stack_trace: Optional[str] = None self.meta: Dict[(str, Any)] = {}
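As the docstring above recommends, nodes are normally created through the Graph APIs rather than by calling Node's constructor directly. A minimal sketch; the multiplied function and the test tensor are arbitrary examples.

```python
import operator
import torch
import torch.fx as fx

graph = fx.Graph()
x = graph.placeholder('x')                      # op == 'placeholder'
y = graph.call_function(operator.mul, (x, x))   # op == 'call_function'
graph.output(y)                                 # op == 'output'

gm = fx.GraphModule(torch.nn.Module(), graph)   # wrap the graph into a callable module
print(gm.code)                                  # the generated forward()
print(gm(torch.tensor(3.0)))                    # tensor(9.)
```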
@property def next(self) -> 'Node': '\n Returns the next ``Node`` in the linked list of Nodes.\n\n Returns:\n\n The next ``Node`` in the linked list of Nodes.\n ' return self._next
-112,273,230,731,126,510
Returns the next ``Node`` in the linked list of Nodes. Returns: The next ``Node`` in the linked list of Nodes.
venv/Lib/site-packages/torch/fx/node.py
next
Westlanderz/AI-Plat1
python
@property def next(self) -> 'Node': '\n Returns the next ``Node`` in the linked list of Nodes.\n\n Returns:\n\n The next ``Node`` in the linked list of Nodes.\n ' return self._next
@property def prev(self) -> 'Node': '\n Returns the previous ``Node`` in the linked list of Nodes.\n\n Returns:\n\n The previous ``Node`` in the linked list of Nodes.\n ' return self._prev
-7,637,238,228,281,718,000
Returns the previous ``Node`` in the linked list of Nodes. Returns: The previous ``Node`` in the linked list of Nodes.
venv/Lib/site-packages/torch/fx/node.py
prev
Westlanderz/AI-Plat1
python
@property def prev(self) -> 'Node': '\n Returns the previous ``Node`` in the linked list of Nodes.\n\n Returns:\n\n The previous ``Node`` in the linked list of Nodes.\n ' return self._prev
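A small sketch (assumed, not from the records) of how ``next`` and ``prev`` walk the graph's doubly linked node list; the traced function is illustrative.

```python
import torch
import torch.fx

def fn(x):
    return torch.relu(x) + 1.0   # illustrative function to trace

gm = torch.fx.symbolic_trace(fn)

node = next(iter(gm.graph.nodes))   # first node in the list (the placeholder)
order = []
while True:
    order.append(node.op)
    if node.op == 'output':
        break
    node = node.next                # follow the linked list forward
print(order)          # ['placeholder', 'call_function', 'call_function', 'output']
print(node.prev.op)   # 'call_function' -- the node just before 'output'
```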
@compatibility(is_backward_compatible=True) def prepend(self, x: 'Node') -> None: '\n Insert x before this node in the list of nodes in the graph. Example::\n\n Before: p -> self\n bx -> x -> ax\n After: p -> x -> self\n bx -> ax\n\n Args:\n x (Node): The node to put before this node. Must be a member of the same graph.\n ' assert (self.graph == x.graph), 'Attempting to move a Node into a different Graph' x._remove_from_list() p = self._prev (p._next, x._prev) = (x, p) (x._next, self._prev) = (self, x)
-7,424,264,496,378,813,000
Insert x before this node in the list of nodes in the graph. Example:: Before: p -> self bx -> x -> ax After: p -> x -> self bx -> ax Args: x (Node): The node to put before this node. Must be a member of the same graph.
venv/Lib/site-packages/torch/fx/node.py
prepend
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def prepend(self, x: 'Node') -> None: '\n Insert x before this node in the list of nodes in the graph. Example::\n\n Before: p -> self\n bx -> x -> ax\n After: p -> x -> self\n bx -> ax\n\n Args:\n x (Node): The node to put before this node. Must be a member of the same graph.\n ' assert (self.graph == x.graph), 'Attempting to move a Node into a different Graph' x._remove_from_list() p = self._prev (p._next, x._prev) = (x, p) (x._next, self._prev) = (self, x)
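A hedged sketch of ``prepend`` in practice: a newly created node lands at the graph's current insert point (the end), then is moved so it executes just before an existing node. The traced function and node names are assumptions for illustration.

```python
import torch
import torch.fx

def fn(x):
    return torch.relu(x)   # illustrative function

gm = torch.fx.symbolic_trace(fn)
x = next(n for n in gm.graph.nodes if n.op == 'placeholder')
relu = next(n for n in gm.graph.nodes if n.target is torch.relu)

neg = gm.graph.call_function(torch.neg, (x,))  # created at the current insert point
relu.prepend(neg)                              # now: x -> neg -> relu -> output
gm.graph.lint()                                # check the reordered graph is well formed
```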
@compatibility(is_backward_compatible=True) def append(self, x: 'Node') -> None: '\n Insert x after this node in the list of nodes in the graph.\n Equivalent to ``self.next.prepend(x)``\n\n Args:\n x (Node): The node to put after this node. Must be a member of the same graph.\n ' self._next.prepend(x)
-1,150,084,651,612,744,200
Insert x after this node in the list of nodes in the graph. Equivalent to ``self.next.prepend(x)`` Args: x (Node): The node to put after this node. Must be a member of the same graph.
venv/Lib/site-packages/torch/fx/node.py
append
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def append(self, x: 'Node') -> None: '\n Insert x after this node in the list of nodes in the graph.\n Equivalent to ``self.next.prepend(x)``\n\n Args:\n x (Node): The node to put after this node. Must be a member of the same graph.\n ' self._next.prepend(x)
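The same pattern with ``append``, which the record notes is equivalent to ``self.next.prepend(x)`` (illustrative sketch, names assumed):

```python
import torch
import torch.fx

def fn(x):
    return torch.relu(x)

gm = torch.fx.symbolic_trace(fn)
relu = next(n for n in gm.graph.nodes if n.target is torch.relu)

sig = gm.graph.call_function(torch.sigmoid, (relu,))  # created at the end of the graph
relu.append(sig)              # move it so it runs immediately after relu
assert relu.next is sig       # same effect as relu.next.prepend(sig)
gm.graph.lint()
```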
@property def args(self) -> Tuple[(Argument, ...)]: "\n The tuple of arguments to this ``Node``. The interpretation of arguments\n depends on the node's opcode. See the :class:`Node` docstring for more\n information.\n\n Assignment to this property is allowed. All accounting of uses and users\n is updated automatically on assignment.\n " return self._args
3,899,425,412,167,228,400
The tuple of arguments to this ``Node``. The interpretation of arguments depends on the node's opcode. See the :class:`Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment.
venv/Lib/site-packages/torch/fx/node.py
args
Westlanderz/AI-Plat1
python
@property def args(self) -> Tuple[(Argument, ...)]: "\n The tuple of arguments to this ``Node``. The interpretation of arguments\n depends on the node's opcode. See the :class:`Node` docstring for more\n information.\n\n Assignment to this property is allowed. All accounting of uses and users\n is updated automatically on assignment.\n " return self._args
@args.setter def args(self, a: Tuple[(Argument, ...)]): "\n Set the tuple of arguments to this Node. The interpretation of arguments\n depends on the node's opcode. See the ``fx.Graph`` docstring for more\n information.\n " self.__update_args_kwargs(map_arg(a, (lambda x: x)), self._kwargs)
6,250,060,837,152,039,000
Set the tuple of arguments to this Node. The interpretation of arguments depends on the node's opcode. See the ``fx.Graph`` docstring for more information.
venv/Lib/site-packages/torch/fx/node.py
args
Westlanderz/AI-Plat1
python
@args.setter def args(self, a: Tuple[(Argument, ...)]): "\n Set the tuple of arguments to this Node. The interpretation of arguments\n depends on the node's opcode. See the ``fx.Graph`` docstring for more\n information.\n " self.__update_args_kwargs(map_arg(a, (lambda x: x)), self._kwargs)
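A sketch (assumed example) of reading and reassigning ``args``; reassignment goes through the same bookkeeping that keeps input and user tracking consistent.

```python
import operator
import torch
import torch.fx

def fn(x):
    return x + 1.0   # traced as call_function with target operator.add

gm = torch.fx.symbolic_trace(fn)
add = next(n for n in gm.graph.nodes if n.target is operator.add)

print(add.args)                  # (x, 1.0) -- a Node followed by a constant
add.args = (add.args[0], 2.0)    # reassign the whole tuple
gm.recompile()
print(gm(torch.zeros(2)))        # tensor([2., 2.])
```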
@property def kwargs(self) -> Dict[(str, Argument)]: "\n The dict of keyword arguments to this ``Node``. The interpretation of arguments\n depends on the node's opcode. See the :class:`Node` docstring for more\n information.\n\n Assignment to this property is allowed. All accounting of uses and users\n is updated automatically on assignment.\n " return self._kwargs
-5,066,895,608,468,566,000
The dict of keyword arguments to this ``Node``. The interpretation of arguments depends on the node's opcode. See the :class:`Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment.
venv/Lib/site-packages/torch/fx/node.py
kwargs
Westlanderz/AI-Plat1
python
@property def kwargs(self) -> Dict[(str, Argument)]: "\n The dict of keyword arguments to this ``Node``. The interpretation of arguments\n depends on the node's opcode. See the :class:`Node` docstring for more\n information.\n\n Assignment to this property is allowed. All accounting of uses and users\n is updated automatically on assignment.\n " return self._kwargs
@kwargs.setter def kwargs(self, k: Dict[(str, Argument)]): "\n Set the dict of kwargs to this Node. The interpretation of arguments\n depends on the node's opcode. See the ``fx.Graph`` docstring for more\n information.\n " self.__update_args_kwargs(self._args, map_arg(k, (lambda x: x)))
-835,094,360,972,673,800
Set the dict of kwargs to this Node. The interpretation of arguments depends on the node's opcode. See the ``fx.Graph`` docstring for more information.
venv/Lib/site-packages/torch/fx/node.py
kwargs
Westlanderz/AI-Plat1
python
@kwargs.setter def kwargs(self, k: Dict[(str, Argument)]): "\n Set the dict of kwargs to this Node. The interpretation of arguments\n depends on the node's opcode. See the ``fx.Graph`` docstring for more\n information.\n " self.__update_args_kwargs(self._args, map_arg(k, (lambda x: x)))
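An analogous sketch for ``kwargs`` (the traced function is an assumption): assignment replaces the whole dict.

```python
import torch
import torch.fx

def fn(x):
    return torch.sum(x, dim=0)

gm = torch.fx.symbolic_trace(fn)
s = next(n for n in gm.graph.nodes if n.target is torch.sum)

print(s.kwargs)                            # {'dim': 0}
s.kwargs = {**s.kwargs, 'keepdim': True}   # replace the dict; use/user tracking follows
gm.recompile()
```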
@property def all_input_nodes(self) -> List['Node']: '\n Return all Nodes that are inputs to this Node. This is equivalent to\n iterating over ``args`` and ``kwargs`` and only collecting the values that\n are Nodes.\n\n Returns:\n\n List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this\n ``Node``, in that order.\n ' return list(self._input_nodes.keys())
-7,689,755,375,074,671,000
Return all Nodes that are inputs to this Node. This is equivalent to iterating over ``args`` and ``kwargs`` and only collecting the values that are Nodes. Returns: List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this ``Node``, in that order.
venv/Lib/site-packages/torch/fx/node.py
all_input_nodes
Westlanderz/AI-Plat1
python
@property def all_input_nodes(self) -> List['Node']: '\n Return all Nodes that are inputs to this Node. This is equivalent to\n iterating over ``args`` and ``kwargs`` and only collecting the values that\n are Nodes.\n\n Returns:\n\n List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this\n ``Node``, in that order.\n ' return list(self._input_nodes.keys())
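Illustrative sketch: ``all_input_nodes`` collects the ``Node`` values found anywhere in ``args`` and ``kwargs``, including inside containers.

```python
import torch
import torch.fx

def fn(x, y):
    return torch.cat([x, y], dim=0)

gm = torch.fx.symbolic_trace(fn)
cat = next(n for n in gm.graph.nodes if n.target is torch.cat)

print(cat.args)             # ([x, y],) -- the Nodes are nested inside a list
print(cat.all_input_nodes)  # [x, y]    -- flattened out of args/kwargs
```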
@compatibility(is_backward_compatible=True) def update_arg(self, idx: int, arg: Argument) -> None: '\n Update an existing positional argument to contain the new value\n ``arg``. After calling, ``self.args[idx] == arg``.\n\n Args:\n\n idx (int): The index into ``self.args`` of the element to update\n arg (Argument): The new argument value to write into ``args``\n ' args = list(self.args) args[idx] = arg self.args = tuple(args)
-6,276,467,114,808,523,000
Update an existing positional argument to contain the new value ``arg``. After calling, ``self.args[idx] == arg``. Args: idx (int): The index into ``self.args`` of the element to update arg (Argument): The new argument value to write into ``args``
venv/Lib/site-packages/torch/fx/node.py
update_arg
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def update_arg(self, idx: int, arg: Argument) -> None: '\n Update an existing positional argument to contain the new value\n ``arg``. After calling, ``self.args[idx] == arg``.\n\n Args:\n\n idx (int): The index into ``self.args`` of the element to update\n arg (Argument): The new argument value to write into ``args``\n ' args = list(self.args) args[idx] = arg self.args = tuple(args)
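A short sketch of ``update_arg`` (traced function assumed): it rewrites a single positional argument without touching the rest of ``args``.

```python
import operator
import torch
import torch.fx

def fn(x):
    return x + 1.0

gm = torch.fx.symbolic_trace(fn)
add = next(n for n in gm.graph.nodes if n.target is operator.add)

add.update_arg(1, 3.0)        # afterwards add.args[1] == 3.0
gm.recompile()
print(gm(torch.zeros(2)))     # tensor([3., 3.])
```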
@compatibility(is_backward_compatible=True) def update_kwarg(self, key: str, arg: Argument) -> None: '\n Update an existing keyword argument to contain the new value\n ``arg``. After calling, ``self.kwargs[key] == arg``.\n\n Args:\n\n key (str): The key in ``self.kwargs`` of the element to update\n arg (Argument): The new argument value to write into ``kwargs``\n ' kwargs = dict(self.kwargs) kwargs[key] = arg self.kwargs = kwargs
265,324,208,968,271,550
Update an existing keyword argument to contain the new value ``arg``. After calling, ``self.kwargs[key] == arg``. Args: key (str): The key in ``self.kwargs`` of the element to update arg (Argument): The new argument value to write into ``kwargs``
venv/Lib/site-packages/torch/fx/node.py
update_kwarg
Westlanderz/AI-Plat1
python
@compatibility(is_backward_compatible=True) def update_kwarg(self, key: str, arg: Argument) -> None: '\n Update an existing keyword argument to contain the new value\n ``arg``. After calling, ``self.kwargs[key] == arg``.\n\n Args:\n\n key (str): The key in ``self.kwargs`` of the element to update\n arg (Argument): The new argument value to write into ``kwargs``\n ' kwargs = dict(self.kwargs) kwargs[key] = arg self.kwargs = kwargs
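And the keyword-argument counterpart, ``update_kwarg``, which changes one key in place (illustrative assumptions as above):

```python
import torch
import torch.fx

def fn(x):
    return torch.sum(x, dim=0)

gm = torch.fx.symbolic_trace(fn)
s = next(n for n in gm.graph.nodes if n.target is torch.sum)

s.update_kwarg('dim', 1)      # afterwards s.kwargs['dim'] == 1
gm.recompile()
print(gm(torch.ones(2, 3)))   # tensor([3., 3.]) -- now summed over dim 1
```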
@property def stack_trace(self) -> Optional[str]: '\n Return the Python stack trace that was recorded during tracing, if any.\n This property is usually populated by `Tracer.create_proxy`. To record\n stack traces during tracing for debug purposes, set\n `record_stack_traces = True` on the `Tracer` instance.\n ' return self._stack_trace
-4,988,679,728,696,897,000
Return the Python stack trace that was recorded during tracing, if any. This property is usually populated by `Tracer.create_proxy`. To record stack traces during tracing for debug purposes, set `record_stack_traces = True` on the `Tracer` instance.
venv/Lib/site-packages/torch/fx/node.py
stack_trace
Westlanderz/AI-Plat1
python
@property def stack_trace(self) -> Optional[str]: '\n Return the Python stack trace that was recorded during tracing, if any.\n This property is usually populated by `Tracer.create_proxy`. To record\n stack traces during tracing for debug purposes, set\n `record_stack_traces = True` on the `Tracer` instance.\n ' return self._stack_trace
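A sketch of opting in to stack-trace recording, following the docstring's instruction to set ``record_stack_traces = True`` on the ``Tracer`` instance (the traced function is illustrative).

```python
import torch
import torch.fx

def fn(x):
    return torch.relu(x)

tracer = torch.fx.Tracer()
tracer.record_stack_traces = True   # opt in before tracing
graph = tracer.trace(fn)

for node in graph.nodes:
    if node.op == 'call_function':
        print(node.stack_trace)     # frames captured while tracing this node
```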
def __update_args_kwargs(self, new_args: Tuple[('Argument', ...)], new_kwargs: Dict[(str, 'Argument')]): '\n This API is internal. Do *not* call it directly.\n ' self._args = new_args self._kwargs = new_kwargs for old_use in self._input_nodes.keys(): old_use.users.pop(self) self._input_nodes = {} map_arg(self._args, (lambda n: self._input_nodes.setdefault(n))) map_arg(self._kwargs, (lambda n: self._input_nodes.setdefault(n))) for new_use in self._input_nodes.keys(): new_use.users.setdefault(self)
-4,245,011,480,387,905,500
This API is internal. Do *not* call it directly.
venv/Lib/site-packages/torch/fx/node.py
__update_args_kwargs
Westlanderz/AI-Plat1
python
def __update_args_kwargs(self, new_args: Tuple[('Argument', ...)], new_kwargs: Dict[(str, 'Argument')]): '\n \n ' self._args = new_args self._kwargs = new_kwargs for old_use in self._input_nodes.keys(): old_use.users.pop(self) self._input_nodes = {} map_arg(self._args, (lambda n: self._input_nodes.setdefault(n))) map_arg(self._kwargs, (lambda n: self._input_nodes.setdefault(n))) for new_use in self._input_nodes.keys(): new_use.users.setdefault(self)