Dataset columns (as reported by the dataset viewer; ranges are min to max):

column                  type    range
body                    string  length 26 to 98.2k
body_hash               int64   -9,222,864,604,528,158,000 to 9,221,803,474B
docstring               string  length 1 to 16.8k
path                    string  length 5 to 230
name                    string  length 1 to 96
repository_name         string  length 7 to 89
lang                    string  1 class
body_without_docstring  string  length 20 to 98.2k
def test_align_no_blank_columns(self):
    'correctly handle a file with no white space at line starts'
    parser = MinimalNexusAlignParser('data/nexus_aa.nxs')
    seqs = {n: s for (n, s) in parser}
    self.assertEqual(len(seqs), 10)
    lengths = set(len(seqs[n]) for n in seqs)
    self.assertEqual(lengths, {234})
7,375,363,045,367,535,000
correctly handle a file with no white space at line starts
tests/test_parse/test_nexus.py
test_align_no_blank_columns
tla256/cogent3
python
def test_align_no_blank_columns(self):
    parser = MinimalNexusAlignParser('data/nexus_aa.nxs')
    seqs = {n: s for (n, s) in parser}
    self.assertEqual(len(seqs), 10)
    lengths = set(len(seqs[n]) for n in seqs)
    self.assertEqual(lengths, {234})
def test_load_seqs_interface(self):
    'load_aligned_seqs correctly loads nexus alignments'
    aln = load_aligned_seqs('data/nexus_mixed.nex')
    self.assertEqual(aln.num_seqs, 4)
    self.assertEqual(len(aln), 20)
    aln = load_aligned_seqs('data/nexus_aa.nxs')
    self.assertEqual(aln.num_seqs, 10)
    self.assertEqual(len(aln), 234)
5,215,797,518,133,954,000
load_aligned_seqs correctly loads nexus alignments
tests/test_parse/test_nexus.py
test_load_seqs_interface
tla256/cogent3
python
def test_load_seqs_interface(self):
    aln = load_aligned_seqs('data/nexus_mixed.nex')
    self.assertEqual(aln.num_seqs, 4)
    self.assertEqual(len(aln), 20)
    aln = load_aligned_seqs('data/nexus_aa.nxs')
    self.assertEqual(aln.num_seqs, 10)
    self.assertEqual(len(aln), 234)
def _set_cdc_on_table(session, table_name, value, ks_name=None):
    """
    Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
    """
    table_string = ks_name + '.' + table_name if ks_name else table_name
    value_string = 'true' if value else 'false'
    stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
    logger.debug(stmt)
    session.execute(stmt)
3,866,652,390,675,072,000
Uses <session> to set CDC to <value> on <ks_name>.<table_name>.
cdc_test.py
_set_cdc_on_table
Ankou76ers/cassandra-dtest
python
def _set_cdc_on_table(session, table_name, value, ks_name=None):
    table_string = ks_name + '.' + table_name if ks_name else table_name
    value_string = 'true' if value else 'false'
    stmt = 'ALTER TABLE ' + table_string + ' WITH CDC = ' + value_string
    logger.debug(stmt)
    session.execute(stmt)
def _get_set_cdc_func(session, ks_name, table_name):
    """
    Close over a session, keyspace name, and table name and return a function
    that enables CDC on that keyspace if its argument is truthy and
    otherwise disables it.
    """
    def set_cdc(value):
        return _set_cdc_on_table(session=session, ks_name=ks_name,
                                 table_name=table_name, value=value)
    return set_cdc
4,794,727,932,145,934,000
Close over a session, keyspace name, and table name and return a function that enables CDC on that keyspace if its argument is truthy and otherwise disables it.
cdc_test.py
_get_set_cdc_func
Ankou76ers/cassandra-dtest
python
def _get_set_cdc_func(session, ks_name, table_name):
    """
    Close over a session, keyspace name, and table name and return a function
    that enables CDC on that keyspace if its argument is truthy and
    otherwise disables it.
    """
    def set_cdc(value):
        return _set_cdc_on_table(session=session, ks_name=ks_name,
                                 table_name=table_name, value=value)
    return set_cdc
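The function above returns a closure so a test can toggle CDC repeatedly without re-passing the session and names. A minimal usage sketch (hypothetical values; session is assumed to be an open CQL session from the test harness):

# Hypothetical usage of the closure returned by _get_set_cdc_func.
set_cdc = _get_set_cdc_func(session=session, ks_name='ks', table_name='tab')
set_cdc(True)   # runs: ALTER TABLE ks.tab WITH CDC = true
set_cdc(False)  # runs: ALTER TABLE ks.tab WITH CDC = false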
def _create_temp_dir(self, dir_name, verbose=True):
    """
    Create a directory that will be deleted when this test class is torn
    down.
    """
    if verbose:
        logger.debug('creating ' + dir_name)
    try:
        os.mkdir(dir_name)
    except OSError as e:
        if e.errno == errno.EEXIST:
            logger.debug(dir_name + ' already exists. removing and recreating.')
            shutil.rmtree(dir_name)
            os.mkdir(dir_name)
        else:
            raise e

    def debug_and_rmtree():
        shutil.rmtree(dir_name)
        logger.debug(dir_name + ' removed')

    self.addCleanup(debug_and_rmtree)
-3,564,366,202,013,176,300
Create a directory that will be deleted when this test class is torn down.
cdc_test.py
_create_temp_dir
Ankou76ers/cassandra-dtest
python
def _create_temp_dir(self, dir_name, verbose=True):
    """
    Create a directory that will be deleted when this test class is torn
    down.
    """
    if verbose:
        logger.debug('creating ' + dir_name)
    try:
        os.mkdir(dir_name)
    except OSError as e:
        if e.errno == errno.EEXIST:
            logger.debug(dir_name + ' already exists. removing and recreating.')
            shutil.rmtree(dir_name)
            os.mkdir(dir_name)
        else:
            raise e

    def debug_and_rmtree():
        shutil.rmtree(dir_name)
        logger.debug(dir_name + ' removed')

    self.addCleanup(debug_and_rmtree)
def prepare(self, ks_name, table_name=None, cdc_enabled_table=None,
            gc_grace_seconds=None, column_spec=None,
            configuration_overrides=None, table_id=None):
    """
    Create a 1-node cluster, start it, create a keyspace, and if
    <table_name>, create a table in that keyspace. If <cdc_enabled_table>,
    that table is created with CDC enabled. If <column_spec>, use that
    string to specify the schema of the table -- for example, a valid value
    is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
    treated as a dict-like object and passed to
    self.cluster.set_configuration_options.
    """
    config_defaults = {'cdc_enabled': True, 'commitlog_segment_size_in_mb': 2}
    if configuration_overrides is None:
        configuration_overrides = {}
    self.cluster.populate(1)
    self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
    self.cluster.start()
    node = self.cluster.nodelist()[0]
    session = self.patient_cql_connection(node)
    create_ks(session, ks_name, rf=1)
    if table_name is not None:
        assert cdc_enabled_table is not None, 'if creating a table in prepare, must specify whether or not CDC is enabled on it'
        assert column_spec is not None, 'if creating a table in prepare, must specify its schema'
        options = {}
        if gc_grace_seconds is not None:
            options['gc_grace_seconds'] = gc_grace_seconds
        if table_id is not None:
            options['id'] = table_id
        if cdc_enabled_table:
            options['cdc'] = 'true'
        stmt = _get_create_table_statement(ks_name, table_name, column_spec, options=options)
        logger.debug(stmt)
        session.execute(stmt)
    return (node, session)
5,382,586,513,136,337,000
Create a 1-node cluster, start it, create a keyspace, and if <table_name>, create a table in that keyspace. If <cdc_enabled_table>, that table is created with CDC enabled. If <column_spec>, use that string to specify the schema of the table -- for example, a valid value is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is treated as a dict-like object and passed to self.cluster.set_configuration_options.
cdc_test.py
prepare
Ankou76ers/cassandra-dtest
python
def prepare(self, ks_name, table_name=None, cdc_enabled_table=None,
            gc_grace_seconds=None, column_spec=None,
            configuration_overrides=None, table_id=None):
    """
    Create a 1-node cluster, start it, create a keyspace, and if
    <table_name>, create a table in that keyspace. If <cdc_enabled_table>,
    that table is created with CDC enabled. If <column_spec>, use that
    string to specify the schema of the table -- for example, a valid value
    is 'a int PRIMARY KEY, b int'. The <configuration_overrides> is
    treated as a dict-like object and passed to
    self.cluster.set_configuration_options.
    """
    config_defaults = {'cdc_enabled': True, 'commitlog_segment_size_in_mb': 2}
    if configuration_overrides is None:
        configuration_overrides = {}
    self.cluster.populate(1)
    self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
    self.cluster.start()
    node = self.cluster.nodelist()[0]
    session = self.patient_cql_connection(node)
    create_ks(session, ks_name, rf=1)
    if table_name is not None:
        assert cdc_enabled_table is not None, 'if creating a table in prepare, must specify whether or not CDC is enabled on it'
        assert column_spec is not None, 'if creating a table in prepare, must specify its schema'
        options = {}
        if gc_grace_seconds is not None:
            options['gc_grace_seconds'] = gc_grace_seconds
        if table_id is not None:
            options['id'] = table_id
        if cdc_enabled_table:
            options['cdc'] = 'true'
        stmt = _get_create_table_statement(ks_name, table_name, column_spec, options=options)
        logger.debug(stmt)
        session.execute(stmt)
    return (node, session)
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
    """
    Parameterized test asserting that data written to a table is still
    readable after flipping the CDC flag on that table, then flipping it
    again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
    starts with it disabled.
    """
    (ks_name, table_name) = ('ks', 'tab')
    sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
    (start_enabled, alter_path) = (sequence[0], list(sequence[1:]))
    (node, session) = self.prepare(ks_name=ks_name, table_name=table_name,
                                   cdc_enabled_table=start_enabled,
                                   column_spec='a int PRIMARY KEY, b int')
    set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
    insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
    start = 0
    stop = 1000
    step = 1
    data = [(n, min(n + step, stop)) for n in range(start, stop, step)]
    execute_concurrent_with_args(session, insert_stmt, data)
    assert [] == list(node.get_sstables(ks_name, table_name))
    for enable in alter_path:
        set_cdc(enable)
        assert_resultset_contains(session.execute('SELECT * FROM ' + table_name), data)
-7,486,889,407,550,058,000
Parameterized test asserting that data written to a table is still readable after flipping the CDC flag on that table, then flipping it again. Starts with CDC enabled if start_with_cdc_enabled, otherwise starts with it disabled.
cdc_test.py
_assert_cdc_data_readable_on_round_trip
Ankou76ers/cassandra-dtest
python
def _assert_cdc_data_readable_on_round_trip(self, start_with_cdc_enabled):
    """
    Parameterized test asserting that data written to a table is still
    readable after flipping the CDC flag on that table, then flipping it
    again. Starts with CDC enabled if start_with_cdc_enabled, otherwise
    starts with it disabled.
    """
    (ks_name, table_name) = ('ks', 'tab')
    sequence = [True, False, True] if start_with_cdc_enabled else [False, True, False]
    (start_enabled, alter_path) = (sequence[0], list(sequence[1:]))
    (node, session) = self.prepare(ks_name=ks_name, table_name=table_name,
                                   cdc_enabled_table=start_enabled,
                                   column_spec='a int PRIMARY KEY, b int')
    set_cdc = _get_set_cdc_func(session=session, ks_name=ks_name, table_name=table_name)
    insert_stmt = session.prepare('INSERT INTO ' + table_name + ' (a, b) VALUES (?, ?)')
    start = 0
    stop = 1000
    step = 1
    data = [(n, min(n + step, stop)) for n in range(start, stop, step)]
    execute_concurrent_with_args(session, insert_stmt, data)
    assert [] == list(node.get_sstables(ks_name, table_name))
    for enable in alter_path:
        set_cdc(enable)
        assert_resultset_contains(session.execute('SELECT * FROM ' + table_name), data)
def test_cdc_enabled_data_readable_on_round_trip(self):
    """
    Test that data is readable after an enabled->disabled->enabled round
    trip.
    """
    self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
8,841,384,700,411,847,000
Test that data is readable after an enabled->disabled->enabled round trip.
cdc_test.py
test_cdc_enabled_data_readable_on_round_trip
Ankou76ers/cassandra-dtest
python
def test_cdc_enabled_data_readable_on_round_trip(self):
    """
    Test that data is readable after an enabled->disabled->enabled round
    trip.
    """
    self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=True)
def test_cdc_disabled_data_readable_on_round_trip(self):
    """
    Test that data is readable after a disabled->enabled->disabled round
    trip.
    """
    self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
6,506,598,792,747,785,000
Test that data is readable after a disabled->enabled->disabled round trip.
cdc_test.py
test_cdc_disabled_data_readable_on_round_trip
Ankou76ers/cassandra-dtest
python
def test_cdc_disabled_data_readable_on_round_trip(self):
    """
    Test that data is readable after a disabled->enabled->disabled round
    trip.
    """
    self._assert_cdc_data_readable_on_round_trip(start_with_cdc_enabled=False)
def test_non_cdc_segments_deleted_after_replay(self):
    """
    Test that non-cdc segment files generated in previous runs are deleted
    after replay.
    """
    (ks_name, table_name) = ('ks', 'tab')
    (node, session) = self.prepare(ks_name=ks_name, table_name=table_name,
                                   cdc_enabled_table=True,
                                   column_spec='a int PRIMARY KEY, b int')
    old_files = _get_cdc_raw_files(node.get_path())
    node.drain()
    session.cluster.shutdown()
    node.stop()
    node.start(wait_for_binary_proto=True)
    new_files = _get_cdc_raw_files(node.get_path())
    assert len(old_files.intersection(new_files)) == 0
-2,039,349,003,963,691,000
Test that non-cdc segment files generated in previous runs are deleted after replay.
cdc_test.py
test_non_cdc_segments_deleted_after_replay
Ankou76ers/cassandra-dtest
python
def test_non_cdc_segments_deleted_after_replay(self):
    """
    Test that non-cdc segment files generated in previous runs are deleted
    after replay.
    """
    (ks_name, table_name) = ('ks', 'tab')
    (node, session) = self.prepare(ks_name=ks_name, table_name=table_name,
                                   cdc_enabled_table=True,
                                   column_spec='a int PRIMARY KEY, b int')
    old_files = _get_cdc_raw_files(node.get_path())
    node.drain()
    session.cluster.shutdown()
    node.stop()
    node.start(wait_for_binary_proto=True)
    new_files = _get_cdc_raw_files(node.get_path())
    assert len(old_files.intersection(new_files)) == 0
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
    """
    Test that C* behaves correctly when CDC tables have consumed all the
    space available to them. In particular: after writing
    cdc_total_space_in_mb MB into CDC commitlogs:
    - CDC writes are rejected
    - non-CDC writes are accepted
    - on flush, CDC commitlogs are copied to cdc_raw
    - on flush, non-CDC commitlogs are not copied to cdc_raw
    This is a lot of behavior to validate in one test, but we do so to
    avoid running multiple tests that each write 1MB of data to fill
    cdc_total_space_in_mb.
    """
    ks_name = 'ks'
    full_cdc_table_info = TableInfo(ks_name=ks_name, table_name='full_cdc_tab',
                                    column_spec=_16_uuid_column_spec,
                                    insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
                                    options={'cdc': 'true'})
    configuration_overrides = {'cdc_total_space_in_mb': 4}
    (node, session) = self.prepare(ks_name=ks_name, configuration_overrides=configuration_overrides)
    session.execute(full_cdc_table_info.create_stmt)
    non_cdc_table_info = TableInfo(ks_name=ks_name, table_name='non_cdc_tab',
                                   column_spec=_16_uuid_column_spec,
                                   insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab'))
    session.execute(non_cdc_table_info.create_stmt)
    empty_cdc_table_info = TableInfo(ks_name=ks_name, table_name='empty_cdc_tab',
                                     column_spec=_16_uuid_column_spec,
                                     insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
                                     options={'cdc': 'true'})
    session.execute(empty_cdc_table_info.create_stmt)
    logger.debug('flushing non-CDC commitlogs')
    node.flush()
    logger.debug('beginning data insert to fill CDC commitlogs')
    rows_loaded = _write_to_cdc_write_failure(session, full_cdc_table_info.insert_stmt)
    assert 0 < rows_loaded, 'No CDC rows inserted. This may happen when cdc_total_space_in_mb > commitlog_segment_size_in_mb'
    commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
    commitlogs_size = size_of_files_in_dir(commitlog_dir)
    logger.debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
    try:
        session.execute(full_cdc_table_info.insert_stmt)
        raise Exception('WriteFailure expected')
    except WriteFailure:
        pass
    try:
        session.execute(empty_cdc_table_info.insert_stmt)
        raise Exception('WriteFailure expected')
    except WriteFailure:
        pass
    node.drain()
    session.cluster.shutdown()
    node.stop()
    node.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node)
    pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
    before_cdc_state = []
    if self.cluster.version() >= '4.0':
        node1_path = os.path.join(node.get_path(), 'cdc_raw')
        before_cdc_state = [ReplayData.load(node1_path, name)
                            for name in os.listdir(node1_path) if name.endswith('_cdc.idx')]
    pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
    non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
    session.execute(non_cdc_prepared_insert, ())
    (start, time_limit) = (time.time(), 600)
    rate_limited_debug = get_rate_limited_function(logger.debug, 5)
    logger.debug('writing to non-cdc table')
    while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
        elapsed = time.time() - start
        rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
        assert elapsed <= time_limit, "It's been over a {s}s and we haven't written a new commitlog segment. Something is wrong.".format(s=time_limit)
        execute_concurrent(session, ((non_cdc_prepared_insert, ()) for _ in range(1000)),
                           concurrency=500, raise_on_first_error=True)
    node.drain()
    session.cluster.shutdown()
    if self.cluster.version() < '4.0':
        assert pre_non_cdc_write_cdc_raw_segments == _get_cdc_raw_files(node.get_path())
    else:
        node2_path = os.path.join(node.get_path(), 'cdc_raw')
        after_cdc_state = [ReplayData.load(node2_path, name)
                           for name in os.listdir(node2_path) if name.endswith('_cdc.idx')]
        found = True
        for idx in before_cdc_state:
            idx_found = False
            for idx_two in after_cdc_state:
                if compare_replay_data(idx, idx_two):
                    idx_found = True
            if not idx_found:
                found = False
                break
        if not found:
            self._fail_and_print_sets(before_cdc_state, after_cdc_state,
                                      'Found CDC index in before not matched in after (non-CDC write test)')
        orphan_found = False
        for idx_two in after_cdc_state:
            index_found = False
            for idx in before_cdc_state:
                if compare_replay_data(idx_two, idx):
                    index_found = True
            if not index_found:
                orphan_found = True
                break
        if orphan_found:
            self._fail_and_print_sets(before_cdc_state, after_cdc_state,
                                      'Found orphaned index file in after CDC state not in former.')
6,291,607,521,855,861,000
Test that C* behaves correctly when CDC tables have consumed all the space available to them. In particular: after writing cdc_total_space_in_mb MB into CDC commitlogs:
- CDC writes are rejected
- non-CDC writes are accepted
- on flush, CDC commitlogs are copied to cdc_raw
- on flush, non-CDC commitlogs are not copied to cdc_raw
This is a lot of behavior to validate in one test, but we do so to avoid running multiple tests that each write 1MB of data to fill cdc_total_space_in_mb.
cdc_test.py
test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space
Ankou76ers/cassandra-dtest
python
def test_insertion_and_commitlog_behavior_after_reaching_cdc_total_space(self):
    """
    Test that C* behaves correctly when CDC tables have consumed all the
    space available to them. In particular: after writing
    cdc_total_space_in_mb MB into CDC commitlogs:
    - CDC writes are rejected
    - non-CDC writes are accepted
    - on flush, CDC commitlogs are copied to cdc_raw
    - on flush, non-CDC commitlogs are not copied to cdc_raw
    This is a lot of behavior to validate in one test, but we do so to
    avoid running multiple tests that each write 1MB of data to fill
    cdc_total_space_in_mb.
    """
    ks_name = 'ks'
    full_cdc_table_info = TableInfo(ks_name=ks_name, table_name='full_cdc_tab',
                                    column_spec=_16_uuid_column_spec,
                                    insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'full_cdc_tab'),
                                    options={'cdc': 'true'})
    configuration_overrides = {'cdc_total_space_in_mb': 4}
    (node, session) = self.prepare(ks_name=ks_name, configuration_overrides=configuration_overrides)
    session.execute(full_cdc_table_info.create_stmt)
    non_cdc_table_info = TableInfo(ks_name=ks_name, table_name='non_cdc_tab',
                                   column_spec=_16_uuid_column_spec,
                                   insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'non_cdc_tab'))
    session.execute(non_cdc_table_info.create_stmt)
    empty_cdc_table_info = TableInfo(ks_name=ks_name, table_name='empty_cdc_tab',
                                     column_spec=_16_uuid_column_spec,
                                     insert_stmt=_get_16_uuid_insert_stmt(ks_name, 'empty_cdc_tab'),
                                     options={'cdc': 'true'})
    session.execute(empty_cdc_table_info.create_stmt)
    logger.debug('flushing non-CDC commitlogs')
    node.flush()
    logger.debug('beginning data insert to fill CDC commitlogs')
    rows_loaded = _write_to_cdc_write_failure(session, full_cdc_table_info.insert_stmt)
    assert 0 < rows_loaded, 'No CDC rows inserted. This may happen when cdc_total_space_in_mb > commitlog_segment_size_in_mb'
    commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
    commitlogs_size = size_of_files_in_dir(commitlog_dir)
    logger.debug('Commitlog dir ({d}) is {b}B'.format(d=commitlog_dir, b=commitlogs_size))
    try:
        session.execute(full_cdc_table_info.insert_stmt)
        raise Exception('WriteFailure expected')
    except WriteFailure:
        pass
    try:
        session.execute(empty_cdc_table_info.insert_stmt)
        raise Exception('WriteFailure expected')
    except WriteFailure:
        pass
    node.drain()
    session.cluster.shutdown()
    node.stop()
    node.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node)
    pre_non_cdc_write_cdc_raw_segments = _get_cdc_raw_files(node.get_path())
    before_cdc_state = []
    if self.cluster.version() >= '4.0':
        node1_path = os.path.join(node.get_path(), 'cdc_raw')
        before_cdc_state = [ReplayData.load(node1_path, name)
                            for name in os.listdir(node1_path) if name.endswith('_cdc.idx')]
    pre_non_cdc_write_segments = _get_commitlog_files(node.get_path())
    non_cdc_prepared_insert = session.prepare(non_cdc_table_info.insert_stmt)
    session.execute(non_cdc_prepared_insert, ())
    (start, time_limit) = (time.time(), 600)
    rate_limited_debug = get_rate_limited_function(logger.debug, 5)
    logger.debug('writing to non-cdc table')
    while _get_commitlog_files(node.get_path()) <= pre_non_cdc_write_segments:
        elapsed = time.time() - start
        rate_limited_debug(' non-cdc load step has lasted {s:.2f}s'.format(s=elapsed))
        assert elapsed <= time_limit, "It's been over a {s}s and we haven't written a new commitlog segment. Something is wrong.".format(s=time_limit)
        execute_concurrent(session, ((non_cdc_prepared_insert, ()) for _ in range(1000)),
                           concurrency=500, raise_on_first_error=True)
    node.drain()
    session.cluster.shutdown()
    if self.cluster.version() < '4.0':
        assert pre_non_cdc_write_cdc_raw_segments == _get_cdc_raw_files(node.get_path())
    else:
        node2_path = os.path.join(node.get_path(), 'cdc_raw')
        after_cdc_state = [ReplayData.load(node2_path, name)
                           for name in os.listdir(node2_path) if name.endswith('_cdc.idx')]
        found = True
        for idx in before_cdc_state:
            idx_found = False
            for idx_two in after_cdc_state:
                if compare_replay_data(idx, idx_two):
                    idx_found = True
            if not idx_found:
                found = False
                break
        if not found:
            self._fail_and_print_sets(before_cdc_state, after_cdc_state,
                                      'Found CDC index in before not matched in after (non-CDC write test)')
        orphan_found = False
        for idx_two in after_cdc_state:
            index_found = False
            for idx in before_cdc_state:
                if compare_replay_data(idx_two, idx):
                    index_found = True
            if not index_found:
                orphan_found = True
                break
        if orphan_found:
            self._fail_and_print_sets(before_cdc_state, after_cdc_state,
                                      'Found orphaned index file in after CDC state not in former.')
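The two try/raise/except blocks in the test above assert that an insert fails with WriteFailure. Since this suite runs under pytest, the same check could be written with pytest.raises; a sketch under that assumption, reusing the objects from the test:

import pytest

# Equivalent to the try/raise/except pattern above.
with pytest.raises(WriteFailure):
    session.execute(full_cdc_table_info.insert_stmt)
with pytest.raises(WriteFailure):
    session.execute(empty_cdc_table_info.insert_stmt)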
def logger(self):
    'Set up logging and return the logger'
    logging.config.dictConfig(self.logging)
    mylogger = getLogger('hazelsync')
    mylogger.debug('Logger initialized')
    return mylogger
3,415,069,736,334,545,000
Set up logging and return the logger
hazelsync/settings.py
logger
Japannext/hazelsync
python
def logger(self):
    logging.config.dictConfig(self.logging)
    mylogger = getLogger('hazelsync')
    mylogger.debug('Logger initialized')
    return mylogger
def job(self, job_type: str) -> dict:
    'Return defaults for a job type'
    return self.job_options.get(job_type, {})
-2,602,378,712,164,210,000
Return defaults for a job type
hazelsync/settings.py
job
Japannext/hazelsync
python
def job(self, job_type: str) -> dict:
    return self.job_options.get(job_type, {})
def backend(self, backend_type: str) -> dict:
    'Return defaults for a backend type'
    return self.backend_options.get(backend_type, {})
4,074,459,888,001,224,700
Return defaults for a backend type
hazelsync/settings.py
backend
Japannext/hazelsync
python
def backend(self, backend_type: str) -> dict:
    return self.backend_options.get(backend_type, {})
@staticmethod
def list() -> dict:
    'List the backup clusters found in the settings'
    settings = {}
    for path in ClusterSettings.directory.glob('*.yaml'):
        cluster = path.stem
        settings[cluster] = {'path': path}
        try:
            settings[cluster]['config_status'] = 'success'
        except KeyError as err:
            log.error(err)
            settings[cluster]['config'] = {}
            settings[cluster]['config_status'] = 'failure'
    return settings
863,354,750,726,884,600
List the backup clusters found in the settings
hazelsync/settings.py
list
Japannext/hazelsync
python
@staticmethod
def list() -> dict:
    settings = {}
    for path in ClusterSettings.directory.glob('*.yaml'):
        cluster = path.stem
        settings[cluster] = {'path': path}
        try:
            settings[cluster]['config_status'] = 'success'
        except KeyError as err:
            log.error(err)
            settings[cluster]['config'] = {}
            settings[cluster]['config_status'] = 'failure'
    return settings
def job(self):
    'Return the job options (merged with defaults)'
    defaults = self.globals.job(self.job_type)
    options = self.job_options
    return (self.job_type, {**defaults, **options})
-1,912,766,609,994,605,800
Return the job options (merged with defaults)
hazelsync/settings.py
job
Japannext/hazelsync
python
def job(self):
    defaults = self.globals.job(self.job_type)
    options = self.job_options
    return (self.job_type, {**defaults, **options})
def backend(self):
    'Return the backend options (merged with defaults)'
    defaults = self.globals.backend(self.backend_type)
    options = self.backend_options
    return (self.backend_type, {**defaults, **options})
6,782,587,185,357,458,000
Return the backend option (merged with defaults)
hazelsync/settings.py
backend
Japannext/hazelsync
python
def backend(self):
    defaults = self.globals.backend(self.backend_type)
    options = self.backend_options
    return (self.backend_type, {**defaults, **options})
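Both job and backend above rely on the same dict-merge idiom: in {**defaults, **options}, keys unpacked later win, so per-job or per-backend options override the global defaults. A self-contained illustration with invented values:

# Later unpacking wins: options override defaults key by key.
defaults = {'retention': 7, 'compress': True}
options = {'retention': 30}
merged = {**defaults, **options}
assert merged == {'retention': 30, 'compress': True}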
def get_value(self):
    """
    @summary: Returns a SetDetailData object
    @note: Reference one row of one column of the set resource variable,
        e.g. ${value.bk_set_name[0]} -> "集群1"
    @note: Reference every row of one column, joined with the newline
        separator `\n`, e.g. ${value.flat__bk_set_name} -> "集群1\n集群2"
    @note: Reference the IPs assigned to a module of the set resource
        variable, e.g. ${value._module[0]["gamesvr"]} -> "127.0.0.1,127.0.0.2"
    @return:
    """
    separator = self.value.get('separator', ',')
    return SetDetailData(self.value['data'], separator)
-347,988,065,715,103,740
@summary: Returns a SetDetailData object @note: Reference one row of one column of the set resource variable, e.g. ${value.bk_set_name[0]} -> "集群1" @note: Reference every row of one column, joined with the newline separator `\n`, e.g. ${value.flat__bk_set_name} -> "集群1\n集群2" @note: Reference the IPs assigned to a module of the set resource variable, e.g. ${value._module[0]["gamesvr"]} -> "127.0.0.1,127.0.0.2" @return:
pipeline_plugins/variables/collections/sites/open/cc.py
get_value
springborland/bk-sops
python
def get_value(self):
    """
    @summary: Returns a SetDetailData object
    @note: Reference one row of one column of the set resource variable,
        e.g. ${value.bk_set_name[0]} -> "集群1"
    @note: Reference every row of one column, joined with the newline
        separator `\n`, e.g. ${value.flat__bk_set_name} -> "集群1\n集群2"
    @note: Reference the IPs assigned to a module of the set resource
        variable, e.g. ${value._module[0]["gamesvr"]} -> "127.0.0.1,127.0.0.2"
    @return:
    """
    separator = self.value.get('separator', ',')
    return SetDetailData(self.value['data'], separator)
def get_value(self):
    """
    @summary: Returns a dict object that maps each input IP found in CMDB
        to a dict of the host attributes queried from CMDB
    @note: Reference all attributes of 127.0.0.1,
        e.g. ${value["127.0.0.1"]} -> {"bk_host_id": 999, "import_from": 3, ...}
    @note: Reference the bk_host_id attribute of 127.0.0.1,
        e.g. ${value["127.0.0.1"]["bk_host_id"]} -> 999
    @return:
    """
    username = self.pipeline_data['executor']
    project_id = self.pipeline_data['project_id']
    project = Project.objects.get(id=project_id)
    bk_biz_id = project.bk_biz_id if project.from_cmdb else ''
    bk_supplier_account = supplier_account_for_project(project_id)
    ip_list = get_ip_by_regex(self.value)
    if not ip_list:
        return {}
    hosts_list = get_business_host(
        username, bk_biz_id, bk_supplier_account,
        ['bk_cpu', 'bk_isp_name', 'bk_os_name', 'bk_province_name', 'bk_host_id',
         'import_from', 'bk_os_version', 'bk_disk', 'operator', 'bk_mem',
         'bk_host_name', 'bk_host_innerip', 'bk_comment', 'bk_os_bit',
         'bk_outer_mac', 'bk_asset_id', 'bk_service_term', 'bk_sla', 'bk_cpu_mhz',
         'bk_host_outerip', 'bk_state_name', 'bk_os_type', 'bk_mac',
         'bk_bak_operator', 'bk_supplier_account', 'bk_sn', 'bk_cpu_module'],
        ip_list)
    hosts = {}
    for host in hosts_list:
        ip = host['bk_host_innerip']
        if 'bk_cloud_id' in host:
            host.pop('bk_cloud_id')
        hosts[ip] = host
    return hosts
7,958,199,696,952,713,000
@summary: Returns a dict object that maps each input IP found in CMDB to a dict of the host attributes queried from CMDB @note: Reference all attributes of 127.0.0.1, e.g. ${value["127.0.0.1"]} -> {"bk_host_id": 999, "import_from": 3, ...} @note: Reference the bk_host_id attribute of 127.0.0.1, e.g. ${value["127.0.0.1"]["bk_host_id"]} -> 999 @return:
pipeline_plugins/variables/collections/sites/open/cc.py
get_value
springborland/bk-sops
python
def get_value(self):
    """
    @summary: Returns a dict object that maps each input IP found in CMDB
        to a dict of the host attributes queried from CMDB
    @note: Reference all attributes of 127.0.0.1,
        e.g. ${value["127.0.0.1"]} -> {"bk_host_id": 999, "import_from": 3, ...}
    @note: Reference the bk_host_id attribute of 127.0.0.1,
        e.g. ${value["127.0.0.1"]["bk_host_id"]} -> 999
    @return:
    """
    username = self.pipeline_data['executor']
    project_id = self.pipeline_data['project_id']
    project = Project.objects.get(id=project_id)
    bk_biz_id = project.bk_biz_id if project.from_cmdb else ''
    bk_supplier_account = supplier_account_for_project(project_id)
    ip_list = get_ip_by_regex(self.value)
    if not ip_list:
        return {}
    hosts_list = get_business_host(
        username, bk_biz_id, bk_supplier_account,
        ['bk_cpu', 'bk_isp_name', 'bk_os_name', 'bk_province_name', 'bk_host_id',
         'import_from', 'bk_os_version', 'bk_disk', 'operator', 'bk_mem',
         'bk_host_name', 'bk_host_innerip', 'bk_comment', 'bk_os_bit',
         'bk_outer_mac', 'bk_asset_id', 'bk_service_term', 'bk_sla', 'bk_cpu_mhz',
         'bk_host_outerip', 'bk_state_name', 'bk_os_type', 'bk_mac',
         'bk_bak_operator', 'bk_supplier_account', 'bk_sn', 'bk_cpu_module'],
        ip_list)
    hosts = {}
    for host in hosts_list:
        ip = host['bk_host_innerip']
        if 'bk_cloud_id' in host:
            host.pop('bk_cloud_id')
        hosts[ip] = host
    return hosts
@pytest.fixture(scope='module')
def storage_cluster(api, request):
    """Specific storage cluster.

    :param api: api
    :param request: pytest request

    :raises RuntimeError: Unknown cluster name
    :return: cluster box
    """
    cluster_name = request.param
    cluster = None
    if cluster_name != 'DEFAULT':
        cluster = QueryOPF(
            api=api,
            url='/cluster/list',
            request_data={},
            errors_mapping={},
            paginated_field='clusters',
            required_sid=True,
        ).filter_by(Filter('name', FilterCondition.equals, cluster_name)).first()
        if cluster is None:
            raise RuntimeError('Unknown cluster name {name}'.format(name=cluster_name))
    return cluster
5,424,851,730,693,390,000
Specific storage cluster. :param api: api :param request: pytest request :raises RuntimeError: Unknown cluster name :return: cluster box
tests/fixtures/account.py
storage_cluster
agolovkina/sdk-python
python
@pytest.fixture(scope='module')
def storage_cluster(api, request):
    """Specific storage cluster.

    :param api: api
    :param request: pytest request

    :raises RuntimeError: Unknown cluster name
    :return: cluster box
    """
    cluster_name = request.param
    cluster = None
    if cluster_name != 'DEFAULT':
        cluster = QueryOPF(
            api=api,
            url='/cluster/list',
            request_data={},
            errors_mapping={},
            paginated_field='clusters',
            required_sid=True,
        ).filter_by(Filter('name', FilterCondition.equals, cluster_name)).first()
        if cluster is None:
            raise RuntimeError('Unknown cluster name {name}'.format(name=cluster_name))
    return cluster
def create_account(api, account_name: str) -> Tuple[Box, Box]:
    """Create new account.

    :param api: api
    :param account_name: account name
    :raises RuntimeError: Can't find account
    :return: user params
    """
    QueryO(
        api=api,
        url='/account/add',
        request_data={'name': account_name},
        errors_mapping={'DUPLICATE_NAME': DuplicateName()},
        required_sid=True,
    ).get()
    account = api.Account.list().filter_by(
        Filter('name', FilterCondition.equals, account_name)).first()
    if account is None:
        raise RuntimeError('Cant find test account')
    admin_role = api.Role.list(account_id=account.uuid).filter_by(
        Filter('name', FilterCondition.equals, 'Administrator')).first()
    if admin_role is None:
        raise RuntimeError('Cant find admin role')
    api.Role.set(
        uuid=admin_role.uuid,
        permissions=json.dumps({
            'study_delete': 1,
            'study_duplicate': 1,
            'study_split': 1,
            'study_merge': 1,
            'study_delete_image': 1,
        }),
    ).get()
    user = api.User.get(account_id=account.uuid).get()
    logger.info('Created account %s', account.name)
    return (account, user)
1,231,547,780,667,811,600
Create new account. :param api: api :param account_name: account name :raises RuntimeError: Can't find account :return: user params
tests/fixtures/account.py
create_account
agolovkina/sdk-python
python
def create_account(api, account_name: str) -> Tuple[Box, Box]:
    """Create new account.

    :param api: api
    :param account_name: account name
    :raises RuntimeError: Can't find account
    :return: user params
    """
    QueryO(
        api=api,
        url='/account/add',
        request_data={'name': account_name},
        errors_mapping={'DUPLICATE_NAME': DuplicateName()},
        required_sid=True,
    ).get()
    account = api.Account.list().filter_by(
        Filter('name', FilterCondition.equals, account_name)).first()
    if account is None:
        raise RuntimeError('Cant find test account')
    admin_role = api.Role.list(account_id=account.uuid).filter_by(
        Filter('name', FilterCondition.equals, 'Administrator')).first()
    if admin_role is None:
        raise RuntimeError('Cant find admin role')
    api.Role.set(
        uuid=admin_role.uuid,
        permissions=json.dumps({
            'study_delete': 1,
            'study_duplicate': 1,
            'study_split': 1,
            'study_merge': 1,
            'study_delete_image': 1,
        }),
    ).get()
    user = api.User.get(account_id=account.uuid).get()
    logger.info('Created account %s', account.name)
    return (account, user)
def account_studies(api, account) -> List[Box]:
    """List of account studies.

    :param api: api
    :param account: account
    :return: list of studies
    """
    account_namespaces = [account.namespace_id]
    group_namespaces = [group.namespace_id for group in
                        api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()]
    account_namespaces.extend(group_namespaces)
    acc_studies = []
    for account_namespace in account_namespaces:
        studies = api.Study.list().filter_by(
            Filter(field_name='phi_namespace',
                   condition=FilterCondition.equals,
                   value=account_namespace)).all()
        acc_studies.extend(list(studies))
    return acc_studies
3,311,823,025,680,647,700
List of account studies. :param api: api :param account: account :return: list of studies
tests/fixtures/account.py
account_studies
agolovkina/sdk-python
python
def account_studies(api, account) -> List[Box]:
    """List of account studies.

    :param api: api
    :param account: account
    :return: list of studies
    """
    account_namespaces = [account.namespace_id]
    group_namespaces = [group.namespace_id for group in
                        api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()]
    account_namespaces.extend(group_namespaces)
    acc_studies = []
    for account_namespace in account_namespaces:
        studies = api.Study.list().filter_by(
            Filter(field_name='phi_namespace',
                   condition=FilterCondition.equals,
                   value=account_namespace)).all()
        acc_studies.extend(list(studies))
    return acc_studies
def delete_account(api, account) -> Box:
    """Delete account.

    :param api: api
    :param account: account
    :raises RuntimeError: if the account has undeleted studies
    """
    try:
        QueryO(
            api=api,
            url='/account/delete/',
            request_data={'uuid': account.uuid},
            errors_mapping={'NOT_EMPTY': NotEmpty()},
            required_sid=True,
        ).get()
    except NotEmpty:
        acc_studies = account_studies(api, account)
        raise RuntimeError('Account have undeleted studies:\n{studies}'.format(
            studies='\n'.join([str((study.uuid, study.study_uid)) for study in acc_studies])))
-5,036,035,843,466,138,000
Delete account. :param api: api :param account: account :raises RuntimeError: if the account has undeleted studies
tests/fixtures/account.py
delete_account
agolovkina/sdk-python
python
def delete_account(api, account) -> Box:
    """Delete account.

    :param api: api
    :param account: account
    :raises RuntimeError: if the account has undeleted studies
    """
    try:
        QueryO(
            api=api,
            url='/account/delete/',
            request_data={'uuid': account.uuid},
            errors_mapping={'NOT_EMPTY': NotEmpty()},
            required_sid=True,
        ).get()
    except NotEmpty:
        acc_studies = account_studies(api, account)
        raise RuntimeError('Account have undeleted studies:\n{studies}'.format(
            studies='\n'.join([str((study.uuid, study.study_uid)) for study in acc_studies])))
def clear_studies(api, account):
    """Delete account studies.

    :param api: api
    :param account: account
    """
    account_namespaces = [account.namespace_id]
    group_namespaces = [group.namespace_id for group in
                        api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()]
    account_namespaces.extend(group_namespaces)
    for account_namespace in account_namespaces:
        studies = api.Study.list().filter_by(
            Filter(field_name='phi_namespace',
                   condition=FilterCondition.equals,
                   value=account_namespace)).all()
        for study in studies:
            study_uid = study.uuid
            logger.error('Remove undeleted study %s', study_uid)
            api.Study.delete(uuid=study_uid).get()
-3,648,538,665,187,540,500
Delete account studies. :param api: api :param account: account
tests/fixtures/account.py
clear_studies
agolovkina/sdk-python
python
def clear_studies(api, account):
    """Delete account studies.

    :param api: api
    :param account: account
    """
    account_namespaces = [account.namespace_id]
    group_namespaces = [group.namespace_id for group in
                        api.Group.list(account_id=account.uuid).only(Group.namespace_id).all()]
    account_namespaces.extend(group_namespaces)
    for account_namespace in account_namespaces:
        studies = api.Study.list().filter_by(
            Filter(field_name='phi_namespace',
                   condition=FilterCondition.equals,
                   value=account_namespace)).all()
        for study in studies:
            study_uid = study.uuid
            logger.error('Remove undeleted study %s', study_uid)
            api.Study.delete(uuid=study_uid).get()
@pytest.fixture(scope='module')
def account(api, storage_cluster):
    """Get account.

    :param api: ambra api
    :param storage_cluster: storage cluster

    :yields: test account

    :raises RuntimeError: On deleted account with existing studies
    :raises TimeoutError: Time for waiting account deletion is out
    """
    account_name = settings.TEST_ACCOUNT_NAME
    if storage_cluster:
        account_name = '{account}_{cluster}'.format(account=account_name, cluster=storage_cluster.name)
    try:
        (account, user) = create_account(api, account_name)
    except DuplicateName:
        logger.error('Duplicated account: %s', account_name)
        account = api.Account.list().filter_by(
            Filter('name', FilterCondition.equals, account_name)).first()
        if account is None:
            raise RuntimeError('Account duplicated but not exists')
        clear_studies(api, account)
        delete_account(api, account)
        (account, user) = create_account(api, account_name)
    if storage_cluster is not None:
        QueryO(
            api=api,
            url='/cluster/account/bind',
            request_data={'account_id': account.uuid, 'cluster_id': storage_cluster.uuid},
            errors_mapping={},
            required_sid=True,
        ).get()
        logger.info('Bind account to storage cluster {name}'.format(name=storage_cluster.name))
    yield UserParams(account=account, user=user)
    delete_account(api, account)
    start = monotonic()
    while True:
        if (monotonic() - start) >= settings.API['account_deletion_timeout']:
            raise TimeoutError('Account still exists')
        account = api.Account.list().filter_by(
            Filter('name', FilterCondition.equals, account_name)).first()
        if account is None:
            return
        sleep(settings.API['account_deletion_check_interval'])
-804,873,413,144,816,600
Get account. :param api: ambra api :param storage_cluster: storage cluster :yields: test account :raises RuntimeError: On deleted account with existing studies :raises TimeoutError: Time for waiting account deletion is out
tests/fixtures/account.py
account
agolovkina/sdk-python
python
@pytest.fixture(scope='module')
def account(api, storage_cluster):
    """Get account.

    :param api: ambra api
    :param storage_cluster: storage cluster

    :yields: test account

    :raises RuntimeError: On deleted account with existing studies
    :raises TimeoutError: Time for waiting account deletion is out
    """
    account_name = settings.TEST_ACCOUNT_NAME
    if storage_cluster:
        account_name = '{account}_{cluster}'.format(account=account_name, cluster=storage_cluster.name)
    try:
        (account, user) = create_account(api, account_name)
    except DuplicateName:
        logger.error('Duplicated account: %s', account_name)
        account = api.Account.list().filter_by(
            Filter('name', FilterCondition.equals, account_name)).first()
        if account is None:
            raise RuntimeError('Account duplicated but not exists')
        clear_studies(api, account)
        delete_account(api, account)
        (account, user) = create_account(api, account_name)
    if storage_cluster is not None:
        QueryO(
            api=api,
            url='/cluster/account/bind',
            request_data={'account_id': account.uuid, 'cluster_id': storage_cluster.uuid},
            errors_mapping={},
            required_sid=True,
        ).get()
        logger.info('Bind account to storage cluster {name}'.format(name=storage_cluster.name))
    yield UserParams(account=account, user=user)
    delete_account(api, account)
    start = monotonic()
    while True:
        if (monotonic() - start) >= settings.API['account_deletion_timeout']:
            raise TimeoutError('Account still exists')
        account = api.Account.list().filter_by(
            Filter('name', FilterCondition.equals, account_name)).first()
        if account is None:
            return
        sleep(settings.API['account_deletion_check_interval'])
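The tail of the account fixture above is a poll-until-gone loop: look the account up, return once the lookup comes back empty, and raise TimeoutError when the deadline passes. The same shape in isolation (names invented; fetch stands in for the api.Account.list()...first() lookup):

from time import monotonic, sleep

def wait_until_gone(fetch, timeout, interval):
    # Poll fetch() until it returns None or the timeout elapses.
    start = monotonic()
    while (monotonic() - start) < timeout:
        if fetch() is None:
            return
        sleep(interval)
    raise TimeoutError('Account still exists')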
@pytest.fixture
def create_group(api, account):
    """Create new group in account.

    :param api: api fixture
    :param account: account fixture
    :yields: create_group function
    """
    groups = []
    group_counter = 0

    def _create_group(name: Optional[str] = None):
        nonlocal group_counter
        group_counter += 1
        if name is None:
            name = 'SDK_TEST_GROUP_{gnum}'.format(gnum=group_counter)
        account_id = account.account.uuid
        response = api.Group.add(account_id=account_id, name=name).get()
        group = GroupParams(uuid=response.uuid, namespace_id=response.namespace_id, name=name)
        groups.append(group)
        api.Group.user_add(uuid=group.uuid, user_id=account.user.uuid).get()
        return group

    yield _create_group
    for group in groups:
        api.Group.delete(uuid=group.uuid).get()
-1,279,402,041,010,284,300
Create new group in account. :param api: api fixture :param account: account fixture :yields: create_group function
tests/fixtures/account.py
create_group
agolovkina/sdk-python
python
@pytest.fixture
def create_group(api, account):
    """Create new group in account.

    :param api: api fixture
    :param account: account fixture
    :yields: create_group function
    """
    groups = []
    group_counter = 0

    def _create_group(name: Optional[str] = None):
        nonlocal group_counter
        group_counter += 1
        if name is None:
            name = 'SDK_TEST_GROUP_{gnum}'.format(gnum=group_counter)
        account_id = account.account.uuid
        response = api.Group.add(account_id=account_id, name=name).get()
        group = GroupParams(uuid=response.uuid, namespace_id=response.namespace_id, name=name)
        groups.append(group)
        api.Group.user_add(uuid=group.uuid, user_id=account.user.uuid).get()
        return group

    yield _create_group
    for group in groups:
        api.Group.delete(uuid=group.uuid).get()
def __init__(self, api_version=None, kind=None, name=None):
    """
    V2beta1CrossVersionObjectReference - a model defined in Swagger
    """
    self._api_version = None
    self._kind = None
    self._name = None
    self.discriminator = None
    if api_version is not None:
        self.api_version = api_version
    self.kind = kind
    self.name = name
8,192,905,385,589,957,000
V2beta1CrossVersionObjectReference - a model defined in Swagger
kubernetes/client/models/v2beta1_cross_version_object_reference.py
__init__
StephenPCG/python
python
def __init__(self, api_version=None, kind=None, name=None):
    self._api_version = None
    self._kind = None
    self._name = None
    self.discriminator = None
    if api_version is not None:
        self.api_version = api_version
    self.kind = kind
    self.name = name
@property
def api_version(self):
    """
    Gets the api_version of this V2beta1CrossVersionObjectReference.
    API version of the referent

    :return: The api_version of this V2beta1CrossVersionObjectReference.
    :rtype: str
    """
    return self._api_version
-3,087,162,814,788,035,600
Gets the api_version of this V2beta1CrossVersionObjectReference. API version of the referent :return: The api_version of this V2beta1CrossVersionObjectReference. :rtype: str
kubernetes/client/models/v2beta1_cross_version_object_reference.py
api_version
StephenPCG/python
python
@property
def api_version(self):
    """
    Gets the api_version of this V2beta1CrossVersionObjectReference.
    API version of the referent

    :return: The api_version of this V2beta1CrossVersionObjectReference.
    :rtype: str
    """
    return self._api_version
@api_version.setter
def api_version(self, api_version):
    """
    Sets the api_version of this V2beta1CrossVersionObjectReference.
    API version of the referent

    :param api_version: The api_version of this V2beta1CrossVersionObjectReference.
    :type: str
    """
    self._api_version = api_version
-793,649,945,325,276,200
Sets the api_version of this V2beta1CrossVersionObjectReference. API version of the referent :param api_version: The api_version of this V2beta1CrossVersionObjectReference. :type: str
kubernetes/client/models/v2beta1_cross_version_object_reference.py
api_version
StephenPCG/python
python
@api_version.setter
def api_version(self, api_version):
    """
    Sets the api_version of this V2beta1CrossVersionObjectReference.
    API version of the referent

    :param api_version: The api_version of this V2beta1CrossVersionObjectReference.
    :type: str
    """
    self._api_version = api_version
@property
def kind(self):
    """
    Gets the kind of this V2beta1CrossVersionObjectReference.
    Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"

    :return: The kind of this V2beta1CrossVersionObjectReference.
    :rtype: str
    """
    return self._kind
2,425,916,078,310,303,000
Gets the kind of this V2beta1CrossVersionObjectReference. Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" :return: The kind of this V2beta1CrossVersionObjectReference. :rtype: str
kubernetes/client/models/v2beta1_cross_version_object_reference.py
kind
StephenPCG/python
python
@property
def kind(self):
    """
    Gets the kind of this V2beta1CrossVersionObjectReference.
    Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"

    :return: The kind of this V2beta1CrossVersionObjectReference.
    :rtype: str
    """
    return self._kind
@kind.setter
def kind(self, kind):
    """
    Sets the kind of this V2beta1CrossVersionObjectReference.
    Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"

    :param kind: The kind of this V2beta1CrossVersionObjectReference.
    :type: str
    """
    if kind is None:
        raise ValueError('Invalid value for `kind`, must not be `None`')
    self._kind = kind
2,954,922,473,310,548,500
Sets the kind of this V2beta1CrossVersionObjectReference. Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" :param kind: The kind of this V2beta1CrossVersionObjectReference. :type: str
kubernetes/client/models/v2beta1_cross_version_object_reference.py
kind
StephenPCG/python
python
@kind.setter
def kind(self, kind):
    """
    Sets the kind of this V2beta1CrossVersionObjectReference.
    Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"

    :param kind: The kind of this V2beta1CrossVersionObjectReference.
    :type: str
    """
    if kind is None:
        raise ValueError('Invalid value for `kind`, must not be `None`')
    self._kind = kind
@property
def name(self):
    """
    Gets the name of this V2beta1CrossVersionObjectReference.
    Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names

    :return: The name of this V2beta1CrossVersionObjectReference.
    :rtype: str
    """
    return self._name
-6,171,665,245,504,551,000
Gets the name of this V2beta1CrossVersionObjectReference. Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names :return: The name of this V2beta1CrossVersionObjectReference. :rtype: str
kubernetes/client/models/v2beta1_cross_version_object_reference.py
name
StephenPCG/python
python
@property
def name(self):
    """
    Gets the name of this V2beta1CrossVersionObjectReference.
    Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names

    :return: The name of this V2beta1CrossVersionObjectReference.
    :rtype: str
    """
    return self._name
@name.setter
def name(self, name):
    """
    Sets the name of this V2beta1CrossVersionObjectReference.
    Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names

    :param name: The name of this V2beta1CrossVersionObjectReference.
    :type: str
    """
    if name is None:
        raise ValueError('Invalid value for `name`, must not be `None`')
    self._name = name
1,106,479,108,438,382,700
Sets the name of this V2beta1CrossVersionObjectReference. Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names :param name: The name of this V2beta1CrossVersionObjectReference. :type: str
kubernetes/client/models/v2beta1_cross_version_object_reference.py
name
StephenPCG/python
python
@name.setter
def name(self, name):
    """
    Sets the name of this V2beta1CrossVersionObjectReference.
    Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names

    :param name: The name of this V2beta1CrossVersionObjectReference.
    :type: str
    """
    if name is None:
        raise ValueError('Invalid value for `name`, must not be `None`')
    self._name = name
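The setters above encode the model's required/optional split: kind and name reject None while api_version accepts it. A small demonstration of that contract, assuming only the constructor and properties shown in these records:

ref = V2beta1CrossVersionObjectReference(kind='Deployment', name='web')
ref.api_version = None  # allowed: api_version is optional
try:
    ref.name = None  # rejected by the @name.setter above
except ValueError as err:
    print(err)  # Invalid value for `name`, must not be `None`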
def to_dict(self):
    """
    Returns the model properties as a dict
    """
    result = {}
    for (attr, _) in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, 'to_dict') else x,
                value))
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item,
                value.items()))
        else:
            result[attr] = value
    return result
2,191,974,537,531,847,000
Returns the model properties as a dict
kubernetes/client/models/v2beta1_cross_version_object_reference.py
to_dict
StephenPCG/python
python
def to_dict(self):
    result = {}
    for (attr, _) in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, 'to_dict') else x,
                value))
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item,
                value.items()))
        else:
            result[attr] = value
    return result
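to_dict walks swagger_types recursively: nested models are serialized through their own to_dict, lists and dicts are mapped over, and plain values pass through. A sketch of the flat case, assuming the class's swagger_types maps the three attributes shown (as in the upstream generated client; that mapping is not visible in these records):

ref = V2beta1CrossVersionObjectReference(kind='Deployment', name='web')
print(ref.to_dict())
# expected: {'api_version': None, 'kind': 'Deployment', 'name': 'web'}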
def to_str(self):
    """
    Returns the string representation of the model
    """
    return pformat(self.to_dict())
-3,531,024,894,346,511,000
Returns the string representation of the model
kubernetes/client/models/v2beta1_cross_version_object_reference.py
to_str
StephenPCG/python
python
def to_str(self):
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`
    """
    return self.to_str()
5,853,962,500,611,353,000
For `print` and `pprint`
kubernetes/client/models/v2beta1_cross_version_object_reference.py
__repr__
StephenPCG/python
python
def __repr__(self):
    return self.to_str()
def __eq__(self, other):
    """
    Returns true if both objects are equal
    """
    if not isinstance(other, V2beta1CrossVersionObjectReference):
        return False
    return self.__dict__ == other.__dict__
-3,340,942,517,178,243,000
Returns true if both objects are equal
kubernetes/client/models/v2beta1_cross_version_object_reference.py
__eq__
StephenPCG/python
python
def __eq__(self, other):
    if not isinstance(other, V2beta1CrossVersionObjectReference):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal
    """
    return not (self == other)
3,600,423,175,817,510,400
Returns true if both objects are not equal
kubernetes/client/models/v2beta1_cross_version_object_reference.py
__ne__
StephenPCG/python
python
def __ne__(self, other):
    return not (self == other)
@app.route('/hello')
@track_requests
def HelloWorld():
    """A hello message
    Example endpoint returning a hello message
    ---
    responses:
      200:
        description: A successful reply
        examples:
          text/plain: Hello from Appsody!
    """
    return 'Hello from Appsody!'
6,637,010,655,578,749,000
A hello message Example endpoint returning a hello message --- responses: 200: description: A successful reply examples: text/plain: Hello from Appsody!
sources/image_preprocessor/__init__.py
HelloWorld
Bhaskers-Blu-Org1/process-images-derive-insights
python
@app.route('/hello')
@track_requests
def HelloWorld():
    """A hello message
    Example endpoint returning a hello message
    ---
    responses:
      200:
        description: A successful reply
        examples:
          text/plain: Hello from Appsody!
    """
    return 'Hello from Appsody!'
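In the route above, the docstring doubles as an API spec: the YAML after the --- marker is a Swagger/OpenAPI fragment that a tool such as flasgger can serve. A quick check of the route itself with Flask's built-in test client (assuming app is the Flask application the decorator refers to):

client = app.test_client()
resp = client.get('/hello')
assert resp.status_code == 200
assert resp.data == b'Hello from Appsody!'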
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
    'ExtensionsV1beta1Scale - a model defined in OpenAPI'
    self._api_version = None
    self._kind = None
    self._metadata = None
    self._spec = None
    self._status = None
    self.discriminator = None
    if api_version is not None:
        self.api_version = api_version
    if kind is not None:
        self.kind = kind
    if metadata is not None:
        self.metadata = metadata
    if spec is not None:
        self.spec = spec
    if status is not None:
        self.status = status
-3,213,912,742,942,138,000
ExtensionsV1beta1Scale - a model defined in OpenAPI
kubernetes/client/models/extensions_v1beta1_scale.py
__init__
ACXLM/python
python
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
    self._api_version = None
    self._kind = None
    self._metadata = None
    self._spec = None
    self._status = None
    self.discriminator = None
    if api_version is not None:
        self.api_version = api_version
    if kind is not None:
        self.kind = kind
    if metadata is not None:
        self.metadata = metadata
    if spec is not None:
        self.spec = spec
    if status is not None:
        self.status = status
@property
def api_version(self):
    """Gets the api_version of this ExtensionsV1beta1Scale.  # noqa: E501

    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

    :return: The api_version of this ExtensionsV1beta1Scale.  # noqa: E501
    :rtype: str
    """
    return self._api_version
6,668,571,443,484,836,000
Gets the api_version of this ExtensionsV1beta1Scale. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this ExtensionsV1beta1Scale. # noqa: E501 :rtype: str
kubernetes/client/models/extensions_v1beta1_scale.py
api_version
ACXLM/python
python
@property
def api_version(self):
    """Gets the api_version of this ExtensionsV1beta1Scale.  # noqa: E501

    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

    :return: The api_version of this ExtensionsV1beta1Scale.  # noqa: E501
    :rtype: str
    """
    return self._api_version
@api_version.setter
def api_version(self, api_version):
    """Sets the api_version of this ExtensionsV1beta1Scale.

    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

    :param api_version: The api_version of this ExtensionsV1beta1Scale.  # noqa: E501
    :type: str
    """
    self._api_version = api_version
1,476,418,542,245,848,000
Sets the api_version of this ExtensionsV1beta1Scale. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this ExtensionsV1beta1Scale. # noqa: E501 :type: str
kubernetes/client/models/extensions_v1beta1_scale.py
api_version
ACXLM/python
python
@api_version.setter def api_version(self, api_version): 'Sets the api_version of this ExtensionsV1beta1Scale.\n\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501\n\n :param api_version: The api_version of this ExtensionsV1beta1Scale. # noqa: E501\n :type: str\n ' self._api_version = api_version
@property def kind(self): 'Gets the kind of this ExtensionsV1beta1Scale. # noqa: E501\n\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501\n\n :return: The kind of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: str\n ' return self._kind
-2,684,591,780,846,492,000
Gets the kind of this ExtensionsV1beta1Scale. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this ExtensionsV1beta1Scale. # noqa: E501 :rtype: str
kubernetes/client/models/extensions_v1beta1_scale.py
kind
ACXLM/python
python
@property def kind(self): 'Gets the kind of this ExtensionsV1beta1Scale. # noqa: E501\n\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501\n\n :return: The kind of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: str\n ' return self._kind
@kind.setter def kind(self, kind): 'Sets the kind of this ExtensionsV1beta1Scale.\n\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501\n\n :param kind: The kind of this ExtensionsV1beta1Scale. # noqa: E501\n :type: str\n ' self._kind = kind
-8,335,573,646,077,467,000
Sets the kind of this ExtensionsV1beta1Scale. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this ExtensionsV1beta1Scale. # noqa: E501 :type: str
kubernetes/client/models/extensions_v1beta1_scale.py
kind
ACXLM/python
python
@kind.setter def kind(self, kind): 'Sets the kind of this ExtensionsV1beta1Scale.\n\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501\n\n :param kind: The kind of this ExtensionsV1beta1Scale. # noqa: E501\n :type: str\n ' self._kind = kind
@property def metadata(self): 'Gets the metadata of this ExtensionsV1beta1Scale. # noqa: E501\n\n\n :return: The metadata of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: V1ObjectMeta\n ' return self._metadata
-13,547,596,297,764,276
Gets the metadata of this ExtensionsV1beta1Scale. # noqa: E501 :return: The metadata of this ExtensionsV1beta1Scale. # noqa: E501 :rtype: V1ObjectMeta
kubernetes/client/models/extensions_v1beta1_scale.py
metadata
ACXLM/python
python
@property def metadata(self): 'Gets the metadata of this ExtensionsV1beta1Scale. # noqa: E501\n\n\n :return: The metadata of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: V1ObjectMeta\n ' return self._metadata
@metadata.setter def metadata(self, metadata): 'Sets the metadata of this ExtensionsV1beta1Scale.\n\n\n :param metadata: The metadata of this ExtensionsV1beta1Scale. # noqa: E501\n :type: V1ObjectMeta\n ' self._metadata = metadata
7,311,167,920,556,201,000
Sets the metadata of this ExtensionsV1beta1Scale. :param metadata: The metadata of this ExtensionsV1beta1Scale. # noqa: E501 :type: V1ObjectMeta
kubernetes/client/models/extensions_v1beta1_scale.py
metadata
ACXLM/python
python
@metadata.setter def metadata(self, metadata): 'Sets the metadata of this ExtensionsV1beta1Scale.\n\n\n :param metadata: The metadata of this ExtensionsV1beta1Scale. # noqa: E501\n :type: V1ObjectMeta\n ' self._metadata = metadata
@property def spec(self): 'Gets the spec of this ExtensionsV1beta1Scale. # noqa: E501\n\n\n :return: The spec of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: ExtensionsV1beta1ScaleSpec\n ' return self._spec
6,507,947,979,207,770,000
Gets the spec of this ExtensionsV1beta1Scale. # noqa: E501 :return: The spec of this ExtensionsV1beta1Scale. # noqa: E501 :rtype: ExtensionsV1beta1ScaleSpec
kubernetes/client/models/extensions_v1beta1_scale.py
spec
ACXLM/python
python
@property def spec(self): 'Gets the spec of this ExtensionsV1beta1Scale. # noqa: E501\n\n\n :return: The spec of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: ExtensionsV1beta1ScaleSpec\n ' return self._spec
@spec.setter def spec(self, spec): 'Sets the spec of this ExtensionsV1beta1Scale.\n\n\n :param spec: The spec of this ExtensionsV1beta1Scale. # noqa: E501\n :type: ExtensionsV1beta1ScaleSpec\n ' self._spec = spec
-4,437,859,536,225,099,000
Sets the spec of this ExtensionsV1beta1Scale. :param spec: The spec of this ExtensionsV1beta1Scale. # noqa: E501 :type: ExtensionsV1beta1ScaleSpec
kubernetes/client/models/extensions_v1beta1_scale.py
spec
ACXLM/python
python
@spec.setter def spec(self, spec): 'Sets the spec of this ExtensionsV1beta1Scale.\n\n\n :param spec: The spec of this ExtensionsV1beta1Scale. # noqa: E501\n :type: ExtensionsV1beta1ScaleSpec\n ' self._spec = spec
@property def status(self): 'Gets the status of this ExtensionsV1beta1Scale. # noqa: E501\n\n\n :return: The status of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: ExtensionsV1beta1ScaleStatus\n ' return self._status
5,318,401,970,238,419,000
Gets the status of this ExtensionsV1beta1Scale. # noqa: E501 :return: The status of this ExtensionsV1beta1Scale. # noqa: E501 :rtype: ExtensionsV1beta1ScaleStatus
kubernetes/client/models/extensions_v1beta1_scale.py
status
ACXLM/python
python
@property def status(self): 'Gets the status of this ExtensionsV1beta1Scale. # noqa: E501\n\n\n :return: The status of this ExtensionsV1beta1Scale. # noqa: E501\n :rtype: ExtensionsV1beta1ScaleStatus\n ' return self._status
@status.setter def status(self, status): 'Sets the status of this ExtensionsV1beta1Scale.\n\n\n :param status: The status of this ExtensionsV1beta1Scale. # noqa: E501\n :type: ExtensionsV1beta1ScaleStatus\n ' self._status = status
-3,870,650,832,372,444,700
Sets the status of this ExtensionsV1beta1Scale. :param status: The status of this ExtensionsV1beta1Scale. # noqa: E501 :type: ExtensionsV1beta1ScaleStatus
kubernetes/client/models/extensions_v1beta1_scale.py
status
ACXLM/python
python
@status.setter def status(self, status): 'Sets the status of this ExtensionsV1beta1Scale.\n\n\n :param status: The status of this ExtensionsV1beta1Scale. # noqa: E501\n :type: ExtensionsV1beta1ScaleStatus\n ' self._status = status
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
8,442,519,487,048,767,000
Returns the model properties as a dict
kubernetes/client/models/extensions_v1beta1_scale.py
to_dict
ACXLM/python
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
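to_dict walks the class-level openapi_types attribute map and recursively converts nested models, lists, and dicts. A minimal usage sketch, assuming openapi_types covers the five attributes set in __init__ (the map itself is not part of this record):

scale = ExtensionsV1beta1Scale(api_version='extensions/v1beta1', kind='Scale')
d = scale.to_dict()
assert d['kind'] == 'Scale'    # plain values pass through the final else branch
assert d['metadata'] is None   # unset nested models stay None, so there is no to_dict to call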
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
kubernetes/client/models/extensions_v1beta1_scale.py
to_str
ACXLM/python
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
kubernetes/client/models/extensions_v1beta1_scale.py
__repr__
ACXLM/python
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, ExtensionsV1beta1Scale)): return False return (self.__dict__ == other.__dict__)
6,668,779,138,109,585,000
Returns true if both objects are equal
kubernetes/client/models/extensions_v1beta1_scale.py
__eq__
ACXLM/python
python
def __eq__(self, other): if (not isinstance(other, ExtensionsV1beta1Scale)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
kubernetes/client/models/extensions_v1beta1_scale.py
__ne__
ACXLM/python
python
def __ne__(self, other): return (not (self == other))
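Since __eq__ compares __dict__ and __ne__ simply negates it, two instances with identical field values compare equal regardless of identity. A brief check with hypothetical values:

a = ExtensionsV1beta1Scale(kind='Scale')
b = ExtensionsV1beta1Scale(kind='Scale')
assert a == b                                     # equality is attribute-by-attribute via __dict__
assert a != ExtensionsV1beta1Scale(kind='Other')  # __ne__ delegates to __eq__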
def handler(self, param: Optional[any]=None, http_request: Type[HttpRequest]=None) -> HttpResponse: 'Method to call the use case' response = None if (not param): raise HttpBadRequestError(message='Essa requisiçao exige o seguinte parametro: <int:user_id>, error!') if (not str(param).isnumeric()): raise HttpUnprocessableEntity(message='O parametro <user_id> deve ser do tipo inteiro, error!') try: response = None if (not http_request.body): raise DefaultError(type_error=400) name = http_request.body.get('name', None) email = http_request.body.get('email', None) username = http_request.body.get('username', None) password = http_request.body.get('password', None) response = self.__usecase.update(user_id=param, name=name, email=email, username=username, password=password) return self.__format_response(response['data']) except DefaultError as error: if (error.type_error == 400): raise HttpBadRequestError(message='Esta requisicao precisa dos seguintes parametros: <str:name>, <str:email>, <str:username>, <any:password>, error!') from error if (error.type_error == 404): raise HttpNotFound(message='Usuario nao encontrado, error!') from error raise error except Exception as error: raise error
2,084,713,247,792,810,200
Method to call the use case
mitmirror/presenters/controllers/users/update_user_controller.py
handler
Claayton/mitmirror-api
python
def handler(self, param: Optional[any]=None, http_request: Type[HttpRequest]=None) -> HttpResponse: response = None if (not param): raise HttpBadRequestError(message='Essa requisiçao exige o seguinte parametro: <int:user_id>, error!') if (not str(param).isnumeric()): raise HttpUnprocessableEntity(message='O parametro <user_id> deve ser do tipo inteiro, error!') try: response = None if (not http_request.body): raise DefaultError(type_error=400) name = http_request.body.get('name', None) email = http_request.body.get('email', None) username = http_request.body.get('username', None) password = http_request.body.get('password', None) response = self.__usecase.update(user_id=param, name=name, email=email, username=username, password=password) return self.__format_response(response['data']) except DefaultError as error: if (error.type_error == 400): raise HttpBadRequestError(message='Esta requisicao precisa dos seguintes parametros: <str:name>, <str:email>, <str:username>, <any:password>, error!') from error if (error.type_error == 404): raise HttpNotFound(message='Usuario nao encontrado, error!') from error raise error except Exception as error: raise error
@classmethod def __format_response(cls, response_method: Type[User]) -> HttpResponse: 'Formatting the response' response = {'message': 'Informacoes do usuario atualizadas com sucesso!', 'data': {'id': response_method.id, 'name': response_method.name, 'email': response_method.email, 'username': response_method.username, 'password_hash': 'Nao mostramos isso aqui!', 'secundary_id': response_method.secundary_id, 'is_staff': response_method.is_staff, 'is_active_user': response_method.is_active_user, 'last_login': datetime.isoformat(response_method.last_login), 'date_joined': datetime.isoformat(response_method.date_joined)}} return HttpResponse(status_code=200, body=response)
-528,875,641,314,126,200
Formatting the response
mitmirror/presenters/controllers/users/update_user_controller.py
__format_response
Claayton/mitmirror-api
python
@classmethod def __format_response(cls, response_method: Type[User]) -> HttpResponse: response = {'message': 'Informacoes do usuario atualizadas com sucesso!', 'data': {'id': response_method.id, 'name': response_method.name, 'email': response_method.email, 'username': response_method.username, 'password_hash': 'Nao mostramos isso aqui!', 'secundary_id': response_method.secundary_id, 'is_staff': response_method.is_staff, 'is_active_user': response_method.is_active_user, 'last_login': datetime.isoformat(response_method.last_login), 'date_joined': datetime.isoformat(response_method.date_joined)}} return HttpResponse(status_code=200, body=response)
def initialization(): 'Initialization of the necessary game files' pygame.init() pygame.display.set_icon(pygame.image.load('data/icon.bmp')) pygame.display.set_caption('SPACE')
1,552,549,711,354,153,700
Initialization of the necessary game files
main.py
initialization
shycoldii/asteroids
python
def initialization(): pygame.init() pygame.display.set_icon(pygame.image.load('data/icon.bmp')) pygame.display.set_caption('SPACE')
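A hedged sketch of how this initializer would be called at program start; it assumes data/icon.bmp exists relative to the working directory, exactly as the function itself does, and the window size is made up:

import pygame  # the record relies on pygame being imported at module level

if __name__ == '__main__':
    initialization()                              # sets the window icon and the 'SPACE' caption
    screen = pygame.display.set_mode((800, 600))  # hypothetical resolution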
def _validate_tags(namespace): ' Extracts multiple space-separated tags in key[=value] format ' if isinstance(namespace.tags, list): tags_dict = {} for item in namespace.tags: tags_dict.update(_validate_tag(item)) namespace.tags = tags_dict
499,165,198,836,220
Extracts multiple space-separated tags in key[=value] format
src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/validators.py
_validate_tags
aag09/azurecli
python
def _validate_tags(namespace): ' ' if isinstance(namespace.tags, list): tags_dict = {} for item in namespace.tags: tags_dict.update(_validate_tag(item)) namespace.tags = tags_dict
def _validate_tag(string): ' Extracts a single tag in key[=value] format ' result = {} if string: comps = string.split('=', 1) result = ({comps[0]: comps[1]} if (len(comps) > 1) else {string: ''}) return result
-8,924,955,365,198,874,000
Extracts a single tag in key[=value] format
src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/validators.py
_validate_tag
aag09/azurecli
python
def _validate_tag(string): ' ' result = {} if string: comps = string.split('=', 1) result = ({comps[0]: comps[1]} if (len(comps) > 1) else {string: ''}) return result
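Together the two validators turn a list of key[=value] tokens into a dict, mapping valueless keys to empty strings. A small usage sketch with a stand-in argparse namespace (the tag values are invented):

from argparse import Namespace

ns = Namespace(tags=['env=prod', 'owner=alice', 'temporary'])
_validate_tags(ns)  # mutates ns.tags in place
print(ns.tags)      # {'env': 'prod', 'owner': 'alice', 'temporary': ''}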
@property def version(self) -> CIDVersion: '\n CID version.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.version\n 1\n\n ' return self._version
2,141,663,901,753,808,000
CID version. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.version 1
multiformats/cid/__init__.py
version
hashberg-io/multiformats
python
@property def version(self) -> CIDVersion: '\n CID version.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.version\n 1\n\n ' return self._version
@property def base(self) -> Multibase: '\n Multibase used to encode the CID:\n\n - if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used\n - if a CIDv1 was decoded from a bytestring, the \'base58btc\' multibase is used\n - for a CIDv0, \'base58btc\' is always used\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.base\n Multibase(name=\'base58btc\', code=\'z\',\n status=\'default\', description=\'base58 bitcoin\')\n\n ' return self._base
8,751,055,337,958,033,000
Multibase used to encode the CID: - if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used - if a CIDv1 was decoded from a bytestring, the 'base58btc' multibase is used - for a CIDv0, 'base58btc' is always used Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.base Multibase(name='base58btc', code='z', status='default', description='base58 bitcoin')
multiformats/cid/__init__.py
base
hashberg-io/multiformats
python
@property def base(self) -> Multibase: '\n Multibase used to encode the CID:\n\n - if a CIDv1 was decoded from a multibase-encoded string, the encoding multibase is used\n - if a CIDv1 was decoded from a bytestring, the \'base58btc\' multibase is used\n - for a CIDv0, \'base58btc\' is always used\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.base\n Multibase(name=\'base58btc\', code=\'z\',\n status=\'default\', description=\'base58 bitcoin\')\n\n ' return self._base
@property def codec(self) -> Multicodec: '\n Codec that the multihash digest refers to.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.codec\n Multicodec(name=\'raw\', tag=\'ipld\', code=\'0x55\',\n status=\'permanent\', description=\'raw binary\')\n\n ' return self._codec
2,974,990,426,359,168,000
Codec that the multihash digest refers to. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.codec Multicodec(name='raw', tag='ipld', code='0x55', status='permanent', description='raw binary')
multiformats/cid/__init__.py
codec
hashberg-io/multiformats
python
@property def codec(self) -> Multicodec: '\n Codec that the multihash digest refers to.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.codec\n Multicodec(name=\'raw\', tag=\'ipld\', code=\'0x55\',\n status=\'permanent\', description=\'raw binary\')\n\n ' return self._codec
@property def hashfun(self) -> Multihash: '\n Multihash used to produce the multihash digest.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.hashfun\n Multicodec(name=\'sha2-256\', tag=\'multihash\', code=\'0x12\',\n status=\'permanent\', description=\'\')\n\n ' return self._hashfun
1,040,017,296,922,144,400
Multihash used to produce the multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.hashfun Multicodec(name='sha2-256', tag='multihash', code='0x12', status='permanent', description='')
multiformats/cid/__init__.py
hashfun
hashberg-io/multiformats
python
@property def hashfun(self) -> Multihash: '\n Multihash used to produce the multihash digest.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.hashfun\n Multicodec(name=\'sha2-256\', tag=\'multihash\', code=\'0x12\',\n status=\'permanent\', description=\'\')\n\n ' return self._hashfun
@property def digest(self) -> bytes: '\n Multihash digest.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.digest.hex()\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\'\n\n ' return self._digest
340,595,137,949,885,060
Multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.digest.hex() '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
multiformats/cid/__init__.py
digest
hashberg-io/multiformats
python
@property def digest(self) -> bytes: '\n Multihash digest.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.digest.hex()\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\'\n\n ' return self._digest
@property def raw_digest(self) -> bytes: '\n Raw hash digest, decoded from the multihash digest.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.raw_digest.hex()\n \'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\'\n\n ' return multihash.unwrap(self._digest)
-3,530,206,930,099,280,000
Raw hash digest, decoded from the multihash digest. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.raw_digest.hex() '6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95'
multiformats/cid/__init__.py
raw_digest
hashberg-io/multiformats
python
@property def raw_digest(self) -> bytes: '\n Raw hash digest, decoded from the multihash digest.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.raw_digest.hex()\n \'6e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\'\n\n ' return multihash.unwrap(self._digest)
@property def human_readable(self) -> str: '\n Human-readable representation of the CID.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.human_readable\n \'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)\'\n\n ' raw_digest = self.raw_digest hashfun_str = f'({self.hashfun.name} : {(len(raw_digest) * 8)} : {raw_digest.hex().upper()})' return f'{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}'
776,096,244,485,080,600
Human-readable representation of the CID. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.human_readable 'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)'
multiformats/cid/__init__.py
human_readable
hashberg-io/multiformats
python
@property def human_readable(self) -> str: '\n Human-readable representation of the CID.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.human_readable\n \'base58btc - cidv1 - raw - (sha2-256 : 256 : 6E6FF7950A36187A801613426E858DCE686CD7D7E3C0FC42EE0330072D245C95)\'\n\n ' raw_digest = self.raw_digest hashfun_str = f'({self.hashfun.name} : {(len(raw_digest) * 8)} : {raw_digest.hex().upper()})' return f'{self.base.name} - cidv{self.version} - {self.codec.name} - {hashfun_str}'
def encode(self, base: Union[(None, str, Multibase)]=None) -> str: '\n Encodes the CID using a given multibase. If :obj:`None` is given,\n the CID\'s own multibase is used by default.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.encode() # default: cid.base\n \'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA\'\n >>> cid.encode("base32")\n \'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su\'\n\n :param base: the multibase to be used for encoding\n :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*\n\n :raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`\n\n ' if (self.version == 0): if (base is not None): raise ValueError('CIDv0 cannot be multibase-encoded, please set multibase=None.') return base58btc.encode(bytes(self)) if ((base is None) or (base == self.base)): base = self.base elif isinstance(base, str): base = multibase.get(base) else: multibase.validate_multibase(base) return base.encode(bytes(self))
-56,374,101,431,808,100
Encodes the CID using a given multibase. If :obj:`None` is given, the CID's own multibase is used by default. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid.encode() # default: cid.base 'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA' >>> cid.encode("base32") 'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su' :param base: the multibase to be used for encoding :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional* :raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`
multiformats/cid/__init__.py
encode
hashberg-io/multiformats
python
def encode(self, base: Union[(None, str, Multibase)]=None) -> str: '\n Encodes the CID using a given multibase. If :obj:`None` is given,\n the CID\'s own multibase is used by default.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid.encode() # default: cid.base\n \'zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA\'\n >>> cid.encode("base32")\n \'bafkreidon73zkcrwdb5iafqtijxildoonbwnpv7dyd6ef3qdgads2jc4su\'\n\n :param base: the multibase to be used for encoding\n :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*\n\n :raises KeyError: see :meth:`multiformats.multibase.Multibase.encode`\n\n ' if (self.version == 0): if (base is not None): raise ValueError('CIDv0 cannot be multibase-encoded, please set multibase=None.') return base58btc.encode(bytes(self)) if ((base is None) or (base == self.base)): base = self.base elif isinstance(base, str): base = multibase.get(base) else: multibase.validate_multibase(base) return base.encode(bytes(self))
def set(self, *, base: Union[(None, str, Multibase)]=None, version: Union[(None, int)]=None, codec: Union[(None, str, int, Multicodec)]=None) -> 'CID': '\n Returns a new CID obtained by setting new values for one or more of:\n ``base``, ``version``, or ``codec``.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(base="base32")\n CID(\'base32\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(codec="dag-cbor")\n CID(\'base58btc\', 1, \'dag-cbor\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(version=0, codec="dag-pb")\n CID(\'base58btc\', 0, \'dag-pb\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n # Note: \'CID.set\' returns new instances,\n # the original \'cid\' instance is unchanged\n\n If setting ``version`` to 0, ``base`` must be \'base58btc\' and ``codec`` must be \'dag-pb\'.\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(version=0, codec="dag-pb")\n CID(\'base58btc\', 0, \'dag-pb\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(version=0)\n ValueError: CIDv0 multicodec must be \'dag-pb\', found \'raw\' instead.\n >>> cid.set(version=0, codec="dag-pb", base="base32")\n ValueError: CIDv0 multibase must be \'base58btc\', found \'base32\' instead\n\n :param base: the new CID multibase, or :obj:`None` if multibase unchanged\n :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*\n :param version: the new CID version, or :obj:`None` if version unchanged\n :type version: :obj:`None`, 0 or 1, *optional*\n :param codec: the new content multicodec, or :obj:`None` if multicodec unchanged\n :type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*\n\n :raises KeyError: if the multibase or multicodec are unknown\n\n ' hashfun = self.hashfun digest = self.digest if ((base is not None) and (base not in (self.base, self.base.name))): base = _CID_validate_multibase(base) else: base = self.base if ((codec is not None) and (codec not in (self.codec, self.codec.name, self.codec.code))): codec = _CID_validate_multicodec(codec) else: codec = self.codec if ((version is not None) and (version != self.version)): _CID_validate_version(version, base, codec, hashfun) else: version = self.version return CID._new_instance(CID, base, version, codec, hashfun, digest)
-8,268,395,013,245,805,000
Returns a new CID obtained by setting new values for one or more of: ``base``, ``version``, or ``codec``. Example usage: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(base="base32") CID('base32', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(codec="dag-cbor") CID('base58btc', 1, 'dag-cbor', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0, codec="dag-pb") CID('base58btc', 0, 'dag-pb', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') # Note: 'CID.set' returns new instances, # the original 'cid' instance is unchanged If setting ``version`` to 0, ``base`` must be 'base58btc' and ``codec`` must be 'dag-pb'. >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> cid = CID.decode(s) >>> cid CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0, codec="dag-pb") CID('base58btc', 0, 'dag-pb', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') >>> cid.set(version=0) ValueError: CIDv0 multicodec must be 'dag-pb', found 'raw' instead. >>> cid.set(version=0, codec="dag-pb", base="base32") ValueError: CIDv0 multibase must be 'base58btc', found 'base32' instead :param base: the new CID multibase, or :obj:`None` if multibase unchanged :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional* :param version: the new CID version, or :obj:`None` if version unchanged :type version: :obj:`None`, 0 or 1, *optional* :param codec: the new content multicodec, or :obj:`None` if multicodec unchanged :type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional* :raises KeyError: if the multibase or multicodec are unknown
multiformats/cid/__init__.py
set
hashberg-io/multiformats
python
def set(self, *, base: Union[(None, str, Multibase)]=None, version: Union[(None, int)]=None, codec: Union[(None, str, int, Multicodec)]=None) -> 'CID': '\n Returns a new CID obtained by setting new values for one or more of:\n ``base``, ``version``, or ``codec``.\n\n Example usage:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(base="base32")\n CID(\'base32\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(codec="dag-cbor")\n CID(\'base58btc\', 1, \'dag-cbor\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(version=0, codec="dag-pb")\n CID(\'base58btc\', 0, \'dag-pb\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n # Note: \'CID.set\' returns new instances,\n # the original \'cid\' instance is unchanged\n\n If setting ``version`` to 0, ``base`` must be \'base58btc\' and ``codec`` must be \'dag-pb\'.\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> cid = CID.decode(s)\n >>> cid\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(version=0, codec="dag-pb")\n CID(\'base58btc\', 0, \'dag-pb\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n >>> cid.set(version=0)\n ValueError: CIDv0 multicodec must be \'dag-pb\', found \'raw\' instead.\n >>> cid.set(version=0, codec="dag-pb", base="base32")\n ValueError: CIDv0 multibase must be \'base58btc\', found \'base32\' instead\n\n :param base: the new CID multibase, or :obj:`None` if multibase unchanged\n :type base: :obj:`None`, :obj:`str` or :class:`~multiformats.multibase.Multibase`, *optional*\n :param version: the new CID version, or :obj:`None` if version unchanged\n :type version: :obj:`None`, 0 or 1, *optional*\n :param codec: the new content multicodec, or :obj:`None` if multicodec unchanged\n :type codec: :obj:`None`, :obj:`str` or :class:`~multiformats.multicodec.Multicodec`, *optional*\n\n :raises KeyError: if the multibase or multicodec are unknown\n\n ' hashfun = self.hashfun digest = self.digest if ((base is not None) and (base not in (self.base, self.base.name))): base = _CID_validate_multibase(base) else: base = self.base if ((codec is not None) and (codec not in (self.codec, self.codec.name, self.codec.code))): codec = _CID_validate_multicodec(codec) else: codec = self.codec if ((version is not None) and (version != self.version)): _CID_validate_version(version, base, codec, hashfun) else: version = self.version return CID._new_instance(CID, base, version, codec, hashfun, digest)
@staticmethod def decode(cid: Union[(str, BytesLike)]) -> 'CID': '\n Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`\n using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.\n\n Example usage for CIDv1 multibase-encoded string:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> CID.decode(s)\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n\n Example usage for CIDv1 bytestring (multibase always set to \'base58btc\'):\n\n >>> b = bytes.fromhex(\n ... "015512206e6ff7950a36187a801613426e85"\n ... "8dce686cd7d7e3c0fc42ee0330072d245c95")\n >>> CID.decode(b)\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n\n Example usage for CIDv0 base58-encoded string:\n\n >>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"\n >>> CID.decode(s)\n CID(\'base58btc\', 0, \'dag-pb\',\n \'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a\')\n\n Example usage for CIDv0 bytestring (multibase always set to \'base58btc\'):\n\n >>> b = bytes.fromhex(\n ... "1220c3c4733ec8affd06cf9e9ff50ffc6b"\n ... "cd2ec85a6170004bb709669c31de94391a")\n >>> CID.decode(b)\n CID(\'base58btc\', 0, \'dag-pb\',\n \'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a\')\n\n :param cid: the CID bytes or multibase-encoded string\n :type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`\n\n :raises ValueError: if the CID is malformed or the CID version is unsupported\n :raises KeyError: if the multibase, multicodec or multihash are unknown\n\n ' if isinstance(cid, str): (cid, mb) = _binary_cid_from_str(cid) else: mb = multibase.get('base58btc') validate(cid, BytesLike) cid = memoryview(cid) if ((len(cid) == 34) and (cid[0] == 18) and (cid[1] == 32)): v = 0 mc_code = 112 digest = cid else: (v, _, cid) = varint.decode_raw(cid) if (v == 0): raise ValueError('CIDv0 is malformed.') if (v in (2, 3)): raise ValueError('CID versions 2 and 3 are reserved for future use.') if (v != 1): raise ValueError(f'CIDv{v} is currently not supported.') (mc_code, _, cid) = multicodec.unwrap_raw(cid) digest = cid mc = multicodec.get(code=mc_code) (mh_code, _) = multihash.unwrap_raw(digest) mh = multihash.get(code=mh_code) return CID._new_instance(CID, mb, v, mc, mh, digest)
6,971,011,179,152,187,000
Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded. Example usage for CIDv1 multibase-encoded string: >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA" >>> CID.decode(s) CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') Example usage for CIDv1 bytestring (multibase always set to 'base58btc'): >>> b = bytes.fromhex( ... "015512206e6ff7950a36187a801613426e85" ... "8dce686cd7d7e3c0fc42ee0330072d245c95") >>> CID.decode(b) CID('base58btc', 1, 'raw', '12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95') Example usage for CIDv0 base58-encoded string: >>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR" >>> CID.decode(s) CID('base58btc', 0, 'dag-pb', '1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a') Example usage for CIDv0 bytestring (multibase always set to 'base58btc'): >>> b = bytes.fromhex( ... "1220c3c4733ec8affd06cf9e9ff50ffc6b" ... "cd2ec85a6170004bb709669c31de94391a") >>> CID.decode(b) CID('base58btc', 0, 'dag-pb', '1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a') :param cid: the CID bytes or multibase-encoded string :type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike` :raises ValueError: if the CID is malformed or the CID version is unsupported :raises KeyError: if the multibase, multicodec or multihash are unknown
multiformats/cid/__init__.py
decode
hashberg-io/multiformats
python
@staticmethod def decode(cid: Union[(str, BytesLike)]) -> 'CID': '\n Decodes a CID from a bytestring or a hex string (which will be converted to :obj:`bytes`\n using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded.\n\n Example usage for CIDv1 multibase-encoded string:\n\n >>> s = "zb2rhe5P4gXftAwvA4eXQ5HJwsER2owDyS9sKaQRRVQPn93bA"\n >>> CID.decode(s)\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n\n Example usage for CIDv1 bytestring (multibase always set to \'base58btc\'):\n\n >>> b = bytes.fromhex(\n ... "015512206e6ff7950a36187a801613426e85"\n ... "8dce686cd7d7e3c0fc42ee0330072d245c95")\n >>> CID.decode(b)\n CID(\'base58btc\', 1, \'raw\',\n \'12206e6ff7950a36187a801613426e858dce686cd7d7e3c0fc42ee0330072d245c95\')\n\n Example usage for CIDv0 base58-encoded string:\n\n >>> s = "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"\n >>> CID.decode(s)\n CID(\'base58btc\', 0, \'dag-pb\',\n \'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a\')\n\n Example usage for CIDv0 bytestring (multibase always set to \'base58btc\'):\n\n >>> b = bytes.fromhex(\n ... "1220c3c4733ec8affd06cf9e9ff50ffc6b"\n ... "cd2ec85a6170004bb709669c31de94391a")\n >>> CID.decode(b)\n CID(\'base58btc\', 0, \'dag-pb\',\n \'1220c3c4733ec8affd06cf9e9ff50ffc6bcd2ec85a6170004bb709669c31de94391a\')\n\n :param cid: the CID bytes or multibase-encoded string\n :type cid: :obj:`str` or :obj:`~multiformats.varint.BytesLike`\n\n :raises ValueError: if the CID is malformed or the CID version is unsupported\n :raises KeyError: if the multibase, multicodec or multihash are unknown\n\n ' if isinstance(cid, str): (cid, mb) = _binary_cid_from_str(cid) else: mb = multibase.get('base58btc') validate(cid, BytesLike) cid = memoryview(cid) if ((len(cid) == 34) and (cid[0] == 18) and (cid[1] == 32)): v = 0 mc_code = 112 digest = cid else: (v, _, cid) = varint.decode_raw(cid) if (v == 0): raise ValueError('CIDv0 is malformed.') if (v in (2, 3)): raise ValueError('CID versions 2 and 3 are reserved for future use.') if (v != 1): raise ValueError(f'CIDv{v} is currently not supported.') (mc_code, _, cid) = multicodec.unwrap_raw(cid) digest = cid mc = multicodec.get(code=mc_code) (mh_code, _) = multihash.unwrap_raw(digest) mh = multihash.get(code=mh_code) return CID._new_instance(CID, mb, v, mc, mh, digest)
@staticmethod def peer_id(pk_bytes: Union[(str, BytesLike)]) -> 'CID': '\n Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.\n\n The ``pk_bytes`` argument should be the binary public key, encoded according to the\n `PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.\n This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).\n Note: the hex string is not multibase encoded.\n\n Example usage with Ed25519 public key:\n\n >>> pk_bytes = bytes.fromhex(\n ... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")\n ... # a 32-byte Ed25519 public key\n >>> peer_id = CID.peer_id(pk_bytes)\n >>> peer_id\n CID(\'base32\', 1, \'libp2p-key\',\n \'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93\')\n #^^ 0x00 = \'identity\' multihash used (public key length <= 42)\n # ^^ 0x20 = 32-bytes of raw hash digest length\n >>> str(peer_id)\n \'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm\'\n\n Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_\n public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:\n\n >>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey\n >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n >>> private_key = Ed25519PrivateKey.generate()\n >>> public_key = private_key.public_key()\n >>> pk_bytes = public_key.public_bytes(\n ... encoding=Encoding.Raw,\n ... format=PublicFormat.Raw\n ... )\n >>> pk_bytes.hex()\n "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"\n\n Example usage with DER-encoded RSA public key:\n\n >>> pk_bytes = bytes.fromhex(\n ... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"\n ... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"\n ... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"\n ... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"\n ... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"\n ... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"\n ... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"\n ... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"\n ... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"\n ... "370203010001")\n ... # a 294-byte RSA public key\n >>> peer_id = CID.peer_id(pk_bytes)\n >>> peer_id\n CID(\'base32\', 1, \'libp2p-key\',\n \'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f\')\n #^^ 0x12 = \'sha2-256\' multihash used (public key length > 42)\n # ^^ 0x20 = 32-bytes of raw hash digest length\n >>> str(peer_id)\n \'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4\'\n\n Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_\n public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:\n\n >>> from cryptography.hazmat.primitives.asymmetric import rsa\n >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n >>> private_key = rsa.generate_private_key(\n ... public_exponent=65537,\n ... key_size=2048,\n ... )\n >>> public_key = private_key.public_key()\n >>> pk_bytes = public_key.public_bytes(\n ... encoding=Encoding.DER,\n ... format=PublicFormat.SubjectPublicKeyInfo\n ... )\n >>> pk_bytes.hex()\n "30820122300d06092a864886f70d01010105000382010f003082010a02820101"\n "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"\n "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"\n "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"\n "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"\n "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"\n "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"\n "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"\n "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"\n "370203010001"\n\n :param pk_bytes: the public key bytes\n :type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`\n\n :raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes\n\n ' if isinstance(pk_bytes, str): pk_bytes = bytes.fromhex(pk_bytes) else: validate(pk_bytes, BytesLike) if (len(pk_bytes) <= 42): mh = multihash.get('identity') digest = multihash.digest(pk_bytes, mh) else: mh = multihash.get('sha2-256') digest = multihash.digest(pk_bytes, mh) mc = multicodec.get(code=114) mb = multibase.get('base32') return CID._new_instance(CID, mb, 1, mc, mh, digest)
5,168,933,804,621,999,000
Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1. The ``pk_bytes`` argument should be the binary public key, encoded according to the `PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_. This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`). Note: the hex string is not multibase encoded. Example usage with Ed25519 public key: >>> pk_bytes = bytes.fromhex( ... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93") ... # a 32-byte Ed25519 public key >>> peer_id = CID.peer_id(pk_bytes) >>> peer_id CID('base32', 1, 'libp2p-key', '00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93') #^^ 0x00 = 'identity' multihash used (public key length <= 42) # ^^ 0x20 = 32-bytes of raw hash digest length >>> str(peer_id) 'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm' Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_ public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library: >>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat >>> private_key = Ed25519PrivateKey.generate() >>> public_key = private_key.public_key() >>> pk_bytes = public_key.public_bytes( ... encoding=Encoding.Raw, ... format=PublicFormat.Raw ... ) >>> pk_bytes.hex() "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93" Example usage with DER-encoded RSA public key: >>> pk_bytes = bytes.fromhex( ... "30820122300d06092a864886f70d01010105000382010f003082010a02820101" ... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e" ... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70" ... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5" ... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036" ... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d" ... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557" ... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8" ... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec" ... "370203010001") ... # a 294-byte RSA public key >>> peer_id = CID.peer_id(pk_bytes) >>> peer_id CID('base32', 1, 'libp2p-key', '1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f') #^^ 0x12 = 'sha2-256' multihash used (public key length > 42) # ^^ 0x20 = 32-bytes of raw hash digest length >>> str(peer_id) 'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4' Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_ public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library: >>> from cryptography.hazmat.primitives.asymmetric import rsa >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat >>> private_key = rsa.generate_private_key( ... public_exponent=65537, ... key_size=2048, ... ) >>> public_key = private_key.public_key() >>> pk_bytes = public_key.public_bytes( ... encoding=Encoding.DER, ... format=PublicFormat.SubjectPublicKeyInfo ... ) >>> pk_bytes.hex() "30820122300d06092a864886f70d01010105000382010f003082010a02820101" "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e" "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70" "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5" "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036" "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d" "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557" "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8" "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec" "370203010001" :param pk_bytes: the public key bytes :type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike` :raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes
multiformats/cid/__init__.py
peer_id
hashberg-io/multiformats
python
@staticmethod def peer_id(pk_bytes: Union[(str, BytesLike)]) -> 'CID': '\n Wraps the raw hash of a public key into a `PeerID <https://docs.libp2p.io/concepts/peer-id/>`_, as a CIDv1.\n\n The ``pk_bytes`` argument should be the binary public key, encoded according to the\n `PeerID spec <https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md>`_.\n This can be passed as a bytestring or as a hex string (which will be converted to :obj:`bytes` using :obj:`bytes.fromhex`).\n Note: the hex string is not multibase encoded.\n\n Example usage with Ed25519 public key:\n\n >>> pk_bytes = bytes.fromhex(\n ... "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93")\n ... # a 32-byte Ed25519 public key\n >>> peer_id = CID.peer_id(pk_bytes)\n >>> peer_id\n CID(\'base32\', 1, \'libp2p-key\',\n \'00201498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93\')\n #^^ 0x00 = \'identity\' multihash used (public key length <= 42)\n # ^^ 0x20 = 32-bytes of raw hash digest length\n >>> str(peer_id)\n \'bafzaaiautc2um6td375c3soz4bu4v4dv2fx4gp65jq5qdp5nvzsdg5t5sm\'\n\n Snippet showing how to obtain the `Ed25519 <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ed25519/>`_\n public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:\n\n >>> from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey\n >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n >>> private_key = Ed25519PrivateKey.generate()\n >>> public_key = private_key.public_key()\n >>> pk_bytes = public_key.public_bytes(\n ... encoding=Encoding.Raw,\n ... format=PublicFormat.Raw\n ... )\n >>> pk_bytes.hex()\n "1498b5467a63dffa2dc9d9e069caf075d16fc33fdd4c3b01bfadae6433767d93"\n\n Example usage with DER-encoded RSA public key:\n\n >>> pk_bytes = bytes.fromhex(\n ... "30820122300d06092a864886f70d01010105000382010f003082010a02820101"\n ... "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"\n ... "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"\n ... "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"\n ... "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"\n ... "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"\n ... "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"\n ... "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"\n ... "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"\n ... "370203010001")\n ... # a 294-byte RSA public key\n >>> peer_id = CID.peer_id(pk_bytes)\n >>> peer_id\n CID(\'base32\', 1, \'libp2p-key\',\n \'1220c1a6513ffb14f202f75453c49666a5b9d7ed9a1a068891daf824d477573f829f\')\n #^^ 0x12 = \'sha2-256\' multihash used (public key length > 42)\n # ^^ 0x20 = 32-bytes of raw hash digest length\n >>> str(peer_id)\n \'bafzbeigbuzit76yu6ibpovctyslgnjnz27wzugqgrci5v6be2r3vop4ct4\'\n\n Snippet showing how to obtain the `RSA <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/>`_\n public key bytestring using the `cryptography <https://github.com/pyca/cryptography>`_ library:\n\n >>> from cryptography.hazmat.primitives.asymmetric import rsa\n >>> from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n >>> private_key = rsa.generate_private_key(\n ... public_exponent=65537,\n ... key_size=2048,\n ... )\n >>> public_key = private_key.public_key()\n >>> pk_bytes = public_key.public_bytes(\n ... encoding=Encoding.DER,\n ... format=PublicFormat.SubjectPublicKeyInfo\n ... )\n >>> pk_bytes.hex()\n "30820122300d06092a864886f70d01010105000382010f003082010a02820101"\n "009a56a5c11e2705d0bfe0cd1fa66d5e519095cc741b62ed99ddf129c32e046e"\n "5ba3958bb8a068b05a95a6a0623cc3c889b1581793cd84a34cc2307e0dd74c70"\n "b4f230c74e5063ecd8e906d372be4eba13f47d04427a717ac78cb12b4b9c2ab5"\n "591f36f98021a70f84d782c36c51819054228ff35a45efa3f82b27849ec89036"\n "26b4a4c4b40f9f74b79caf55253687124c79cb10cd3bc73f0c44fbd341e5417d"\n "2e85e900d22849d2bc85ca6bf037f1f5b4f9759b4b6942fccdf1140b30ea7557"\n "87deb5c373c5953c14d64b523959a76a32a599903974a98cf38d4aaac7e359f8"\n "6b00a91dcf424bf794592139e7097d7e65889259227c07155770276b6eda4cec"\n "370203010001"\n\n :param pk_bytes: the public key bytes\n :type pk_bytes: :obj:`str` or :obj:`~multiformats.varint.BytesLike`\n\n :raises ValueError: if ``pk_bytes`` is passed as a string and is not the hex-string of some bytes\n\n ' if isinstance(pk_bytes, str): pk_bytes = bytes.fromhex(pk_bytes) else: validate(pk_bytes, BytesLike) if (len(pk_bytes) <= 42): mh = multihash.get('identity') digest = multihash.digest(pk_bytes, mh) else: mh = multihash.get('sha2-256') digest = multihash.digest(pk_bytes, mh) mc = multicodec.get(code=114) mb = multibase.get('base32') return CID._new_instance(CID, mb, 1, mc, mh, digest)
def train_negative_sampling(self, u_nid, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, data): '\n Unliked popular movie negative sampling:\n :param u_nid:\n :param train_pos_unid_inid_map:\n :param test_pos_unid_inid_map:\n :param neg_unid_inid_map:\n :param data:\n :return:\n ' num_pos_samples = len(train_pos_unid_inid_map[u_nid]) negative_inids = (test_pos_unid_inid_map[u_nid] + neg_unid_inid_map[u_nid]) nid_occs = np.array([data.item_nid_occs[0][nid] for nid in negative_inids]) nid_occs = (nid_occs / np.sum(nid_occs)) negative_inids = rd.choices(population=negative_inids, weights=nid_occs, k=(num_pos_samples * 5)) return negative_inids
1,163,391,387,365,685,800
Unliked popular movie negative sampling: :param u_nid: :param train_pos_unid_inid_map: :param test_pos_unid_inid_map: :param neg_unid_inid_map: :param data: :return:
benchmark/recsys/gcn_solver.py
train_negative_sampling
356255531/pytorch_geometric
python
def train_negative_sampling(self, u_nid, train_pos_unid_inid_map, test_pos_unid_inid_map, neg_unid_inid_map, data): '\n Unliked popular movie negative sampling:\n :param u_nid:\n :param train_pos_unid_inid_map:\n :param test_pos_unid_inid_map:\n :param neg_unid_inid_map:\n :param data:\n :return:\n ' num_pos_samples = len(train_pos_unid_inid_map[u_nid]) negative_inids = (test_pos_unid_inid_map[u_nid] + neg_unid_inid_map[u_nid]) nid_occs = np.array([data.item_nid_occs[0][nid] for nid in negative_inids]) nid_occs = (nid_occs / np.sum(nid_occs)) negative_inids = rd.choices(population=negative_inids, weights=nid_occs, k=(num_pos_samples * 5)) return negative_inids
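The sampler draws five negatives per positive interaction, weighted by item popularity, so frequently occurring items are proposed more often. A self-contained sketch of the same weighting idea with invented occurrence counts:

import random as rd
import numpy as np

item_occs = {10: 120, 11: 40, 12: 8}  # hypothetical item nid -> occurrence count
candidates = [10, 11, 12]
num_pos_samples = 4

weights = np.array([item_occs[nid] for nid in candidates], dtype=float)
weights /= weights.sum()  # normalize counts into a probability distribution
negatives = rd.choices(population=candidates, weights=weights, k=num_pos_samples * 5)
# item 10 dominates the draws because it is by far the most popular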
def fun(x): '\n Function returning the parameters of the normal sampler.\n mean = product of elements of x\n variance = exp(|x|)/(1+exp(|x|)).\n ' return (np.prod(x), (np.exp(np.sum(x)) / (np.exp(np.sum(x)) + 1)))
3,916,633,977,899,014,700
Function returning the parameters of the normal sampler. mean = product of elements of x variance = exp(|x|)/(1+exp(|x|)).
seqgibbs/tests/test_samplers.py
fun
I-Bouros/seqgibbs
python
def fun(x): '\n Function returning the parameters of the normal sampler.\n mean = product of elements of x\n variance = exp(|x|)/(1+exp(|x|)).\n ' return (np.prod(x), (np.exp(np.sum(x)) / (np.exp(np.sum(x)) + 1)))
def another_fun(x): '\n Function returning the parameters of the normal sampler.\n mean = sum of elements of x\n variance = exp(|x|)/(1+exp(|x|)).\n ' return (np.sum(x), (np.exp(np.sum(x)) / (np.exp(np.sum(x)) + 1)))
-1,303,013,952,990,475,500
Function returning the parameters of the normal sampler. mean = sum of elements of x variance = exp(|x|)/(1+exp(|x|)).
seqgibbs/tests/test_samplers.py
another_fun
I-Bouros/seqgibbs
python
def another_fun(x): '\n Function returning the parameters of the normal sampler.\n mean = sum of elements of x\n variance = exp(|x|)/(1+exp(|x|)).\n ' return (np.sum(x), (np.exp(np.sum(x)) / (np.exp(np.sum(x)) + 1)))
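In both helpers the variance term is the logistic sigmoid of the coordinate sum, so it always falls in (0, 1), a valid variance for the normal sampler; note that the code sums the coordinates, while the docstrings write |x|. A quick standalone check, assuming nothing beyond NumPy:

```python
import numpy as np

x = np.array([0.5, -1.0, 2.0])
s = np.sum(x)
variance = np.exp(s) / (np.exp(s) + 1)   # logistic sigmoid of the sum
assert 0.0 < variance < 1.0
mean_prod, mean_sum = np.prod(x), np.sum(x)   # `fun` vs `another_fun` means
print(mean_prod, mean_sum, variance)
```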
def _group(template, resource, action, proid): 'Render group template.' return template.format(resource=resource, action=action, proid=proid)
-1,832,616,014,194,641,400
Render group template.
lib/python/treadmill/api/authz/group.py
_group
bothejjms/treadmill
python
def _group(template, resource, action, proid): return template.format(resource=resource, action=action, proid=proid)
def authorize(user, action, resource, resource_id, payload): 'Authorize user/action/resource' del payload _LOGGER.info('Authorize: %s %s %s %s', user, action, resource, resource_id) proid = None if resource_id: proid = resource_id.partition('.')[0] why = [] for group_template in groups: group_name = _group(group_template, action=action, resource=resource, proid=proid) _LOGGER.info('Check authorization group: %s', group_name) try: group = grp.getgrnam(group_name) username = user.partition('@')[0] members = group.gr_mem _LOGGER.info('Authorized: User %s is member of %s.', username, group_name) if (username in members): return (True, why) else: why.append('{} not member of {}'.format(username, group_name)) except KeyError: _LOGGER.info('Group does not exist: %s', group_name) why.append('no such group: {}'.format(group_name)) return (False, why)
4,777,468,944,904,468,000
Authorize user/action/resource
lib/python/treadmill/api/authz/group.py
authorize
bothejjms/treadmill
python
def authorize(user, action, resource, resource_id, payload): del payload _LOGGER.info('Authorize: %s %s %s %s', user, action, resource, resource_id) proid = None if resource_id: proid = resource_id.partition('.')[0] why = [] for group_template in groups: group_name = _group(group_template, action=action, resource=resource, proid=proid) _LOGGER.info('Check authorization group: %s', group_name) try: group = grp.getgrnam(group_name) username = user.partition('@')[0] members = group.gr_mem _LOGGER.info('Authorized: User %s is member of %s.', username, group_name) if (username in members): return (True, why) else: why.append('{} not member of {}'.format(username, group_name)) except KeyError: _LOGGER.info('Group does not exist: %s', group_name) why.append('no such group: {}'.format(group_name)) return (False, why)
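`_group` is plain string templating; `authorize` then checks OS group membership via `grp.getgrnam`. A sketch of the rendering step alone, with a hypothetical template string (the real `groups` list is configured elsewhere):

```python
# Hypothetical group-name template; real templates come from configuration.
template = 'treadmill.{proid}.{resource}.{action}'

group_name = template.format(resource='app', action='create', proid='proid1')
print(group_name)   # treadmill.proid1.app.create
# authorize() then asks the OS, via grp.getgrnam(group_name), whether the
# requesting user appears in that group's membership list.
```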
def get_conf_dir(self): '\n Returns the path to the directory where Cassandra config are located\n ' return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
5,371,359,768,358,725,000
Returns the path to the directory where Cassandra config are located
ccmlib/dse_node.py
get_conf_dir
thobbs/ccm
python
def get_conf_dir(self): '\n \n ' return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'): '\n Watch the log of this node until it detects that the provided other\n nodes are marked UP. This method works similarly to watch_log_for_death.\n\n We want to provide a higher default timeout when this is called on DSE.\n ' super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
-150,998,833,348,054,400
Watch the log of this node until it detects that the provided other nodes are marked UP. This method works similarly to watch_log_for_death. We want to provide a higher default timeout when this is called on DSE.
ccmlib/dse_node.py
watch_log_for_alive
thobbs/ccm
python
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'): '\n Watch the log of this node until it detects that the provided other\n nodes are marked UP. This method works similarly to watch_log_for_death.\n\n We want to provide a higher default timeout when this is called on DSE.\n ' super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
def export_dse_home_in_dse_env_sh(self): "\n Due to the way CCM lays out files, separating the repository\n from the node(s) confs, the `dse-env.sh` script of each node\n needs to have its DSE_HOME var set and exported. Since DSE\n 4.5.x, the stock `dse-env.sh` file includes a commented-out\n place to do exactly this, intended for installers.\n Basically: read in the file, write it back out and add the two\n lines.\n 'sstableloader' is an example of a node script that depends on\n this, when used in a CCM-built cluster.\n " with open((self.get_bin_dir() + '/dse-env.sh'), 'r') as dse_env_sh: buf = dse_env_sh.readlines() with open((self.get_bin_dir() + '/dse-env.sh'), 'w') as out_file: for line in buf: out_file.write(line) if (line == '# This is here so the installer can force set DSE_HOME\n'): out_file.write((('DSE_HOME=' + self.get_install_dir()) + '\nexport DSE_HOME\n'))
-31,969,036,990,758,484
Due to the way CCM lays out files, separating the repository from the node(s) confs, the `dse-env.sh` script of each node needs to have its DSE_HOME var set and exported. Since DSE 4.5.x, the stock `dse-env.sh` file includes a commented-out place to do exactly this, intended for installers. Basically: read in the file, write it back out and add the two lines. 'sstableloader' is an example of a node script that depends on this, when used in a CCM-built cluster.
ccmlib/dse_node.py
export_dse_home_in_dse_env_sh
thobbs/ccm
python
def export_dse_home_in_dse_env_sh(self): "\n Due to the way CCM lays out files, separating the repository\n from the node(s) confs, the `dse-env.sh` script of each node\n needs to have its DSE_HOME var set and exported. Since DSE\n 4.5.x, the stock `dse-env.sh` file includes a commented-out\n place to do exactly this, intended for installers.\n Basically: read in the file, write it back out and add the two\n lines.\n 'sstableloader' is an example of a node script that depends on\n this, when used in a CCM-built cluster.\n " with open((self.get_bin_dir() + '/dse-env.sh'), 'r') as dse_env_sh: buf = dse_env_sh.readlines() with open((self.get_bin_dir() + '/dse-env.sh'), 'w') as out_file: for line in buf: out_file.write(line) if (line == '# This is here so the installer can force set DSE_HOME\n'): out_file.write((('DSE_HOME=' + self.get_install_dir()) + '\nexport DSE_HOME\n'))
def load_labels(cache_dir: Union[(Path, str)]) -> Tuple[(Tuple[(str, ...)], np.ndarray, List[str], np.ndarray)]: '\n prepare all the labels\n ' filename_io = download_url('https://raw.githubusercontent.com/csailvision/places365/master/IO_places365.txt', cache_dir) with open(filename_io) as f: lines = f.readlines() labels_IO = [] for line in lines: items = line.rstrip().split() labels_IO.append((int(items[(- 1)]) - 1)) labels_IO = np.array(labels_IO) filename_category = download_url('https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt', cache_dir) _classes = list() with open(filename_category) as class_file: for line in class_file: _classes.append(line.strip().split(' ')[0][3:]) classes = tuple(_classes) filename_attribute = download_url('https://raw.githubusercontent.com/csailvision/places365/master/labels_sunattribute.txt', cache_dir) with open(filename_attribute) as f: lines = f.readlines() labels_attribute = [item.rstrip() for item in lines] filename_W = download_url('http://places2.csail.mit.edu/models_places365/W_sceneattribute_wideresnet18.npy', cache_dir) W_attribute = np.load(filename_W) return (classes, labels_IO, labels_attribute, W_attribute)
-3,650,344,468,072,724,500
prepare all the labels
scripts/detect_room.py
load_labels
airbert-vln/bnb-dataset
python
def load_labels(cache_dir: Union[(Path, str)]) -> Tuple[(Tuple[(str, ...)], np.ndarray, List[str], np.ndarray)]: '\n \n ' filename_io = download_url('https://raw.githubusercontent.com/csailvision/places365/master/IO_places365.txt', cache_dir) with open(filename_io) as f: lines = f.readlines() labels_IO = [] for line in lines: items = line.rstrip().split() labels_IO.append((int(items[(- 1)]) - 1)) labels_IO = np.array(labels_IO) filename_category = download_url('https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt', cache_dir) _classes = list() with open(filename_category) as class_file: for line in class_file: _classes.append(line.strip().split(' ')[0][3:]) classes = tuple(_classes) filename_attribute = download_url('https://raw.githubusercontent.com/csailvision/places365/master/labels_sunattribute.txt', cache_dir) with open(filename_attribute) as f: lines = f.readlines() labels_attribute = [item.rstrip() for item in lines] filename_W = download_url('http://places2.csail.mit.edu/models_places365/W_sceneattribute_wideresnet18.npy', cache_dir) W_attribute = np.load(filename_W) return (classes, labels_IO, labels_attribute, W_attribute)
def softmax(x): 'Compute softmax values for each sets of scores in x.' e_x = np.exp((x - np.max(x))) return (e_x / e_x.sum(axis=0))
-8,078,771,299,122,488,000
Compute softmax values for each sets of scores in x.
scripts/detect_room.py
softmax
airbert-vln/bnb-dataset
python
def softmax(x): e_x = np.exp((x - np.max(x))) return (e_x / e_x.sum(axis=0))
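The `max` subtraction in the record above is the standard overflow guard: softmax is shift-invariant, so subtracting the largest score changes nothing in the result but keeps `exp` in range. A quick demonstration with large scores:

```python
import numpy as np

def softmax(x):
    e_x = np.exp(x - np.max(x))   # shift-invariant, avoids overflow
    return e_x / e_x.sum(axis=0)

logits = np.array([1000.0, 1001.0, 1002.0])   # naive exp() would overflow
probs = softmax(logits)
assert np.isclose(probs.sum(), 1.0)
print(probs)   # approximately [0.090, 0.245, 0.665]
```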
def onerror(func, path, exc_info): '\n Error handler for ``shutil.rmtree``.\n\n If the error is due to an access error (read only file)\n it attempts to add write permission and then retries.\n\n If the error is for another reason it re-raises the error.\n\n Usage : ``shutil.rmtree(path, onerror=onerror)``\n ' import stat if (not os.access(path, os.W_OK)): os.chmod(path, stat.S_IWUSR) func(path)
2,055,500,432,482,497,300
Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)``
thlib/ui_classes/ui_watch_folder_classes.py
onerror
listyque/TACTIC-Handler
python
def onerror(func, path, exc_info): '\n Error handler for ``shutil.rmtree``.\n\n If the error is due to an access error (read only file)\n it attempts to add write permission and then retries.\n\n If the error is for another reason it re-raises the error.\n\n Usage : ``shutil.rmtree(path, onerror=onerror)``\n ' import stat if (not os.access(path, os.W_OK)): os.chmod(path, stat.S_IWUSR) func(path)
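A self-contained run of the standard recipe this record is based on. The handler clears the read-only bit and retries; the re-raise branch follows the docstring, though the record's flattened body leaves it implicit. Behaviour varies by OS (on POSIX, deletability depends on the directory's permissions, so the handler may never fire):

```python
import os
import shutil
import stat
import tempfile

def onerror(func, path, exc_info):
    if not os.access(path, os.W_OK):
        os.chmod(path, stat.S_IWUSR)   # add write permission
        func(path)                     # retry the failed operation
    else:
        raise exc_info[1]              # unrelated failure: re-raise it

tmp = tempfile.mkdtemp()
locked = os.path.join(tmp, 'readonly.txt')
open(locked, 'w').close()
os.chmod(locked, stat.S_IRUSR)         # make the file read-only
shutil.rmtree(tmp, onerror=onerror)    # succeeds despite the lock
assert not os.path.exists(tmp)
```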
def triangleNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' result = 0 nums.sort() for i in reversed(xrange(2, len(nums))): (left, right) = (0, (i - 1)) while (left < right): if ((nums[left] + nums[right]) > nums[i]): result += (right - left) right -= 1 else: left += 1 return result
3,631,428,936,445,728,000
:type nums: List[int] :rtype: int
Python/valid-triangle-number.py
triangleNumber
20kzhan/LeetCode-Solutions
python
def triangleNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' result = 0 nums.sort() for i in reversed(xrange(2, len(nums))): (left, right) = (0, (i - 1)) while (left < right): if ((nums[left] + nums[right]) > nums[i]): result += (right - left) right -= 1 else: left += 1 return result
def triangleNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' result = 0 nums.sort() for i in xrange((len(nums) - 2)): if (nums[i] == 0): continue k = (i + 2) for j in xrange((i + 1), (len(nums) - 1)): while ((k < len(nums)) and ((nums[i] + nums[j]) > nums[k])): k += 1 result += ((k - j) - 1) return result
2,975,241,749,865,687,600
:type nums: List[int] :rtype: int
Python/valid-triangle-number.py
triangleNumber
20kzhan/LeetCode-Solutions
python
def triangleNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' result = 0 nums.sort() for i in xrange((len(nums) - 2)): if (nums[i] == 0): continue k = (i + 2) for j in xrange((i + 1), (len(nums) - 1)): while ((k < len(nums)) and ((nums[i] + nums[j]) > nums[k])): k += 1 result += ((k - j) - 1) return result
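Both variants sort first and count, for each choice of largest side c, how many pairs satisfy a + b > c. A Python 3 port of the two-pointer version (the records use Python 2's `xrange`):

```python
def triangle_number(nums):
    nums = sorted(nums)
    result = 0
    for i in range(len(nums) - 1, 1, -1):   # nums[i] is the largest side
        left, right = 0, i - 1
        while left < right:
            if nums[left] + nums[right] > nums[i]:
                # every l in [left, right - 1] also pairs with right
                result += right - left
                right -= 1
            else:
                left += 1
    return result

assert triangle_number([2, 2, 3, 4]) == 3   # (2,3,4) twice and (2,2,3)
```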
def test_ftu_with_tour(self): '\n https://moztrap.mozilla.org/manage/case/6119/\n ' self.ftu.run_ftu_setup_with_default_values() self.ftu.tap_take_tour() self.assertEqual(self.ftu.step1_header_text, 'Swipe up and down to browse your apps and bookmarks. Tap and hold an icon to delete, move, or edit it.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step2_header_text, 'Tap to expand and collapse app groups. Drag an app into a new space to create a group.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step3_header_text, 'Swipe down to access recent notifications, usage information and settings.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step4_header_text, 'Drag from the left edge of your screen to return to recently used apps.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step5_header_text, 'Tap on the search box anytime to start a search or go to a website.') self.ftu.tap_back() self.assertEqual(self.ftu.step4_header_text, 'Drag from the left edge of your screen to return to recently used apps.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step5_header_text, 'Tap on the search box anytime to start a search or go to a website.') self.ftu.tap_tour_next() self.ftu.wait_for_finish_tutorial_section() self.ftu.tap_lets_go_button() self.wait_for_condition((lambda m: (self.apps.displayed_app.name == Homescreen.name)))
11,680,524,589,999,332
https://moztrap.mozilla.org/manage/case/6119/
tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py
test_ftu_with_tour
ADLR-es/gaia
python
def test_ftu_with_tour(self): '\n \n ' self.ftu.run_ftu_setup_with_default_values() self.ftu.tap_take_tour() self.assertEqual(self.ftu.step1_header_text, 'Swipe up and down to browse your apps and bookmarks. Tap and hold an icon to delete, move, or edit it.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step2_header_text, 'Tap to expand and collapse app groups. Drag an app into a new space to create a group.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step3_header_text, 'Swipe down to access recent notifications, usage information and settings.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step4_header_text, 'Drag from the left edge of your screen to return to recently used apps.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step5_header_text, 'Tap on the search box anytime to start a search or go to a website.') self.ftu.tap_back() self.assertEqual(self.ftu.step4_header_text, 'Drag from the left edge of your screen to return to recently used apps.') self.ftu.tap_tour_next() self.assertEqual(self.ftu.step5_header_text, 'Tap on the search box anytime to start a search or go to a website.') self.ftu.tap_tour_next() self.ftu.wait_for_finish_tutorial_section() self.ftu.tap_lets_go_button() self.wait_for_condition((lambda m: (self.apps.displayed_app.name == Homescreen.name)))
def __init__(self): 'Instantiation function.' self.params = {'n_m': numpy.nan, 'K': numpy.nan, 'tht': numpy.nan}
-3,131,612,132,851,073,000
Instantiation function.
src/pygaps/modelling/temkinapprox.py
__init__
ReginaPeralta/ReginaPeralta
python
def __init__(self): self.params = {'n_m': numpy.nan, 'K': numpy.nan, 'tht': numpy.nan}
def loading(self, pressure): '\n Calculate loading at specified pressure.\n\n Parameters\n ----------\n pressure : float\n The pressure at which to calculate the loading.\n\n Returns\n -------\n float\n Loading at specified pressure.\n ' lang_load = ((self.params['K'] * pressure) / (1.0 + (self.params['K'] * pressure))) return (self.params['n_m'] * (lang_load + ((self.params['tht'] * (lang_load ** 2)) * (lang_load - 1))))
-3,862,127,436,033,543,000
Calculate loading at specified pressure. Parameters ---------- pressure : float The pressure at which to calculate the loading. Returns ------- float Loading at specified pressure.
src/pygaps/modelling/temkinapprox.py
loading
ReginaPeralta/ReginaPeralta
python
def loading(self, pressure): '\n Calculate loading at specified pressure.\n\n Parameters\n ----------\n pressure : float\n The pressure at which to calculate the loading.\n\n Returns\n -------\n float\n Loading at specified pressure.\n ' lang_load = ((self.params['K'] * pressure) / (1.0 + (self.params['K'] * pressure))) return (self.params['n_m'] * (lang_load + ((self.params['tht'] * (lang_load ** 2)) * (lang_load - 1))))
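Written out, the record computes the Temkin approximation isotherm in terms of the Langmuir coverage, with n_m, K and the `tht` parameter (here θ) taken from the params dict:

```latex
\theta_L = \frac{K p}{1 + K p}, \qquad
n(p) = n_m \left[\, \theta_L + \theta\, \theta_L^{2} \left( \theta_L - 1 \right) \right]
```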
def pressure(self, loading): '\n Calculate pressure at specified loading.\n\n For the TemkinApprox model, the pressure will\n be computed numerically as no analytical inversion is possible.\n\n Parameters\n ----------\n loading : float\n The loading at which to calculate the pressure.\n\n Returns\n -------\n float\n Pressure at specified loading.\n ' def fun(x): return (self.loading(x) - loading) opt_res = scipy.optimize.root(fun, 0, method='hybr') if (not opt_res.success): raise CalculationError('\n Root finding for value {0} failed.\n '.format(loading)) return opt_res.x
-7,795,259,018,044,517,000
Calculate pressure at specified loading. For the TemkinApprox model, the pressure will be computed numerically as no analytical inversion is possible. Parameters ---------- loading : float The loading at which to calculate the pressure. Returns ------- float Pressure at specified loading.
src/pygaps/modelling/temkinapprox.py
pressure
ReginaPeralta/ReginaPeralta
python
def pressure(self, loading): '\n Calculate pressure at specified loading.\n\n For the TemkinApprox model, the pressure will\n be computed numerically as no analytical inversion is possible.\n\n Parameters\n ----------\n loading : float\n The loading at which to calculate the pressure.\n\n Returns\n -------\n float\n Pressure at specified loading.\n ' def fun(x): return (self.loading(x) - loading) opt_res = scipy.optimize.root(fun, 0, method='hybr') if (not opt_res.success): raise CalculationError('\n Root finding for value {0} failed.\n '.format(loading)) return opt_res.x
def spreading_pressure(self, pressure): '\n Calculate spreading pressure at specified gas pressure.\n\n Function that calculates spreading pressure by solving the\n following integral at each point i.\n\n .. math::\n\n \\pi = \\int_{0}^{p_i} \\frac{n_i(p_i)}{p_i} dp_i\n\n The integral for the TemkinApprox model is solved analytically.\n\n .. math::\n\n \\pi = n_m \\Big( \\ln{(1 + K p)} + \\frac{\\theta (2 K p + 1)}{2(1 + K p)^2}\\Big)\n\n Parameters\n ----------\n pressure : float\n The pressure at which to calculate the spreading pressure.\n\n Returns\n -------\n float\n Spreading pressure at specified pressure.\n ' one_plus_kp = (1.0 + (self.params['K'] * pressure)) return (self.params['n_m'] * (numpy.log(one_plus_kp) + ((self.params['tht'] * (((2.0 * self.params['K']) * pressure) + 1.0)) / (2.0 * (one_plus_kp ** 2)))))
-8,227,195,899,010,587,000
Calculate spreading pressure at specified gas pressure. Function that calculates spreading pressure by solving the following integral at each point i. .. math:: \pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i The integral for the TemkinApprox model is solved analytically. .. math:: \pi = n_m \Big( \ln{(1 + K p)} + \frac{\theta (2 K p + 1)}{2(1 + K p)^2}\Big) Parameters ---------- pressure : float The pressure at which to calculate the spreading pressure. Returns ------- float Spreading pressure at specified pressure.
src/pygaps/modelling/temkinapprox.py
spreading_pressure
ReginaPeralta/ReginaPeralta
python
def spreading_pressure(self, pressure): '\n Calculate spreading pressure at specified gas pressure.\n\n Function that calculates spreading pressure by solving the\n following integral at each point i.\n\n .. math::\n\n \\pi = \\int_{0}^{p_i} \\frac{n_i(p_i)}{p_i} dp_i\n\n The integral for the TemkinApprox model is solved analytically.\n\n .. math::\n\n \\pi = n_m \\Big( \\ln{(1 + K p)} + \\frac{\\theta (2 K p + 1)}{2(1 + K p)^2}\\Big)\n\n Parameters\n ----------\n pressure : float\n The pressure at which to calculate the spreading pressure.\n\n Returns\n -------\n float\n Spreading pressure at specified pressure.\n ' one_plus_kp = (1.0 + (self.params['K'] * pressure)) return (self.params['n_m'] * (numpy.log(one_plus_kp) + ((self.params['tht'] * (((2.0 * self.params['K']) * pressure) + 1.0)) / (2.0 * (one_plus_kp ** 2)))))
def initial_guess(self, pressure, loading): '\n Return initial guess for fitting.\n\n Parameters\n ----------\n pressure : ndarray\n Pressure data.\n loading : ndarray\n Loading data.\n\n Returns\n -------\n dict\n Dictionary of initial guesses for the parameters.\n ' (saturation_loading, langmuir_k) = super().initial_guess(pressure, loading) guess = {'n_m': saturation_loading, 'K': langmuir_k, 'tht': 0.0} for param in guess: if (guess[param] < self.param_bounds[param][0]): guess[param] = self.param_bounds[param][0] if (guess[param] > self.param_bounds[param][1]): guess[param] = self.param_bounds[param][1] return guess
4,176,960,451,183,268,400
Return initial guess for fitting. Parameters ---------- pressure : ndarray Pressure data. loading : ndarray Loading data. Returns ------- dict Dictionary of initial guesses for the parameters.
src/pygaps/modelling/temkinapprox.py
initial_guess
ReginaPeralta/ReginaPeralta
python
def initial_guess(self, pressure, loading): '\n Return initial guess for fitting.\n\n Parameters\n ----------\n pressure : ndarray\n Pressure data.\n loading : ndarray\n Loading data.\n\n Returns\n -------\n dict\n Dictionary of initial guesses for the parameters.\n ' (saturation_loading, langmuir_k) = super().initial_guess(pressure, loading) guess = {'n_m': saturation_loading, 'K': langmuir_k, 'tht': 0.0} for param in guess: if (guess[param] < self.param_bounds[param][0]): guess[param] = self.param_bounds[param][0] if (guess[param] > self.param_bounds[param][1]): guess[param] = self.param_bounds[param][1] return guess
def get_batch(self, items: list): 'Return the defined batch of the given items.\n Items are usually input files.' if (len(items) < self.batches): raise WorkflowError('Batching rule {} has less input files than batches. Please choose a smaller number of batches.'.format(self.rulename)) items = sorted(items) batch_len = math.floor((len(items) / self.batches)) idx = (self.idx - 1) i = (idx * batch_len) if self.is_final: return items[i:] else: return items[i:(i + batch_len)]
-192,219,685,288,529,400
Return the defined batch of the given items. Items are usually input files.
snakemake/dag.py
get_batch
baileythegreen/snakemake
python
def get_batch(self, items: list): 'Return the defined batch of the given items.\n Items are usually input files.' if (len(items) < self.batches): raise WorkflowError('Batching rule {} has less input files than batches. Please choose a smaller number of batches.'.format(self.rulename)) items = sorted(items) batch_len = math.floor((len(items) / self.batches)) idx = (self.idx - 1) i = (idx * batch_len) if self.is_final: return items[i:] else: return items[i:(i + batch_len)]
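The slicing arithmetic is easy to verify outside the class: each batch gets `floor(len(items) / batches)` items and the final batch absorbs the remainder. A sketch where the record's `is_final` is modelled as `idx == batches`:

```python
import math

def get_batch(items, idx, batches):
    # idx is 1-based; the final batch takes any remainder.
    items = sorted(items)
    batch_len = math.floor(len(items) / batches)
    i = (idx - 1) * batch_len
    return items[i:] if idx == batches else items[i:i + batch_len]

items = list(range(10))
assert get_batch(items, 1, 3) == [0, 1, 2]
assert get_batch(items, 2, 3) == [3, 4, 5]
assert get_batch(items, 3, 3) == [6, 7, 8, 9]   # remainder lands here
```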
def init(self, progress=False): ' Initialise the DAG. ' for job in map(self.rule2job, self.targetrules): job = self.update([job], progress=progress) self.targetjobs.add(job) for file in self.targetfiles: job = self.update(self.file2jobs(file), file=file, progress=progress) self.targetjobs.add(job) self.cleanup() self.update_needrun() self.set_until_jobs() self.delete_omitfrom_jobs() self.update_jobids() self.check_directory_outputs() for (i, job) in enumerate(self.jobs): job.is_valid()
5,137,139,006,577,920,000
Initialise the DAG.
snakemake/dag.py
init
baileythegreen/snakemake
python
def init(self, progress=False): ' ' for job in map(self.rule2job, self.targetrules): job = self.update([job], progress=progress) self.targetjobs.add(job) for file in self.targetfiles: job = self.update(self.file2jobs(file), file=file, progress=progress) self.targetjobs.add(job) self.cleanup() self.update_needrun() self.set_until_jobs() self.delete_omitfrom_jobs() self.update_jobids() self.check_directory_outputs() for (i, job) in enumerate(self.jobs): job.is_valid()
def check_directory_outputs(self): 'Check that no output file is contained in a directory output of the same or another rule.' outputs = sorted({(path(f), job) for job in self.jobs for f in job.output for path in (os.path.abspath, os.path.realpath)}) for i in range((len(outputs) - 1)): ((a, job_a), (b, job_b)) = outputs[i:(i + 2)] try: common = os.path.commonpath([a, b]) except ValueError: continue if ((a != b) and (common == os.path.commonpath([a])) and (job_a != job_b)): raise ChildIOException(parent=outputs[i], child=outputs[(i + 1)])
7,875,700,181,244,890,000
Check that no output file is contained in a directory output of the same or another rule.
snakemake/dag.py
check_directory_outputs
baileythegreen/snakemake
python
def check_directory_outputs(self): outputs = sorted({(path(f), job) for job in self.jobs for f in job.output for path in (os.path.abspath, os.path.realpath)}) for i in range((len(outputs) - 1)): ((a, job_a), (b, job_b)) = outputs[i:(i + 2)] try: common = os.path.commonpath([a, b]) except ValueError: continue if ((a != b) and (common == os.path.commonpath([a])) and (job_a != job_b)): raise ChildIOException(parent=outputs[i], child=outputs[(i + 1)])
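The containment test in this record reduces to: path `a` is an ancestor of `b` exactly when their common path is `a` itself (comparing against `commonpath([a])` normalises `a`). In isolation:

```python
import os

def is_ancestor(a, b):
    # a contains b iff commonpath(a, b) is a itself (normalised).
    try:
        return a != b and os.path.commonpath([a, b]) == os.path.commonpath([a])
    except ValueError:    # e.g. paths on different drives
        return False

assert is_ancestor('/data/out', '/data/out/file.txt')
assert not is_ancestor('/data/out', '/data/output')   # no prefix confusion
```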
def update_output_index(self): 'Update the OutputIndex.' self.output_index = OutputIndex(self.rules)
4,577,355,156,363,228,000
Update the OutputIndex.
snakemake/dag.py
update_output_index
baileythegreen/snakemake
python
def update_output_index(self): self.output_index = OutputIndex(self.rules)
def check_incomplete(self): 'Check if any output files are incomplete. This is done by looking up\n markers in the persistence module.' if (not self.ignore_incomplete): incomplete = self.incomplete_files if incomplete: if self.force_incomplete: logger.debug('Forcing incomplete files:') logger.debug(('\t' + '\n\t'.join(incomplete))) self.forcefiles.update(incomplete) else: raise IncompleteFilesException(incomplete)
3,707,933,761,524,800,000
Check if any output files are incomplete. This is done by looking up markers in the persistence module.
snakemake/dag.py
check_incomplete
baileythegreen/snakemake
python
def check_incomplete(self): 'Check if any output files are incomplete. This is done by looking up\n markers in the persistence module.' if (not self.ignore_incomplete): incomplete = self.incomplete_files if incomplete: if self.force_incomplete: logger.debug('Forcing incomplete files:') logger.debug(('\t' + '\n\t'.join(incomplete))) self.forcefiles.update(incomplete) else: raise IncompleteFilesException(incomplete)
def incomplete_external_jobid(self, job): 'Return the external jobid of the job if it is marked as incomplete.\n\n Returns None, if job is not incomplete, or if no external jobid has been\n registered or if force_incomplete is True.\n ' if self.force_incomplete: return None jobids = self.workflow.persistence.external_jobids(job) if (len(jobids) == 1): return jobids[0] elif (len(jobids) > 1): raise WorkflowError('Multiple different external jobids registered for output files of incomplete job {} ({}). This job cannot be resumed. Execute Snakemake with --rerun-incomplete to fix this issue.'.format(job.jobid, jobids))
-2,468,886,771,994,416,000
Return the external jobid of the job if it is marked as incomplete. Returns None, if job is not incomplete, or if no external jobid has been registered or if force_incomplete is True.
snakemake/dag.py
incomplete_external_jobid
baileythegreen/snakemake
python
def incomplete_external_jobid(self, job): 'Return the external jobid of the job if it is marked as incomplete.\n\n Returns None, if job is not incomplete, or if no external jobid has been\n registered or if force_incomplete is True.\n ' if self.force_incomplete: return None jobids = self.workflow.persistence.external_jobids(job) if (len(jobids) == 1): return jobids[0] elif (len(jobids) > 1): raise WorkflowError('Multiple different external jobids registered for output files of incomplete job {} ({}). This job cannot be resumed. Execute Snakemake with --rerun-incomplete to fix this issue.'.format(job.jobid, jobids))
def check_dynamic(self): 'Check dynamic output and update downstream rules if necessary.' if self.has_dynamic_rules: for job in filter((lambda job: (job.dynamic_output and (not self.needrun(job)))), self.jobs): self.update_dynamic(job) self.postprocess()
-7,339,925,665,385,330,000
Check dynamic output and update downstream rules if necessary.
snakemake/dag.py
check_dynamic
baileythegreen/snakemake
python
def check_dynamic(self): if self.has_dynamic_rules: for job in filter((lambda job: (job.dynamic_output and (not self.needrun(job)))), self.jobs): self.update_dynamic(job) self.postprocess()
@property def dynamic_output_jobs(self): 'Iterate over all jobs with dynamic output files.' return (job for job in self.jobs if job.dynamic_output)
-3,501,796,108,319,540,000
Iterate over all jobs with dynamic output files.
snakemake/dag.py
dynamic_output_jobs
baileythegreen/snakemake
python
@property def dynamic_output_jobs(self): return (job for job in self.jobs if job.dynamic_output)
@property def jobs(self): ' All jobs in the DAG. ' for job in self.bfs(self.dependencies, *self.targetjobs): (yield job)
4,769,151,645,865,818,000
All jobs in the DAG.
snakemake/dag.py
jobs
baileythegreen/snakemake
python
@property def jobs(self): ' ' for job in self.bfs(self.dependencies, *self.targetjobs): (yield job)
@property def needrun_jobs(self): ' Jobs that need to be executed. ' for job in filter(self.needrun, self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished)): (yield job)
7,452,190,725,727,733,000
Jobs that need to be executed.
snakemake/dag.py
needrun_jobs
baileythegreen/snakemake
python
@property def needrun_jobs(self): ' ' for job in filter(self.needrun, self.bfs(self.dependencies, *self.targetjobs, stop=self.noneedrun_finished)): (yield job)
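The last few records all delegate to `self.bfs`, a breadth-first walk over the dependency mapping. Its internals are not shown in this section, so the stand-in below over a plain dict is an assumption about its shape, with made-up job names:

```python
from collections import deque

def bfs(dependencies, *start):
    # Yield each job once, walking the dependency mapping breadth-first.
    seen, queue = set(start), deque(start)
    while queue:
        job = queue.popleft()
        yield job
        for dep in dependencies.get(job, ()):
            if dep not in seen:
                seen.add(dep)
                queue.append(dep)

deps = {'all': ['plot'], 'plot': ['stats'], 'stats': []}
print(list(bfs(deps, 'all')))   # ['all', 'plot', 'stats']
```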