Dataset schema:
  text — string, length 15 to 7.82k characters (Python function with its name masked as METHOD_NAME)
  ids — sequence of ints, length 1 to 7 (tokenized target method name)
def METHOD_NAME(self):
    return self.client.format_url(
        "/providers/Microsoft.Network/trafficManagerGeographicHierarchies/default",
        **self.url_parameters
    )
[ 274 ]
def METHOD_NAME(self, test_data, device, args):
    model = self.model

    model.to(device)
    model.eval()

    metrics = {"test_correct": 0, "test_loss": 0, "test_total": 0}

    criterion = nn.CrossEntropyLoss().to(device)

    with torch.no_grad():
        for batch_idx, (x, target) in enumerate(test_data):
            x = x.to(device)
            target = target.to(device)
            pred = model(x)
            loss = criterion(pred, target)  # pylint: disable=E1102

            _, predicted = torch.max(pred, -1)
            correct = predicted.eq(target).sum()

            metrics["test_correct"] += correct.item()
            metrics["test_loss"] += loss.item() * target.size(0)
            metrics["test_total"] += target.size(0)
    return metrics
[ 9 ]
def METHOD_NAME(process,
                threshold_silicon=threshold_conc_proc.threshold_silicon,  # in mipT
                threshold_scintillator=threshold_conc_proc.threshold_scintillator,  # in mipT
                coarsenTriggerCells=threshold_conc_proc.coarsenTriggerCells
                ):
    parameters = threshold_conc_proc.clone(
        threshold_silicon = threshold_silicon,
        threshold_scintillator = threshold_scintillator,
        coarsenTriggerCells = coarsenTriggerCells
    )
    process.l1tHGCalConcentratorProducer.ProcessorParameters = parameters
    return process
[ 343, 17595, 853 ]
def METHOD_NAME():
    """
    "Upgrade" packages
    """
    with _loaded_state(__opts__) as state:
        for p in state["packages"]:
            version_float = float(state["packages"][p])
            version_float = version_float + 1.0
            state["packages"][p] = str(version_float)
    return state["packages"]
[ 738 ]
def METHOD_NAME(self):
    params_change_list = [
        # variable | value |
        # ===================================
        ('PlatformRules', 'Server'),
        ('BpmStrutVersion', '0x22'),
        ('BpmRevision', '0x01'),
        ('BpmRevocation', '1'),
        ('AcmRevocation', '2'),
        ('NEMPages', '0x40'),
        ('AcpiBase', '0x500'),
        ('IbbFlags', '0x3'),
        ('IbbHashAlgID', '0x0B:SHA256'),
        ('TxtInclude', 'FALSE'),
        ('PcdInclude', 'TRUE'),
        ('BpmSigScheme', '0x14:RSASSA'),
        ('BpmSigPubKey', 'Bpmgen2\\keys\\bpm_pubkey_2048.pem'),
        ('BpmSigPrivKey', 'Bpmgen2\\keys\\bpm_privkey_2048.pem'),
        ('BpmKeySizeBits', '2048'),
        ('BpmSigHashAlgID', '0x0B:SHA256'),
    ]
    return params_change_list
[ 19, 12356, 434, 194, 245 ]
async def METHOD_NAME(self):
    # Verification requires a lot fewer input parameters
    suite = BbsBlsSignatureProof2020(
        key_pair=WalletKeyPair(wallet=self.wallet, key_type=BLS12381G2),
    )

    result = await verify(
        document=DOC_DERIVED_BBS,
        suites=[suite],
        purpose=AssertionProofPurpose(),
        document_loader=custom_document_loader,
    )

    assert result.verified
[ 9, 1162, 5584, 12149, 1334, -1 ]
def METHOD_NAME(self, x, y, out, idx):
[ 9, 245, 2443 ]
def METHOD_NAME(self, **kwargs):
    env = {
        "GOROOT_FINAL": self.goroot_dir,
    }
    if self.go_bootstrap:
        env["GOROOT_BOOTSTRAP"] = self.go_bootstrap
    cmd = "bash make.bash".split()
    if self.config.verbose:
        cmd += ["-v"]
    self.run_cmd(cmd, cwd=self.make_dir, env=env)
[ 296 ]
def METHOD_NAME(self):
[ 14608 ]
def METHOD_NAME(self):
    pass
[ 72, 710 ]
def METHOD_NAME(self, cuda=False):
    func = lambda x: torch.sin(x)
    means = torch.randn(10)
    variances = torch.randn(10).abs()
    quadrature = GaussHermiteQuadrature1D()

    if cuda:
        means = means.cuda()
        variances = variances.cuda()
        quadrature = quadrature.cuda()

    dist = torch.distributions.Normal(means, variances.sqrt())

    # Use quadrature
    results = quadrature(func, dist)

    # Use Monte-Carlo
    samples = dist.rsample(torch.Size([20000]))
    actual = func(samples).mean(0)

    self.assertLess(torch.mean(torch.abs(actual - results)), 0.1)
[ 9, 6383, 13695, 14287, 1170, 227, 1576 ]
def METHOD_NAME(self):
    """
    test correct config type
    :return:
    """
    self.assertEqual(type(self.conf.get()), collections.OrderedDict)
[ 9, 44 ]
def METHOD_NAME(self) -> str:
    return f"{self.api_base}/chat/completions"
[ 274 ]
def METHOD_NAME(ts):
    e = api.load('de421.bsp')
    jup = e['jupiter barycenter']
    astrometric = e['sun'].at(ts.utc(1980, 1, 1, 0, 0)).observe(jup)
    hlat, hlon, d = astrometric.ecliptic_latlon()
    compare(hlat.degrees, 1.013, 0.001)
    compare(hlon.degrees, 151.3229, 0.001)
[ 9, 12246, 896 ]
def METHOD_NAME(self, show=None):
    """
    Update the Emby Media Server host via HTTP API.

    :return: True for no issue or False if there was an error
    """
    if app.USE_EMBY:
        if not app.EMBY_HOST:
            log.debug('EMBY: No host specified, check your settings')
            return False

        if show:
            # EMBY only supports TVDB ids
            provider = 'tvdbid'
            if show.indexer == INDEXER_TVDBV2:
                tvdb_id = show.indexerid
            else:
                # Try using external ids to get a TVDB id
                tvdb_id = show.externals.get(mappings[INDEXER_TVDBV2], None)

            if tvdb_id is None:
                if show.indexer == INDEXER_TVRAGE:
                    log.warning('EMBY: TVRage indexer no longer valid')
                else:
                    log.warning(
                        'EMBY: Unable to find a TVDB ID for {series},'
                        ' and {indexer} indexer is unsupported',
                        {'series': show.name,
                         'indexer': indexer_id_to_name(show.indexer)}
                    )
                return False

            params = {
                provider: text_type(tvdb_id)
            }
        else:
            params = {}

        url = 'http://{host}/emby/Library/Series/Updated'.format(host=app.EMBY_HOST)
        try:
            resp = self.session.post(
                url=url,
                params=params,
                headers={
                    'X-MediaBrowser-Token': app.EMBY_APIKEY
                }
            )
            resp.raise_for_status()

            if resp.text:
                log.debug('EMBY: HTTP response: {0}', resp.text.replace('\n', ''))

            log.info('EMBY: Successfully sent a "Series Library Updated" command.')
            return True
        except (HTTPError, RequestException) as error:
            log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',
                        {'url': url, 'error': ex(error)})
            return False
[ 86, 3106 ]
def METHOD_NAME(self, v1, v2):
    angle = v1.angle(v2)
    self.assertAlmostEqual(90, angle, delta=TS_POINT_EPSILON)
[ 638, 5329 ]
def METHOD_NAME(function):
    # Check that the Rbf function interpolates through the nodes (3D).
    x = random.rand(50, 1)*4 - 2
    y = random.rand(50, 1)*4 - 2
    z = random.rand(50, 1)*4 - 2
    d = x*exp(-x**2 - y**2)
    rbf = Rbf(x, y, z, d, epsilon=2, function=function)
    di = rbf(x, y, z)
    di.shape = x.shape
    assert_array_almost_equal(di, d)
[ 250, -1, 4239 ]
def METHOD_NAME():
    host = None
    port = None
    unix = None
    write = 1
    storage = '1'
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'p:h:U:S:1', ['nowrite'])
        for o, a in opts:
            if o == '-p':
                port = int(a)
            elif o == '-h':
                host = a
            elif o == '-U':
                unix = a
            elif o == '-S':
                storage = a
            elif o == '--nowrite':
                write = 0
            elif o == '-1':
                ZEO_VERSION = 1  # NOQA: F841 unused variable
    except Exception as err:
        s = str(err)
        if s:
            s = ": " + s
        print(err.__class__.__name__ + s)
        usage()

    if unix is not None:
        addr = unix
    else:
        if host is None:
            host = socket.gethostname()
        if port is None:
            usage()
        addr = host, port

    setup_logging()
    check_server(addr, storage, write)
[ 57 ]
def METHOD_NAME(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
[ 19, 200 ]
def METHOD_NAME(self, client_name=None):
    session_to_use = self.get_session_by_name_or_default(client_name)
    return session_to_use.METHOD_NAME()
[ 19, 1988, 390, 274 ]
def METHOD_NAME(system, integrator, platform=None):
    """Create a Context.

    If platform is None, GLOBAL_ALCHEMY_PLATFORM is used.

    """
    if platform is None:
        platform = GLOBAL_FORCES_PLATFORM
    if platform is not None:
        context = openmm.Context(system, integrator, platform)
    else:
        context = openmm.Context(system, integrator)
    return context
[ 129, 198 ]
def METHOD_NAME(self):
    pass
[ 538 ]
def METHOD_NAME():
    """
    Headers containing non-ascii codepoints should default to decoding as utf-8.
    """
    raw_headers = [(b"Custom", "Code point: ☃".encode("utf-8"))]
    headers = httpx.Headers(raw_headers)
    assert dict(headers) == {"custom": "Code point: ☃"}
    assert headers.encoding == "utf-8"
[ 9, 2131, 1268, 7634, 1629 ]
def METHOD_NAME(self, *args, **kwargs):
    pass
[ 527, 539 ]
def METHOD_NAME(self):
    self.send_fuzzed_response()
[ 74, 1276 ]
def METHOD_NAME(self):
    # setup_system()
    self.redis = (
        fakeredis.FakeStrictRedis()
    )  # (server=fakeredis.FakeServer(), version=7)
    orig_xadd = self.redis.xadd
    self.xadd_id = ""

    def xadd_side_effect(*args, **kwargs):
        self.xadd_id = orig_xadd(*args, **kwargs)
        return self.xadd_id

    def xread_side_effect(*args, **kwargs):
        return [
            ["topic", [[self.xadd_id, {"id": self.xadd_id, "result": "SUCCESS"}]]]
        ]

    self.redis.xadd = Mock()
    self.redis.xadd.side_effect = xadd_side_effect
    self.redis.xread = Mock()
    self.redis.xread.side_effect = xread_side_effect
    self.patcher = patch("redis.Redis", return_value=self.redis)
    self.mock_redis = self.patcher.start()
    self.addCleanup(self.patcher.stop)

    self.model = TargetModel(name="INST", scope="DEFAULT")
    self.model.create()
    collect = Packet("INST", "COLLECT")
    Store.hset("DEFAULT__openc3cmd__INST", "COLLECT", json.dumps(collect.as_json()))
    abort = Packet("INST", "ABORT")
    Store.hset("DEFAULT__openc3cmd__INST", "ABORT", json.dumps(abort.as_json()))
[ 0, 1 ]
def METHOD_NAME(box_bg="#CCCCCC", arrow1="#88CCFF", arrow2="#88FF88", supervised=True):
    fig = plt.figure(figsize=(9, 6), facecolor="w")
    ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)
    ax.set_xlim(0, 9)
    ax.set_ylim(0, 6)

    patches = [
        Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
        Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
        Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
        Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
        Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
        Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
        Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
        Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
        Circle((5.5, 3.5), 1.0, fc=box_bg),
        Polygon([[5.5, 1.7], [6.1, 1.1], [5.5, 0.5], [4.9, 1.1]], fc=box_bg),
        FancyArrow(
            2.3, 4.6, 0.35, 0, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2
        ),
        FancyArrow(
            3.75, 4.2, 0.5, -0.2, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2
        ),
        FancyArrow(
            5.5, 2.4, 0, -0.4, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2
        ),
        FancyArrow(
            2.0, 1.1, 0.5, 0, fc=arrow2, width=0.25, head_width=0.5, head_length=0.2
        ),
        FancyArrow(
            3.3, 1.1, 1.3, 0, fc=arrow2, width=0.25, head_width=0.5, head_length=0.2
        ),
        FancyArrow(
            6.2, 1.1, 0.8, 0, fc=arrow2, width=0.25, head_width=0.5, head_length=0.2
        ),
    ]

    if supervised:
        patches += [
            Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
            Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
            Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
            FancyArrow(
                2.3, 2.9, 2.0, 0, fc=arrow1, width=0.25, head_width=0.5, head_length=0.2
            ),
            Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg),
        ]
    else:
        patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]

    for p in patches:
        ax.add_patch(p)

    plt.text(
        1.45,
        4.9,
        "Training\nText,\nDocuments,\nImages,\netc.",
        ha="center",
        va="center",
        fontsize=14,
    )
    plt.text(3.6, 4.9, "Feature\nVectors", ha="left", va="center", fontsize=14)
    plt.text(
        5.5, 3.5, "Machine\nLearning\nAlgorithm", ha="center", va="center", fontsize=14
    )
    plt.text(
        1.05,
        1.1,
        "New Text,\nDocument,\nImage,\netc.",
        ha="center",
        va="center",
        fontsize=14,
    )
    plt.text(3.3, 1.7, "Feature\nVector", ha="left", va="center", fontsize=14)
    plt.text(5.5, 1.1, "Predictive\nModel", ha="center", va="center", fontsize=12)

    if supervised:
        plt.text(1.45, 3.05, "Labels", ha="center", va="center", fontsize=14)
        plt.text(8.05, 1.1, "Expected\nLabel", ha="center", va="center", fontsize=14)
        plt.text(
            8.8, 5.8, "Supervised Learning Model", ha="right", va="top", fontsize=18
        )
    else:
        plt.text(
            8.05,
            1.1,
            "Likelihood\nor Cluster ID\nor Better\nRepresentation",
            ha="center",
            va="center",
            fontsize=12,
        )
        plt.text(
            8.8, 5.8, "Unsupervised Learning Model", ha="right", va="top", fontsize=18
        )
[ 129, 414 ]
def METHOD_NAME(
    event, services_by_dependencies, soa_dir, synapse_service_dir
):
    filename = event[3].decode()
    log.debug(f"process_inotify_event on {filename}")
    service_instance, suffix = os.path.splitext(filename)
    if suffix != ".json":
        return

    services_to_update = services_by_dependencies.get(service_instance, ())
    if not services_to_update:
        return

    # filter active_service_groups() down to just the names in services_to_update
    service_groups = {
        service_group: macs
        for service_group, macs in firewall.active_service_groups().items()
        if service_group in services_to_update
    }

    try:
        with firewall.firewall_flock():
            firewall.ensure_service_chains(service_groups, soa_dir, synapse_service_dir)
        for service_to_update in services_to_update:
            log.debug(f"Updated {service_to_update}")
    except TimeoutError as e:
        log.error(
            "Unable to update firewalls for {} because time-out obtaining flock: {}".format(
                service_groups.keys(), e
            )
        )
[ 356, 5495, 417 ]
def METHOD_NAME(management_group_id: Optional[str] = None,
                variable_name: Optional[str] = None,
                variable_value_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVariableValueAtManagementGroupResult:
    """
    This operation retrieves a single variable value; given its name, management group it was created at and the variable it's created for.
    Azure REST API version: 2022-08-01-preview.

    :param str management_group_id: The ID of the management group.
    :param str variable_name: The name of the variable to operate on.
    :param str variable_value_name: The name of the variable value to operate on.
    """
    __args__ = dict()
    __args__['managementGroupId'] = management_group_id
    __args__['variableName'] = variable_name
    __args__['variableValueName'] = variable_value_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:authorization:getVariableValueAtManagementGroup', __args__, opts=opts, typ=GetVariableValueAtManagementGroupResult).value

    return AwaitableGetVariableValueAtManagementGroupResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'),
        values=pulumi.get(__ret__, 'values'))
[ 19, 1210, 99, 1541, 3611, 846 ]
def METHOD_NAME(self) -> str:
    if not (parsed := ci.util.urlparse(self.api_url())):
        return None
    return f'{parsed.scheme}://{parsed.hostname}'
[ 414, 274 ]
def METHOD_NAME():
    lock = filelock.FileLock(LOCK_FILE)
    try:
        lock.acquire(timeout=0.1)
        lock.release()
    except filelock.Timeout:
        try:
            with open(TIMESTAMP_FILE, 'rt') as f:
                last_locked = float(f.readline().strip())
            assert time.time() - last_locked <= TIMEOUT
        # Respectively: file not found, invalid content, old timestamp.
        except (IOError, ValueError, AssertionError):
            app.logger.warning('Liveness check failed.')
            return ('The current task has taken too long.',
                    HTTPStatus.INTERNAL_SERVER_ERROR)
    return 'Service alive'
[ 10639, 250 ]
def METHOD_NAME(a): return \
[ 497, 12663, 497, 1305, 238, 3264, 8046 ]
def METHOD_NAME(self):
    if self._mom is not None:
        return
    self.log.info("MOM: Using named unix socket: %s", self._sock_path)
    self._mom = ThrottledClient(unixrpc.UnixXmlRpcClient(
        self._sock_path,
        CONNECTION_TIMEOUT_SEC
    ))
[ 707 ]
def METHOD_NAME():
    with open('xor_tokenholders.csv') as f:
        lines = f.readlines()
    data = {}
    for line in lines[1:]:
        parts = line.split(',')
        addr = parts[0].strip('"').replace('000000000000000000000000', '')
        balance = Decimal(parts[1].strip('"'))
        data[addr] = balance
    return data
[ 214, 466, 11965 ]
def METHOD_NAME():
    nonlocal tracker
    tracker = rpc.connect_tracker(self.tracker_host, self.tracker_port)
[ 707 ]
def METHOD_NAME(self):
    """Test conflicting definitions path"""
    fn = env.input_path("mergeerror1.yaml")
    with self.assertRaises(ValueError) as ve:
        SchemaLoader(fn)
    self.assertEqual(
        "Conflicting URIs (http://example.org/schema2, http://example.org/schema1) for item: c1",
        str(ve.exception),
    )
[ 9, -1 ]
def METHOD_NAME(web3):
    """https://eth.wiki/json-rpc/API#eth_gettransactioncount"""
    result = web3.request(
        "eth_getTransactionCount", [autonity.ACCOUNT, "latest"])
    validators.HexString().validate(result)
    assert result == "0x0"
[ 9, 4689, 19, 1853, 29 ]
def METHOD_NAME(annotation, api_key):
    '''Takes annotation dict and api_key string'''
    base_url = 'https://api.circonus.com/v2'
    annotate_post_endpoint = '/annotation'
    resp = requests.post(base_url + annotate_post_endpoint,
                         headers=build_headers(api_key),
                         data=json.dumps(annotation))
    resp.raise_for_status()
    return resp
[ 72, 2141 ]
def METHOD_NAME(self):
    with self.subTest("regular match"):
        self.assertEqual(match_totem("pac").get().key, "pacifism")
        self.assertEqual(match_totem("death").get().key, "death")
        self.assertFalse(match_totem("nonexistent"))

    with self.subTest("limited scope"):
        self.assertEqual(match_totem("pac", {"pacifism", "desperation"}).get().key, "pacifism")
        self.assertFalse(match_totem("death", {"pacifism", "desperation"}))
[ 9, 590, 16957 ]
def METHOD_NAME(
    self,
    documents: Sequence[Document],
    show_progress: bool = False,
) -> List[BaseNode]:
    """Parse document into nodes.

    Args:
        documents (Sequence[Document]): documents to parse
        include_metadata (bool): whether to include metadata in nodes

    """
    with self.callback_manager.event(
        CBEventType.NODE_PARSING, payload={EventPayload.DOCUMENTS: documents}
    ) as event:
        all_nodes: List[BaseNode] = []
        documents_with_progress = get_tqdm_iterable(
            documents, show_progress, "Parsing documents into nodes"
        )

        for document in documents_with_progress:
            self.sentence_splitter(document.text)
            nodes = self.build_window_nodes_from_documents([document])
            all_nodes.extend(nodes)

        if self.metadata_extractor is not None:
            all_nodes = self.metadata_extractor.process_nodes(all_nodes)

        event.on_end(payload={EventPayload.NODES: all_nodes})

    return all_nodes
[ 19, 480, 280, 7510 ]
def METHOD_NAME(self, create_n):
    for t in self.types:
        if t == "mlm":
            aug = CharInsert(
                "mlm", create_n=create_n, model_name="__internal_testing__/tiny-random-ernie", vocab="test_vocab"
            )
            augmented = aug.augment(self.sequences)
            self.assertEqual(len(self.sequences), len(augmented))
            continue
        elif t == "custom":
            aug = CharInsert(
                "custom", create_n=create_n, custom_file_path=self.custom_file_path, vocab="test_vocab"
            )
        else:
            aug = CharInsert(t, create_n=create_n, vocab="test_vocab")
        augmented = aug.augment(self.sequences)
        self.assertEqual(len(self.sequences), len(augmented))
        self.assertEqual(create_n, len(augmented[0]))
        self.assertEqual(create_n, len(augmented[1]))
[ 9, 3874, 408 ]
def METHOD_NAME(
    parsed_document: str,
    model_id: str,
    section_name: str,
    model_info: ModelInfo,
    code_formatter: CodeFormatter,
):
    """
    :param parsed_document: OpenApi parsed document
    :param model_id: instance or shared
    :param section_name: init or instances
    :param model_info: Information to build the model file
    :param code_formatter:
    """
    # Whether or not there are options with default values
    options_with_defaults = len(model_info.defaults_file_lines) > 0
    model_file_lines = parsed_document.splitlines()
    _add_imports(model_file_lines, options_with_defaults, len(model_info.deprecation_data))
    _fix_types(model_file_lines)

    if model_id in model_info.deprecation_data:
        model_file_lines += _define_deprecation_functions(model_id, section_name)

    model_file_lines += _define_validator_functions(model_id, model_info.validator_data, options_with_defaults)

    config_lines = []
    for i, line in enumerate(model_file_lines):
        if line.startswith('    model_config = ConfigDict('):
            config_lines.append(i)

    extra_config_lines = ['        arbitrary_types_allowed=True,']
    for i, line_number in enumerate(config_lines):
        index = line_number + (len(extra_config_lines) * i) + 1
        for line in extra_config_lines:
            model_file_lines.insert(index, line)
        if i == len(config_lines) - 1:
            model_file_lines.insert(index, '        validate_default=True,')

    model_file_lines.append('')
    model_file_contents = '\n'.join(model_file_lines)

    if any(len(line) > 120 for line in model_file_lines):
        model_file_contents = code_formatter.apply_black(model_file_contents)

    return model_file_contents
[ 56, 578, 171 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    Metadata pertaining to creation and last modification of the resource.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(self):
    return self.labeltext
[ 19, 636, 526 ]
def METHOD_NAME(self):
    return count_parameters_in_MB(self.history)
[ 19, 578, 1318 ]
def METHOD_NAME(self) -> TableauGraphQLApiQueryExtractor:
    """
    Builds a TableauGraphQLApiQueryExtractor. All data required can be retrieved with a single GraphQL call.
    :return: A TableauGraphQLApiQueryExtractor that provides dashboard query metadata.
    """
    extractor = TableauGraphQLApiQueryExtractor()
    tableau_extractor_conf = \
        Scoped.get_scoped_conf(self._conf, extractor.get_scope())\
        .with_fallback(self._conf)\
        .with_fallback(ConfigFactory.from_dict({TableauGraphQLApiExtractor.QUERY: self.query,
                                                STATIC_RECORD_DICT: {'product': 'tableau'}
                                                }
                                               )
                       )
    extractor.init(conf=tableau_extractor_conf)
    return extractor
[ 56, 2761 ]
def METHOD_NAME(const_args):
    # test construction
    info = ModifierInfo(**const_args)
    assert info, "No object returned for info constructor"

    # test serialization
    info_str = info.json()
    assert info_str, "No json returned for info"

    # test deserialization
    info_reconst = ModifierInfo.parse_raw(info_str)
    assert info == info_reconst, "Reconstructed does not equal original"
[ 9, 2872, 100, 2526 ]
def METHOD_NAME(filedata):
    HEAD_FIX = dict(
        num_head_lines=93,
        num_head_fmt=1001,
        data_originator="Brem, Benjamin; Baltensperger, Urs",
        sponsor_organisation="CH02L, Paul Scherrer Institut, PSI, Laboratory of Atmospheric Chemistry (LAC), OFLB, , 5232, Villigen PSI, Switzerland",
        submitter="Brem, Benjamin",
        project_association="ACTRIS CREATE EMEP GAW-WDCA",
        vol_num=1,
        vol_totnum=1,
        ref_date=np.datetime64("2019-01-01T00:00:00"),
        revision_date=np.datetime64("2021-05-28T00:00:00"),
        freq=0.041667,
        descr_time_unit="days from file reference point",
        num_cols_dependent=23,
        mul_factors=[1.0] * 23,
        vals_invalid=[999.999999, 9999.99, 999.99, 9999.99]
        + [99.99999999] * 9
        + [999.99999999] * 9
        + [9.999999],
        descr_first_col="end_time of measurement, days from the file reference point",
    )
    assert isinstance(filedata.head_fix, dict)
    assert filedata.head_fix == HEAD_FIX
[ 9, 6077, 6078, 6079, 171, 373, 1112 ]
def METHOD_NAME(long_running_task, root_mailbox_copy):
    """Check that a long-running task (i.e., running or paused for more than two days)
    is detected and an e-mail notification is sent to admin users.

    :id: effc1ff2-263b-11ee-b623-000c2989e153

    :setup:
        1. Create an admin user with e-mail 'root@localhost'.
        2. Change the long-running tasks checker cron schedule from '0 0 * * * ' (midnight)
           to '* * * * * ' (every minute).
        3. Start the `sendmail` service (disabled by default).

    :steps:
        1. Create a long-running task:
            1a. Schedule a sample task to run on the Satellite host.
            2b. In DB, update the task start time and status report time to two days back,
                so it is considered by Satellite as a long-running task.
        2. Update the long-running task checker schedule to run every minute
           (it runs at midnight by default).
        3. Wait for the notification e-mail to be sent to the admin user address.
        4. Check the e-mail if it contains all the important information, like,
           the task ID, link to the task, link to all long-running tasks.

    :BZ: 1950836, 2223996

    :customerscenario: true
    """
    task_id = long_running_task['task']['id']
    assert task_id

    for email in root_mailbox_copy:
        if task_id in email.as_string():
            assert 'Tasks pending since' in email.get(
                'Subject'
            ), f'Notification e-mail has wrong subject: {email.get("Subject")}'
            for mime_body in email.get_payload():
                body_text = mime_body.as_string()
                assert 'Tasks lingering in states running, paused since' in body_text
                assert f'/foreman_tasks/tasks/{task_id}' in body_text
                assert (
                    '/foreman_tasks/tasks?search=state+%5E+%28running%2C+paused'
                    '%29+AND+state_updated_at' in body_text
                ), 'Link for long-running tasks is missing in the e-mail body.'
                if not is_open('BZ:2223996'):
                    assert findall(r'_\("[\w\s]*"\)', body_text), 'Untranslated strings found.'
[ 9, 2302, 857, 43, 524, 1340, 620 ]
def METHOD_NAME(term: str = "", sort: str = "articlePublishedDate") -> pd.DataFrame:
    """Get news for a given term and source. [Source: Ultima Insights News Monitor]

    Parameters
    ----------
    term : str
        term to search on the news articles
    sort: str
        the column to sort by

    Returns
    -------
    articles: pd.DataFrame
        term to search on the news articles

    Examples
    --------
    >>> from openbb_terminal.sdk import openbb
    >>> openbb.news()
    """
    # Necessary for installer so that it can locate the correct certificates for
    # API calls and https
    # https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error/73270162#73270162
    os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
    os.environ["SSL_CERT_FILE"] = certifi.where()

    current_user = get_current_user()
    if current_user.credentials.API_ULTIMA_KEY == NO_API_KEY:
        auth_header = None
    else:
        auth_header = {
            "Authorization": f"Bearer {current_user.credentials.API_ULTIMA_KEY}"
        }

    have_data = False
    limit = 0

    while not have_data:
        if term:
            term = quote(term)
            term = term.upper()
            term = term.strip()
            if term in supported_terms():
                if auth_header:
                    data = request(
                        f"{ULTIMA_BASE_URL}/getNewsArticles/{term}", headers=auth_header
                    )
                else:
                    data = request(f"{ULTIMA_BASE_URL}/getNewsArticles/{term}")
            else:
                console.print(
                    "[red]Ticker not supported. Unable to retrieve data\n[/red]"
                )
                return pd.DataFrame()
        else:
            console.print("[red]No term specified. Unable to retrieve data\n[/red]")
            return pd.DataFrame()

        if (
            hasattr(data, "status") and data.status_code == 200
        ):  # Checking if data has status attribute and if data request succeeded
            if data.content:
                have_data = True
            elif limit == 60:  # Breaking if 60 successful requests return no data
                console.print("[red]Timeout occurred. Please try again\n[/red]")
                break
            limit = limit + 1
        elif (
            hasattr(data, "status") and data.status_code == 429
        ):  # If data request failed
            console.print(
                "[red]Too many requests. Please get an API Key from https://www.ultimainsights.ai/[/red]"
            )
            break
        elif (
            hasattr(data, "status") and data.status_code != 200
        ):  # If data request failed
            console.print("[red]Status code not 200. Unable to retrieve data\n[/red]")
            break
        else:
            # console.print("[red]Could not retrieve data\n[/red]")
            break

    if not data.json():
        return pd.DataFrame()

    df = pd.DataFrame(
        data.json(),
        columns=[
            "articleHeadline",
            "articleURL",
            "articlePublishedDate",
            "riskCategory",
            "riskElaboratedDescription",
            "relevancyScore",
        ],
    )
    df = df[df["relevancyScore"] < 5]
    df = df[df["relevancyScore"] > 3.5]
    df["riskElaboratedDescription"] = df["riskElaboratedDescription"].str.replace(
        "\n", ""
    )
    df["articlePublishedDate"] = pd.to_datetime(df["articlePublishedDate"])
    df = df.sort_values(by=[sort], ascending=False)

    return df
[ 19, 2665 ]
def METHOD_NAME(self, index_addons_mock):
    collection = Collection.objects.create(author=self.user, slug='foo')
    addon = addon_factory()
    index_addons_mock.reset_mock()
    collection.add_addon(addon)
    assert index_addons_mock.call_count == 0

    collection = Collection.objects.create(
        author=self.user, slug='featured', id=settings.COLLECTION_FEATURED_THEMES_ID
    )
    addon_featured = addon_factory()
    index_addons_mock.reset_mock()
    collection.add_addon(addon_featured)
    assert collection.addons.count() == 1
    assert index_addons_mock.call_count == 1
    assert index_addons_mock.call_args[0] == ([addon_featured.pk],)
[ 9, 238, 1555, 3253 ]
def METHOD_NAME(self, time: float = 10) -> None:
    self.log.info('Wait until the temperature is stable')
    stability = self.temp_stability.get()
    while stability > 0.02 or stability < 0:
        sleep(time)
        stability = self.temp_stability.get()
[ 618, 7202 ]
def METHOD_NAME(self):
    os.system("echo 'testtest'>%s"
              % os.path.join(self.model_dir, download_model_file_name2))
    self.repo.push('add new file')
    self.repo.tag_and_push(self.revision2, 'Test revision')
[ 238, 80, 171, 61, 82 ]
def METHOD_NAME(self) -> str:
    """
    Gets the resource type.
    """
    return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(self):
    # Verify MVCC isolates connections.
    # This will fail if Connection doesn't poll for changes.
    db = DB(self._storage)
    try:
        c1 = db.open()
        r1 = c1.root()
        r1['alpha'] = PersistentMapping()
        r1['gamma'] = PersistentMapping()
        transaction.commit()

        # Open a second connection but don't load root['alpha'] yet
        c2 = db.open()
        r2 = c2.root()

        r1['alpha']['beta'] = 'yes'

        storage = c1._storage
        t = transaction.Transaction()
        t.description = 'isolation test 1'
        c1.tpc_begin(t)
        c1.commit(t)
        storage.tpc_vote(t.data(c1))
        storage.tpc_finish(t.data(c1))

        # The second connection will now load root['alpha'], but due to
        # MVCC, it should continue to see the old state.
        self.assertTrue(r2['alpha']._p_changed is None)  # A ghost
        self.assertTrue(not r2['alpha'])
        self.assertTrue(r2['alpha']._p_changed == 0)

        # make root['alpha'] visible to the second connection
        c2.sync()

        # Now it should be in sync
        self.assertTrue(r2['alpha']._p_changed is None)  # A ghost
        self.assertTrue(r2['alpha'])
        self.assertTrue(r2['alpha']._p_changed == 0)
        self.assertTrue(r2['alpha']['beta'] == 'yes')

        # Repeat the test with root['gamma']
        r1['gamma']['delta'] = 'yes'

        storage = c1._storage
        t = transaction.Transaction()
        t.description = 'isolation test 2'
        c1.tpc_begin(t)
        c1.commit(t)
        storage.tpc_vote(t.data(c1))
        storage.tpc_finish(t.data(c1))

        # The second connection will now load root[3], but due to MVCC,
        # it should continue to see the old state.
        self.assertTrue(r2['gamma']._p_changed is None)  # A ghost
        self.assertTrue(not r2['gamma'])
        self.assertTrue(r2['gamma']._p_changed == 0)

        # make root[3] visible to the second connection
        c2.sync()

        # Now it should be in sync
        self.assertTrue(r2['gamma']._p_changed is None)  # A ghost
        self.assertTrue(r2['gamma'])
        self.assertTrue(r2['gamma']._p_changed == 0)
        self.assertTrue(r2['gamma']['delta'] == 'yes')
    finally:
        db.close()
[ 9, 436, 550, 5167 ]
def METHOD_NAME(self, msg):
    # is this a power msg from a PM42?
    if ((msg.getElement(0) == 0xD0) and ((msg.getElement(1) & 0x60) == 0x60)) :
        pCmd = (msg.getElement(3) & 0xF0)  # PM42 msg type
        # is this a short circuit / autoreverse message?
        if ((pCmd == 0x30) or (pCmd == 0x10)) :
            pAdr = (msg.getElement(1) & 0x1) * 128 + (msg.getElement(2) & 0x7F) + 1  # PM42 address
            pAdrHex = ("0" + java.lang.Integer.toHexString(pAdr))[-2:]  # make addr a 2 char string
            pAdrHex = pAdrHex.upper()  # Make sure the hex character, if any, is upper case
            pSen = "ISPM_" + pAdrHex  # internal sensor prefix
            # bit mapped codes: bits 0-3 correspond to PM42 sections 1-4
            mode = msg.getElement(3)   # autoreverse if 1, short circuit if 0
            state = msg.getElement(4)  # ACT if 1, OK if 0
            s = sensors.provideSensor(pSen + "1")
            if ((mode & 0x01) == 0 and (state & 0x01) != 0) :
                s.state = ACTIVE
            else :
                s.state = INACTIVE
            s = sensors.provideSensor(pSen + "2")
            if ((mode & 0x02) == 0 and (state & 0x02) != 0) :
                s.state = ACTIVE
            else :
                s.state = INACTIVE
            s = sensors.provideSensor(pSen + "3")
            if ((mode & 0x04) == 0 and (state & 0x04) != 0) :
                s.state = ACTIVE
            else :
                s.state = INACTIVE
            s = sensors.provideSensor(pSen + "4")
            if ((mode & 0x08) == 0 and (state & 0x08) != 0) :
                s.state = ACTIVE
            else :
                s.state = INACTIVE
    return
[ 277 ]
def METHOD_NAME(self) -> None:
    choices = self._waiting + ([] if self._config.only_offsprings else list(self._population.values()))
    if self._rank_method is not None and self.num_objectives > 1:
        choices_rank = self._rank_method(choices, n_selected=self._config.popsize)
        choices = [x for x in choices if x.uid in choices_rank]
    else:
        choices.sort(key=base._loss)
    self._population = {x.uid: x for x in choices[: self._config.popsize]}
    self._uid_queue.clear()
    self._waiting.clear()
    for uid in self._population:
        self._uid_queue.tell(uid)
[ 1472 ]
def METHOD_NAME(self) -> str:
    """
    The kind of the server vulnerability assessments setting
    Expected value is 'AzureServersSetting'.
    """
    return pulumi.get(self, "kind")
[ 1253 ]
def METHOD_NAME(self, name: str, type: Any = None) -> Sequence[Any]:
    args = self._query_args.get(name, [])
    return [type(arg) for arg in args] if type is not None else args
[ 19, 539, 335 ]
def METHOD_NAME(slashstring):
    if slashstring[-1] == "/":
        slashstring = slashstring[0:-1]
    return slashstring
[ 5528, 4812, 7025 ]
def METHOD_NAME(self, treeview, event):
    if event.button == 3:  # right click
        pathinfo = treeview.get_path_at_pos(int(event.x), int(event.y))
        if pathinfo is not None:
            path, col = pathinfo[0], pathinfo[1]
            treeview.grab_focus()
            treeview.set_cursor(path, col, 0)
            self.menu.show_all()
            self.menu.popup(
                None, None, None, None, event.button, Gtk.get_current_event_time()
            )
            return True
    return False
[ 1974, 2971, 417 ]
def METHOD_NAME(db):
    QuestionSetLockedValidator()({
        'section': Section.objects.first(),
        'locked': True
    })
[ 9, 129, 418 ]
def METHOD_NAME(tmp_path, mode):
    sample = get_sample("ELF/i872_hello_bss.elf")
    elf: lief.ELF.Binary = lief.ELF.parse(sample)
    elf.relocate_phdr_table(mode)

    segment = lief.ELF.Segment()
    segment.type = lief.ELF.SEGMENT_TYPES.LOAD
    segment.content = [0xcc for _ in range(0x2000)]
    elf.add(segment)

    outpath = tmp_path / "modified.elf"
    elf.write(outpath.as_posix())

    outpath.chmod(outpath.stat().st_mode | stat.S_IEXEC)

    popen_args = {
        "stdout": subprocess.PIPE,
        "stderr": subprocess.STDOUT,
        "universal_newlines": True
    }
    with Popen([outpath.as_posix()], **popen_args) as proc:
        stdout = proc.stdout.read()
        assert "Hello World" in stdout, f"Error: {stdout}"
[ 9, 628, 698 ]
async def METHOD_NAME(pipeline_response):
    deserialized = self._deserialize("ResourceSkuListResult", pipeline_response)
    list_of_elem = deserialized.value
    if cls:
        list_of_elem = cls(list_of_elem)
    return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME():
    pl = pv.Plotter(shape=(2, 2), window_size=(800, 800))
    pl.iren._mouse_left_button_press(200, 600)
    assert tuple(pl.iren.get_event_subplot_loc()) == (0, 0)
    pl.iren._mouse_left_button_press(200, 200)
    assert tuple(pl.iren.get_event_subplot_loc()) == (1, 0)
    pl.iren._mouse_left_button_press(600, 600)
    assert tuple(pl.iren.get_event_subplot_loc()) == (0, 1)
    pl.iren._mouse_left_button_press(600, 200)
    assert tuple(pl.iren.get_event_subplot_loc()) == (1, 1)
    pl.close()
[ 9, -1, 10844, 209 ]
def METHOD_NAME(self, bash, family, result):
    assert_bash_exec(
        bash,
        "unset -v COMPREPLY BASH_COMPLETION_KNOWN_HOSTS_WITH_HOSTFILE",
    )
    output = assert_bash_exec(
        bash,
        "BASH_COMPLETION_KNOWN_HOSTS_WITH_HOSTFILE= "
        "_known_hosts_real -%sF _known_hosts_real/localhost_config ''; "
        r'printf "%%s\n" "${COMPREPLY[@]}"' % family,
        want_output=True,
    )
    assert sorted(set(output.strip().split())) == sorted(result.split())
[ 9, 1213, 5614 ]
def METHOD_NAME(self, *args):
    if not self.multiprocessing:
        self.on_start_module(*args)
    else:
        self.on_receive_work(*args)
[ 69, 1148, 3064 ]
def METHOD_NAME(page, port):
    gspec = GridSpec(width=800, height=600, margin=0)

    gspec[:, 0] = Spacer(styles=dict(background='red'))
    gspec[0, 1:3] = Spacer(styles=dict(background='green'))
    gspec[1, 2:4] = Spacer(styles=dict(background='orange'))
    gspec[2, 1:4] = Spacer(styles=dict(background='blue'))
    gspec[0:1, 3:4] = Spacer(styles=dict(background='purple'))

    serve(gspec, port=port, threaded=True, show=False)

    time.sleep(0.2)

    page.goto(f"http://localhost:{port}")

    bbox = page.locator(".bk-GridBox").bounding_box()
    children = page.locator(".bk-GridBox div")

    assert bbox['width'] == 800
    assert bbox['height'] == 600

    bbox1 = children.nth(0).bounding_box()
    assert bbox1['x'] == 0
    assert bbox1['width'] == 200
    assert bbox1['height'] == 600
    assert children.nth(0).evaluate("""(element) => window.getComputedStyle(element).getPropertyValue('background-color')""") == 'rgb(255, 0, 0)'

    bbox2 = children.nth(1).bounding_box()
    assert bbox2['x'] == 200
    assert bbox2['y'] == 0
    assert bbox2['width'] == 400
    assert bbox2['height'] == 200
    assert children.nth(1).evaluate("""(element) => window.getComputedStyle(element).getPropertyValue('background-color')""") == 'rgb(0, 128, 0)'

    bbox3 = children.nth(2).bounding_box()
    assert bbox3['x'] == 400
    assert bbox3['y'] == 200
    assert bbox3['width'] == 400
    assert bbox3['height'] == 200
    assert children.nth(2).evaluate("""(element) => window.getComputedStyle(element).getPropertyValue('background-color')""") == 'rgb(255, 165, 0)'

    bbox4 = children.nth(3).bounding_box()
    assert bbox4['x'] == 200
    assert bbox4['y'] == 400
    assert bbox4['width'] == 600
    assert bbox4['height'] == 200
    assert children.nth(3).evaluate("""(element) => window.getComputedStyle(element).getPropertyValue('background-color')""") == 'rgb(0, 0, 255)'

    bbox5 = children.nth(4).bounding_box()
    assert bbox5['x'] == 600
    assert bbox5['y'] == 0
    assert bbox5['width'] == 200
    assert bbox5['height'] == 200
    assert children.nth(4).evaluate("""(element) => window.getComputedStyle(element).getPropertyValue('background-color')""") == 'rgb(128, 0, 128)'

    gspec[1, 1] = Spacer(styles=dict(background='black'))

    time.sleep(0.2)

    children = page.locator(".bk-GridBox > div")

    bbox6 = children.nth(5).bounding_box()
    assert children.nth(5).evaluate("""(element) => window.getComputedStyle(element).getPropertyValue('background-color')""") == 'rgb(0, 0, 0)'
    assert bbox6['x'] == 200
    assert bbox6['y'] == 200
    assert bbox6['width'] == 200
    assert bbox6['height'] == 200
[ 9, 3727 ]
def METHOD_NAME(loading=None):
    if loading == "withargs":
        return KeyValueStoreWithArguments
    return [KeyValueStoreWithArguments]
[ 557, 12768, 41, 335 ]
def METHOD_NAME(self, configs: List[AmpConfig]) -> bool:
[ 0, 736 ]
def METHOD_NAME(self) -> float:
    return 1e-4
[ 8844, 43, 437 ]
def METHOD_NAME(self) -> str:
    identity_store_id = self._get_param("IdentityStoreId")
    display_name = self._get_param("DisplayName")
    description = self._get_param("Description")
    group_id, identity_store_id = self.identitystore_backend.METHOD_NAME(
        identity_store_id=identity_store_id,
        display_name=display_name,
        description=description,
    )
    return json.dumps(dict(GroupId=group_id, IdentityStoreId=identity_store_id))
[ 129, 846 ]
def METHOD_NAME(cognite_client):
    testid = random_string(50)
    dataset = cognite_client.data_sets.list()[0]
    extpipe = cognite_client.extraction_pipelines.create(
        ExtractionPipeline(
            external_id=f"testid-{testid}",
            name=f"Test extpipe {testid}",
            data_set_id=dataset.id,
            description="Short description",
            contacts=[
                ExtractionPipelineContact(
                    name="John Doe", email="[email protected]", role="owner", send_notification=False
                )
            ],
            schedule="Continuous",
        )
    )
    yield extpipe
    try:
        cognite_client.extraction_pipelines.delete(id=extpipe.id)
    except Exception:
        pass
    assert cognite_client.extraction_pipelines.retrieve(extpipe.id) is None
[ 80, -1 ]
def METHOD_NAME():
    """Test that _path_to_str returns a string."""
    path_str = "foo"
    assert _path_to_str(path_str) == path_str
    assert _path_to_str(Path(path_str)) == path_str
    with pytest.raises(ValueError):
        _path_to_str(1)
[ 9, 157, 24, 3 ]
def METHOD_NAME(self):
    # Create Queue
    self.queue = Queue(maxsize=0)

    # Create worker thread
    t = Thread(target=self.worker)
    t.daemon = True
    t.start()

    self.event = Event()
[ 15 ]
def METHOD_NAME(queue_url):
    # Note: this logic taken from boto, so should be safe
    return urllib.parse.urlparse(queue_url).path.split("/")[2]
[ 297, 156 ]
def METHOD_NAME(self):
    app_config = Mock()
    app_config.APPS_COLORS = {}
    app_config.APPS_COLORS["primary"] = "#fff"

    agenda = DummyApp(
        label="Agenda",
        slug="agenda",
        fa_icon="calendar",
        config={},
        main_route="/ui/workspaces/{workspace_id}/agenda",
    )
    agenda.is_active = True

    app_api = ApplicationApi(app_list=[agenda], show_inactive=False)

    workspace = Mock()
    workspace.workspace_id = 12
    workspace.agenda_enabled = True

    default_workspace_menu_entry = app_api.get_default_workspace_menu_entry(
        workspace=workspace, app_config=app_config
    )
    assert len(default_workspace_menu_entry) == 4
    assert default_workspace_menu_entry[0].label == dashboard_menu_entry.label
    assert default_workspace_menu_entry[1].label == publication_menu_entry.label
    assert default_workspace_menu_entry[2].label == all_content_menu_entry.label
    assert default_workspace_menu_entry[3].label == agenda.label
[ 9, 19, 235, 1976, 2470, 475, 1217 ]
def METHOD_NAME(data, checksum=None):
    """Returns decoded data, verifies hash digest if provided"""
    cbor = bc32decode(data)
    if checksum is not None:
        h = bc32decode(checksum)
        assert h == hashlib.sha256(cbor).digest()
    return cbor_decode(cbor)
[ 16619, 1268 ]
def METHOD_NAME(self):
    path = self.settings.get_data_folder_path()
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as e:
            log.error('Failed to create data folder! Error: %s', e)
[ 176, 365, 451, 217, 130, 1985 ]
def METHOD_NAME():
    return os.METHOD_NAME(0)
[ 14352 ]
def METHOD_NAME(self, state):
[ 0, 4799 ]
def METHOD_NAME(self):
    data = super(Ec2WaiterFactory, self).METHOD_NAME
    return data
[ 8413, 578, 365 ]
def METHOD_NAME(requests_mock):
    return {
        "site_urls": ["https://example.com/"],
        "start_date": "2022-01-01",
        "end_date": "2022-02-01",
        "authorization": {
            "auth_type": "Client",
            "client_id": "client_id",
            "client_secret": "client_secret",
            "refresh_token": "refresh_token",
        },
        "custom_reports": "[{\"name\": \"custom_dimensions\", \"dimensions\": [\"date\", \"country\", \"device\"]}]",
        "custom_reports_array": [{"name": "custom_dimensions", "dimensions": ["date", "country", "device"]}],
    }
[ 200, 1964 ]
def METHOD_NAME():
    """
    Return a list of strings in colon-hex format representing all the link local
    IPv6 addresses available on the system, as reported by I{getifaddrs(3)}.
    """
    retList = []
    for (interface, family, address) in _interfaces():
        interface = nativeString(interface)
        address = nativeString(address)
        if family == socket.AF_INET6 and address.startswith("fe80:"):
            retList.append(f"{address}%{interface}")
    return retList
[ 10652, 19, 548, 125, 497, 7118, 1065 ]
def METHOD_NAME(form, request_form):
    """Modify an Action."""
    messages = {
        "success": [],
        "info": [],
        "warning": [],
        "error": []
    }

    mod_action = Actions.query.filter(
        Actions.unique_id == form.action_id.data).first()
    if not mod_action:
        messages["error"].append("Action not found")
    else:
        # Parse custom options for action
        dict_actions = parse_action_information()
        if mod_action.action_type in dict_actions:
            messages["error"], custom_options = custom_options_return_json(
                messages["error"], dict_actions, request_form,
                mod_dev=mod_action, device=mod_action.action_type)
            mod_action.custom_options = custom_options

    if not messages["error"]:
        try:
            db.session.commit()
            messages["success"].append(
                f"{TRANSLATIONS['modify']['title']} {TRANSLATIONS['actions']['title']}")
        except sqlalchemy.exc.OperationalError as except_msg:
            messages["error"].append(str(except_msg))
        except sqlalchemy.exc.IntegrityError as except_msg:
            messages["error"].append(str(except_msg))
        except Exception as except_msg:
            messages["error"].append(str(except_msg))

    return messages
[ 1006, 692 ]
def METHOD_NAME(self, serviceRecord):
    # Should convert this into a real property
    return False  # True means no sync until user configures
[ 139, 830 ]
def METHOD_NAME(event_source, *, loop=None):
    """Init the application server.

    Return:
        An aiohttp application.
    """
    dispatcher = Dispatcher(event_source)

    # Schedule the dispatcher
    loop.create_task(dispatcher.publish())

    app = web.Application(loop=loop)
    app['dispatcher'] = dispatcher
    app.router.add_get(EVENTS_ENDPOINT, websocket_handler)

    return app
[ 176, 991 ]
def METHOD_NAME(cls):
    return "TkAlMergeZmumuPlots.C"
[ -1 ]
def METHOD_NAME(self):
    self.system.config.forecast_correlation_estimate["frequency"] = "D"
    self.system.config.forecast_correlation_estimate["floor_at_zero"] = False
    instrument_code = "US10"
    ans = self.system.combForecast.get_forecast_correlation_matrices(
        instrument_code
    )
    self.assertAlmostEqual(ans.corr_list[-1][0][1], 0.012915602974, places=5)
[ 9, 3831 ]
def METHOD_NAME(dfile, cksum, pkg):
    pkg.log(f"verifying sha256sums for source '{dfile.name}'... ", "")

    filesum = get_cksum(dfile, pkg)

    if cksum != filesum:
        if pkg.accept_checksums:
            pkg.logger.out_plain("")
            pkg.logger.warn(f"SHA256 UPDATED: {cksum} -> {filesum}")
            for i in range(len(pkg.sha256)):
                if pkg.sha256[i] == cksum:
                    pkg.sha256[i] = filesum
            return True
        else:
            pkg.logger.out_plain("")
            pkg.logger.out_red(
                f"SHA256 mismatch for '{dfile.name}':\n{filesum}"
            )
            return False
    else:
        make_link(dfile, cksum)
        pkg.logger.out_plain("OK.")
        return True
[ 1162, 12327 ]
def METHOD_NAME(storage):
    return storage.create_experiment().create_ensemble(ensemble_size=100, name="target")
[ 1030, 4700 ]
def METHOD_NAME(self):
[ 203, 243, 772, 466 ]
def METHOD_NAME(test_input, expected):
    assert semver.str_to_version(test_input) == expected
[ 9, 3, 24, 281 ]
def METHOD_NAME( srcloc, obsloc, freq, sigma, a, b, mu=(mu_0, mu_0, mu_0), eps=epsilon_0, moment=1.0
[ 19, 6585, 4845, 4767, 1630 ]
def METHOD_NAME(dict1, dict2, delta=None, msg=None, places=None, default_value=0):
    """Assert two dictionaries with numeric values are almost equal.

    Fail if the two dictionaries are unequal as determined by
    comparing that the difference between values with the same key are
    not greater than delta (default 1e-8), or that difference rounded
    to the given number of decimal places is not zero. If a key in one
    dictionary is not in the other the default_value keyword argument
    will be used for the missing value (default 0). If the two objects
    compare equal then they will automatically compare almost equal.

    Args:
        dict1 (dict): a dictionary.
        dict2 (dict): a dictionary.
        delta (number): threshold for comparison (defaults to 1e-8).
        msg (str): return a custom message on failure.
        places (int): number of decimal places for comparison.
        default_value (number): default value for missing keys.

    Raises:
        TypeError: raises TestCase failureException if the test fails.
    """
    if dict1 == dict2:
        # Shortcut
        return
    if delta is not None and places is not None:
        raise TypeError("specify delta or places not both")

    if places is not None:
        success = True
        standard_msg = ""
        # check value for keys in target
        keys1 = set(dict1.keys())
        for key in keys1:
            val1 = dict1.get(key, default_value)
            val2 = dict2.get(key, default_value)
            if round(abs(val1 - val2), places) != 0:
                success = False
                standard_msg += "(%s: %s != %s), " % (key, val1, val2)
        # check values for keys in counts, not in target
        keys2 = set(dict2.keys()) - keys1
        for key in keys2:
            val1 = dict1.get(key, default_value)
            val2 = dict2.get(key, default_value)
            if round(abs(val1 - val2), places) != 0:
                success = False
                standard_msg += "(%s: %s != %s), " % (key, val1, val2)
        if success is True:
            return
        standard_msg = standard_msg[:-2] + " within %s places" % places

    else:
        if delta is None:
            delta = 1e-8  # default delta value
        success = True
        standard_msg = ""
        # check value for keys in target
        keys1 = set(dict1.keys())
        for key in keys1:
            val1 = dict1.get(key, default_value)
            val2 = dict2.get(key, default_value)
            if abs(val1 - val2) > delta:
                success = False
                standard_msg += "(%s: %s != %s), " % (key, val1, val2)
        # check values for keys in counts, not in target
        keys2 = set(dict2.keys()) - keys1
        for key in keys2:
            val1 = dict1.get(key, default_value)
            val2 = dict2.get(key, default_value)
            if abs(val1 - val2) > delta:
                success = False
                standard_msg += "(%s: %s != %s), " % (key, val1, val2)
        if success is True:
            return
        standard_msg = standard_msg[:-2] + " within %s delta" % delta

    raise Exception(standard_msg)
[ 638, 553, 1740, 926 ]
def METHOD_NAME(config, ip_list_file):
    ds = TrainPairDataset(config, ip_list_file)
    loader = Dataloader(
        ds,
        batch_size=config.batch_pair_size,
        num_workers=config.num_workers,
        stream_shuffle_size=config.pair_stream_shuffle_size,
        collate_fn=CollateFn())

    model = SkipGramModel(config)

    if config.warm_start_from:
        log.info("warm start from %s" % config.warm_start_from)
        model.set_state_dict(paddle.load(config.warm_start_from))

    optim = Adam(
        learning_rate=config.lr,
        parameters=model.parameters(),
        lazy_mode=config.lazy_mode)

    log.info("starting training...")
    train(config, model, loader, optim)
[ 57 ]
def METHOD_NAME(directories, ext):
    """Locate files in the directories with the given extension."""
    out = set()
    for location in directories:
        for filename in mooseutils.git_ls_files(os.path.join(MooseDocs.ROOT_DIR, location)):
            if filename.endswith(ext) and not os.path.islink(filename):
                out.add(filename)
    return out
[ 957, 3932 ]
def METHOD_NAME():
    """URBANopt District Energy Systems"""
[ 615 ]
def METHOD_NAME(self):
    fdm = CreateFDM(self.sandbox)
    fdm.load_model('c172r')
    fdm.load_ic('reset00', True)
    fdm.run_ic()

    self.assertEqual(fdm['fcs/flap-cmd-norm'], 0.0)
    self.assertEqual(fdm['fcs/flap-pos-deg'], 0.0)

    # Test the flap down sequence. The flap command is set to a value
    # higher than 1.0 to check that JSBSim clamps it to 1.0
    fdm['fcs/flap-cmd-norm'] = 1.5
    t = fdm['simulation/sim-time-sec']

    while t < 2.0:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 5.*t)
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    while t < 4.0:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 10.*(t-1.))
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    while t < 5.0:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 30.)
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    # Test the flap up sequence with an interruption at 7.5 deg
    fdm['fcs/flap-cmd-norm'] = 0.25

    while t < 7.0:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 30.-10.*(t-5.))
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    while t < 7.5:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 10.-5.*(t-7.))
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    while t < 8.0:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 7.5)
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    # Complete the flap up sequence. The flap command is set to a value
    # lower than 0.0 to check that JSBSim clamps it to 0.0
    fdm['fcs/flap-cmd-norm'] = -1.

    while t < 9.5:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 10.-5.*(t-7.5))
        fdm.run()
        t = fdm['simulation/sim-time-sec']

    while t < 10.0:
        self.assertAlmostEqual(fdm['fcs/flap-pos-deg'], 0.0)
        fdm.run()
        t = fdm['simulation/sim-time-sec']
[ 9, 13004, 845 ]
def METHOD_NAME(data):
    rel = stix2.parse(data, version="2.0")

    assert rel.type == 'relationship'
    assert rel.id == RELATIONSHIP_ID
    assert rel.created == dt.datetime(2016, 4, 6, 20, 6, 37, tzinfo=pytz.utc)
    assert rel.modified == dt.datetime(2016, 4, 6, 20, 6, 37, tzinfo=pytz.utc)
    assert rel.relationship_type == "indicates"
    assert rel.source_ref == INDICATOR_ID
    assert rel.target_ref == MALWARE_ID
[ 9, 214, 2924 ]