Columns: text (string, lengths 15 to 7.82k) and ids (sequence, lengths 1 to 7).
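Each row below pairs a text field (a Python function whose name is masked as METHOD_NAME) with an ids list. A minimal sketch of iterating rows with this shape follows; the file name rows.jsonl and the one-JSON-object-per-line layout are illustrative assumptions, not part of this dump.

import json

# Assumed layout: one JSON object per line with "text" and "ids" keys
# (the file name is hypothetical).
with open("rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        snippet, ids = row["text"], row["ids"]
        # Bounds taken from the column metadata above.
        assert 15 <= len(snippet) <= 7820   # stringlengths: 15 .. 7.82k
        assert 1 <= len(ids) <= 7           # sequencelengths: 1 .. 7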
def METHOD_NAME(test_class):
    tester = test_class()
    for attr in dir(tester):
        if attr.startswith('test_'):
            print(attr)
            getattr(tester, attr)()
[ 22 ]
def METHOD_NAME(self, host, revision, target_dir):
    self.logger.info("Checking out local revision [%s] for %s.", revision, self.repository_name)
    self.git_manager.checkout(host, target_dir, revision)
[ 86, 1230, 24, 125, 71 ]
def METHOD_NAME(year, month):
    print("delete_matrixstore_bq_table")
    _delete_table_from_bq("prescribing_export", f"prescribing_{year}_{month}")
[ 34, 14842, 3578, 410 ]
def METHOD_NAME(s):
    """
    Help fuzzy matcher by "normalising" string data.

    :param s: string to be cleaned
    :return: the cleaned string
    """
    if not s:
        return None
    s = s.upper()
    converted_word = ""
    for char in s:
        if char in NSI.replace_table:
            replace_char = NSI.replace_table[char]
        else:
            replace_char = char
        if replace_char:
            converted_word += replace_char
    return converted_word
[ 7939 ]
def METHOD_NAME(self):
    # e.g. GitHub returns 201
    yield self.http2XX(code=201, content="Created")
[ 9, -1 ]
def METHOD_NAME(value, max_len=1024):
    # type: (Any, int) -> Any
    """Truncate values which are bytes and greater than `max_len`.

    Useful for parameters like 'Body' in `put_object` operations.
    """
    if isinstance(value, bytes) and len(value) > max_len:
        return b"..."
    return value
[ 5419, 718, 99 ]
def METHOD_NAME(
    modelName, data, config, logFile, logDir, logHead,
    crossValidation, bestTrialRetraining=False, transferLearning=False, modelPredict=False
):
    # create a model agnostic objective instance
    objectiveMLP = Objective(modelName=modelName, data=data, config=config,
                             logFile=logFile, logDir=logDir, logHead=logHead)
    # add goal and model specific settings
    if bestTrialRetraining:
        objectiveMLP = addBestTrialRetrainingSettings(objectiveMLP, config)
    elif modelPredict:
        objectiveMLP = addModelPredictSettings(objectiveMLP)
    else:
        objectiveMLP = addHpoSettings(objectiveMLP, crossValidation, config)
    return objectiveMLP
[ 19, 5139, 7184 ]
def METHOD_NAME(self):
    print_message("#> Shuffling triples...")
    random.METHOD_NAME(self.triples)
[ 1124 ]
def METHOD_NAME(filters):
    from_date, to_date = filters.get("from_date"), filters.get("to_date")
    if not (from_date and to_date):
        frappe.throw(_("From and To Dates are required."))
    elif date_diff(to_date, from_date) < 0:
        frappe.throw(_("To Date cannot be before From Date."))
[ 187, 469 ]
def METHOD_NAME(context, typ):
    if typ is int:
        return context.Int()
    if typ is bool:
        return context.Bool()
    if typ is str:
        return context.String()
    if typ is BitVector:
        return context.BitVector()
    raise NotImplementedError(typ)
[ 440, 24, 11596, 49, 44 ]
async def METHOD_NAME(model, response):
    coin = await settings.settings.get_coin(
        model.currency,
        {"xpub": model.xpub, "contract": model.contract, **model.additional_xpub_data},
    )
    txes = (await coin.history())["transactions"]
    for i in txes:
        response.append({"date": i["date"], "txid": i["txid"], "amount": i["bc_value"]})
[ 19, 2945, 351 ]
def METHOD_NAME(id):
    response = chpp.getFile('translations', params={'version': '1.1', 'languageId': id})
    xml = ET.fromstring(response.content)
    obj = {}
    XMLParser.xml_to_python(xml, obj)
    return obj
[ 19, 2938 ]
def METHOD_NAME(policy_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                resource_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetReplicationPolicyResult]:
    """
    Gets the details of a replication policy.

    Azure REST API version: 2023-04-01.

    :param str policy_name: Replication policy name.
    :param str resource_group_name: The name of the resource group where the recovery services vault is present.
    :param str resource_name: The name of the recovery services vault.
    """
    ...
[ 19, 3185, 54, 146 ]
def METHOD_NAME(self):
    TokenRefreshRequest(**TOKEN_REFRESH_REQUEST)
[ 9, 187, 1920, 377 ]
def METHOD_NAME(self, p):
    x = p[..., 0, None]
    y = p[..., 1, None]
    Fx = self.Fx(x, y)
    Fy = self.Fy(x, y)
    if type(Fx) is not np.ndarray:
        Fx = np.ones(x.shape, dtype=np.float_)*Fx
    if type(Fy) is not np.ndarray:
        Fy = np.ones(x.shape, dtype=np.float_)*Fy
    f = np.c_[Fx, Fy]
    return f
[ 725 ]
def METHOD_NAME(self): """Make sure a script-src/style-src does not have data: or blob:.""" assert 'blob:' not in base_settings.CSP_SCRIPT_SRC assert 'data:' not in base_settings.CSP_SCRIPT_SRC assert 'blob:' not in base_settings.CSP_STYLE_SRC assert 'data:' not in base_settings.CSP_STYLE_SRC
[ 9, 365, 61, 260, 130, 623, 782 ]
def METHOD_NAME(
    repo_dir: str,
    templates: list[BaseTemplate],
    details: list[TemplateChangeDetails],
):
    repo = Repo(repo_dir)
    # intended to delete
    deleted_template_path_to_template: dict[str, BaseTemplate] = {
        template.file_path: template for template in templates if template.deleted
    }
    for template_detail in details:
        if template_detail.template_path in deleted_template_path_to_template:
            if template_detail.exceptions_seen:
                log_params = {"path": template_detail.template_path}
                log.error(
                    "add_commits_from_delete_templates cannot be deleted due to exceptions in apply",
                    **log_params,
                )
            else:
                deleted_template_path_to_template[
                    template_detail.template_path
                ].delete()
    diff_list = repo.head.commit.diff()
    if len(diff_list) > 0:
        repo.git.commit("-m", "Delete template after successfully delete resources")
[ 1160, 1108, 1914 ]
def METHOD_NAME(self):
    self.assert_text_renders_to(
        [['1', '2', '3'], ['4', '5', '6']],
        '1\t2\t3\n'
        '4\t5\t6\n'
    )
[ 9, 612, 245, 47, 50 ]
async def METHOD_NAME(
    can_messenger: CanMessenger,
    can_messenger_queue: WaitableCallback,
    name: PipetteName,
    model: int,
    datecode: bytes,
) -> None:
    """It should write a serial number and read it back."""
    node_id = NodeId.pipette_left
    serial_bytes = serial_val_from_parts(name, model, datecode)
    s = SerialNumberPayload(serial=SerialField(serial_bytes))

    await can_messenger.send(node_id=node_id, message=SetSerialNumber(payload=s))
    await can_messenger.send(node_id=node_id, message=InstrumentInfoRequest())
    response, arbitration_id = await asyncio.wait_for(can_messenger_queue.read(), 1)

    assert arbitration_id.parts.originating_node_id == node_id
    assert isinstance(response, PipetteInfoResponse)
    assert response.payload.name.value == name.value
    assert response.payload.model.value == model
    assert response.payload.serial.value[: len(datecode)] == datecode
[ 9, 0, 4364, 5302 ]
def METHOD_NAME(_fd: int, _events: int) -> None:
    self.handle_exit(callback)()
[ 1519 ]
def METHOD_NAME(self, tokens):
    """Word detokenizer, separate by <space>."""
    return " ".join(tokens)
[ 2236, 14181 ]
def METHOD_NAME(self) -> Optional[xen_hypervisor_type]:
    """
    Lookup xen hypervisor and provide filename and hypervisor name

    :return: tuple with filename and hypervisor name

    :rtype: tuple|None
    """
    xen_hypervisor = self.root_dir + '/boot/xen.gz'
    if os.path.exists(xen_hypervisor):
        return xen_hypervisor_type(
            filename=xen_hypervisor,
            name='xen.gz'
        )
    return None
[ 19, 8049, 7941 ]
def METHOD_NAME(self):
[ 137, 1815 ]
def METHOD_NAME():
[ 362, 667 ]
def METHOD_NAME(self):
    result = False
    if any([s.broker.IS_BACKTESTING_BROKER for s in self._strategies]):
        result = True
    return result
[ 137, 8959 ]
def METHOD_NAME(self) -> Dict[str, Union[str, bool]]:
    module_json: Dict[str, Union[str, bool]] = {
        "repo_name": self.repo_name,
        "module_name": self.name,
        "commit": self.commit,
    }
    if self.type == InstallableType.COG:
        module_json["pinned"] = self.pinned
    return module_json
[ 24, 763 ]
def METHOD_NAME(self):
    return getattr(self.enum, "value", self.key)
[ 99, 894, 59 ]
def METHOD_NAME(self, text):
    elem = et.SubElement(self.html, 'th' if self.header else 'td')
    elem.text = text
    return elem
[ 118 ]
def METHOD_NAME():
    from litex.build.parser import LiteXArgumentParser
    parser = LiteXArgumentParser(platform=ztex213.Platform, description="LiteX SoC on Ztex 2.13.")
    parser.add_target_argument("--expansion",       default="debug",           help="Expansion board (debug or sbus).")
    parser.add_target_argument("--sys-clk-freq",    default=100e6, type=float, help="System clock frequency.")
    parser.add_target_argument("--with-spi-sdcard", action="store_true",       help="Enable SPI-mode SDCard support.")
    parser.add_target_argument("--with-sdcard",     action="store_true",       help="Enable SDCard support.")
    args = parser.parse_args()

    soc = BaseSoC(sys_clk_freq=args.sys_clk_freq, expansion=args.expansion, **parser.soc_argdict)
    assert not (args.with_spi_sdcard and args.with_sdcard)
    if args.with_spi_sdcard:
        soc.add_spi_sdcard()  # SBus only
    if args.with_sdcard:
        soc.add_sdcard()  # SBus only

    builder = Builder(soc, **parser.builder_argdict)
    if args.build:
        builder.build(**parser.toolchain_argdict)

    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))
[ 57 ]
def METHOD_NAME(self):
    self.create_and_verify_stack("combination/function_with_deployment_basic")
    self._verify_no_deployment_then_update_and_verify_deployment()
[ 9, 1778, 559, 41, 1503, 3216, 1916 ]
def METHOD_NAME(self, other: Union[str, 'ComparableVersion']):
    return self == other
[ 816 ]
def METHOD_NAME(self):
    # The user hasn't satisfied any of the credit requirements yet, but she
    # also hasn't failed any.
    response = self._get_progress_page()

    # Expect that the requirements are displayed
    self.assertContains(response, self.MIN_GRADE_REQ_DISPLAY)
    self.assertContains(response, self.VERIFICATION_REQ_DISPLAY)
    self.assertContains(response, "Upcoming")
    self.assertContains(
        response,
        f"{self.USER_FULL_NAME}, you have not yet met the requirements for credit"
    )
[ 9, 8534, 5186, 2946, 11014 ]
def METHOD_NAME(self):
    return self.height > self.width
[ 137, 14204 ]
def METHOD_NAME(self, *args: _UpdateArg, **kw: Any) -> Self:
    r"""
    Specifies the ON DUPLICATE KEY UPDATE clause.

    :param \**kw: Column keys linked to UPDATE values. The values may
     be any SQL expression or supported literal Python values.

     .. warning:: This dictionary does **not** take into account
        Python-specified default UPDATE values or generation functions,
        e.g. those specified using :paramref:`_schema.Column.onupdate`.
        These values will not be exercised for an ON DUPLICATE KEY UPDATE
        style of UPDATE, unless values are manually specified here.

    :param \*args: As an alternative to passing key/value parameters,
     a dictionary or list of 2-tuples can be passed as a single positional
     argument.

     Passing a single dictionary is equivalent to the keyword argument form::

        insert().on_duplicate_key_update({"name": "some name"})

     Passing a list of 2-tuples indicates that the parameter assignments
     in the UPDATE clause should be ordered as sent, in a manner similar
     to that described for the :class:`_expression.Update` construct overall
     in :ref:`tutorial_parameter_ordered_updates`::

        insert().on_duplicate_key_update(
            [("name", "some name"), ("value", "some value")])

     .. versionchanged:: 1.3 parameters can be specified as a dictionary
        or list of 2-tuples; the latter form provides for parameter
        ordering.

    .. versionadded:: 1.2

    .. seealso::

        :ref:`mysql_insert_on_duplicate_key_update`
    """
    if args and kw:
        raise exc.ArgumentError(
            "Can't pass kwargs and positional arguments simultaneously"
        )

    if args:
        if len(args) > 1:
            raise exc.ArgumentError(
                "Only a single dictionary or list of tuples "
                "is accepted positionally."
            )
        values = args[0]
    else:
        values = kw

    self._post_values_clause = OnDuplicateClause(
        self.inserted_alias, values
    )
    return self
[ 69, 1119, 59, 86 ]
def METHOD_NAME(self):
[ 9, 835, 581 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    The system metadata relating to this resource.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME() -> dict[str, list]:
    """Generate statistics about active members."""
    return {
        "labels": ["Active Members", "Non-active Members"],
        "datasets": [
            {
                "data": [
                    Member.active_members.count(),
                    Member.current_members.count() - Member.active_members.count(),
                ]
            }
        ],
    }
[ 370, 577, 923, 3563 ]
def METHOD_NAME(self, mocker):
    """Test that AWS credentials are passed successfully into boto3
    client instantiation on creating S3 connection."""
    client_mock = mocker.patch("botocore.session.Session.create_client")
    s3_data_set = ParquetDataSet(filepath=S3_PATH, credentials=AWS_CREDENTIALS)
    pattern = r"Failed while loading data from data set ParquetDataSet\(.+\)"
    with pytest.raises(DatasetError, match=pattern):
        s3_data_set.load().compute()

    assert client_mock.call_count == 1
    args, kwargs = client_mock.call_args_list[0]
    assert args == ("s3",)
    assert kwargs["aws_access_key_id"] == AWS_CREDENTIALS["key"]
    assert kwargs["aws_secret_access_key"] == AWS_CREDENTIALS["secret"]
[ 9, 403, 3568 ]
def METHOD_NAME(hardware_source_id: str, sync: bool = True) -> typing.Any:
    """Return a generator for data.

    :param bool sync: whether to wait for current frame to finish then collect next frame

    NOTE: a new ndarray is created for each call.
    """
    hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
    assert hardware_source

    def get_last_data() -> _NDArray:
        assert hardware_source
        xdata0 = hardware_source.get_next_xdatas_to_finish()[0]
        data = xdata0.data if xdata0 else None
        assert data is not None
        return data.copy()

    yield get_last_data
[ 19, 365, 1443, 604, 147 ]
def METHOD_NAME(self) -> None:
    self.provider.default_tags.clear()
    self.default_tags.clear()
[ 537, 235, 114 ]
def METHOD_NAME(filters=None):
    columns, data = [], []
    columns = get_columns()
    data = get_data(filters)
    return columns, data
[ 750 ]
def METHOD_NAME(G, att_name):
    for e in G.edges(data=True):
        e[2][att_name] = datetime(2015, 1, 1)
    return G
[ 1276, 1101, 884 ]
def METHOD_NAME(self):
    self._test_passwords("""\
        machine host.domain.com login log password pa#ss account acct
        """, 'pa#ss')
[ 9, 2897, 41, 2026, 1161 ]
def METHOD_NAME(application_name: Optional[pulumi.Input[str]] = None,
                cluster_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                service_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetManagedClusterServiceResult]:
    """
    Get a Service Fabric service resource created or in the process of being created in the Service Fabric managed application resource.

    Azure REST API version: 2023-03-01-preview.

    :param str application_name: The name of the application resource.
    :param str cluster_name: The name of the cluster resource.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the service resource in the format of {applicationName}~{serviceName}.
    """
    ...
[ 19, 3627, 2059, 549, 146 ]
def METHOD_NAME(self): """test to see that newlines divide words""" # newlines break words self.count("A word.\nAnother word", 4) self.count(r"A word.\\n\nAnother word", 4)
[ 9, 2788 ]
def METHOD_NAME(self, result_name, **kwargs):
    if result_name == 'Dresult':
        return self.current_state['D']
[ 19, 1571 ]
def METHOD_NAME(self, subject):
    return None
[ 1805, 21 ]
def METHOD_NAME(self, config: JsonDict, **kwargs: Any) -> None:
    self.enable_room_list_search = config.get("enable_room_list_search", True)

    alias_creation_rules = config.get("alias_creation_rules")
    if alias_creation_rules is not None:
        self._alias_creation_rules = [
            _RoomDirectoryRule("alias_creation_rules", rule)
            for rule in alias_creation_rules
        ]
    else:
        self._alias_creation_rules = [
            _RoomDirectoryRule("alias_creation_rules", {"action": "allow"})
        ]

    room_list_publication_rules = config.get("room_list_publication_rules")
    if room_list_publication_rules is not None:
        self._room_list_publication_rules = [
            _RoomDirectoryRule("room_list_publication_rules", rule)
            for rule in room_list_publication_rules
        ]
    else:
        self._room_list_publication_rules = [
            _RoomDirectoryRule("room_list_publication_rules", {"action": "allow"})
        ]
[ 203, 200 ]
def METHOD_NAME(self, data):
    # don't serialize strings
    if isinstance(data, string_types):
        return data
    try:
        """
        `json.dumps()` behaviors:
        ensure_ascii: If true (the default), the output is guaranteed to
            have all incoming non-ASCII characters escaped. If false, these
            characters will be output as-is.
        separators: an (item_separator, key_separator) tuple, specifying
            the separators in the output.
        """
        # return json.dumps(data, default=self.default, ensure_ascii=False, separators=(",", ":"))
        """
        `orjson.dumps()` will escape all incoming non-ASCII characters and
        output the encoded byte-strings. We decode the output byte-strings
        into string, and as a result, those escaped characters are un-escaped.
        In Python 3, the default encoding is "utf-8"
        (see https://docs.python.org/3/library/stdtypes.html#bytes.decode).
        `orjson.dumps()` will output compact JSON representation, effectively
        the same behavior with json.dumps(separators=(",", ":"))
        """
        return orjson.METHOD_NAME(data, default=self.default).decode()
    except (ValueError, TypeError) as e:
        raise SerializationError(data, e)
[ 4219 ]
def METHOD_NAME(self):
    conn = ContextMock(name='connection')
    self.c.connection = conn
    conn.connection_errors = (KeyError,)
    conn.channel_errors = ()
    consume = self.c.consume = Mock(name='c.consume')
    with patch('kombu.mixins.warn') as warn:
        def se_raises(*args, **kwargs):
            self.c.should_stop = True
            raise KeyError('foo')
        self.c.should_stop = False
        consume.side_effect = se_raises
        self.c.run()
        warn.assert_called()
[ 9, 22, 45 ]
def METHOD_NAME(self, *args, **kwargs):
    if self.bind_func is None:
        raise TypeError(f"bind function not exist for method {self.func.__name__}")
    return self.bind_func(*args, **kwargs)
[ 287 ]
def METHOD_NAME(self):
    for patch in self.conan_data.get("patches", {}).get(self.version, []):
        tools.patch(**patch)
    cmake = self._configure_cmake()
    cmake.METHOD_NAME()
[ 56 ]
def METHOD_NAME(fn: Callable[P, T]) -> Callable[P, T]:
    decorators = [
        click.option(
            "--blockstore",
            "-b",
            required=True,
            multiple=True,
            callback=lambda ctx, param, value: _parse_blockstore_params(value),
            envvar="PARSEC_BLOCKSTORE",
            metavar="CONFIG",
            help="""Blockstore configuration.
[ 5105, 3127, 1881 ]
def METHOD_NAME(self): """Return the weights for the possible hops""" return [x.weight for x in self._get_query_all()]
[ 733 ]
def METHOD_NAME(self, input, backbone_neck):
    rurrent_pan_out2, rurrent_pan_out1, rurrent_pan_out0 = backbone_neck(
        torch.split(input, 3, dim=1)[0])
    support_pan_out2s = []
    support_pan_out1s = []
    support_pan_out0s = []
    for i in range(self.frame_num - 1):
        support_pan_out2, support_pan_out1, support_pan_out0 = backbone_neck(
            torch.split(input, 3, dim=1)[i + 1])
        support_pan_out2s.append(support_pan_out2)
        support_pan_out1s.append(support_pan_out1)
        support_pan_out0s.append(support_pan_out0)
    all_pan_out2s = [rurrent_pan_out2] + support_pan_out2s
    all_pan_out1s = [rurrent_pan_out1] + support_pan_out1s
    all_pan_out0s = [rurrent_pan_out0] + support_pan_out0s
    pan_out2s = []
    pan_out1s = []
    pan_out0s = []
    frame_start_id = 0
    for i in range(self.conv_group_num):
        group_frame_num = self.out_channels[i][1]
        for j in range(group_frame_num):
            frame_id = frame_start_id + j
            pan_out2s.append(
                getattr(self, f'group_{i}_jian2')(all_pan_out2s[frame_id]))
            pan_out1s.append(
                getattr(self, f'group_{i}_jian1')(all_pan_out1s[frame_id]))
            pan_out0s.append(
                getattr(self, f'group_{i}_jian0')(all_pan_out0s[frame_id]))
        frame_start_id += group_frame_num
    if self.with_short_cut:
        if self.merge_form == 'pure_concat':
            pan_out2 = torch.cat(pan_out2s, dim=1) + rurrent_pan_out2
            pan_out1 = torch.cat(pan_out1s, dim=1) + rurrent_pan_out1
            pan_out0 = torch.cat(pan_out0s, dim=1) + rurrent_pan_out0
        elif self.merge_form == 'add':
            pan_out2 = torch.sum(
                torch.stack(pan_out2s), dim=0) + rurrent_pan_out2
            pan_out1 = torch.sum(
                torch.stack(pan_out1s), dim=0) + rurrent_pan_out1
            pan_out0 = torch.sum(
                torch.stack(pan_out0s), dim=0) + rurrent_pan_out0
        else:
            raise Exception(
                'merge_form must be in ["pure_concat", "add"].')
    else:
        if self.merge_form == 'pure_concat':
            pan_out2 = torch.cat(pan_out2s, dim=1)
            pan_out1 = torch.cat(pan_out1s, dim=1)
            pan_out0 = torch.cat(pan_out0s, dim=1)
        elif self.merge_form == 'add':
            pan_out2 = torch.sum(torch.stack(pan_out2s), dim=0)
            pan_out1 = torch.sum(torch.stack(pan_out1s), dim=0)
            pan_out0 = torch.sum(torch.stack(pan_out0s), dim=0)
        else:
            raise Exception(
                'merge_form must be in ["pure_concat", "add"].')
    outputs = (pan_out2, pan_out1, pan_out0)
    return outputs
[ 3988, 76 ]
def METHOD_NAME(self) -> 'outputs.PolicyModelPropertiesResponse':
    """
    Policy model properties.
    """
    return pulumi.get(self, "properties")
[ 748 ]
def METHOD_NAME(pos): """Download elevation Download elevation data for the specified position. ARGUMENTS: pos (lat,lon) The specified position RETURNS: None EXCEPTIONS: Varies If DEBUG is enabled, all errors are exceptions """ try: os.makedirs(CACHEDIR,exist_ok=True) if pos[0] < 0: lat = f"{-math.floor(pos[0])}S" elif pos[0] > 0: lat = f"{math.floor(pos[0])}N" else: lat = "0" if pos[1] < 0: lon = f"{-math.floor(pos[1])}W" elif pos[0] > 0: lon = f"{math.floor(pos[1])}E" else: lon = "0" zipfile = f"{TMPDIR}/{lat}_{lon}.zip" tiffile = f"{CACHEDIR}/{lat}_{lon}.tif" if not os.path.exists(tiffile): verbose(f"{tiffile} not found, downloading {zipfile}...") url = f"{ROOTURL}{abs(math.floor(pos[0]))}W{abs(math.floor(pos[1])):03d}.zip" verbose(f"setting url to {url}") response = session.get(url, stream=True) response.raise_for_status() with open(zipfile, 'wb') as fd: for chunk in response.iter_content(chunk_size=1024*1024): fd.write(chunk) verbose(f"saved zipfile to {zipfile}") with ZipFile(zipfile,"r") as zip: files = zip.namelist() for file in files: if file.endswith(f"{abs(math.floor(pos[0]))}W{abs(math.floor(pos[1])):03d}_dem.tif"): verbose(f"extracting {file} to {TMPDIR}") zip.extract(file,TMPDIR) verbose(f"move {TMPDIR}/{file} to {tiffile}") os.rename(f"{TMPDIR}/{file}",tiffile) else: verbose(f"skipping {file}") os.remove(zipfile) verbose(f"") else: verbose(f"{tiffile} is already downloaded") except Exception as err: error(err)
[ 136, 12258 ]
def METHOD_NAME(test_dir):
    return Path(test_dir / "cp2k").glob("*/inputs")
[ 8156, 9, 1461 ]
def METHOD_NAME():
    dd = _zip_data_helper(('abc', 'def'), 10)
    for ev in zip_events(*dd, lazy=False):
        assert set('abcdef') == set(ev['descriptor']['data_keys'])
        assert set('abcdef') == set(ev['data'])
        assert set('abcdef') == set(ev['timestamps'])
[ 9, 1426, 239, 3133 ]
def METHOD_NAME(self, method, apiurl="", data=None):
    """
    Do the request to the api url.

    :param method: get, put, post or delete
    :type method: basestring
    :param apiurl: called url
    :type apiurl: basestring
    :param data: optional data to send (e.g. post)
    :type data: dict
    :return: response from the api
    :rtype: json
    """
    apikey = aj.config.data.get('dns_api', {}).get('apikey', None)
    sharing_id = aj.config.data.get('dns_api', {}).get('sharing_id', None)
    params = {'sharing_id': sharing_id}

    if method not in ["get", "put", "post", "delete"] or apikey is None:
        return

    func = getattr(requests, method)
    if data is None:
        resp = func(
            f"{self.baseUrl}{apiurl}",
            params=params,
            headers={"Authorization": f"Apikey {apikey}"}
        )
    else:
        resp = func(
            f"{self.baseUrl}{apiurl}",
            data=data,
            params=params,
            headers={"Authorization": f"Apikey {apikey}"}
        )
    return resp
[ 3776 ]
def METHOD_NAME(file_list,
                mode,
                shuffle=False,
                color_jitter=False,
                rotate=False,
                data_dir=DATA_DIR,
                crop_size=DATA_DIM,
                resize_size=RESIZE_DIM,
                batch_size=1):
    def reader():
        try:
            with open(file_list) as flist:
                full_lines = [line.strip() for line in flist]
                if shuffle:
                    np.random.shuffle(full_lines)
                lines = full_lines
                for line in lines:
                    if mode == 'train' or mode == 'val':
                        img_path, label = line.split()
                        img_path = os.path.join(data_dir, img_path)
                        yield img_path, int(label)
                    elif mode == 'test':
                        img_path = os.path.join(data_dir, line)
                        yield [img_path]
        except Exception as e:
            print("Reader failed!\n{}".format(str(e)))
            os._exit(1)

    mapper = functools.partial(
        process_image,
        mode=mode,
        color_jitter=color_jitter,
        rotate=rotate,
        crop_size=crop_size,
        resize_size=resize_size)

    return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)
[ 781, 3900 ]
def METHOD_NAME(self):
    self.round_trip(
        b"\x81\x05caf\xc3\xa9", Frame(True, OP_TEXT, "café".encode("utf-8"))
    )
[ 9, 256, 4428, 526 ]
def METHOD_NAME(): d = {"x": [0, 0, 0, 1, 1, 1, 2, 2]} df = vaex.from_dict(d) assert df.first("x", selection=[None, "x>0"]).tolist() == [0, 1]
[ 9, 865, 1038 ]
def METHOD_NAME(self, tool_id):
[ 19, 3081 ]
def METHOD_NAME(acm_stubber):
    acm_stubber.add_response("list_certificates", list_certificates_response)
    acm_stubber.add_response("describe_certificate", describe_issued_cert_response)
    results = certificate_status_check(
        cache={}, awsAccountId="012345678901", awsRegion="us-east-1", awsPartition="aws"
    )
    for result in results:
        assert result["RecordState"] == "ARCHIVED"
    acm_stubber.assert_no_pending_responses()
[ 9, 18164, 1941, 452, 11818, 250 ]
def METHOD_NAME(self, other) -> EvolvablePlayer:
    """
    Creates and returns a new Player instance with a single crossover point.
    """
    if other.__class__ != self.__class__:
        raise TypeError(
            "Crossover must be between the same player classes."
        )
    cycle_list = crossover_lists(self.cycle, other.cycle, self._random)
    cycle = "".join(cycle_list)
    cycle, _ = self._normalize_parameters(cycle)
    return self.create_new(cycle=cycle, seed=self._random.random_seed_int())
[ 5386 ]
def METHOD_NAME(self):
[ 187 ]
def METHOD_NAME(
    request, database, game, players, game_service, player_service, event_loop
):
    conn = GameConnection(
        database=database,
        game=game,
        player=players.hosting,
        protocol=mock.create_autospec(QDataStreamProtocol),
        player_service=player_service,
        games=game_service
    )
    conn.finished_sim = False

    def fin():
        event_loop.run_until_complete(conn.abort())

    request.addfinalizer(fin)
    return conn
[ 2674, 550 ]
def METHOD_NAME(self):
    log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
    x0 = np.array([2, 2])

    # Test initial proposal is first point
    mcmc = pints.MALAMCMC(x0)
    self.assertTrue(np.all(mcmc.ask() == mcmc._x0))

    # Repeated asks
    self.assertRaises(RuntimeError, mcmc.ask)

    # Tell without ask
    mcmc = pints.MALAMCMC(x0)
    self.assertRaises(RuntimeError, mcmc.tell, 0)

    # Repeated tells should fail
    x = mcmc.ask()
    mcmc.tell(log_pdf.evaluateS1(x))
    self.assertRaises(RuntimeError, mcmc.tell, log_pdf.evaluateS1(x))

    # Bad starting point
    mcmc = pints.MALAMCMC(x0)
    mcmc.ask()
    self.assertRaises(
        ValueError, mcmc.tell, (-np.inf, np.array([1, 1])))

    # Test initialisation twice
    mcmc = pints.MALAMCMC(x0)
    mcmc._running = True
    self.assertRaises(RuntimeError, mcmc._initialise)
[ 9, 233 ]
def METHOD_NAME(self, event):
    '''Primary event processing function which kicks off reporead timer
    threads if a files database was updated.'''
    name = event.name
    if not name:
        return
    # screen to only the files we care about, skipping temp files
    if name.endswith(self.filename_suffix) and not name.startswith('.'):
        path = event.pathname
        stat = os.stat(path)
        database = self.databases.get(path, None)
        if database is None:
            arch = self.arch_lookup.get(event.path, None)
            if arch is None:
                logger.warning(
                    'Could not determine arch for %s, skipping update', path)
                return
            database = Database(arch, path, self.callback_func)
            self.databases[path] = database
        database.queue_for_update(stat.st_mtime)
[ 356, 235 ]
def METHOD_NAME(self) -> TextBlock:
    return create_text_block(
        self.notification.get_notification_title(ExternalProviders.MSTEAMS, self.context),
        size=TextSize.LARGE,
    )
[ 129, 2893, 573 ]
def METHOD_NAME(self):
    sms_client = self.create_client_from_connection_string()

    with pytest.raises(HttpResponseError) as ex:
        sms_responses = sms_client.send(
            from_=self.phone_number,
            to=["Ad155500000000000"],
            message="Hello World via SMS")

    assert ex.value.status_code == 400
[ 9, 353, 3179, 1278, 24, 3180, 106 ]
def METHOD_NAME():
    mock_run = make_mock_run(good_projection, 'one_ring')
    assert get_run_projection(mock_run, projection_name="nxsas") == good_projection[0]
    assert get_run_projection(mock_run, projection_name="vogons") is None
    assert get_run_projection(mock_run) == good_projection[0]  # only one projection in run so choose it
    with pytest.raises(KeyError):
        mock_run = make_mock_run(projections_same_name, 'one_ring')
        get_run_projection(mock_run, projection_name="nxsas")
[ 9, 416, 1958, 623, 22 ]
def METHOD_NAME(self): """Trying to reply to a locked thread should raise an exception.""" locked = ThreadFactory(is_locked=True) with self.assertRaises(ThreadLockedError): locked.new_post(creator=locked.creator, content="foo")
[ 9, 418, 600 ]
def METHOD_NAME(vm, handler_path, mem_path):
    """Spawn page fault handler process."""
    # Copy snapshot memory file into chroot of microVM.
    jailed_mem = vm.create_jailed_resource(mem_path)
    # Copy the valid page fault binary into chroot of microVM.
    jailed_handler = vm.create_jailed_resource(handler_path)
    handler_name = os.path.basename(jailed_handler)

    args = [SOCKET_PATH, jailed_mem]
    uffd_handler = UffdHandler(handler_name, args)

    real_root = os.open("/", os.O_RDONLY)
    working_dir = os.getcwd()

    os.chroot(vm.chroot())
    os.chdir("/")
    st = os.stat(handler_name)
    os.chmod(handler_name, st.st_mode | stat.S_IEXEC)

    uffd_handler.spawn()
    try:
        outs, errs = uffd_handler.proc().communicate(timeout=1)
        print(outs)
        print(errs)
        assert False, "Could not start PF handler!"
    except TimeoutExpired:
        print("This is the good case!")

    # The page fault handler will create the socket path with root rights.
    # Change rights to the jailer's.
    os.chown(SOCKET_PATH, vm.jailer.uid, vm.jailer.gid)

    os.fchdir(real_root)
    os.chroot(".")
    os.chdir(working_dir)

    return uffd_handler
[ 597, 5290, 1519 ]
def METHOD_NAME(self):
    if self.disable:
        return

    self.disable = True

    with self.get_lock():
        self._instances.remove(self)

    # Restore toolbars
    self.mpl.rcParams['toolbar'] = self.toolbar
    # Return to non-interactive mode
    if not self.wasion:
        self.plt.ioff()
    if self.leave:
        self.display()
    else:
        self.plt.METHOD_NAME(self.fig)
[ 1462 ]
def METHOD_NAME(a, b): """Calculate the greatest common divisor of a and b""" while b: a, b = b, a % b return a
[ 12847 ]
def METHOD_NAME():
    assert util.to_bytestring('test_str', 'ascii') == b'test_str'
    assert util.to_bytestring('test_str®') == b'test_str\xc2\xae'
    assert util.to_bytestring(b'byte_test_str') == b'byte_test_str'
    with pytest.raises(TypeError) as exc_info:
        util.to_bytestring(100)
    msg = '100 is not a string'
    assert msg in str(exc_info.value)
[ 9, 24, 15016 ]
def METHOD_NAME(self):
    self.IncidentsRunPlaybook(ctx=self.ctx)()
[ 750, 710 ]
def METHOD_NAME(self):
    if self.options.shared:
        self.options.rm_safe("fPIC")
    self.settings.rm_safe("compiler.cppstd")
    self.settings.rm_safe("compiler.libcxx")
[ 111 ]
def METHOD_NAME(function, iterable):
    return {x for x in iterable if function(x)}
[ 0, 527 ]
def METHOD_NAME(model, strategy=None):
    wrap_cls = None
    if strategy is None:
        return _propose_leaf_modules(wrap_cls)
    for opt in strategy:
        opt_name = opt[0]
        if opt_name == "fsdp":
            if len(opt) > 1:
                opt_config = opt[1]
                atorch_wrap_cls = set(to_module_class_by_name(model, opt_config.get("atorch_wrap_cls", set())))
                if wrap_cls is None:
                    wrap_cls = atorch_wrap_cls
                else:
                    wrap_cls = wrap_cls & atorch_wrap_cls
        if opt_name == "checkpoint":
            if len(opt) > 1:
                opt_config = opt[1]
                ckpt_wrap_cls = set(to_module_class_by_name(model, opt_config))
                if wrap_cls is None:
                    wrap_cls = ckpt_wrap_cls
                else:
                    wrap_cls = wrap_cls & ckpt_wrap_cls
    leaf_modules = _propose_leaf_modules(wrap_cls)
    return leaf_modules
[ 13730, 3802, 468, 604, 1554 ]
def METHOD_NAME():
    return load_from_matrix("accept_application.csv", test_ids=[])
[ 557, 511, 2757 ]
def METHOD_NAME(config): """Get registered scheduler class. Get a scheduler object from SCHEDULERS. Args: config: A config dict object that contains the scheduler information. Returns: A Scheduler object. """ name = "iterative" if config.start_step == config.end_step: name = "oneshot" return SCHEDULERS[name](config)
[ 19, 1520 ]
def METHOD_NAME():
    parser = argparse.ArgumentParser(description="GRAND")
    # data source params
    parser.add_argument(
        "--dataname", type=str, default="cora", help="Name of dataset."
    )
    # cuda params
    parser.add_argument(
        "--gpu", type=int, default=-1, help="GPU index. Default: -1, using CPU."
    )
    # training params
    parser.add_argument(
        "--epochs", type=int, default=200, help="Training epochs."
    )
    parser.add_argument(
        "--early_stopping",
        type=int,
        default=200,
        help="Patient epochs to wait before early stopping.",
    )
    parser.add_argument("--lr", type=float, default=0.01, help="Learning rate.")
    parser.add_argument(
        "--weight_decay", type=float, default=5e-4, help="L2 reg."
    )
    # model params
    parser.add_argument(
        "--hid_dim", type=int, default=32, help="Hidden layer dimensionalities."
    )
    parser.add_argument(
        "--dropnode_rate",
        type=float,
        default=0.5,
        help="Dropnode rate (1 - keep probability).",
    )
    parser.add_argument(
        "--input_droprate",
        type=float,
        default=0.0,
        help="dropout rate of input layer",
    )
    parser.add_argument(
        "--hidden_droprate",
        type=float,
        default=0.0,
        help="dropout rate of hidden layer",
    )
    parser.add_argument("--order", type=int, default=8, help="Propagation step")
    parser.add_argument(
        "--sample", type=int, default=4, help="Sampling times of dropnode"
    )
    parser.add_argument(
        "--tem", type=float, default=0.5, help="Sharpening temperature"
    )
    parser.add_argument(
        "--lam",
        type=float,
        default=1.0,
        help="Coefficient of consistency regularization",
    )
    parser.add_argument(
        "--use_bn",
        action="store_true",
        default=False,
        help="Using Batch Normalization",
    )
    args = parser.parse_args()

    # check cuda
    if args.gpu != -1 and th.cuda.is_available():
        args.device = "cuda:{}".format(args.gpu)
    else:
        args.device = "cpu"

    return args
[ 1545 ]
def METHOD_NAME(self): return "MgmtErrorFormat"
[ 168, 275 ]
def METHOD_NAME(self): """Return a list of datainfo dicts representing interpretation templates """ sample_type_uid = self.context.getRawSampleType() template_uid = self.context.getRawTemplate() def is_suitable(obj): """Returns whether the interpretation passed-in suits well with the underlying sample object """ obj = api.get_object(obj) sample_types = obj.getRawSampleTypes() or [sample_type_uid] if sample_type_uid in sample_types: return True analysis_templates = obj.getRawAnalysisTemplates() if template_uid in analysis_templates: return True return False def get_data_info(item): return { "uid": api.get_uid(item), "title": api.get_title(item) } # Get all available templates query = {"portal_type": "InterpretationTemplate", "review_state": "active", "sort_on": "sortable_title", "sort_order": "ascending"} brains = api.search(query, SETUP_CATALOG) # Purge the templates that do not suit well with current sample brains = filter(is_suitable, brains) return map(get_data_info, brains)
[ 19, 10843, 1914 ]
def METHOD_NAME(config: Config, workspace: Workspace):
    index_file = workspace.root / f"{config.memory_index}.json"
    index_file.touch()
    assert index_file.exists()

    JSONFileMemory(config)
    assert index_file.exists()
    assert index_file.read_text() == "[]"
[ 9, 763, 1645, 176, 41, 13251, 35 ]
def METHOD_NAME(node):
    if node.type == syms.vfpdef:
        return METHOD_NAME(node.children[1])
    elif node.type == token.NAME:
        return node.value
    return [METHOD_NAME(c) for c in node.children if c.type != token.COMMA]
[ 416, 434 ]
def METHOD_NAME(self):
[ 571, 1756 ]
def METHOD_NAME(self, obj):
    return obj.species_name
[ 19, 8669 ]
def METHOD_NAME(cls, parser):
    ServerEndpoint.add_argument(parser, "endpoint")
[ 238, 5624, 134 ]
def METHOD_NAME(self, exclude=None):
    self.field_errors = {}
    try:
        super().METHOD_NAME(exclude)
    except ValidationError as error:
        self.field_errors = error.error_dict
        raise
[ 1356, 342 ]
def METHOD_NAME(self, res, type_, data, id, name):
    self.assertEqual(res.type, type_)
    self.assertEqual(res.data, data)
    self.assertEqual(res.id, id)
    self.assertEqual(res.name, name)
[ 638, 2177, 926 ]
def METHOD_NAME(self, command_args):
    super().METHOD_NAME(command_args)
    return self.build_lro_poller(self._execute_operations, None)
[ 1519 ]
def METHOD_NAME(self): """ TODO: This function need to be implemented when decide to support monitoring FAN(fand) on this platform. """ raise NotImplementedError
[ 19, 194, 417 ]
def METHOD_NAME(results, image_id, num_id_to_cat_id_map):
    import pycocotools.mask as mask_util
    segm_res = []

    # for each batch
    segms = results['segm'].astype(np.uint8)
    clsid_labels = results['cate_label']
    clsid_scores = results['cate_score']
    lengths = segms.shape[0]
    im_id = int(image_id[0][0])
    if lengths == 0 or segms is None:
        return None
    # for each sample
    for i in range(lengths - 1):
        clsid = int(clsid_labels[i])
        catid = num_id_to_cat_id_map[clsid]
        score = float(clsid_scores[i])
        mask = segms[i]
        segm = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
        segm['counts'] = segm['counts'].decode('utf8')
        coco_res = {
            'image_id': im_id,
            'category_id': catid,
            'segmentation': segm,
            'score': score
        }
        segm_res.append(coco_res)
    return segm_res
[ 19, -1, 10528, 4804 ]
def METHOD_NAME(cls):
    if location in registry_algorithms and algorithm_type in registry_algorithms[location]:
        raise ValueError("Cannot have two algorithms with the same name")

    if location not in registry_algorithms:
        registry_algorithms[location] = {}
    registry_algorithms[location][algorithm_type] = cls()
    return cls
[ 972, 4089 ]
def METHOD_NAME(elmnts):
    """Fetch the symbol entry in the elements dictionary in Aiida."""
    new_dict = {}
    for key, value in elmnts.items():
        new_dict[value['symbol']] = key
    return new_dict
[ 1047, 872, 280, 1532 ]
def METHOD_NAME(self):
    # read record fields as an array
    fields = {}
    flist = self.aslist()
    numfields = len(flist)
    for i in range(numfields):
        line = flist[i]
        if line and line[0].isalpha():
            field = line.split()
            if i + 1 < numfields:
                if not flist[i + 1][0].isalpha():
                    fields[field[0]] = self.read_array_field(
                        flist[i : i + int(field[1]) + 1]
                    )
                else:
                    fields[field[0]] = " ".join(s for s in field[1:])
            else:
                fields[field[0]] = " ".join(s for s in field[1:])
        else:
            continue
    return fields
[ 19, 342 ]