Columns:
  text (string, lengths 15 to 7.82k)
  ids (integer sequence, lengths 1 to 7)
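Each row below pairs a "text" value (a Python function with its name masked as METHOD_NAME) with an "ids" value (an integer sequence, plausibly token ids for the masked name). A minimal sketch of iterating rows with this schema, assuming the data is published as a Hugging Face dataset; the dataset path is hypothetical, and only the "text" and "ids" column names come from the listing:

from datasets import load_dataset

# "user/method-name-corpus" is a placeholder path; only the ("text", "ids")
# schema is taken from the listing above.
ds = load_dataset("user/method-name-corpus", split="train")
for row in ds:
    masked_source = row["text"]  # function source containing the METHOD_NAME placeholder
    name_ids = row["ids"]        # integer ids encoding the original method name
    assert "METHOD_NAME" in masked_source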
def METHOD_NAME(iter):
    for epoch in range(iter):
        y1 = model(x)
        loss = criterion(y1, y)
        writer.add_scalar("Loss/train", loss, epoch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
[ 849, 578 ]
def METHOD_NAME(self):
    test_bin = os.path.join(test_location, "..", "..", "binaries", "tests", "x86_64", "scanf_multi_test")
    b = angr.Project(test_bin, auto_load_libs=False)
    pg = b.factory.simulation_manager()
    expected_outputs = {
        b"%%04x.%%04x.%%04x\n": Checker(
            [
                lambda x: int(x, 16) == 0xAAAA,
                lambda x: int(x, 16) == 0xBBBB,
                lambda x: int(x, 16) == 0xCCCC,
            ],
            base=16,
            multi=True,
            delimiter=".",
        ),
        b"%%04x.%%04x.%%04x and negative numbers\n": Checker(
            [lambda x: int(x, 16) == -0xCD] * 3,
            base=16,
            multi=True,
            delimiter=".",
        ),
        b"%%d.%%d.%%d\n": Checker(
            [lambda x: int(x, 10) == 133337, lambda x: int(x, 10) == 1337, lambda x: int(x, 10) == 13337],
            base=10,
            multi=True,
            delimiter=".",
        ),
        b"%%d.%%d.%%d and negative numbers\n": Checker(
            [lambda x: int(x, 10) == 2**32 - 1337] * 3,
            base=10,
            multi=True,
            delimiter=".",
        ),
        b"%%u\n": Checker(
            [lambda x: int(x) == 0xAAAA, lambda x: int(x) == 0xBBBB, lambda x: int(x) == 0xCCCC],
            base=10,
            multi=True,
            delimiter=".",
        ),
        b"%%u and negative numbers\n": Checker(
            [lambda s: int(s) == 2**32 - 0xCDCD] * 3,
            base=10,
            multi=True,
            delimiter=".",
        ),
        b"Unsupported switch\n": Checker(None, dummy=True),
    }
    pg.explore(
        find=0x40083E,
        avoid=(
            0x4006DB,
            0x400776,
            0x40080B,
        ),  # avoid all "nope N" branches
        num_find=len(expected_outputs),
    )
    # check the outputs
    total_outputs = 0
    for path in pg.found:
        path.posix.dumps(0)
        test_output = path.posix.dumps(1)
        if test_output in expected_outputs:
            assert expected_outputs[test_output].check(path), "Test case failed. Output is %s." % test_output
            total_outputs += 1
    # check that all of the outputs were seen
    assert total_outputs == len(expected_outputs)
[ 9, -1, 457 ]
def METHOD_NAME(test_case: Dict) -> None:
    run_tangerine_whistle_blockchain_st_tests(test_case)
[ 9, 256, 3116, 551, 450 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_url_param(
            "managedInstanceName", self.ctx.args.managed_instance_name,
            required=True,
        ),
        **self.serialize_url_param(
            "resourceGroupName", self.ctx.args.resource_group,
            required=True,
        ),
        **self.serialize_url_param(
            "subscriptionId", self.ctx.subscription_id,
            required=True,
        ),
    }
    return parameters
[ 274, 386 ]
def METHOD_NAME(args: list) -> bool:
    """Check if all inputs in the list are .iloc friendly."""
    return all([is_iloc_like(x) if x is not None else True for x in args])
[ 75, 1461, 472, 10898, 2307 ]
def METHOD_NAME(self, obj, data=None):
    data = self.entry.get_text()
    data = data + obj.get_label()
    if any(x in data for x in ['/2', '/4', '/8', '/16', '/32', '/64', '/128']):
        v = [0] + [float(x) for x in data.replace('/', '.').split('.')]
        data = f'{v[-3] + v[-2]/v[-1]:6.7}'
    self.entry.set_text(data)
[ 15976, 106, 212 ]
def METHOD_NAME(kwargs):
    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
    return SourceModifiedAccessConditions(
        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
        source_if_match=if_match or kwargs.pop('source_if_match', None),
        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
    )
[ 19, 1458, 1626 ]
def METHOD_NAME(self):
    self.check_args('/NumberField/Qsqrt5', '0.481211825')  # regulator
[ 9, 274, -1 ]
def METHOD_NAME(
    pack: Union[ResourcePack, DataPack],
    binary_files: bool = False,
) -> JsonDict:
    listing: JsonDict = {
        "name": pack.name,
        "description": pack.description,
        "pack_format": pack.pack_format,
        "empty": not pack,
    }
    listing["text_files"] = {
        k: v.text for k, v in pack.list_files(extend=TextFileBase[Any])
    }
    if binary_files:
        listing["binary_files"] = {
            k: base64.b64encode(v.blob).decode()
            for k, v in pack.list_files(extend=BinaryFileBase[Any])
        }
    return listing
[ 129, 1699, 2269 ]
def METHOD_NAME(self, path):
    """Deletes a file at 'path'"""
    if os.path.exists(path):
        os.remove(path)
[ 34, 171 ]
def METHOD_NAME(
    self, tests, obj, name, module, source_lines, globs, seen
) -> None:
    if _is_mocked(obj):
        return
    with _patch_unwrap_mock_aware():
        # Type ignored because this is a private function.
        super().METHOD_NAME(  # type:ignore[misc]
            tests, obj, name, module, source_lines, globs, seen
        )
[ 416 ]
def METHOD_NAME():
    global energies
    system.integrator.run(int_steps)
    visualizer.update()
    energies = system.analysis.energy()
    plot.set_xdata(np.append(plot.get_xdata(), system.time))
    plot.set_ydata(np.append(plot.get_ydata(), energies['total']))
[ 57, 1751 ]
def METHOD_NAME(name):
    """Determine if a name is a class private name."""
    # Exclude system defined names such as __init__, __add__ etc
    return name.startswith("__") and not name.endswith("__")
[ 137, 2, 547, 156 ]
def METHOD_NAME(schema: Dict) -> Dict[str, List[str]]:
    """Find all distribution-like metrics in a Glean table.

    Metric types are defined in the Glean documentation found here:
    https://mozilla.github.io/glean/book/user/metrics/index.html
    """
    metric_type_set = {
        "timing_distribution",
        "memory_distribution",
        "custom_distribution",
    }
    metrics: Dict[str, List[str]] = {metric_type: [] for metric_type in metric_type_set}
    excluded_metrics = get_etl_excluded_probes_quickfix("fenix")
    # Iterate over every element in the schema under the metrics section and
    # collect a list of metric names.
    for root_field in schema:
        if root_field["name"] != "metrics":
            continue
        for metric_field in root_field["fields"]:
            metric_type = metric_field["name"]
            if metric_type not in metric_type_set:
                continue
            for field in metric_field["fields"]:
                if field["name"] not in excluded_metrics:
                    metrics[metric_type].append(field["name"])
    return metrics
[ 19, 664, 1097 ]
def METHOD_NAME(self):
    update_area(self.area1)

    added_links = self.session.query(AreaAssociation). \
        filter(AreaAssociation.area_id == self.area1.document_id).all()

    self.assertEqual(len(added_links), 1)
    self.assertEqual(added_links[0].document_id, self.waypoint1.document_id)
[ 9, 86, 690 ]
def METHOD_NAME(self):
    self.population = Population()
[ 102, 2 ]
def METHOD_NAME(name: TStr) -> TNone:
    raise NotImplementedError("syscall not simulated")
[ 5942, 148, 447 ]
def METHOD_NAME(self):
    lock = defer.DeferredLock()

    @util.deferredLocked(lock)
    def check_locked(arg1, arg2):
        self.assertEqual([lock.locked, arg1, arg2], [True, 1, 2])
        return defer.succeed(None)

    yield check_locked(1, 2)
    self.assertFalse(lock.locked)
[ 9, 667 ]
def METHOD_NAME(self, validated_data):
    if (project_id := validated_data.get('project_id')) is not None:
        validated_data['organization'] = Project.objects.get(pk=project_id).organization
    db_webhook = Webhook.objects.METHOD_NAME(**validated_data)
    return db_webhook
[ 129 ]
def METHOD_NAME(i):
    return socket.inet_ntop(socket.AF_INET6, binascii.unhexlify(hex(i)[2:].replace('L', '')))
[ 962, 24, 1899 ]
def METHOD_NAME():
    dim = Dimension(1)
    foo = IntVector(dim)
    with pytest.raises(TypeError):
        foo.set(0, 0.1)
    with pytest.raises(TypeError):
        foo.add(0, 0.1)
[ 9, 962, 2505, 1819 ]
def METHOD_NAME(self):
    self.f = get_temp_copy(get_data_path('test.wma'))
    self.song = WMAFile(self.f)
    self.f2 = get_temp_copy(get_data_path('test-2.wma'))
    self.song2 = WMAFile(self.f2)
    self.f3 = get_temp_copy(get_data_path('test.asf'))
    self.song3 = WMAFile(self.f3)
[ 0, 1 ]
def METHOD_NAME(self, pv: SharedPV, op: ServerOperation) -> None:
    """Called each time a client issues a put operation on the channel using this handler"""
    pv.post(op.value())
    op.done()
[ 1276 ]
def METHOD_NAME(self, texts, yaml_str=None, topk=10, is_canonicalized=False, filter=True):
    if self.eidos_reader is None:
        self.initialize_reader()
    if yaml_str is None:
        yaml_str = self.get_default_ontology()
    text_seq = _list_to_seq(texts)
    raw_groundings = \
        self.eidos_reader.components().ontologyHandler().reground(
            'Custom',          # name
            yaml_str,          # ontologyYaml
            text_seq,          # texts
            filter,            # filter
            topk,              # topk
            is_canonicalized   # isAlreadyCanonicalized
        )
    # Process the return values into a proper Python representation
    groundings = [[_get_scored_grounding(entry) for entry in text_grounding]
                  for text_grounding in raw_groundings]
    return groundings
[ -1, 1701 ]
async def METHOD_NAME(self):
    queue = BasicMessageQueue()
    with self.assertRaises(asyncio.TimeoutError):
        await queue.dequeue(timeout=0)
    test_value = "test value"
    await queue.enqueue(test_value)
    assert await queue.dequeue(timeout=0) == test_value
    with self.assertRaises(asyncio.TimeoutError):
        await queue.dequeue(timeout=0)
    queue.task_done()
    await queue.join()
[ 9, 419, 11012 ]
async def METHOD_NAME():
    await ui.run_javascript('navigator.clipboard.writeText(`' + code + '`)', respond=False)
    ui.notify('Copied to clipboard', type='positive', color='primary')
[ 215, 544 ]
def METHOD_NAME(self, data):
    return LoadBalancer(
        id=data["id"],
        name=data["name"],
        state=self.LB_STATE_MAP.get(data["status"], State.UNKNOWN),
        ip=self._public_ip(data),
        port=data["listeners"][0]["in"],
        driver=self.connection.driver,
    )
[ 24, 3737 ]
def METHOD_NAME(self):
    from udata.models import User
    self.metrics['users'] = User.objects(confirmed_at__ne=None, deleted=None).count()
    self.save()
[ 29, 3467 ]
def METHOD_NAME(title):
    end_time = time()
    title_str = 'End ' + title
    end_time_str = 'end time is: ' + asctime(gmtime(end_time))
    elapsed_time_str = 'elapsed time is ' + str(int(end_time - start_time)) + ' seconds'
    title_len = len(title_str)
    end_len = len(end_time_str)
    elapsed_len = len(elapsed_time_str)
    #
    # if start or title lengths are odd add a space so line lengths will work out
    #
    if title_len % 2:
        title_str = title_str + ' '
    if end_len % 2:
        end_time_str = end_time_str + ' '
    if elapsed_len % 2:
        elapsed_time_str = elapsed_time_str + ' '
    print '\n' + border
    gap = ' '
    gap_len = (border_len - title_len) / 2 - edge_len - 1
    for i in range(gap_len):
        gap = gap + ' '
    print edge + gap + title_str + gap + edge
    gap = ' '
    gap_len = (border_len - end_len) / 2 - edge_len - 1
    for i in range(gap_len):
        gap = gap + ' '
    print edge + gap + end_time_str + gap + edge
    gap = ' '
    gap_len = (border_len - elapsed_len) / 2 - edge_len - 1
    for i in range(gap_len):
        gap = gap + ' '
    print edge + gap + elapsed_time_str + gap + edge
    print border + '\n\n'
[ 1798, 1134 ]
def METHOD_NAME(self) -> None:
    self._additions: Attribute[int] = NotSet
    self._blob_url: Attribute[str] = NotSet
    self._changes: Attribute[int] = NotSet
    self._contents_url: Attribute[str] = NotSet
    self._deletions: Attribute[int] = NotSet
    self._filename: Attribute[str] = NotSet
    self._patch: Attribute[str] = NotSet
    self._previous_filename: Attribute[str] = NotSet
    self._raw_url: Attribute[str] = NotSet
    self._sha: Attribute[str] = NotSet
    self._status: Attribute[str] = NotSet
[ 176, 177 ]
def METHOD_NAME():
    """Regression test for #1121"""
    x = torch.rand(2, 3, 4)

    legacy_net = torch.nn.Sequential(
        LegacyPositionalEncoding(4, 0.0), torch.nn.Linear(4, 2)
    )
    latest_net = torch.nn.Sequential(PositionalEncoding(4, 0.0), torch.nn.Linear(4, 2))
    latest_net.load_state_dict(legacy_net.state_dict())
    legacy = legacy_net(x)
    latest = latest_net(x)
    assert torch.allclose(legacy, latest)

    legacy_net = torch.nn.Sequential(
        LegacyScaledPositionalEncoding(4, 0.0), torch.nn.Linear(4, 2)
    )
    latest_net = torch.nn.Sequential(
        ScaledPositionalEncoding(4, 0.0), torch.nn.Linear(4, 2)
    )
    latest_net.load_state_dict(legacy_net.state_dict())
    legacy = legacy_net(x)
    latest = latest_net(x)
    assert torch.allclose(legacy, latest)
[ 9, 2956 ]
def METHOD_NAME(self, assembler):
    """
    Setup tacs integrator responsible for solving transient problem we will be testing.
    """
    # Create the BDF integrator solver
    num_stages = 2
    # Set the file output format
    integrator = TACS.DIRKIntegrator(
        assembler, tinit, tfinal, float(num_steps), num_stages
    )
    return integrator
[ 102, 9991 ]
def METHOD_NAME(self) -> bool: ...
[ 5247 ]
def METHOD_NAME(ov10: bytes, config: Pmd2Data) -> List[DungeonMusicEntry]:
    block = config.bin_sections.overlay10.data.MUSIC_ID_TABLE
    lst = []
    for i in range(block.address, block.address + block.length, 2):
        lst.append(
            DungeonMusicEntry(
                read_u16(ov10, i),
            )
        )
    return lst
[ 19, 3681, 245 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_get_request(
            scope=scope,
            filter=filter,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(self, test, err):
    """Called when an error has occurred. 'err' is a tuple of values as
    returned by sys.exc_info()."""
    self.failures.append((test, self._exc_info_to_string(err, test)))
    self._mirrorOutput = True
[ 238, 374 ]
def METHOD_NAME(self, widget, **kwargs):
    if widget.style.font_weight == NORMAL:
        widget.style.font_weight = BOLD
    else:
        widget.style.font_weight = NORMAL
[ 74, 1336 ]
def METHOD_NAME(self, err):
    self.assert_('Permission denied' in err,
                 '"Permission denied" not found in %r' % err)
[ 638, 4496 ]
def METHOD_NAME(constraint: Optional[pulumi.Input[str]] = None,
                folder: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOrganizationPolicyResult]:
    """
    Allows management of Organization policies for a Google Folder. For more information see
    [the official documentation](https://cloud.google.com/resource-manager/docs/organization-policy/overview)

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    policy = gcp.folder.get_organization_policy(folder="folders/folderid",
        constraint="constraints/compute.trustedImageProjects")
    pulumi.export("version", policy.version)
    ```

    :param str constraint: (Required) The name of the Constraint the Policy is configuring, for example, `serviceuser.services`. Check out the [complete list of available constraints](https://cloud.google.com/resource-manager/docs/organization-policy/understanding-constraints#available_constraints).
    :param str folder: The resource name of the folder to set the policy for. Its format is folders/{folder_id}.
    """
    ...
[ 19, 1044, 54, 146 ]
def METHOD_NAME(self):
    """
    (`~astropy.units.Quantity`) (x, y, z) array of pressure at every point in the domain.
    """
    return self._pressure
[ 4710 ]
def METHOD_NAME():
    client = boto3.client("pinpoint", region_name="us-east-1")

    resp = client.create_app(CreateApplicationRequest={"Name": "myfirstapp"})

    assert "ApplicationResponse" in resp
    assert "Arn" in resp["ApplicationResponse"]
    assert "Id" in resp["ApplicationResponse"]
    assert resp["ApplicationResponse"]["Name"] == "myfirstapp"
    assert "CreationDate" in resp["ApplicationResponse"]
[ 9, 129, 991 ]
def METHOD_NAME(self):
    with self.assertRaisesRegex(CommandError, "Unknown database unknown"):
        call_command('sqlcreate', '--database=unknown')
[ 9, 427, 241, 462, 168, 217, 463 ]
def METHOD_NAME(cls, d):
    return cls(**d)
[ 280, 553 ]
def METHOD_NAME(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.OperationList"]
    """Lists all of the available RP operations.

    The operation returns the RP operations.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OperationList or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.redhatopenshift.v2020_04_30.models.OperationList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2020-04-30")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_request(
                api_version=api_version,
                template_url=self.METHOD_NAME.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                api_version=api_version,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        deserialized = self._deserialize("OperationList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
[ 245 ]
def METHOD_NAME(catalogue, config):
    return fn(catalogue, **config)
[ 667, 41, 200, 529, 2629 ]
def METHOD_NAME(self):
[ 5107 ]
def METHOD_NAME(plugin_name, settings):
    """Process a single plugin or raise errors that get bubbled up."""
    logger.debug(f"Loading {plugin_name}!")

    # Import plugin module
    try:
        plugin = importlib.import_module(plugin_name)
    except ModuleNotFoundError as err:
        if getattr(err, "name") == plugin_name:
            raise PluginNotFound(
                f"Unable to import plugin {plugin_name}: Module not found. Check that the plugin module has been "
                f"installed within the correct Python environment."
            ) from err
        raise err

    # Validate plugin config
    try:
        plugin_config = plugin.config
    except AttributeError as err:
        raise PluginImproperlyConfigured(
            f"Plugin {plugin_name} does not provide a 'config' variable. This should be defined in the plugin's "
            f"__init__.py file and point to the PluginConfig subclass."
        ) from err

    # Validate user-provided configuration settings and assign defaults. Plugin
    # validation that fails will stop before modifying any settings.
    if plugin_name not in settings.PLUGINS_CONFIG:
        settings.PLUGINS_CONFIG[plugin_name] = {}
    plugin_config.validate(settings.PLUGINS_CONFIG[plugin_name], settings.VERSION)

    # Plugin config is valid, so now we can add it to INSTALLED_APPS.
    plugin_import_path = f"{plugin_config.__module__}.{plugin_config.__name__}"
    if plugin_import_path not in settings.INSTALLED_APPS:
        settings.INSTALLED_APPS.append(plugin_import_path)

    # Include any extra installed apps provided by the plugin
    # TODO(jathan): We won't be able to support advanced app-ordering concerns
    # and if the time comes that we do, this will have to be rethought.
    for plugin_installed_app in plugin_config.installed_apps:
        if plugin_installed_app not in settings.INSTALLED_APPS:
            settings.INSTALLED_APPS.append(plugin_installed_app)

    # Include any extra middleware provided by the plugin
    for middleware in plugin_config.middleware:
        if middleware not in settings.MIDDLEWARE:
            settings.MIDDLEWARE.append(middleware)

    # Update Constance Config and Constance Fieldset
    if plugin_config.constance_config:
        app_config = {}
        for key, value in plugin_config.constance_config.items():
            config_item = value
            # Enforce ConstanceConfigItem namedtuple
            if not isinstance(value, ConstanceConfigItem):
                config_item = ConstanceConfigItem(*value)
                plugin_config.constance_config[key] = config_item

            app_config[f"{plugin_name}__{key}"] = config_item

        settings.CONSTANCE_CONFIG.update(app_config)
        settings.CONSTANCE_CONFIG_FIELDSETS.update({f"{plugin_config.verbose_name}": app_config.keys()})
[ 557, 2793 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_request(
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(self):
    """Check if Job is running which is reported by LSF as ``RUN``.
    Return ``True`` if there is a match otherwise returns ``False``"""
    return self._state == "RUN"
[ 137, 1340 ]
def METHOD_NAME(self):
    return self.client.format_url(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}",
        **self.url_parameters
    )
[ 274 ]
def METHOD_NAME(self):
    '''Close all open handles after the request has finished'''
    for handle in self.handles.values():
        handle.close()
    super(ProcessHandler, self).METHOD_NAME()
[ 69, 1239 ]
def METHOD_NAME(self):
    """First line, no signature
    Second line followed by indentation::

        indented line
    """
[ -1 ]
def METHOD_NAME(self, widget, path, text):
    try:
        tree_store: Gtk.ListStore = self.builder.get_object('tree_store')
        tree_store[path][0] = u16_checked(int(text))
    except ValueError:
        return
    self._regenerate_list()
[ 69, 147, 156, 5848 ]
def METHOD_NAME(exc):
    """
    Returns True if the given exception indicates that there was an attempt
    to save a User record with an already-existing username.

    Args:
        exc (Exception): An exception

    Returns:
        bool: Whether or not the exception indicates a duplicate username error
    """
    return re.search(r"\(username\)=\([^\s]+\) already exists", str(exc)) is not None
[ 137, 1119, 2072, 168 ]
def METHOD_NAME(self) -> bool: ...
[ 137, 3271 ]
def METHOD_NAME(self, mock_requests):
    url = 'http://localhost:8080/frugal'
    headers = {'foo': 'bar'}
    resp = Mock(status_code=200)
    response = b'response'
    buff = bytearray(4)
    pack_into('!I', buff, 0, len(response))
    resp.content = b64encode(buff + response)
    mock_requests.post.return_value = resp

    def get_headers():
        return {'baz': 'qux'}

    tr = THttpTransport(url, headers=headers, get_headers=get_headers,
                        response_capacity=500)
    tr.open()
    self.assertTrue(tr.isOpen())

    data = b'helloworld'
    buff = bytearray(4)
    pack_into('!I', buff, 0, len(data))
    encoded_frame = b64encode(buff + data)

    tr.set_timeout(5000)
    tr.write(data)
    tr.flush()

    mock_requests.post.assert_called_once_with(
        url, data=encoded_frame, timeout=5,
        headers={'foo': 'bar',
                 'baz': 'qux',
                 'Content-Length': '20',
                 'Content-Type': 'application/x-frugal',
                 'Content-Transfer-Encoding': 'base64',
                 'User-Agent': 'Python/TBaseHttpTransport',
                 'x-frugal-payload-limit': '500'})

    resp = tr.read(len(response))
    self.assertEqual(response, resp)

    tr.close()
    self.assertTrue(tr.isOpen())  # open/close are no-ops
[ 9, 377, 659 ]
def METHOD_NAME(self):
    bin_data = None
    self.Warning.nan_in_image.clear()
    self.Error.invalid_axis.clear()
    self.Error.invalid_block.clear()

    attrs = self.attrs
    if self.data and len(self.data.domain.attributes) and len(attrs):
        if np.any(np.isnan(self.data.X)):
            self.Warning.nan_in_image(np.sum(np.isnan(self.data.X)))
        try:
            bin_data = bin_hyperspectra(self.data, attrs, self.bin_shape)
        except InvalidAxisException as e:
            self.Error.invalid_axis(e.args[0])
        except InvalidBlockShape as e:
            self.Error.invalid_block(e.args[0])

    self.Outputs.bindata.send(bin_data)
[ 1160 ]
def METHOD_NAME(losses, hmc_samples, hmcecs_samples, hmc_runtime, hmcecs_runtime):
    fig, ax = plt.subplots(2, 2)
    ax[0, 0].plot(losses, "r")
    ax[0, 0].set_title("SVI losses")
    ax[0, 0].set_ylabel("ELBO")

    if hmc_runtime > hmcecs_runtime:
        ax[0, 1].bar([0], hmc_runtime, label="hmc", color="b")
        ax[0, 1].bar([0], hmcecs_runtime, label="hmcecs", color="r")
    else:
        ax[0, 1].bar([0], hmcecs_runtime, label="hmcecs", color="r")
        ax[0, 1].bar([0], hmc_runtime, label="hmc", color="b")
    ax[0, 1].set_title("Runtime")
    ax[0, 1].set_ylabel("Seconds")
    ax[0, 1].legend()
    ax[0, 1].set_xticks([])

    ax[1, 0].plot(jnp.sort(hmc_samples["theta"].mean(0)), "or")
    ax[1, 0].plot(jnp.sort(hmcecs_samples["theta"].mean(0)), "b")
    ax[1, 0].set_title(r"$\mathrm{\mathbb{E}}[\theta]$")

    ax[1, 1].plot(jnp.sort(hmc_samples["theta"].var(0)), "or")
    ax[1, 1].plot(jnp.sort(hmcecs_samples["theta"].var(0)), "b")
    ax[1, 1].set_title(r"Var$[\theta]$")

    for a in ax[1, :]:
        a.set_xticks([])

    fig.tight_layout()
    fig.savefig("hmcecs_plot.pdf", bbox_inches="tight")
[ 2718, 1288 ]
def METHOD_NAME(self, *args):
    """Implements the dual cone of the second-order cone

    See Pg 85 of the MOSEK modelling cookbook for more information"""
    if args is None:
        return SOC(self.dual_variables[0], self.dual_variables[1], self.axis)
    else:
        # some assertions for verifying `args`
        def f(x):
            return x.shape

        args_shapes = list(map(f, args))
        instance_args_shapes = list(map(f, self.args))
        assert len(args) == len(self.args)
        assert args_shapes == instance_args_shapes
        return SOC(args[0], args[1], self.axis)
[ 2252, 5568 ]
def METHOD_NAME(sourceObj, xRange, yRange, y2Range):
    """See Plot documentation for content of events"""
    return {'event': 'limitsChanged',
            'source': id(sourceObj),
            'xdata': xRange,
            'ydata': yRange,
            'y2data': y2Range}
[ 123, 4355, 1180, 900 ]
def METHOD_NAME(self, pipe, amount):
    # keep track of newly inserted observations in redis
    pipe.METHOD_NAME(self.redis_key, amount)
    pipe.expire(self.redis_key, 172800)  # 2 days
[ 6000 ]
def METHOD_NAME(self):
    """
    Test something that expires before "now"
    """
    clock = task.Clock()
    am = AddrMap()
    am.scheduler = IReactorTime(clock)

    now = datetime.datetime.now() + datetime.timedelta(seconds=-10)
    nowutc = datetime.datetime.utcnow() + datetime.timedelta(seconds=-10)
    line = 'www.example.com 72.30.2.43 "%s" EXPIRES="%s"' % (now.strftime(self.fmt),
                                                             nowutc.strftime(self.fmt))

    am.update(line)
    self.assertTrue('www.example.com' in am.addr)

    # arguably we shouldn't even have put this in the map maybe,
    # but the reactor needs to iterate before our expiry callback
    # gets called (right away) which is simulated by the
    # clock.advance call
    clock.advance(0)
    self.assertTrue('www.example.com' not in am.addr)
[ 9, 5291, 2228 ]
def METHOD_NAME(self, *descendants: StrPath) -> "Traversable":
    """
    Return Traversable resolved with any descendants applied.

    Each descendant should be a path segment relative to self
    and each may contain multiple levels separated by
    ``posixpath.sep`` (``/``).
    """
[ 17912 ]
def METHOD_NAME(self, state):
    self.table.header().restoreState(QtCore.QByteArray(state))
[ 1032, 551 ]
def METHOD_NAME(message):
    return encode(json_dumps(message), constants.MAX_RESULT_LENGTH)
[ 1571 ]
def METHOD_NAME(self, table_name):
    # Delete mysql table if exists
    self.test_connection()
    sql = "drop table if exists " + table_name + ";"
    try:
        self.cursor.execute(sql)
        LOGGER.debug(f"MYSQL delete table:{table_name}")
    except Exception as e:
        LOGGER.error(f"MYSQL ERROR: {e} with sql: {sql}")
        sys.exit(1)
[ 34, 410 ]
def METHOD_NAME(x: 'int32[:]'):
    import numpy as np
    y = np.sign(x)
    return y
[ 877, 2481, 5790 ]
def METHOD_NAME(self, _question):
    return self.lda_classifier.classify(_question)
[ 515, 39 ]
def METHOD_NAME(self):
    if self.__res_col is None:
        self.__res_col = self._db.get_collection('resource')
    return self.__res_col
[ 191, 2618 ]
def METHOD_NAME():
    """
    Test output for default values if include_blank_cells is False.
    """
    assert (
        None
        not in io.xlsx_cells(
            wb,
            "pivot-notes",
            include_blank_cells=False,
            start_point="A1",
            end_point="G7",
        )["value"].tolist()
    )
[ 9, 235, 199, 2882, 383, 1168 ]
def METHOD_NAME():
    """
    Check if the pytest-xdist plugin is installed, providing parallel tests
    """
    # Check xdist exists without importing, otherwise pytest emits warnings
    from importlib.util import find_spec
    return find_spec('xdist') is not None
[ 2595, 220, 9424 ]
def METHOD_NAME(self, data, tags):
    if "boundary" in tags:
        return

    default = tags.get("name")
    ja = tags.get("name:ja")
    en = tags.get("name:en")

    if default or ja or en:
        if default:
            if (ja or en) and not (default == ja or default == en or
                                   (ja and en and default == "{0} ({1})".format(ja, en))):
                return {"class": 50604, "subclass": 0}
        elif (ja or en):
            return {"class": 50605, "subclass": 0,
                    "fix": [{"+": {"name": ja}}, {"+": {"name": en}}]}
    else:
        locales = map(lambda y: [{"+": {"name": tags[y]}}],
                      filter(lambda x: self.LocalName.match(x), tags.keys()))
        if locales:
            return {"class": 50606, "subclass": 0, "fix": locales}
[ 1716 ]
def METHOD_NAME(path, file_cache, logger):
    """Helper for locating a markdown file"""
    out = list()
    for filename in file_cache:
        if filename.endswith(path):
            out.append(filename)

    if len(out) > 1:
        # This should not be possible unless multiple content directories are included
        # in the file cache. In this case, it is possible to have more than one markdown file
        # match. For example, Distributions/index.md has a basic version in the framework that
        # is overridden by the detailed version in the stochastic tools module.
        msg = "Located multiple files for the desired markdown: {}:\n ".format(path)
        msg += '\n '.join(out)
        logger.log('log_duplicate_files', msg)

    if out:
        return out[0]
    return None
[ 416, 4165, 171 ]
async def METHOD_NAME(call, gate):
    response = await call()
    assert response.status == 200
    assert response.text == SUCCESS_RESOLVE
[ 9, 1217 ]
def METHOD_NAME(self, shared_state):
    # Initialize vertexai
    vertexai.init(
        project=e2e_base._PROJECT,
        location=e2e_base._LOCATION,
        staging_bucket=f"gs://{shared_state['staging_bucket_name']}",
    )

    # Prepare dataset
    dataset = load_iris()

    X, X_retrain, y, y_retrain = train_test_split(
        dataset.data, dataset.target, test_size=0.60, random_state=42
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.20, random_state=42
    )

    # Remote fit_transform on train dataset
    vertexai.preview.init(remote=True)

    transformer = StandardScaler()
    transformer.fit_transform.vertex.set_config(
        display_name=self._make_display_name("fit-transform"),
    )
    X_train = transformer.fit_transform(X_train)

    # Assert the right serializer is being used
    serializer = any_serializer.AnySerializer()
    assert (
        serializer._get_predefined_serializer(transformer.__class__.__mro__[-2])
        is serializers.SklearnEstimatorSerializer
    )

    # Remote transform on test dataset
    transformer.transform.vertex.set_config(
        display_name=self._make_display_name("transform"),
    )
    X_test = transformer.transform(X_test)

    # Assert the right serializer is being used
    serializer = any_serializer.AnySerializer()
    assert (
        serializer._get_predefined_serializer(transformer.__class__.__mro__[-2])
        is serializers.SklearnEstimatorSerializer
    )

    # Local transform on retrain data
    vertexai.preview.init(remote=False)
    X_retrain = transformer.transform(X_retrain)
    # Transform retrain dataset to pandas dataframe
    X_retrain_df = pd.DataFrame(X_retrain, columns=dataset.feature_names)
    y_retrain_df = pd.DataFrame(y_retrain, columns=["class"])

    # Remote training on sklearn
    vertexai.preview.init(remote=True)

    model = LogisticRegression(warm_start=True)
    model.fit.vertex.remote_config.display_name = self._make_display_name(
        "sklearn-training"
    )
    model.fit(X_train, y_train)

    # Assert the right serializer is being used
    serializer = any_serializer.AnySerializer()
    assert (
        serializer._get_predefined_serializer(model.__class__.__mro__[-2])
        is serializers.SklearnEstimatorSerializer
    )

    # Remote prediction on sklearn
    model.predict.vertex.remote_config.display_name = self._make_display_name(
        "sklearn-prediction"
    )
    model.predict(X_test)

    # Register trained model
    registered_model = vertexai.preview.register(model)
    shared_state["resources"] = [registered_model]

    # Load the registered model
    pulled_model = vertexai.preview.from_pretrained(
        model_name=registered_model.resource_name
    )

    # Retrain model with pandas df on Vertex
    pulled_model.fit(X_retrain_df, y_retrain_df)

    # Assert the right serializer is being used
    serializer = any_serializer.AnySerializer()
    assert (
        serializer._get_predefined_serializer(pulled_model.__class__.__mro__[-2])
        is serializers.SklearnEstimatorSerializer
    )
[ 9, 2437, 2046, 435 ]
def METHOD_NAME(self, inp='', maxsize=None):
    return self.send(inp), self.recv(maxsize), self.recv_err(maxsize)
[ 353, 1398 ]
def METHOD_NAME():
    duplicate_data = pd.DataFrame({'col1': [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                                   'col2': [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                                   'col3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]})
    check_obj = DataDuplicates()
    assert_that(check_obj.run(duplicate_data).value, equal_to(0))
[ 9, 365, 2968, 654, 1119 ]
async def METHOD_NAME(self, payload: dict, in_edge=None) -> Result:
    dot = self._get_dot_accessor(payload)
    template = DotTemplate()

    try:
        timeout = aiohttp.ClientTimeout(total=self.config.timeout)
        async with HttpClient(
            self.node.on_connection_error_repeat,
            [200, 201, 202, 203, 204],
            timeout=timeout
        ) as client:
            params = {
                "json": {
                    "content": template.render(self.config.message, dot),
                    "username": self.config.username if self.config.username and len(
                        self.config.username) > 0 else None
                }
            }
            async with client.request(
                method="POST",
                url=str(self.credentials.url),
                **params
            ) as response:
                # todo add headers and cookies
                result = {
                    "status": response.status
                }
                if response.status in [200, 201, 202, 203, 204]:
                    return Result(port="response", value=payload)
                else:
                    return Result(port="error", value=result)
    except ClientConnectorError as e:
        return Result(port="error", value=str(e))
    except asyncio.exceptions.TimeoutError:
        return Result(port="error", value="Discord webhook timed out.")
[ 22 ]
def METHOD_NAME(self):
    """Test width | height | size_in_pixel"""
    self.assertEqual(self._page1().size_in_pixel()[0], 77)
    self.assertEqual(self._page1().width_in_pixel(), 77)
    self.assertEqual(self._page1().size_in_pixel()[1], 66)
    self.assertEqual(self._page1().height_in_pixel(), 66)
    self.assertEqual(self._page1_90().size_in_pixel()[0], 66)
    self.assertEqual(self._page1_90().width_in_pixel(), 66)
    self.assertEqual(self._page1_90().size_in_pixel()[1], 77)
    self.assertEqual(self._page1_90().height_in_pixel(), 77)
    self.assertTrue(isinstance(self._page1().height_in_pixel(), int),
                    'height_in_pixel not an int')
    self.assertTrue(isinstance(self._page1().width_in_pixel(), int),
                    'width_in_pixel not an int')
[ 2580 ]
async def METHOD_NAME(
    from_date: date = Query(default=(datetime.utcnow() - timedelta(180)).date()),
    to_date: date = Query(default=datetime.utcnow().date()),
    conversation_step_threshold: int = Query(default=10, ge=2),
    collection: str = Depends(Authentication.authenticate_and_get_collection)
[ 14227, 3467 ]
def METHOD_NAME(self, **options):
    "Modify font attributes"
    if options:
        self._call("font", "config", self.name, *self._set(options))
    else:
        return self._mkdict(
            self._split(self._call("font", "config", self.name)))
[ 200 ]
def METHOD_NAME(self, tag, attributes=[]):
    # Write an empty XML tag with optional, unencoded, attributes.
    # This is a minor speed optimization for elements that don't
    # need encoding.
    for key, value in attributes:
        tag += ' %s="%s"' % (key, value)
    self.fh.write("<%s/>" % tag)
[ 399, 35, 82, 3635 ]
def METHOD_NAME(xyzs):
    axyz = np.ones((len(xyzs), 4))
    axyz[:, :3] = xyzs
    return axyz
[ 3725 ]
def METHOD_NAME(self):
    """
    Retrieves the high threshold temperature of thermal

    Returns:
        A float number, the high threshold temperature of thermal in Celsius
        up to nearest thousandth of one degree Celsius, e.g. 30.125
    """
    attr_path = HWMON_DIR + self.high_th_attr

    attr_rv = self.__get_attr_value(attr_path)
    if (attr_rv != 'ERR'):
        return float(attr_rv) / 1000
    else:
        return None
[ 19, 5020, 853 ]
def METHOD_NAME(cls, follower_id, object_id):
    '''Return a ModelFollowingModel object for the given follower_id and
    object_id, or None if no such follower exists.
    '''
    query = cls._get(follower_id, object_id)
    following = cls._filter_following_objects(query)
    if len(following) == 1:
        return following[0]
[ 19 ]
def METHOD_NAME(self, item):
    core.send_message_to_server(slskmessages.ItemRecommendations(item))
[ 377, 1024, 8892 ]
def METHOD_NAME():
    x = None

    def func():
        return x  # pragma: no cover

    return func
[ 93, 717, 41, 118 ]
def METHOD_NAME(collection):
    rng = np.random.default_rng(seed=19530)
    batch_count = 5
    for i in range(batch_count):
        entities = [
            [str(random.randint(NUM_ENTITIES * i, NUM_ENTITIES * (i + 1))) for ni in range(NUM_ENTITIES)],
            [int(ni % 100) for ni in range(NUM_ENTITIES)],
            [float(ni) for ni in range(NUM_ENTITIES)],
            rng.random((NUM_ENTITIES, DIM)),
        ]
        collection.insert(entities)
        collection.flush()
        print(f"Finish insert batch{i}, number of entities in Milvus: {collection.num_entities}")
[ 408, 365 ]
def METHOD_NAME(mocker):
    return mocker.patch("molecule.dependency.ansible_galaxy.AnsibleGalaxy.execute")
[ 1265, 4090, 12261 ]
def METHOD_NAME(self, config, document, entry, data):
    raise NotImplementedError
[ 109, 86 ]
def METHOD_NAME(self, option):
    return self._chainNegotiation(None, self.dont, option)
[ 3898, 357 ]
def METHOD_NAME(self):
    if self.uuid is not None:
        return self.uuid
    else:
        return ""
[ 19, 4977 ]
def METHOD_NAME(self, ids):
    if isinstance(ids, int):
        return self.decoder[ids]
    if isinstance(ids, torch.Tensor):
        ids = ids.cpu().tolist()
    return [self.decoder[idx] for idx in ids]
[ 197, 308, 24, 1735 ]
def METHOD_NAME(self):
    """Read the wavelength

    Returns:
        (float): Wavelength [Å].
    """
    return self.calculate_wavelength()
[ 19, 8925 ]
def METHOD_NAME(test, params, env):
    """
    Test command: virsh desc.

    This command allows to show or modify description or title of a domain.
    1). For running domain, get/set description&title with options.
    2). For shut off domain, get/set description&title with options.
    3). For persistent/transient domain, get/set description&title with options.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    options = params.get("desc_option", "")
    persistent_vm = params.get("persistent_vm", "yes")
    domain = params.get("domain", "name")
    if domain == "UUID":
        vm_name = vm.get_uuid()
    elif domain == "invalid_domain":
        vm_name = "domain_" + str(uuid.uuid1())
    elif domain == "invalid_uuid":
        vm_name = uuid.uuid1()

    def run_cmd(name, options, desc_str, status_error):
        """
        Run virsh desc command

        :return: cmd output
        """
        if "--edit" not in options:
            cmd_result = virsh.desc(name, options, desc_str, ignore_status=True, debug=True)
            output = cmd_result.stdout.strip()
            err = cmd_result.stderr.strip()
            status = cmd_result.exit_status
        else:
            logging.debug("Setting domain desc \"%s\" by --edit", desc_str)
            session = aexpect.ShellSession("sudo -s")
            try:
                session.sendline("virsh -c %s desc %s --edit" % (vm.connect_uri, name))
                session.sendline("dgg")
                session.sendline("dG")
                session.sendline(":%s/^$/" + desc_str + "/")
                session.send('\x1b')
                session.send('ZZ')
                match, text = session.read_until_any_line_matches(
                    [r"Domain description updated successfully"],
                    timeout=10, internal_timeout=1)
                session.close()
                if match == -1:
                    status = 0
                    output = "Domain description updated successfully"
                else:
                    status = 1
                    err = "virsh desc --edit fails"
            except Exception:
                test.fail("Fail to create session.")
        if status_error == "no" and status:
            test.fail(err)
        elif status_error == "yes" and status == 0:
            test.fail("Expect fail, but run successfully.")
        return output

    def vm_state_switch():
        """
        Switch the vm state
        """
        if vm.is_dead():
            vm.start()
        if vm.is_alive():
            vm.destroy()

    def desc_check(name, desc_str, options):
        """
        Check the domain's description or title
        """
        ret = False
        state_switch = False
        if options.count("--config") and vm.is_persistent():
            state_switch = True
        # If both --live and --config are specified, the --config
        # option takes precedence on getting the current description
        # and both live configuration and config are updated while
        # setting the description.
        # This situation just happens vm is alive
        if options.count("--config") and options.count("--live"):
            # Just test options exclude --config (--live [--title])
            desc_check(name, desc_str, options.replace("--config", ""))
            # Just test options exclude --live (--config [--title])
            desc_check(name, desc_str, options.replace("--live", ""))
            ret = True
        else:
            if state_switch:
                vm_state_switch()
            # --new-desc and --edit option should not appear in check
            if options.count("--edit") or options.count("--new-desc"):
                output = run_cmd(name, "", "", "no")
            else:
                output = run_cmd(name, options, "", "no")
            if desc_str == output:
                logging.debug("Domain desc check successfully.")
                ret = True
            else:
                test.fail("Expect fail, but run successfully.")
        return ret

    def run_test():
        """
        Get/Set vm desc by running virsh desc command.
        """
        status_error = params.get("status_error", "no")
        desc_str = params.get("desc_str", "")

        # Test 1: get vm desc
        if "--edit" not in options:
            if "--new-desc" in options:
                run_cmd(vm_name, options, "", "yes")
            else:
                run_cmd(vm_name, options, "", status_error)
        # Test 2: set vm desc
        if options.count("--live") and vm.state() == "shut off":
            status_error = "yes"
        if len(desc_str) == 0 and status_error == "no":
            desc_str = "New Description/title for the %s vm" % vm.state()
            logging.debug("Use the default desc message: %s", desc_str)
        run_cmd(vm_name, options, desc_str, status_error)
        if status_error == "no":
            desc_check(vm_name, desc_str, options)

    # Prepare transient/persistent vm
    original_xml = vm.backup_xml()
    if persistent_vm == "no" and vm.is_persistent():
        virsh.undefine(vm.name, options='--nvram')
    elif persistent_vm == "yes" and not vm.is_persistent():
        vm.define(original_xml)

    try:
        if vm.is_dead():
            vm.start()
        if domain == "ID":
            vm_name = vm.get_id()
        run_test()

        # Recover the vm and shutoff it
        if persistent_vm == "yes" and domain != "ID":
            vm.define(original_xml)
            vm.destroy()
            run_test()
    finally:
        vm.destroy(False)
        virsh.define(original_xml)
        os.remove(original_xml)
[ 22 ]
def METHOD_NAME(self) -> Optional[SqlRequestId]:
    """The value of the request ID tag."""
    tag_value = self.tag_dict.get(SqlRequestTagKey.REQUEST_ID_KEY.value)
    if tag_value:
        return SqlRequestId(tag_value)
    return None
[ 377, 147 ]
def METHOD_NAME(self) -> Optional[Sequence['outputs.AvailableContactsResponse']]:
    """
    A list of available contacts.
    """
    return pulumi.get(self, "value")
[ 99 ]
async def METHOD_NAME(self):
    """Test turning on the siren with various parameters"""
    async with assert_device_properties_set(
        self.subject._device, {TONE_DP: "alarm_light"}
    ):
        await self.subject.async_turn_on(tone="light")
[ 9, 0, 24, 4322 ]
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
                scope: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAclResult]:
    """
    Retrieves the summary of a WAFv2 Web ACL.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    example = aws.wafv2.get_web_acl(name="some-web-acl",
        scope="REGIONAL")
    ```

    :param str name: Name of the WAFv2 Web ACL.
    :param str scope: Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
    """
    ...
[ 19, 2412, 1918, 146 ]
def METHOD_NAME():
    """wizard: Would you like to setup a new Bitcoin node or connect to an existing one?
    (Connect existing node / Setup a new node)
    """
    app.specter.setup_status["stage"] = "node_type"
    return render_template(
        "setup/node_type.jinja",
        bitcoind_installed=os.path.isfile(app.specter.bitcoind_path),
        specter=app.specter,
        rand=rand,
    )
[ 1716, 44 ]