text: stringlengths 15 to 7.82k
ids: sequencelengths 1 to 7
def METHOD_NAME(key: str, default=None) -> float: env_var = Environment._get(key, default) return float(env_var)
[ 19, 1819 ]
def METHOD_NAME(self, batch, names): return [batch[name] for name in names]
[ 24, 245 ]
def METHOD_NAME(self): for attr in ['SWAP', 'SBSIZE', 'NPTS']: with contextlib.suppress(AttributeError): self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
[ 9, 7860, 7861 ]
def METHOD_NAME(self) -> None: self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME( self, resource_group_name: str, namespace_name: str, **kwargs: Any ) -> _models.PrivateLinkResourcesListResult: """Gets lists of resources that supports Privatelinks. .. seealso:: - https://msdn.microsoft.com/en-us/library/azure/mt639379.aspx :param resource_group_name: Name of the Resource group within the Azure subscription. Required. :type resource_group_name: str :param namespace_name: The namespace name. Required. :type namespace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PrivateLinkResourcesListResult or the result of cls(response) :rtype: ~azure.mgmt.servicebus.v2021_11_01.models.PrivateLinkResourcesListResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2021-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-11-01")) cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None) request = build_get_request( resource_group_name=resource_group_name, namespace_name=namespace_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.METHOD_NAME.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
[ 19 ]
def METHOD_NAME( self, files: List[str], file_path: str, file_name: str, results_filter: Optional[ObjectType], ) -> None: filtered = ["__pycache__", "__init__.py"] is_group = self.is_group(file_path) is_config = self.is_config(file_path) if ( is_group and (results_filter is None or results_filter == ObjectType.GROUP) and file_name not in filtered ): files.append(file_name) if ( is_config and file_name not in filtered and (results_filter is None or results_filter == ObjectType.CONFIG) ): # strip extension last_dot = file_name.rfind(".") if last_dot != -1: file_name = file_name[0:last_dot] files.append(file_name)
[ 245, 238, 1571 ]
def METHOD_NAME(address): users = [ User( email=f"john.doe.{i}@example.com", is_active=True, default_billing_address=address.get_copy(), default_shipping_address=address.get_copy(), first_name=f"John_{i}", last_name=f"Doe_{i}", ) for i in range(ORDER_COUNT_IN_BENCHMARKS) ] return User.objects.bulk_create(users)
[ 3467, 43, 852, 3906 ]
def METHOD_NAME(): client = boto3.client("appsync", region_name="ap-southeast-1") api_id = client.create_graphql_api(name="api1", authenticationType="API_KEY")[ "graphqlApi" ]["apiId"] resp = client.get_graphql_api(apiId=api_id) assert "graphqlApi" in resp api = resp["graphqlApi"] assert api["name"] == "api1" assert "apiId" in api assert api["authenticationType"] == "API_KEY"
[ 9, 19, 4487, 58 ]
def METHOD_NAME( model: PipetteModel, output: pc.PipetteModelVersionType ) -> None: assert output == ps.convert_pipette_model(model)
[ 9, 197, 5302, 578 ]
def METHOD_NAME(a, b, transa, transb): if transa: a = a.transpose(0, 2, 1) if not transb: b = b.transpose(0, 2, 1) return tvm.topi.testing.batch_matmul(a, b)
[ 19, 2028 ]
def METHOD_NAME(self): return True
[ 220, 343, 112, 2770 ]
def METHOD_NAME(expected, create_roster_files): """Test that minion files in the directory roster match and render.""" expected = expected["basic"] ret = dir_.targets(".*basic$", "pcre", saltenv="") _test_match(ret, expected)
[ 9, 756, 2647 ]
def METHOD_NAME(self, _ctxsw_re=...): ...
[ 181, 3890, 6612 ]
def METHOD_NAME(other): controller.call_ethplorer(other)
[ 9, 128, -1 ]
def METHOD_NAME(request, tmpdir): """ Fixture that prepares a simulation directory with a populated west.cfg file. """ test_dir = str(tmpdir) os.chdir(test_dir) copy_ref(test_dir) copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.cfg'), CFG_FILENAME) copyfile(os.path.join(REFERENCE_PATH, 'west_init_ref.h5'), "west_init_ref.h5") request.cls.cfg_filepath = CFG_FILENAME request.cls.h5_filepath = H5_FILENAME request.cls.ref_h5_filepath = 'west_init_ref.h5' os.environ['WEST_SIM_ROOT'] = test_dir westpa.rc = westpa.core._rc.WESTRC() request.addfinalizer(clear_state)
[ 2360, 2610 ]
def METHOD_NAME(msn, user_agent, ip, data): for report in data: events = report.pop('events') event_uuid = uuid.uuid4() for event_index, (created_at, payload) in enumerate(events): # event type try: failed = int(payload["status"]) != 0 except (KeyError, ValueError): failed = True payload_type = payload.get("type") if payload_type == "install": if failed: event_cls = MunkiInstallFailedEvent else: event_cls = MunkiInstallEvent elif payload_type == "removal": if failed: event_cls = MunkiRemovalFailedEvent else: event_cls = MunkiRemovalEvent elif payload_type == "warning": event_cls = MunkiWarningEvent elif payload_type == "error": event_cls = MunkiErrorEvent elif payload_type == "start": event_cls = MunkiStartEvent else: logger.error("Unknown munki event payload type %s", payload_type) continue # build event metadata = EventMetadata( uuid=event_uuid, index=event_index, machine_serial_number=msn, request=EventRequest(user_agent, ip), created_at=parser.parse(created_at), incident_updates=payload.pop("incident_updates", []), ) payload.update(report) event = event_cls(metadata, payload) event.post()
[ 72, 15105, 239 ]
async def METHOD_NAME( self, resource_group_name: str, server_name: str, **kwargs: Any ) -> "_models.RecoverableServerResource": """Gets a recoverable MariaDB Server. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param server_name: The name of the server. :type server_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: RecoverableServerResource, or the result of cls(response) :rtype: ~azure.mgmt.rdbms.mariadb.models.RecoverableServerResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RecoverableServerResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" accept = "application/json" # Construct URL url = self.METHOD_NAME.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'serverName': self._serialize.url("server_name", server_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.METHOD_NAME(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RecoverableServerResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
[ 19 ]
def METHOD_NAME(static_exc, exc_args): exc, static_args, locinfo = cloudpickle.loads(static_exc) real_args = [] exc_args_iter = iter(exc_args) for arg in static_args: if isinstance(arg, ir.Value): real_args.append(next(exc_args_iter)) else: real_args.append(arg) return (exc, tuple(real_args), locinfo)
[ 1888, 56, -1, 1755 ]
def METHOD_NAME(self): """Enables to get outputs of the operator by evaluating it Returns -------- outputs : OutputsChangeLocation """ return super().METHOD_NAME
[ 141 ]
def METHOD_NAME(self, captchaType, url, siteKey, captchaParams): taskID = None if not captchaParams.get('clientKey'): raise CaptchaParameter( "CapMonster: Missing clientKey parameter." ) self.clientKey = captchaParams.get('clientKey') if captchaParams.get('proxy') and not captchaParams.get('no_proxy'): hostParsed = urlparse(captchaParams.get('proxy', {}).get('https')) if not hostParsed.scheme: raise CaptchaParameter('Cannot parse proxy correctly, bad scheme') if not hostParsed.netloc: raise CaptchaParameter('Cannot parse proxy correctly, bad netloc') ports = { 'http': 80, 'https': 443 } self.proxy = { 'proxyType': hostParsed.scheme, 'proxyAddress': hostParsed.hostname, 'proxyPort': hostParsed.port if hostParsed.port else ports[self.proxy['proxyType']], 'proxyLogin': hostParsed.username, 'proxyPassword': hostParsed.password, } else: self.proxy = None try: taskID = self.requestSolve(captchaType, url, siteKey) return self.requestJob(taskID) except polling2.TimeoutException: try: if taskID: self.reportJob(taskID) except polling2.TimeoutException: raise CaptchaTimeout( "CapMonster: Captcha solve took to long and also failed " f"reporting the task with task id {taskID}." ) raise CaptchaTimeout( "CapMonster: Captcha solve took to long to execute " f"task id {taskID}, aborting." )
[ 19, 2244, 3485 ]
def METHOD_NAME(self): nucs = 'TTMTTCNTTTTA' expected_aminos = '[FL]F[FILV]L' stats = {} expected_stats = dict(length=4, ambiguous=2, max_aminos=4) aminos = translate(nucs, stats=stats, list_ambiguous=True) self.assertEqual(expected_aminos, aminos) self.assertEqual(expected_stats, stats)
[ 9, 68, 9624 ]
def METHOD_NAME(self): self.assertTrue(issubclass(MagicMock, Mock)) self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock))
[ 9, 6801 ]
def METHOD_NAME(self):
[ 9, 1337, 623, 1097, 130, 259, 604 ]
def METHOD_NAME(doc): basic_component, hra_component = frappe.db.get_value( "Company", doc.company, ["basic_component", "hra_component"] ) if not (basic_component and hra_component): frappe.throw( _("Please set Basic and HRA component in Company {0}").format( get_link_to_form("Company", doc.company) ) ) annual_exemption = monthly_exemption = hra_amount = basic_amount = 0 if hra_component and basic_component: assignments = get_salary_assignments(doc.employee, doc.payroll_period) if not assignments and doc.docstatus == 1: frappe.throw( _("Salary Structure must be submitted before submission of {0}").format(doc.doctype) ) assignment_dates = [assignment.from_date for assignment in assignments] for idx, assignment in enumerate(assignments): if has_hra_component(assignment.salary_structure, hra_component): basic_salary_amt, hra_salary_amt = get_component_amt_from_salary_slip( doc.employee, assignment.salary_structure, basic_component, hra_component, assignment.from_date, ) to_date = get_end_date_for_assignment(assignment_dates, idx, doc.payroll_period) frequency = frappe.get_value( "Salary Structure", assignment.salary_structure, "payroll_frequency" ) basic_amount += get_component_pay(frequency, basic_salary_amt, assignment.from_date, to_date) hra_amount += get_component_pay(frequency, hra_salary_amt, assignment.from_date, to_date) if hra_amount: if doc.monthly_house_rent: annual_exemption = calculate_hra_exemption( assignment.salary_structure, basic_amount, hra_amount, doc.monthly_house_rent, doc.rented_in_metro_city, ) if annual_exemption > 0: monthly_exemption = annual_exemption / 12 else: annual_exemption = 0 return frappe._dict( { "hra_amount": hra_amount, "annual_exemption": annual_exemption, "monthly_exemption": monthly_exemption, } )
[ 1593, 14173, 11014, -1, 16343 ]
async def METHOD_NAME(self): response = await self.get_as_heif("/unsafe/animated.gif") expect(response.code).to_equal(200) expect(response.body).to_be_gif()
[ 9, 427, 130, 197, 5808, -1, 24 ]
def METHOD_NAME(name): # Save these images somewhat larger than a regular test case image # since the images contain a lot of text. swa = SaveWindowAttributes() swa.width = 500 swa.height = 500 swa.screenCapture = 0 Test(name, swa)
[ 73, 9, 660 ]
def METHOD_NAME(self, batch_size, context_dim):
[ 9, 385, 5862, 86 ]
def METHOD_NAME(self): """Registering an external key/value will allow it to be included in the list returned by config_option_list action.""" key = "ckanext.example_iconfigurer.test_conf" value = "Test value" params = {key: value} # add registered external value helpers.call_action("config_option_update", **params) option_list = helpers.call_action("config_option_list") assert key in option_list
[ 9, 86, 3024, 751, 99, 623, 245 ]
def METHOD_NAME(self):
[ 1668, 1413, 3402 ]
def METHOD_NAME(cmd, client, webhook_name, registry_name, resource_group_name=None, parameters=None): resource_group_name = get_resource_group_name_by_registry_name( cmd.cli_ctx, registry_name, resource_group_name) return client.begin_update(resource_group_name, registry_name, webhook_name, parameters)
[ 2299, 12, 86, 0 ]
def METHOD_NAME(): tolerances = { 'multiquadric': 0.1, 'inverse multiquadric': 0.15, 'gaussian': 0.15, 'cubic': 0.15, 'quintic': 0.1, 'thin-plate': 0.15, 'linear': 0.2 } for function in FUNCTIONS: check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
[ 9, -1, 16349 ]
def METHOD_NAME(self): return tuple(self._normalize_bounds(self._src.bounds))
[ 2739 ]
def METHOD_NAME(self, user, *args, **kwargs): return self.get_queryset().filter( *args, organizationmembership__user=user, organizationmembership__status=OrganizationMembership.STATUS.ACTIVE, **kwargs )
[ 19, 43, 21 ]
def METHOD_NAME(self):
[ 9, 2654, 0, 116 ]
def METHOD_NAME(element): x1 = float(element.get("x1")) y1 = float(element.get("y1")) x2 = float(element.get("x2")) y2 = float(element.get("y2")) fill = Color(get_style(element, "fill")) stroke_weight = get_style(element, "stroke-width") stroke = Color(get_style(element, "stroke")) stroke_cap = get_style(element, "stroke-cap") return PShape( vertices=[(x1, y1), (x2, y2)], fill_color=fill, stroke_weight=stroke_weight, stroke_color=stroke, stroke_cap=stroke_cap, stroke_join=default_values["stroke-join"], )
[ 214, 534 ]
def METHOD_NAME(self) -> str: return pulumi.get(self, "parent")
[ 935 ]
def METHOD_NAME(self): logger.debug("Ensuring dependencies.") dependencies = ["docker", "git"] failed_dependencies = [] for dependency in dependencies: return_code = shell.run(["which", dependency], check=False).returncode if return_code: failed_dependencies.append(dependency) if failed_dependencies: raise EnvironmentError( f"Dependencies missing: {', '.join(failed_dependencies)}" )
[ 602, 2410, 1255 ]
def METHOD_NAME( request, salt_factories, host_docker_network_ip_address, state_tree, pillar_tree, ): master_id = random_string("master-compat-", uppercase=False) root_dir = salt_factories.get_root_dir_for_daemon(master_id) conf_dir = root_dir / "conf" conf_dir.mkdir(exist_ok=True) config_defaults = { "root_dir": str(root_dir), "transport": request.config.getoption("--transport"), } config_overrides = { "interface": host_docker_network_ip_address, "log_level_logfile": "quiet", # We also want to scrutinize the key acceptance "open_mode": False, } # We need to copy the extension modules into the new master root_dir or # it will be prefixed by it extension_modules_path = str(root_dir / "extension_modules") if not os.path.exists(extension_modules_path): shutil.copytree( os.path.join(RUNTIME_VARS.FILES, "extension_modules"), extension_modules_path, ) config_overrides.update( { "extension_modules": extension_modules_path, "file_roots": {"base": [str(state_tree)]}, "pillar_roots": {"base": [str(pillar_tree)]}, } ) factory = salt_factories.salt_master_daemon( master_id, defaults=config_defaults, overrides=config_overrides, ) with factory.started(): yield factory
[ 2229, 2614 ]
def METHOD_NAME(name, typ, body): new_name = 'newname' if not isinstance(body, bytes): body = body.encode('UTF-8') return File(new_name, '', body, content_type=typ)
[ 343, 1276, 1155 ]
def METHOD_NAME(): received = [] class TestCollector(cirq.Collector): def next_job(self): q = cirq.LineQubit(0) circuit = cirq.Circuit(cirq.H(q), cirq.measure(q)) return cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag='test') def on_job_result(self, job, result): received.append(job.tag) TestCollector().collect(sampler=cirq.Simulator(), max_total_samples=100, concurrency=5) assert received == ['test'] * 10
[ 9, 1444 ]
def METHOD_NAME(self, message): if self.DEBUG: print('[DEBUG] - ' + str(datetime.datetime.now()) + ' - ' + message)
[ 390 ]
def METHOD_NAME(value): return isinstance(value, collections.abc.Sequence) and not isinstance(value, str)
[ 137, 256, 3, 771 ]
def METHOD_NAME(nodes: List[Node], identifier: str) -> bool: node = nodes[0] try: provider_id = node.spec.providerID return identifier in provider_id except (AttributeError, TypeError): # is not aks, field is optional so could be missing return False
[ 137, 3, 623, 2059, 2275 ]
def METHOD_NAME(self, timeout: Optional[float] = None): logger.info("Stopping Bitcoin mining network traffic simulator") self._send_bitcoin_mining_request_periodically.METHOD_NAME(timeout)
[ 631 ]
def METHOD_NAME(self, refresh_token): # noqa: ANN001 """Set the refresh token in the database. This function will overwrite an existing value if the corresponding ``key`` already exists. """ self._connection.execute( "REPLACE INTO tokens VALUES (?, ?, datetime('now'))", (self.key, refresh_token), ) self._connection.commit()
[ 0 ]
def METHOD_NAME(self): if self.is_file and self.extension.lower() in DOCUMENT_MIMETYPE_MAP: return DOCUMENT_MIMETYPE_MAP[self.extension.lower()] return None
[ 1526, 44 ]
def METHOD_NAME(self, s): # type: (str) -> value_t """ Raises exception on error? - Can parse either J8 or JSON strings """ # TODO: feed it to lexer first, then parser return None
[ 214 ]
def METHOD_NAME(): # as we don't use session from flask, then we save into user registry, at least for now if not app.auth.authorized([], 'global_preferences', 'POST'): return app.auth.authenticate() secrets = request.files.get('secretsFile', None) if secrets is None: return api_error('Please provide your youtube credentials', 400) secrets.seek(0) file_content = secrets.read() yt_data = json.loads(bytes2string(file_content)) if 'web' not in yt_data: return api_error('OAuth project has to be configured as web in google console', 400) # let's save secrets file content in db for future usage global_serv = get_resource_service('global_preferences') global_serv.save_preference(YT_KEY, yt_data) redirect_uri = flask.url_for( 'video_upload.oauth2callback', _external=True, _scheme=SCHEME) flow = Flow.from_client_config( yt_data, scopes=SCOPES, redirect_uri=redirect_uri) auth_url, _ = flow.authorization_url( prompt='consent', access_type='offline', include_granted_scopes='true') return make_response(auth_url, 200)
[ 19, 1920, 466 ]
def METHOD_NAME(config): p = ESRIServiceProvider(config) results = p.query() assert results['features'][0]['id'] == 1 assert results['numberReturned'] == 10 results = p.query(limit=50) assert results['numberReturned'] == 50 results = p.query(offset=10) assert results['features'][0]['id'] == 11 assert results['numberReturned'] == 10 results = p.query(limit=10) assert len(results['features']) == 10 assert results['numberMatched'] == 406 results = p.query(limit=10001, resulttype='hits') assert results['numberMatched'] == 406
[ 9, 539 ]
def METHOD_NAME(directory): """Delete a directory in a filesystem. Args: directory: Full path to a directory supported by Beam filesystems (e.g. "gs://mybucket/mydir/", "s3://...", ...) """ filesystems.FileSystems.delete([directory])
[ 34, 2851 ]
def METHOD_NAME(self) -> Optional[int]: if not self.has_more(): return None ctrl = self.peek() if ctrl == "Z": self.vidx += 1 return 0 if ctrl == "-" or ctrl == "+": self.vidx += 1 offset = self.parse_digits(2) * 60 self.skip(":") offset += self.parse_digits(2) offset *= -1 if ctrl == "-" else 1 return offset raise ValueError()
[ 214, 1540 ]
def METHOD_NAME(x: TensorType, mean: TensorType, scale: TensorType, df: TensorType) -> tf.Tensor: df = to_default_float(df) const = ( tf.math.lgamma((df + 1.0) * 0.5) - tf.math.lgamma(df * 0.5) - 0.5 * (tf.math.log(tf.square(scale)) + tf.math.log(df) + np.log(np.pi)) ) return const - 0.5 * (df + 1.0) * tf.math.log( 1.0 + (1.0 / df) * (tf.square((x - mean) / scale)) )
[ 1316, 791 ]
def METHOD_NAME(self): examinee = notification.cfg_from_callback callback_file = os.path.join(self.tmp_dir.name, 'call_me') with open(callback_file, 'w') as f: f.write('#!/usr/bin/env sh\necho "foo: 42">"${NOTIFY_CFG_OUT}"') os.chmod(callback_file, stat.S_IEXEC | stat.S_IREAD) assert examinee( repo_root=self.tmp_dir.name, callback_path=callback_file, effective_cfg_file='no-file-yet', ) == {'foo':42}
[ 9, 2610, 280, 1076 ]
def METHOD_NAME(empty_index, small_movies): small_movies[0]["_vectors"] = [0.1, 0.2] def index_maker(index_uid=common.INDEX_UID, documents=small_movies): index = empty_index(index_uid) task = index.add_documents(documents) index.wait_for_task(task.task_uid) return index return index_maker
[ 724, 41, 7510, 61, 1742 ]
def METHOD_NAME(self) -> Optional[OpResIdent]: if len(self._disabled_res_ident_list) == 0: return None ident = self._disabled_res_ident_list.popleft() LOG.debug(f'Recheck resource {ident}') self._checked_res_ident_set.add(ident) self._commit_stat() return ident
[ 19, 1295, 191 ]
def METHOD_NAME(): return [APIStates.online, APIStates.offline]
[ 1019, 4085 ]
def METHOD_NAME(self, port_num, lpmode): # Check for invalid port_num if port_num < self.port_start or port_num > self.port_end: return False try: lpmode_val_device_file = self.BASE_VAL_PATH.format( port_num + self.LP_GPIO_BASE) val_file = open(lpmode_val_device_file, "w") except IOError as e: print("Error: unable to open file: %s" % str(e)) return False val_file.write("1" if lpmode is True else "0") val_file.close() return True
[ 0, 3420, 1928, 854 ]
def METHOD_NAME(self, ignore_results_formatting=False, ignore_metric_reference=False): self._internal_module.print_metrics(ignore_results_formatting, ignore_metric_reference)
[ 38, 1097, 51 ]
def METHOD_NAME(): args = parser.parse_args() if not args.checkpoint and not args.pretrained: args.pretrained = True amp_autocast = suppress # do nothing if args.amp: if not has_native_amp: print("Native Torch AMP is not available (requires torch >= 1.6), using FP32.") else: amp_autocast = torch.cuda.amp.autocast # create model model = geffnet.create_model( args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, scriptable=args.torchscript) if args.channels_last: model = model.to(memory_format=torch.channels_last) if args.torchscript: torch.jit.optimized_execution(True) model = torch.jit.script(model) print('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))) data_config = resolve_data_config(model, args) criterion = nn.CrossEntropyLoss() if not args.no_cuda: if args.num_gpu > 1: model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() else: model = model.cuda() criterion = criterion.cuda() loader = create_loader( Dataset(args.data, load_bytes=args.tf_preprocessing), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=not args.no_cuda, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct'], tensorflow_preprocessing=args.tf_preprocessing) batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() model.eval() end = time.time() with torch.no_grad(): for i, (input, target) in enumerate(loader): if not args.no_cuda: target = target.cuda() input = input.cuda() if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) # compute output with amp_autocast(): output = model(input) loss = criterion(output, target) # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) losses.update(loss.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: print('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, loss=losses, top1=top1, top5=top5)) print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format( top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg))
[ 57 ]
def METHOD_NAME(registry_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, task_run_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTaskRunResult]: """ Gets the detailed information for a given task run. :param str registry_name: The name of the container registry. :param str resource_group_name: The name of the resource group to which the container registry belongs. :param str task_run_name: The name of the task run. """ ...
[ 19, 758, 22, 146 ]
def METHOD_NAME(ent): try: return valid_ents_ordered.index(ent) except ValueError: return len(valid_ents_ordered)
[ 266, 852, 41, 4975, 679 ]
def METHOD_NAME(self): self.assertEqual(dis.opmap["STOP_CODE"], 0) self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst) self.assertIn(dis.opmap["STORE_NAME"], dis.hasname)
[ 9, 9318 ]
def METHOD_NAME(self, secret_data): nodes_with_system_id = {} secret_config = utils.get_secret_config(secret_data) nodes_info = self.get_nodes_info() for node_info in nodes_info: nodes_with_system_id[node_info.name] = self._get_system_id_for_node(node_info, secret_config) return nodes_with_system_id
[ 567, 480, 41, 112, 147 ]
def METHOD_NAME(self, line): # Possible formats: # goroutine 0 [idle]: # goroutine 1 [chan receive, 30 minutes]: # goroutine 17 [syscall, 31 minutes, locked to thread]: # goroutine 34 [syscall, locked to thread]: fieldnum = 0 warnings = [] line = line.strip() try: fieldnum = 1 # 'goroutine' goword, line = line.split(' ', 1) if goword.lower() != 'goroutine': raise ParseError('First word ({0}) is not \'goroutine\'!'.format(goword)) fieldnum = 2 # goroutine ID goId, line = line.split(' ', 1) if not goId.isdigit(): raise ParseError('Expected integer goroutine ID, got: {0}'.format(goId)) self.id = int(goId) fieldnum = 3 # State, wait time, etc. # Pull the state fields (the stuff between [ and ]) out from the rest of the line leftBraceIndex = line.find('[') rightBraceIndex = line.find(']') if leftBraceIndex == -1 or rightBraceIndex == -1 or rightBraceIndex < (leftBraceIndex + 2): raise ParseError('State info not found (or is empty)!') if leftBraceIndex > 0: warnings.append(self.formatMessage('Extra fields found before state info: {0}'.format(line[0:leftBraceIndex]), fieldnum)) stateFields = line[leftBraceIndex+1:rightBraceIndex].split(',') line = line[rightBraceIndex+1:] # Now process each field for i in range(len(stateFields)): field = stateFields[i].strip() if i == 0: # First field is always state self.state = field elif field == 'locked to thread': self.lockedtothread = True elif re.match('^[0-9]+ minutes$', field): # Wait time waittime, minutes = field.split(' ', 1) if not waittime.isdigit(): raise ParseError('Expected integer wait time, got: {0}'.format(waittime[0])) self.waittime = int(waittime) else: warnings.append(self.formatMessage('Unknown field found in state info: {0}'.format(field), fieldnum)) fieldnum += 1 fieldnum = 0 # Done processing fields # Verify no extra fields found on line if line is not None and line != ':': warnings.append(self.formatMessage('Extra fields found: ''{0}'''.format(line), fieldnum)) return warnings except Exception as exc: raiseWithModifiedMessage(sys.exc_info(), self.formatMessage(str(exc), fieldnum))
[ 214, 534 ]
def METHOD_NAME(): x = flow.nn.Parameter(flow.Tensor(init_value, device=flow.device(device))) optim_kwargs = { "params": [x], "lr": learning_rate, "betas": betas, "eps": eps, "weight_decay": weight_decay, "adam_w_mode": adam_w_mode, "do_bias_correction": do_bias_correction, "contiguous_params": contiguous_params, } if clip_grad_max_norm != -1: optim_kwargs["clip_grad_max_norm"] = clip_grad_max_norm optim_kwargs["clip_grad_norm_type"] = clip_grad_norm_type lamb = flow.optim.LAMB([optim_kwargs]) def train_one_iter(grad): grad_tensor = flow.tensor( grad, dtype=flow.float32, requires_grad=False, device=flow.device(device), ) loss = flow.sum(x * grad_tensor) loss.backward() if clip_grad_max_norm != -1: lamb.clip_grad() lamb.step() lamb.zero_grad() for i in range(train_iters): train_one_iter(random_grad_seq[i]) if i == reload_state_step: state_dict = lamb.state_dict() lamb = flow.optim.LAMB([optim_kwargs]) if save_load_by_pickle: with tempfile.NamedTemporaryFile() as f: flow.save(state_dict, f.name) state_dict = flow.load(f.name) lamb.load_state_dict(state_dict) return x
[ 849, 604, 4080 ]
def METHOD_NAME(halcomp,widgets,paths): return [HandlerClass(halcomp,widgets,paths)]
[ 19, 376 ]
def METHOD_NAME(test_case): for i in range(5): _test_bce_loss_grad_grad_impl(test_case, with_logits=True)
[ 9, 9538, 41, 9016, 1572, 140, 140 ]
def METHOD_NAME(self): # Ensure that an error is raised if the In wrapped is used to wrap # a shared variable a = shared(1.0) a_wrapped = In(a, update=a + 1) with pytest.raises(TypeError): function([a_wrapped])
[ 9, 623, 1644, 1210 ]
def METHOD_NAME(self, buf): string = re_vt100.sub("", buf.decode("utf-8", errors="replace")) string = string.replace("\r", "␍") string = string.replace("\n", "␤") string = string.replace("\b", "␈") string = string.replace("\a", "␇") string = string.replace("\v", "␋") string = string.replace("\f", "␌") return string
[ -1, 369, 3630, 3130 ]
def METHOD_NAME(self):
[ 9, 7320, 14012, 41, 10263, 1461 ]
def METHOD_NAME(pnode): ''' Get the root node for the path tree which contains pnode. Example: root = getRootNode(branchnode) ''' ret = pnode while pnode[0] is not None: pnode = pnode[0] return pnode
[ 19, 1563, 1716 ]
def METHOD_NAME(): data = np.array([[], []]) with pytest.raises(ValueError): EPSFModel(data) data = np.ones((5, 5), dtype=float) data[2, 2] = np.inf with pytest.raises(ValueError, match='must be finite'): EPSFModel(data) data[2, 2] = np.nan with pytest.raises(ValueError, match='must be finite'): EPSFModel(data, flux=None) data[2, 2] = 1 for oversampling in [-1, [-2, 4], (1, 4, 8), ((1, 2), (3, 4)), np.ones((2, 2, 2)), 2.1, np.nan, (1, np.inf)]: with pytest.raises(ValueError): EPSFModel(data, oversampling=oversampling) origin = (1, 2, 3) with pytest.raises(TypeError): EPSFModel(data, origin=origin)
[ 9, -1, 1461 ]
def METHOD_NAME(): tol = 10 ** (-6) rng = np.random.default_rng(utt.fetch_seed()) x = matrix() radius_bound = spectral_radius_bound(x, 5) f = aesara.function([x], radius_bound) shp = (3, 4) m = rng.random(shp) m = np.cov(m).astype(config.floatX) radius_bound_aesara = f(m) # test the approximation mm = m for i in range(5): mm = np.dot(mm, mm) radius_bound_numpy = np.trace(mm) ** (2 ** (-5)) assert abs(radius_bound_numpy - radius_bound_aesara) < tol # test the bound eigen_val = numpy.linalg.eig(m) assert (eigen_val[0].max() - radius_bound_aesara) < tol # test type errors xx = vector() ok = False try: spectral_radius_bound(xx, 5) except TypeError: ok = True assert ok ok = False try: spectral_radius_bound(x, 5.0) except TypeError: ok = True assert ok # test value error ok = False try: spectral_radius_bound(x, -5) except ValueError: ok = True assert ok
[ 9, 5050, 3662, 4432 ]
def METHOD_NAME(self, x):
[ 226, 1030 ]
def METHOD_NAME(self): settings = self._create_importantpagesgenericsetting_object() # when called directly with self.assertRaises(AttributeError): settings.get_page_url("not_an_attribute") # when called indirectly via shortcut with self.assertRaises(AttributeError): settings.page_url.not_an_attribute
[ 9, 19, 1174, 274, 45, -1, 217 ]
def METHOD_NAME(x): return -x[0]**2-x[1]**2
[ 784, 11805 ]
def METHOD_NAME(farm_beats_resource_name: Optional[str] = None, resource_group_name: Optional[str] = None, solution_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSolutionResult: """ Get installed Solution details by Solution id. :param str farm_beats_resource_name: FarmBeats resource name. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str solution_id: Solution Id of the solution. """ __args__ = dict() __args__['farmBeatsResourceName'] = farm_beats_resource_name __args__['resourceGroupName'] = resource_group_name __args__['solutionId'] = solution_id opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:agfoodplatform/v20210901preview:getSolution', __args__, opts=opts, typ=GetSolutionResult).value return AwaitableGetSolutionResult( e_tag=pulumi.get(__ret__, 'e_tag'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), properties=pulumi.get(__ret__, 'properties'), system_data=pulumi.get(__ret__, 'system_data'), type=pulumi.get(__ret__, 'type'))
[ 19, 725 ]
def METHOD_NAME(self): data = [ utils.build_conf_dict( "marshal", "B302", issue.Cwe.DESERIALIZATION_OF_UNTRUSTED_DATA, ["marshal.load", "marshal.loads"], ( "Deserialization with the marshal module is possibly " "dangerous." ), ) ] profile = {"include": ["B001"], "blacklist": {"Call": data}} ts = test_set.BanditTestSet(self.config, profile) blacklist = ts.get_tests("Call")[0] self.assertNotIn("Import", blacklist._config) self.assertNotIn("ImportFrom", blacklist._config) self.assertEqual(1, len(blacklist._config["Call"]))
[ 9, 337, 3301, 4593 ]
def METHOD_NAME(self) -> None: ...
[ 38, 390 ]
def METHOD_NAME(backend): c = tc.Circuit(2) for _ in range(3): c.rx(range(2), theta=0.4) c.cnot(0, 1) def execute(circuit): value = circuit.expectation_ps(z=[0]) return value def execute2(circuit): key = tc.backend.get_random_state(42) count = circuit.sample( batch=1000, allow_state=True, format_="count_dict_bin", random_generator=key ) return count _ = apply_rc(circuit=c, executor=execute, num_to_average=6, simplify=False) _ = apply_rc( circuit=c, executor=execute2, num_to_average=6, simplify=True, iscount=True ) # generate a circuit with rc _ = qem.rc_circuit(c)
[ 9, 7671 ]
def METHOD_NAME(self): """Return localpath.""" return f"{self.hacs.core.config_path}/www/community/{self.data.full_name.split('/')[-1]}"
[ -1 ]
def METHOD_NAME(self, user, version): """ Check if `user` is authorized to access `version`. The queryset from `_get_subproject_version` already filters public projects. This is mainly to be overridden in .com to make use of the auth backends in the proxied API. """ return True
[ 220, 204 ]
def METHOD_NAME(resp): try: resp_json = json.loads(resp.text) except BaseException: resp_json = resp.text return { "status_code": resp.status_code, "headers": resp.headers, "content": resp_json, }
[ 17 ]
def METHOD_NAME(name, op): def_op(name, op) hasname.append(op)
[ 156, 441 ]
def METHOD_NAME(packer, frame, es_lkas_state_msg, enabled, visual_alert, left_line, right_line, left_lane_depart, right_lane_depart): values = {s: es_lkas_state_msg[s] for s in [ "CHECKSUM", "LKAS_Alert_Msg", "Signal1", "LKAS_ACTIVE", "LKAS_Dash_State", "Signal2", "Backward_Speed_Limit_Menu", "LKAS_Left_Line_Enable", "LKAS_Left_Line_Light_Blink", "LKAS_Right_Line_Enable", "LKAS_Right_Line_Light_Blink", "LKAS_Left_Line_Visible", "LKAS_Right_Line_Visible", "LKAS_Alert", "Signal3", ]} values["COUNTER"] = frame % 0x10 # Filter the stock LKAS "Keep hands on wheel" alert if values["LKAS_Alert_Msg"] == 1: values["LKAS_Alert_Msg"] = 0 # Filter the stock LKAS sending an audible alert when it turns off LKAS if values["LKAS_Alert"] == 27: values["LKAS_Alert"] = 0 # Filter the stock LKAS sending an audible alert when "Keep hands on wheel" alert is active (2020+ models) if values["LKAS_Alert"] == 28 and values["LKAS_Alert_Msg"] == 7: values["LKAS_Alert"] = 0 # Filter the stock LKAS sending an audible alert when "Keep hands on wheel OFF" alert is active (2020+ models) if values["LKAS_Alert"] == 30: values["LKAS_Alert"] = 0 # Filter the stock LKAS sending "Keep hands on wheel OFF" alert (2020+ models) if values["LKAS_Alert_Msg"] == 7: values["LKAS_Alert_Msg"] = 0 # Show Keep hands on wheel alert for openpilot steerRequired alert if visual_alert == VisualAlert.steerRequired: values["LKAS_Alert_Msg"] = 1 # Ensure we don't overwrite potentially more important alerts from stock (e.g. FCW) if visual_alert == VisualAlert.ldw and values["LKAS_Alert"] == 0: if left_lane_depart: values["LKAS_Alert"] = 12 # Left lane departure dash alert elif right_lane_depart: values["LKAS_Alert"] = 11 # Right lane departure dash alert if enabled: values["LKAS_ACTIVE"] = 1 # Show LKAS lane lines values["LKAS_Dash_State"] = 2 # Green enabled indicator else: values["LKAS_Dash_State"] = 0 # LKAS Not enabled values["LKAS_Left_Line_Visible"] = int(left_line) values["LKAS_Right_Line_Visible"] = int(right_line) return packer.make_can_msg("ES_LKAS_State", CanBus.main, values)
[ 129, 2752, 2542, 551 ]
def METHOD_NAME(self): self.stored_problems = {} self.any_compression = False
[ 176 ]
def METHOD_NAME(self): return self.client.format_url( "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules", **self.url_parameters )
[ 274 ]
def METHOD_NAME(self) -> Optional['outputs.ShippingAddressResponse']: """ Shipping details for the address """ return pulumi.get(self, "shipping_address")
[ 850, 85 ]
def METHOD_NAME(self, tasks): """ Show and run dialog for selected tasks """ if len(tasks) == 0: return self.tasks = tasks self.tag_entry.set_text(self.last_tag_entry) self.tag_entry.grab_focus() self.apply_to_subtasks.set_active(self.last_apply_to_subtasks) self.dialog.run() self.dialog.hide() self.tasks = []
[ 2444, 114 ]
def METHOD_NAME(self, obj): """Get resource type.""" resource_type = obj["metadata"].get( "resource_type", {"id": "publication-article"} ) resource_type_record = self._read_resource_type(resource_type["id"]) props = resource_type_record["props"] return props.get("csl", "article") # article is CSL "Other"
[ 19, 44 ]
def METHOD_NAME(self, f, use_iord=True): self.use_iord = use_iord if use_iord: pt = f['iord'] else: pt = f.get_index_list(f.ancestor) self.particles = pt
[ 1472 ]
def METHOD_NAME(self): '''It should not duplicate badges''' fake = Fake.objects.create() result1 = fake.add_badge(TEST) result2 = fake.add_badge(TEST) self.assertEqual(len(fake.badges), 1) self.assertEqual(result1, result2) badge = fake.badges[0] self.assertEqual(badge.kind, TEST) self.assertIsNotNone(badge.created) self.assertIsNone(badge.created_by)
[ 9, 238, 5303, 2430 ]
def METHOD_NAME(self): expected_vals = self.MOCK_WASM_UDF_VALS.copy() udf_yaml_filename = "./sdcm/utils/udf_scripts/wasm_plus.yaml" udf = UDF.from_yaml(udf_yaml_filename) self.assertIsNotNone(udf) for key, value in expected_vals.items(): self.assertEqual(value, getattr(udf, key), f"Did not find expected value for {key} in the udf class.")
[ 9, 2309, 16980, 41, 6390, 2942 ]
def METHOD_NAME(self): polygon = array(((0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0))) points = zeros((0, 2)) result = points_in_polygon(points, polygon) self.assertTrue(len(result) == len(points)) polygon = array([]) points = array(((-1.0, -1.0), (5.0, 5.0), (15.0, 15.0))) result = points_in_polygon(points, polygon) self.assertTrue(allclose(array([0, 0, 0]), result))
[ 9, 35, 182, 623, 1117 ]
def METHOD_NAME(self): """ This test case runs a job on two nodes. Kills the mom process on MS, waits for the job to be requeued and tests for the resources_used value to be present in the 'R' record. """ # Submit job select = "vnode=" + self.hostA + "+vnode=" + self.hostB j1 = Job(TEST_USER, attrs={ ATTR_N: 'NodeFailRequeueTest', 'Resource_List.select': select}) jid1 = self.server.submit(j1) # Wait for the job to start running. self.server.expect(JOB, {ATTR_state: 'R'}, jid1) # Kill the MoM process on the MS. self.momA.signal('-KILL') # Wait for the job to be requeued. self.server.expect(JOB, {'job_state': 'Q'}, id=jid1) # Check for resources_used value in the 'R' record. msg = '.*R;' + str(jid1) + '.*resources_used.ncpus=2.*' self.server.accounting_match(msg, regexp=True, n='ALL')
[ 9, -1 ]
def METHOD_NAME(self, data: Tuple[Tuple[Any, Tuple[str, BinaryIO]], Tuple[str, BinaryIO]]) -> Dict[str, Any]: split_and_image_data, ann_data = data _, image_data = split_and_image_data image_path, image_buffer = image_data ann_path, ann_buffer = ann_data anns = read_mat(ann_buffer, squeeze_me=True)["GTcls"] return dict( image_path=image_path, image=EncodedImage.from_file(image_buffer), ann_path=ann_path, # the boundaries are stored in sparse CSC format, which is not supported by PyTorch boundaries=torch.as_tensor( np.stack([raw_boundary.toarray() for raw_boundary in anns["Boundaries"].item()]) ), segmentation=torch.as_tensor(anns["Segmentation"].item()), )
[ 123, 734 ]
def METHOD_NAME(conf): """Configuration flags for executing gcc on MacOS""" v = conf.env v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-dynamiclib'] v.cshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = []
[ 4110, 2872, 6697 ]
def METHOD_NAME( self, skip_token: Optional[str] = None, top: Optional[int] = None, select: Optional[str] = None, offer_guid: Optional[str] = None, report_creator_tenant_id: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.ReportResource"]: """Get the AppComplianceAutomation report list for the tenant. :param skip_token: Skip over when retrieving results. Default value is None. :type skip_token: str :param top: Number of elements to return when retrieving results. Default value is None. :type top: int :param select: OData Select statement. Limits the properties on each entry to just those requested, e.g. ?$select=reportName,id. Default value is None. :type select: str :param offer_guid: The offerGuid which mapping to the reports. Default value is None. :type offer_guid: str :param report_creator_tenant_id: The tenant id of the report creator. Default value is None. :type report_creator_tenant_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ReportResource or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appcomplianceautomation.models.ReportResource] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop( "api_version", _params.pop("api-version", self._config.api_version) ) # type: Literal["2022-11-16-preview"] cls = kwargs.pop("cls", None) # type: ClsType[_models.ReportResourceList] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_request( skip_token=skip_token, top=top, select=select, offer_guid=offer_guid, report_creator_tenant_id=report_creator_tenant_id, api_version=api_version, template_url=self.METHOD_NAME.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("ReportResourceList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
[ 245 ]
def METHOD_NAME(self):
[ 9, 3724, 2685, 1751, 45, 168, 2421 ]
def METHOD_NAME( name: str, connection: sqlite3.Connection, version: int ) -> str | None: """Return uuid for album with name or None if not found""" return _folder_album_uuid_for_name(name, connection, version, folder=True)
[ 451, 4977, 43, 156 ]