Dataset schema:
text: string, length 15 to 7.82k
ids: sequence of int, length 1 to 7
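Each row below pairs a "text" cell, a Python function whose name is masked as METHOD_NAME, with an "ids" cell. A minimal sketch of iterating rows with this schema, assuming the dump comes from a Hugging Face datasets repository; the path "org/method-name-corpus" is a hypothetical placeholder:

# Hypothetical loading sketch; the dataset path is a placeholder.
from datasets import load_dataset

ds = load_dataset("org/method-name-corpus", split="train")
for row in ds.select(range(3)):
    # each row holds a masked-function string and its id sequence
    print(len(row["text"]), row["ids"])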
def METHOD_NAME(self):
    comments = self.ogr_project.get_pr(217).get_comments(author="lachmanfrantisek")
    assert len(comments) == 3
    assert comments[0].body.endswith("here.")
[ 9, 1933, 3528, 2997 ]
def METHOD_NAME(self):
    portfolio = sample(list(self.user.portfolio_list()), 1)[0]
    print("Adding project to portfolio: {} (#{})".format(portfolio.title, portfolio.id))
    self.post(
        "/store/govready-q-files-startpack/System-Description-Demo?portfolio={}".format(portfolio.id),
        {"organization": self.org.slug},
    )
    print(self.response.url)
[ 238, 112 ]
def METHOD_NAME(
    circuit: QPROGRAM,
    executor: Union[Executor, Callable[[QPROGRAM], QuantumResult]],
    check_operators: Sequence[PauliString],
    code_hamiltonian: Observable,
    observable: Observable,
    pauli_string_to_expectation_cache: Dict[PauliString, complex] = {},
) -> float:
    """Function for the calculation of an observable from some circuit of
    interest to be mitigated with quantum subspace expansion (QSE).

    Args:
        circuit: Quantum program to execute with error mitigation.
        executor: Executes a circuit and returns a `QuantumResult`.
        check_operators: List of check operators that define the stabilizer
            code space.
        code_hamiltonian: Hamiltonian of the code space.
        observable: Observable to compute the mitigated expectation value of.
        pauli_string_to_expectation_cache: Cache for expectation values of
            Pauli strings used to compute the projector and the observable.

    Returns:
        The expectation value estimated with QSE.
    """
    projector = get_projector(
        circuit,
        executor,
        check_operators,
        code_hamiltonian,
        pauli_string_to_expectation_cache,
    )
    # Compute the expectation value of the observable: <P O P>
    pop = get_expectation_value_for_observable(
        circuit,
        executor,
        projector * observable * projector,
        pauli_string_to_expectation_cache,
    )
    # Compute the normalization factor: <P P>
    pp = get_expectation_value_for_observable(
        circuit,
        executor,
        projector * projector,
        pauli_string_to_expectation_cache,
    )
    return pop / pp
[ 750, 41, -1 ]
def METHOD_NAME(self, rank, hostname=None, cores="0"):
    if hostname is None:
        hostname = socket.gethostname()
    self.pimpl.append_rank_cores(hostname, rank, cores)
    return self
[ 238, 1499 ]
def METHOD_NAME(next_link=None):
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    if not next_link:
        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
    else:
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
[ 123, 377 ]
def METHOD_NAME(self) -> str:
    group_name = self._get_param("GroupName")
    name = self.uri.split("?")[0].split("/")[-1]
    description = self._get_param("Description")
    end_date = self._get_param("EndDate")
    flexible_time_window = self._get_param("FlexibleTimeWindow")
    kms_key_arn = self._get_param("KmsKeyArn")
    schedule_expression = self._get_param("ScheduleExpression")
    schedule_expression_timezone = self._get_param("ScheduleExpressionTimezone")
    start_date = self._get_param("StartDate")
    state = self._get_param("State")
    target = self._get_param("Target")
    schedule = self.scheduler_backend.METHOD_NAME(
        description=description,
        end_date=end_date,
        flexible_time_window=flexible_time_window,
        group_name=group_name,
        kms_key_arn=kms_key_arn,
        name=name,
        schedule_expression=schedule_expression,
        schedule_expression_timezone=schedule_expression_timezone,
        start_date=start_date,
        state=state,
        target=target,
    )
    return json.dumps(dict(ScheduleArn=schedule.arn))
[ 86, 507 ]
def METHOD_NAME(dtype_path):
    files = os.listdir(dtype_path)
    for fname in tqdm(files):
        build_data.ungzip(dtype_path, fname)
[ 6900, 1190, 1537 ]
def METHOD_NAME(mock_conditions):
    condition_1, condition_2, condition_3, condition_4 = mock_conditions
    and_condition = AndCompoundCondition(
        operands=[
            condition_1,
            condition_2,
            condition_3,
            condition_4,
        ]
    )

    # ensure that all conditions evaluated when all return True
    result, value = and_condition.verify()
    assert result is True
    assert len(value) == 4, "all conditions evaluated"
    assert value == [1, 2, 3, 4]

    # ensure that short circuit happens when 1st condition is false
    condition_1.verify.return_value = (False, 1)
    result, value = and_condition.verify()
    assert result is False
    assert len(value) == 1, "only one condition evaluated"
    assert value == [1]

    # short circuit occurs for 3rd entry
    condition_1.verify.return_value = (True, 1)
    condition_3.verify.return_value = (False, 3)
    result, value = and_condition.verify()
    assert result is False
    assert len(value) == 3, "3-of-4 conditions evaluated"
    assert value == [1, 2, 3]
[ 9, 61, 405, 61, 1707, 1708 ]
def METHOD_NAME(self): return ('optionset', )
[ 935, 342 ]
def METHOD_NAME(self, *args, **options):
    election_date = options["date"]
    election_id = f"ref.{options['council']}.{election_date}"
    referendum_type = ElectionType.objects.get(election_type="ref")
    group_election, created = Election.private_objects.update_or_create(
        election_id=f"ref.{election_date}",
        defaults={
            "election_type": referendum_type,
            "poll_open_date": election_date,
            "election_title": "Referendum elections",
            "current": True,
            "division_id": options["division_id"],
            "group_type": "election",
        },
    )
    ref_election, created = Election.private_objects.update_or_create(
        election_id=election_id,
        defaults={
            "election_type": referendum_type,
            "poll_open_date": election_date,
            "election_title": options["election_title"],
            "current": True,
            "division_id": options["division_id"],
            "voting_system": "FPTP",
            "group": group_election,
        },
    )
    self.stdout.write(f"{'Created' if created else 'Updated'} {election_id}")
    ModerationHistory.objects.get_or_create(
        status_id=ModerationStatuses.approved.value,
        election=group_election,
    )
    ModerationHistory.objects.get_or_create(
        status_id=ModerationStatuses.approved.value,
        election=ref_election,
    )
    org = Organisation.objects.get_by_date(
        date=election_date,
        official_identifier=options["official_identifier"],
        organisation_type=options["org_type"],
    )
    ref_election.organisation = org
    ref_election.organisation_geography = org.geographies.latest()
    if ref_election.division:
        ref_election.division_geography = ref_election.division.geography
    ref_election.save()
[ 276 ]
def METHOD_NAME(attribute_info, attribute_value, wlst_mode):
    """
    Returns the corrected WLST attribute name for the specified parameters.
    The "Encrypted" suffix is removed from online dual-password attributes
    for use with unencrypted values.
    :param attribute_info: the attribute information to be checked
    :param attribute_value: the value to be checked for encryption
    :param wlst_mode: the offline or online type to be checked
    :return: the corrected value, or None if no correction was required
    """
    if _is_dual_password(attribute_info) and (wlst_mode == WlstModes.ONLINE) \
            and not EncryptionUtils.isEncryptedString(attribute_value):
        return _get_non_encrypted_wlst_name(attribute_info)
    return None
[ 19, 13487, 309, 156 ]
def METHOD_NAME(self): """Retrieve the keys from the JWKS endpoint.""" LOGGER.info("Fetching keys from %s", self.options.get("jwks_uri")) try: # pylint: disable=consider-using-with request_res = request.urlopen(self.options.get("jwks_uri")) data = json.loads( request_res.read().decode( request_res.info().get_param("charset") or "utf-8" ) ) keys = data["keys"] LOGGER.info("Keys: %s", keys) return keys except Exception as err: # pylint: disable=broad-except LOGGER.info("Failure: ConnectionError") LOGGER.info(err) return {}
[ 19, 219 ]
def METHOD_NAME(self):
    responses.add(
        method=responses.POST,
        url="https://example.com/link-issue",
        body="Something failed",
        status=500,
    )

    with pytest.raises(APIError):
        IssueLinkRequester.run(
            install=self.install,
            project=self.project,
            group=self.group,
            uri="/link-issue",
            fields={},
            user=self.user,
            action="create",
        )

    buffer = SentryAppWebhookRequestsBuffer(self.sentry_app)
    requests = buffer.get_requests()

    assert len(requests) == 1
    assert requests[0]["response_code"] == 500
    assert requests[0]["event_type"] == "external_issue.created"
[ 9, 4616, 17 ]
def METHOD_NAME(self) -> Dict[str, str]:
    env = super().METHOD_NAME()
    env.update(
        {
            "AMENT_PYTHON_EXECUTABLE": "/usr/bin/python3",
            "COLCON_PYTHON_EXECUTABLE": "/usr/bin/python3",
        }
    )
    return env
[ 19, 56, 1027 ]
def METHOD_NAME(self):
    q = self.isolated_query_patch()
    runs = self.repo.query_runs(query=q, report_mode=QueryReportMode.DISABLED).iter_runs()
    run_count = sum(1 for _ in runs)
    self.assertEqual(10, run_count)
[ 9, 539, 420, 35, 539 ]
def METHOD_NAME(self, name=None, account=None):
    """
    Returns a dictionary with the subscription information:
    Examples: ``{'status': 'INACTIVE/ACTIVE/BROKEN', 'last_modified_date': ...}``

    :param name: Name of the subscription
    :type: String
    :param account: Account identifier
    :type account: String
    :returns: Dictionary containing subscription parameter
    :rtype: Dict
    :raises: exception.NotFound if subscription is not found
    """
    path = self.SUB_BASEURL
    if account:
        path += '/%s' % (account)
        if name:
            path += '/%s' % (name)
    elif name:
        path += '/Name/%s' % (name)
    else:
        path += '/'
    url = build_url(choice(self.list_hosts), path=path)
    result = self._send_request(url, type_='GET')
    if result.status_code == codes.ok:  # pylint: disable=no-member
        return self._load_json_data(result)
    else:
        exc_cls, exc_msg = self._get_exception(headers=result.headers,
                                               status_code=result.status_code,
                                               data=result.content)
        raise exc_cls(exc_msg)
[ 245, 8614 ]
def METHOD_NAME(self): self.check_outputs_and_grads(all_equal=True)
[ 9, 250, 51 ]
def METHOD_NAME(images, coords):
    marker_kwargs = {"marker": "+", "color": (1, 1, 1, 0.1), "s": 50}
    im_kwargs = {"cmap": "gray"}
    title_kwargs = {"fontsize": 20}
    marker_names = ["inf insertion point", "sup insertion point", "RV inf"]
    n_samples = len(images)
    fig = plot_multi_images(
        [images[i][0, ...] for i in range(n_samples)],
        n_cols=5,
        marker_locs=coords,
        marker_titles=marker_names,
        marker_cmap="Set1",
        im_kwargs=im_kwargs,
        marker_kwargs=marker_kwargs,
        title_kwargs=title_kwargs,
    )
    assert type(fig) == matplotlib.figure.Figure

    with pytest.raises(Exception):
        reg_img_stack(images, coords[1:, :], coords[0])

    images_reg, max_dist = reg_img_stack(images, coords, target_coords=coords[0])
    # images after registration should be close to original images,
    # because values of noise are small
    for i in range(n_samples):
        testing.assert_allclose(images_reg[i], images[i])
    # add one for avoiding inf relative difference
    testing.assert_allclose(max_dist + 1, np.ones(n_samples), rtol=2, atol=2)

    fig = plot_multi_images([images_reg[i][0, ...] for i in range(n_samples)], n_cols=5)
    assert type(fig) == matplotlib.figure.Figure
[ 9, 739 ]
def METHOD_NAME(replica_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                resource_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSignalRReplicaResult:
    """
    Get the replica and its properties.
    Azure REST API version: 2023-03-01-preview.

    :param str replica_name: The name of the replica.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource.
    """
    __args__ = dict()
    __args__['replicaName'] = replica_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:signalrservice:getSignalRReplica', __args__, opts=opts, typ=GetSignalRReplicaResult).value

    return AwaitableGetSignalRReplicaResult(
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
[ 19, 900, 3264, 5501 ]
def METHOD_NAME(self, state):
[ 8895, 12759 ]
def METHOD_NAME(self, *args):
    if not all(map(is_develop, args)):
        self.skipTest(
            f"Required plugins: {', '.join(args)} must be installed in development mode"
        )
[ 984, 1294 ]
def METHOD_NAME(*args, **kwargs): return CheckmarxPlugin(*args, **kwargs)
[ 129, 2793 ]
def METHOD_NAME(self): return self._data
[ 19, 365 ]
def METHOD_NAME(repo_ctx):
    if repo_ctx.attr.url and repo_ctx.attr.urls:
        fail("Either `url` or `urls` attribute must be set")
    if repo_ctx.attr.sha256 and repo_ctx.attr.sha256s:
        fail("Either `sha256` or `sha256s` attribute must be set")
    if repo_ctx.attr.strip_prefix and repo_ctx.attr.strip_prefixes:
        fail("Either `strip_prefix` or `strip_prefixes` attribute must be set")
    if repo_ctx.attr.url:
        return struct(
            urls = [repo_ctx.attr.url],
            sha256s = [repo_ctx.attr.sha256],
            strip_prefixes = [repo_ctx.attr.strip_prefix],
        )
    else:
        return struct(
            urls = repo_ctx.attr.urls,
            sha256s = repo_ctx.attr.sha256s,
            strip_prefixes = (
                repo_ctx.attr.strip_prefixes if repo_ctx.attr.strip_prefixes
                else len(repo_ctx.attr.urls) * [repo_ctx.attr.strip_prefix]
            ),
        )
[ 129, 136, 100 ]
def METHOD_NAME(client, signup_url):
    assert EmailAddress.objects.count() == 0
    email = '[email protected]'
    response = client.post(
        signup_url,
        {
            'username': 'testuser',
            'email': email,
            'password1': 'password',
            'password2': 'password',
            'terms_of_use': 'on',
            'captcha': 'testpass:0',
        }
    )
    assert response.status_code == 302
    assert EmailAddress.objects.filter(email=email, verified=False).count() == 1
    assert len(mail.outbox) == 1

    confirmation_url = re.search(
        r'(http://testserver/.*/)', str(mail.outbox[0].body)
    ).group(0)
    confirm_email_response = client.get(confirmation_url)
    assert confirm_email_response.status_code == 200
    assert EmailAddress.objects.filter(email=email, verified=False).count() == 1

    confirm_email_response = client.post(confirmation_url)
    assert confirm_email_response.status_code == 302
    assert EmailAddress.objects.filter(email=email, verified=True).count() == 1
[ 9, 372 ]
def METHOD_NAME(precision: Precision):
    memory_fp32 = eval_and_measure_memory(Precision.FP32)
    memory_half = eval_and_measure_memory(precision)
    assert memory_half < 0.95 * memory_fp32
[ 9, 1171, 1582, 1645 ]
def METHOD_NAME(repo, branch):
    """Check out a branch."""
    remote_branch = repo.lookup_branch('origin/' + branch, pygit2.GIT_BRANCH_REMOTE)
    local_branch = repo.lookup_branch(branch, pygit2.GIT_BRANCH_LOCAL)
    if not local_branch:
        local_branch = repo.branches.create(branch, commit=remote_branch.peel())
    local_branch.upstream = remote_branch
    local_branch.set_target(remote_branch.target)
    repo.checkout(local_branch)
    repo.reset(remote_branch.target, pygit2.GIT_RESET_HARD)
[ 2170, 3653 ]
def METHOD_NAME(val_in):
    if isinstance(val_in, dict):
        val = dict()
        for k, v in val_in.items():
            val[k] = METHOD_NAME(v)
    elif isinstance(val_in, str):
        # exponential formatting
        val = val_in.strip().split()
        if len(val) == 1:
            val = val[0]
            re_float = r'(\d*\.*\d*)d(\-*\+*\d+)'
            val = re.sub(re_float, r'\1e\2', val)
            if val.isdecimal():
                val = int(val)
            elif val == 'true' or val == 'false':
                val = val == 'true'
            else:
                try:
                    val = float(val)
                except Exception:
                    pass
        else:
            val = [METHOD_NAME(v) for v in val]
    elif isinstance(val_in, list):
        try:
            val = [v.split() if isinstance(v, str) else v for v in val_in]
            val = [v[0] if (isinstance(v, list) and len(v) == 1) else v for v in val]
            val = np.array(val, dtype=float)
            if np.all(np.mod(val, 1) == 0):
                val = np.array(val, dtype=int)
        except Exception:
            val = [METHOD_NAME(v) for v in val_in]
    return val
[ 197, 99 ]
def METHOD_NAME(self, command, ignore_failure=False, timeout=None, **kwargs
[ 2437, 462 ]
def METHOD_NAME(self, count):
    img_count = count % IMAGE_SHAPE[1]
    self._image_data[:, img_count] = img_count / IMAGE_SHAPE[1]
    rdata_shape = (IMAGE_SHAPE[0], IMAGE_SHAPE[1] - img_count - 1)
    self._image_data[:, img_count + 1:] = _generate_random_image_data(rdata_shape)
    return self._image_data.copy()
[ 86, 660, 365 ]
def METHOD_NAME(s, r, t, forward=True, backward=False):
    def clean_replace_single(s, r, t, forward, backward, sidx=0):
        # idx = s[sidx:].find(r)
        idx = s.find(r)
        if idx == -1:
            return s, -1
        idx_r = idx + len(r)
        if backward:
            while idx > 0 and s[idx - 1]:
                idx -= 1
        elif idx > 0 and s[idx - 1] != ' ':
            return s, -1

        if forward:
            while idx_r < len(s) and (s[idx_r].isalpha() or s[idx_r].isdigit()):
                idx_r += 1
        elif idx_r != len(s) and (s[idx_r].isalpha() or s[idx_r].isdigit()):
            return s, -1
        return s[:idx] + t + s[idx_r:], idx_r

    sidx = 0
    while sidx != -1:
        s, sidx = clean_replace_single(s, r, t, forward, backward, sidx)
    return s
[ 1356, 369 ]
def METHOD_NAME(
    resource_group_name: str, server_name: str, database_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-02-01-preview"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/usages",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "databaseName": _SERIALIZER.url("database_name", database_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
[ 56, 245, 604, 463, 377 ]
def METHOD_NAME(config: NNCFConfig, should_init: bool) -> TFCompressionAlgorithmBuilder:
    """
    Factory to create an instance of the compression algorithm builder by NNCFConfig.

    :param config: An instance of NNCFConfig that defines compression methods.
    :param should_init: The flag indicates that the generated compression builder
        will initialize (True) or not (False) the training parameters of the model
        during model building.
    :return: An instance of the `CompressionAlgorithmBuilder`
    """
    algo_names = extract_algorithm_names(config)
    number_compression_algorithms = len(algo_names)
    if number_compression_algorithms == 0:
        return NoCompressionAlgorithmBuilder(config, should_init)
    if number_compression_algorithms == 1:
        algo_name = next(iter(algo_names))
        return get_compression_algorithm_builder(algo_name)(config, should_init)
    return TFCompositeCompressionAlgorithmBuilder(config, should_init)
[ 129, 4483, 4089, 348 ]
async def METHOD_NAME(session_factory: DbSessionFactory):
    content = BALANCES_CONTENT_SABLIER
    with session_factory() as session:
        update_balances(session=session, content=content)
        session.commit()

        balances = content["balances"]
        compare_balances(
            session=session, balances=balances, chain=Chain.ETH, dapp="SABLIER"
        )
[ 9, 356, 2199, -1 ]
def METHOD_NAME() -> bool: return _IS_MEGATRON_INITIALIZED
[ 137, 14831, 924 ]
def METHOD_NAME(self):
    # Reset all devices
    for gpu in self.gpus:
        gpu.reset()
[ 2656, 75, 6295 ]
def METHOD_NAME() -> None: pass
[ 1519, 1603 ]
def METHOD_NAME(self, *args, **kwargs): """ usage: append(green=0.1, red=0.5, blue=0.21) # former, now almost deprecated version: append(0.5, 0.6) """ for k in kwargs.keys(): v = kwargs.pop(k) kwargs[k[0]] = v i=0 for value in args: while self._defaultcolors[i] in kwargs: i += 1 kwargs[self._defaultcolors[i]] = value t = time()-self.plot_start_time for color, value in kwargs.items(): if value is not None: if not color in self.curves: self.curves[color] = self.pw.plot(pen=color) curve = self.curves[color] x, y = curve.getData() if x is None or y is None: x, y = np.array([t]), np.array([value]) else: x, y = np.METHOD_NAME(x, t), np.METHOD_NAME(y, value) curve.setData(x, y)
[ 1459 ]
def METHOD_NAME():
    # Expected error type is LVMCommandError.
    with pytest.raises(TypeError):
        e = storage_exception.LogicalVolumeDoesNotExistError(
            "vg-name", "lv-name", error="error")

    # Correct initialization.
    fake_error = storage_exception.LVMCommandError(
        rc=5, cmd=["fake"], out=["fake output"], err=["fake error"])
    e = storage_exception.LogicalVolumeDoesNotExistError(
        "vg-name", "lv-name", error=fake_error)
    assert e.error == fake_error

    # Check error format
    formatted = str(e)
    assert "vg_name=vg-name" in formatted
    assert "lv_name=lv-name" in formatted
    assert "error=" in formatted
[ 9, 1692, 2276, 870, 130, 1985, 168 ]
def METHOD_NAME(self, status_code):
    self.logger.debug(f'Update {self}: {status_code}')
    self.status_code = status_code
    self.status_text = str(status_code)
    data = bytes(self.reply.readAll())
    if self.raw_response:
        self.logger.debug('Create a raw response')
        header = self.reply.header(QNetworkRequest.ContentTypeHeader)
        self.on_finished_signal.emit((data, header))
        return
    if not data:
        self.logger.error(f'No data received in the reply for {self}')
        return
    self.logger.debug('Create a json response')
    result = json.loads(data)
    if isinstance(result, dict):
        result[REQUEST_ID] = self.id
    is_error = 'error' in result
    if is_error and self.capture_errors:
        text = self.manager.show_error(self, result)
        raise Warning(text)
    self.on_finished_signal.emit(result)
[ 276, 721, 17 ]
def METHOD_NAME(data, x=2):
    """Converting: ugSOx/m3 to ugS/m3.

    Parameters
    ----------
    data : ndarray
        Contains the data in units of ugSOx/m3.
    x : int
        The number of oxygen atoms, O, in your desired SOx compound.

    Returns
    -------
    data : ndarray
        In units of ugS/m3.

    Notes
    -----
    micro grams to kilos is 10**6
    """
    mmO = 15.9999  # molar mass oxygen
    mmS = 32.065   # molar mass sulphur
    mm_compound = (mmS + x * mmO) * 10**3  # *10**3 gives molar mass in micrograms
    nr_molecules = mass_to_nr_molecules(data, mm_compound)
    weight_s = nr_molecules_to_mass(nr_molecules, mmS * 10**3)  # weight in ug
    return weight_s
[ 9326, 9327, 7343, 9328 ]
def METHOD_NAME(self): """test attribute label""" # get object case_1 = Case.objects.get(case_name='case_1') # get label field_label = case_1._meta.get_field('tag').verbose_name # compare self.assertEqual(field_label, 'tag')
[ 9, 82, 309, 636 ]
def METHOD_NAME(self, dt):
    """
    Advance by one time step.

    Parameters
    ----------
    dt : float
        Time-step duration (y)
    """
    z_before = self.grid.at_node["topographic__elevation"].copy()
    self.calc_diffusion_coef()
    super().METHOD_NAME(dt)
    depo = self.grid.at_node["sediment_deposit__thickness"]
    depo[:] = self.grid.at_node["topographic__elevation"] - z_before
    self._time += dt
[ 22, 206, 367 ]
def METHOD_NAME():
    coord = ([1, 2, 3], [4, 5, 6])
    pixel_type = 'test_new_pixel_type'
    files = ['test.fits']
    obs_start = obs_mid = obs_end = datetime.datetime.now()
    baseline = 'baseline.fits'
    mean_file = 'meanfile.fits'

    dark = dark_monitor.Dark()
    dark.instrument = 'nircam'
    dark.detector = 'nrcalong'
    dark.identify_tables()
    try:
        dark.add_bad_pix(coord, pixel_type, files, mean_file, baseline,
                         obs_start, obs_mid, obs_end)
        new_entries = di.session.query(dark.pixel_table).filter(
            dark.pixel_table.type == pixel_type)
        assert new_entries.count() == 1
        assert new_entries[0].baseline_file == baseline
        assert np.all(new_entries[0].x_coord == coord[0])
        assert np.all(new_entries[0].y_coord == coord[1])
    finally:
        # clean up
        di.session.query(dark.pixel_table).filter(
            dark.pixel_table.type == pixel_type).delete()
        di.session.commit()
        assert di.session.query(dark.pixel_table).filter(
            dark.pixel_table.type == pixel_type).count() == 0
[ 9, 238, 1068, 9791 ]
def METHOD_NAME(self, request: AnyRequest, config: dict) -> bool:
    # check LoA requirements
    authenticated_loa = request.session[FORM_AUTH_SESSION_KEY]["loa"]
    required = config.get("loa") or DIGID_DEFAULT_LOA
    return loa_order(authenticated_loa) >= loa_order(required)
[ 250, 5186 ]
def METHOD_NAME(bitwidth):
    # Note that this is not uniformly distributed
    return int(2**random.uniform(0, bitwidth) - 1)
[ 3581, 1928, 1260 ]
def METHOD_NAME(self, node=None, extra=0):
    self.new_lines = max(self.new_lines, 1 + extra)
    if node is not None and self.add_line_information:
        self.write('# line: %s' % node.lineno)
        self.new_lines = 1
[ 2317 ]
def METHOD_NAME(model0, mutator1, max_pool, avg_pool, global_pool):
    model = model0
    for _ in range(10):
        model = mutator1.apply(model)
        pools = _get_pools(model)
        if pools[0] == max_pool:
            assert pools[1] == max_pool
        else:
            assert pools[0] in [avg_pool, global_pool]
            assert pools[1] in [max_pool, avg_pool, global_pool]
[ 9, 1695, 770 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.monitor_name = AAZStrArg(
        options=["--monitor-name"],
        help="Monitor resource name",
        required=True,
        id_part="name",
    )
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.ruleset_id = AAZStrArg(
        options=["--ruleset-id"],
        help="Ruleset Id of the filter",
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(p, include_options=True, sne=0):
    # type: (Packet, bool, int) -> bytes
    """Build message bytes as described by RFC5925 section 5.1"""
    result = bytearray()
    result += struct.pack("!I", sne)
    result += tcp_pseudoheader(p[TCP])

    # tcp header with checksum set to zero
    th_bytes = bytes(p[TCP])
    result += th_bytes[:16]
    result += b"\x00\x00"
    result += th_bytes[18:20]

    # Even if include_options=False the TCP-AO option itself is still included
    # with the MAC set to all-zeros. This means we need to parse TCP options.
    pos = 20
    th = p[TCP]
    doff = th.dataofs
    if doff is None:
        opt_len = len(th.get_field("options").i2m(th, th.options))
        doff = 5 + ((opt_len + 3) // 4)
    tcphdr_optend = doff * 4

    while pos < tcphdr_optend:
        optnum = orb(th_bytes[pos])
        pos += 1
        if optnum == 0 or optnum == 1:
            if include_options:
                result += bytearray([optnum])
            continue

        optlen = orb(th_bytes[pos])
        pos += 1
        if pos + optlen - 2 > tcphdr_optend:
            logger.info("bad tcp option %d optlen %d beyond end-of-header",
                        optnum, optlen)
            break
        if optlen < 2:
            logger.info("bad tcp option %d optlen %d less than two",
                        optnum, optlen)
            break
        if optnum == 29:
            if optlen < 4:
                logger.info("bad tcp option %d optlen %d", optnum, optlen)
                break
            result += th_bytes[pos - 2: pos + 2]
            result += (optlen - 4) * b"\x00"
        elif include_options:
            result += th_bytes[pos - 2: pos + optlen - 2]
        pos += optlen - 2

    result += bytes(p[TCP].payload)
    return result
[ 56, 277, 280, 5788 ]
def METHOD_NAME(self, content, object_relation):
    attribute = {'description': content['description'], 'ui-priority': 1}
    if content['type'] in ['String', 'LowercaseString', 'ClassificationType',
                           'UppercaseString', 'Registry', 'JSONDict', 'JSON',
                           'TLP', 'Base64']:
        attribute['misp-attribute'] = 'text'
    elif content['type'] == 'DateTime':
        attribute['misp-attribute'] = 'datetime'
    elif content['type'] == 'ASN':
        attribute['misp-attribute'] = 'AS'
    elif content['type'] == 'FQDN':
        attribute['misp-attribute'] = 'text'
    elif content['type'] == 'Float':
        attribute['misp-attribute'] = 'float'
    elif (content['type'] in ['IPAddress', 'IPNetwork']
            and object_relation.startswith('destination')):
        attribute['misp-attribute'] = 'ip-dst'
    elif (content['type'] in ['IPAddress', 'IPNetwork']
            and object_relation.startswith('source')):
        attribute['misp-attribute'] = 'ip-src'
    elif content['type'] == 'Integer':
        attribute['misp-attribute'] = 'counter'
    elif content['type'] == 'Boolean':
        attribute['misp-attribute'] = 'boolean'
    elif content['type'] == 'URL':
        attribute['misp-attribute'] = 'url'
    elif content['type'] == 'Accuracy':
        attribute['misp-attribute'] = 'float'
    else:
        # the original string was missing its f-prefix, so the placeholders
        # were never interpolated
        raise Exception(f'Unknown type {content["type"]}: {object_relation} - {content}')
    return attribute
[ -1, 12091, 445 ]
def METHOD_NAME(self, client, server_addr, server_port):
    print(f'Testing SRP server: {server_addr}:{server_port}')

    # check if the SRP client can register to the SRP server
    client.srp_client_start(server_addr, server_port)
    client.srp_client_set_host_name('host1')
    client.srp_client_set_host_address(client.get_rloc())
    client.srp_client_add_service('ins1', '_ipp._tcp', 11111)
    self.simulator.go(3)
    self.assertEqual(client.srp_client_get_host_state(), 'Registered')

    # check if the SRP client can remove from the SRP server
    client.srp_client_remove_host()
    self.simulator.go(3)
    self.assertEqual(client.srp_client_get_host_state(), 'Removed')

    # stop the SRP client for the next round
    client.srp_client_stop()
    self.simulator.go(3)
[ 9, -1, 163 ]
def METHOD_NAME():
    temp = utils.tempdir()
    file_path = temp.relpath("temp.log")

    tsk, target = get_sample_task()
    inputs = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(0, 10)]
    results = [MeasureResult((i,), 0, 0, 0) for i in range(0, 10)]
    invalid_inp = MeasureInput(target, tsk, tsk.config_space.get(10))
    invalid_res = MeasureResult((10,), 0, 0, 0)

    # Erase the entity map to test if it will be ignored when loading back.
    invalid_inp.config._entity_map = {}

    with open(file_path, "w") as fo:
        cb = autotvm.callback.log_to_file(fo)
        cb(None, inputs, results)
        cb(None, [invalid_inp], [invalid_res])

    ref = zip(inputs, results)
    for x, y in zip(ref, autotvm.record.load_from_file(file_path)):
        assert x[1] == y[1]

    # Confirm functionality of multiple file loads
    hist_best = ApplyHistoryBest([file_path, file_path])
    x = hist_best.query(target, tsk.workload)
    assert str(x) == str(inputs[0][2])
[ 9, 171, 249 ]
def METHOD_NAME(api_client, data_source, organization, organization2):
    signup_group = SignUpGroupFactory(
        registration__event__publisher=organization,
        registration__event__data_source=data_source,
    )

    data_source.owner = organization2
    data_source.save(update_fields=["owner"])
    api_client.credentials(apikey=data_source.api_key)

    response = get_detail(api_client, signup_group.id)
    assert response.status_code == status.HTTP_403_FORBIDDEN
[ 9, 58, 59, 41, 909, 1044, 2286 ]
def METHOD_NAME(self, *args, **kwargs):
    if not hasattr(socket, 'AF_UNIX'):
        raise NotImplementedError
    return self.tcp_server(*args, family=socket.AF_UNIX, **kwargs)
[ 1226, 163 ]
def METHOD_NAME(self) -> None:
    da = create_test_dataarray_attrs()
    with xarray.set_options(display_style="text"):
        text = da._repr_html_()
    assert text.startswith("<pre>")
    assert "&lt;xarray.DataArray &#x27;var1&#x27;" in text
[ 9, 52, 15148, 641, 526 ]
def METHOD_NAME(inp, requires_grad):
    """Make a viewless tensor.

    View tensors have the undesirable side-effect of retaining a reference
    to the originally-viewed tensor, even after manually setting the '.data'
    field. This method creates a new tensor that links to the old tensor's
    data, without linking the viewed tensor, referenced via the '._base' field.
    """
    out = torch.empty(
        (1,),
        dtype=inp.dtype,
        device=inp.device,
        requires_grad=requires_grad,
    )
    out.data = inp.data
    return out
[ 1885, 93, 8685, 768 ]
def METHOD_NAME(self):
    self.all_stacks[1].obsolete = True
    self.all_stacks[3].obsolete = False
    self.all_stacks[4].obsolete = False
    self.all_stacks[5].obsolete = False
    self.all_stacks[3].dependencies.append(self.all_stacks[1])
    self.all_stacks[4].dependencies.append(self.all_stacks[3])
    self.all_stacks[5].dependencies.append(self.all_stacks[3])

    with pytest.raises(CannotPruneStackError):
        self.pruner.prune()
[ 9, 3724, 256, 8439, 1980, 3910, 69 ]
def METHOD_NAME(apiclient, idea, user, comment_factory):
    comment = comment_factory(pk=1, content_object=idea)
    idea.project.organisation.initiators.add(user)
    assert ModeratorCommentFeedback.objects.all().count() == 0

    url = reverse("moderatorfeedback-list", kwargs={"comment_pk": comment.pk})
    data = {"feedback_text": "a statement"}
    apiclient.force_authenticate(user=user)
    response = apiclient.post(url, data)

    assert response.status_code == 201
    assert ModeratorCommentFeedback.objects.all().count() == 1
[ 9, 6924, 1046, 238, 2921 ]
def METHOD_NAME(self) -> None: self.close()
[ 1602 ]
def METHOD_NAME(self):
    # Test send() timeout
    # couldn't figure out how to test it
    pass
[ 9, 353 ]
def METHOD_NAME(self):
    if not self.channel_id:
        return
    data = self.session.http.get(
        "https://cloudac.mildom.com/nonolive/gappserv/live/liveserver",
        params={
            "__platform": "web",
            "user_id": self.channel_id,
            "live_server_type": "hls",
        },
        headers={"Accept-Language": "en"},
        schema=validate.Schema(
            validate.parse_json(),
            {
                "code": int,
                validate.optional("message"): str,
                validate.optional("body"): {
                    "stream_server": validate.url(),
                },
            },
        ),
    )
    if self._is_api_error(data):
        return
    if data.get("body"):
        return data["body"]["stream_server"]
[ 19, 163 ]
def METHOD_NAME(monkeypatch):
    monkeypatch.setenv("PY_COLORS", "0")
    assert not should_do_markup()
[ 9, 7469, 381, -1 ]
def METHOD_NAME(self, messages: List[Dict[str, str]]) -> str:
    bittensor.logging.info("messages", str(messages))
    history = self._process_history(messages)
    bittensor.logging.info("history", str(history))
    resp = self.model(history)
    bittensor.logging.info("response", str(resp))
    return resp
[ 76 ]
def METHOD_NAME(self):
    def ratio(a, b):
        return a / b if b > 0 else 0

    return [
        ratio(self.stat.match1, self.stat.count1),
        ratio(self.stat.match2, self.stat.count2),
        ratio(self.stat.match3, self.stat.count3),
        ratio(self.stat.match4, self.stat.count4),
    ]
[ 1582 ]
def METHOD_NAME(self):
    self.assertLocalizedHolidays(
        "uk",
        ("2022-01-01", "Новий рік"),
        ("2022-01-06", "Богоявлення"),
        ("2022-03-07", "Чистий понеділок"),
        ("2022-03-25", "День незалежності"),
        ("2022-04-22", "Страсна пʼятниця"),
        ("2022-04-25", "Великодній понеділок"),
        ("2022-05-01", "День праці"),
        ("2022-05-02", "День праці (вихідний)"),
        ("2022-06-13", "День Святого Духа"),
        ("2022-08-15", "Успіння Пресвятої Богородиці"),
        ("2022-10-28", "День Охі"),
        ("2022-12-24", "Святий вечір"),
        ("2022-12-25", "Різдво Христове"),
        ("2022-12-26", "Собор Пресвятої Богородиці"),
        ("2022-12-31", "Переддень Нового року"),
    )
[ 9, 1594, 5039 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_request(
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(virtual_jf_detectors):
    d = ExperimentalDetector()
    d.frames = 10
    assert d.frames == 10
[ 9, 1427 ]
def METHOD_NAME(options):
    """
    Set up logging from the command line options
    """
    root_logger = logging.getLogger()
    add_stdout = False

    formatter = logging.Formatter("%(asctime)s %(levelname)-5.5s %(message)s")

    # Write out to a logfile
    if options.logfile:
        handler = ClosingFileHandler(options.logfile)
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        root_logger.addHandler(handler)
    else:
        # The logfile wasn't specified. Add a stdout logger.
        add_stdout = True

    if options.verbose:
        # Add a stdout logger as well in verbose mode
        root_logger.setLevel(logging.DEBUG)
        add_stdout = True
    else:
        root_logger.setLevel(logging.INFO)

    if add_stdout:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        stdout_handler.setLevel(logging.DEBUG)
        root_logger.addHandler(stdout_handler)
[ 102, 663 ]
def METHOD_NAME(self, data: bytes | bytearray | memoryview) -> None: ...
[ 77 ]
def METHOD_NAME(hashprev, coinbase, nTime=None):
    block = CBlock()
    if nTime is None:
        import time
        block.nTime = int(time.time() + 600)
    else:
        block.nTime = nTime
    block.hashPrevBlock = hashprev
    block.nBits = 0x207fffff  # Will break after a difficulty adjustment...
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block
[ 129, 573 ]
def METHOD_NAME(specfile):
    """Check patches being removed from a specfile"""
    no_patches = specfile.read_text().replace("\n### patches ###\n", "")
    patches = specfile.read_text().replace(
        "### patches ###\n",
        """\
[ 9, 188, 8089 ]
def METHOD_NAME(self): """boost::any assignment and casting""" import cppyy assert cppyy.gbl.boost from cppyy.gbl import std, boost val = boost.any() # test both by-ref and by rvalue v = std.vector[int]() val.__assign__(v) val.__assign__(std.move(std.vector[int](range(100)))) assert val.type() == cppyy.typeid(std.vector[int]) extract = boost.any_cast[std.vector[int]](val) assert type(extract) is std.vector[int] assert len(extract) == 100 extract += range(100) assert len(extract) == 200 val.__assign__(std.move(extract)) # move forced #assert len(extract) == 0 # not guaranteed by the standard # TODO: we hit boost::any_cast<int>(boost::any* operand) instead # of the reference version which raises boost.any_cast.__useffi__ = False try: # raises(Exception, boost.any_cast[int], val) assert not boost.any_cast[int](val) except Exception: # getting here is good, too ... pass extract = boost.any_cast[std.vector[int]](val) assert len(extract) == 200
[ 315, 2147, 558 ]
def METHOD_NAME(self):
    for max_sites in range(10):
        ab = _tsinfer.AncestorBuilder(num_samples=2, max_sites=max_sites)
        for _ in range(max_sites):
            ab.add_site(time=1, genotypes=[0, 1])
        for _ in range(2 * max_sites):
            with pytest.raises(_tsinfer.LibraryError) as record:
                ab.add_site(time=1, genotypes=[0, 1])
            msg = "Cannot add more sites than the specified maximum."
            assert str(record.value) == msg
[ 9, 238, 564, 1401, 371 ]
def METHOD_NAME(get_contract):
    code = """
[ 9, 245, 478 ]
def METHOD_NAME(self, outfp):
    self.outfp = outfp
    self.append = self.do_append
    self.print_stats = self.do_print
    self.enabled = True
[ 1317 ]
def METHOD_NAME(self, obj): return obj.stats.source_words
[ 19, 1458, 1473 ]
async def METHOD_NAME(args, options):
    result = await aio_to_thread(
        proc.exec_command,
        [get_core_fullpath()] + args,
        cwd=options.get("cwd") or os.getcwd(),
    )
    return (result["out"], result["err"], result["returncode"])
[ 128, 2929 ]
def METHOD_NAME(name, config_path, default_settings, **kwargs):
    """Install our custom module importer logic.

    Args:
        name (str): Module name to handle specially (e.g., "nautobot_config")
        config_path (str): Absolute path to the module in question (e.g., "/opt/nautobot/nautobot_config.py")
        default_settings (str): Settings module name to inherit settings from (e.g., "nautobot.core.settings")
    """
    global installed

    if installed:
        return

    # Ensure that our custom importer for the config module takes precedence
    # over standard Python import machinery
    sys.meta_path.insert(0, LoganImporter(name, config_path, default_settings, **kwargs))
    installed = True
[ 428 ]
def METHOD_NAME(name): """Returns a gid, given a group name.""" if getgrnam is None or name is None: return None try: result = getgrnam(name) except KeyError: result = None if result is not None: return result[2] return None
[ 19, 1524 ]
def METHOD_NAME(
    config, emitter, mock_provider, tmp_path, mock_instance_name, mock_is_base_available
):
    """Test clean with a complex list of bases."""
    config.name = "foo"
    bases_config = [
        BasesConfiguration(
            **{
                "build-on": [
                    Base(name="x1name", channel="x1channel", architectures=["x1arch"]),
                ],
                "run-on": [
                    Base(name="x2name", channel="x2channel", architectures=["x2arch"]),
                ],
            }
        ),
        BasesConfiguration(
            **{
                "build-on": [
                    Base(name="x3name", channel="x3channel", architectures=["x3arch"]),
                    Base(name="x3name", channel="x3channel", architectures=["x3arch"]),
                ],
                "run-on": [
                    Base(name="x4name", channel="x4channel", architectures=["x4arch"]),
                ],
            }
        ),
        BasesConfiguration(
            **{
                "build-on": [
                    Base(name="x5name", channel="x5channel", architectures=["x5arch"]),
                ],
                "run-on": [
                    Base(name="x6name", channel="x6channel", architectures=["x6arch"]),
                    Base(
                        name="x7name",
                        channel="x7channel",
                        architectures=["x7arch1", "x7arch2"],
                    ),
                ],
            }
        ),
    ]
    config.set(bases=bases_config)

    cmd = CleanCommand(config)
    cmd.run([])

    assert mock_provider.mock_calls == [
        mock.call.clean_project_environments(instance_name="test-instance-name-0"),
        mock.call.clean_project_environments(instance_name="test-instance-name-1"),
        mock.call.clean_project_environments(instance_name="test-instance-name-2"),
    ]
    assert mock_instance_name.mock_calls == [
        mock.call(
            bases_index=0,
            build_on_index=0,
            project_name="foo",
            project_path=tmp_path,
            target_arch=get_host_architecture(),
        ),
        mock.call(
            bases_index=1,
            build_on_index=0,
            project_name="foo",
            project_path=tmp_path,
            target_arch=get_host_architecture(),
        ),
        mock.call(
            bases_index=2,
            build_on_index=0,
            project_name="foo",
            project_path=tmp_path,
            target_arch=get_host_architecture(),
        ),
    ]
    assert mock_is_base_available.mock_calls == [
        mock.call.is_base_available(
            Base(name="x1name", channel="x1channel", architectures=["x1arch"])
        ),
        mock.call.is_base_available(
            Base(name="x3name", channel="x3channel", architectures=["x3arch"])
        ),
        mock.call.is_base_available(
            Base(name="x5name", channel="x5channel", architectures=["x5arch"])
        ),
    ]
    emitter.assert_message("Cleaning project 'foo'.")
    emitter.assert_debug("Cleaning environment 'test-instance-name-0'")
    emitter.assert_debug("Cleaning environment 'test-instance-name-1'")
    emitter.assert_debug("Cleaning environment 'test-instance-name-2'")
    emitter.assert_message("Cleaned project 'foo'.")
[ 9, 1356, 2587 ]
def METHOD_NAME(self, object, value):
    """Set an address record or records

    :param object: the instance of the field
    :param value: dict with address information or list of dicts
    :type value: list/tuple/dict
    """
    # Value is a list of dicts
    value = self.to_list(value)

    # Bail out non-supported address types
    address_types = self.get_address_types()
    value = filter(lambda ad: ad["type"] in address_types, value)

    # Set the value
    super(AddressField, self).METHOD_NAME(object, value)
[ 0 ]
def METHOD_NAME(self, input_data, expected_output):
    self._add_plugins_with_recursive_dependencies()
    result = self.scheduler.get_cumulative_remaining_dependencies(input_data)
    assert result == expected_output
[ 9, 19, 8218, 5872, 2410 ]
def METHOD_NAME(config): """Setup an instance of :class:`sentry_sdk.Client`. :param config: Sentry configuration :param client: class used to instantiate the sentry_sdk client. """ enabled = config.get("sentry_enabled", False) if not (HAS_SENTRY_SDK and enabled): return _logger.info("Initializing sentry...") if config.get("sentry_odoo_dir") and config.get("sentry_release"): _logger.debug( "Both sentry_odoo_dir and \ sentry_release defined, choosing sentry_release" ) if config.get("sentry_transport"): warnings.warn( "`sentry_transport` has been deprecated. " "Its not neccesary send it, will use `HttpTranport` by default.", DeprecationWarning, ) options = {} for option in const.get_sentry_options(): value = config.get("sentry_%s" % option.key, option.default) if isinstance(option.converter, abc.Callable): value = option.converter(value) options[option.key] = value exclude_loggers = const.split_multiple( config.get("sentry_exclude_loggers", const.DEFAULT_EXCLUDE_LOGGERS) ) if not options.get("release"): options["release"] = config.get( "sentry_release", get_odoo_commit(config.get("sentry_odoo_dir")) ) # Change name `ignore_exceptions` (with raven) # to `ignore_errors' (sentry_sdk) options["ignore_errors"] = options["ignore_exceptions"] del options["ignore_exceptions"] options["before_send"] = before_send options["integrations"] = [ options["logging_level"], ThreadingIntegration(propagate_hub=True), ] # Remove logging_level, since in sentry_sdk is include in 'integrations' del options["logging_level"] client = sentry_sdk.init(**options) sentry_sdk.set_tag("include_context", config.get("sentry_include_context", True)) if exclude_loggers: for item in exclude_loggers: ignore_logger(item) # The server app is already registered so patch it here if server: server.app = SentryWsgiMiddleware(server.app) # Patch the wsgi server in case of further registration odoo.http.Application = SentryWsgiMiddleware(odoo.http.Application) with sentry_sdk.push_scope() as scope: scope.set_extra("debug", False) sentry_sdk.capture_message("Starting Odoo Server", "info") return client
[ 15, 1063 ]
def METHOD_NAME(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        elif v == 'C':
            # Notice ceil_mode is true
            layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return layers
[ 93, 2315 ]
async def METHOD_NAME(self):
    merkle_verifier = MerkleVerifier(HexTreeHasher())
    leaf_index = 848049
    tree_size = 3630887
    expected_root_hash = (
        b"78316a05c9bcf14a3a4548f5b854a9adfcd46a4c034401b3ce7eb7ac2f1d0ecb"
    )
    assert (
        await merkle_verifier.calculate_root_hash(
            RAW_HEX_LEAF,
            leaf_index,
            SHA256_AUDIT_PATH[:],
            tree_size,
        )
        == expected_root_hash
    )
[ 9, 1162, 3802, 6668, 1104 ]
def METHOD_NAME(BSP_ROOT, dist_dir):
    import sys
    cwd_path = os.getcwd()
    sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
    from sdk_dist import dist_do_building
    dist_do_building(BSP_ROOT, dist_dir)
[ 1260, 276 ]
def METHOD_NAME(secret: bytes, msg: bytes) -> bytes:
    assert_bytes(msg)
    iv = bytes(os.urandom(16))
    ct = aes_encrypt_with_iv(secret, iv, msg)
    return iv + ct
[ 421, 9706, 1305, 321 ]
def METHOD_NAME(self, *args, **kwargs): """ Reset cache """ reset_workout_log(self.user_id, self.date.year, self.date.month) super(WorkoutSession, self).METHOD_NAME(*args, **kwargs)
[ 34 ]
def METHOD_NAME(self): """Hepler method that sets possible and required catalog attributes. Sets attributes: catalog_atts (list): Attributes to try to copy from star catalog. Missing ones will be ignored and removed from this list. required_catalog_atts(list): Attributes that cannot be missing or nan. """ # list of possible Star Catalog attributes self.catalog_atts = [ "Name", "Spec", "parx", "Umag", "Bmag", "Vmag", "Rmag", "Imag", "Jmag", "Hmag", "Kmag", "dist", "BV", "MV", "BC", "L", "coords", "pmra", "pmdec", "rv", "Binary_Cut", "hasKnownPlanet", ] # required catalog attributes self.required_catalog_atts = [ "Name", "Vmag", "BV", "MV", "BC", "L", "coords", "dist", ]
[ 0, 2824, 177 ]
def METHOD_NAME(file, nested_dirs_with_files):
    ret = file.rmdir(nested_dirs_with_files, recurse=True, verbose=True)
    assert ret["result"] is False
    assert len(ret["deleted"]) == 8
    assert len(ret["errors"]) == 19
    assert os.path.isdir(nested_dirs_with_files)
[ 9, 1275, 3832, 612, 2413, 41, 1537 ]
def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(queue, mp_res: MPALTListResult) -> None:
    for alt_info in mp_res.alt_info_list:
        pos = queue.find(alt_info)
        if pos is not None:
            queue.pop(alt_info)
[ 537, 651 ]
def METHOD_NAME(self): """Get the circuit for the ZYZ decomposition.""" q = QuantumRegister(self.num_qubits) qc = QuantumCircuit(q, name=self.name) diag = [1.0, 1.0] alpha, beta, gamma, _ = self._zyz_dec() if abs(alpha) > _EPS: qc.rz(alpha, q[0]) if abs(beta) > _EPS: qc.ry(beta, q[0]) if abs(gamma) > _EPS: if self.up_to_diagonal: diag = [np.exp(-1j * gamma / 2.0), np.exp(1j * gamma / 2.0)] else: qc.rz(gamma, q[0]) return qc, diag
[ 12867, 1708 ]
def METHOD_NAME(fname): """ Read benchmark output from a file and return the JSON object. REQUIRES: 'fname' names a file containing JSON benchmark output. """ with open(fname, 'r') as f: return json.load(f)
[ 557, 1668, 51 ]
def METHOD_NAME(
    text: str | bytes,
    message: _M,
    allow_unknown_extension: bool = ...,
    allow_field_number: bool = ...,
    descriptor_pool: DescriptorPool | None = ...,
    allow_unknown_field: bool = ...,
) -> _M: ...
[ 214 ]
def METHOD_NAME(): """ Test that setting FONT config changes all FONT_* settings except FONT_LOGO. Specifically, this test only checks that FONT_ANNOT_PRIMARY, FONT_ANNOT_SECONDARY, FONT_LABEL, and FONT_TITLE are modified. """ fig = Figure() with config(FONT="8p,red"): fig.basemap(region=[0, 9, 0, 9], projection="C3/3/9c", compass="jTL+w3c+d4.5+l") fig.basemap(compass="jBR+w3.5c+d-4.5+l") return fig
[ 9, 200, 2584, 206 ]
def METHOD_NAME(self): return inspect.getsource(self.__class__)
[ 1458 ]
def METHOD_NAME(watched_thread_factory, notify_watcher_mock, user, thread, user_reply):
    watched_thread_factory(user, thread, send_emails=True)
    notify_on_new_thread_reply(user_reply.id)
    notify_watcher_mock.assert_not_called()
[ 9, 959, 69, 80, 600, 1922, 7234 ]
def METHOD_NAME(): """ Parse the contents of ``/proc/net/route`` """ with salt.utils.files.fopen("/proc/net/route", "r") as fp_: out = salt.utils.stringutils.to_unicode(fp_.read()) ret = {} for line in out.splitlines(): tmp = {} if not line.strip(): continue if line.startswith("Iface"): continue comps = line.split() tmp["iface"] = comps[0] tmp["destination"] = _hex_to_octets(comps[1]) tmp["gateway"] = _hex_to_octets(comps[2]) tmp["flags"] = _route_flags(int(comps[3])) tmp["refcnt"] = comps[4] tmp["use"] = comps[5] tmp["metric"] = comps[6] tmp["mask"] = _hex_to_octets(comps[7]) tmp["mtu"] = comps[8] tmp["window"] = comps[9] tmp["irtt"] = comps[10] if comps[0] not in ret: ret[comps[0]] = [] ret[comps[0]].append(tmp) return ret
[ 214, 3968 ]