text (stringlengths: 15 – 7.82k)
ids (sequencelengths: 1 – 7)
def METHOD_NAME(self, extendee_name):
[ 416, 75, 2916, 3333 ]
def METHOD_NAME(self): instance = self.force_instance() response = self.post(reverse("wsone_api:start_instance_sync", args=(instance.pk,)), include_token=False) self.assertEqual(response.status_code, 401)
[ 9, 447, 164, 3166 ]
def METHOD_NAME(request): return Update(update_id=1, **request.param)
[ 1168, 86 ]
def METHOD_NAME(cls, *args, **kwargs): if cls._args_schema is not None: return cls._args_schema cls._args_schema = super().METHOD_NAME(*args, **kwargs) # define Arg Group "" _args_schema = cls._args_schema _args_schema.resource_group = AAZResourceGroupNameArg( required=True, ) _args_schema.name = AAZStrArg( options=["-n", "--name"], help="Name of the service endpoint policy.", required=True, id_part="name", ) return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(request): """Return request-host, as defined by RFC 2965. Variation from RFC: returned value is lowercased, for convenient comparison. """ url = request.get_full_url() parse_result = urllib.parse.urlparse(url) scheme, host, port = parse_result.scheme, parse_result.hostname, parse_result.port try: port = int(port) except (ValueError, TypeError): pass if host == "": host = request.get_header("Host", "") if port and port != default_ports.get(scheme): host = "%s:%s" % (host, port) return host
[ 377, 1806 ]
def METHOD_NAME(f): return lambda a: np.array([f(x) for x in a])
[ 2028, 291 ]
def METHOD_NAME(mock_api_client, mock_telemetry_client): return { "API_CLIENT": mock_api_client, "WORKSPACE_ID": "my_workspace_id", "resource_id": "my_resource_id", "TELEMETRY_CLIENT": mock_telemetry_client, }
[ 198, 279 ]
def METHOD_NAME(type: str, cmake_var_str: str, comment: List[str] ) -> cmake_var: """ Factory function that associates the correct type of variable and handles metadata, provided by the row's associated comments. """ cmake_type_str_to_class: Mapping[str, Type[cmake_var]] = { "BOOL" : cmake_var_bool, "INTERNAL" : cmake_var, "FILEPATH": cmake_var, "STRING" : cmake_var, } loaded_metadata = metadata(comment) if loaded_metadata.realtype is not None: type = loaded_metadata.realtype class_obj = cmake_type_str_to_class[type] obj = class_obj(cmake_var_str) obj.metadata = loaded_metadata return obj
[ 56, 334, 486 ]
def METHOD_NAME(self, *args, **kwargs): """ Custom sanity check for Julia packages """ #NOTE: we don't use Pkg.status with arguments as only supported for Julia >=v1.1 cmd = "unset EBJULIA_USER_DEPOT_PATH && unset EBJULIA_ADMIN_DEPOT_PATH && export JULIA_DEPOT_PATH=%s && export JULIA_PROJECT=%s && julia --eval 'using Pkg; Pkg.status()'" % (self.depot, self.projectdir) cmdttdouterr, _ = run_cmd(cmd, log_all=True, simple=False, regexp=False) self.log.error("Julia package %s sanity returned %s" % (self.name, cmdttdouterr)) return len(parse_log_for_error(cmdttdouterr, regExp="%s\s+v%s" % (self.package_name, self.version))) != 0
[ 1125, 250, 367 ]
def METHOD_NAME(num_workers, steps, workers): sessions = [] graphs = [] train_ops = [] for worker_id in range(num_workers): graph = ops.Graph() is_chief = (worker_id == 0) with graph.as_default(): worker_device = "/job:worker/task:%d/cpu:0" % (worker_id) ma_coustom = model_average_optimizer.ModelAverageCustomGetter( worker_device=worker_device) with variable_scope.variable_scope( "", custom_getter=ma_coustom), ops.device( device_setter.replica_device_setter( worker_device=worker_device, ps_device="/job:ps/task:0/cpu:0", ps_tasks=1)): global_step = variables.Variable(0, name="global_step", trainable=False) var_0 = variable_scope.get_variable(initializer=0.0, name="v0") var_1 = variable_scope.get_variable(initializer=1.0, name="v1") with ops.device("/job:worker/task:" + str(worker_id)): if worker_id == 0: grads_0 = constant_op.constant(-1.0) grads_1 = constant_op.constant(-1.0) else: grads_0 = constant_op.constant(-2.0) grads_1 = constant_op.constant(-2.0) sgd_opt = gradient_descent.GradientDescentOptimizer(1.0) opt = model_average_optimizer.ModelAverageOptimizer( opt=sgd_opt, num_worker=num_workers, ma_custom_getter=ma_coustom, is_chief=is_chief, interval_steps=steps) train_op = [ opt.apply_gradients([[grads_0, var_0], [grads_1, var_1]], global_step) ] ma_hook = opt.make_session_run_hook() # Creates MonitoredSession sess = training.MonitoredTrainingSession( workers[worker_id].target, hooks=[ma_hook]) sessions.append(sess) graphs.append(graph) train_ops.append(train_op) return sessions, graphs, train_ops
[ 19, 5930 ]
def METHOD_NAME(observations, model, may_parallelise, blas): time = timeit.default_timer ls = least_squares.crystallographic_ls( observations, model, non_linear_ls_engine[blas], may_parallelise, weighting_scheme=least_squares.mainstream_shelx_weighting(a=0)) m = ls.observations.fo_sq.size() n = ls.reparametrisation.n_independents # let's do worth of 5 Gflops at least n_trials = max(int(5e9/(0.5*m*n**2)), 1) building = 0 solving = 0 for i in range(n_trials): t0 = time() ls.build_up() t1 = time() rls = ls.reduced_problem() neqns = rls.step_equations() neqns.solve() t2 = time() building += t1 - t0 solving += t2 - t1 return (ls.observations.fo_sq.d_min(), len(ls.xray_structure.scatterers()), building/n_trials, solving/n_trials)
[ 1668 ]
def METHOD_NAME(test_client): variable_response = test_client.get("/variable/income_tax") assert variable_response.status_code == client.OK
[ 9, 1413, 544, 1153, 1210 ]
def METHOD_NAME(cls, languages): """Check if the `languages` are supported by the provider. A subset of the supported languages is returned. :param languages: the languages to check. :type languages: set of :class:`~babelfish.language.Language` :return: subset of the supported languages. :rtype: set of :class:`~babelfish.language.Language` """ return cls.languages & languages
[ 250, 2539 ]
def METHOD_NAME(self, opt_dict): r""" Plot protocol for Gf objects with a MeshImFreq. Parameters ---------- opt_dict: dictionary Can contain: - mode: string, default None Mode to plot the Green's function in: -- 'R': real part only -- 'I': imaginary part only - x_window: tuple, default None (xmin,xmax) - name: str name of the gf for the label and legend """ return plot_base.plot_base( self, opt_dict, r'$\omega_n$', lambda x : r'%s$(i\omega_n)$'%x, [x.imag for x in list(self.mesh.values())] )
[ -1 ]
def METHOD_NAME(storage_type: StorageType) -> Any: """Determines which uploader method to use based on storage type""" return { StorageType.s3.value: _s3_uploader, StorageType.local.value: _local_uploader, }[storage_type.value]
[ 19, 7332, 280, 200, 44 ]
def METHOD_NAME(self, execstr): if 'cursor' in execstr: return '%s %s' % (self.cursorBox.x1, self.cursorBox.x2), '%s %s' % (self.cursorBox.y1, self.cursorBox.y2) return '', ''
[ 462, 3230 ]
def METHOD_NAME(next_link=None): if not next_link: request = build_list_by_server_request( resource_group_name=resource_group_name, server_name=server_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.list_by_server.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = HttpRequest("GET", next_link) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request
[ 123, 377 ]
def METHOD_NAME(timestamp, filter_name): try: filter_date = datetime.strptime(timestamp, DATETIME_FORMAT) except ValueError: raise ValidationError("%(filter_name)s='%(ts)s' not a valid timestamp " "of format: %(format)s" % {"ts": timestamp, "format": DATETIME_FORMAT, "filter_name": filter_name}) yield filter_date
[ 153, 527 ]
def METHOD_NAME(caplog, optimizer, do_grad, do_stitch, return_fitted_val): pyhf.set_backend("jax", optimizer, precision="64b") pdf = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0]) data = pyhf.tensorlib.astensor([125.0] + pdf.config.auxdata) with caplog.at_level(logging.DEBUG, 'pyhf.optimize.opt_jax'): pyhf.infer.mle.fixed_poi_fit( 1.0, data, pdf, do_grad=do_grad, do_stitch=do_stitch, return_fitted_val=return_fitted_val, ) # jit assert 'jitting function' in caplog.text caplog.clear() with caplog.at_level(logging.DEBUG, 'pyhf.optimize.opt_jax'): pyhf.infer.mle.fixed_poi_fit( 2.0, data, pdf, do_grad=do_grad, do_stitch=do_stitch, return_fitted_val=return_fitted_val, ) # jit assert 'jitting function' not in caplog.text with caplog.at_level(logging.DEBUG, 'pyhf.optimize.opt_jax'): pyhf.infer.mle.fit( data, pdf, do_grad=do_grad, do_stitch=do_stitch, return_fitted_val=return_fitted_val, ) # jit assert 'jitting function' in caplog.text caplog.clear() with caplog.at_level(logging.DEBUG, 'pyhf.optimize.opt_jax'): pyhf.infer.mle.fit( data, pdf, do_grad=do_grad, do_stitch=do_stitch, return_fitted_val=return_fitted_val, ) # jit assert 'jitting function' not in caplog.text with caplog.at_level(logging.DEBUG, 'pyhf.optimize.opt_jax'): pyhf.infer.mle.fixed_poi_fit( 3.0, data, pdf, do_grad=do_grad, do_stitch=do_stitch, return_fitted_val=return_fitted_val, ) # jit assert 'jitting function' not in caplog.text
[ 9, 757, 3821 ]
def METHOD_NAME(name): return unittest.skipUnless(name in ctypes_symbols, '{!r} is required'.format(name))
[ 562, 1608 ]
def METHOD_NAME(self, session): pass
[ 69, 1072 ]
def METHOD_NAME(config: dict): return Configuration(**config)
[ 187 ]
def METHOD_NAME(self, cls, log_entry_model): self.log_entry_models_by_type.register(cls, value=log_entry_model) self.log_entry_models.add(log_entry_model)
[ 372, 578 ]
def METHOD_NAME(self, endpoint_name: str): """Delete a local endpoint. :param str endpoint_name: Name of local endpoint to delete. """ build_directory = self._get_build_directory(endpoint_name=endpoint_name) shutil.rmtree(build_directory)
[ 34 ]
def METHOD_NAME(state_dict: Dict[str, Any], keep_non_lora=False): ''' if keep_non_lora, also return non_lora state_dict ''' state_dict_lora = OrderedDict() state_dict_non_lora = OrderedDict() for k, v in state_dict.items(): if 'lora_A' in k or 'lora_B' in k: state_dict_lora[k] = v elif keep_non_lora: state_dict_non_lora[k] = v if keep_non_lora: return state_dict_lora, state_dict_non_lora else: return state_dict_lora, None
[ 527, 551, 553, 402 ]
def METHOD_NAME(sp, pars): """ Given a spectrum and some parameters, calculate the chi^2 value """ return ((sp.specfit.get_model_frompars(sp.xarr, pars) - sp.specfit.spectofit)**2 / (sp.specfit.errspec**2) ).sum()
[ 316 ]
def METHOD_NAME(self): """test expression with message return""" template = 'ak_message("some message");return False' evaluator = PolicyEvaluator("test") evaluator.set_policy_request(self.request) result = evaluator.evaluate(template) self.assertEqual(result.passing, False) self.assertEqual(result.messages, ("some message",))
[ 9, 1107 ]
async def METHOD_NAME( self, resource_group_name: str, vault_name: str, **kwargs: Any ) -> _models.PrivateLinkResourceListResult: """Gets the private link resources supported for the key vault. :param resource_group_name: Name of the resource group that contains the key vault. Required. :type resource_group_name: str :param vault_name: The name of the key vault. Required. :type vault_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PrivateLinkResourceListResult or the result of cls(response) :rtype: ~azure.mgmt.keyvault.v2020_04_01_preview.models.PrivateLinkResourceListResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2020-04-01-preview")) cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None) request = build_list_by_vault_request( resource_group_name=resource_group_name, vault_name=vault_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.METHOD_NAME.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) _stream = False pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=_stream, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
[ 245, 604, 78 ]
def METHOD_NAME(self, msg): """ Write out a message at the warning level @param msg The message to write out """ self._log.writeLogMessage(WARNING, msg)
[ 3437 ]
def METHOD_NAME(gcommdct): """extract embedded group data from commdct. return gdict -> {g1:[obj1, obj2, obj3], g2:[obj4, ..]}""" gdict = {} for objidd in gcommdct: group = objidd[0]["group"] objname = objidd[0]["idfobj"] if group in gdict: gdict[group].append(objname) else: gdict[group] = [objname] return gdict
[ -1 ]
def METHOD_NAME(self, kwargs, expected): """ Test calculated values based on Figure 1 of Bellan 2012 (DOI: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856). """ # theta and k values need to be single valued for this test to function # correctly cs = cs_(kwargs["T_e"], kwargs["T_i"], kwargs["ion"]) va = va_(kwargs["B"], kwargs["n_i"], ion=kwargs["ion"]) wci = wc_(kwargs["B"], kwargs["ion"]) beta = (cs / va).value ** 2 if not np.isclose(beta, 0.4, atol=1e-4): pytest.fail( f"The Bellan 2012 paper requires a 'beta' value of 0.4 and the test " f"parameters yielded {beta:.6f}." ) Lambda = (kwargs["k"] * va / wci).value ** 2 if not np.isclose(Lambda, 0.4, atol=1e-4): pytest.fail( f"The Bellan 2012 paper requires a 'Lambda' value of 0.4 and the test " f"parameters yielded {Lambda:.6f}." ) ws = two_fluid(**kwargs) for mode, val in ws.items(): norm = (np.absolute(val) / (kwargs["k"] * va)).value ** 2 assert np.isclose(norm, expected[mode])
[ 9, 69, -1, 7420 ]
def METHOD_NAME(root_path, domain, batch_size, kwargs, train_val_split=.5, rand_split=True): kwargs_fin = dict(shuffle=True, drop_last=True) kwargs_fin.update(kwargs) normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose( [ResizeImage(256), transforms.Resize(256), transforms.CenterCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]) data = datasets.ImageFolder(root=os.path.join( root_path, domain), transform=transform) if train_val_split <= 0: train_loader = torch.utils.data.DataLoader( data, batch_size=batch_size, **kwargs_fin) return train_loader else: train_loader, val_loader = load_train_valid_split( data, batch_size, kwargs_fin, val_ratio=1.-train_val_split, rand_split=rand_split) return train_loader, val_loader
[ 557, 2685 ]
def METHOD_NAME(): """ Gets current "date and time" and "current date and time minus 1 hour in list [current, current_minus_1h] """ date_format = "%Y-%m-%d %H:%M:%S" current = datetime.strptime( datetime.now(pytz.timezone("America/Sao_Paulo")).strftime(date_format), date_format, ) current_minus_1h = current - timedelta(minutes=60) return current_minus_1h, current
[ 1056, 153, 104 ]
def METHOD_NAME(user, obj): return obj and SubmissionStates.REJECTED in SubmissionStates.valid_next_states.get( obj.state, [] )
[ 1046, 673, 5307 ]
def METHOD_NAME(config): api = API(config) headers, status, content = api.landing_page({}, {'f': 'json'}) content = json.loads(content) assert headers['Content-Type'] == 'application/json' assert status == 200 assert len(content['links']) == 15 assert content['stac_version'] == '1.0.0' assert content['type'] == 'Catalog' assert len(content['conformsTo']) == 18 assert len(content['keywords']) == 3
[ 9, 9914, 1174 ]
def METHOD_NAME(self, primitive):
[ 9, 129, 9, 219, 43, 1478 ]
def METHOD_NAME(**kwargs): return ";".join([f"{k}={v}" for k, v in kwargs.items()])
[ 56, 6732, 550, 144 ]
def METHOD_NAME(): resolutions = m.different_resolutions() time = datetime.datetime.now() resolutions.timestamp_h = time resolutions.timestamp_m = time resolutions.timestamp_s = time resolutions.timestamp_ms = time resolutions.timestamp_us = time
[ 9, 2076, 955, 2077 ]
def METHOD_NAME(): """Test the sign_packet_with_crc_key tool for the SEND_CONTROL_PACKET for off state.""" packet = packets.SEND_CONTROL_PACKET.format( SUT_SESSION_ID, SUT_TIMESTAMP, SUT_DEVICE_ID, Command.OFF.value, packets.NO_TIMER_REQUESTED) assert_that(sign_packet_with_crc_key(packet)).is_equal_to(packet + "6c432cf4")
[ 9, 2452, 5788, 41, 2377, 59, 43 ]
def METHOD_NAME(conn, cursor, module, db, target): if os.path.isfile(target): with open(target, 'r') as backup: sqlQuery = "USE [%s]\n" % db for line in backup: if line is None: break elif line.startswith('GO'): cursor.execute(sqlQuery) sqlQuery = "USE [%s]\n" % db else: sqlQuery += line cursor.execute(sqlQuery) conn.commit() return 0, "import successful", "" else: return 1, "cannot find target file", "cannot find target file"
[ 1267, 512 ]
def METHOD_NAME(*, qualifier: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop("template_url", "/report/optional") # Construct parameters if qualifier is not None: _params["qualifier"] = _SERIALIZER.query("qualifier", qualifier, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
[ 56, 19, 665, 339, 377 ]
def METHOD_NAME(client: ContractingClient): for contract in REQUIRED_CONTRACTS: if not RewardManager.contract_exists(contract, client): log.error('Reward contracts not setup.') return False return True
[ 137, 102 ]
def METHOD_NAME(self) -> ServiceDiscoveryBackend: """Return backend instance specific for this region.""" return servicediscovery_backends[self.current_account][self.region]
[ -1, 3127 ]
def METHOD_NAME(self, model, feature_dim, aux_num_out): if aux_num_out is not None: model = model(aux_num_out=aux_num_out) else: model = model() model.eval() batch_size, num_frames = 3, 1024 features = torch.randn(batch_size, num_frames, feature_dim) input_lengths = torch.zeros(batch_size) _, output_lengths = model(features, input_lengths) self.assertEqual(torch.zeros_like(output_lengths), output_lengths) _, output_lengths = model.extract_features(features, input_lengths) self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
[ 9, 313, 799 ]
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: """Runs the network request through the client's chained policies. >>> from azure.core.rest import HttpRequest >>> request = HttpRequest("GET", "https://www.example.org/") <HttpRequest [GET], url: 'https://www.example.org/'> >>> response = client._send_request(request) <HttpResponse: 200 OK> For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request :param request: The network request you want to make. Required. :type request: ~azure.core.rest.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to False. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.rest.HttpResponse """ request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs)
[ 353, 377 ]
def METHOD_NAME(image, cutoffs, squared_butterworth=True, order=3.0, npad=0): """Lowpass and highpass butterworth filtering at all specified cutoffs. Parameters ---------- image : ndarray The image to be filtered. cutoffs : sequence of int Both lowpass and highpass filtering will be performed for each cutoff frequency in `cutoffs`. squared_butterworth : bool, optional Whether the traditional Butterworth filter or its square is used. order : float, optional The order of the Butterworth filter Returns ------- lowpass_filtered : list of ndarray List of images lowpass filtered at the frequencies in `cutoffs`. highpass_filtered : list of ndarray List of images highpass filtered at the frequencies in `cutoffs`. """ lowpass_filtered = [] highpass_filtered = [] for cutoff in cutoffs: lowpass_filtered.append( filters.butterworth( image, cutoff_frequency_ratio=cutoff, order=order, high_pass=False, squared_butterworth=squared_butterworth, npad=npad, ) ) highpass_filtered.append( filters.butterworth( image, cutoff_frequency_ratio=cutoff, order=order, high_pass=True, squared_butterworth=squared_butterworth, npad=npad, ) ) return lowpass_filtered, highpass_filtered
[ 19, 1221 ]
def METHOD_NAME(self): parameters = { **self.serialize_url_param( "resourceGroupName", self.ctx.args.resource_group, required=True, ), **self.serialize_url_param( "subscriptionId", self.ctx.subscription_id, required=True, ), **self.serialize_url_param( "workspaceName", self.ctx.args.workspace_name, required=True, ), } return parameters
[ 274, 386 ]
def METHOD_NAME(self, container_arch_map, to_be_deleted_exp): cleaner = MockedContainerCleaner(container_arch_map) to_be_deleted = cleaner.findSourcepkgsToDelete("mock:prj") to_be_deleted.sort() self.assertEqual(to_be_deleted, to_be_deleted_exp)
[ 74, 9 ]
def METHOD_NAME(self, n_points, pc_align=False, center_node_id=None, center_coord=None, method="kdtree", verbose=False): if center_node_id is None and center_coord is None: center_node_id = np.random.randint(len(self.vertices)) if center_coord is None: center_coord = self.vertices[center_node_id] n_samples = np.min([n_points, len(self.vertices)]) if method == "kdtree": dists, node_ids = self.kdtree.query(center_coord, n_samples) if verbose: print(np.mean(dists), np.max(dists), np.min(dists)) elif method == "graph": dist_dict = nx.single_source_dijkstra_path_length(self.graph, center_node_id, weight="weight") sorting = np.argsort(np.array(list(dist_dict.values()))) node_ids = np.array(list(dist_dict.keys()))[sorting[:n_points]] else: raise Exception("unknow method") local_vertices = self.vertices[node_ids] if pc_align: local_vertices = self.calc_pc_align(local_vertices) return local_vertices, center_node_id
[ 19, 125, 1179 ]
def METHOD_NAME(self): self._write_in_file(self.some_string) bad_level = zstandard.MAX_COMPRESSION_LEVEL+1 self._call_php("compress", level=bad_level) self.kphp_server.assert_log([ "zstd_compress: compression level \\({}\\) must be within -\\d*..22 or equal to 0".format(bad_level), ]) self.assertEqual(self._read_out_file(), b"false")
[ 9, 2800, 1068, 33 ]
def METHOD_NAME(text): "Gets all struct definitions from text.""" return get_all_composite_types(text, 'struct')
[ 19, 75, 10977 ]
def METHOD_NAME(device: Optional[Union[str, Device]] = None) -> None: """ Sets the globally used default :class:`~heat.core.device.Device`. Parameters ---------- device : str or Device The device to be set """ global __default_device __default_device = sanitize_device(device)
[ 1080, 398 ]
def METHOD_NAME(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]: images_dp, targets_dp = resource_dps if self._split == "train": targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat")) targets_dp = StanfordCarsLabelReader(targets_dp) dp = Zipper(images_dp, targets_dp) dp = hint_shuffling(dp) dp = hint_sharding(dp) return Mapper(dp, self._prepare_sample)
[ 3096 ]
def METHOD_NAME(mp_edit_control, enabled, dummy): """Check the current lazy-load status of the tabs.""" ce_controls = dummy_controls = 0 for tab in mp_edit_control.controls.values(): if isinstance(tab, widgets.Label): dummy_controls += 1 else: ce_controls += 1 check.equal(ce_controls, enabled, "One control initialized") check.equal(dummy_controls, dummy, "Rest of controls are dummy")
[ 250, 5678, 551 ]
def METHOD_NAME(environ): """Update 'environ' with trivial defaults for testing purposes This adds various parameters required for WSGI, including HTTP_HOST, SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO, and all of the wsgi.* variables. It only supplies default values, and does not replace any existing settings for these variables. This routine is intended to make it easier for unit tests of WSGI servers and applications to set up dummy environments. It should *not* be used by actual WSGI servers or applications, since the data is fake! """ environ.setdefault('SERVER_NAME','127.0.0.1') environ.setdefault('SERVER_PROTOCOL','HTTP/1.0') environ.setdefault('HTTP_HOST',environ['SERVER_NAME']) environ.setdefault('REQUEST_METHOD','GET') if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ: environ.setdefault('SCRIPT_NAME','') environ.setdefault('PATH_INFO','/') environ.setdefault('wsgi.version', (1,0)) environ.setdefault('wsgi.run_once', 0) environ.setdefault('wsgi.multithread', 0) environ.setdefault('wsgi.multiprocess', 0) from io import StringIO, BytesIO environ.setdefault('wsgi.input', BytesIO()) environ.setdefault('wsgi.errors', StringIO()) environ.setdefault('wsgi.url_scheme',guess_scheme(environ)) if environ['wsgi.url_scheme']=='http': environ.setdefault('SERVER_PORT', '80') elif environ['wsgi.url_scheme']=='https': environ.setdefault('SERVER_PORT', '443')
[ 102, 1617, 1618 ]
def METHOD_NAME(self) -> StructuredModel.RecordModel: return self.__model
[ 578 ]
def METHOD_NAME(mock_tools, tmp_path): """If the download archive is corrupted, the validator fails.""" # Create a mock of a previously installed WiX version. wix_path = tmp_path / "tools" / "wix" wix_path.mkdir(parents=True) (wix_path / "heat.exe").touch() (wix_path / "light.exe").touch() (wix_path / "candle.exe").touch() # Mock the download wix_zip_path = os.fsdecode(tmp_path / "tools" / "wix.zip") wix_zip = MagicMock() wix_zip.__fspath__.return_value = wix_zip_path mock_tools.download.file.return_value = wix_zip # Mock an unpack failure mock_tools.shutil.unpack_archive.side_effect = EOFError # Create an SDK wrapper wix = WiX(mock_tools, wix_home=wix_path, bin_install=True) # Upgrade the install. This will trigger a download, # but the unpack will fail. with pytest.raises(BriefcaseCommandError): wix.upgrade() # A download was initiated mock_tools.download.file.assert_called_with( url=WIX_DOWNLOAD_URL, download_path=tmp_path / "tools", role="WiX", ) # The download was unpacked. mock_tools.shutil.unpack_archive.assert_called_with( os.fsdecode(wix_zip_path), extract_dir=os.fsdecode(wix_path) ) # The zip file was not removed assert wix_zip.unlink.call_count == 0
[ 9, 789, 180 ]
def METHOD_NAME(resource: str): with openFile(resource, 'rb') as f: yaml = str(f.read(), "utf-8") return unsafe_parse_yaml(yaml)
[ 214, 406, 2942 ]
def METHOD_NAME(show_plots, useClassicElem, accuracy): testFailCount = 0 # zero unit test result counter testMessages = [] # create empty array to store test log messages unitTaskName = "unitTask" # arbitrary name (don't change) unitProcessName = "TestProcess" # arbitrary name (don't change) # Create a sim meanOEFeedback as an empty container unitTestSim = SimulationBaseClass.SimBaseClass() # Create test thread testProcessRate = macros.sec2nano(0.1) # process rate testProc = unitTestSim.CreateNewProcess(unitProcessName) # create new process testProc.addTask(unitTestSim.CreateNewTask(unitTaskName, testProcessRate)) # create new task # Construct algorithm and associated C++ container module = meanOEFeedback.meanOEFeedback() module.ModelTag = "meanOEFeedback" # update python name of test meanOEFeedback module.targetDiffOeMean = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] module.mu = orbitalMotion.MU_EARTH * 1e9 # [m^3/s^2] module.req = orbitalMotion.REQ_EARTH * 1e3 # [m] module.J2 = orbitalMotion.J2_EARTH # [] module.K = [1e7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e7] if(useClassicElem): module.oeType = 0 # 0: classic else: module.oeType = 1 # 1: equinoctial # Add test meanOEFeedback to runtime call list unitTestSim.AddModelToTask(unitTaskName, module) # Create input message and size it because the regular creator of that message # is not part of the test. # # Chief Navigation Message # oe = orbitalMotion.ClassicElements() oe.a = 20000e3 # [m] oe.e = 0.1 oe.i = 0.2 oe.Omega = 0.3 oe.omega = 0.4 oe.f = 0.5 (r_BN_N, v_BN_N) = orbitalMotion.elem2rv(orbitalMotion.MU_EARTH*1e9, oe) chiefNavStateOutData = messaging.NavTransMsgPayload() # Create a structure for the input message chiefNavStateOutData.timeTag = 0 chiefNavStateOutData.r_BN_N = r_BN_N chiefNavStateOutData.v_BN_N = v_BN_N chiefNavStateOutData.vehAccumDV = [0, 0, 0] chiefInMsg = messaging.NavTransMsg().write(chiefNavStateOutData) # # Deputy Navigation Message # oe2 = orbitalMotion.ClassicElements() oe2.a = (1 + 0.0006) * 7000e3 # [m] oe2.e = 0.2 + 0.0005 oe2.i = 0.0 + 0.0004 oe2.Omega = 0.0 + 0.0003 oe2.omega = 0.0 + 0.0002 oe2.f = 0.0001 (r_BN_N2, v_BN_N2) = orbitalMotion.elem2rv(orbitalMotion.MU_EARTH*1e9, oe2) deputyNavStateOutData = messaging.NavTransMsgPayload() # Create a structure for the input message deputyNavStateOutData.timeTag = 0 deputyNavStateOutData.r_BN_N = r_BN_N2 deputyNavStateOutData.v_BN_N = v_BN_N2 deputyNavStateOutData.vehAccumDV = [0, 0, 0] deputyInMsg = messaging.NavTransMsg().write(deputyNavStateOutData) # Setup logging on the test meanOEFeedback output message so that we get all the writes to it dataLog = module.forceOutMsg.recorder() unitTestSim.AddModelToTask(unitTaskName, dataLog) # connect messages module.chiefTransInMsg.subscribeTo(chiefInMsg) module.deputyTransInMsg.subscribeTo(deputyInMsg) # Need to call the self-init and cross-init methods unitTestSim.InitializeSimulation() # Set the simulation time. # NOTE: the total simulation time may be longer than this value. The # simulation is stopped at the next logging event on or after the # simulation end time. unitTestSim.ConfigureStopTime(testProcessRate) # seconds to stop simulation # Begin the simulation time run set above unitTestSim.ExecuteSimulation() # This pulls the actual data log from the simulation run. forceOutput = dataLog.forceRequestInertial # set the filtered output truth states if useClassicElem: trueVector = [[-849.57347406544340628897771239280701, 1849.77641265032843875815160572528839, 136.07817734479317550722043961286545]] else: trueVector = [[-1655.37188207880308254971168935298920, 1788.61776379042521512019447982311249, 52.54814237453938119415397522971034]] # compare the meanOEFeedback results to the truth values for i in range(0, len(trueVector)): # check a vector values if not unitTestSupport.isArrayEqual(forceOutput[i], trueVector[i], 3, accuracy): testFailCount += 1 testMessages.append("FAILED: " + module.ModelTag + " Module failed " + ".forceRequestInertial" + " unit test at t=" + str(dataLog.times()[i]*macros.NANO2SEC) + "sec\n") # print out success message if no error were found if testFailCount == 0: print("PASSED: " + module.ModelTag) print("This test uses an accuracy value of " + str(accuracy)) # each test method requires a single assert method to be called # this check below just makes sure no sub-test failures were found return [testFailCount, ''.join(testMessages)]
[ 314, 7380, 2921, 9, 559 ]
def METHOD_NAME(self): pred_proba = self.clf.predict_proba(self.X_test) assert (pred_proba.min() >= 0) assert (pred_proba.max() <= 1)
[ 9, 2726, 2550 ]
def METHOD_NAME(self): perm_tester = PermutationTest(self.create_hyperpipe_no_mongo, n_perms=2, n_processes=1, random_state=11, permutation_id=str(uuid.uuid4())) with self.assertRaises(ValueError): perm_tester.fit(self.X, self.y)
[ 9, 654, 2176, 550, 144 ]
def METHOD_NAME(self): get(self, **self.conan_data["sources"][self.version], strip_root=True)
[ 1458 ]
def METHOD_NAME( pex, # type: str *additional_args, # type: str **additional_env # type: Any ): # type: (...) -> List[str] isolated_sys_path = execute_sys_path_dump_pex(pex) return list( execute_sys_path_dump_pex(pex, *additional_args, **additional_env) - isolated_sys_path )
[ 203, 2900, 3709, 157 ]
def METHOD_NAME(self, record): if record.levelno >= logging.ERROR: self.error_count += 1 elif record.levelno >= logging.WARNING: self.warning_count += 1 return True
[ 527 ]
def METHOD_NAME( request: HttpRequest, user_profile: UserProfile, payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
[ 58, 7935, 12 ]
def METHOD_NAME(self, namespace_scoped_permission_obj, project_id, cluster_id, namespace_name): """测试场景:有命名空间域资源使用权限(同时有集群/项目查看权限)""" perm_ctx = NamespaceScopedPermCtx( username=roles.ADMIN_USER, project_id=project_id, cluster_id=cluster_id, name=namespace_name ) assert namespace_scoped_permission_obj.can_use(perm_ctx)
[ 9, 1046, 1080 ]
def METHOD_NAME(skip_curdir=1, maxlevels=0, force=0, quiet=0): """Byte-compile all module on sys.path. Arguments (all optional): skip_curdir: if true, skip current directory (default true) maxlevels: max recursion level (default 0) force: as for compile_dir() (default 0) quiet: as for compile_dir() (default 0) """ success = 1 for dir in sys.path: if (not dir or dir == os.curdir) and skip_curdir: print 'Skipping current directory' else: success = success and compile_dir(dir, maxlevels, None, force, quiet=quiet) return success
[ 296, 157 ]
def METHOD_NAME(self, *args, **kwargs): return self._trace_cmd("get", *args, **kwargs)
[ 19 ]
def METHOD_NAME(self) -> Text:
[ 147 ]
def METHOD_NAME(self, nodes): """ Set the backdrop size to fit around specified nodes. Args: nodes (list[NodeGraphQt.NodeObject]): list of nodes. """ if not nodes: return self.graph.begin_undo('"{}" wrap nodes'.format(self.name())) size = self.view.calc_backdrop_size([n.view for n in nodes]) self.set_property('width', size['width']) self.set_property('height', size['height']) self.set_pos(*size['pos']) self.graph.end_undo()
[ 503, 480 ]
def METHOD_NAME(self):
[ 56, 1179 ]
def METHOD_NAME(self): super().METHOD_NAME() # Probability of error/failure self._p = stats.Mean() # Minimum values observed self._p_min = None self._s_min = None # The sum of p_min and s_min, to avoid calculating it every time self._ps_min = float("inf")
[ 656 ]
def METHOD_NAME(self): # Tests that rhat works # Tests Rhat computation for one parameter, chains.shape=(2, 4) chains = np.array([[1.0, 1.1, 1.4, 1.3], [1.0, 2.0, 3.0, 4.0]]) self.assertAlmostEqual( pints._diagnostics.rhat(chains), 2.3303847470550716, 6) # Test Rhat computation for two parameters, chains.shape=(3, 4, 2) chains = np.array([ [ [-1.10580535, 2.26589882], [0.35604827, 1.03523364], [-1.62581126, 0.47308597], [1.03999619, 0.58203464] ], [ [-1.04755457, -2.28410098], [0.17577692, -0.79433186], [-0.07979098, -1.87816551], [-1.39836319, 0.95119085] ], [ [-1.1182588, -0.34647435], [1.36928142, -1.4079284], [0.92272047, -1.49997615], [0.89531238, 0.63207977] ]]) y = pints._diagnostics.rhat(chains) d = np.array(y) - np.array([0.84735944450487122, 1.1712652416950846]) self.assertLess(np.linalg.norm(d), 0.01)
[ 9, 14068 ]
def METHOD_NAME(destination, root): """ Writes plugin info tree to file @ In, destination, string, where to write file to @ In, root, xml.etree.ElementTree.Element, element to write @ Out, None """ xmlUtils.toFile(destination, root, pretty=True)
[ 77, 2793, 151 ]
def METHOD_NAME(): '''Test that the reference element setter and getter work as expected, including raising an exception if the value is invalid. ''' mesh_arg = MetaMeshArgMetadata("adjacent_face") with pytest.raises(ValueError) as info: mesh_arg.mesh = "invalid" assert ("The 'mesh property' metadata should be a recognised value (one " "of ['adjacent_face']) but found 'invalid'." in str(info.value)) mesh_arg.mesh = "adjacent_face" assert mesh_arg.mesh == "adjacent_face"
[ 9, 1949, 800, 801 ]
def METHOD_NAME(firmware: Firmware, backend: BackendInterface, navigator: Navigator): app_client = EthAppClient(backend) with app_client.eip712_sign_legacy( BIP32_PATH, bytes.fromhex('6137beb405d9ff777172aa879e33edb34a1460e701802746c5ef96e741710e59'), bytes.fromhex('eb4221181ff3f1a83ea7313993ca9218496e424604ba9492bb4052c03d5c3df8')): moves = list() if firmware.device.startswith("nano"): moves += [ NavInsID.RIGHT_CLICK ] if firmware.device == "nanos": screens_per_hash = 4 else: screens_per_hash = 2 moves += [ NavInsID.RIGHT_CLICK ] * screens_per_hash * 2 moves += [ NavInsID.BOTH_CLICK ] else: moves += [ NavInsID.USE_CASE_REVIEW_TAP ] * 2 moves += [ NavInsID.USE_CASE_REVIEW_CONFIRM ] navigator.navigate(moves) v, r, s = ResponseParser.signature(app_client.response().data) assert v == bytes.fromhex("1c") assert r == bytes.fromhex("ea66f747173762715751c889fea8722acac3fc35db2c226d37a2e58815398f64") assert s == bytes.fromhex("52d8ba9153de9255da220ffd36762c0b027701a3b5110f0a765f94b16a9dfb55")
[ 9, 7583, 3116 ]
def METHOD_NAME(env, symlinks, libnode, **kw) -> None: """Used by emitters to handle (shared/versioned) library symlinks""" Verbose = False # nodes involved in process... all symlinks + library nodes = list(set([x for x, y in symlinks] + [libnode])) clean_targets = kw.get('clean_targets', []) if not is_List(clean_targets): clean_targets = [clean_targets] for link, linktgt in symlinks: env.SideEffect(link, linktgt) if Verbose: print("EmitLibSymlinks: SideEffect(%r,%r)" % (link.get_path(), linktgt.get_path())) clean_list = [x for x in nodes if x != linktgt] env.Clean(list(set([linktgt] + clean_targets)), clean_list) if Verbose: print("EmitLibSymlinks: Clean(%r,%r)" % (linktgt.get_path(), [x.get_path() for x in clean_list]))
[ 2648, 124, 1826 ]
def METHOD_NAME(bench_config, monkeypatch, make_project): rev = bench_config.project_rev url = bench_config.project_git_repo if os.path.isdir(url): path = url assert not rev else: path = make_project(url, rev=rev) monkeypatch.chdir(path)
[ 155 ]
def METHOD_NAME(): try: __warningregistry__ except NameError: pass else: for key in __warningregistry__.keys(): _, cls, _ = key if cls is CarbonWarning: del __warningregistry__[key]
[ 656, 3437, 510 ]
def METHOD_NAME(index): index, values = index right = make_rng(13).permutation(np.arange(len(values))) left = index.shuffle(shuffle=13) assert (left == right).all()
[ 9, 1124, 962 ]
def METHOD_NAME(self): self.assertHoliday(f"{year}-06-15" for year in range(1997, 2050)) self.assertNoHoliday(f"{year}-06-15" for year in range(1990, 1997))
[ 9, -1, 1724 ]
def METHOD_NAME(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return value return wrapper
[ 972 ]
def METHOD_NAME(): """Check build tags and sign those we missed.""" db_factory = transactional_session_maker() older_than = datetime.utcnow() - timedelta(days=config.get('check_signed_builds_delay')) with db_factory() as session: updates = models.Update.query.filter( models.Update.status == models.UpdateStatus.pending ).filter( models.Update.release_id == models.Release.id ).filter( models.Release.state.in_([ models.ReleaseState.current, models.ReleaseState.pending, models.ReleaseState.frozen, ]) ).all() if len(updates) == 0: log.debug('No stuck Updates found') return kc = buildsys.get_session() stuck_builds = [] overlooked_builds = [] for update in updates: # Let Bodhi have its times if update.date_submitted >= older_than: continue builds = update.builds # Clean Updates with no builds if len(builds) == 0: log.debug(f'Obsoleting empty update {update.alias}') update.obsolete(session) session.flush() continue pending_signing_tag = update.release.pending_signing_tag pending_testing_tag = update.release.pending_testing_tag for build in builds: if build.signed: log.debug(f'{build.nvr} already marked as signed') continue build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] if pending_signing_tag not in build_tags and pending_testing_tag in build_tags: # Our composer missed the message that the build got signed log.debug(f'Changing signed status of {build.nvr}') build.signed = True elif pending_signing_tag in build_tags and pending_testing_tag not in build_tags: # autosign missed the message that the build is waiting to be signed log.debug(f'{build.nvr} is stuck waiting to be signed, let\'s try again') stuck_builds.append((build.nvr, pending_signing_tag)) elif (pending_signing_tag not in build_tags and pending_testing_tag not in build_tags): # this means that an update has been created but we never tagged the build # as pending-signing log.debug(f'Oh, no! We\'ve never sent {build.nvr} for signing, let\'s fix it') overlooked_builds.append((build.nvr, pending_signing_tag)) session.flush() if stuck_builds: kc.multicall = True for b, t in stuck_builds: kc.untagBuild(t, b, force=True) kc.multiCall() for b, t in stuck_builds: kc.tagBuild(t, b, force=True) kc.multiCall() if overlooked_builds: kc.multicall = True for b, t in overlooked_builds: kc.tagBuild(t, b, force=True) kc.multiCall()
[ 57 ]
def METHOD_NAME(self): tools.get( **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True )
[ 1458 ]
def METHOD_NAME(self): angles = numpy.linspace(0, numpy.pi / 2, 10) pty = numpy.sin(angles) * 20 + 10 ptx = numpy.cos(angles) * 20 + 10 ellipse = ellipse_mdl.fit_ellipse(pty, ptx) self.assertAlmostEqual(ellipse.center_1, 10) self.assertAlmostEqual(ellipse.center_2, 10) self.assertAlmostEqual(ellipse.half_long_axis, 20) self.assertAlmostEqual(ellipse.half_short_axis, 20)
[ 9, 6190, 1263 ]
def METHOD_NAME(self, opt, cur_params, init_params, weight=0): if weight == 0: weight = opt.ratio grad_dict = {} for k in cur_params.keys(): scale = 1.0 / opt.local_normalizing_vec cum_grad = init_params[k] - cur_params[k] cum_grad.mul_(weight * scale) grad_dict[k] = cum_grad return grad_dict
[ 19, 125, 387, 140 ]
def METHOD_NAME(layer_input, skip_input, filters, f_size=4, dropout_rate=0): """Layers used during upsampling""" u = UpSampling2D(size=2)(layer_input) u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u) if dropout_rate: u = Dropout(dropout_rate)(u) u = BatchNormalization(momentum=0.8)(u) u = Concatenate()([u, skip_input]) return u
[ 13713 ]
def METHOD_NAME( delivery_attempts, staff_api_client, permission_manage_apps, ): # given staff_api_client.user.user_permissions.add(permission_manage_apps) variables = { "id": delivery_attempts["webhook_id"], "first": 3, "sortBy": {"field": "CREATED_AT", "direction": "DESC"}, } # when response = staff_api_client.post_graphql( EVENT_DELIVERY_ATTEMPT_SORT_QUERY, variables=variables ) content = get_graphql_content(response) deliveries_response = content["data"]["webhook"]["eventDeliveries"]["edges"][0] attempts_response = deliveries_response["node"]["attempts"]["edges"] # then assert attempts_response[0]["node"]["id"] == delivery_attempts["attempt_3_id"] assert attempts_response[1]["node"]["id"] == delivery_attempts["attempt_2_id"]
[ 9, 12, 3395, 3142, 539, 266, 1966 ]
def METHOD_NAME(self): self.agent_alt.DiscFac = 0.90 self.agent_alt.solve() self.assertAlmostEqual( self.agent_alt.solution[0].cFunc(10).tolist(), 3.97501, places=HARK_PRECISION, )
[ 9, 3823, 725 ]
def METHOD_NAME(self): # use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png') response = upload_helper(self, "imagefilewithoutext") self.assertEqual(response.status_code, 200) self.assertEqual(len(response.redirect_chain), 0) # Redirect only if it worked self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
[ 9, 660, 529, 2916 ]
def METHOD_NAME() -> Component: c = gf.Component() s1 = gf.Section(width=2.2, offset=0, layer=(3, 0), name="etch") s2 = gf.Section(width=1.1, offset=3, layer=(1, 0), name="wg2") X1 = gf.CrossSection( width=1.2, offset=0, layer=(2, 0), name="wg", port_names=("in1", "out1"), sections=[s1, s2], ) # Create the second CrossSection that we want to transition to s1 = gf.Section(width=3.5, offset=0, layer=(3, 0), name="etch") s2 = gf.Section(width=3, offset=5, layer=(1, 0), name="wg2") X2 = gf.CrossSection( width=1, offset=0, layer=(2, 0), name="wg", port_names=("in1", "out1"), sections=[s1, s2], ) Xtrans = gf.path.METHOD_NAME(cross_section1=X1, cross_section2=X2, width_type="sine") # Xtrans = gf.cross_section.strip(port_names=('in1', 'out1')) P1 = gf.path.straight(length=5) P2 = gf.path.straight(length=5) wg1 = gf.path.extrude(P1, X1) wg2 = gf.path.extrude(P2, X2) P4 = gf.path.euler(radius=25, angle=45, p=0.5, use_eff=False) wg_trans = gf.path.extrude(P4, Xtrans) wg1_ref = c << wg1 wgt_ref = c << wg_trans wgt_ref.connect("in1", wg1_ref.ports["out1"]) wg2_ref = c << wg2 wg2_ref.connect("in1", wgt_ref.ports["out1"]) return c
[ 4677 ]
def METHOD_NAME(self, session): data = self.deserialize_http_content(session) self.ctx.set_var( "instance", data, schema_builder=self._build_schema_on_200 )
[ 69, 1072 ]
def METHOD_NAME(self, pt_session_options=None): """Initialize the prompt session and the prompt loop and store them in self.pt_app and self.pt_loop. Additional keyword arguments for the PromptSession class can be specified in pt_session_options. """ if pt_session_options is None: pt_session_options = {} def get_prompt_tokens(): return [(Token.Prompt, self.prompt)] if self._ptcomp is None: compl = IPCompleter( shell=self.shell, namespace={}, global_namespace={}, parent=self.shell ) # add a completer for all the do_ methods methods_names = [m[3:] for m in dir(self) if m.startswith("do_")] def gen_comp(self, text): return [m for m in methods_names if m.startswith(text)] import types newcomp = types.MethodType(gen_comp, compl) compl.custom_matchers.insert(0, newcomp) # end add completer. self._ptcomp = IPythonPTCompleter(compl) # setup history only when we start pdb if self.shell.debugger_history is None: if self.shell.debugger_history_file is not None: p = Path(self.shell.debugger_history_file).expanduser() if not p.exists(): p.touch() self.debugger_history = FileHistory(os.path.expanduser(str(p))) else: self.debugger_history = InMemoryHistory() else: self.debugger_history = self.shell.debugger_history options = dict( message=(lambda: PygmentsTokens(get_prompt_tokens())), editing_mode=getattr(EditingMode, self.shell.editing_mode.upper()), key_bindings=create_ipython_shortcuts(self.shell), history=self.debugger_history, completer=self._ptcomp, enable_history_search=True, mouse_support=self.shell.mouse_support, complete_style=self.shell.pt_complete_style, style=getattr(self.shell, "style", None), color_depth=self.shell.color_depth, ) if not PTK3: options['inputhook'] = self.shell.inputhook options.update(pt_session_options) if not _use_simple_prompt: self.pt_loop = asyncio.new_event_loop() self.pt_app = PromptSession(**options)
[ 7353, 176 ]
def METHOD_NAME(self, lr): """Set the learning rate.""" for param_group in self.param_groups: param_group["lr"] = lr
[ 0, 6941 ]
def METHOD_NAME(self): self.assertEqual(absolutify_url("/blap"), "http://example.com/blap")
[ 9, -1, 274, 432, 623, 10305 ]
def METHOD_NAME(self, socket_patches): expected = {"modelName": b"model_name", "modelPath": b"model_path", "batchSize": 1, "handler": b"handler", "ioFileDescriptor": b"0123456789"} socket_patches.socket.recv.side_effect = [ b"L", b"\x00\x00\x00\x0a", b"model_name", b"\x00\x00\x00\x0a", b"model_path", b"\x00\x00\x00\x01", b"\x00\x00\x00\x07", b"handler", b"\xFF\xFF\xFF\xFF", b"\x00\x00\x00\x0a", b"0123456789" ] cmd, ret = codec.retrieve_msg(socket_patches.socket) assert cmd == b"L" assert ret == expected
[ 9, 404, 169, 557, 654, 1667 ]
def METHOD_NAME(module_name, source_files): """Get source code root_dir from source_files in .podspecs Assume the root_dir is with the format: {module_name}/Sources or {module_name}/Source """ MODULE_ROOT_PATCH = { 'FirebaseFirestore': 'Firestore/Source', 'FirebaseFirestoreSwift': 'Firestore/Swift/Source', 'FirebaseCrashlytics': 'Crashlytics/Crashlytics', 'FirebaseInAppMessagingSwift': 'FirebaseInAppMessaging/Swift/Source', } if module_name in MODULE_ROOT_PATCH: return MODULE_ROOT_PATCH[module_name] if source_files: for source_file in source_files: if f'{module_name}/Sources' in source_file: return f'{module_name}/Sources' if f'{module_name}/Source' in source_file: return f'{module_name}/Source' return ''
[ 19, 1563, 1190 ]
def METHOD_NAME(active_directory_connector_name: Optional[str] = None, data_controller_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActiveDirectoryConnectorResult: """ Retrieves an Active Directory connector resource :param str active_directory_connector_name: The name of the Active Directory connector instance :param str data_controller_name: The name of the data controller :param str resource_group_name: The name of the Azure resource group """ __args__ = dict() __args__['activeDirectoryConnectorName'] = active_directory_connector_name __args__['dataControllerName'] = data_controller_name __args__['resourceGroupName'] = resource_group_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:azurearcdata/v20230115preview:getActiveDirectoryConnector', __args__, opts=opts, typ=GetActiveDirectoryConnectorResult).value return AwaitableGetActiveDirectoryConnectorResult( id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), properties=pulumi.get(__ret__, 'properties'), system_data=pulumi.get(__ret__, 'system_data'), type=pulumi.get(__ret__, 'type'))
[ 19, 923, 2851, 4059 ]
def METHOD_NAME(): """ When no aws_kms:data_key is configured, calling _cfg_data_key should raise a SaltConfigurationError """ pytest.raises(salt.exceptions.SaltConfigurationError, aws_kms._cfg_data_key)
[ 9, 2610, 365, 59, 654, 59 ]
def METHOD_NAME(idp_config): metadata_url = idp_config["metadata_url"] entity_id = idp_config["entity_id"] cache_key = f"saml.metadata.{metadata_url}.{entity_id}" saml_config = cache.get(cache_key) if saml_config is None: saml_config = OneLogin_Saml2_IdPMetadataParser.parse_remote( metadata_url, entity_id=entity_id, timeout=idp_config.get("metadata_request_timeout", 10), ) cache.set( cache_key, saml_config, idp_config.get("metadata_cache_timeout", 60 * 60 * 4), ) return saml_config
[ 1047, 773, 274, 200 ]