Dataset preview. Columns: text (string, lengths 15 to 7.82k) and ids (sequence, lengths 1 to 7).
def METHOD_NAME(name, login, password=None, text=None):  # password/text are assumed inputs: the flattened snippet used them without defining them
    email = login if '@' in login else None
    user = User(
        name=name or login,
        login=login,
        password=generate_password_hash(password),
        roles=current_app.config['ADMIN_ROLES'],
        text=text,
        email=email,
        email_verified=bool(email)
    )
    try:
        user = user.create()
    except Exception as e:
        click.echo(f'ERROR: {e}')
    else:
        return user
[ 129, 21 ]
def METHOD_NAME(var_name: str):
    ## An argument is an arg_char_list
    arguments = [string_to_argument("unset"), string_to_argument(var_name)]
    ## Pass all relevant arguments to the planner
    node = make_command(arguments)
    return node
[ 93, 4132, 486 ]
def METHOD_NAME(uln_auth_instance):
    """
    Authenticate ULN, getting its token.
    """
    class ServerList:
        """
        Dummy server list mock.
        """
        def server(self):
            pass

    server_list_instance = ServerList()
    server_list = MagicMock(return_value=server_list_instance)
    retry_server_instance = MagicMock()
    retry_server_instance.auth = MagicMock()
    retry_server_instance.auth.login = MagicMock(return_value="12345")
    retry_server = MagicMock(return_value=retry_server_instance)
    uri = "uln:///suse"

    with patch("ulnauth.ServerList", server_list) as srv_lst, patch("ulnauth.RetryServer", retry_server) as rtr_srv:
        uln_auth_instance.get_credentials = MagicMock(return_value=("uln_user", "uln_password",))
        token = uln_auth_instance.authenticate(uri)

    assert server_list.call_args_list[0][0] == (['https://linux-update.oracle.com/rpc/api'],)
    rs_call = retry_server.call_args_list[0][1]
    for p_name, p_val in {'refreshCallback': None, 'username': 'user', 'proxy': 'https://my_http_proxy',
                          'password': 'password', 'timeout': 5}.items():
        assert p_name in rs_call
        assert rs_call[p_name] == p_val
    assert retry_server_instance.addServerList.call_args_list[0][0] == (server_list_instance,)
    assert token == "12345"
    assert retry_server_instance.auth.login.call_args_list[0][0] == ("uln_user", "uln_password")
[ 9, 2433, 10324 ]
def METHOD_NAME(self, editor):
    """Manages install setup of the pane."""
    super().METHOD_NAME(editor)
    horizontal_scrollbar = editor.horizontalScrollBar()
    horizontal_scrollbar.valueChanged.connect(self.update_bar_position)
    horizontal_scrollbar.sliderReleased.connect(self.update)
[ 69, 428 ]
def METHOD_NAME():
    @given(seq=from_type(_NestedSequence[int]))
    def test(seq):
        assert hasattr(seq, "__iter__")

        def flatten(lst):
            for el in lst:
                try:
                    yield from flatten(el)
                except TypeError:
                    yield el

        assert all(isinstance(i, int) for i in flatten(seq))

    test()
[ 9, 7122, 1913, 612, 771 ]
def METHOD_NAME(self): """Tests the autograd interface.""" weight = pnp.array(0.5, requires_grad=True) dev = qml.device("default.qubit", wires=4) circuit = qml.QNode(circuit_template, dev) circuit(weight) grad_fn = qml.grad(circuit) # since test cases are hard to construct # for this template, just check that the gradient is computed # without error grad_fn(weight)
[ 9, 898 ]
def METHOD_NAME(self, options):
    base_qs = super().METHOD_NAME(options)
    if options.get('channel'):
        channel = amo.CHANNEL_CHOICES_LOOKUP[options['channel']]
        base_qs = base_qs.filter(versions__channel=channel)
    return base_qs
[ 19, 414, 2386 ]
def METHOD_NAME(self):
    signal = Signal(get_path_for_data_file("ask.complex"), "ASK-Test")
    signal.modulation_type = "ASK"
    signal.samples_per_symbol = 295
    signal.center = 0.0219
    self.assertEqual(signal.num_samples, 13710)
    proto_analyzer = ProtocolAnalyzer(signal)
    proto_analyzer.get_protocol_from_signal()
    self.assertTrue(proto_analyzer.plain_bits_str[0].startswith("1011001001011011011011011011011011001000000"))
[ 9, 4949 ]
def METHOD_NAME(self):
    self.__draw_handler_id = self._image.connect(
        'draw', DeferredSignal(self.__draw, timeout=10))
[ 3914, 69, 1100 ]
def METHOD_NAME(response, fields, name=None, bom=False):
    u'''Context manager for writing UTF-8 JSON data to response

    :param response: file-like or response-like object for writing data
        and headers (response-like objects only)
    :param fields: list of datastore fields
    :param name: file name (for headers, response-like objects only)
    :param bom: True to include a UTF-8 BOM at the start of the file
    '''
    if hasattr(response, u'headers'):
        response.headers['Content-Type'] = (
            b'application/json; charset=utf-8')
        if name:
            response.headers['Content-disposition'] = (
                u'attachment; filename="{name}.json"'.format(
                    name=encode_rfc2231(name)))
    if bom:
        response.stream.write(BOM_UTF8)

    response.stream.write(
        six.ensure_binary(u'{\n "fields": %s,\n "records": [' % dumps(
            fields, ensure_ascii=False, separators=(u',', u':'))))
    yield JSONWriter(response.stream)
    response.stream.write(b'\n]}\n')
[ 763, 797 ]
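A usage sketch for the writer above, assuming, as the bare yield suggests, that its module wraps it with contextlib.contextmanager and supplies dumps, six, BOM_UTF8 and JSONWriter (none of which appear in the snippet):

import io
from contextlib import contextmanager

json_dump = contextmanager(METHOD_NAME)  # hypothetical wrapping

class FakeResponse:
    # plain file-like holder: no .headers attribute, so the header branch is skipped
    def __init__(self):
        self.stream = io.BytesIO()

resp = FakeResponse()
with json_dump(resp, fields=[{"id": "x", "type": "int"}]) as writer:
    pass  # records would be appended through the JSONWriter here
print(resp.stream.getvalue())
# b'{\n "fields": [{"id":"x","type":"int"}],\n "records": [\n]}\n'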
def METHOD_NAME(self):
    # Generate an error and get an event
    self.vm.hmp_qemu_io("drive0", "aio_read %d %d" %
                        (offset * sector_size, sector_size))
    self.vm.qtest("clock_step 10")
    self.do_check_event('img0', offset)

    # I/O errors in the same child: only one event is emitted
    delay = 10
    for i in range(3):
        self.vm.hmp_qemu_io("drive0", "aio_read %d %d" %
                            (offset * sector_size, sector_size))
        self.vm.qtest("clock_step %d" % delay)
        self.do_check_event(None)

    # Wait enough so the event is finally emitted
    self.vm.qtest("clock_step %d" % (2 * event_rate))
    self.do_check_event('img0', offset)

    # I/O errors in the same child: all events are emitted
    delay = 2 * event_rate
    for i in range(3):
        self.vm.hmp_qemu_io("drive0", "aio_read %d %d" %
                            (offset * sector_size, sector_size))
        self.vm.qtest("clock_step %d" % delay)
        self.do_check_event('img0', offset)

    # I/O errors in different children: all events are emitted
    delay = 10
    for i in range(len(imgs)):
        self.vm.hmp_qemu_io("drive0", "aio_read %d %d" %
                            ((offset + i) * sector_size, sector_size))
        self.vm.qtest("clock_step %d" % delay)
        # In fifo mode only errors in the first child are detected
        if i > 0 and self.read_pattern == 'fifo':
            self.do_check_event(None)
        else:
            self.do_check_event('img%d' % i, offset + i)

    # I/O errors in different children: all events are emitted
    delay = 2 * event_rate
    for i in range(len(imgs)):
        self.vm.hmp_qemu_io("drive0", "aio_read %d %d" %
                            ((offset + i) * sector_size, sector_size))
        self.vm.qtest("clock_step %d" % delay)
        # In fifo mode only errors in the first child are detected
        if i > 0 and self.read_pattern == 'fifo':
            self.do_check_event(None)
        else:
            self.do_check_event('img%d' % i, offset + i)

    # No more pending events
    self.do_check_event(None)
[ 9, 2682 ]
def METHOD_NAME(args):
    label_path = osp.join(args.data, f"{args.split}.{args.labels}")
    if osp.exists(label_path):
        lp = open(label_path, "r")
    else:
        lp = None

    with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
        lines = fp.read().split("\n")
        root = lines.pop(0).strip()
        files = [line.rstrip() for line in lines if len(line) > 0]

        if lp is not None:
            lbls = [line.rstrip() for line in lp]
        else:
            lbls = [None] * len(files)

        num = len(files)
        reader = Wav2VecFeatureReader(args.checkpoint, args.layer)

        def iterate():
            for fname, lbl in zip(files, lbls):
                file = osp.join(root, fname.split("\t")[0])
                feats = reader.get_feats(file)
                yield feats.data, fname, lbl

    return iterate, num, root
[ 19, 640 ]
def METHOD_NAME(config):
    config.add_request_method(
        LTIParams.from_request, name="lti_params", property=True, reify=True
    )
[ 9995 ]
def METHOD_NAME():
    with pytest.raises(DataError):
        DeviceCertificate.unsecure_load(b"dummy")
[ 9, 3615, 203, 398, 1548, 1068, 365 ]
def METHOD_NAME(configure_local_internal_options_module, get_configuration, configure_environment,
                file_monitoring, restart_logcollector):
    '''
    description: Check if the 'wazuh-logcollector' daemon changes a command name in the log messages by
                 the one defined in the 'alias' tag. For this purpose, the test will monitor a command
                 using an alias. Then, it will verify that the 'reading command' event is generated.
                 This event includes the output of the command executed and its alias. Finally, the test
                 will verify that the Wazuh API returns the same values for the 'localfile' section that
                 the configured one.

    wazuh_min_version: 4.2.0

    tier: 0

    parameters:
        - configure_local_internal_options_module:
            type: fixture
            brief: Configure the Wazuh local internal options.
        - get_configuration:
            type: fixture
            brief: Get configurations from the module.
        - configure_environment:
            type: fixture
            brief: Configure a custom environment for testing.
        - file_monitoring:
            type: fixture
            brief: Handle the monitoring of a specified file.
        - restart_logcollector:
            type: fixture
            brief: Clear the 'ossec.log' file and start a new monitor.

    assertions:
        - Verify that the logcollector monitors a command with an assigned alias.
        - Verify that the Wazuh API returns the same values for the 'localfile' section as the configured one.

    input_description: A configuration template (test_basic_configuration_alias) is contained in an external
                       YAML file (wazuh_basic_configuration.yaml). That template is combined with two test
                       cases defined in the module. Those include configuration settings for the
                       'wazuh-logcollector' daemon.

    expected_output:
        - r'Reading command message.*'

    tags:
        - logs
    '''
    cfg = get_configuration['metadata']

    log_callback = logcollector.callback_command_alias_output(cfg['alias'])
    log_monitor.start(timeout=10, callback=log_callback,
                      error_message=logcollector.GENERIC_CALLBACK_ERROR_COMMAND_MONITORING)

    if wazuh_component == 'wazuh-manager':
        api.wait_until_api_ready()
        api.compare_config_api_response([cfg], 'localfile')
[ 9, 830, 533 ]
def METHOD_NAME(backend_class, plot=False):
    # arrange
    settings0 = Settings0D(seed=44)
    settings0.n_sd = N_SD
    settings0.radius_bins_edges = bins_edges(32)
    nf_vals = [1, 4, 16]
    data_x = {}
    data_y = {}

    # act
    lbl = "initial"
    res = run_box_breakup(settings0, [0], backend_class)
    data_x[lbl], data_y[lbl] = res.x, res.y
    for i, nf_val in enumerate(nf_vals):
        settings = Settings0D(
            fragmentation=AlwaysN(n=nf_val), seed=44, warn_overflows=False
        )
        settings.n_sd = settings0.n_sd
        settings.radius_bins_edges = settings0.radius_bins_edges
        settings.coal_eff = ConstEc(Ec=0.95)
        settings.dt = DT
        lbl = "n_f = " + str(nf_val)
        res = run_box_breakup(settings, [120], backend_class)
        data_x[lbl], data_y[lbl] = res.x, res.y

    # plot
    pyplot.step(
        data_x["initial"],
        data_y["initial"][0] * settings.rho,
        color="k",
        linestyle="--",
        label="initial",
    )
    for i, nf_val in enumerate(nf_vals):
        lbl = "n_f = " + str(nf_val)
        pyplot.step(
            data_x[lbl],
            data_y[lbl][0] * settings.rho,
            color=CMAP(i / len(nf_vals)),
            label=lbl
            if lbl not in pyplot.gca().get_legend_handles_labels()[1]
            else "",
        )
    pyplot.xscale("log")
    pyplot.xlabel("particle radius (um)")
    pyplot.ylabel("dm/dlnR (kg/m$^3$ / unit(ln R)")
    pyplot.legend()
    pyplot.title(backend_class.__name__)
    if plot:
        pyplot.show()
    else:
        pyplot.clf()

    # assert
    for datum_x in data_x.values():
        np.testing.assert_array_equal(data_x["initial"], datum_x)
    peaks_expected = {
        "initial": (30, 0.017),
        "n_f = 1": (1600, 0.015),
        "n_f = 4": (500, 0.01),
        "n_f = 16": (200, 0.0075),
    }
    for lbl, x_y in peaks_expected.items():
        print(lbl)
        peak = np.argmax(data_y[lbl][0])
        np.testing.assert_approx_equal(data_x[lbl][peak], x_y[0], significant=1)
        np.testing.assert_approx_equal(
            data_y[lbl][0][peak] * settings.rho, x_y[1], significant=1
        )
[ 9, 8617, -1 ]
def METHOD_NAME(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
    """
    Identity for the resource.
    """
    return pulumi.get(self, "identity")
[ 2989 ]
def METHOD_NAME(self, text):
    ...
[ 711 ]
def METHOD_NAME(self) -> str:
    """
    The path ID that uniquely identifies the object.
    """
    return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(maskfile, plugin, tmpdir, infile):
    """wf with one task that runs fsl.bet using BoshTask"""
    wf = Workflow(name="wf", input_spec=["maskfile", "infile"])
    wf.inputs.maskfile = maskfile
    wf.inputs.infile = infile
    wf.cache_dir = tmpdir

    wf.add(
        BoshTask(
            name="bet",
            zenodo_id="1482743",
            infile=wf.lzin.infile,
            maskfile=wf.lzin.maskfile,
        )
    )
    wf.set_output([("outfile", wf.bet.lzout.outfile)])

    with Submitter(plugin=plugin) as sub:
        wf(submitter=sub)

    res = wf.result()
    assert res.output.outfile.name == "test_brain.nii.gz"
    assert res.output.outfile.exists()
[ 9, 8299, 3545, 1170 ]
def METHOD_NAME(qtbot, widget, widget_mngr):
    applet = QLabel("Widget")
    prov = applet_provider(applet, "test")
    applet2 = QLabel("Widget2")
    prov2 = applet_provider(applet2, "test2")

    widget_mngr.addApplet(100, prov)
    widget_mngr.addApplet(200, prov2)

    widget.show()
    qtbot.waitExposed(widget)

    assert applet.isVisible()
    assert not applet2.isVisible()

    widget_mngr.focusApplet(200)
    assert not applet.isVisible()
    assert applet2.isVisible()
[ 9, 1449, 722, 194, 264, 9680, 14894 ]
def METHOD_NAME(directory):
    result = []
    for root, dirs, files in os.walk(directory):
        if '.svn' in dirs:
            dirs.remove('.svn')
        result.extend([
            os.path.join(root, f)
            for f in files if is_test_name(f)
        ])
    result.sort()
    return result
[ 416, 75, -1, 1537 ]
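A quick usage sketch for the collector above; is_test_name is not shown in the snippet, so a plausible stand-in predicate is assumed:

import os

def is_test_name(fname):  # hypothetical stand-in for the real predicate
    return fname.startswith('test_') and fname.endswith('.py')

# Walks the tree, prunes .svn directories, and returns sorted paths,
# e.g. ['pkg/sub/test_b.py', 'pkg/test_a.py']
print(METHOD_NAME('pkg'))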
def METHOD_NAME(expr, passes):
    passes = passes if isinstance(passes, list) else [passes]
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential(passes)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body
[ 22, 1671, 403 ]
def METHOD_NAME(cls, parser: argparse.ArgumentParser):
    parser.add_argument(
        "--aleph.api_key", type=str, help="AlephAlpha API key.", required=True
    )
    parser.add_argument(
        "--aleph.model",
        type=str,
        help="Model name to use.",
        default="luminous-base",
    )
    parser.add_argument(
        "--aleph.maximum_tokens",
        type=int,
        help="The maximum number of tokens to be generated.",
        default=64,
    )
    parser.add_argument(
        "--aleph.temperature",
        type=float,
        help="A non-negative float that tunes the degree of randomness in generation.",
        default=0.0,
    )
    parser.add_argument(
        "--aleph.stop_sequences",
        type=List[str],
        help="Stop tokens.",
        default=["user: ", "bot: ", "system: "],
    )
    parser.add_argument(
        "--aleph.top_k",
        type=int,
        help="Number of most likely tokens to consider at each step.",
        default=0,
    )
    parser.add_argument(
        "--aleph.top_p",
        type=float,
        help="Total probability mass of tokens to consider at each step.",
        default=0.0,
    )
[ 238, 335 ]
def METHOD_NAME(self) -> List[Output]:
    """Detect shadowing of built-in symbols

    Recursively visit the calls

    Returns:
        list: {'vuln', 'filename', 'contract', 'func', 'shadow'}
    """
    results = []
    for contract in self.contracts:
        shadows = self.detect_builtin_shadowing_definitions(contract)
        if shadows:
            for shadow in shadows:
                # Obtain components
                shadow_type = shadow[0]
                shadow_object = shadow[1]

                info: DETECTOR_INFO = [
                    shadow_object,
                    f' ({shadow_type}) shadows built-in symbol\n',
                ]
                res = self.generate_result(info)
                results.append(res)

    return results
[ 2991 ]
def METHOD_NAME(trials, do_show=True, title="Loss Histogram"):
    # -- import here because file-level import is too early
    import matplotlib.pyplot as plt

    status_colors = default_status_colors
    Xs, Ys, Ss, Cs = zip(
        *[
            (x, y, s, status_colors[s])
            for (x, y, s) in zip(trials.specs, trials.losses(), trials.statuses())
            if y is not None
        ]
    )

    # XXX: deal with ok vs. un-finished vs. error trials
    print("Showing Histogram of %i jobs" % len(Ys))
    plt.hist(Ys)
    plt.xlabel("loss")
    plt.ylabel("frequency")
    plt.title(title)
    if do_show:
        plt.show()
[ 57, 1288, 6069 ]
def METHOD_NAME(self, name, sig, signode):
    """
    Add cross-reference IDs and entries to self.indexnode, if applicable.

    *name* is whatever :meth:`handle_signature()` returned.
    """
    return  # do nothing by default
[ 238, 1030, 61, 724 ]
def METHOD_NAME (libraries):
    return __order.order (libraries)
[ 852, 2948 ]
def METHOD_NAME(self, update=True):
    """ Execute authconfig command """
    if update:
        self.add_option("update")
    args = self.build_args()
    auth_cmd = ['/usr/sbin/authconfig'] + args
    cmd = self.host.run_command(auth_cmd, set_env=False, raiseonerr=False)
    if cmd.returncode != 0:
        raise Exception("Failed to run Authconfig")
[ 750 ]
def METHOD_NAME(input_args: str):
    """
    Gets the top and bottom text and returns them in a url friendly form
    that conforms with the api standards
    """
    # This gets the text between the quotation marks (and ignores \")
    args = re.findall(r'"(.*?(?<!\\))"', input_args)

    # Replaces all the required characters to be url friendly
    url_friendly_args = []
    for arg in args:
        arg = arg.translate(REPLACEMENTS)
        # the translate function won't properly handle the '\"' character so we do it explicitly
        arg = arg.replace(r'\"', "''")
        if arg == "":
            arg = "_"
        url_friendly_args.append(arg)

    return url_friendly_args
[ 19, 12284, 134 ]
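To see the two stages in isolation: REPLACEMENTS is module-level in the source, so a plausible memegen-style translation table is assumed here:

import re

REPLACEMENTS = str.maketrans({' ': '_', '?': '~q', '%': '~p', '#': '~h', '/': '~s'})  # assumed

text = '"top text" "50% of the time?"'
print(re.findall(r'"(.*?(?<!\\))"', text))   # ['top text', '50% of the time?']
print([t.translate(REPLACEMENTS) for t in re.findall(r'"(.*?(?<!\\))"', text)])
# ['top_text', '50~p_of_the_time~q']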
def METHOD_NAME(self, ctx:AutoscaleConditionParser.NamespaceContext):
    pass
[ 538, 1194 ]
def METHOD_NAME(self, block_pool_name: str, cannot_be_deleted: bool = False):
    """
    Deletes the block pool, does not verify the deletion, but verifies the alert
    if the block pool cannot be deleted

    Args:
        block_pool_name (str): Name of the block pool to be deleted
        cannot_be_deleted (bool): Whether the block pool cannot be deleted

    Returns:
        bool: True if the block pool delete via UI performed, False otherwise
    """
    logger.info(f"Deleting the block pool: {block_pool_name}")
    self.select_search_by("name")
    self.search(block_pool_name)

    from ocs_ci.ocs.ui.helpers_ui import format_locator

    resource_actions = format_locator(
        self.generic_locators["actions_of_resource_from_list"], block_pool_name
    )
    self.do_click(resource_actions, enable_screenshot=True)
    self.do_click(self.generic_locators["delete_resource"], enable_screenshot=True)

    if cannot_be_deleted:
        logger.info(
            f"Block pool {block_pool_name} cannot be deleted. Verifying alert"
        )
        self.check_element_presence(
            self.bp_loc["pool_cannot_be_deleted_warning"][::-1]
        )
        warning_text = self.get_element_text(
            self.bp_loc["pool_cannot_be_deleted_warning"]
        )
        logger.info(f"Warning text: {warning_text}. Close warning modal")
        self.do_click(self.generic_locators["close_modal_btn"])
        return False

    logger.info(f"Confirm {block_pool_name} Deletion")
    self.do_click(self.generic_locators["confirm_action"], enable_screenshot=True)
    return True
[ 34, 573, 1567 ]
def METHOD_NAME(self):
    ...
[ 59, 1798 ]
def METHOD_NAME(self):
    return self.gear
[ 19, 17583 ]
def METHOD_NAME(cert_name, cert, private_key, chain=None):
    kwargs = dict(
        ServerCertificateName=cert_name,
        CertificateBody=cert,
        PrivateKey=private_key
    )
    if chain:
        kwargs['CertificateChain'] = chain
    result = _make_api_call('upload_server_certificate', **kwargs)
    return result['ServerCertificateMetadata']
[ 172, 163, 1548 ]
def METHOD_NAME(model_data: AttrDict) -> None:
    res = model_data.res
    stata_sigma = model_data.stata_result.sigma
    assert_allclose(res.sigma, stata_sigma)
[ 9, 2230 ]
def METHOD_NAME(configuration_name: Optional[str] = None,
                network_manager_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityAdminConfigurationResult:
    """
    Retrieves a network manager security admin configuration.

    :param str configuration_name: The name of the network manager Security Configuration.
    :param str network_manager_name: The name of the network manager.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['configurationName'] = configuration_name
    __args__['networkManagerName'] = network_manager_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20230201:getSecurityAdminConfiguration', __args__, opts=opts, typ=GetSecurityAdminConfigurationResult).value

    return AwaitableGetSecurityAdminConfigurationResult(
        apply_on_network_intent_policy_based_services=pulumi.get(__ret__, 'apply_on_network_intent_policy_based_services'),
        description=pulumi.get(__ret__, 'description'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        resource_guid=pulumi.get(__ret__, 'resource_guid'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
[ 19, 2326, 2870, 830 ]
def METHOD_NAME(cls, model):
    return validation.core.check_model(getattr(validators, 'check_instance', identity)(model))
[ 2316, 437 ]
def METHOD_NAME(delta_year=0):
    return int(datetime.now(UTC_ZI) \
               .replace(year=TimeUTC.__now().year + delta_year, month=1, day=1,
                        hour=0, minute=0, second=0, microsecond=0) \
               .astimezone(UTC_ZI).timestamp() * 1000)
[ 842, 447 ]
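The same computation without the class context, to show what the helper returns; UTC_ZI is assumed to be a plain UTC ZoneInfo, as its use with datetime.now suggests:

from datetime import datetime
from zoneinfo import ZoneInfo

UTC_ZI = ZoneInfo("UTC")  # assumed definition

def year_start_ms(delta_year=0):
    # epoch milliseconds of Jan 1, 00:00:00 UTC of (current year + delta_year)
    now = datetime.now(UTC_ZI)
    start = now.replace(year=now.year + delta_year, month=1, day=1,
                        hour=0, minute=0, second=0, microsecond=0)
    return int(start.timestamp() * 1000)

print(year_start_ms())    # e.g. 1704067200000 for 2024
print(year_start_ms(-1))  # start of the previous year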
def METHOD_NAME(): """Tests CP_PLSR factors accurately reconstruct x.""" x, y, _, _ = _get_standard_synthetic() pls = CP_PLSR(N_LATENT) pls.fit(x, y) x_cp = CPTensor((None, pls.X_factors)) reconstructed_x = x_cp.to_tensor() assert_allclose(reconstructed_x, x, rtol=0, atol=1e-2)
[ 9, 7942, 1104 ]
def METHOD_NAME(label):
    '''
    Changes a label possibly containing a modifier such as "-" or "#"
    into a unicode string.

    >>> graph.utilities.accidentalLabelToUnicode('B-4')
    'B♭4'

    Since matplotlib's default fonts do not support double sharps or double flats,
    etc. these are converted as best we can...

    >>> graph.utilities.accidentalLabelToUnicode('B--4')
    'B♭♭4'
    '''
    if not isinstance(label, str):
        return label
    for modifier, unicodeAcc in pitch.unicodeFromModifier.items():
        if modifier != '' and modifier in label and modifier in ('-', '#'):
            # ideally eventually matplotlib will do the other accidentals...
            label = label.replace(modifier, unicodeAcc)
            break
    return label
[ -1, 636, 24, 774 ]
def METHOD_NAME(self, target):
    builder = NetBuilder("select")
    c = builder.create_input(
        self.nptype2cinntype(self.inputs["Condition"].dtype),
        self.inputs["Condition"].shape, "Condition")
    x = builder.create_input(
        self.nptype2cinntype(self.inputs["X"].dtype),
        self.inputs["X"].shape, "X")
    y = builder.create_input(
        self.nptype2cinntype(self.inputs["Y"].dtype),
        self.inputs["Y"].shape, "Y")
    out = builder.select(c, x, y)

    prog = builder.build()
    res = self.get_cinn_output(
        prog, target, [c, x, y],
        [self.inputs["Condition"], self.inputs["X"], self.inputs["Y"]],
        [out])

    self.cinn_outputs = res
[ 56, 8357, 735 ]
def METHOD_NAME(workflow_name, protocol="http", host="localhost", port="8081"):
    print(f"## Registering {workflow_name} workflow")
    model_zoo_url = "https://torchserve.s3.amazonaws.com"
    params = (("url", f"{model_zoo_url}/war_files/{workflow_name}.war"),)
    url = f"{protocol}://{host}:{port}/workflows"
    response = requests.post(url, params=params, verify=False)
    return response
[ 372, 3855 ]
def METHOD_NAME():
    param = ParameterBoolean("MyBool")
    assert param.to_request() == {"Name": "MyBool", "Type": "Boolean"}
    assert param.expr == {"Get": "Parameters.MyBool"}
    assert param.parameter_type.python_type == bool
[ 9, 511 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    Resource location.
    """
    return pulumi.get(self, "location")
[ 708 ]
def METHOD_NAME(self):
    # Make sure an invalid result doesn't muck-up the works
    self.assertEqual(list(NoneLengthHint()), list(range(10)))
[ 9, 532, 3711 ]
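NoneLengthHint itself is not shown; a plausible definition, modeled on the standard-library iterator-length tests this appears to come from:

class NoneLengthHint:
    # __length_hint__ gives an unusable answer, so consumers such as
    # list() must fall back to plain iteration.
    def __length_hint__(self):
        return NotImplemented

    def __iter__(self):
        return iter(range(10))

assert list(NoneLengthHint()) == list(range(10))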
def METHOD_NAME(info, grade):
    """Auxiliary function for make_compensator."""
    for k in range(len(info["comps"])):
        if info["comps"][k]["kind"] == grade:
            this_data = info["comps"][k]["data"]

            # Create the preselector
            presel = np.zeros((this_data["ncol"], info["nchan"]))
            for col, col_name in enumerate(this_data["col_names"]):
                ind = [k for k, ch in enumerate(info["ch_names"]) if ch == col_name]
                if len(ind) == 0:
                    raise ValueError(
                        "Channel %s is not available in " "data" % col_name
                    )
                elif len(ind) > 1:
                    raise ValueError("Ambiguous channel %s" % col_name)
                presel[col, ind[0]] = 1.0

            # Create the postselector (zero entries for channels not found)
            postsel = np.zeros((info["nchan"], this_data["nrow"]))
            for c, ch_name in enumerate(info["ch_names"]):
                ind = [
                    k for k, ch in enumerate(this_data["row_names"]) if ch == ch_name
                ]
                if len(ind) > 1:
                    raise ValueError("Ambiguous channel %s" % ch_name)
                elif len(ind) == 1:
                    postsel[c, ind[0]] = 1.0
                # else, don't use it at all (postsel[c, ?] = 0.0) by allocation
            this_comp = np.dot(postsel, np.dot(this_data["data"], presel))
            return this_comp

    raise ValueError("Desired compensation matrix (grade = %d) not" " found" % grade)
[ 93, -1 ]
def METHOD_NAME(self, fpath, content):
    try:
        with open(fpath, 'wb') as f:
            f.write(content)
    except Exception:
        logger.exception('save image file failed')
[ 73 ]
def METHOD_NAME(self, tmpdir):
    # Issue #3057
    u = Universe(mol2_sodium_ion)
    ag = u.atoms
    assert not hasattr(ag, "bonds")

    with tmpdir.as_cwd():
        outfile = 'test.mol2'
        ag.write(outfile)
        u2 = Universe(outfile)
        assert not hasattr(u2.atoms, "bonds")
[ 9, 654, 5179 ]
def METHOD_NAME(n, r, out):
    """
    Main body of `sample_without_replacement`. To be compiled as
    a ufunc by guvectorize of Numba.
    """
    k = r.shape[0]
    # Logic taken from random.sample in the standard library
    pool = np.arange(n)
    for j in range(k):
        idx = np.intp(np.floor(r[j] * (n-j)))  # np.floor returns a float
        out[j] = pool[idx]
        pool[idx] = pool[n-j-1]
[ 734, 529, 3729 ]
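Called outside Numba, the body works like a partial Fisher-Yates shuffle: each draw overwrites the chosen slot with the last untouched pool element, so no index can repeat. A quick check against the function above:

import numpy as np

rng = np.random.default_rng(0)
r = rng.random(3)                 # three uniforms on [0, 1)
out = np.empty(3, dtype=np.intp)
METHOD_NAME(10, r, out)           # draw 3 of 0..9 without replacement
print(out)                        # three distinct values from 0..9
assert len(set(out)) == 3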
def METHOD_NAME(self, site=None):
    if not site:
        site = Site.objects.get_current()
    subject = render_to_string('email_activate_subject.txt')
    subject = ''.join(subject.splitlines())
    context = {
        'user': self.user,
        'user_info': json.loads(self.user_info),
        'domain': str(site),
        'activation_key': self.activation_key,
        'is_updating': self.is_updating
    }
    plain_text = render_to_string('email_activate.txt', context)
    html = render_to_string('email_activate.html', context)
    from_email = settings.DEFAULT_FROM_EMAIL
    prepared_email = (subject, plain_text, html, from_email, [self.user.email])
    email_sender = EmailSender(from_email)
    email_sender.mass_send([prepared_email])
[ 353, 648, 487 ]
def METHOD_NAME(self): """Return the configured workheet layout """ setup = api.get_setup() return setup.getWorksheetLayout()
[ 13835, 571 ]
def METHOD_NAME(self, bot):
    comments = self.api.get_comments(project_name=PROJECT)
    return self.api.comment_find(comments, bot)
[ 3528, 1221 ]
def METHOD_NAME(
    self, context: InputContext, table_slice: TableSlice, connection
) -> pyspark.sql.DataFrame:
    """Loads the return of the query as the correct type."""
    spark = SparkSession.builder.getOrCreate()  # type: ignore

    if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:
        return spark.createDataFrame([], StructType([]))

    pd_df = connection.execute(DuckDbClient.get_select_statement(table_slice)).fetchdf()
    return spark.createDataFrame(pd_df)
[ 557, 362 ]
def METHOD_NAME(): """Get the parameter file path Args: None Returns: str: path for parameter files """ pth = idaes.cfg.properties.helmholtz.parameter_file_path if pth is None: pth = this_file_dir() return pth
[ 19, 511, 157 ]
def METHOD_NAME(self):
    import bl_app_override
    self._ui_ignore_store = bl_app_override.ui_draw_filter_register(
        ui_ignore_classes=(
            None if self.ui_ignore_classes is None
            else self.ui_ignore_classes()
        ),
        ui_ignore_operator=self.ui_ignore_operator,
        ui_ignore_property=self.ui_ignore_property,
        ui_ignore_menu=self.ui_ignore_menu,
        ui_ignore_label=self.ui_ignore_label,
    )
[ 102, 882, 684 ]
def METHOD_NAME(iterable, encoder=JSONEncoder):
    """Stream JSON line-based data."""

    def _generate_stream():
        for row in iterable:
            row.pop("_index", None)
            yield encoder().encode(row)
            yield "\n"

    return Response(_generate_stream(), mimetype="application/json+stream")
[ 919, -1 ]
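A minimal Flask harness for the streamer above, assuming the snippet's JSONEncoder and Response are the stock json/flask ones:

from flask import Flask, Response
from json import JSONEncoder

app = Flask(__name__)

@app.route("/export")
def export():
    rows = ({"_index": i, "value": i * i} for i in range(3))
    # body streams: {"value": 0}\n{"value": 1}\n{"value": 4}\n
    return METHOD_NAME(rows)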
def METHOD_NAME(cli_ctx, _):
    # do not return cli_ctx.allowed_connections for home region compatibility
    return _cf_security(cli_ctx)
[ 2325, 2326, 2474, 560 ]
def METHOD_NAME() -> Dict[str, Any]:
    return {
        "results": [
            {
                "id": "sha256:e70f7611f4d093d5f73026d23f7ab612aa1794abfb210ef1c549b225380d053b",
                "distro": "Debian GNU/Linux 11 (bullseye)",
                "distroRelease": "bullseye",
                "collections": ["All"],
                "packages": [
                    {"type": "os", "name": "mawk", "version": "1.3.4.20200120-2", "licenses": ["GPL-2"]},
                    {"type": "os", "name": "gzip", "version": "1.10-4", "licenses": ["GPL-3+"]},
                ],
                "compliances": [
                    {
                        "id": 41,
                        "title": "(CIS_Docker_v1.2.0 - 4.1) Image should be created with a non-root user",
                        "severity": "high",
                        "description": "It is a good practice to run the container as a non-root user, if possible. "
                        "Though user\nnamespace mapping is now available, if a user is already defined in "
                        "the container image, the\ncontainer is run as that user by default and specific"
                        " user namespace remapping is not\nrequired",
                    }
                ],
                "complianceDistribution": {"critical": 0, "high": 1, "medium": 0, "low": 0, "total": 1},
                "complianceScanPassed": True,
                "vulnerabilities": [
                    {
                        "id": "CVE-2022-23990",
                        "status": "fixed in 2.2.10-2+deb11u1",
                        "cvss": 9.8,
                        "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
                        "description": "Expat (aka libexpat) before 2.4.4 has an integer overflow in the doProlog "
                        "function.",
                        "severity": "critical",
                        "packageName": "expat",
                        "packageVersion": "2.2.10-2",
                        "link": "https://security-tracker.debian.org/tracker/CVE-2022-23990",
                        "riskFactors": [
                            "Attack complexity: low",
                            "Attack vector: network",
                            "Critical severity",
                            "Has fix",
                            "Recent vulnerability",
                        ],
                        "impactedVersions": ["<2.2.10-2+deb11u1"],
                        "publishedDate": "2022-01-26T21:15:00+02:00",
                        "discoveredDate": "2022-02-13T11:57:08+02:00",
                        "fixDate": "2022-01-26T21:15:00+02:00",
                    },
                    {
                        "id": "CVE-2022-23852",
                        "status": "fixed in 2.2.10-2+deb11u1",
                        "cvss": 9.8,
                        "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
                        "description": "Expat (aka libexpat) before 2.4.4 has a signed integer overflow in XML_GetBuffer, "
                        "for configurations with a nonzero XML_CONTEXT_BYTES.",
                        "severity": "critical",
                        "packageName": "expat",
                        "packageVersion": "2.2.10-2",
                        "link": "https://security-tracker.debian.org/tracker/CVE-2022-23852",
                        "riskFactors": [
                            "Attack complexity: low",
                            "Attack vector: network",
                            "Critical severity",
                            "Has fix",
                            "Recent vulnerability",
                        ],
                        "impactedVersions": ["<2.2.10-2+deb11u1"],
                        "publishedDate": "2022-01-24T04:15:00+02:00",
                        "discoveredDate": "2022-02-13T11:57:08+02:00",
                        "fixDate": "2022-01-24T04:15:00+02:00",
                    },
                ],
                "vulnerabilityDistribution": {"critical": 5, "high": 5, "medium": 0, "low": 14, "total": 24},
                "vulnerabilityScanPassed": True,
            }
        ]
    }
[ 223, 660, 793, 1571 ]
def METHOD_NAME(self, lens_model_class):
    self._model.METHOD_NAME(lens_model_class)
[ 86, 8318, 578 ]
def METHOD_NAME(self):
    pass
[ 72, 710 ]
def METHOD_NAME(self): """Save and flush.""" return self.flush()
[ 238, 24, 240 ]
def METHOD_NAME():
    data_success = {
        'id': str(uuid.uuid4()),
        'status': {'major': 'SUCCEEDED', 'minor': 'COMPLETED', 'detail': [], 'info': []},
    }
    execution_success = SampleDesignSpaceExecution.build(data_success)
    assert execution_success.succeeded()
[ 9, 2046, 14431 ]
def METHOD_NAME(volname="", snapname=""):
    success = True

    if volname == "":
        log.debug("No volname given")
        return False

    if snapname == "":
        log.debug("No snapname given")
        return False

    cli = ["gluster", "snapshot", "create", snapname, volname]
    log.debug("Running command '%s'", " ".join(cli))

    p = subprocess.Popen(cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    rv = p.returncode

    log.debug("Command '%s' returned '%d'", " ".join(cli), rv)

    if rv:
        log.error("Snapshot of %s failed", volname)
        log.error("Command output:")
        log.error(err)
        success = False
    else:
        log.info("Snapshot of %s successful", volname)

    return success
[ 1828, 4792 ]
def METHOD_NAME(tgen):
    for routern in range(1, 3):
        tgen.add_router("r{}".format(routern))

    switch = tgen.add_switch("s1")
    switch.add_link(tgen.gears["r1"])
    switch.add_link(tgen.gears["r2"])
[ 56, 5650 ]
def METHOD_NAME(ecr_image):
    if not can_run_smdataparallel(ecr_image):
        pytest.skip("Data Parallelism is supported on CUDA 11 on PyTorch v1.6 and above")
[ 187, 894, 2423, 9481 ]
def METHOD_NAME(self, context):
    self.inputs.new('SvVerticesSocket', 'Vertices')
    self.inputs.new('SvStringsSocket', 'Poly Egde')
    self.outputs.new('SvVerticesSocket', 'Vertices')
    self.outputs.new('SvStringsSocket', 'Poly Egde')
    self.outputs.new('SvStringsSocket', 'Vert idx')
    self.outputs.new('SvStringsSocket', 'Poly Egde idx')
[ 2153, 176 ]
def METHOD_NAME(tmp_path: Path) -> Iterator[None]:
    """
    Check for the environment variable we pass as part of circleci's "storage-unit-tests" context.

    Note that the gcs credentials in the "storage-unit-tests" context are the keyid=c07eed131
    key to the [email protected] service account. The contents of the key are at
    github.com/determined-ai/secrets/gcp/service-accounts/storage-unit-tests.json.

    The service account should only have permission to view the "storage-unit-tests" bucket.
    """
    if "DET_GCS_TEST_CREDS" not in os.environ:
        yield
        return

    # Save the text in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to be the path.
    creds_path = tmp_path.joinpath("gcs-test-creds.json")
    with creds_path.open("w") as f:
        f.write(os.environ["DET_GCS_TEST_CREDS"])
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(creds_path)

    try:
        yield
    finally:
        del os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
[ 48, 4191, 9, 6471 ]
def METHOD_NAME(self):
    queryA = dns.message.make_query('foo.localhost', 'A', want_dnssec=True)
    expectedA = dns.rrset.from_text('foo.localhost.', 0, 'IN', 'A', '127.0.0.1')
    resA = self.sendTCPQuery(queryA)
    self.assertRcodeEqual(resA, dns.rcode.NOERROR)
    self.assertRRsetInAnswer(resA, expectedA)
[ 9, 4826, 302 ]
def METHOD_NAME() -> Pipeline:
    tubular_config_repo = git_repo(
        name=Identifier("tubular-pipeline-config"),
        uri="https://github.com/mitodl/ol-infrastructure",
        branch="main",
        paths=["src/ol_concourse/pipelines/open_edx/tubular/"],
    )

    tubular_build_schedule = schedule(Identifier("build-schedule"), interval="168h")

    tubular_retirees = Output(name=Identifier("tubular-retirees"))
    tubular_config_path = f"{tubular_config_repo.name}/src/ol_concourse/pipelines/open_edx/tubular/openedx-config.yml"  # noqa: E501

    tubular_job_object = Job(
        name=Identifier("deploy-tubular-world"),
        max_in_flight=1,  # Only allow 1 Pulumi task at a time since they lock anyway.
        plan=[
            GetStep(get=tubular_config_repo.name, trigger=True),
            GetStep(get=tubular_build_schedule.name, trigger=True),
            TaskStep(
                task=Identifier("tubular-generate-retirees-task"),
                config=TaskConfig(
                    platform=Platform.linux,
                    image_resource=AnonymousResource(
                        type=REGISTRY_IMAGE,
                        source=RegistryImage(repository="mitodl/openedx-tubular"),
                    ),
                    inputs=[Input(name=Identifier("tubular-pipeline-config"))],
                    outputs=[tubular_retirees],
                    params={
                        "TUBULAR_OAUTH_CLIENT_ID": "((tubular_oauth_client.id))",
                        "TUBULAR_OAUTH_CLIENT_SECRET": "((tubular_oauth_client.secret))",  # noqa: E501
                        "TUBULAR_LMS_HOST": "((tubular_oauth_client.host))",
                    },
                    run=Command(
                        path="/app/scripts/get_learners_to_retire.py",
                        args=[
                            "--config_file",
                            tubular_config_path,
                            "--output_dir",
                            f"{tubular_retirees.name}/processing",
                            "--cool_off_days",
                            "0",
                        ],
                    ),
                ),
            ),
            TaskStep(
                task=Identifier("tubular-process-retired-users-task"),
                config=TaskConfig(
                    platform=Platform.linux,
                    image_resource=AnonymousResource(
                        type=REGISTRY_IMAGE,
                        source=RegistryImage(repository="mitodl/openedx-tubular"),
                    ),
                    inputs=[tubular_retirees],
                    # inline Python script to collect retirees into vars.json
                    run=Command(
                        path="python",
                        args=[
                            "-c",
                            textwrap.dedent(
                                """\
                                import json
                                from pathlib import Path
                                learner_dir = Path("tubular-retirees/processing")
                                rfiles = learner_dir.glob("learner*")
                                retirees = []
                                for rfile in rfiles:
                                    retiree = rfile.read_text().strip('\\n').split("=")[-1]
                                    retirees.append(retiree)
                                with open("retirees_dir/vars.json", "w") as vj:
                                    vj.write(json.dumps(retirees))
                                """
                            ),
                        ],
                    ),
                    outputs=[Output(name=Identifier("retirees_dir"))],
                ),
            ),
            LoadVarStep(
                load_var="tubular_retirees",
                file="retirees_dir/vars.json",
                reveal=True,
            ),
            TaskStep(
                task=Identifier("tubular-retire-users-task"),
                across=[
                    AcrossVar(
                        var="tubular_retiree",
                        values="((.:tubular_retirees))",
                    ),
                ],
                config=TaskConfig(
                    platform=Platform.linux,
                    image_resource=AnonymousResource(
                        type=REGISTRY_IMAGE,
                        source=RegistryImage(repository="mitodl/openedx-tubular"),
                    ),
                    inputs=[tubular_retirees, Input(name=tubular_config_repo.name)],
                    params={
                        "TUBULAR_OAUTH_CLIENT_ID": "((tubular_oauth_client.id))",
                        "TUBULAR_OAUTH_CLIENT_SECRET": "((tubular_oauth_client.secret))",  # noqa: E501
                        "TUBULAR_LMS_HOST": "((tubular_oauth_client.host))",
                    },
                    run=Command(
                        path="/app/scripts/retire_one_learner.py",
                        args=[
                            "--config_file",
                            tubular_config_path,
                            "--username",
                            "((.:tubular_retiree))",
                        ],
                    ),
                ),
            ),
        ],
    )

    return Pipeline(
        resources=[tubular_config_repo, tubular_build_schedule],
        jobs=[tubular_job_object],
    )
[ -1, 1148 ]
def METHOD_NAME():
    global _cached_pages
    LOGGER.debug("Pages directory changed")
    with _pages_cache_lock:
        _cached_pages = None
    _on_pages_changed.send()
[ 3359, 254, 596 ]
def METHOD_NAME(self) -> IPAddress | None: ...
[ 693 ]
def METHOD_NAME(self):
    P = Polygon((), )
    assert len(P) == 0
[ 9, 1318, 313 ]
def METHOD_NAME(hierarchy, ddr_i_seqs):
    ddr_set = set(ddr_i_seqs)
    previous = None
    intersection = None
    total_i_seqs = []
    for rg in hierarchy.residue_groups():
        if intersection:
            rc = get_selected_i_seqs(rg, [' CA ', ' N ', ' H '])
            total_i_seqs += rc
        rg_set = []
        for atom in rg.atoms():
            rg_set.append(atom.i_seq)
        intersection = ddr_set.intersection(set(rg_set))
        if intersection:
            total_i_seqs += rg_set
            rc = get_selected_i_seqs(previous, [' CA ', ' C ', ' O '])
            total_i_seqs += rc
        previous = rg
    return total_i_seqs
[ 2450, 497, 12908, 24, -1, 825 ]
def METHOD_NAME(self, __module_name: str, __global_name: str) -> Any: ...
[ 416, 2 ]
def METHOD_NAME(self):
    self.scene.METHOD_NAME(self.starttime.text(), self.endtime.text())
    return
[ -1 ]
def METHOD_NAME(cli_ctx, subscription_id, *_):
    return _log_analytics_client_factory(cli_ctx, subscription_id).data_sources
[ 2325, 390, 3762, 365, 505 ]
def METHOD_NAME(x, y):
    alpha = 1
    beta = 1
    # White
    f1 = noise_variance
    # Violet-ish; parenthesized so the L**2 normalization actually applies
    f2 = noise_variance * (x * x + y * y) / (L * L)
    return (alpha * f1 + beta * f2) / 2.0
[ 802, 559 ]
def METHOD_NAME(name):
    get = operator.attrgetter(name)
    assert issubclass(get(cupy), get(numpy))
[ 9, 168, 393 ]
def METHOD_NAME(self):
    storage_account_id = str(uuid4())
    storage_account_name = "Test Storage Account"
    storage_client = mock.MagicMock
    storage_client.storage_accounts = {
        AZURE_SUSCRIPTION: [
            Storage_Account(
                id=storage_account_id,
                name=storage_account_name,
                enable_https_traffic_only=False,
                infrastructure_encryption=False,
                allow_blob_public_access=None,
                network_rule_set=NetworkRuleSet(default_action="Allow"),
                encryption_type=None,
                minimum_tls_version=None,
            )
        ]
    }

    with mock.patch(
        "prowler.providers.azure.services.storage.storage_default_network_access_rule_is_denied.storage_default_network_access_rule_is_denied.storage_client",
        new=storage_client,
    ):
        from prowler.providers.azure.services.storage.storage_default_network_access_rule_is_denied.storage_default_network_access_rule_is_denied import (
            storage_default_network_access_rule_is_denied,
        )

        check = storage_default_network_access_rule_is_denied()
        result = check.execute()
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert (
            result[0].status_extended
            == f"Storage account {storage_account_name} from subscription {AZURE_SUSCRIPTION} has network access rule set to Allow."
        )
        assert result[0].subscription == AZURE_SUSCRIPTION
        assert result[0].resource_name == storage_account_name
        assert result[0].resource_id == storage_account_id
[ 9, 948, 948, 6736, 235, 1228, 1089 ]
def METHOD_NAME(self):
    ...
[ 9, 129, 280, 1881, 7909, 41, 1205 ]
def METHOD_NAME(name, sep='_', _xform_cache=_xform_cache):
    """Convert camel case to a "pythonic" name.

    If the name contains the ``sep`` character, then it is
    returned unchanged.
    """
    if sep in name:
        # If the sep is in the name, assume that it's already
        # transformed and return the string unchanged.
        return name
    key = (name, sep)
    if key not in _xform_cache:
        if _special_case_transform.search(name) is not None:
            is_special = _special_case_transform.search(name)
            matched = is_special.group()
            # Replace something like ARNs, ACLs with _arns, _acls.
            name = f"{name[: -len(matched)]}{sep}{matched.lower()}"
        s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
        transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
        _xform_cache[key] = transformed
    return _xform_cache[key]
[ 2504, 156 ]
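A behavior sketch for the transform; the helper regexes and cache live at module level in the source, so plausible definitions are assumed here:

import re

_xform_cache = {}                                       # assumed
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')       # assumed
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')        # assumed
_special_case_transform = re.compile('[A-Z]{2,}s$')     # assumed

print(METHOD_NAME('CreateBucket'))   # create_bucket
print(METHOD_NAME('ListARNs'))       # list_arns  (special-case suffix branch)
print(METHOD_NAME('already_snake'))  # already_snake: sep present, unchanged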
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_request(
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(self, arg__1: "PySide6.QtGui.QContextMenuEvent"):  # pylint:disable=unused-argument
    if not self.selectedIndexes():
        return

    mnu = self._get_breakpoint_submenu()
    mnu.exec_(QCursor.pos())
[ 198, 2470, 417 ]
def METHOD_NAME(addr):
    if len(addr) > 6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16 - len(pchOnionCat):
            raise ValueError('Invalid onion %s' % vchAddr)
        return pchOnionCat + vchAddr
    elif '.' in addr:  # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:  # IPv6
        sub = [[], []]  # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i, comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1):  # skip empty component at beginning or end
                    continue
                x += 1  # :: skips to suffix
                assert(x < 2)
            else:  # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):  # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
[ 156, 24, 1899 ]
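A quick check of the IPv4 and IPv6 branches; pchIPv4 and pchOnionCat are module constants in the source, so the usual Bitcoin-style prefixes are assumed:

pchIPv4 = bytearray([0] * 10 + [0xff, 0xff])                   # assumed IPv4-mapped prefix
pchOnionCat = bytearray([0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43])  # assumed OnionCat prefix

print(METHOD_NAME('1.2.3.4').hex())  # 00000000000000000000ffff01020304
print(METHOD_NAME('::1').hex())      # 15 zero bytes followed by 0x01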
def METHOD_NAME(): """Hybrid Target and multiple external codegen""" targets = parse_target("ethos-u,cmsis-nn,c") assert len(targets) == 3 assert "ethos-u" == targets[0]["name"] assert not targets[0]["is_tvm_target"] assert "cmsis-nn" == targets[1]["name"] assert not targets[1]["is_tvm_target"] assert "c" == targets[2]["name"] assert targets[2]["is_tvm_target"]
[ 9, 214, 107, 2895, 1030 ]
async def METHOD_NAME():
    num_nodes = 3
    adj_map = {0: [1], 1: [2]}

    async def action_func(dummy_nodes):
        await dummy_nodes[0].publish_set_crypto("aspyn", 10)

    def assertion_func(dummy_node):
        assert dummy_node.get_balance("aspyn") == 10

    await perform_test(num_nodes, adj_map, action_func, assertion_func)
[ 9, 53, 2756, 480, 534, 9964 ]
def METHOD_NAME(self): """Methond override.""" code = ("#include \"util/defines.h\"\n") code += "\n" code += ("#include <cstdint>\n") write(code, file=self.outFile)
[ 77, 1872 ]
def METHOD_NAME(tuple_obj):
    """
    Return str list for tuple obj for logger.

    :param tuple_obj: tuple obj
    :return: string list
    """
    str_list = list()
    for item in tuple_obj:
        str_list.append(str(item))
    return str_list
[ 19, 3 ]
def METHOD_NAME(): """Retrieves a live reference to the global dictionary of custom objects. Custom objects set using using `custom_object_scope` are not added to the global dictionary of custom objects, and will not appear in the returned dictionary. Example: ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` Returns: Global dictionary mapping registered class names to classes. """ return _GLOBAL_CUSTOM_OBJECTS
[ 19, 343, 635 ]
def METHOD_NAME(blk):
    """Process a container object, returning the new components found."""
    new_fixed_true_disjuncts = ComponentSet(
        disj
        for disj in blk.component_data_objects(Disjunct, active=True)
        if disj.indicator_var.value and disj.indicator_var.fixed
    )
    new_activated_disjunctions = ComponentSet(
        blk.component_data_objects(Disjunction, active=True)
    )
    new_activated_disjuncts = ComponentSet(
        disj
        for disjtn in new_activated_disjunctions
        for disj in _activated_disjuncts_in_disjunction(disjtn)
    )
    new_activated_constraints = ComponentSet(
        blk.component_data_objects(Constraint, active=True)
    )
    return (
        new_activated_disjunctions,
        new_fixed_true_disjuncts,
        new_activated_disjuncts,
        new_activated_constraints,
    )
[ 356, 3082, 224 ]
def METHOD_NAME(path, file):
    ...
[ 203, 2518, 171 ]
def METHOD_NAME(self, node: Node) -> Optional[Node]:
    results = self.get_nodes_with_ast_type(node, NodeType.OPTIONAL)
    return results[0] if results else None
[ 19, 1234, 1716, 41, 11347 ]
def METHOD_NAME(notice_origin, origin_pfx, notice_pfx):
    '''Given a NoticeOrigin object with a prefix (like a series of tabs)
    for the origin and the notice messages, return the notes'''
    notes = origin_pfx + notice_origin.origin_str + ':\n'
    for notice in notice_origin.notices:
        notes = notes + notice_pfx + notice.level + ': ' + \
            notice.message + '\n'
    return notes
[ 38, 11662 ]
def METHOD_NAME(h, Xi, x):
    """
    Gaussian Kernel for continuous variables

    Parameters
    ----------
    h : 1-D ndarray, shape (K,)
        The bandwidths used to estimate the value of the kernel function.
    Xi : 1-D ndarray, shape (K,)
        The value of the training set.
    x : 1-D ndarray, shape (K,)
        The value at which the kernel density is being estimated.

    Returns
    -------
    kernel_value : ndarray, shape (nobs, K)
        The value of the kernel function at each training point for each var.
    """
    return (1. / np.sqrt(2 * np.pi)) * np.exp(-(Xi - x)**2 / (h**2 * 2.))
[ 4008 ]
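This is the standard Gaussian kernel K(u) = exp(-u^2/2) / sqrt(2*pi) evaluated at u = (Xi - x)/h, applied elementwise. A quick numeric check:

import numpy as np

h = np.array([1.0, 0.5])
Xi = np.array([0.0, 1.0])
x = np.array([0.0, 1.5])

# u = [0, -1]: the kernel peaks at 1/sqrt(2*pi) ~ 0.3989 at u = 0 and
# drops by a factor exp(-0.5) one bandwidth away.
print(METHOD_NAME(h, Xi, x))  # [0.39894228 0.24197072]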
def METHOD_NAME(self, request: Request, organization) -> Response:
    """
    Get list of requests to join org/team
    """
    if request.access.has_scope("org:write"):
        access_requests = list(
            OrganizationAccessRequest.objects.filter(
                team__organization=organization,
                member__user_is_active=True,
                member__user_id__isnull=False,
            ).select_related("team")
        )
    elif request.access.has_scope("team:write") and request.access.team_ids_with_membership:
        access_requests = list(
            OrganizationAccessRequest.objects.filter(
                member__user_is_active=True,
                member__user_id__isnull=False,
                team__id__in=request.access.team_ids_with_membership,
            ).select_related("team")
        )
    else:
        # Return empty response if user does not have access
        return Response([])

    return Response(serialize(access_requests, request.user))
[ 19 ]
def METHOD_NAME() -> Path:
    root = wave_root()
    www = root / "www"
    return www if www.exists() else root / "ui/build"
[ 235, 2412, 1190 ]
def METHOD_NAME():
    warn_if_no_long_paths()
    logfile_path = get_logfile_path()
    if not logfile_path.is_file():
        click.echo(f"No logfile found at: {logfile_path}")
        return
    traceback = get_traceback(logfile_path.read_text(encoding="utf-8"))
    click.echo(traceback)
[ 168, 100 ]
def METHOD_NAME(): """Test the kwargs of estimate_mass.""" M = estimate_mass(cnumax, cdeltanu, cteff, cenumax, cedeltanu, ceteff) # Check units assert M.unit == u.solMass assert M.error.unit == u.solMass # Check returns right answer assert_correct_answer(M, cM) # Check units on parameters M = estimate_mass( cnumax, cdeltanu, cteff, u.Quantity(cenumax, u.microhertz), cedeltanu, ceteff ) assert_correct_answer(M, cM) M = estimate_mass( cnumax, cdeltanu, cteff, cenumax, u.Quantity(cedeltanu, u.microhertz), ceteff ) assert_correct_answer(M, cM) M = estimate_mass( cnumax, cdeltanu, cteff, cenumax, cedeltanu, u.Quantity(ceteff, u.Kelvin) ) assert_correct_answer(M, cM) # Check works with a random selection of appropriate units M = estimate_mass( cnumax, cdeltanu, cteff, u.Quantity(cenumax, u.microhertz).to(1 / u.day), u.Quantity(cedeltanu, u.microhertz).to(u.hertz), ceteff, ) assert_correct_answer(M, cM)
[ 9, 918, 2858, 1475 ]
def METHOD_NAME(self, entity: team_control_point, entity_raw: dict):
    obj = self._create_empty(self._get_entity_name(entity))
    properties = {
        'prop_path': entity.team_model_0,
        'type': entity.class_name,
        'scale': self.scale,
        'entity': entity_raw,
    }
    obj.rotation_euler.rotate(
        Euler((math.radians(entity.angles[2]),
               math.radians(entity.angles[0]),
               math.radians(entity.angles[1]))))
    self._set_location_and_scale(obj, parse_float_vector(entity_raw['origin']))
    self._set_entity_data(obj, properties)
    self._put_into_collection('item_ammopack', obj, 'props')
[ 276, 2957, 401, 1669 ]