text (string, lengths 15 to 7.82k)
ids (sequence, lengths 1 to 7)
def METHOD_NAME(self) -> str: """ Resource Id. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self) -> None: assert m.parser.get_default("nvprof") is False
[ 9, -1 ]
def METHOD_NAME(self): s = self.dumps(self.adapted) qs = QuotedString(s) if self._conn is not None: qs.prepare(self._conn) return qs.METHOD_NAME()
[ 8541 ]
def METHOD_NAME(self): """ Check that {{ settings }} does not throw an error if it can not find a request to work with """ context = {} template = '{{ settings("tests.testgenericsetting").title }}' self.assertEqual( self.render(template, context, request_context=False), self.default_settings.title, )
[ 9, 817, 654, 377 ]
def METHOD_NAME(self) -> str: return pulumi.get(self, "domain")
[ 1674 ]
def METHOD_NAME( self, y_hat: Tensor, y: Tensor, ) -> Tensor: if self.model_config.return_type == ModelReturnType.raw: loss_fn = F.cross_entropy elif self.model_config.return_type == ModelReturnType.probs: loss_fn = F.nll_loss y_hat = y_hat.log() elif self.model_config.return_type == ModelReturnType.log_probs: loss_fn = F.nll_loss else: assert False return loss_fn(y_hat, y)
[ 1572, 592, 493 ]
def METHOD_NAME(component_type: str, ip_address: str): device_config = Varta(configuration=VartaConfiguration(ip_address=ip_address)) dev = create_device(device_config) if component_type in COMPONENT_TYPE_TO_MODULE: component_config = COMPONENT_TYPE_TO_MODULE[component_type].component_descriptor.configuration_factory() else: raise Exception( "illegal component type " + component_type + ". Allowed values: " + ','.join(COMPONENT_TYPE_TO_MODULE.keys()) ) component_config.id = None dev.add_component(component_config) log.debug('Varta IP-Adresse: ' + ip_address) return dev
[ 129, 398, 41, 811 ]
def METHOD_NAME(self): self.request = Mock() self.view = Mock() self.backend = api_utils.InequalityFilterBackend() self.queryset = Mock() self.queryset.filter.return_value = self.queryset
[ 0, 1 ]
def METHOD_NAME(self, value): """ Update current time value """ self._time = value
[ 86 ]
def METHOD_NAME(self, METHOD_NAME): m_range = ['leiden', 'louvain'] self._method_check(METHOD_NAME, m_range)
[ 103 ]
def METHOD_NAME(self, standardItem: 'QMacToolBarItem.StandardItem') -> None: ...
[ 0, 2356, 1024 ]
def METHOD_NAME(client, resource_name, scope, limit_object=None, resource_type=None, properties=None, no_wait=False): all_limit = [] if limit_object is not None: all_limit.append(limit_object) if len(all_limit) > 1: raise CLIError('at most one of limit object is needed for limit!') limit = all_limit[0] if len(all_limit) == 1 else None create_quota_request = {} create_quota_request['properties'] = {} create_quota_request['properties']['limit'] = {} if limit is not None: create_quota_request['properties']['limit'] = limit if properties is not None: create_quota_request['properties']['properties'] = properties create_quota_request['properties']['name'] = {} if resource_name is not None: create_quota_request['properties']['name']['value'] = resource_name if len(create_quota_request['properties']['name']) == 0: del create_quota_request['properties']['name'] if resource_type is not None: create_quota_request['properties']['resource_type'] = resource_type return sdk_no_wait(no_wait, client.begin_create_or_update, resource_name=resource_name, scope=scope, create_quota_request=create_quota_request)
[ 2268, 129 ]
def METHOD_NAME(request): return request.param
[ 374, 44 ]
def METHOD_NAME(endpoint): """ werkzeug.routing.BuildError will be throw if rout endpoint not found """ return app.config.get("BASE_URL", "https://doaj.org") + url_for(endpoint)
[ 19, 324, 274, 604, 841 ]
def METHOD_NAME(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int64"): """Get the top k elements in an input tensor along the given axis. Parameters ---------- data : tvm.te.Tensor The input tensor. k : int or tvm.te.Tensor, optional Number of top elements to select. Return all elements if k < 1. axis : int, optional Axis long which to sort the input tensor. ret_type: str, optional The return type [both, values, indices]. "both": return both top k data and indices. "values": return top k data only. "indices": return top k indices only. is_ascend : boolean, optional Whether to sort in ascending or descending order. dtype : string, optional The data type of the indices output. Returns ------- out : tvm.te.Tensor or List[tvm.te.Tensor] The computed result. """ assert ret_type in ["both", "values", "indices"] data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8) out_shape = list(get_const_tuple(data.shape)) kvar = tvm.te.size_var("k") if not isinstance(k, int): out_shape[axis] = kvar elif k >= 1: out_shape[axis] = k out_bufs = [] if ret_type in ["both", "values"]: out_bufs.append(tvm.tir.decl_buffer(out_shape, data.dtype, "value_buf", data_alignment=8)) if ret_type in ["both", "indices"]: out_bufs.append(tvm.tir.decl_buffer(out_shape, dtype, "indices_buf", data_alignment=8)) out_shapes = [out_shape] * len(out_bufs) kv = kvar if not isinstance(k, int) else k out = te.extern( out_shapes, [data], lambda ins, outs: tvm.tir.call_packed( "tvm.contrib.sort.topk", ins[0], *outs, kv, axis, ret_type, is_ascend ), in_buffers=[data_buf], out_buffers=out_bufs, name="topk_cpu", tag="topk_cpu", ) return out
[ 4073 ]
def METHOD_NAME(self, x): with tf.name_scope("second"): x = self.d1(x) x = self.d2(x) # testing layer re-use x = self.last(x) return self.last(x)
[ 3146 ]
def METHOD_NAME(self): self.tmp_dir.cleanup()
[ 531, 481 ]
def METHOD_NAME(cls, *args, **kwargs): if cls._args_schema is not None: return cls._args_schema cls._args_schema = super().METHOD_NAME(*args, **kwargs) # define Arg Group "" _args_schema = cls._args_schema _args_schema.name = AAZStrArg( options=["-n", "--name"], help="Authorization name.", required=True, id_part="child_name_1", ) _args_schema.circuit_name = AAZStrArg( options=["--circuit-name"], help="ExpressRoute circuit name.", required=True, id_part="name", ) _args_schema.resource_group = AAZResourceGroupNameArg( required=True, ) return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(self): """this is used to label the operator being benchmarked""" if self.user_given_name: return self.user_given_name return self.__class__.__name__
[ 298, 156 ]
def METHOD_NAME(self, alias): ...
[ -1 ]
def METHOD_NAME(authorization_name: Optional[str] = None, circuit_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteCircuitAuthorizationResult: """ Gets the specified authorization from the specified express route circuit. :param str authorization_name: The name of the authorization. :param str circuit_name: The name of the express route circuit. :param str resource_group_name: The name of the resource group. """ __args__ = dict() __args__['authorizationName'] = authorization_name __args__['circuitName'] = circuit_name __args__['resourceGroupName'] = resource_group_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:network/v20230401:getExpressRouteCircuitAuthorization', __args__, opts=opts, typ=GetExpressRouteCircuitAuthorizationResult).value return AwaitableGetExpressRouteCircuitAuthorizationResult( authorization_key=pulumi.get(__ret__, 'authorization_key'), authorization_use_status=pulumi.get(__ret__, 'authorization_use_status'), etag=pulumi.get(__ret__, 'etag'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), provisioning_state=pulumi.get(__ret__, 'provisioning_state'), type=pulumi.get(__ret__, 'type'))
[ 19, 5223, 2476, 1708, 1355 ]
def METHOD_NAME(self) -> None: set_determinism(seed=0)
[ 0, 1 ]
def METHOD_NAME(self, trial_job_id, trial_history): """update data Parameters ---------- trial_job_id : int trial job id trial_history : list The history performance matrix of each trial """ if trial_job_id not in self._running_history: self._running_history[trial_job_id] = [] self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):])
[ 86, 365 ]
def METHOD_NAME(self): cgi.maxlen = 10000 expect = self.headers.get('expect', "") if expect.lower() == "100-continue": self.send_response(100) self.end_headers() form = cgi.FieldStorage( fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST'}) req_item = form['certreq'] req_type = form.getfirst('type', 'auto') if req_item.filename: # The field contains an uploaded file if req_type == "auto": if "sign" in req_item.filename: sign_type = "sign" else: sign_type = "auth" else: if req_type == "sign": sign_type = "sign" else: sign_type = "auth" try: t = tempfile.NamedTemporaryFile() t.write(req_item.file.read()) t.flush() p = subprocess.Popen(["bash", "/home/ca/CA/sign_req.sh", sign_type, t.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() t.close() p.wait() if p.returncode == 0: crtname = os.path.splitext(req_item.filename)[0].replace("_csr_", "_crt_") self.send_response(200, 'OK') self.send_header('Content-Type', 'application/octet-stream') self.send_header('Content-Disposition', 'attachment; filename="{}.pem"'.format(crtname)) self.send_header('Content-Length', len(out)) self.end_headers() self.wfile.write(out) else: err = err.decode() print(err, file=sys.stderr) self.send_response(500) self.send_header("Content-Type", 'text/html; charset="utf-8"') self.end_headers() self.wfile.write("<html><body>Error:<pre>{}</pre></body></html>".format(err).encode()) return finally: t.close() req_item.file.close() self.send_error(400) return
[ 74, 72 ]
def METHOD_NAME(dest_dir): dir_path = "py" for file_path in os.listdir("py/"): file_name = file_path py_path = os.path.join(dir_path, file_path) file_name_no_ext = os.path.splitext(file_name)[0] ext = os.path.splitext(file_name)[1] if ext != ".py": continue nb_path = os.path.join("ipynb", file_name_no_ext + ".ipynb") md_path = os.path.join(dest_dir, "tutorial", file_name_no_ext + ".md") tutobooks.py_to_md(py_path, nb_path, md_path, "templates/img") github_repo_dir = "keras-team/autokeras/blob/master/docs/" with open(md_path, "r") as md_file: button_lines = [ ":material-link: " "[**View in Colab**](https://colab.research.google.com/github/" + github_repo_dir + "ipynb/" + file_name_no_ext + ".ipynb" + ") &nbsp; &nbsp;" # + '<span class="k-dot">•</span>' + ":octicons-mark-github-16: " "[**GitHub source**](https://github.com/" + github_repo_dir + "py/" + file_name_no_ext + ".py)", "\n", ] md_content = "".join(button_lines) + "\n" + md_file.read() with open(md_path, "w") as md_file: md_file.write(md_content)
[ 1739, 24, 4862, 4165 ]
def METHOD_NAME(xx): return str(xx)
[ 24, 3 ]
def METHOD_NAME(db): if EVENT_TYPE == 'normal': with open(base_path / 'sql' / 'clickhouse_events.sql', 'r') as f: q = f.read() with db.get_live_session() as conn: conn.execute(q) print(f"`connector_user_events` table created succesfully.") with open(base_path / 'sql' / 'clickhouse_events_buffer.sql', 'r') as f: q = f.read() with db.get_live_session() as conn: conn.execute(q) print(f"`connector_user_events_buffer` table created succesfully.") with open(base_path / 'sql' / 'clickhouse_sessions.sql', 'r') as f: q = f.read() with db.get_live_session() as conn: conn.execute(q) print(f"`connector_sessions` table created succesfully.") with open(base_path / 'sql' / 'clickhouse_sessions_buffer.sql', 'r') as f: q = f.read() with db.get_live_session() as conn: conn.execute(q) print(f"`connector_sessions_buffer` table created succesfully.") if EVENT_TYPE == 'detailed': with open(base_path / 'sql' / 'clickhouse_events_detailed.sql') as f: q = f.read() with db.get_live_session() as conn: conn.execute(q) print(f"`connector_user_events_detailed` table created succesfully.") with open(base_path / 'sql' / 'clickhouse_events_detailed_buffer.sql') as f: q = f.read() with db.get_live_session() as conn: conn.execute(q) print(f"`connector_user_events_detailed_buffer` table created succesfully.")
[ 129, 2253, 10438 ]
def METHOD_NAME(self, cls, to_dict, from_dict, typename=None): """Register a conversion/restoration. The `to_dict` function must return a dictionary; if no `typename` is given, the unquallified class name will be used. :param cls: the class that will be converted/restored :param to_dict: a function that will be called with the object as an argument :param from_dict: a function that restores the object from the dict :param typename: an optional typename for identifying the converted object """ typename = cls.__name__ if typename is None else typename self.typenames[cls] = typename self.convert_to_dict[typename] = partial(self.obj_to_dict, typename, to_dict) self.convert_from_dict[typename] = partial(self.obj_from_dict, from_dict)
[ 372 ]
def METHOD_NAME(n_features, n_args, interaction_only): """Get the combinations of features to be passed to a library function.""" comb = combinations if interaction_only else combinations_w_r return comb(range(n_features), n_args)
[ 7599 ]
def METHOD_NAME(self, user: dict) -> UserRole: if user.get("workspaces") is None: return UserRole.owner return UserRole.annotator
[ 21, 1018 ]
def METHOD_NAME(self, sig, signode): signode += addnodes.desc_name(text=sig) return sig
[ 276, 1334 ]
def METHOD_NAME(self, txid): res = self.compose_request('tx', txid) return res['hex']
[ 17242 ]
def METHOD_NAME(release, platform, locale): with dbo.begin() as trans: release_row = releases_service.get_release(release, trans) if release_row: locale_data = get_by_path(release_row["blob"], ("platforms", platform, "locales", locale)) data_version = get_by_path(release_row["data_versions"], ("platforms", platform, "locales", locale)) else: try: locale_data = dbo.releases.getLocale(release, platform, locale, transaction=trans) except KeyError as e: return problem(404, "Not Found", json.dumps(e.args)) data_version = dbo.releases.getReleases(name=release, transaction=trans)[0]["data_version"] headers = {"X-Data-Version": data_version} return Response(response=json.dumps(locale_data), mimetype="application/json", headers=headers)
[ 19, 586, 97, 779 ]
def METHOD_NAME(self): unlocked = ThreadFactory() user = UserFactory() # This should not raise an exception unlocked.new_post(author=user, content="empty")
[ 9, 15258, 600 ]
def METHOD_NAME(self, split, epoch=1, combine=False): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ loaded_datasets = [] paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] logger.info("data_path", data_path) for k in itertools.count(): split_k = split + (str(k) if k > 0 else "") path = os.path.join(data_path, split_k) ds = indexed_dataset.make_dataset( path, impl=self.args.dataset_impl, fix_lua_indexing=True, dictionary=self.dictionary, ) if ds is None: if k > 0: break else: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) with data_utils.numpy_seed(self.seed + k): loaded_datasets.append( BlockPairDataset( ds, self.dictionary, ds.sizes, self.args.tokens_per_sample, break_mode=self.args.break_mode, doc_break_size=1, ) ) logger.info( "{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1])) ) if not combine: break if len(loaded_datasets) == 1: dataset = loaded_datasets[0] sizes = dataset.sizes else: dataset = ConcatDataset(loaded_datasets) sizes = np.concatenate([ds.sizes for ds in loaded_datasets]) self.datasets[split] = MaskedLMDataset( dataset=dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.cls(), sep_token_idx=self.dictionary.sep(), shuffle=self.args.shuffle_dataset, seed=self.seed, )
[ 557, 126 ]
def METHOD_NAME(cls, left: Optional[float] = None) -> "Fit": """ Display the page designated by page, with the horizontal coordinate left positioned at the left edge of the window and the contents of the page magnified just enough to fit the entire height of its bounding box within the window. A null value for left specifies that the current value of that parameter is to be retained unchanged. Args: left: Returns: The created fit object. """ return Fit(fit_type="/FitBV", fit_args=(left,))
[ 90, 3521, 11568 ]
def METHOD_NAME(pfnDict): """ Create PFN URI from pfnDict :param dict pfnDict: """ # # make sure all keys are in allDict = dict.fromkeys(["Protocol", "Host", "Port", "WSUrl", "Path", "FileName"], "") if not isinstance(pfnDict, dict): return S_ERROR(f"pfnunparse: wrong type for pfnDict argument, expected a dict, got {type(pfnDict)}") allDict.update(pfnDict) pfnDict = allDict # # c # # /a/b/c filePath = os.path.normpath("/" + pfnDict["Path"] + "/" + pfnDict["FileName"]).replace("//", "/") # # host uri = pfnDict["Host"] if pfnDict["Host"]: if pfnDict["Port"]: # host:port uri = f"{pfnDict['Host']}:{pfnDict['Port']}" if pfnDict["WSUrl"]: if "?" in pfnDict["WSUrl"] and "=" in pfnDict["WSUrl"]: # pylint: disable=unsupported-membership-test # host/wsurl # host:port/wsurl uri = f"{uri}{pfnDict['WSUrl']}" else: # host/wsurl # host:port/wsurl uri = f"{uri}{pfnDict['WSUrl']}?=" if pfnDict["Protocol"]: if uri: # proto://host # proto://host:port # proto://host:port/wsurl uri = f"{pfnDict['Protocol']}://{uri}" else: # proto: uri = f"{pfnDict['Protocol']}:" pfn = f"{uri}{filePath}" # c # /a/b/c # proto:/a/b/c # proto://host/a/b/c # proto://host:port/a/b/c # proto://host:port/wsurl/a/b/c return S_OK(pfn)
[ 9305, -1 ]
def METHOD_NAME(self): try: for environment_string, environment_object in braintree.Environment.All.items(): braintree.Configuration.configure( environment_string, 'my_merchant_id', 'public_key', 'private_key' ) self.assertEqual(braintree.Configuration.environment, environment_object) finally: reset_braintree_configuration()
[ 9, 830, 111, 3367, 3224, 43, 1027 ]
def METHOD_NAME(value, expected_result): validator = URL() result = validator.validate(value) assert result.is_valid == expected_result
[ 9, 12633, 741, 187 ]
def METHOD_NAME(density, viscosity, f_other=0.0): A = _A_coeff(density) B = _B_coeff(density, viscosity) f_res = 0.033 * A + 0.00087 * B - 0.74 f_res = np.clip(f_res, 0.0, 1.0 - f_other) return f_res
[ 11711, 5680 ]
def METHOD_NAME(self):
[ 9, 24, 2152 ]
def METHOD_NAME(typestr): key = typestr.rstrip(" *") if key not in typemap: user_defined_types.add(key) typemap[key] = "{}_{}".format(module, key)
[ 19, 8400, 44 ]
def METHOD_NAME(self): self.outxml.endElement(self.mode)
[ 322, 1798 ]
def METHOD_NAME(logger): """Setup ophyd for use Must be called once per session using ophyd """ # It's important to use the same context in the callback _dispatcher # as the main thread, otherwise not-so-savvy users will be very # confused global _dispatcher if _dispatcher is not None: logger.debug("ophyd already setup") return pyepics_compat._get_pv = pyepics_compat.get_pv pyepics_compat.get_pv = get_pv def _cleanup(): """Clean up the ophyd session""" global _dispatcher if _dispatcher is None: return pyepics_compat.get_pv = pyepics_compat._get_pv if _dispatcher.is_alive(): _dispatcher.stop() _dispatcher = None logger.debug("Installing event dispatcher") context = PV._default_context.broadcaster _dispatcher = EventDispatcher( thread_class=CaprotoCallbackThread, context=context, logger=logger ) atexit.register(_cleanup) return _dispatcher
[ 102 ]
def METHOD_NAME(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return self._tab.String(o + self._tab.Pos) return None
[ 13814 ]
def METHOD_NAME(cpu, pc, fd, buf, cnt): try_read = False # Capture console output if self._console_capture: # Fun trick: lazy eval of OSI # Based on the idea that a non-POSIX FD will only be used after boot is finished an OSI is functional # Note: doesn't capture boot logs (would require hooking kernel's printk, not write syscall) if (fd == 1) or (fd == 2) or (fd == 3): try_read = True else: curr_proc = panda.plugins['osi'].get_current_process(cpu) file_name_ptr = panda.plugins['osi_linux'].osi_linux_fd_to_filename(cpu, curr_proc, fd) file_path = panda.ffi.string(file_name_ptr).decode() if ("tty" in file_path): try_read = True if try_read: try: data = panda.virtual_memory_read(cpu, buf, cnt) except ValueError: raise RuntimeError(f"Failed to read buffer: addr 0x{buf:016x}") if fd == 2: self._console_printed_err = True log_file = self._console_log_dir.joinpath("console.out") with open(log_file, "ab") as f: f.write(data) self._files_written.add(str(log_file)) # Use OSI to capture logs for a named process if self._proc_name: curr_proc = panda.plugins['osi'].get_current_process(cpu) curr_proc_name = panda.ffi.string(curr_proc.name).decode() if self._proc_name == curr_proc_name: if not try_read: # If we didn't already read this data in once for console capture try: data = panda.virtual_memory_read(cpu, buf, cnt) except ValueError: raise RuntimeError(f"Failed to read buffer: proc \'{curr_proc_name}\', addr 0x{buf:016x}") file_name_ptr = panda.plugins['osi_linux'].osi_linux_fd_to_filename(cpu, curr_proc, fd) file_path = panda.ffi.string(file_name_ptr).decode() # For informational purposes only, collection not reliant on this exact mapping if fd == 1: # POSIX stdout file_path += ".stdout" elif fd == 2: # POSIX stderr file_path += ".stderr" self._proc_printed_err = True log_file = self._proc_log_dir.joinpath(file_path.replace("//", "_").replace("/", "_")) with open(log_file, "ab") as f: f.write(data) self._files_written.add(str(log_file))
[ 2305, 77, 4987, 69, 3709, 77, 7576 ]
def METHOD_NAME( values: np.ndarray, prediction_length: int, start: str = "1700-01-01", freq: str = "1H", ): target_dim = values.shape[0] print( f"making dataset with {target_dim} dimension and {values.shape[1]} observations." ) start = pd.Timestamp(start, freq) train_ds = [ {"item": "0", "start": start, "target": values[:, :-prediction_length]} ] test_ds = [{"item": "0", "start": start, "target": values}] return MultivariateDatasetInfo( name="custom", train_ds=train_ds, test_ds=test_ds, target_dim=target_dim, freq=freq, prediction_length=prediction_length, )
[ 93, 126 ]
def METHOD_NAME(type, callback): # -> None: ...
[ 1971 ]
def METHOD_NAME(self, get_latest): get_latest.return_value = self.create_mock_time( tests.TestNewCamAndArpRecords.slack - 10 ) test = tests.TestNewCamAndArpRecords() assert test.test_cam() is None
[ 9, 3846, 12265 ]
def METHOD_NAME() -> None: """Prints access objects of Client, use cmd for commands.""" root = CommandGraphRoot() actions = ["-o cmd"] + [f"-o {key}" for key in root.children] print("Specify an object on which to execute command") print("\n".join(actions))
[ 38, 414, 635 ]
def METHOD_NAME(fn): if return_orig: @functools.wraps(fn) def wrapper_orig(*args): if _orig_isinstance(args[-1], cond_type): return fn(*args) return args[-1] return wrapper_orig else: @functools.wraps(fn) def wrapper_const(*args): if _orig_isinstance(args[-1], cond_type): return fn(*args) return return_const return wrapper_const
[ 1087 ]
def METHOD_NAME(env_vars): for ev in env_vars: os.environ.pop('{}{}'.format(E2E_ENV_VAR_PREFIX, ev), None)
[ 188, 485, 1659 ]
def METHOD_NAME(vec: List[Any], trials: int = 2000) -> float: """Returns the empirical likelihood (via Monte Carlo trials) of randomly finding a vector with as low entropy as this one.""" n = np.nansum(vec) newvec = list(filter(lambda x: not np.isnan(x), vec)) m = len(newvec) ne = normalized_entropy(newvec) sampled_vec = rng.multinomial(n, [1 / m for i in range(m)], trials) # Return the fraction of times the sampled vector has no more entropy than the original vector return sum(normalized_entropy(v) <= ne for v in sampled_vec) / trials
[ 8802, 11772 ]
def METHOD_NAME(*args, **kwargs): if kwargs["type"] == "int64": return np.random.randint(kwargs["low"], kwargs["high"], kwargs["shape"]).astype(np.int64) elif kwargs["type"] == "float32": return (kwargs["high"] - kwargs["low"]) * np.random.random( kwargs["shape"]).astype(np.float32) + kwargs["low"]
[ 567, 362 ]
def METHOD_NAME(network_manager_name: Optional[str] = None, regions: Optional[Sequence[str]] = None, resource_group_name: Optional[str] = None, skip_token: Optional[str] = None, top: Optional[int] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListActiveSecurityAdminRulesResult: """ Lists active security admin rules in a network manager. :param str network_manager_name: The name of the network manager. :param Sequence[str] regions: List of regions. :param str resource_group_name: The name of the resource group. :param str skip_token: When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data. :param int top: An optional query parameter which specifies the maximum number of records to be returned by the server. """ __args__ = dict() __args__['networkManagerName'] = network_manager_name __args__['regions'] = regions __args__['resourceGroupName'] = resource_group_name __args__['skipToken'] = skip_token __args__['top'] = top opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:network/v20230501:listActiveSecurityAdminRules', __args__, opts=opts, typ=ListActiveSecurityAdminRulesResult).value return AwaitableListActiveSecurityAdminRulesResult( skip_token=pulumi.get(__ret__, 'skip_token'), value=pulumi.get(__ret__, 'value'))
[ 245, 923, 2326, 2870, 1634 ]
def METHOD_NAME(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
[ 44 ]
async def METHOD_NAME(self) -> None: self.log('info', 'Stopping.') if self.running: self.close_socket() self.running = False
[ 631 ]
def METHOD_NAME(user, super_admin_token, public_group): name = str(uuid.uuid4()) status, data = api( 'POST', 'telescope', data={ 'name': name, 'nickname': name, 'lat': 0.0, 'lon': 0.0, 'elevation': 0.0, 'diameter': 10.0, }, token=super_admin_token, ) assert status == 200 assert data['status'] == 'success' telescope_id = data['data']['id'] fielddatafile = f'{os.path.dirname(__file__)}/../../../../data/ZTF_Fields.csv' regionsdatafile = f'{os.path.dirname(__file__)}/../../../../data/ZTF_Region.reg' instrument_name = str(uuid.uuid4()) status, data = api( 'POST', 'instrument', data={ 'name': instrument_name, 'type': 'imager', 'band': 'Optical', 'filters': ['ztfr'], 'telescope_id': telescope_id, 'api_classname': 'ZTFAPI', 'api_classname_obsplan': 'ZTFMMAAPI', 'field_data': pd.read_csv(fielddatafile)[:5].to_dict(orient='list'), 'field_region': Regions.read(regionsdatafile).serialize(format='ds9'), 'sensitivity_data': { 'ztfr': { 'limiting_magnitude': 20.3, 'magsys': 'ab', 'exposure_time': 30, 'zeropoint': 26.3, } }, }, token=super_admin_token, ) assert status == 200 assert data['status'] == 'success' instrument_id = data['data']['id'] request_data = { 'group_id': public_group.id, 'instrument_id': instrument_id, 'pi': 'Shri Kulkarni', 'hours_allocated': 200, 'start_date': '3021-02-27T00:00:00', 'end_date': '3021-07-20T00:00:00', 'proposal_id': 'COO-2020A-P01', } status, data = api('POST', 'allocation', data=request_data, token=super_admin_token) assert status == 200 assert data['status'] == 'success' allocation_id = data['data']['id'] default_plan_name = str(uuid.uuid4()) request_data = { 'allocation_id': allocation_id, 'default_plan_name': default_plan_name, 'payload': { 'filter_strategy': 'block', 'schedule_strategy': 'tiling', 'schedule_type': 'greedy_slew', 'exposure_time': 300, 'filters': 'ztfr', 'maximum_airmass': 2.0, 'integrated_probability': 100, 'minimum_time_difference': 30, 'program_id': 'Partnership', 'subprogram_name': 'GRB', }, } status, data = api( 'POST', 'default_observation_plan', data=request_data, token=super_admin_token ) assert status == 200 assert data['status'] == 'success' id = data['data']['id'] status, data = api( 'GET', f'default_observation_plan/{id}', token=super_admin_token, ) assert status == 200 assert data['status'] == 'success' assert data["data"]["allocation_id"] == allocation_id # we create a second plan, to see if generating both at the same time works default_plan_name_2 = str(uuid.uuid4()) request_data["default_plan_name"] = default_plan_name_2 status, data = api( 'POST', 'default_observation_plan', data=request_data, token=super_admin_token ) assert status == 200 assert data['status'] == 'success' id = data['data']['id'] status, data = api( 'GET', f'default_observation_plan/{id}', token=super_admin_token, ) assert status == 200 assert data['status'] == 'success' assert data["data"]["allocation_id"] == allocation_id datafile = f'{os.path.dirname(__file__)}/../../../../data/GW190814.xml' with open(datafile, 'rb') as fid: payload = fid.read() event_data = {'xml': payload} dateobs = "2019-08-14T21:10:39" status, data = api('GET', f'gcn_event/{dateobs}', token=super_admin_token) if status == 404: status, data = api( 'POST', 'gcn_event', data=event_data, token=super_admin_token ) assert status == 200 assert data['status'] == 'success' gcnevent_id = data['data']['gcnevent_id'] else: # we delete the event and re-add it gcnevent_id = data['data']['id'] status, data = api('DELETE', f'gcn_event/{dateobs}', token=super_admin_token) assert status == 200 assert data['status'] == 'success' status, data = api( 'POST', 'gcn_event', data=event_data, token=super_admin_token ) assert status == 200 assert data['status'] == 'success' gcnevent_id = data['data']['gcnevent_id'] # wait for event to load for n_times in range(26): status, data = api('GET', f"gcn_event/{dateobs}", token=super_admin_token) if data['status'] == 'success': break time.sleep(2) assert n_times < 25 # wait for the localization to load params = {"include2DMap": True} for n_times_2 in range(26): status, data = api( 'GET', 'localization/2019-08-14T21:10:39/name/LALInference.v1.fits.gz', token=super_admin_token, params=params, ) if data['status'] == 'success': data = data["data"] assert data["dateobs"] == dateobs assert data["localization_name"] == "LALInference.v1.fits.gz" assert np.isclose(np.sum(data["flat_2d"]), 1) break else: time.sleep(2) assert n_times_2 < 25 # wait for the plans to be processed time.sleep(30) n_retries = 0 while n_retries < 15: try: # now we want to see if any observation plans were created status, data = api( 'GET', f"gcn_event/{gcnevent_id}/observation_plan_requests", token=super_admin_token, ) assert status == 200 assert data['status'] == 'success' assert len(data['data']) > 0 generated_by_default = [ d['allocation_id'] == allocation_id for d in data['data'] ] assert sum(generated_by_default) == 2 break except AssertionError: n_retries += 1 time.sleep(3) assert n_retries < 15 status, data = api( 'DELETE', f'default_observation_plan/{id}', token=super_admin_token, ) assert status == 200
[ 9, 235, 476, 145, 15765 ]
def METHOD_NAME( *, product_channel_listing: Optional[ProductChannelListing], variants_channel_listing: List[ProductVariantChannelListing], prices_entered_with_tax: bool, tax_calculation_strategy: str, tax_rate: Decimal ) -> ProductAvailability: discounted: Optional[TaxedMoneyRange] = None discounted_net_range = get_product_price_range( variants_channel_listing=variants_channel_listing, discounted=True ) if discounted_net_range is not None: discounted = TaxedMoneyRange( start=_calculate_product_price_with_taxes( discounted_net_range.start, tax_rate, tax_calculation_strategy, prices_entered_with_tax, ), stop=_calculate_product_price_with_taxes( discounted_net_range.stop, tax_rate, tax_calculation_strategy, prices_entered_with_tax, ), ) undiscounted: Optional[TaxedMoneyRange] = None undiscounted_net_range = get_product_price_range( variants_channel_listing=variants_channel_listing, discounted=False, ) if undiscounted_net_range is not None: undiscounted = TaxedMoneyRange( start=_calculate_product_price_with_taxes( undiscounted_net_range.start, tax_rate, tax_calculation_strategy, prices_entered_with_tax, ), stop=_calculate_product_price_with_taxes( undiscounted_net_range.stop, tax_rate, tax_calculation_strategy, prices_entered_with_tax, ), ) discount = None if undiscounted is not None and discounted is not None: discount = _get_total_discount_from_range(undiscounted, discounted) is_visible = ( product_channel_listing is not None and product_channel_listing.is_visible ) is_on_sale = is_visible and discount is not None return ProductAvailability( on_sale=is_on_sale, price_range=discounted, price_range_undiscounted=undiscounted, discount=discount, )
[ 19, 1188, 6477 ]
def METHOD_NAME(self): """Test function _check_eftype.""" eftype = "cohen" _check_eftype(eftype) eftype = "fake" _check_eftype(eftype)
[ 9, 250, -1 ]
def METHOD_NAME(typ, obj, axes, dtype): """Reconstruct an object given its type, raw value, and possibly empty (None) axes. Parameters ---------- typ : object A type obj : object The value to use in the type constructor axes : dict The axes to use to construct the resulting pandas object Returns ------- ret : typ An object of type ``typ`` with the value `obj` and possible axes `axes`. """ try: typ = typ.type except AttributeError: pass res_t = np.result_type(obj.dtype, dtype) if (not isinstance(typ, partial) and issubclass(typ, pd.core.generic.PandasObject)): return typ(obj, dtype=res_t, **axes) # special case for pathological things like ~True/~False if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_: ret_value = res_t.type(obj) else: ret_value = typ(obj).astype(res_t) # The condition is to distinguish 0-dim array (returned in case of # scalar) and 1 element array # e.g. np.array(0) and np.array([0]) if len(obj.shape) == 1 and len(obj) == 1: if not isinstance(ret_value, np.ndarray): ret_value = np.array([ret_value]).astype(res_t) return ret_value
[ 5122, 279 ]
def METHOD_NAME(plugin, item_id, video_id, subtitle, download_mode=False, **kwargs): resp = urlquick.get(URL_BRIGHTCOVE_DATAS) data_account = PATTERN_ACCOUNT.findall(resp.text)[0] data_player = PATTERN_PLAYER.findall(resp.text)[0] key = PATTERN_KEY.findall(resp.text)[0] data_video_id = video_id return resolver_proxy.get_brightcove_video_json(plugin, data_account, data_player, data_video_id, key, download_mode, subtitle)
[ 19, 1781, 274 ]
def METHOD_NAME(self) -> Optional[BoundingPyramid]: """See in superclass.""" return self._tile_store.METHOD_NAME()
[ 19, -1, 1538, 5533 ]
def METHOD_NAME(s): """ TODO: Reference lib/glob / glob_pattern functions in bash grep glob_pattern lib/glob/* NOTE: Dash has CTLESC = -127. Does that mean a string is an array of ints or shorts? Not bytes? How does it handle unicode/utf-8 then? Nope it's using it with char* p. So it dash only ASCII or what? TODO: test it Still need this for slow path / fast path of prefix/suffix/patsub ops. """ left_bracket = False i = 0 n = len(s) while i < n: c = s[i] if c == '\\': i += 1 elif c == '*' or c == '?': return True elif c == '[': left_bracket = True elif c == ']' and left_bracket: return True i += 1 return False
[ 6285, 2307, 1825 ]
def METHOD_NAME(self, surface, event_x, event_y):
[ 586, 632 ]
def METHOD_NAME(): """ :return: the openssl version, if it can be determined """ command = ['openssl', 'version'] p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) current_app.logger.debug(command) stdout, stderr = p.communicate() if p.returncode != 0: current_app.logger.debug(" ".join(command)) current_app.logger.error(stderr) raise Exception(stderr) if stdout.startswith(b'OpenSSL'): return stdout.split()[1]
[ 19, 10984, 281 ]
def METHOD_NAME(self): # Test with no header provided self.getPage('/accept/feed') self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify exact media type self.getPage('/accept/feed', headers=[('Accept', 'application/atom+xml')]) self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify matching media range self.getPage('/accept/feed', headers=[('Accept', 'application/*')]) self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify all media ranges self.getPage('/accept/feed', headers=[('Accept', '*/*')]) self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify unacceptable media types self.getPage('/accept/feed', headers=[('Accept', 'text/html')]) self.assertErrorPage(406, 'Your client sent this Accept header: text/html. ' 'But this resource only emits these media types: ' 'application/atom+xml.') # Test resource where tool is 'on' but media is None (not set). self.getPage('/accept/') self.assertStatus(200) self.assertBody('<a href="feed">Atom feed</a>')
[ 9, 1437, 3081 ]
def METHOD_NAME(self, **kwargs): # noqa: E501 """get_api_versions # noqa: E501 get available API versions # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_versions(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1APIVersions If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.get_api_versions_with_http_info(**kwargs) # noqa: E501
[ 19, 58, 295 ]
def METHOD_NAME(text: str) -> str: """ Normalize unicode text with "NFC", and convert right single quotation mark (U+2019, decimal 8217) as an apostrophe. Args: text (str): the original input sentence. Returns: normalized text (str). """ res = [] for c in normalize_unicode_text(text): if c in ['’']: # right single quotation mark (U+2019, decimal 8217) as an apostrophe res.append("'") else: res.append(c) return ''.join(res)
[ 2147, 779, 526, 1184 ]
def METHOD_NAME(node, sos_thr=0.0): """ Returns a list of duplication and speciation events in which the current node has been involved. Scanned nodes are also labeled internally as dup=True|False. You can access this labels using the 'node.dup' sintaxis. Method: the algorithm scans all nodes from the given leafName to the root. Nodes are assumed to be duplications when a species overlap is found between its child linages. Method is described more detail in: "The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon T. Genome Biol. 2007;8(6):R109. """ # Get the tree's root root = node.root # Checks that is actually rooted outgroups = root.get_children() if len(outgroups) != 2: raise TypeError("Tree is not rooted") # Cautch the smaller outgroup (will be stored as the tree # outgroup) o1 = set(n.name for n in outgroups[0].leaves()) o2 = set(n.name for n in outgroups[1].leaves()) if len(o2)<len(o1): smaller_outg = outgroups[1] else: smaller_outg = outgroups[0] # Prepare to browse tree from leaf to root all_events = [] current = node ref_spcs = node.species sister_leaves = set([]) browsed_spcs = set([current.species]) browsed_leaves = set([current]) # get family Size fSize = len([n for n in root.leaves() if n.species == ref_spcs]) # Clean previous analysis for n in list(root.descendants())+[root]: n.del_prop("evoltype") while current.up: # distances control (0.0 distance check) d = 0 for s in current.get_sisters(): for leaf in s.leaves(): d += current.get_distance(current, leaf) sister_leaves.add(leaf) # Process sister node only if there is any new sequence. # (previene dupliaciones por nombres repetidos) sister_leaves = sister_leaves.difference(browsed_leaves) if len(sister_leaves)==0: current = current.up continue # Gets species at both sides of event sister_spcs = set([n.species for n in sister_leaves]) overlaped_spces = browsed_spcs & sister_spcs all_spcs = browsed_spcs | sister_spcs score = float(len(overlaped_spces))/len(all_spcs) # Creates a new evolEvent event = EvolEvent() event.fam_size = fSize event.seed = node.name # event.e_newick = current.up.get_newick() # high mem usage!! event.sos = score event.outgroup = smaller_outg.name # event.allseqs = set(current.up.get_leaf_names()) event.in_seqs = set([n.name for n in browsed_leaves]) event.out_seqs = set([n.name for n in sister_leaves]) event.inparalogs = set([n.name for n in browsed_leaves if n.species == ref_spcs]) # If species overlap: duplication if score > sos_thr:# and d > 0.0: Removed branch control. event.node = current.up event.etype = "D" event.outparalogs = set([n.name for n in sister_leaves if n.species == ref_spcs]) event.orthologs = set([]) current.up.add_prop("evoltype","D") all_events.append(event) # If NO species overlap: speciation elif score <= sos_thr: event.node = current.up event.etype = "S" event.orthologs = set([n.name for n in sister_leaves if n.species != ref_spcs]) event.outparalogs = set([]) current.up.add_prop("evoltype","S") all_events.append(event) # Updates browsed species browsed_spcs |= sister_spcs browsed_leaves |= sister_leaves sister_leaves = set([]) # And keep ascending current = current.up return all_events
[ 19, 9717, 239, 280, 3802 ]
def METHOD_NAME(self): self._dll.tf_bQ.restype = c_ulonglong self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong) self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205) self.assertEqual(self.U(), 18446744073709551615)
[ 9, 12704, 222 ]
def METHOD_NAME(self) -> None: ...
[ 1602 ]
def METHOD_NAME(self, inputs: Dict[str, Any]) -> Dict[str, Any]: output = inputs['results'][0] meta = inputs['meta'] hm = output['hm'].sigmoid_() ftype = output['ftype'].sigmoid_() wh = output['wh'] reg = output['reg'] bbox, inds = bbox_decode(hm, wh, reg=reg, K=self.K) car_type = decode_by_ind(ftype, inds, K=self.K).detach().cpu().numpy() bbox = bbox.detach().cpu().numpy() for i in range(bbox.shape[1]): bbox[0][i][9] = car_type[0][i] bbox = nms(bbox, 0.3) bbox = bbox_post_process(bbox.copy(), [meta['c'].cpu().numpy()], [meta['s']], meta['out_height'], meta['out_width']) res, Type = [], [] for box in bbox[0]: if box[8] > 0.3: res.append(box[0:8]) Type.append(self.car_type[int(box[9])]) result = {OutputKeys.POLYGONS: np.array(res), OutputKeys.TEXT: Type} return result
[ 1710 ]
def METHOD_NAME(self): pass
[ 709, 710 ]
def METHOD_NAME(self): """Make the collapsing log message handler flush more eagerly.""" class EagerFlusher(logging.handlers.MemoryHandler): def __init__(self, *args, **kwargs): """Set the buffer capacity to 3 regardless of args.""" # leave out any capacity argument from args and kwargs args = args[1:] kwargs = {k: v for k, v in list(kwargs.items()) if k != 'capacity'} # pass 3 as the capacity argument super(EagerFlusher, self).__init__(3, *args, **kwargs) class EagerFlushingCollapser( cbioportal_common.CollapsingLogMessageHandler, EagerFlusher): """CollapsingLogMessageHandler with EagerFlusher overrides.""" pass self.original_collapser = cbioportal_common.CollapsingLogMessageHandler cbioportal_common.CollapsingLogMessageHandler = EagerFlusher
[ 0, 1 ]
def METHOD_NAME(self, **parameters): for key, value in parameters.items(): if key not in self.__parameters.keys(): # throw an exception raise SpynnakerException(f"{key} is not a parameter of {self}") self.__parameters[key] = value # Parameters have been set, so if multi-run then it will have been # injected already; if not then it can just be ignored if self.app_vertex is not None: for m_vertex in self.app_vertex.machine_vertices: m_vertex.set_reload_required(True)
[ 0, 386 ]
def METHOD_NAME(line, cursor, namespace, private): """Returns a list of possible completions: * name completion * attribute completion (obj.attr) * index completion for lists and dictionaries * module completion (from/import) :arg line: incomplete text line :type line: str :arg cursor: current character position :type cursor: int :arg namespace: namespace :type namespace: dict :arg private: whether private variables should be listed :type private: bool :returns: list of completions, word :rtype: list, str >>> complete('re.sr', 5, {'re': re}) (['re.sre_compile', 're.sre_parse'], 're.sr') """ re_unquoted_word = RE_UNQUOTED_WORD.search(line[:cursor]) if re_unquoted_word: # unquoted word -> module or attribute completion word = re_unquoted_word.group(1) if RE_MODULE.match(line): from . import complete_import matches = complete_import.METHOD_NAME(line) if not private: matches[:] = [m for m in matches if m[:1] != "_"] matches.sort() else: from . import complete_namespace matches = complete_namespace.METHOD_NAME(word, namespace, private=private) else: # for now we don't have completers for strings # TODO: add file auto completer for strings word = '' matches = [] return matches, word
[ 676 ]
def METHOD_NAME(r_node, t_node): r_childs = r_node.childNodes t_childs = t_node.childNodes r_count = t_count = 0 for r_c in r_childs: if r_c.nodeType == r_c.ELEMENT_NODE: r_count += 1 for t_c in t_childs: if t_c.nodeType == t_c.ELEMENT_NODE: t_count += 1 if r_count != t_count: print(tabs(), location(t_node), "Child count missmatch. expected:", r_count, " found:", t_count)
[ 979, 186, 669, 1716, 29 ]
def METHOD_NAME(): output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast json")) expected = { "routes": { "10.10.10.1/32": [{"valid": True}], "10.10.10.2/32": [{"valid": True}], } } return topotest.json_cmp(output, expected)
[ 2260, 15946, 1180, 8343 ]
def METHOD_NAME(self, device: dai.Device, xouts: List[XoutFrames]): """ Start recording process. This will create and start the pipeline, start recording threads, and initialize all queues. """ self.mxid = device.getMxId() self.path = self._create_folder(self.folder, self.mxid) calib_data = device.readCalibration() calib_data.eepromToJsonFile(str(self.path / "calib.json")) self.recorder.update(self.path, device, xouts) self.frame_q = Queue(maxsize=20) self.process = Thread(target=_run, args=(self.recorder, self.frame_q)) self.process.METHOD_NAME()
[ 447 ]
def METHOD_NAME(num_micro_batches): """ This test is to examine the correctness of interleaved 1F1B, compared with torch. Be aware it contains some hardcodes. """ world_size = torch.distributed.get_world_size() local_rank = torch.distributed.get_rank() seed_all(1453) NUM_MICRO_BATCHS = num_micro_batches BATCH_SIZE = num_micro_batches NUM_CHUNKS = 2 # create model torch_model = MlpModel().cuda() pp_model = copy.deepcopy(torch_model).cuda() DP_DIM, PP_DIM, TP_DIM = 0, 1, 2 pg_mesh = ProcessGroupMesh(1, world_size, 1) stage_manager = PipelineStageManager(pg_mesh, PP_DIM, is_virtual=True) schedule = InterleavedSchedule(NUM_MICRO_BATCHS, NUM_CHUNKS, stage_manager) sharded_model = torch.nn.ModuleList() for idx, (_, sub_model) in enumerate(pp_model.named_children()): if idx % (world_size) == local_rank: sub_model._forward = sub_model.forward sub_model.forward = MethodType( partial(pp_linear_fwd, stage_mgr=stage_manager, num_chunks=NUM_CHUNKS, model_chunk_id=len(sharded_model)), sub_model._forward) sharded_model.append(sub_model.cuda()) # create optimizer torch_optimizer = torch.optim.SGD(torch_model.parameters(), lr=1) pp_optimizer = OptimizerWrapper(torch.optim.SGD(sharded_model.parameters(), lr=1)) # create seed_all(1453) if local_rank == 0: input_list = [torch.rand(BATCH_SIZE, 4).cuda()] else: input_list = [torch.zeros(BATCH_SIZE, 4).cuda()] torch.distributed.all_reduce(input_list[0]) criterion = lambda x, y: torch.mean(x) # forward and backward torch_output = torch_model(input_list[0]) torch_loss = criterion(torch_output, _) torch_loss.backward() pp_ret = schedule.forward_backward_step(sharded_model, iter(input_list), criterion, pp_optimizer, return_loss=True, return_outputs=True) # check loss if stage_manager.is_last_stage(): assert torch.allclose(torch_loss, pp_ret['loss']) # check gradients torch_grad = [] for torch_p in torch_model.parameters(): torch_grad.append(torch_p.grad.data) for idx, pp_p in enumerate(sharded_model.parameters()): if idx < 2: assert torch.allclose(torch_grad[idx + local_rank * 2], pp_p.grad.data) else: assert torch.allclose(torch_grad[idx + local_rank * 2 + 6], pp_p.grad.data) # step torch_optimizer.step() pp_optimizer.step() # check updated param torch_param = [] for torch_p in torch_model.parameters(): torch_param.append(torch_p.data) for idx, pp_p in enumerate(sharded_model.parameters()): if idx < 2: assert torch.allclose(torch_param[idx + local_rank * 2], pp_p.data) else: assert torch.allclose(torch_param[idx + local_rank * 2 + 6], pp_p.data)
[ 14550, 9550 ]
def METHOD_NAME(argv): del argv # Unused. if _MYSQL_USERNAME.value is None: raise ValueError("--mysql_username has to be specified.") # Generate server and client configs. grr_configs = self_contained_components.InitGRRConfigs( _MYSQL_DATABASE.value, mysql_username=_MYSQL_USERNAME.value, mysql_password=_MYSQL_PASSWORD.value, logging_path=_LOGGING_PATH.value, osquery_path=flags.FLAGS.osquery_path, ) fleetspeak_configs = self_contained_components.InitFleetspeakConfigs( grr_configs, _FLEETSPEAK_MYSQL_DATABASE.value, mysql_username=_MYSQL_USERNAME.value, mysql_password=_MYSQL_PASSWORD.value, logging_path=_LOGGING_PATH.value, ) # Start all remaining server components. # Start a background thread that kills the main process if one of the # server subprocesses dies. server_processes = self_contained_components.StartServerProcesses( grr_configs, fleetspeak_configs ) self_contained_components.DieIfSubProcessDies(server_processes) api_port = api_helpers.GetAdminUIPortFromConfig(grr_configs.server_config) grrapi = api_helpers.WaitForAPIEndpoint(api_port) # Start the client. preliminary_client_p = self_contained_components.StartClientProcess( fleetspeak_configs ) # Wait for the client to enroll and get its id. client_id = api_helpers.WaitForClientToEnroll(grrapi) print("Found client id: %s" % client_id) # Python doesn't guarantee the process name of processes started by the Python # interpreter. They may vary from platform to platform. In order to ensure # that Client.binary_name config setting matches the actual process name, # let's get the name via psutil, kill the client and set the # Config.binary_name explicitly. client_binary_name = str(psutil.Process(preliminary_client_p.pid).name()) preliminary_client_p.kill() preliminary_client_p.wait() # Simply add the Client.binary_name to the client's configuration YAML. with open(grr_configs.client_config, mode="a", encoding="utf-8") as fd: fd.write("\nClient.binary_name: %s\n" % client_binary_name) print("Starting the client with Client.binary_name=%s" % client_binary_name) client_p = self_contained_components.StartClientProcess(fleetspeak_configs) # Start a background thread that kills the main process if # client subprocess dies. self_contained_components.DieIfSubProcessDies([client_p]) # Run the test suite against the enrolled client. self_contained_components.RunEndToEndTests( client_id, grr_configs.server_config, tests=_TESTS.value, manual_tests=_MANUAL_TESTS.value) print("RunEndToEndTests execution succeeded.") sys.exit(0)
[ 57 ]
def METHOD_NAME(self, data=None): # Themes gui.apply_theme(editorpersistance.prefs.theme) # Init mlt. repo = mltinit.init_with_translations() _create_info_dialog() GLib.idle_add(_do_assets_write, self.filename) self.add_window(_info_window)
[ 69, 1284 ]
def METHOD_NAME(self: Any, mock_method: Any) -> None: """ Test Extraction using model class """ config_dict = { 'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION', 'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;', 'extractor.sqlalchemy.model_class': 'tests.unit.extractor.test_sql_alchemy_extractor.TableMetadataResult' } self.conf = ConfigFactory.from_dict(config_dict) extractor = SQLAlchemyExtractor() extractor.results = [dict(database='test_database', schema='test_schema', name='test_table', description='test_description', column_name='test_column_name', column_type='test_column_type', column_comment='test_column_comment', owner='test_owner')] extractor.init(Scoped.get_scoped_conf(conf=self.conf, scope=extractor.get_scope())) result = extractor.extract() self.assertIsInstance(result, TableMetadataResult) self.assertEqual(result.name, 'test_table')
[ 9, 7252, 41, 578, 2 ]
def METHOD_NAME(self): _STATSBEAT_STATE["INITIAL_SUCCESS"] = False _STATSBEAT_STATE["INITIAL_FAILURE_COUNT"] = 1 with mock.patch("azure.monitor.opentelemetry.exporter.export._base._reached_ingestion_code") as m, \ mock.patch("azure.monitor.opentelemetry.exporter.export._base._is_retryable_code") as p: m.return_value = False p.return_value = False with mock.patch.object(AzureMonitorClient, 'track', throw(HttpResponseError)): result = self._exporter._transmit(self._envelopes_to_export) self.assertEqual(result, ExportResult.FAILED_NOT_RETRYABLE) self.assertFalse(_STATSBEAT_STATE["INITIAL_SUCCESS"]) self.assertEqual(_STATSBEAT_STATE["INITIAL_FAILURE_COUNT"], 2)
[ 9, 5022, 130, 15503, 4029, 544 ]
def METHOD_NAME(self, language=None, include_descendants=True): """ Return a query to get the organizations related to this category ie for which a plugin for this category is linked to the organization page via any placeholder. """ return self.get_reverse_related_page_extensions( "organization", language=language, include_descendants=include_descendants )
[ 19, 1123 ]
def METHOD_NAME( self, stream_state: Optional[StreamState] = None, stream_slice: Optional[StreamSlice] = None, next_page_token: Optional[Mapping[str, Any]] = None, ) -> Mapping[str, Any]: # Pass the stream_slice from the argument, not the cursor because the cursor is updated after processing the response return self._get_request_option(RequestOptionType.body_data, stream_slice)
[ 19, 377, 2829, 365 ]
def METHOD_NAME(self, timeformat='unix'): """ Returns the UTC start time of the weather alert event :param timeformat: the format for the time value. May be: '*unix*' (default) for UNIX time '*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00`` '*date* for ``datetime.datetime`` object instance :type timeformat: str :returns: an int or a str :raises: ValueError when negative values are provided """ return formatting.timeformat(self.start, timeformat)
[ 447, 104 ]
def METHOD_NAME(): """Test that the MultiplexForecaster magic "|" dunder methodbahves as expected. A MultiplexForecaster can be created by using the "|" dunder method on either forecaster or MultiplexForecaster objects. Here we test that it performs as expected on all the use cases, and raises the expected error in some others. """ # test a simple | example with two forecasters: multiplex_two_forecaster = AutoETS() | NaiveForecaster() assert isinstance(multiplex_two_forecaster, MultiplexForecaster) assert len(multiplex_two_forecaster.forecasters) == 2 # now test that | also works on two MultiplexForecasters: multiplex_one = MultiplexForecaster([("arima", AutoARIMA()), ("ets", AutoETS())]) multiplex_two = MultiplexForecaster( [("theta", ThetaForecaster()), ("naive", NaiveForecaster())] ) multiplex_two_multiplex = multiplex_one | multiplex_two assert isinstance(multiplex_two_multiplex, MultiplexForecaster) assert len(multiplex_two_multiplex.forecasters) == 4 # last we will check 3 forecaster with the same name - should check both that # MultiplexForecaster | forecaster works, and that ensure_unique_names works multiplex_same_name_three_test = ( NaiveForecaster(strategy="last") | NaiveForecaster(strategy="mean") | NaiveForecaster(strategy="drift") ) assert isinstance(multiplex_same_name_three_test, MultiplexForecaster) assert len(multiplex_same_name_three_test.forecasters) == 3 forecaster_param_names = multiplex_same_name_three_test._get_estimator_names( multiplex_same_name_three_test._forecasters ) assert len(set(forecaster_param_names)) == 3 # test we get a ValueError if we try to | with anything else: with pytest.raises(TypeError): multiplex_one | "this shouldn't work"
[ 9, 10778, 894, 3135 ]
def METHOD_NAME(self): pts = np.array( [[0, 0.5, 1, 1.2, 0.5, -0.2], [0, 0, 0, 1, 1.2, 1], [0, 0, 0, 0, 0, 0]], dtype=float, ) pt = np.array([0.2, 0.3, 0]) self.assertTrue(pp.geometry_property_checks.point_in_cell(pts, pt)) pt = np.array([0.5, 1, 0]) self.assertTrue(pp.geometry_property_checks.point_in_cell(pts, pt)) pt = np.array([1.3, 0.5, 0]) self.assertTrue(not pp.geometry_property_checks.point_in_cell(pts, pt)) pt = np.array([-0.1, 0.5, 0]) self.assertTrue(not pp.geometry_property_checks.point_in_cell(pts, pt)) pt = np.array([1.1, -0.1, 0]) self.assertTrue(not pp.geometry_property_checks.point_in_cell(pts, pt))
[ 9, 7863, 5066, 1170 ]
def METHOD_NAME(self): """ Creates a :py:class:`waflib.Tools.c_osx.macplist` instance. """ if self.env.MACAPP or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'Info.plist']) self.plisttask = plisttask = self.create_task('macplist', [], n1) plisttask.context = { 'app_name': self.link_task.outputs[0].name, 'env': self.env } plist_ctx = getattr(self, 'plist_context', None) if (plist_ctx): plisttask.context.update(plist_ctx) if getattr(self, 'mac_plist', False): node = self.path.find_resource(self.mac_plist) if node: plisttask.inputs.append(node) else: plisttask.code = self.mac_plist else: plisttask.code = app_info inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name self.add_install_files(install_to=inst_to, install_from=n1)
[ 129, 758, 4412 ]
def METHOD_NAME(self): pred_socres = self.clf.decision_function(self.X_test) pred_ranks = self.clf._predict_rank(self.X_test, normalized=True) # assert the order is reserved assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3) assert_array_less(pred_ranks, 1.01) assert_array_less(-0.1, pred_ranks)
[ 9, 2103, 1499, 1568 ]
def METHOD_NAME(self) -> str: return MATRIX_PROTOCOL_PREFIX + self.hashes[NULL_MATRIX_NAME]
[ 19, 1051, 430 ]
def METHOD_NAME(string, year_first=None, day_first=None): # pylint:disable=inconsistent-return-statements """Looks for date patterns, and if found return the date and group span. Assumes there are sentinels at the beginning and end of the string that always allow matching a non-digit delimiting the date. Year can be defined on two digit only. It will return the nearest possible date from today. >>> search_date(' This happened on 2002-04-22. ') (18, 28, datetime.date(2002, 4, 22)) >>> search_date(' And this on 17-06-1998. ') (13, 23, datetime.date(1998, 6, 17)) >>> search_date(' no date in here ') """ for date_re in date_regexps: search_match = date_re.search(string) if not search_match: continue start, end = search_match.start(1), search_match.end(1) groups = search_match.groups()[1:] match = '-'.join(groups) if match is None: continue if year_first and day_first is None: day_first = False if day_first is None: day_first = _guess_day_first_parameter(groups) # If day_first/year_first is undefined, parse is made using both possible values. yearfirst_opts = [False, True] if year_first is not None: yearfirst_opts = [year_first] dayfirst_opts = [True, False] if day_first is not None: dayfirst_opts = [day_first] kwargs_list = ({'dayfirst': d, 'yearfirst': y} for d in dayfirst_opts for y in yearfirst_opts) for kwargs in kwargs_list: try: date = parser.parse(match, **kwargs) except (ValueError, TypeError): # pragma: no cover # see https://bugs.launchpad.net/dateutil/+bug/1247643 date = None # check date plausibility if date and valid_year(date.year): # pylint:disable=no-member return start, end, date.date() # pylint:disable=no-member
[ 1070, 153 ]
def METHOD_NAME(configs, project, bucket_path): """Loads metric config files and runs each metric.""" queryer = BigQuerier(project, bucket_path) # authenticate as the given service account if our environment is providing one if 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ: keyfile = os.environ['GOOGLE_APPLICATION_CREDENTIALS'] check(['gcloud', 'auth', 'activate-service-account', f'--key-file={keyfile}']) # the 'bq show' command is called as a hack to dodge the config prompts that bq presents # the first time it is run. A newline is passed to stdin to skip the prompt for default project # when the service account in use has access to multiple projects. check(['bq', 'show'], stdin='\n') errs = [] for path in configs or all_configs(): try: with open(path) as config_raw: config = yaml.safe_load(config_raw) if not config: raise ValueError('invalid yaml: %s.' % path) config['metric'] = config['metric'].strip() validate_metric_name(config['metric']) queryer.run_metric(config) except ( ValueError, KeyError, IOError, subprocess.CalledProcessError, ): print(traceback.format_exc(), file=sys.stderr) errs.append(path) if errs: print('Failed %d configs: %s' % (len(errs), ', '.join(errs))) sys.exit(1)
[ 57 ]
def METHOD_NAME(self) -> str: return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(n_reqs, mocked_version, requirement_version, expected_return): """ Running with n_reqs requirements TODO - check the results - not testing for wrongly formated requirements """ result = {} requirements = [f"test-dep>={requirement_version}" for _ in range(n_reqs)] # pylint: disable=disallowed-name with patch("ansible_collections.arista.avd.plugins.action.verify_requirements.version") as patched_version: patched_version.return_value = mocked_version if mocked_version is None: patched_version.side_effect = PackageNotFoundError() ret = _validate_python_requirements(requirements, result) assert ret == expected_return
[ 9, 187, 440, 5186 ]
def METHOD_NAME(self): """ Makes authentication request """ self.validate_config_credentials() req_data = { "client_id": self.config.client_id, "client_secret": self.config.client_secret, "grant_type": self.GRANT_TYPE, } credentials = {k: v for k, v in self.config.credentials.items()} try: response = self.session.post( self.TOKEN_URL_TEMPLATE.format( auth_base_uri=self.config.auth_base_uri.rstrip("/"), realm=self.config.realm, ), data=dict(req_data, **credentials), headers=USER_AGENT, timeout=HTTP_REQ_TIMEOUT, ) response.raise_for_status() except requests.RequestException as e: if self.retrieved_token: # try using already retrieved token if authenticate() fails (OTP use-case) return CodeAuthorizedAuth( self.retrieved_token, self.config.token_provision, key=getattr(self.config, "token_qs_key", None), ) response_text = getattr(e.response, "text", "").strip() # check if error is identified as auth_error in provider conf auth_errors = getattr(self.config, "auth_error_code", [None]) if not isinstance(auth_errors, list): auth_errors = [auth_errors] if ( hasattr(e.response, "status_code") and e.response.status_code in auth_errors ): raise AuthenticationError( "HTTP Error %s returned, %s\nPlease check your credentials for %s" % (e.response.status_code, response_text, self.provider) ) # other error else: import traceback as tb logger.error( f"Provider {self.provider} returned {e.response.status_code}: {response_text}" ) raise AuthenticationError( "Something went wrong while trying to get access token:\n{}".format( tb.format_exc() ) ) self.retrieved_token = response.json()["access_token"] return CodeAuthorizedAuth( self.retrieved_token, self.config.token_provision, key=getattr(self.config, "token_qs_key", None), )
[ 1805 ]
def METHOD_NAME(self): """Tests that if opponent has played all D then player chooses D.""" actions = [(C, D)] + [(D, D)] * 9 self.versus_test(axl.Defector(), expected_actions=actions, seed=1)
[ 9, 11227, 217, 15694, 2659, -1 ]
def METHOD_NAME(self, *, values, radius, k1, k2, k3, r1, r2): self.terminal_velocity_body( values=values, radius=radius, k1=k1, k2=k2, k3=k3, r1=r1, r2=r2 )
[ 1019, 5311 ]