Columns: text (string, lengths 15 to 7.82k) · ids (sequence of int, lengths 1 to 7)
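Each row below pairs a Python snippet whose function name is masked as METHOD_NAME with a short list of integer ids. A minimal sketch of consuming rows of this shape, assuming they are stored as JSON lines with "text" and "ids" fields (the file name and field names here are assumptions, not something the dump confirms):

import json

# Hypothetical storage layout: one JSON object per line with "text"/"ids" keys.
with open("rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        snippet, ids = row["text"], row["ids"]  # assumed field names
        assert "METHOD_NAME" in snippet  # every sample masks the method name
        print(len(snippet), ids)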
def METHOD_NAME(disallowed_ops: Set[DisallowedOperator]):
    """
    Disallow certain operators in the fx graph produced by TorchDynamo.

    There are two ways to disallow an operator in TorchDynamo:
    1. Use the disallow_in_graph API, which only applies to free function calls.
    2. Patch the TensorVariable class, which applies to method calls on torch.Tensor.
    """
    disallowed_tensor_methods: Set[str] = set()
    if DisallowedOperator.INPLACE_COPY in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.Tensor.copy_)
        disallowed_tensor_methods.update({"copy_", "__setitem__"})
    if DisallowedOperator.EINSUM in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.functional.einsum)
    if DisallowedOperator.MULTIHEAD_ATTENTION in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.nn.MultiheadAttention)
    if DisallowedOperator.AS_STRIDE in disallowed_ops:
        disallowed_tensor_methods.add("as_stride")

    tensor_variable_cls = torchdynamo.variables.tensor.TensorVariable
    old_call_method = tensor_variable_cls.call_method

    @functools.wraps(old_call_method)
    def call_method(self, translator, name, args, kwargs):
        if name in disallowed_tensor_methods:
            raise torchdynamo.exc.Unsupported(f"Tensor.{name} not supported by TVM.")
        return old_call_method(self, translator, name, args, kwargs)

    tensor_variable_cls.call_method = call_method
[ 1833, 5416 ]
def METHOD_NAME(self, detected_language: dict) -> bool:
    name = detected_language.get("name", "").lower()
    if not name:
        return False
    return any(ext in name for ext in self._possible_matches)
[ 10944, 622 ]
def METHOD_NAME(cls, path):
    """
    :param path: The path of the directory you want to use as a project.
    """
    with open(cls._get_json_path(path)) as f:
        version, data = json.METHOD_NAME(f)

    if version == 1:
        self = cls.__new__(cls)
        self.__dict__.update(data)
        return self
    else:
        raise WrongVersion(
            "The Jedi version of this project seems newer than what we can handle."
        )
[ 557 ]
def METHOD_NAME(h, f):
    if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
        return None
    divisor = get_long_be(h[144:148])
    if divisor:
        rate = 22050 / divisor
    else:
        rate = 0
    return 'hcom', rate, 1, -1, 8
[ 9, 9275 ]
def METHOD_NAME(esxStub, context=None):
    vsanStub = GetVsanEsxStub(esxStub, context)
    esxMos = {
        'vsan-performance-manager': vim.cluster.VsanPerformanceManager(
            'vsan-performance-manager', vsanStub
        ),
        'ha-vsan-health-system': vim.host.VsanHealthSystem(
            'ha-vsan-health-system', vsanStub
        ),
        'vsan-object-system': vim.cluster.VsanObjectSystem(
            'vsan-object-system', vsanStub
        ),
    }
    return esxMos
[ 19, 13113, 13114, 13187 ]
def METHOD_NAME(self, outfile=None):
    helpFileStr = self.read_helpfile()
    if helpFileStr:
        if outfile is None:
            outfile = sys.stdout
        outfile.write(helpFileStr)
    else:
        OptionParser.METHOD_NAME(self, outfile)
[ 38, 40 ]
def METHOD_NAME(self, scores, labels, **kwargs):
    """
    Args:
        scores (paddle.Tensor): [N, num_classes]
        labels (paddle.Tensor): [N, ]
    Returns:
        paddle.Tensor: [1,]
    """
    if paddle.is_compiled_with_custom_device('npu'):
        # NPU lacks the temporary operators needed here, so split the smooth
        # loss into a uniform-distribution term and a hard-label term.
        hard_loss = (1.0 - self.ls_eps) * F.cross_entropy(scores, labels)
        uniform_loss = (self.ls_eps / self.num_classes) * (
            -F.log_softmax(scores, -1).sum(-1).mean(0))
        loss = hard_loss + uniform_loss
    else:
        labels = F.one_hot(labels, self.num_classes)
        labels = F.label_smooth(labels, epsilon=self.ls_eps)
        labels = paddle.squeeze(labels, axis=1)
        loss = self.loss_func(scores, labels, soft_label=True, **kwargs)
    return loss
[ 636, 3772, 1572 ]
def METHOD_NAME(dist):
    # type: (Distribution) -> Optional[DirectUrl]
    """Obtain a DirectUrl from a pkg_resources.Distribution.

    Returns None if the distribution has no `direct_url.json` metadata,
    or if `direct_url.json` is invalid.
    """
    if not dist.has_metadata(DIRECT_URL_METADATA_NAME):
        return None
    try:
        return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME))
    except (
        DirectUrlValidationError,
        JSONDecodeError,
        UnicodeDecodeError,
    ) as e:
        logger.warning(
            "Error parsing %s for %s: %s",
            DIRECT_URL_METADATA_NAME,
            dist.project_name,
            e,
        )
        return None
[ 1260, 19, 4234, 274 ]
def METHOD_NAME(path):
    """Remove the file at path. Might fail if used on a directory path."""
    if io_mode == BackendMode.DEFAULT:
        return os.METHOD_NAME(path)
    elif io_mode == BackendMode.TF:
        return gfile.METHOD_NAME(path)
    else:
        raise ValueError("Unknown IO Backend Mode.")
[ 188 ]
def METHOD_NAME(self, model):
    assert model.fs.unit.config.database is model.db
    assert model.fs.unit._tech_type == "sw_onshore_intake"
[ 9, 56 ]
def METHOD_NAME(obj):
    return result_from_tuple(obj.as_tuple(), app=workerapp)
[ 197 ]
def METHOD_NAME(self, datas: list, columns: list):
    results = []
    for index, value in enumerate(datas):
        if value is None:
            results.append(None)
        else:
            data = str(value)
            column = columns[index]
            log.info("column is:{}".format(column))
            results.append(self.convertData(data, column))
    return results
[ 197, 245 ]
def METHOD_NAME(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
    """Mock predict."""
    prompt_str = prompt.metadata["prompt_type"]
    if prompt_str == PromptType.SUMMARY:
        output = _mock_summary_predict(self.max_tokens, prompt_args)
    elif prompt_str == PromptType.TREE_INSERT:
        output = _mock_insert_predict()
    elif prompt_str == PromptType.TREE_SELECT:
        output = _mock_query_select()
    elif prompt_str == PromptType.TREE_SELECT_MULTIPLE:
        output = _mock_query_select_multiple(prompt_args["num_chunks"])
    elif prompt_str == PromptType.REFINE:
        output = _mock_refine(self.max_tokens, prompt, prompt_args)
    elif prompt_str == PromptType.QUESTION_ANSWER:
        output = _mock_answer(self.max_tokens, prompt_args)
    elif prompt_str == PromptType.KEYWORD_EXTRACT:
        output = _mock_keyword_extract(prompt_args)
    elif prompt_str == PromptType.QUERY_KEYWORD_EXTRACT:
        output = _mock_query_keyword_extract(prompt_args)
    elif prompt_str == PromptType.KNOWLEDGE_TRIPLET_EXTRACT:
        output = _mock_knowledge_graph_triplet_extract(
            prompt_args,
            int(prompt.kwargs.get("max_knowledge_triplets", 2)),
        )
    elif prompt_str == PromptType.CUSTOM:
        # we don't know the specific prompt type, return a generic response
        output = ""
    else:
        raise ValueError("Invalid prompt type.")
    return output
[ 2103 ]
def METHOD_NAME(self) -> bool:
    return True
[ 1317, 557, 552, 200 ]
def METHOD_NAME(self, widget, data=None):
    if self.e.ensure_mode(linuxcnc.MODE_MDI):
        print("switched to MDI mode")
    else:
        print("can not switch to MDI in this state")
[ 69, -1, 854 ]
def METHOD_NAME(t):
    import struct
    return struct.calcsize(t)
[ 7880 ]
def METHOD_NAME(): ...
[ 129, 88, 2458 ]
def METHOD_NAME(v):
    """Convert 3D vector to position.

    A point (x, y, z) given by the components of a vector will be
    represented by [x, y, z, 1] in homogeneous coordinates to which we
    can apply a transformation.

    Parameters
    ----------
    v : array-like, shape (3,)
        3D vector that contains x, y, and z

    Returns
    -------
    p : array-like, shape (4,)
        Point vector with 1 as last element
    """
    return np.hstack((v, 1))
[ 798, 24, 1669 ]
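The docstring in the row above spells out the homogeneous-coordinate convention: appending 1 turns a 3D vector into a point a 4x4 transform can act on. A small usage sketch, with the masked method renamed vector_to_point purely for illustration:

import numpy as np

def vector_to_point(v):
    # Same body as the masked method above.
    return np.hstack((v, 1))

# A 4x4 homogeneous transform that translates by (1, 2, 3).
T = np.eye(4)
T[:3, 3] = [1.0, 2.0, 3.0]

p = vector_to_point([0.0, 0.0, 0.0])
print(T @ p)  # [1. 2. 3. 1.] is the translated point, still homogeneous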
def METHOD_NAME(self):
    copy(self, "COPYING", src=self.source_folder,
         dst=os.path.join(self.package_folder, "licenses"))
    cmake = CMake(self)
    cmake.install()
    rmdir(self, os.path.join(self.package_folder, "share"))
    rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
    rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
    self._create_cmake_module_variables(
        os.path.join(self.package_folder, self._module_file_rel_path)
    )
[ 360 ]
def METHOD_NAME(self):
    test_cases = (
        ('my-exception', 'my-msg', 'my-stderr',
         'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"'),
        ('my-exception', None, 'my-stderr',
         'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'),
        (None, 'my-msg', 'my-stderr',
         'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'),
        ('my-exception', 'my-msg', None,
         'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'),
        ('my-exception', 'my-msg', '\nline1\nline2',
         'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"'),
    )
    for tc in test_cases:
        result = self.elastic.enrich_error_message(
            generate_test_data(tc[0], tc[1], tc[2]))
        self.assertEqual(result, tc[3])
[ 9, 644, 168, 277 ]
def METHOD_NAME(self, block_index, seg_index):
    t_stop = self._raw_signals.shape[0] / self._sampling_rate
    return t_stop
[ 4373, 791, 631 ]
def METHOD_NAME(self):
    return "Be sure to stop weewx first before using. Mutating actions will" \
           " request confirmation before proceeding.\n"
[ 6633 ]
def METHOD_NAME(self, data: bytes, can_id: int) -> None:
    """
    Serves as the data handler for the device. Handles :class:`KillMessage`,
    :class:`ThrustPacket`, and :class:`HeartbeatMessage` types.
    """
    assert can_id in (THRUST_SEND_ID, KILL_SEND_ID)
    if data[0] == KillMessage.IDENTIFIER:
        packet = KillMessage.from_bytes(data)
        assert packet.is_command
        assert packet.is_hard or packet.is_soft
        if packet.is_hard:
            self.hard_kill_mobo = packet.is_asserted
        elif packet.is_soft:
            self.soft_kill_mobo = packet.is_asserted
        self.send_updates()
    elif data[0] == ThrustPacket.IDENTIFIER:
        packet = ThrustPacket.from_bytes(data)
    elif data[0] == HeartbeatMessage.IDENTIFIER:
        packet = HeartbeatMessage.from_bytes(data)
        self._last_heartbeat = rospy.Time.now()
    else:
        assert False, "No recognized identifier"
[ 69, 365 ]
def METHOD_NAME(name):
    """Whether a package name is in the lab namespace"""
    return name.startswith("@jupyterlab/")
[ 137, 4293, 360 ]
def METHOD_NAME(self, *args: Any) -> None: ...
[ 390, 277 ]
def METHOD_NAME(self) -> str:
    """
    The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
    "Microsoft.Storage/storageAccounts"
    """
    return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(
    auth_setup,
    email=None,
    password=None,
):
    currentAuthConfig = None

    # check if an auth method for this service was already set
    configs = QgsApplication.authManager().availableAuthMethodConfigs()
    previousAuthExist = False
    for config in configs.values():
        if config.name() == auth_setup.name:
            currentAuthConfig = config
            previousAuthExist = True
            break

    if not previousAuthExist:
        # not found => create a new one
        currentAuthConfig = QgsAuthMethodConfig()
        currentAuthConfig.setName(auth_setup.name)

    # reset its config values to set the new password later, when received
    currentAuthConfig.setMethod("Basic")
    currentAuthConfig.setConfig("username", email)
    currentAuthConfig.setConfig("password", password)

    if not previousAuthExist:
        # store a new auth config
        if not QgsApplication.authManager().storeAuthenticationConfig(
            currentAuthConfig
        ):
            iface.messageBar().pushCritical(
                "Trends.Earth", tr_auth.tr("Cannot init auth configuration")
            )
            return None
    else:
        # update the existing one
        if not QgsApplication.authManager().updateAuthenticationConfig(
            currentAuthConfig
        ):
            iface.messageBar().pushCritical(
                "Trends.Earth", tr_auth.tr("Cannot update auth configuration")
            )
            return None

    QtCore.QSettings().setValue(
        f"trends_earth/{auth_setup.key}", currentAuthConfig.id()
    )
    return currentAuthConfig.id()
[ 176, 2433, 200 ]
def METHOD_NAME(self, program_file, write_file, render_data,
                progress_callback, completion_callback):
    new_instance = copy.deepcopy(self)
    new_instance.program_file = program_file
    new_instance.write_file = write_file
    new_instance.render_data = render_data
    new_instance.progress_callback = progress_callback
    new_instance.completion_callback = completion_callback
    _render_items.append(new_instance)
    new_instance.start_render()
[ 338, 735 ]
def METHOD_NAME(env1, env2, num_cycles):
    """Check that two parallel environments execute the same way."""
    env1.reset(seed=42)
    env2.reset(seed=42)

    # seed action spaces to ensure sampled actions are the same
    seed_action_spaces(env1)
    seed_action_spaces(env2)

    # seed observation spaces to ensure the first observation is the same
    seed_observation_spaces(env1)
    seed_observation_spaces(env2)

    iter = 0
    max_env_iters = num_cycles * len(env1.agents)

    env1.reset(seed=42)
    env2.reset(seed=42)
    seed_action_spaces(env1)
    seed_action_spaces(env2)

    while env1.agents:
        actions1 = {agent: env1.action_space(agent).sample() for agent in env1.agents}
        actions2 = {agent: env2.action_space(agent).sample() for agent in env2.agents}
        assert data_equivalence(actions1, actions2), "Incorrect action seeding"

        obs1, rewards1, terminations1, truncations1, infos1 = env1.step(actions1)
        obs2, rewards2, terminations2, truncations2, infos2 = env2.step(actions2)
        iter += 1

        assert data_equivalence(obs1, obs2), "Incorrect observations"
        assert data_equivalence(rewards1, rewards2), "Incorrect values for rewards"
        assert data_equivalence(terminations1, terminations2), "Incorrect terminations."
        assert data_equivalence(truncations1, truncations2), "Incorrect truncations"
        assert data_equivalence(infos1, infos2), "Incorrect infos"

        if iter >= max_env_iters or any(terminations1) or any(truncations1):
            break

    env1.close()
    env2.close()
[ 250, 1027, 4665, 1498 ]
def METHOD_NAME(self, request):
    try:
        self.standard_log_data["request_uuid"] = request._logging_uuid
    except Exception:
        self.standard_log_data["request_uuid"] = None

    # Copy the auth-related session keys, defaulting to None when absent.
    for key in (
        "auth_uuid",
        "auth_app_id",
        "auth_app_name",
        "auth_app_data_access_type",
        "auth_app_end_date",
        "auth_client_id",
        "auth_pkce_method",
    ):
        try:
            self.standard_log_data[key] = request.session[key]
        except Exception:
            self.standard_log_data[key] = None

    self.standard_log_data.update(get_session_auth_flow_trace(request))
[ 297, 377, 365 ]
def METHOD_NAME():
    """Look for options in environment vars"""
    settings = {}

    zmq = os.environ.get("ZMQ_PREFIX")
    if zmq:
        debug("Found environ var ZMQ_PREFIX=%s" % zmq)
        settings['zmq_prefix'] = zmq

    draft_api = os.environ.get("ZMQ_DRAFT_API")
    if draft_api:
        debug("Found environ var ZMQ_DRAFT_API=%s" % draft_api)
        settings['zmq_draft_api'] = int(draft_api)

    return settings
[ 19, 485, 335 ]
def METHOD_NAME(self):
    return parse_email_line(self.submitter)
[ 3011, 3458 ]
def METHOD_NAME(x): pass
[ 9, 946, -1, 1399 ]
def METHOD_NAME(self): ...
[ 9, 756, 4167, 41, 99 ]
def METHOD_NAME(self, name: str) -> WebElement:
    return self.selenium.find_element(By.CLASS_NAME, name)
[ 416, 604, 2 ]
def METHOD_NAME(boiling_points):
    resin_rho = np.full(len(boiling_points), 1100.0)
    return resin_rho
[ 11711, 4638 ]
def METHOD_NAME(command, new_command):
    assert get_new_command(command) == new_command
[ 9, 19, 80, 462 ]
def METHOD_NAME(self) -> Tuple[str, str]:
    """Get the username and password for the registry.

    Returns:
        (str, str): The username and password.

    Raises:
        RegistryError: If there is an error getting the username and password.
    """
    _logger.debug("Getting username and password for Elastic Container Registry.")
    try:
        session = self.environment.get_session()
        client = session.client("ecr")
        response = client.get_authorization_token()
        username, password = base64.standard_b64decode(
            response["authorizationData"][0]["authorizationToken"]
        ).split(b":")
        return username.decode("utf-8"), password.decode("utf-8")
    except botocore.exceptions.ClientError as e:
        code = e.response["Error"]["Code"]
        msg = e.response["Error"]["Message"]
        # TODO: Log the code and the message here?
        raise LaunchError(f"Error getting username and password: {code} {msg}")
[ 19, 2072, 2897 ]
def METHOD_NAME(e):
    """Handle an exception and show the traceback to error page."""
    try:
        message = 'Please file an issue in GitHub: ' + \
            traceback.format_exc()
        loggedin = 'username' in session
    except:
        message = (
            'Something went terribly wrong, '
            'and we failed to find the cause automatically. '
            'Please file an issue in GitHub.'
        )
        loggedin = False
    try:
        return render_template(
            'error.min.html', message=message, loggedin=loggedin
        ), 500
    except:
        return message, 500
[ 75, 442, 1519 ]
def METHOD_NAME(self):
    self.base.win.movePointer(
        0,
        int(self.base.win.getProperties().getXSize() / 2),
        int(self.base.win.getProperties().getYSize() / 2),
    )
[ 13568, 2571 ]
def METHOD_NAME(batch_size):
    images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
    num_classes = 1000
    labels = np.random.randint(
        low=0, high=num_classes, size=[batch_size]).astype(np.int32)
    one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
    one_hot[np.arange(batch_size), labels] = 1.
    return images, one_hot
[ 236, 2277 ]
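The generator in the row above builds random images plus exactly-one-hot labels. A quick shape check, assuming image_shape(b) returns an NHWC tuple such as (b, 224, 224, 3) (that helper is not shown in the row) and calling the masked method make_batch for illustration:

import numpy as np

def image_shape(batch_size):
    # Assumed NHWC layout; the real helper is outside this sample.
    return (batch_size, 224, 224, 3)

images, one_hot = make_batch(8)  # hypothetical name for the masked method
assert images.shape == image_shape(8)
assert one_hot.shape == (8, 1000)
assert np.allclose(one_hot.sum(axis=1), 1.0)  # exactly one hot index per row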
def METHOD_NAME(
    df: Union[pd.DataFrame, dd.DataFrame],
    column: str,
    output_format: str = "standard",
    inplace: bool = False,
    errors: str = "coerce",
    progress: bool = True,
) -> pd.DataFrame:
    """
    Clean Indian Permanent Account numbers (PANs) type data in a DataFrame column.

    Parameters
    ----------
    df
        A pandas or Dask DataFrame containing the data to be cleaned.
    column
        The name of the column containing data of PAN type.
    output_format
        The output format of standardized number string.
        If output_format = 'compact', return string without any separators or whitespace.
        If output_format = 'standard', return string with proper separators and whitespace.
        If output_format = 'info', return a dictionary containing information
        that can be decoded from the PAN.
        If output_format = 'mask', mask the PAN as per CBDT masking standard.
        Note: in the case of PAN, the compact format is the same as the standard one.
        (default: "standard")
    inplace
        If True, delete the column containing the data that was cleaned.
        Otherwise, keep the original column. (default: False)
    errors
        How to handle parsing errors.
        - 'coerce': invalid parsing will be set to NaN.
        - 'ignore': invalid parsing will return the input.
        - 'raise': invalid parsing will raise an exception.
        (default: 'coerce')
    progress
        If True, display a progress bar. (default: True)

    Examples
    --------
    Clean a column of PAN data.

    >>> df = pd.DataFrame({"pan": ['ACUPA7085R', '234123412347']})
    >>> clean_in_pan(df, 'pan')
                pan   pan_clean
    0    ACUPA7085R  ACUPA7085R
    1  234123412347         NaN
    """
    if output_format not in {"compact", "standard", "info", "mask"}:
        raise ValueError(
            f"output_format {output_format} is invalid. "
            'It needs to be "compact", "standard", "info", "mask".'
        )

    # convert to dask
    df = to_dask(df)

    # To clean, create a new column "clean_code_tup" which contains the
    # cleaned values and a code indicating how the initial value was changed,
    # in a tuple. Then split the column of tuples and count the amount of
    # different codes to produce the report.
    df["clean_code_tup"] = df[column].map_partitions(
        lambda srs: [_format(x, output_format, errors) for x in srs],
        meta=object,
    )
    df = df.assign(
        _temp_=df["clean_code_tup"].map(itemgetter(0)),
    )
    df = df.rename(columns={"_temp_": f"{column}_clean"})
    df = df.drop(columns=["clean_code_tup"])

    if inplace:
        df[column] = df[f"{column}_clean"]
        df = df.drop(columns=f"{column}_clean")
        df = df.rename(columns={column: f"{column}_clean"})

    with ProgressBar(minimum=1, disable=not progress):
        df = df.compute()

    return df
[ 1356, 623, 6396 ]
def METHOD_NAME(self):
    self.assertEqual(self.obj._seen_so_far, 0)
[ 9, 176, 3959, 5371, 4904 ]
def METHOD_NAME(self, lab):
    np.random.seed(SEED)
    embeddings_array = 0.5 + 0.1 * np.random.rand(
        lab.get_info("statistics")["num_examples"], 2)
    embeddings_array[4, :] = -1
    return {"embedding": embeddings_array}
[ 2465 ]
def METHOD_NAME(cls):
    return datetime.METHOD_NAME()
[ 4607 ]
def METHOD_NAME(self, event_btn): ...
[ 0, 1881 ]
def METHOD_NAME(
    self,
    stream_state: Mapping[str, Any] = None,
    stream_slice: Mapping[str, Any] = None,
    next_page_token: Mapping[str, Any] = None,
) -> str:
    return f"views/{self.view_id}/records"
[ 157 ]
def METHOD_NAME(self):
    """
    Checking if a wrong remote PIN will fail
    """
    parameters = {"user": "remoteuser", "pass": "abcd123456"}
    response = self.make_validate_request("check", params=parameters)
    assert '"value": false' in response, response
[ 9, 4468, 250, 466, 2437, 2818, -1 ]
def METHOD_NAME() -> Optional[Action]:
    action = request.args.get("action")
    if action is not None:
        if action == "link":
            return Action.Link
        elif action == "login":
            return Action.Login
        else:
            LOG.w(f"Unknown action received: {action}")
            return None
    return Action.Login
[ 297, 1006 ]
def METHOD_NAME(self, request, expected=None, exitcode=None):
    """Execute a query and check expected result.

    :param request: query string
    :param expected: result string
    :param exitcode: exitcode
    """
    r = self.context.node.query(request)

    if expected is not None:
        with Then("output should match the expected", description=f"{expected}"):
            assert r.output == expected, error()
    elif exitcode is not None:
        with Then("output exitcode should match expected", description=f"{exitcode}"):
            assert r.exitcode == exitcode, error()
[ 1005, 539 ]
def METHOD_NAME(sa, sb, transp_a, transp_b):
    a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
    b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
    c1 = np.matmul(
        np.transpose(a) if transp_a else a,
        np.transpose(b) if transp_b else b,
    )
    c2 = with_tvm(lambda A, B: topi.matmul(A, B, transp_a, transp_b), a, b)
    tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
[ 1162, 1496 ]
def METHOD_NAME(self):
    return "ODataV4Format"
[ 168, 275 ]
def METHOD_NAME(self, key):
    if not isinstance(key, parameters.electricFieldParameters):
        logger.error("parameter key needs to be of type NuRadioReco.framework.parameters.electricFieldParameters")
        raise ValueError("parameter key needs to be of type NuRadioReco.framework.parameters.electricFieldParameters")
    return self._parameters[key]
[ 19, 511 ]
def METHOD_NAME(self, short_hash: str) -> str:
    """Check the haveibeenpwned API"""
    url = f"https://api.pwnedpasswords.com/range/{short_hash}"
    return get_http_session().get(url).text
[ 250, -1 ]
def METHOD_NAME(self, repair_dir, sql_repair_contents, segment):
    sql_filename = self._get_sql_filename(segment) + ".sql"
    sql_file_path = os.path.join(repair_dir, sql_filename)
    with open(sql_file_path, 'w') as sql_file:
        for content in sql_repair_contents:
            sql_file.write(content + "\n")
    return sql_filename
[ 129, 1621, 171, 623, 4894, 1190 ]
def METHOD_NAME(cfg, data, weight, bias=None, out_dtype=None):
    """Dense operator for int8 on CUDA"""
    if out_dtype is None:
        out_dtype = data.dtype

    batch, in_dim = get_const_tuple(data.shape)
    out_dim, _ = get_const_tuple(weight.shape)
    k = te.reduce_axis((0, in_dim), name="k")

    matmul = te.compute(
        (batch, out_dim),
        lambda i, j: te.sum(
            data[i, k].astype(out_dtype) * weight[j, k].astype(out_dtype), axis=[k]
        ),
        tag="dense_int8",
    )
    cfg.add_flop(batch * in_dim * out_dim * 2)

    if bias is not None:
        matmul = te.compute(
            (batch, out_dim),
            lambda i, j: matmul[i, j] + bias[j].astype(out_dtype),
            tag=tag.BROADCAST,
        )
        cfg.add_flop(batch * out_dim)

    return matmul
[ 3829, 3638 ]
def METHOD_NAME(self, x, y):
    """Perform picking

    :param float x: X coordinates in plot data frame
    :param float y: Y coordinates in plot data frame
    :return: List of picked data point indices
    :rtype: Union[List[int],None]
    """
    if (x < self.xMin or x > self.xMax or
            y < self.yMin or y > self.yMax):
        return None

    xPts, yPts = self.__x_y_color[:2]
    if self.__picking_triangles is None:
        self.__picking_triangles = numpy.zeros(
            self.__triangles.shape + (3,), dtype=numpy.float32)
        self.__picking_triangles[:, :, 0] = xPts[self.__triangles]
        self.__picking_triangles[:, :, 1] = yPts[self.__triangles]

    segment = numpy.array(((x, y, -1), (x, y, 1)), dtype=numpy.float32)

    # Picked triangle indices
    indices = glutils.segmentTrianglesIntersection(
        segment, self.__picking_triangles)[0]

    # Point indices
    indices = numpy.unique(numpy.ravel(self.__triangles[indices]))

    # Sorted from furthest to closest point
    dists = (xPts[indices] - x) ** 2 + (yPts[indices] - y) ** 2
    indices = indices[numpy.flip(numpy.argsort(dists), axis=0)]

    return tuple(indices) if len(indices) > 0 else None
[ 2981 ]
def METHOD_NAME(self, mock_logger):
    """
    Test that a course is created successfully
    """
    settings = {"courses": [{
        "organization": "test-course-generator",
        "number": "1",
        "run": "1",
        "user": str(self.user.email),
        "fields": {"display_name": "test-course",
                   "announcement": "2010-04-20T20:08:21.634121"}
    }]}
    arg = json.dumps(settings)
    call_command("generate_courses", arg)
    key = modulestore().make_course_key("test-course-generator", "1", "1")
    self.assertTrue(modulestore().has_course(key))
    mock_logger.info.assert_any_call("Created course-v1:test-course-generator+1+1")
    mock_logger.info.assert_any_call(
        "announcement has been set to 2010-04-20T20:08:21.634121")
    mock_logger.info.assert_any_call("display_name has been set to test-course")
[ 9, 567, 1122, 623, 5639 ]
def METHOD_NAME(p, t):
    """Compute the function cos(p * t**2)."""
    return np.cos(p * t**2)
[ 5376 ]
def METHOD_NAME(self, N=10):
    """Fill the bar in N frames. This call is blocking."""
    remaining = self['range'] - self['value']
    if remaining:
        step = max(1, int(remaining / N))
        count = self['value']
        while count != self['range']:
            count += step
            if count > self['range']:
                count = self['range']
            self.update(count)
[ 1239 ]
def METHOD_NAME(cls):
    # Loads the asdl module dynamically, since it's not in a real importable
    # package.
    # Parses Python.asdl into an ast.Module and run the check on it.
    # There's no need to do this for each test method, hence setUpClass.
    sys.path.insert(0, parser_dir)
    loader = importlib.machinery.SourceFileLoader(
        'asdl', os.path.join(parser_dir, 'asdl.py'))
    spec = importlib.util.spec_from_loader('asdl', loader)
    module = importlib.util.module_from_spec(spec)
    loader.exec_module(module)
    cls.asdl = module
    cls.mod = cls.asdl.parse(os.path.join(parser_dir, 'Python.asdl'))
    cls.assertTrue(cls.asdl.check(cls.mod), 'Module validation failed')
[ 0, 1, 2 ]
def METHOD_NAME(self, dsn, isolation=None, read_only=False,
                deferrable=False, application_name=None):
    """
    Return a connection whose transactions will have the defined
    characteristics and which will use the given ``application_name``.

    The driver should pass the ``application_name`` as part of the connect
    packet, and *not* via ``SET SESSION application_name``. This is because
    the former is both faster and because the latter does not work when
    connecting to a hot standby (it triggers "cannot set transaction
    read-write mode during recovery"). If it cannot be passed at connect
    time, an alternative is ``SELECT set_config('application_name', %s,
    FALSE)`` which does not seem to have this problem.
    """
    raise NotImplementedError
[ 707, 41, 5167 ]
def METHOD_NAME():
    """Ensure TranslationUnit.save() works."""
    tu = get_tu('int foo();')
    path = save_tu(tu)
    assert os.path.exists(path)
    assert os.path.getsize(path) > 0
    os.unlink(path)
[ 9, 73 ]
def METHOD_NAME(
    registry,
) -> Set[Union[torch._ops.OperatorBase, Callable]]:
    """
    Creates a set of OperatorBase and Callable objects that represent
    ONNX-supported PyTorch operations.

    Args:
        registry (OnnxRegistry): The ONNX registry for PyTorch.

    Returns:
        A collection of OperatorBase and Callable objects representing
        ONNX-supported PyTorch operations.
    """
    table: Set[Union[torch._ops.OperatorBase, Callable]] = set()

    # Some ops in `torch.ops.aten` are not discoverable through `dir(torch.ops.aten)`,
    # but retrievable via explicit lookup.
    # https://github.com/pytorch/pytorch/issues/99681
    # This is a workaround to make sure we register ONNX symbolic functions for these.
    onnx_supported_aten_lookup_table = [
        k.split("::")[1].split(".")[0]
        for k in registry._all_registered_ops()
        if k.startswith("aten::")
    ]

    for op_namespace in (torch.ops.aten, torch.ops.prims):
        attr_names = dir(op_namespace)
        if op_namespace is torch.ops.aten:
            attr_names += onnx_supported_aten_lookup_table
        for attr_name in attr_names:
            if not hasattr(op_namespace, attr_name):
                # torchlib owns some attributes that are not aten ops.
                continue
            op_overload_packet = getattr(op_namespace, attr_name)
            if not isinstance(op_overload_packet, torch._ops.OpOverloadPacket):
                continue

            for overload_name in op_overload_packet.overloads():
                op_overload = getattr(op_overload_packet, overload_name)
                internal_op_name = registration.OpName.from_qualified_name(
                    qualified_name=op_overload.name()
                )
                # NOTE: If the overload is supported in the registry, or its
                # default overload is, we add it to the table.
                if registry.is_registered_op(
                    namespace=internal_op_name.namespace,
                    op_name=internal_op_name.op_name,
                    overload=internal_op_name.overload,
                ) or registry.is_registered_op(
                    namespace=internal_op_name.namespace,
                    op_name=internal_op_name.op_name,
                    overload=None,
                ):
                    # This maps torch.ops.aten.add.Tensor, torch.ops.aten.add.Scalar,
                    # torch.ops.aten.add.out, etc. to "aten::add". This means the
                    # exporter for "aten::add" is used for all overloads of "aten::add".
                    # This is applied to all ops under torch.ops.aten.
                    table.add(op_overload)
    return table
[ 129, 4233, 1466, 441, 9934, 410 ]
def METHOD_NAME():
    g = cirq.ParallelGate(cirq.X, 2)
    qubits = cirq.LineQubit.range(4)
    with pytest.raises(ValueError, match=r'len\(qubits\)=4 should be 2'):
        cirq.decompose_once_with_qubits(g, qubits)
[ 9, 7426, 45 ]
def METHOD_NAME(cls, _):
    store = Repository(random_string())
    store.add_folder(random_string())
[ 9, 238, 451, 1434 ]
def METHOD_NAME(self):
    """Test the knot insertion function (.insert_knot())."""
    # BSpline Data
    b_spline_knots_0 = c.np.random.rand(8)
    b_spline_knots_1 = c.np.random.rand(9)

    matrix_bspline = self.bspline.knot_insertion_matrix(0, b_spline_knots_0)
    self.bspline.insert_knots(0, b_spline_knots_0)
    matrix_bspline = (
        self.bspline.knot_insertion_matrix(1, b_spline_knots_1)
        @ matrix_bspline
    )
    self.bspline.insert_knots(1, b_spline_knots_1)

    # NURBS Data
    nurbs_knots_0 = c.np.random.rand(10)
    nurbs_knots_1 = c.np.random.rand(11)

    matrix_nurbs = self.nurbs.knot_insertion_matrix(0, nurbs_knots_0)
    self.nurbs.insert_knots(0, nurbs_knots_0)
    matrix_nurbs = (
        self.nurbs.knot_insertion_matrix(1, nurbs_knots_1) @ matrix_nurbs
    )
    self.nurbs.insert_knots(1, nurbs_knots_1)

    # use random query points
    q2D = c.np.random.rand(50, 2)

    # test evaluation
    self.assertTrue(
        c.np.allclose(
            self.bspline.evaluate(q2D), self.bspline_2p2d().evaluate(q2D)
        )
    )
    self.assertTrue(
        c.np.allclose(
            self.nurbs.evaluate(q2D), self.nurbs_2p2d().evaluate(q2D)
        )
    )

    # Test control points and weights
    self.assertTrue(
        c.np.allclose(
            self.bspline.control_points,
            matrix_bspline @ self.bspline_2p2d().control_points,
        )
    )
    self.assertTrue(
        c.np.allclose(
            self.nurbs.weights, matrix_nurbs @ self.nurbs_2p2d().weights
        )
    )
    self.assertTrue(
        c.np.allclose(
            self.nurbs.control_points,
            matrix_nurbs
            @ (
                self.nurbs_2p2d().weights
                * self.nurbs_2p2d().control_points
            )
            / self.nurbs.weights,
        )
    )
[ 9, 408, 14938, 41, 430 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    The system metadata relating to this resource.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(engine_name='') -> None:
    """Downgrade all databases."""
    # Do not modify. Edit `downgrade_` instead
    globals().get(f'downgrade_{engine_name}', lambda: None)()
[ 1502 ]
def METHOD_NAME(duration):
    """validate_session_duration validates that the AWS STS Assume Role
    Session Duration is between 900 and 43200 seconds."""
    duration = int(duration)
    # Since range(i, j) runs from i to j-1, we have to pass j+1 here.
    if duration not in range(900, 43201):
        raise ArgumentTypeError("Session duration must be between 900 and 43200")
    return duration
[ 187, 240, 2205 ]
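A boundary check for the validator above; its own docstring names it validate_session_duration, so that name is used here:

# 900 and 43200 are both inside range(900, 43201); 43201 is not.
assert validate_session_duration("900") == 900      # lower bound accepted
assert validate_session_duration(43200) == 43200    # upper bound accepted
try:
    validate_session_duration(43201)                # one past the range
except ArgumentTypeError:
    pass                                            # rejected as expected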
def METHOD_NAME(self, A: dace.float64[20]):
    return self.method(A) + 2 + self(A)
[ 2395, 103, 2575 ]
def METHOD_NAME(self, request: Message, state_check):
    return None
[ 1970, 250 ]
def METHOD_NAME():
    bind = op.get_bind()
    session = db.Session(bind=bind)

    for chart in session.query(Slice):
        params = json.loads(chart.params or "{}")

        if not params.get("num_period_compare"):
            continue

        num_period_compare = int(params.get("num_period_compare"))
        granularity = (
            params.get("granularity")
            if chart.datasource_type == "druid"
            else params.get("time_grain_sqla")
        )
        time_compare = compute_time_compare(granularity, num_period_compare)

        period_ratio_type = params.get("period_ratio_type") or "growth"
        comparison_type = comparison_type_map[period_ratio_type.lower()]

        params["time_compare"] = [time_compare]
        params["comparison_type"] = comparison_type
        chart.params = json.dumps(params, sort_keys=True)

    session.commit()
    session.close()
[ 738 ]
def METHOD_NAME(cls):
    cls.startResponders()
    cls.startDNSDist()
    cls.setUpSockets()
    cls.waitForTCPSocket('127.0.0.1', cls._webServerPort)
    print("Launching tests..")
[ 0, 1, 2 ]
def METHOD_NAME(template):
    """Returns the template."""
    template_path = current_app.config.get('REPORT_TEMPLATE_PATH')
    template_code = Path(f'{template_path}{template}').read_text()
    return Template(template_code)
[ 19, 671 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    The friendly name of the management group.
    """
    return pulumi.get(self, "display_name")
[ 52, 156 ]
def METHOD_NAME(self, heater, METHOD_NAME):
    h = self.heaters[heater]
    h.METHOD_NAME = METHOD_NAME
[ 3195 ]
def METHOD_NAME():
    print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
    print(" -h hostname    name of the host to run the test against")
    print("                localhost by default")
    print(" -p port        port number to use for connection, 4433 by default")
    print(" probe-name     if present, will run only the probes with given")
    print("                names and not all of them, e.g \"sanity\"")
    print(" -e probe-name  exclude the probe from the list of the ones run")
    print("                may be specified multiple times")
    print(" -x probe-name  expect the probe to fail. When such probe passes despite being marked like this")
    print("                it will be reported in the test summary and the whole script will fail.")
    print("                May be specified multiple times.")
    print(" -X message     expect the `message` substring in exception raised during")
    print("                execution of preceding expected failure probe")
    print("                usage: [-x probe-name] [-X exception], order is compulsory!")
    print(" -n num         run 'num' or all(if 0) tests instead of default(all)")
    print("                (excluding \"sanity\" tests)")
    print(" --help         this message")
[ 40, 169 ]
def METHOD_NAME(self) -> None:
    """
    A room should show up in the shared list of rooms between two users
    if it is private.
    """
    self._check_mutual_rooms_with(
        room_one_is_public=False, room_two_is_public=False
    )
[ 9, 1644, 6703, 245, 547 ]
def METHOD_NAME(self, point: Point) -> float: ...
[ 155 ]
def METHOD_NAME(cls, data: bytes, **kwargs: OptionalKwargs) -> WazaPProtocol:
    return cls.get_model_cls()(data, 0)
[ 2696, 772 ]
def METHOD_NAME(monkeypatch):
    # Set a very short timeout to exit fast
    monkeypatch.setenv("DD_PROFILING_API_TIMEOUT", "0.1")
    monkeypatch.setenv("DD_PROFILING_ENABLED", "1")
    stdout, stderr, exitcode, _ = call_program(
        "ddtrace-run", sys.executable,
        os.path.join(os.path.dirname(__file__), "simple_program.py")
    )
    if sys.platform == "win32":
        assert exitcode == 0, (stdout, stderr)
    else:
        assert exitcode == 42, (stdout, stderr)
    hello, interval, stacks, pid = list(
        s.strip() for s in stdout.decode().strip().split("\n"))
    assert hello == "hello world", stdout.decode().strip()
    assert float(interval) >= 0.01, stdout.decode().strip()
    assert int(stacks) >= 1, stdout.decode().strip()
[ 9, 128, 782 ]
def METHOD_NAME(botarray):
    qtbot, dialog, widget = botarray
    qtbot.keyClick(widget, Qt.Key_1)
    qtbot.keyClick(widget, Qt.Key_Tab)
    qtbot.keyClick(widget, Qt.Key_2)
    qtbot.keyClick(widget, Qt.Key_Tab)
    qtbot.keyClick(widget, Qt.Key_Backtab)  # Hack: in the tests the selected cell is wrong
    qtbot.keyClick(widget, Qt.Key_3)
    qtbot.keyClick(widget, Qt.Key_Tab)
    qtbot.keyClick(widget, Qt.Key_4)
    qtbot.keyClick(widget, Qt.Key_Tab)
    qtbot.keyClick(widget, Qt.Key_5)
    qtbot.keyClick(widget, Qt.Key_Tab)
    qtbot.keyClick(widget, Qt.Key_6)
    qtbot.keyClick(widget, Qt.Key_Tab)  # Hack: in the tests the selected cell is wrong
    qtbot.keyClick(widget, Qt.Key_Return, modifier=Qt.NoModifier)
    value = dialog.text()
    assert value == 'np.array([[1, 2, 3],\n [4, 5, 6]])'
[ 9, 877, 410, 877 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_url_param(
            "clusterName", self.ctx.args.cluster_name,
            required=True,
        ),
        **self.serialize_url_param(
            "resourceGroupName", self.ctx.args.resource_group,
            required=True,
        ),
        **self.serialize_url_param(
            "subscriptionId", self.ctx.subscription_id,
            required=True,
        ),
    }
    return parameters
[ 274, 386 ]
def METHOD_NAME():
    check_duplicates()
[ 57 ]
def METHOD_NAME(self) -> int:
    try:
        logger.info(
            f"\n[blue]================= Updating file {self.source_file} =================[/blue]"
        )
        super().update_json()
        self.format_auto_extract_mode()
        self.set_default_values_as_needed()
        self.update_id()
        self.save_json_to_destination_file()
        return SUCCESS_RETURN_CODE
    except Exception as err:
        logger.debug(
            f"\n[red]Failed to update file {self.source_file}. Error: {err}[/red]"
        )
        return ERROR_RETURN_CODE
[ 22, 275 ]
def METHOD_NAME(self, stmt):
    # Check if this is one of the statement types that we can check
    if not isinstance(stmt, (Modification, RegulateAmount,
                             RegulateActivity, Influence)):
        logger.info('Statement type %s not handled' % stmt.__class__.__name__)
        return (None, None, 'STATEMENT_TYPE_NOT_HANDLED')

    subj, obj = stmt.agent_list()
    subj_nodes = self.get_nodes(subj, self.graph)
    obj_nodes = self.get_nodes(obj, self.graph)

    # Statement has an object but it's not in the graph
    if obj and not obj_nodes.all_nodes:
        return (None, None, 'OBJECT_NOT_FOUND')
    if subj and not subj_nodes.all_nodes:
        return (None, None, 'SUBJECT_NOT_FOUND')
    return (subj_nodes, obj_nodes, None)
[ 356, 925 ]
def METHOD_NAME(self, other: Mapping) -> bool:
    common_keys = self._check_conflict(other)
    return not common_keys
[ 9390 ]
def METHOD_NAME(self):
    tid = 0
    address = 0x1140000
    size = 1024
    allocator = AllocatorType.MALLOC
    stack_id = 1
    n_allocations = 1
    stack = ["i'm stack"]

    record = AllocationRecord(
        (tid, address, size, allocator, stack_id, n_allocations)
    )
    mock_record = MockAllocationRecord(
        tid, address, size, allocator, stack_id, n_allocations, stack
    )

    assert mock_record.tid == record.tid == tid
    assert mock_record.address == record.address == address
    assert mock_record.size == record.size == size
    assert mock_record.allocator == record.allocator == allocator
    assert mock_record.stack_id == record.stack_id == stack_id
    assert mock_record.n_allocations == record.n_allocations == n_allocations
[ 9, 6285, 2307, 4722, 148 ]
def METHOD_NAME(self, data_info: dict, data_mode: str = 'topdown'):
    if data_mode == 'topdown':
        expected_keys = dict(
            img_id=int,
            img_path=str,
            bbox=np.ndarray,
            bbox_score=np.ndarray,
            keypoints=np.ndarray,
            keypoints_visible=np.ndarray,
            id=int)
    elif data_mode == 'bottomup':
        expected_keys = dict(
            img_id=int,
            img_path=str,
            bbox=np.ndarray,
            bbox_score=np.ndarray,
            keypoints=np.ndarray,
            keypoints_visible=np.ndarray,
            invalid_segs=list,
            id=list)
    else:
        raise ValueError(f'Invalid data_mode {data_mode}')

    for key, type_ in expected_keys.items():
        self.assertIn(key, data_info)
        self.assertIsInstance(data_info[key], type_, key)
[ 250, 365, 100, 219 ]
def METHOD_NAME(self, button_id, args_dict, thread=True, return_from_function=False):
    """Execute function from custom action button press."""
    try:
        run_command = getattr(self.run_function, button_id)
        if not thread or return_from_function:
            return_val = run_command(args_dict)
            if return_from_function:
                return 0, return_val
            else:
                return 0, f"Command sent to Function Controller. Returned: {return_val}"
        else:
            thread_run_command = threading.Thread(
                target=run_command, args=(args_dict,))
            thread_run_command.start()
            return 0, "Command sent to Function Controller and is running in the background."
    except Exception as err:
        msg = f"Error executing function '{button_id}': {err}"
        self.logger.exception(msg)
        return 1, msg
[ 128, 298, 559 ]
def METHOD_NAME(init):
    init = init or {}
    return dict(init)
[ 947, 553 ]
def METHOD_NAME(seed) -> None:
    r"""Sets the seed to generate random numbers for custom device.

    Args:
        seed (int): The desired seed.

    See [Note: support the custom device with privateuse1]
    """
    seed = int(seed)
    custom_backend_name = torch._C._get_privateuse1_backend_name()
    if hasattr(torch, custom_backend_name):
        custom_device_mod = getattr(torch, custom_backend_name)
        _bad_fork_name = "_is_in_bad_fork"
        _seed_all_name = "manual_seed_all"
        if hasattr(custom_device_mod, _bad_fork_name) and hasattr(
                custom_device_mod, _seed_all_name):
            if not getattr(custom_device_mod, _bad_fork_name)():
                getattr(custom_device_mod, _seed_all_name)(seed)
        else:
            message = f"Set seed for `{custom_backend_name}` device does not take effect, please add API's "
            message += f"`{_bad_fork_name}` and `{_seed_all_name}` to `{custom_backend_name}` device module."
            warnings.warn(message, UserWarning, stacklevel=3)
[ 484, 343, 398 ]
def METHOD_NAME(self, chip_counter):
    source_app_vertex = self.governed_app_vertex.source_vertex
    slices = source_app_vertex.splitter.get_out_going_slices()

    # create vertices correctly
    for vertex_slice in slices:
        vertex = self.create_machine_vertex(
            source_app_vertex, vertex_slice)
        self.governed_app_vertex.remember_machine_vertex(vertex)
        chip_counter.add_core(vertex.sdram_required)
[ 129, 1600, 2128 ]
def METHOD_NAME(self, session):
    data = self.deserialize_http_content(session)
    self.ctx.set_var(
        "instance",
        data,
        schema_builder=self._build_schema_on_200
    )
[ 69, 1072 ]
def METHOD_NAME(self):
    """
    Turn off the watchdog timer
    """
    # echo 0xA181 0x0 > /sys/devices/platform/baseboard/setreg
    disable_val = '{} {}'.format(WDT_ENABLE_REG, DISABLE_CMD)
    return self._api_common.write_txt_file(self.setreg_path, disable_val)
[ 193 ]
def METHOD_NAME(self, key, add_flip=False):
    pairs = self.__make_unordered_pairs(self.subjects[key], add_flip)
    return pairs
[ 19, 1861, 3151 ]
def METHOD_NAME(self, kernel_shape, strides, padding, dilation, activation):
    """Returns tf.function that creates TFLite Conv2d layer"""

    @tf.function
    def conv2d_single_function(ifm_tensor):
        """Returns TFLite Conv2d layer"""
        op = tf.nn.conv2d(
            ifm_tensor,
            filters=tf.constant(
                np.random.uniform(size=[kernel_shape[0], kernel_shape[1], 3, 3]),
                dtype=tf.float32,
            ),
            strides=[1, strides[0], strides[1], 1],
            padding=padding,
            dilations=dilation,
        )
        if activation == "RELU":
            op = tf.nn.relu(op)
        elif activation == "NONE":
            pass
        else:
            assert False, f"Unsupported activation {activation}"
        return op

    return conv2d_single_function
[ 129, 3385, 97 ]
def METHOD_NAME(tensor, batch):
    return [
        {
            "file": xir_extra_ops.remove_xfix(tensor.name) + ".bin",
            "md5sum": "0" * 32,
            "size": tensor.get_data_size(),
        }
        for _ in range(batch)
    ]
[ 235, 171, 109 ]
def METHOD_NAME(self, data):
    wptr = self.txq.write(data)
    self.send(AFKEP_Send(WPTR=wptr))
[ 353, 1274 ]