text: stringlengths 15 to 7.82k
ids: sequencelengths 1 to 7
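The two columns above describe each row that follows: text holds a flattened Python snippet whose target identifier has been masked as METHOD_NAME, and ids holds a short integer sequence for that sample. As a minimal sketch of how rows with this schema could be consumed, the snippet below assumes the preview corresponds to a JSON Lines file with exactly these two fields; the file name rows.jsonl and the JSONL layout are assumptions, not something the preview states.

import json

# Sketch only: assumes each record carries a "text" string (length 15 to ~7.82k,
# method name masked as METHOD_NAME) and an "ids" list of 1 to 7 integers.
# The path "rows.jsonl" is hypothetical.
with open("rows.jsonl", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

for row in rows[:3]:
    assert len(row["text"]) >= 15        # matches the stringlengths lower bound
    assert 1 <= len(row["ids"]) <= 7     # matches the sequencelengths range
    print(len(row["text"]), row["ids"])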
def METHOD_NAME(self) -> None: expected_topic = "student-yqqtag" expected_message = "**student-yqqtag** commented on the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/)." self.check_webhook("student_commented_on_task", expected_topic, expected_message)
[ 9, 1591, 417, 277 ]
def METHOD_NAME(df, col="abs"): """Compute DataFrame with single column, absolute value of rows of df.""" return absq_rows(df, col=col) ** 0.5
[ 4120, 1346 ]
def METHOD_NAME(test, params, env): r""" Test for virt-win-reg. (1).Get parameters from params. (2).Build the full command of virt-win-reg. (3).Login vm to get a session. (4).Prepare for test. (5).Run virt-win-reg command. Command virt-win-reg is used to export and merge Windows Registry entries from a Windows guest. We can do add/remove/modify/query with it. Example: * add: Make sure there is no value named AddTest in [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName] # cat reg_file.reg [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName] "AddTest" = "VIRTTEST" # virt-win-reg Guestname/disk --merge reg_file.reg * remove: # cat reg_file.reg [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName] "ComputerName" = - # virt-win-reg Guestname/disk --merge reg_file.reg * modify: # cat reg_file.reg [HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName] "ComputerName" = "VIRTTEST_v2" # virt-win-reg Guestname/disk --merge reg_file.reg * query: # virt-win-reg domname 'HKLM\SYSTEM\ControlSet001\Control\ComputerName\ComputerName' ComputerName (6).Verify the result. (7).Clean up. """ try: virt_win_reg_exec = path.command("virt-win-reg") except ValueError: test.cancel("Not find virt-win-reg command.") # Get parameters. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Get parameters for remote. remote_yes = (params.get("virt_win_reg_remote", "no") == "yes") remote_uri = params.get("virt_win_reg_remote_uri", "ENTER.YOUR.REMOTE") if remote_yes and remote_uri.count("ENTER"): test.cancel("Remote Test is skipped.") # Get parameters about reg value. computer_name = params.get("virt_win_reg_computer_name") computer_name_v2 = params.get("virt_win_reg_computer_name_v2") key_path = params.get("virt_win_reg_key_path") value_name = params.get("virt_win_reg_value_name") # Get vm_ref. vm_ref = params.get("virt_win_reg_vm_ref") if vm_ref == "domname": vm_ref = vm_name elif vm_ref == "image_name": disks = vm.get_disk_devices() vm_ref = list(disks.values())[0]['source'] # Get information about operations. operation = params.get("virt_win_reg_operation") virt_win_reg_cmd = params.get("virt_win_reg_cmd") prepare_reg_cmd = params.get("prepare_reg_cmd") verify_reg_cmd = params.get("verify_reg_cmd") if not (virt_win_reg_cmd and prepare_reg_cmd and verify_reg_cmd): test.cancel("Missing command for virt_win_reg or" " cmd in guest to check result.") # Build a command. command = virt_win_reg_exec if remote_yes: command += " -c %s" % remote_uri command += " %s" % vm_name else: command += " %s" % vm_ref command += " %s" % virt_win_reg_cmd reg_file = None if not operation == "query": # Prepare a file for virt-win-reg --merge lines = [] lines.append("[%s]\n" % key_path) if operation == "add": lines.append("\"%s\"=\"%s\"" % (value_name, computer_name)) elif operation == "remove": lines.append("\"%s\"=-" % (value_name)) elif operation == "modify": lines.append("\"%s\"=\"%s\"" % (value_name, computer_name_v2)) with open(os.path.join(data_dir.get_tmp_dir(), "merge.reg"), "w") as reg_file: reg_file.writelines(lines) command += " %s" % reg_file.name session = vm.wait_for_login() try: status, output = session.cmd_status_output(prepare_reg_cmd) if status: logging.debug("Preparation is already done.") vm.destroy() result = process.METHOD_NAME(command, ignore_status=True, shell=True) if result.exit_status: test.fail(result) output_virt_win_reg = result.stdout_text.strip() if not vm.is_alive(): vm.start() session = vm.wait_for_login() status, output = session.cmd_status_output(verify_reg_cmd) if operation == "query": output_in_guest = output.split()[-1].strip() if not output_in_guest == output_virt_win_reg: test.fail("Information are not equal from " "virt_win_reg and from cmd in guest.\n" "virt_win_reg: %s\n" "cmd_in_guest: %s" % (output_virt_win_reg, output_in_guest)) elif operation == "remove": if not status: test.fail("Get the value of computer %s in remove" " test.\nSo it means the virt-win-reg" "to remove it failed." % output) elif operation == "modify": output_in_guest = output.split()[-1].strip() if not output_in_guest == computer_name_v2: test.fail("Modify test failed. The value of" "computer after virt-win-reg is %s." "But our expected value is %s." % (output_in_guest, computer_name_v2)) elif operation == "add": if status: test.fail("Add test failed. Get the computer_name" "failed after virt-win-reg command." "Detail: %s." % output) finally: # Clean up. session.close() # remove temp file. if reg_file and os.path.exists(reg_file.name): os.remove(reg_file.name)
[ 22 ]
def METHOD_NAME(self, bk_biz_id, scope_type, combine): scope = {"business": "bk_alarm_shield_business", "IP": "bk_alarm_shield_IP", "node": "bk_alarm_shield_node"} scope_value = combine.get(scope[scope_type]) return scope_value
[ 19, 913, 99 ]
def METHOD_NAME(self) -> Sequence['outputs.SubResourceResponse']: """ The list of virtual networks associated with the DDoS protection plan resource. This list is read-only. """ return pulumi.get(self, "virtual_networks")
[ 162, 7479 ]
def METHOD_NAME(self): assert invoke.StreamWatcher is invoke.METHOD_NAME.StreamWatcher assert invoke.Responder is invoke.METHOD_NAME.Responder assert invoke.FailingResponder is invoke.METHOD_NAME.FailingResponder
[ 2388 ]
def METHOD_NAME(self): if self._maxsize > 0: return self.qsize() < self._maxsize else: return True
[ -1 ]
def METHOD_NAME(self, epoch: int): # We sample as many architectures as we need if epoch < self.population_size: logger.info("Start sampling architectures to fill the population") # If there is no scope defined, let's use the search space default one model = ( torch.nn.Module() ) model.arch = self.search_space.clone() model.arch.sample_random_architecture(dataset_api=self.dataset_api) model.accuracy = model.arch.query( self.performance_metric, self.dataset, dataset_api=self.dataset_api ) self.population.append(model) self._update_history(model) log_every_n_seconds( logging.INFO, "Population size {}".format(len(self.population)) ) else: sample = [] while len(sample) < self.sample_size: candidate = np.random.choice(list(self.population)) sample.append(candidate) parent = max(sample, key=lambda x: x.accuracy) child = ( torch.nn.Module() ) child.arch = self.search_space.clone() child.arch.mutate(parent.arch, dataset_api=self.dataset_api) child.accuracy = child.arch.query( self.performance_metric, self.dataset, dataset_api=self.dataset_api ) self.population.append(child) self._update_history(child)
[ 80, 1165 ]
def METHOD_NAME(self, console: "Console") -> Iterator[Callable[[str], None]]: """Given a Console, yields a function for writing data to stdout, or a file. The passed options instance will generally be the `Goal.Options` of an `Outputting` `Goal`. """ with self.output_sink(console) as output_sink: yield lambda msg: output_sink.write(msg) # type: ignore[no-any-return]
[ 146 ]
def METHOD_NAME(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request("patch", url, data=data, **kwargs)
[ 1575 ]
def METHOD_NAME(): assert normalize_and_validate([("foo", "bar")]) == [(b"foo", b"bar")] assert normalize_and_validate([(b"foo", b"bar")]) == [(b"foo", b"bar")] # no leading/trailing whitespace in names with pytest.raises(LocalProtocolError): normalize_and_validate([(b"foo ", "bar")]) with pytest.raises(LocalProtocolError): normalize_and_validate([(b" foo", "bar")]) # no weird characters in names with pytest.raises(LocalProtocolError) as excinfo: normalize_and_validate([(b"foo bar", b"baz")]) assert "foo bar" in str(excinfo.value) with pytest.raises(LocalProtocolError): normalize_and_validate([(b"foo\x00bar", b"baz")]) # Not even 8-bit characters: with pytest.raises(LocalProtocolError): normalize_and_validate([(b"foo\xffbar", b"baz")]) # And not even the control characters we allow in values: with pytest.raises(LocalProtocolError): normalize_and_validate([(b"foo\x01bar", b"baz")]) # no return or NUL characters in values with pytest.raises(LocalProtocolError) as excinfo: normalize_and_validate([("foo", "bar\rbaz")]) assert "bar\\rbaz" in str(excinfo.value) with pytest.raises(LocalProtocolError): normalize_and_validate([("foo", "bar\nbaz")]) with pytest.raises(LocalProtocolError): normalize_and_validate([("foo", "bar\x00baz")]) # no leading/trailing whitespace with pytest.raises(LocalProtocolError): normalize_and_validate([("foo", "barbaz ")]) with pytest.raises(LocalProtocolError): normalize_and_validate([("foo", " barbaz")]) with pytest.raises(LocalProtocolError): normalize_and_validate([("foo", "barbaz\t")]) with pytest.raises(LocalProtocolError): normalize_and_validate([("foo", "\tbarbaz")]) # content-length assert normalize_and_validate([("Content-Length", "1")]) == [ (b"content-length", b"1") ] with pytest.raises(LocalProtocolError): normalize_and_validate([("Content-Length", "asdf")]) with pytest.raises(LocalProtocolError): normalize_and_validate([("Content-Length", "1x")]) with pytest.raises(LocalProtocolError): normalize_and_validate([("Content-Length", "1"), ("Content-Length", "2")]) assert normalize_and_validate( [("Content-Length", "0"), ("Content-Length", "0")] ) == [(b"content-length", b"0")] assert normalize_and_validate([("Content-Length", "0 , 0")]) == [ (b"content-length", b"0") ] with pytest.raises(LocalProtocolError): normalize_and_validate( [("Content-Length", "1"), ("Content-Length", "1"), ("Content-Length", "2")] ) with pytest.raises(LocalProtocolError): normalize_and_validate([("Content-Length", "1 , 1,2")]) # transfer-encoding assert normalize_and_validate([("Transfer-Encoding", "chunked")]) == [ (b"transfer-encoding", b"chunked") ] assert normalize_and_validate([("Transfer-Encoding", "cHuNkEd")]) == [ (b"transfer-encoding", b"chunked") ] with pytest.raises(LocalProtocolError) as excinfo: normalize_and_validate([("Transfer-Encoding", "gzip")]) assert excinfo.value.error_status_hint == 501 # Not Implemented with pytest.raises(LocalProtocolError) as excinfo: normalize_and_validate( [("Transfer-Encoding", "chunked"), ("Transfer-Encoding", "gzip")] ) assert excinfo.value.error_status_hint == 501 # Not Implemented
[ 9, 1137, 61, 187 ]
def METHOD_NAME(self): super(GpSegStart, self).METHOD_NAME()
[ 531, 481 ]
def METHOD_NAME(self) -> SupervisorJob: """Return current job of the asyncio task. Must be called from within a job. Raises RuntimeError if there is no current job. """ try: return self.get_job(_CURRENT_JOB.get()) except (LookupError, JobNotFound) as err: capture_exception(err) raise RuntimeError("No job for the current asyncio task!") from None
[ 1056 ]
def METHOD_NAME(): records = [ {"buildingID": 0, "date": "6/1/13", "temp_diff": 12}, {"buildingID": 1, "date": "6/1/13", "temp_diff": 0}, {"buildingID": 2, "date": "6/1/14", "temp_diff": 11}, {"buildingID": 0, "date": "6/1/15", "temp_diff": 5}, {"buildingID": 1, "date": "6/1/16", "temp_diff": 19}, {"buildingID": 2, "date": "6/1/17", "temp_diff": 32}, ] df = pd.DataFrame(records) encoding = Encoding() ipywidget_factory = MagicMock() ipywidget_factory.get_vbox.return_value = MagicMock(spec=Widget) EncodingWidget(df, encoding, change_hook, ipywidget_factory, testing=True) assert ( call( description="X", value=None, options=[ ("-", None), ("buildingID", "buildingID"), ("date", "date"), ("temp_diff", "temp_diff"), ], ) in ipywidget_factory.get_dropdown.mock_calls ) assert ( call( description="Y", value=None, options=[ ("-", None), ("buildingID", "buildingID"), ("date", "date"), ("temp_diff", "temp_diff"), ], ) in ipywidget_factory.get_dropdown.mock_calls ) assert ( call( description="Func.", value="none", options=[ ("-", "None"), ("Avg", "Avg"), ("Min", "Min"), ("Max", "Max"), ("Sum", "Sum"), ("Count", "Count"), ], ) in ipywidget_factory.get_dropdown.mock_calls )
[ 9, 2300, 41, 75, 98, 1993, 1471 ]
def METHOD_NAME(api): ''' test to raise the exception when type of name is not as defined ''' with pytest.raises(TypeError): api.target_groups.edit(1, 1)
[ 9, 11300, 2004, 156, 3365 ]
def METHOD_NAME(obj, dtype: Union[type, Tuple[type, ...]]): """Check if an object has a specific dtype.""" if not isinstance(obj, dtype): raise ValidationError(f"The dataobject is expected to be one of ({dtype},). But it is a {type(obj)}")
[ 638, 137, 1249 ]
def METHOD_NAME(self): t1 = time.gmtime() t2 = type(t1)(t1) self.assertEqual(t1, t2) self.assertTrue(not (t1 < t2)) self.assertTrue(t1 <= t2) self.assertTrue(not (t1 > t2)) self.assertTrue(t1 >= t2) self.assertTrue(not (t1 != t2))
[ 9, 2014 ]
def METHOD_NAME(path, *args, **kwargs): import torch state = torch.load(path, map_location="cpu") for key in list(state.keys()): v = state.pop(key) state[key] = v.numpy() return state
[ 557, 3296 ]
def METHOD_NAME(self): g = bf.zeros_like(self.known_vals, space='cuda') g[...] = self.known_vals np.testing.assert_equal(g.copy('system'), self.known_vals) g[:1,1:] = [[999]] np.testing.assert_equal(g.copy('system'), np.array([[0,999],[2,3],[4,5]])) g[0,0] = 888 np.testing.assert_equal(g.copy('system'), np.array([[888,999],[2,3],[4,5]])) g[0] = [99,88] np.testing.assert_equal(g.copy('system'), np.array([[99,88],[2,3],[4,5]])) g[:,1] = [77,66,55] np.testing.assert_equal(g.copy('system'), np.array([[99,77],[2,66],[4,55]]))
[ 9, 5719 ]
def METHOD_NAME(self, obj):
[ 427, 0, 264, 24, 122 ]
def METHOD_NAME(self): qs = Incident.objects.order_by("pk") expected = qs.stateful() result = IncidentFilter.incident_filter(qs, "stateful", True) self.assertEqual(list(expected), list(result.order_by("pk")))
[ 9, 3103, 2019 ]
def METHOD_NAME(url: str, **values) -> Response: if is_safe_url(url): return redirect(url, **values) return redirect(url_for("indexPage"))
[ 1209, 1736 ]
def METHOD_NAME(self, value, timestamp):
[ 1459 ]
def METHOD_NAME(self, obj): """For the given value, return its corresponding key.""" for key, val in self.items(): if val is obj: return key raise ValueError('The given object could not be found: %r' % obj)
[ 59, 43 ]
def METHOD_NAME(files, dim): paths = sorted(glob(files)) datasets = [xr.open_dataset(p) for p in paths] combined = xr.concat(datasets, dim) return combined
[ 203, -1 ]
def METHOD_NAME(remote_command_executor, submit_command, additional_files=None, children_number=0): logging.debug("Submitting Batch job") awsbatch_commands = AWSBatchCommands(remote_command_executor) result = remote_command_executor.run_remote_command(submit_command, additional_files=additional_files) job_id = awsbatch_commands.assert_job_submitted(result.stdout) logging.debug("Submitted Batch job id: {0}".format(job_id)) awsbatch_commands.wait_job_completed(job_id) try: awsbatch_commands.assert_job_succeeded(job_id, children_number) except AssertionError: remote_command_executor.run_remote_command(f"awsbout {job_id}", raise_on_error=False, log_output=True) raise
[ 9, 202, 1978 ]
def METHOD_NAME(): # create Universe objects for tests new_u = make_Universe(trajectory=True) return new_u
[ 4849, 4850 ]
def METHOD_NAME(self, others, merge_conditions, common_ancestor=None): if common_ancestor is None: merged = self.blank_copy() options = [] for s, v in zip([self] + others, merge_conditions): options.append(And(*([v] + s.constraints))) merged.add([Or(*options)]) else: merged = common_ancestor.branch() merged.add([Or(*merge_conditions)]) return False, merged
[ 411 ]
def METHOD_NAME(self, model: TModel) -> CompressionAlgorithmController: pass
[ 56, 2951 ]
def METHOD_NAME( primary_client, benchmark, load, incr_load, num_runs, clients, nfs_mount, benchmark_defination, ): if SpecStorage(primary_client).run_spec_storage( benchmark, load, incr_load, num_runs, clients, nfs_mount, benchmark_defination ): raise OperationFailedError("SPECstorage run failed") log.info("SPECstorage run completed")
[ 22, 1457, 948, 249 ]
def METHOD_NAME(fl_model: FLModel, params_converter: Optional[ParamsConverter] = None) -> Shareable: """From FLModel to NVFlare side shareable. This is a temporary solution to converts FLModel to the shareable of existing style, so that we can reuse the existing components we have. In the future, we should be using the to_dxo, from_dxo directly. And all the components should be changed to accept the standard DXO. """ if fl_model.params is None and fl_model.metrics is None: raise ValueError("FLModel without params and metrics is NOT supported.") elif fl_model.params is not None: if fl_model.params_type is None: raise ValueError(f"Invalid ParamsType: ({fl_model.params_type}).") data_kind = params_type_to_data_kind.get(fl_model.params_type) if data_kind is None: raise ValueError(f"Invalid ParamsType: ({fl_model.params_type}).") if params_converter is not None: fl_model.params = params_converter.convert(fl_model.params) if fl_model.metrics is None: dxo = DXO(data_kind, data=fl_model.params, meta={}) else: # if both params and metrics are presented, will be treated as initial evaluation on the global model dxo = DXO(data_kind, data=fl_model.params, meta={MetaKey.INITIAL_METRICS: fl_model.metrics}) else: dxo = DXO(DataKind.METRICS, data=fl_model.metrics, meta={}) meta = fl_model.meta if fl_model.meta is not None else {} dxo.meta.update(meta) shareable = dxo.METHOD_NAME() if fl_model.current_round is not None: shareable.set_header(AppConstants.CURRENT_ROUND, fl_model.current_round) if fl_model.total_rounds is not None: shareable.set_header(AppConstants.NUM_ROUNDS, fl_model.total_rounds) if MetaKey.VALIDATE_TYPE in meta: shareable.set_header(AppConstants.VALIDATE_TYPE, meta[MetaKey.VALIDATE_TYPE]) return shareable
[ 24, 2881 ]
async def METHOD_NAME(self): network = NetworkFactory(ip="192.168.0.0/24") await network.remove() with pytest.raises(TransitionNotAllowed, match=".*Can't stop when in removed.*"): await network.remove()
[ 9, 188, 1646, 674 ]
def METHOD_NAME(self):
[ 9, 2216, 803, 656 ]
def METHOD_NAME(self): with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'r') as f: return json.load(f)[self.CFG_SACKS]
[ 19, 948, 9441 ]
async def METHOD_NAME(self, device_name: str, name: str, resource_group_name: str, **kwargs: Any) -> _models.Job: """Gets the details of a specified job on a Data Box Edge/Data Box Gateway device. Gets the details of a specified job on a Data Box Edge/Data Box Gateway device. :param device_name: The device name. Required. :type device_name: str :param name: The job name. Required. :type name: str :param resource_group_name: The resource group name. Required. :type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Job or the result of cls(response) :rtype: ~azure.mgmt.databoxedge.v2022_03_01.models.Job :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) cls: ClsType[_models.Job] = kwargs.pop("cls", None) request = build_get_request( device_name=device_name, name=name, resource_group_name=resource_group_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.METHOD_NAME.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("Job", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
[ 19 ]
def METHOD_NAME(self) -> int: from poetry.factory import Factory from poetry.pyproject.toml import PyProjectTOML # Load poetry config and display errors, if any poetry_file = self.poetry.file.path config = PyProjectTOML(poetry_file).poetry_config check_result = Factory.validate(config, strict=True) # Validate trove classifiers project_classifiers = set(config.get("classifiers", [])) errors, warnings = self._validate_classifiers(project_classifiers) check_result["errors"].extend(errors) check_result["warnings"].extend(warnings) # Validate readme (files must exist) if "readme" in config: errors = self._validate_readme(config["readme"], poetry_file) check_result["errors"].extend(errors) # Verify that lock file is consistent if self.option("lock") and not self.poetry.locker.is_locked(): check_result["errors"] += ["poetry.lock was not found."] if self.poetry.locker.is_locked() and not self.poetry.locker.is_fresh(): check_result["errors"] += [ "poetry.lock is not consistent with pyproject.toml. Run `poetry" " lock [--no-update]` to fix it." ] if not check_result["errors"] and not check_result["warnings"]: self.info("All set!") return 0 for error in check_result["errors"]: self.line_error(f"<error>Error: {error}</error>") for error in check_result["warnings"]: self.line_error(f"<warning>Warning: {error}</warning>") return 1
[ 276 ]
def METHOD_NAME(self): self.label1.setText("Parameters for TikTorch configuration:")
[ 15, 1174 ]
def METHOD_NAME(self): log_file = os.path.join( self.out_dir, "impl", self.project_name + "_impl.par" ) with open(log_file, 'r') as file: for line in file: line = line.strip() if "Peak Memory Usage:" in line: self.maximum_memory_use = line.split()[3] return
[ 238, 3624, 1645, 1080 ]
async def METHOD_NAME(spawn_client, fake2, snapshot, role, mongo): client = await spawn_client(authorize=True, administrator=True) user = await fake2.users.create() resp = await client.put(f"/admin/users/{user.id}/role", {"role": role}) assert resp.status == 200 if role == AdministratorRole.FULL: assert await get_one_field(mongo.users, "administrator", user.id) is True assert await resp.json() == snapshot
[ 9, 86, 2870, 1018 ]
def METHOD_NAME(predictions, gts, raw_df_lst, capacity, output_len): """ Desc: Some common metrics for regression problems Args: predictions: gts: ground truth vector raw_df_lst: settings: Returns: A tuple of metrics """ all_mae, all_rmse = [], [] for i in range(capacity): prediction = predictions[i] gt = gts[i] raw_df = raw_df_lst[i] _mae, _rmse = turbine_scores(prediction, gt, raw_df, output_len, 1) all_mae.append(_mae) all_rmse.append(_rmse) total_mae = np.array(all_mae).sum() total_rmse = np.array(all_rmse).sum() return total_mae, total_rmse
[ 1981, 4814, 3295 ]
def METHOD_NAME(self): data = b"Test data" with patch("builtins.open", mock_open(read_data=data)): res = utils.read_file_bytes("any path") self.assertEqual(data, res)
[ 9, 203, 171, 321 ]
def METHOD_NAME(): class TypesZone(Enum): z1 = "Zone 1" z2 = "Zone 2" zone = np.asarray([TypesZone.z1, TypesZone.z2, TypesZone.z2, TypesZone.z1]) assert_near(P.single.owner[zone], [100, 200, 200, 100])
[ 9, 41, 1206 ]
def METHOD_NAME(self, name): """Get a single Role by name. Args: name (str): The name of the Role. Returns: (:obj:`Role`): The Role that matches the name or None. """ address = _create_role_address(name) role_list_bytes = None try: role_list_bytes = self._state_view.get(address=address) except KeyError: return None if role_list_bytes is not None: role_list = _create_from_bytes(role_list_bytes, identity_pb2.RoleList) for role in role_list.roles: if role.name == name: return role return None
[ 19, 1018 ]
def METHOD_NAME(self, sql): if sql.lower() == 'begin': self.begin = True elif sql.lower() == 'commit': self.commit = True
[ 248, 539 ]
def METHOD_NAME(): with patch("os.path.isfile", return_value=True): yield
[ 220, 6903, 171 ]
def METHOD_NAME(self) -> str: """ Description of the pool, if any. """ return pulumi.get(self, "description")
[ 1067 ]
def METHOD_NAME(process, input): if input: try: process.stdin.write(input) except BrokenPipeError: pass # communicate() must ignore broken pipe errors. except ValueError: pass # communicate() must ignore broken closed pipes except OSError as exc: if exc.errno == errno.EINVAL: # bpo-19612, bpo-30418: On Windows, stdin.write() fails # with EINVAL if the child process exited or if the child # process is still running but closed the pipe. pass else: raise try: process.stdin.close() except BrokenPipeError: pass # communicate() must ignore broken pipe errors. except ValueError: pass # communicate() must ignore broken closed pipes except OSError as exc: if exc.errno == errno.EINVAL: pass else: raise
[ 2195, 77 ]
def METHOD_NAME(self): """ Test repetition parser class. """ word = parser.Letters() whitespace = parser.Whitespace() p = parser.Repetition(parser.Alternative((word, whitespace))) # Test with ascii letters input_output = ( ("abc", ["abc"]), ("abc abc", ["abc", " ", "abc"]), ("abc abc\t\t\n cba", ["abc", " ", "abc", "\t\t\n ", "cba"]), ) self._test_single(p, input_output) # Test with non-ascii letters input_output = ( (u"êùö", [u"êùö"]), (u"êùö êùö", [u"êùö", u" ", u"êùö"]), (u"êùö êùö\t\t\n öùê", [ u"êùö", " ", u"êùö", u"\t\t\n ", u"öùê"]), ) self._test_single(p, input_output)
[ 9, 7963 ]
def METHOD_NAME(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result)
[ 9, 8265, 1345, 1268, 5472, 2938, 1114 ]
def METHOD_NAME(df: pd.DataFrame, npartitions: int = 1) -> dd.DataFrame: return dd.from_pandas(df, npartitions=npartitions)
[ 280, 2842 ]
def METHOD_NAME(client: Any) -> None: """Validate client and import weaviate library.""" try: import weaviate # noqa: F401 from weaviate import Client client = cast(Client, client) except ImportError: raise ImportError( "Weaviate is not installed. " "Please install it with `pip install weaviate-client`." ) cast(Client, client)
[ 187, 340 ]
def METHOD_NAME(self): test_user = get_user_model().objects.first() """ If the input object is a ResourceBase, in favorite content type, should be saved he subtype content type (Doc, Dataset, Map or GeoApp) """ create_single_dataset("foo_dataset") resource = ResourceBase.objects.get(title="foo_dataset") created_fav = Favorite.objects.create_favorite(resource, test_user) self.assertEqual("dataset", created_fav.content_type.model) """ If the input object is a subtype, should save the relative content type """ test_document_1 = Document.objects.first() self.assertIsNotNone(test_document_1) Favorite.objects.create_favorite(test_document_1, test_user) fav = Favorite.objects.last() ct = ContentType.objects.get_for_model(test_document_1) self.assertEqual(fav.content_type, ct)
[ 9, 1393, 191, 414, 279, 5334, 1283 ]
def METHOD_NAME(self): node = parser.DataNode(' ') node.write(self.doc) self.doc.handle_data.assert_called_once_with(' ') self.doc.handle_data.reset_mock() node = parser.DataNode(' ') node.write(self.doc) self.doc.handle_data.assert_called_once_with(' ')
[ 9, 77, 173 ]
def METHOD_NAME(self, keyspace: str = "keyspace1", cf: str = "standard1", scrub_mode: Optional[str] = None) -> Result: params = {"keyspace": keyspace, "cf": cf, "scrub_mode": scrub_mode} return self.storage_service_client.scrub_ks_cf(**params)
[ 2117, 2825, 4713 ]
def METHOD_NAME(csv_file_basename, csv_content): """ Write the content of a list of list into a CSV file. Example of csv_content: [ ['Header_1', 'Header_2', 'Header_3' ...], ['Value_1', 'Value_2', 'Value_3' ...], .... ] :param csv_file_basename: base name that should be given to the CSV :type csv_file_basename: str :param csv_content : list of list with the content of the future CSV file :type csv_content : list """ csv_file = ( os.getcwd() + "/" + csv_file_basename + "_" + str(datetime.date.today()) + ".csv" ) with open(csv_file, "w", newline="") as file: writer = csv.writer(file) writer.writerows(csv_content)
[ 77, 732, 171 ]
def METHOD_NAME(db): questionset = QuestionSet.objects.first() validator = QuestionSetUniqueURIValidator() validator.set_context(QuestionSetSerializer(instance=questionset)) with pytest.raises(RestFameworkValidationError): validator({ 'uri_prefix': questionset.uri_prefix, 'key': questionset.section.questionsets.last().key, 'section': questionset.section })
[ 9, 2768, 354, 2889, 1386, 86, 168 ]
def METHOD_NAME(self):
[ 9, 401, 141 ]
def METHOD_NAME(self) -> None: """ L{FileLogObserver} does not write to the given file when it observes events and C{formatEvent} returns C{""}. """ self._test_observeWrites("", 0)
[ 9, 6427, 5033, 35 ]
def METHOD_NAME(dictionary): """ Return an identical copy of a dictionary """ return np.copy(np.array(dictionary))
[ 215, 2445 ]
def METHOD_NAME(self, mocker): """ Test with a gzipped file. """ prepare_mocker(mocker) self.run_bot(parameters={'http_url': 'http://localhost/foobar.gz', 'extract_files': True, 'name': 'Example feed', }, iterations=1) output = OUTPUT[0].copy() output['feed.url'] = 'http://localhost/foobar.gz' del output['extra.file_name'] self.assertMessageEqual(0, output)
[ 9, 6937 ]
def METHOD_NAME(graph, quant_groups): return graph_utils.group_up(graph, quant_groups, NNDCT_OP.SQUEEZE)
[ 1587, 2151, 3822 ]
def METHOD_NAME(*args): return "test_single_exp"
[ 668, 156 ]
def METHOD_NAME(self): request = gateway.mapper.create_tracking_request(self.TrackingRequest) self.assertEqual(request.serialize(), TrackingRequestJSON)
[ 9, 129, 1151, 377 ]
def METHOD_NAME(self): return UsersUser.objects.filter(id=self.lastedit_user_id).first()
[ -1, 21 ]
def METHOD_NAME(self, spawn, find_binary, tmp_path): syncer = git.git_syncer(str(tmp_path), "git://blah.git") syncer.sync(verbosity=-1) assert "-q" == spawn.call_args[0][0][-1]
[ 9, 4720, 164 ]
def METHOD_NAME(self): """ Assert that the Mach number check fires an exception. """ max_vel = 1.1 * self.lbf.mach_limit() * AGRID / TAU vbb = espressomd.lb.VelocityBounceBack([0, 0, max_vel]) error_msg = 'Slip velocity exceeds Mach 0.35' with self.assertRaisesRegex(ValueError, error_msg): self.lbf[0, 0, 0].boundary = vbb self.assertIsNone(self.lbf[0, 0, 0].boundary) with self.assertRaisesRegex(ValueError, error_msg): shape = espressomd.shapes.Wall(normal=[1, 0, 0], dist=AGRID) self.lbf.add_boundary_from_shape(shape, vbb.velocity) self.assertIsNone(self.lbf[0, 0, 0].boundary)
[ 9, 14342, 1467, 250 ]
def METHOD_NAME(shape, yx=[0, 0], th=None): """2D array of x values. Parameters ---------- shape : tuple of int The shape of the resulting array, (y, x). yx : tuple of float Offset the array to align with this y, x center. th : Quantity, optional Place the x-axis along this position angle, measured counterclockwise from the original x-axis. Returns ------- x : ndarray An array of x values. Examples -------- >>> from sbpy.imageanalysis.utils import xarray >>> x = xarray((10, 10)) >>> x[0, 3] 3 """ import numpy as np import astropy.units as u y, x = np.indices(shape)[-2:] y = y - yx[0] x = x - yx[1] if th is not None: x = x * np.cos(th.to(u.rad).value) + y * np.sin(th.to(u.rad).value) return x
[ 7724 ]
async def METHOD_NAME(cls, client: GraphQLClient) -> str: """Determine whether the GitLab instance has the approved field for merge requests.""" response = await client.execute(MERGE_REQUEST_FIELDS_QUERY) json = await response.json() fields = [field["name"] for field in json["data"]["__type"]["fields"]] return cls.APPROVED_FIELD if cls.APPROVED_FIELD in fields else ""
[ 12427, 101 ]
def METHOD_NAME(self): self.test_open_template(filename=os_helper.FakePath(self.fn))
[ 9, 11771, 157, 171, 1514 ]
def METHOD_NAME(local_filename, var_name): from pyhdf.SD import SD, SDC f = SD(local_filename, SDC.READ) var = f.select(var_name) data = var[:] fill = ReflectanceCorrector._read_fill_value_from_hdf4(var, data.dtype) return np.ma.MaskedArray(data, data == fill)
[ 203, 486, 280, -1, 171, -1 ]
def METHOD_NAME(): self.gui.lap.unpause()
[ 3609, 10412 ]
def METHOD_NAME(plugin, item_id, page, category_url, **kwargs): """Build playlists listing""" resp = urlquick.get(category_url + '?paged=%s' % page) root = resp.parse() for playlist in root.iterfind(".//article"): item = Listitem() item.label = playlist.find('.//h2').find('a').get('title') if playlist.find('.//img').get('data-src'): item.art['thumb'] = item.art['landscape'] = playlist.find('.//img').get('data-src') else: item.art['thumb'] = item.art['landscape'] = playlist.find('.//img').get('src') videos_url = URL_ROOT + playlist.find('.//h2').find('a').get('href') item.set_callback(list_playlist_videos, item_id=item_id, videos_url=videos_url) item_post_treatment(item) yield item # More videos... yield Listitem.next_page(item_id=item_id, category_url=category_url, page=page + 1)
[ 245, 9828 ]
def METHOD_NAME(): func = lambda x: x rob = robjects.default_converter.py2rpy(func) assert isinstance(rob, robjects.SignatureTranslatedFunction) assert rob.typeof == rinterface.RTYPES.CLOSXP
[ 9, 3782, 3952, 3264, 559 ]
def METHOD_NAME(self) -> GraphTraversalSource: return self._graph
[ 19, 303 ]
def METHOD_NAME(): da = make_binned_data_array(ndim=1) sc.make_html(da['xx', 1].bins.data)
[ 9, 382, 92, 10187, 1997, 365, 877 ]
def METHOD_NAME(csv_config: dict): tap = SampleTapCountries(config=SAMPLE_TAP_CONFIG, state=None) target = SampleTargetCSV(config=csv_config) mapper = StreamTransform(config=COUNTRIES_STREAM_MAPS_CONFIG) sync_end_to_end(tap, target, mapper)
[ 9, 3235, 24, 732, 8568 ]
def METHOD_NAME(self, tb: str, column: sqlalchemy.sql.schema.Column) -> None: if column.info.get("readonly"): with tag(tb, "xsd:annotation"): with tag(tb, "xsd:appinfo"): with tag(tb, "readonly", {"value": "true"}): pass
[ 669, 1076 ]
def METHOD_NAME( request, kube_apis, ingress_controller_prerequisites, ingress_controller_endpoint, ingress_controller, test_namespace, ) -> TLSSetup: print("------------------------- Deploy TLS setup -----------------------------------") test_data_path = f"{TEST_DATA}/tls" ingress_path = f"{test_data_path}/{request.param}/ingress.yaml" create_ingress_from_yaml(kube_apis.networking_v1, test_namespace, ingress_path) wait_before_test(1) ingress_host = get_first_ingress_host_from_yaml(ingress_path) secret_name = get_name_from_yaml(f"{test_data_path}/tls-secret.yaml") ensure_connection_to_public_endpoint( ingress_controller_endpoint.public_ip, ingress_controller_endpoint.port, ingress_controller_endpoint.port_ssl ) def fin(): if request.config.getoption("--skip-fixture-teardown") == "no": print("Clean up TLS setup") delete_items_from_yaml(kube_apis, ingress_path, test_namespace) if is_secret_present(kube_apis.v1, secret_name, test_namespace): delete_secret(kube_apis.v1, secret_name, test_namespace) request.addfinalizer(fin) return TLSSetup( ingress_host, secret_name, f"{test_data_path}/tls-secret.yaml", f"{test_data_path}/new-tls-secret.yaml", f"{test_data_path}/invalid-tls-secret.yaml", )
[ 1245, 102 ]
def METHOD_NAME(self, response: requests.Response) -> ResponseStatus: request = response.request if request not in self._last_request_to_attempt_count: self._last_request_to_attempt_count = {request: 1} else: self._last_request_to_attempt_count[request] += 1 for response_filter in self.response_filters: matched_status = response_filter.matches( response=response, backoff_time=self._backoff_time(response, self._last_request_to_attempt_count[request]) ) if matched_status is not None: return matched_status if response.ok: return response_status.SUCCESS # Fail if the response matches no filters return response_status.FAIL
[ 1028, 17 ]
def METHOD_NAME(): mu = pt.constant(0) sigma = pt.scalar("sigma") x_rv = pt.random.normal(mu, sigma, name="x") x_vv = pt.constant(0) x_logp = logp(x_rv, x_vv) x_logp_fn = function([sigma], x_logp) with pytest.raises(ParameterValueError, match="sigma > 0"): x_logp_fn(-1)
[ 9, 250, 511 ]
def METHOD_NAME(self, gpxfile, tree): tree.write(gpxfile, xml_declaration=True, encoding='UTF-8')
[ 129, 2949, 16791 ]
def METHOD_NAME(self): """ create a multistatus response for the prop names """ dc=self._dataclass # create the document generator doc = domimpl.createDocument(None, "multistatus", None) ms = doc.documentElement ms.setAttribute("xmlns:D", "DAV:") ms.tagName = 'D:multistatus' if self._depth=="0": if self._uri in self._dataclass.get_childs(get_parenturi(self._uri), self.filter): pnames=dc.get_propnames(self._uri) re=self.mk_propname_response(self._uri,pnames, doc) ms.appendChild(re) elif self._depth=="1": if self._uri in self._dataclass.get_childs(get_parenturi(self._uri), self.filter): pnames=dc.get_propnames(self._uri) re=self.mk_propname_response(self._uri,pnames, doc) ms.appendChild(re) for newuri in dc.get_childs(self._uri, self.filter): pnames=dc.get_propnames(newuri) re=self.mk_propname_response(newuri,pnames, doc) ms.appendChild(re) elif self._depth=='infinity': uri_list = [self._uri] while uri_list: uri = uri_list.pop() if uri in self._dataclass.get_childs(get_parenturi(uri), self.filter): pnames=dc.get_propnames(uri) re=self.mk_propname_response(uri,pnames, doc) ms.appendChild(re) uri_childs = self._dataclass.get_childs(uri) if uri_childs: uri_list.extend(uri_childs) return doc.toxml(encoding="utf-8")
[ 129, 4062 ]
def METHOD_NAME(self) -> Optional[str]: """ When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data. """ return pulumi.get(self, "skip_token")
[ 2423, 466 ]
def METHOD_NAME(): logging.debug("Creating buildpack launch environment...") util.mkdir_p(PROFILE_DIR) util.METHOD_NAME(DEPS_DIR, PROFILE_DIR)
[ 0, 1, 1440, 1027 ]
async def METHOD_NAME(self,index,coords,viewer=0): c= { "command" : "crosshairs", "coords" : [ 20+index*10,20+index*20,20+index*30 ], "viewer" : viewer }; await self.connections[index].send(json.dumps(c));
[ 0, 4645 ]
def METHOD_NAME(bq_client, table, rows): """Inserts rows into a BigQuery table. Args: bq_client: BigQuery client table: table where rows must be inserted rows: a list of rows to insert """ _batch_insert(bq_client, table, rows)
[ 3578, 408, 1346 ]
def METHOD_NAME(self, *args, **kwargs): """ Pass get_backend_timeout cache method to _call_with_fallback """ return self._call_with_fallback("get_backend_timeout", *args, **kwargs)
[ 19, 3127, 659 ]
def METHOD_NAME(self, mock_end_request): """An anonymous should not be able to end a classroom.""" classroom = ClassroomFactory() response = self.client.patch( f"/api/classrooms/{classroom.id}/end/", ) self.assertEqual(response.status_code, 401) mock_end_request.assert_not_called()
[ 9, 58, 14247, 1798, 7758, 1946 ]
def METHOD_NAME(path): return "".join([c if c.isalnum() else "_" for c in path.elems()])
[ 1702, 157 ]
def METHOD_NAME(self, date: str) -> List[dict]: components = self.identify_vacant_components() self.populate_num_untriaged_bugs(components) return components
[ 19, 487, 365 ]
def METHOD_NAME(self): return self.settings.os == "Windows" and not is_msvc(self)
[ 949, -1 ]
def METHOD_NAME(xshape, wshape, stride, padding, dilation, use_cuda, nhwc): if nhwc: test_func = test_nhwc else: test_func = test_nchw if use_cuda == 1: op_name = "cudnn_conv" else: op_name = "mkl_conv" with jt.log_capture_scope(use_cuda=use_cuda, enable_tuner=1, log_v=1, log_vprefix="op.cc=1000,exe=1000,conv_t=1000", compile_options={"test":244} ) as raw_log: x = jt.random(xshape) w = jt.random(wshape) y = test_func(x, w, stride, padding, dilation) loss = y.mean() dx, dw = jt.grad(loss, [x, w]) jt.sync([y, loss, dx, dw]) with jt.flag_scope(use_cuda=0, enable_tuner=0, compile_options={"test":233}): cy = test_func(x, w, stride, padding, dilation) closs = cy.mean() cdx, cdw = jt.grad(closs, [x, w]) jt.sync([cy, closs, cdx, cdw]) logs = find_log_with_re(raw_log, "(Jit op key (not )?found: " + op_name + ".*)") assert len(logs)==3 and "oihw" in logs[0][0], (logs) assert np.allclose(y.data, cy.data, 1e-3) assert np.allclose(dw.data, cdw.data, 1e-3), (dw.data, cdw.data) assert np.allclose(dx.data, cdx.data, 1e-3), (dx.data, cdx.data, np.abs(cdx.data).max(), np.abs(dx.data - cdx.data).max())
[ 250, 2955 ]
def METHOD_NAME(test_case): device = random_device() input = random_tensor(ndim=2, dim0=2, dim1=2).to(device) src = oneof(3.14, random_tensor(ndim=2, dim0=2, dim1=2).to(device)) inplace = oneof(True, False) dim = oneof(0, 1, -1) if inplace: y = input + 1 y.scatter_(dim, oneof(*_get_indexes(device)), src) return y return input.scatter(dim, oneof(*_get_indexes(device)), src)
[ 9, 2633, 236, 365 ]
def METHOD_NAME(self, nwkid, EPout, devunit, onoff): """ Levolo On/Off command are based on Level Control cluster Level: 108/0x6C -> On Level: 1/0x01 -> Off Left Unit: Timing 1 Right Unit: Timing 2 """ self.log.logging("Livolo", "Debug", "livolo_OnOff - devunit: %s, onoff: %s" % (devunit, onoff), nwkid=nwkid) if onoff not in ("On", "Off", "Toggle"): return if devunit not in ("Left", "Right", "All"): return if onoff == "Toggle" and devunit == "All": self.log.logging("Livolo", "Debug", "livolo_toggle", nwkid=nwkid) zcl_toggle(self, nwkid, EPout) else: level_value = timing_value = None if onoff == "On": level_value = "%02x" % 108 elif onoff == "Off": level_value = "%02x" % 1 if devunit == "Left": timing_value = "0001" elif devunit == "Right": timing_value = "0002" if level_value is not None and timing_value is not None: self.log.logging( "Livolo", "Debug", "livolo_OnOff - %s/%s Level: %s, Timing: %s" % (nwkid, EPout, level_value, timing_value), nwkid=nwkid, ) zcl_level_move_to_level( self, nwkid, EPout, "00", level_value, timing_value) #sendZigateCmd(self, "0081", "02" + nwkid + ZIGATE_EP + EPout + "00" + level_value + timing_value) else: Domoticz.Error("livolo_OnOff - Wrong parameters sent ! onoff: %s devunit: %s" % (onoff, devunit))
[ 14236, 69, 3988 ]
def METHOD_NAME(cli): if (not os.path.exists(cli.path)): ch.FATAL("can’t copy: not found: %s" % cli.path) pathstr = im.Reference.ref_to_pathstr(cli.image_ref) if (cli.bucache == ch.Build_Mode.ENABLED): # Un-tag previously deleted branch, if it exists. bu.cache.tag_delete(pathstr, fail_ok=True) dst = im.Image(im.Reference(cli.image_ref)) ch.INFO("importing: %s" % cli.path) ch.INFO("destination: %s" % dst) dst.unpack_clear() if (os.path.isdir(cli.path)): dst.copy_unpacked(cli.path) else: # tarball, hopefully dst.unpack([cli.path]) bu.cache.adopt(dst) if (dst.metadata["history"] == []): dst.metadata["history"].append({ "empty_layer": False, "command": "ch-image import"}) dst.metadata_save() ch.done_notify()
[ 512 ]
async def METHOD_NAME(form: KeywordsFormArgs, offset: int, limit: int) -> Tuple[int, List[Dict[str, Any]]]: path = _create_cache_path(form) if not await aiofiles.os.path.exists(path): raise KeywordsResultNotFound('The result does not exist') else: return await load_cached_partial(path, offset, limit)
[ 208, 1153, 2537 ]
def METHOD_NAME(module_session) -> None: module_session.execute(hades.radius_property.insert().values([ ('payment_in_default',), ('traffic_limit_exceeded',), ]))
[ 8568, 3662, 748 ]
def METHOD_NAME(hparams): """ This function prepares the datasets to be used in the brain class. It also defines the data processing pipeline through user-defined functions. """ data_folder = hparams["save_folder"] train_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams["train_csv"], replacements={"data_root": data_folder}, ) valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams["valid_csv"], replacements={"data_root": data_folder}, ) test_data = sb.dataio.dataset.DynamicItemDataset.from_csv( csv_path=hparams["test_csv"], replacements={"data_root": data_folder}, ) datasets = [train_data, valid_data, test_data] # We get the tokenizer as we need it to encode the labels when creating # mini-batches. tokenizer = hparams["tokenizer"] """Define text pipeline""" @sb.utils.data_pipeline.takes("words") @sb.utils.data_pipeline.provides("words", "tokens_bos", "tokens_eos") def text_pipeline(words): yield words tokens_list = tokenizer.encode_as_ids(words) tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list)) yield tokens_bos tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]]) yield tokens_eos sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline) # 4. Set output: sb.dataio.dataset.set_output_keys( datasets, ["id", "words", "tokens_bos", "tokens_eos"], ) return train_data, valid_data, test_data
[ -1, 123 ]
def METHOD_NAME(self): # b larger than b a = np.array([[1, 3, 3, 1, 7], [3, 3, 2, 3, 0]]) b = np.array([[3, 1, 2, 5, 3, 4, 7], [3, 3, 3, 1, 9, 9, 9]]) ma, ia = setmembership.ismember_rows(a, b) ma_known = np.array([1, 1, 1, 1, 0], dtype=bool) ia_known = np.array([1, 0, 2, 1]) self.assertTrue(np.allclose(ma, ma_known)) self.assertTrue(np.allclose(ia, ia_known))
[ 9, 9594, 1346, 5765, 3120, 988 ]
def METHOD_NAME(): """Sums the runtime of all queries logged by django.db.connection.queries""" runtimes = (float(query['time']) for query in django.db.connection.queries) return sum(runtimes)
[ 912, 5063, 815, 1888 ]