text: stringlengths 15 to 7.82k
ids: sequencelengths 1 to 7
def METHOD_NAME(self):
    self.all_stacks[1].obsolete = True
    self.all_stacks[3].obsolete = False
    self.all_stacks[4].obsolete = False
    self.all_stacks[5].obsolete = False
    self.all_stacks[3].dependencies.append(self.all_stacks[1])
    self.all_stacks[4].dependencies.append(self.all_stacks[3])
    self.all_stacks[5].dependencies.append(self.all_stacks[3])
    self.context.ignore_dependencies = True
    self.pruner.prune()
    assert self.plans[0].executions[0][0] == "delete"
    assert set(self.executed_stacks) == {
        self.all_stacks[1],
    }
[ 9, 3724, 256, 8439, 1980, 3910, 69 ]
def METHOD_NAME(self, new_data):
    result = ""
    for field in self.model._meta.fields:
        field = field.name
        if (
            field in new_data
            and self._values_before_update[field] != new_data[field]
        ):
            _before_update = self._values_before_update[field]
            _after_update = new_data[field]
            result += f"{field.title()}: {_before_update} -> {_after_update}\n"
    if result:
        add_comment([self.object], result, self.request.user)
[ 148, 1103 ]
def METHOD_NAME(self, node):
    """
    Visitor looking for a match between the current node and the pattern.

    If it matches, save it; otherwise look for a match at a lower level and keep going.
    """
    if Check(node, dict()).METHOD_NAME(self.pattern):
        self.result.add(node)
    else:
        self.generic_visit(node)
[ 716 ]
def METHOD_NAME(self):
    result = self.cli('record', 'tests', '--session', self.session, 'go-test',
                      str(self.test_files_dir.joinpath('reportv2')) + "/")
    self.assertEqual(result.exit_code, 0)

    self.assertIn('events', responses.calls[1].request.url, 'call events API')
    payload = json.loads(gzip.decompress(responses.calls[1].request.body).decode())

    # Remove timestamp because it depends on the machine clock
    for c in payload['events']:
        del c['created_at']

    expected = self.load_json_from_file(self.test_files_dir.joinpath('record_test_result.json'))
    self.assert_json_orderless_equal(expected, payload)

    self.assertIn('close', responses.calls[3].request.url, 'call close API')
[ 9, 148, 450, 820 ]
def METHOD_NAME(x, y, **kwargs):
    """
    Like numpy's assert_allclose, but for angles (in radians).
    """
    c2 = (np.sin(x) - np.sin(y))**2 + (np.cos(x) - np.cos(y))**2
    diff = np.arccos((2.0 - c2) / 2.0)  # a = b = 1
    assert np.allclose(diff, 0.0, **kwargs)
[ 638, 4100, 5362 ]
def METHOD_NAME(self, externalhandler, request):
    print("PYTHON - Handle web req {} {} {}".format(request.req_id, request.method, request.uri))
    print("PYTHON - GET variables ")
    for i in request.variable_data:
        print("PYTHON - GET {}:{}".format(i.field, i.content))
    externalhandler.send_http_response(
        request.req_id,
        bytes("<html><body><b>This is from python!</b></body></html>", "UTF-8"))
[ 276, 2412, 440, 19 ]
def METHOD_NAME(self):
    import re
    run_command = self._build_run_command()
    # Need the '$' to delete the last print just
    # in case the command output also has this
    # print out!
    pattern = re.compile('SHELL_EXIT=(\\d+)$')
    run_command = run_command + '; echo SHELL_EXIT=$?'
    (child_stdin, child_outerr) = os.popen4(run_command)
    child_stdin.close()
    self.output = child_outerr.read()
    self.exit_code = child_outerr.close()
    if self.exit_code is None:
        self._parse_shell_exit(pattern)
    self._remove_shell_exit(pattern)
    return self.exit_code
[ -1, 22 ]
def METHOD_NAME(self, repo, layout_container, result):
    """
    Given
    - A layout container with name and id

    When
    - validating layout container

    Then
    - validating that layout_container name and id are equal.
    """
    structure = mock_structure("", layout_container)
    validator = LayoutsContainerValidator(structure)
    assert validator.is_id_equals_name() == result
[ 9, 137, 156, 147, 926 ]
def METHOD_NAME(self):
    "Test Kabsch rigid rotation algorithm"
    # Test translation
    Kabsch.test([[1, 2], [2, 3], [1, 4]], [[2, 3], [3, 4], [2, 5]], verbose=False)
    # Test rotation
    P = numpy.array([[2, 3], [3, 4], [2, 5]])
    Q = -P
    Kabsch.test(P, Q, verbose=False)
    # test advanced 1
    P = numpy.array([[1, 1], [2, 0], [3, 1], [2, 2]])
    Q = numpy.array([[-1, 1], [0, 2], [-1, 3], [-2, 2]])
    Kabsch.test(P, Q, verbose=False)
    # test advanced 2
    P = numpy.array([[1, 1], [2, 0], [3, 1], [2, 2]])
    Q = numpy.array([[2, 0], [3, 1], [2, 2], [1, 1]])
    Kabsch.test(P, Q, verbose=False)
[ 9, 753 ]
def METHOD_NAME(self, url: QUrl, force: bool = False):
    """Load a given QUrl.

    Args:
        url: The QUrl to load.
        force: Whether to force loading even if the file is invalid.
    """
    with self.qtbot.wait_signal(self.tab.load_finished, timeout=2000) as blocker:
        self.tab.METHOD_NAME(url)
    if not force:
        assert blocker.args == [True]
[ 557, 274 ]
def METHOD_NAME(self):
    self.workflow.do_engine_steps()
    json = self.serializer.serialize_json(self.workflow)
    wf2 = self.serializer.deserialize_json(json)
    self.assertIsNotNone(self.workflow.last_task)
    self.assertIsNotNone(wf2.last_task)
    self._compare_workflows(self.workflow, wf2)
[ 9, 679, 758, 137, 0, 61, 432 ]
def METHOD_NAME():
    import aiohttp

    if getattr(aiohttp, "_datadog_patch", False):
        return

    _patch_client(aiohttp)
    aiohttp._datadog_patch = True
[ 1575 ]
def METHOD_NAME(self, cols: List[List[Any]], eow: bool = False, eos: bool = False) -> vpb.RowBatchData:
    assert len(cols) == len(
        self.relation.columns), f"num cols not equal, {len(cols)}, {len(self.relation.columns)}"
    for c in cols[1:]:
        assert len(c) == len(
            cols[0]), f"Rows are not aligned {len(c)}, {len(cols[0])}"

    formatted_cols: List[vpb.Column] = []
    for c, t in zip(cols, self.relation.columns):
        formatted_cols.append(_make_column(c, t.column_type))

    return vpb.RowBatchData(
        table_id=self.id,
        eow=eow,
        eos=eos,
        cols=formatted_cols,
        num_rows=len(cols[0]),
    )
[ 843, 2277 ]
def METHOD_NAME(server=None):
    """Returns the default config of the operator.

    This config can then be changed to the user needs and be used to
    instantiate the operator. The Configuration allows to customize how the
    operation will be processed by the operator.

    Parameters
    ----------
    server : server.DPFServer, optional
        Server with channel connected to the remote or local instance. When
        ``None``, attempts to use the global server.
    """
    return Operator.METHOD_NAME(name="min_max_fc", server=server)
[ 235, 200 ]
def METHOD_NAME(self):
    """Test the series list command."""

    series_cmd: commands.RunSeries = commands.get_command('series')
    arg_parser = arguments.get_parser()

    args = arg_parser.parse_args([
        'series', 'run', 'basic',
    ])

    series_cmd.silence()
    run_result = series_cmd.run(self.pav_cfg, args)
    self.assertEqual(run_result, 0)

    self._wait_for_all_start(series_cmd.last_run_series)

    list_args = [
        ['series', 'state_history', '--text'],
        ['series', 'states', series_cmd.last_run_series.sid],
    ]

    for raw_args in list_args:
        args = arg_parser.parse_args(raw_args)
        self.assertEqual(series_cmd.run(self.pav_cfg, args), 0)
[ 9, 4045, 351 ]
def METHOD_NAME(category, area, grade, options, start_date, end_date):
    df = fred_model.get_icespread(category, area, grade, options, start_date, end_date)
    assert isinstance(df, DataFrame)
    assert not df.empty
[ 9, 19, -1 ]
def METHOD_NAME(self, states): return len(states["tokens"]["tgt"])
[ 1030, 799 ]
def METHOD_NAME() -> str: return "my_namespace"
[ 1192, 1193, 1194 ]
def METHOD_NAME(self, old_key_tmpl, new_key_tmpl):
[ 9, 59, 2271 ]
def METHOD_NAME(
    self,
    query_input_ids,
    title_input_ids,
    query_token_type_ids=None,
    query_position_ids=None,
    query_attention_mask=None,
    title_token_type_ids=None,
    title_position_ids=None,
    title_attention_mask=None,
):
    query_cls_embedding = self.get_pooled_embedding(
        query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask
    )

    title_cls_embedding = self.get_pooled_embedding(
        title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask
    )

    cosine_sim = paddle.matmul(query_cls_embedding, title_cls_embedding, transpose_y=True)

    # substract margin from all positive samples cosine_sim()
    margin_diag = paddle.full(
        shape=[query_cls_embedding.shape[0]], fill_value=self.margin, dtype=paddle.get_default_dtype()
    )
    cosine_sim = cosine_sim - paddle.diag(margin_diag)

    # scale cosine to ease training converge
    cosine_sim *= self.sacle

    labels = paddle.arange(0, query_cls_embedding.shape[0], dtype="int64")
    labels = paddle.reshape(labels, shape=[-1, 1])

    loss = F.cross_entropy(input=cosine_sim, label=labels)

    return loss
[ 76 ]
def METHOD_NAME(self):
    """
    Looks for user tokens. If they are expired, or expiring, it notifies users.
    """
    # Initialized here, as it is needed empty at the beginning of the execution
    self.tokenDict = {}

    elements = ("Site", "Resource", "Node")

    for element in elements:
        self.log.info(f"Processing {element}")

        interestingTokens = self._getInterestingTokens(element)
        if not interestingTokens["OK"]:
            self.log.error(interestingTokens["Message"])
            continue
        interestingTokens = interestingTokens["Value"]

        processTokens = self._processTokens(element, interestingTokens)
        if not processTokens["OK"]:
            self.log.error(processTokens["Message"])
            continue

    notificationResult = self._notifyOfTokens()
    if not notificationResult["OK"]:
        self.log.error(notificationResult["Message"])

    return S_OK()
[ 750 ]
def METHOD_NAME(generator, ones, rate, training=None, count=1):
    def dropped_inputs():
        return generator.dropout(ones, rate)

    if count > 1:
        return [
            backend.in_train_phase(dropped_inputs, ones, training=training)
            for _ in range(count)
        ]
    return backend.in_train_phase(dropped_inputs, ones, training=training)
[ 567, 3663, 361 ]
def METHOD_NAME(self, connection, schema=None, **kw):
    query = self._get_table_names_base_query(schema=schema)
    query += """ WHERE TBL_TYPE != 'VIRTUAL_VIEW'"""
    return [row[0] for row in connection.execute(query)]
[ 19, 410, 83 ]
def METHOD_NAME(source, channels, start=None, end=None, series_class=TimeSeries,
                scaled=None):
    """Read data from one or more GWF files using the FrameL API
    """
    # scaled must be provided to provide a consistent API with frameCPP
    if scaled is not None:
        warnings.warn(
            "the `scaled` keyword argument is not supported by framel, "
            "if you require ADC scaling, please install "
            "python-ldas-tools-framecpp",
        )

    # parse input source
    source = file_list(source)

    # get duration
    crop = start is None and end is not None
    duration = -1
    span = Segment(start or 0, end or 0)
    framelstart = start or -1
    if start and end:
        duration = end - start

    # read each file and channel individually and append
    out = series_class.DictClass()
    for i, file_ in enumerate(source):
        for name in channels:
            new = _read_channel(
                file_,
                name,
                framelstart,
                duration,
                series_class,
            )
            if crop and end < new.x0.value:
                raise ValueError(
                    "read() given end GPS earlier than start GPS for "
                    "{} in {}".format(
                        name,
                        file_,
                    ),
                )
            elif crop:
                new = new.crop(end=end)
            out.append({name: new})
        # if we have all of the data we want, stop now
        if all(span in out[channel].span for channel in out):
            break
    return out
[ 203 ]
def METHOD_NAME():
    from litex.build.parser import LiteXArgumentParser
    parser = LiteXArgumentParser(platform=antmicro_sdi_mipi_video_converter.Platform,
                                 description="LiteX SoC on Antmicro SDI MIPI Video Converter Board.")
    parser.add_target_argument("--device", default="LIFCL-40-9BG256C",
                               help="FPGA device (LIFCL-40-9BG400C, LIFCL-40-8BG400CES, or LIFCL-40-8BG400CES2).")
    parser.add_target_argument("--sys-clk-freq", default=75e6, help="System clock frequency.")
    parser.add_target_argument("--programmer", default="radiant", help="Programmer (radiant or ecpprog).")
    parser.add_target_argument("--prog-target", default="direct", help="Programming Target (direct or flash).")
    args = parser.parse_args()

    soc = BaseSoC(
        sys_clk_freq = int(float(args.sys_clk_freq)),
        device       = args.device,
        toolchain    = args.toolchain,
        **parser.soc_argdict
    )

    builder = Builder(soc, **parser.builder_argdict)
    if args.build:
        builder.build(**parser.toolchain_argdict)

    if args.load:
        prog = soc.platform.create_programmer(args.prog_target, args.programmer)
        if args.programmer == "ecpprog" and args.prog_target == "flash":
            prog.flash(address=args.address, bitstream=builder.get_bitstream_filename(mode="sram"))
        else:
            if args.programmer == "radiant":
                os.system("sudo modprobe -rf ftdi_sio")
            prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))
            if args.programmer == "radiant":
                os.system("sudo modprobe ftdi_sio")
[ 57 ]
def METHOD_NAME(self):
    encrypted = encrypt_binary(b"stuff", self.key)
    with self.assertRaises(ValueError):
        decrypt_binary(encrypted[1:], self.key)
[ 9, 443, 532, 2426 ]
def METHOD_NAME(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets all the available express route service providers.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized response
    :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ExpressRouteServiceProvider
    :rtype: ~azure.mgmt.network.v2019_06_01.models.ExpressRouteServiceProviderPaged[~azure.mgmt.network.v2019_06_01.models.ExpressRouteServiceProvider]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def prepare_request(next_link=None):
        if not next_link:
            # Construct URL
            url = self.METHOD_NAME.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def internal_paging(next_link=None):
        request = prepare_request(next_link)

        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    header_dict = None
    if raw:
        header_dict = {}
    deserialized = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies, header_dict)

    return deserialized
[ 245 ]
def METHOD_NAME(self, test, litConfig):
    testPath, testName = os.path.split(test.getSourcePath())
    while not os.path.exists(testPath):
        # Handle GTest parametrized and typed tests, whose name includes
        # some '/'s.
        testPath, namePrefix = os.path.split(testPath)
        testName = namePrefix + '/' + testName

    cmd = [testPath, '--gtest_filter=' + testName]
    if litConfig.useValgrind:
        cmd = litConfig.valgrindArgs + cmd

    if litConfig.noExecute:
        return lit.Test.PASS, ''

    out, err, exitCode = lit.util.executeCommand(
        cmd, env=test.config.environment)

    if exitCode:
        return lit.Test.FAIL, out + err

    passing_test_line = '[  PASSED  ] 1 test.'
    if passing_test_line not in out:
        msg = ('Unable to find %r in gtest output:\n\n%s%s' %
               (passing_test_line, out, err))
        return lit.Test.UNRESOLVED, msg

    return lit.Test.PASS, ''
[ 750 ]
def METHOD_NAME(protocol, status):
    """
    Return the proper error code depending on the protocol
    """
    status, codes = STATUSES[status]
    return status, codes[protocol]
[ 19, 452 ]
def METHOD_NAME(self, bin_path):
    if bin_path is None:
        return

    flags = {}
    if sys.platform.startswith("win"):
        DETACHED_PROCESS = 0x00000008
        flags["creationflags"] = DETACHED_PROCESS

    apppath = app_path(pythonw=False, as_list=True)
    shell = sys.platform.startswith("win")

    # default to using daemon
    # if the user chooses to use angr URL scheme to load a binary, they are more likely to keep interacting with
    # this binary using angr URL scheme, which requires the angr management instance to run in with-daemon mode.
    subprocess.Popen(
        apppath + ["-d", bin_path], shell=shell, stdin=None, stdout=None, stderr=None, close_fds=True, **flags
    )
[ 2883, 1452 ]
def METHOD_NAME(self): return float(abs(self.magnitude.std_dev / self.magnitude.nominal_value))
[ 2071 ]
def METHOD_NAME(client):
    """Verify error on bad autocomplete request for budget function."""

    resp = client.post(ENDPOINT, content_type="application/json", data=json.dumps({"filters": {}}))
    assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
[ 9, 11046, 604, 1853, 12753, 374 ]
def METHOD_NAME(self, event: Event, state: State, logger: Optional[Logger]) -> Optional[int]:
    assert state.optimizers is not None

    state.optimizers = tuple(
        SAMOptimizer(
            base_optimizer=optimizer,
            rho=self.rho,
            epsilon=self.epsilon,
            interval=self.interval,
        ) for optimizer in ensure_tuple(state.optimizers))
[ 231 ]
def METHOD_NAME(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())
[ 24, 3 ]
def METHOD_NAME(self):
    python = np.array([2, 3, 7, 5, 26, 221, 44, 233, 254, 265, 266, 267, 120, 111])
    pypy = np.array([12, 33, 47, 15, 126, 121, 144, 233, 254, 225, 226, 267, 110, 130])
    jython = np.array([22, 43, 10, 25, 26, 101, 114, 203, 194, 215, 201, 227, 139, 160])

    dims = dict(kdims="time", vdims="memory")
    python = Area(python, label="python", **dims)
    pypy = Area(pypy, label="pypy", **dims)
    jython = Area(jython, label="jython", **dims)

    overlay = Area.stack(python * pypy * jython)
    labels = [n[1] for n in overlay.data]
    self.assertEqual(labels, ['Python', 'Pypy', 'Jython'])
[ 9, 690, 5515 ]
def METHOD_NAME(
    self, path: DbPath, raw_schema: Dict[str, tuple], filter_columns: Sequence[str], where: str = None
):
    accept = {i.lower() for i in filter_columns}
    rows = [row for name, row in raw_schema.items() if name.lower() in accept]

    resulted_rows = []
    for row in rows:
        row_type = "DECIMAL" if row[1].startswith("DECIMAL") else row[1]
        type_cls = self.dialect.TYPE_CLASSES.get(row_type, UnknownColType)

        if issubclass(type_cls, Integer):
            row = (row[0], row_type, None, None, 0)

        elif issubclass(type_cls, Float):
            numeric_precision = math.ceil(row[2] / math.log(2, 10))
            row = (row[0], row_type, None, numeric_precision, None)

        elif issubclass(type_cls, Decimal):
            items = row[1][8:].rstrip(")").split(",")
            numeric_precision, numeric_scale = int(items[0]), int(items[1])
            row = (row[0], row_type, None, numeric_precision, numeric_scale)

        elif issubclass(type_cls, Timestamp):
            row = (row[0], row_type, row[2], None, None)

        else:
            row = (row[0], row_type, None, None, None)

        resulted_rows.append(row)

    col_dict: Dict[str, ColType] = {row[0]: self.dialect.parse_type(path, *row) for row in resulted_rows}

    self._refine_coltypes(path, col_dict, where)
    return col_dict
[ 356, 410, 135 ]
def METHOD_NAME(self) -> None:
    slack_user_map = {"U08RGD1RD": 540, "U0CBK5KAT": 554, "U09TYF5SK": 571}
    # For this test, only relevant keys are 'id', 'name', 'deleted'
    # and 'real_name'
    users = [
        {
            "id": "U0CBK5KAT",
            "name": "aaron.anzalone",
            "deleted": False,
            "is_mirror_dummy": False,
            "real_name": "",
        },
        {
            "id": "U08RGD1RD",
            "name": "john",
            "deleted": False,
            "is_mirror_dummy": False,
            "real_name": "John Doe",
        },
        {
            "id": "U09TYF5Sk",
            "name": "Jane",
            "is_mirror_dummy": False,
            "deleted": True,
            # Deleted users don't have 'real_name' key in Slack
        },
    ]
    channel_map = {"general": ("C5Z73A7RA", 137)}
    message = "Hi <@U08RGD1RD|john>: How are you? <#C5Z73A7RA|general>"
    text, mentioned_users, has_link = convert_to_zulip_markdown(
        message, users, channel_map, slack_user_map
    )
    full_name = get_user_full_name(users[1])
    self.assertEqual(full_name, "John Doe")
    self.assertEqual(get_user_full_name(users[2]), "Jane")

    self.assertEqual(text, f"Hi @**{full_name}**: How are you? #**general**")
    self.assertEqual(mentioned_users, [540])

    # multiple mentioning
    message = "Hi <@U08RGD1RD|john>: How are you?<@U0CBK5KAT> asked."
    text, mentioned_users, has_link = convert_to_zulip_markdown(
        message, users, channel_map, slack_user_map
    )
    self.assertEqual(text, "Hi @**John Doe**: How are you?@**aaron.anzalone** asked.")
    self.assertEqual(mentioned_users, [540, 554])

    # Check wrong mentioning
    message = "Hi <@U08RGD1RD|jon>: How are you?"
    text, mentioned_users, has_link = convert_to_zulip_markdown(
        message, users, channel_map, slack_user_map
    )
    self.assertEqual(text, message)
    self.assertEqual(mentioned_users, [])
[ 9, -1, 365 ]
def METHOD_NAME(message_len, min_fragment_len, max_fragment_len):
    assert message_len > 0
    assert min_fragment_len > 0
    assert max_fragment_len >= min_fragment_len

    max_fragment_count = message_len // min_fragment_len
    fragment_len = None
    for fragment_count in range(1, max_fragment_count + 1):
        fragment_len = math.ceil(message_len / fragment_count)
        if fragment_len <= max_fragment_len:
            break
    assert fragment_len != None
    return fragment_len
[ 416, 4203, 4014, 799 ]
def METHOD_NAME():
    res = query("""SELECT (xsd:double(1) as ?x) {}""")
    eq_(list(res)[0][0], Literal("1", datatype=XSD.double))
[ 9, 3723, 962, 24, 2152 ]
async def METHOD_NAME(pipeline_response):
    deserialized = self._deserialize(
        'OperationListResult',
        pipeline_response)
    list_of_elem = deserialized.value
    if cls:
        list_of_elem = cls(list_of_elem)
    return deserialized.next_link or None, AsyncList(list_of_elem)
[ 297, 365 ]
def METHOD_NAME(self):
    types = [
        (MySimpleInputType, False),
        (MyInputType, False),
        (MyDeclaredPropertiesInputType, True),
    ]
    for typ, has_doc in types:
        t = typ(first_value="hello", second_value=42)
        self.assertEqual("hello", t.first_value)
        self.assertEqual(42, t.second_value)

        t.first_value = "world"
        self.assertEqual("world", t.first_value)

        t.second_value = 500
        self.assertEqual(500, t.second_value)

        first = typ.first_value
        self.assertIsInstance(first, property)
        self.assertTrue(callable(first.fget))
        self.assertEqual("first_value", first.fget.__name__)
        self.assertEqual({"return": pulumi.Input[str]}, first.fget.__annotations__)
        if has_doc:
            self.assertEqual("First value docstring.", first.fget.__doc__)
        self.assertEqual("firstValue", first.fget._pulumi_name)
        self.assertTrue(callable(first.fset))
        self.assertEqual("first_value", first.fset.__name__)
        self.assertEqual({"value": pulumi.Input[str]}, first.fset.__annotations__)

        second = typ.second_value
        self.assertIsInstance(second, property)
        self.assertTrue(callable(second.fget))
        self.assertEqual("second_value", second.fget.__name__)
        self.assertEqual({"return": Optional[pulumi.Input[float]]}, second.fget.__annotations__)
        if has_doc:
            self.assertEqual("Second value docstring.", second.fget.__doc__)
        self.assertEqual("secondValue", second.fget._pulumi_name)
        self.assertTrue(callable(second.fset))
        self.assertEqual("second_value", second.fset.__name__)
        self.assertEqual({"value": Optional[pulumi.Input[float]]}, second.fset.__annotations__)

        self.assertEqual({
            "firstValue": "world",
            "secondValue": 500,
        }, _types.input_type_to_dict(t))

        self.assertTrue(hasattr(t, "__eq__"))
        self.assertTrue(t.__eq__(t))
        self.assertTrue(t == t)
        self.assertFalse(t != t)
        self.assertFalse(t == "not equal")

        t2 = typ(first_value="world", second_value=500)
        self.assertTrue(t.__eq__(t2))
        self.assertTrue(t == t2)
        self.assertFalse(t != t2)

        self.assertEqual({
            "firstValue": "world",
            "secondValue": 500,
        }, _types.input_type_to_dict(t2))

        t3 = typ(first_value="foo", second_value=1)
        self.assertFalse(t.__eq__(t3))
        self.assertFalse(t == t3)
        self.assertTrue(t != t3)

        self.assertEqual({
            "firstValue": "foo",
            "secondValue": 1,
        }, _types.input_type_to_dict(t3))
[ 9, 362, 44 ]
def METHOD_NAME(self):
    """tick Update elapsed time by getting the current time."""
    if self._total_time is None:
        raise RuntimeError('progress not configured with "time" value')
    elapsed = datetime.now() - self._start_time
    pct = elapsed.total_seconds() / self._total_time
    return self.update_percent(pct)
[ 4115 ]
def METHOD_NAME(self): pass
[ 238, 684, 403, 331 ]
def METHOD_NAME(self):
    op_table = ((unittest.skipUnless, False, True),
                (unittest.skipIf, True, False))
    for deco, do_skip, dont_skip in op_table:
        class Foo(unittest.TestCase):
            @deco(do_skip, "testing")
            def test_skip(self): pass

            @deco(dont_skip, "testing")
            def test_dont_skip(self): pass

        test_do_skip = Foo("test_skip")
        test_dont_skip = Foo("test_dont_skip")
        suite = unittest.TestSuite([test_do_skip, test_dont_skip])
        events = []
        result = LoggingResult(events)
        suite.run(result)
        self.assertEqual(len(result.skipped), 1)
        expected = ['startTest', 'addSkip', 'stopTest',
                    'startTest', 'addSuccess', 'stopTest']
        self.assertEqual(events, expected)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(result.skipped, [(test_do_skip, "testing")])
        self.assertTrue(result.wasSuccessful())
[ 9, 10682, 4263 ]
def METHOD_NAME(random_f_C_i, manual_bootstrap_samples_f_C_i):
    """Test the bootstrap sampling for a matrix."""
    # Prepare
    np.random.seed(12345)  #! set seed for reproducibility
    gen_f_C_i = SobolSensitivity.bootstrap_sample_generator_2D(random_f_C_i)
    bootstrap_samples_C_i = next(gen_f_C_i)
    # Act
    assert np.array_equal(manual_bootstrap_samples_f_C_i, bootstrap_samples_C_i)
[ 9, 904, 43, 430 ]
async def METHOD_NAME(self):
    with pytest.raises(ValueError):
        await api.artifacts.create_task_run_artifact(
            task_run_id="",
            kind="link",
            data={"link": "http"},
        )
[ 9, 129, 1831, 654, 758, 22, 147 ]
def METHOD_NAME(self, mock_get_team_sub_for_student):
    team_submission_uuid = 'this-is-the-uuid'
    mock_get_team_sub_for_student.return_value = {'team_submission_uuid': team_submission_uuid}
    self.assertEqual(self.test_block.get_team_submission_uuid(), team_submission_uuid)
    mock_get_team_sub_for_student.assert_called_with(STUDENT_ITEM_DICT)
[ 9, 19, 2957, 1978, 4977 ]
def METHOD_NAME(
    path,
    speedx,
    speedy,
    speedz,
    offset,
    maxt,
    throttle,
    throttle_lines,
    g90_extruder,
    bedz,
    progress,
    layers,
):
    """Runs a GCODE file analysis."""

    import time

    from octoprint.util.gcodeInterpreter import gcode

    throttle_callback = None
    if throttle:

        def throttle_callback(filePos, readBytes):
            if filePos % throttle_lines == 0:
                # only apply throttle every $throttle_lines lines
                time.sleep(throttle)

    offsets = offset
    if offsets is None:
        offsets = []
    elif isinstance(offset, tuple):
        offsets = list(offsets)
    offsets = [(0, 0)] + offsets
    if len(offsets) < maxt:
        offsets += [(0, 0)] * (maxt - len(offsets))

    start_time = time.monotonic()

    progress_callback = None
    if progress:

        def progress_callback(percentage):
            click.echo(f"PROGRESS:{percentage}")

    interpreter = gcode(progress_callback=progress_callback, incl_layers=layers)

    interpreter.load(
        path,
        speedx=speedx,
        speedy=speedy,
        offsets=offsets,
        throttle=throttle_callback,
        max_extruders=maxt,
        g90_extruder=g90_extruder,
        bed_z=bedz,
    )

    click.echo(f"DONE:{time.monotonic() - start_time}s")

    result = interpreter.get_result()
    if empty_result(result):
        click.echo("EMPTY:There are no extrusions in the file, nothing to analyse")
        sys.exit(0)

    if not validate_result(result):
        click.echo(
            "ERROR:Invalid analysis result, please create a bug report in OctoPrint's "
            "issue tracker and be sure to also include the GCODE file with which this "
            "happened"
        )
        sys.exit(-1)

    click.echo("RESULTS:")
    click.echo(yaml.dump(interpreter.get_result(), pretty=True))
[ 11713, 462 ]
def METHOD_NAME(temp_dir):
    path = Path(f'{temp_dir}/index.html')
    path.write_text('0123456789')

    assert 'success' in client.conf(
        {
            "listeners": {
                "*:7080": {"pass": "routes"},
            },
            "routes": [
                {
                    "action": {
                        "share": str(path),
                        "response_headers": {
                            "X-Foo": "foo",
                        },
                    }
                }
            ],
        }
    )
[ 102, 103, 1964 ]
def METHOD_NAME(filename):
    invalid = False
    # pattern = re.compile("[^\x00-\x7F]") #do ot want to replace printable chars like €¢ etc
    pattern = re.compile(
        "[\u200B-\u200E\uFEFF\u202c\u202D\u2063\u2062]"
    )  # zero width characters
    error_message = ""
    for i, line in enumerate(open(filename)):
        for match in re.finditer(pattern, line):
            err = f"Unicode char in FILE {filename} Line {i+1}: {match.group().encode('utf-8')}"
            error_message += f"{err}\n"
            LOGGER.debug(err)
            invalid = True
    if invalid:
        raise ValueError(error_message)
[ 9, 774, 3874 ]
def METHOD_NAME(config: BottleConfig, scope: str, path: str) -> Result:
    """
    Exports a bottle backup to the specified path.
    Use the scope parameter to specify the backup type: config, full.
    Config will only export the bottle configuration, full will
    export the full bottle in tar.gz format.
    """
    if path in [None, ""]:
        logging.error(_("No path specified"))
        return Result(status=False)

    logging.info(f"New {scope} backup for [{config.Name}] in [{path}]")

    if scope == "config":
        backup_created = config.dump(path).status
    else:
        task_id = TaskManager.add(Task(title=_("Backup {0}").format(config.Name)))
        bottle_path = ManagerUtils.get_bottle_path(config)

        try:
            with tarfile.open(path, "w:gz") as tar:
                parent = os.path.dirname(bottle_path)
                folder = os.path.basename(bottle_path)
                os.chdir(parent)
                tar.add(folder, filter=BackupManager.exclude_filter)
            backup_created = True
        except (FileNotFoundError, PermissionError, tarfile.TarError, ValueError):
            logging.error(f"Error creating backup for [{config.Name}]")
            backup_created = False
        finally:
            TaskManager.remove(task_id)

    if backup_created:
        logging.info(f"New backup saved in path: {path}.", jn=True)
        return Result(status=True)

    logging.error(f"Failed to save backup in path: {path}.")
    return Result(status=False)
[ 294, 1001 ]
def METHOD_NAME(self, url):
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)

    category_id = self._search_regex(r'categoryId=(.+)">', webpage, 'category ID')
    if category_id not in ('1', '2', '21', '22', '23', '24', '25'):
        raise ExtractorError('The URL does not contain audio.', expected=True)

    str_duration, str_filesize = self._search_regex(
        r'サイズ:</span>(.+?)/\(([0-9,]+?[KMG]?B))', webpage, 'duration and size',
        group=(1, 2), default=(None, None))
    str_viewcount = self._search_regex(r'閲覧数:</span>([0-9,]+)\s+', webpage, 'view count', fatal=False)

    uploader_id, uploader = self._search_regex(
        r'<a\s+class="cd_user-name"\s+href="/(.*)">([^<]+)さん<', webpage, 'uploader',
        group=(1, 2), default=(None, None))
    content_id = self._search_regex(r'contentId\:\'(.+)\'', webpage, 'content ID')
    create_date = self._search_regex(r'createDate\:\'(.+)\'', webpage, 'timestamp')

    player_webpage = self._download_webpage(
        f'https://piapro.jp/html5_player_popup/?id={content_id}&cdate={create_date}',
        video_id, note='Downloading player webpage')

    return {
        'id': video_id,
        'title': self._html_search_regex(r'<h1\s+class="cd_works-title">(.+?)</h1>', webpage, 'title', fatal=False),
        'description': self._html_search_regex(r'(?s)<p\s+class="cd_dtl_cap">(.+?)</p>\s*<div', webpage, 'description', fatal=False),
        'uploader': uploader,
        'uploader_id': uploader_id,
        'timestamp': unified_timestamp(create_date, False),
        'duration': parse_duration(str_duration),
        'view_count': str_to_int(str_viewcount),
        'thumbnail': self._html_search_meta('twitter:image', webpage),
        'filesize_approx': parse_filesize(str_filesize.replace(',', '')),
        'url': self._search_regex(r'mp3:\s*\'(.*?)\'\}', player_webpage, 'url'),
        'ext': 'mp3',
        'vcodec': 'none',
    }
[ 1866, 297 ]
def METHOD_NAME(client, source_plugin, token, status):
    assert (
        client.get(api.url_for(Sources, source_id=43543), headers=token).status_code
        == status
    )
[ 9, 1458, 19 ]
def METHOD_NAME(self):
    progname = os.path.split(sys.argv[0])[-1]
    help_path = os.path.join(sys.path[0], '..', 'docs', 'cli_help', progname + '_help')
    f = None
    try:
        try:
            f = open(help_path); return f.read(-1)
        except:
            return None
    finally:
        if f:
            f.close()
[ 203, -1 ]
def METHOD_NAME(self) -> List[str]:
[ 19, 471 ]
def METHOD_NAME():
    m = 10240
    n = 10240
    A = random.randint(-127, 128, size=(m, n), dtype="int32")
    s = te.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.randint", True):
            print("skip because extern function is not available")
            return
        dev = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
        f(a)
        na = a.numpy()
        assert abs(np.mean(na)) < 0.3
        assert np.min(na) == -127
        assert np.max(na) == 127

    verify()
[ 9, 8621 ]
def METHOD_NAME(self, **kwargs):  # noqa: E501
    """Get healthz data.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_healthz(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request timeout. It can also be a
        pair (tuple) of (connection, read) timeouts.
    :return: Returns the result object.
        If the method is called asynchronously,
        returns the request thread.
    :rtype: ApiGetHealthzResponse
    """
    kwargs['_return_http_data_only'] = True
    return self.get_healthz_with_http_info(**kwargs)  # noqa: E501
[ 19, 4173 ]
def METHOD_NAME(self): pass
[ 638, 2647, 2433, 167 ]
def METHOD_NAME(self):
    """Return the current performance for this segment."""
    return self.performance
[ 19, 3731 ]
def METHOD_NAME():
    with pytest.raises(KeyError):
        remove_component("not real")
[ 9, 188, 130, 3024 ]
def METHOD_NAME():
    log.info("Disable valgrind")
    valgrind_disable(m1.ds_paths.sbin_dir)
[ 3221 ]
def METHOD_NAME(self):
    txt = load_file(self.current_dir + '/test_data.txt')
    with open(self.current_dir + '/test_data.txt') as f:
        content = f.readlines()
    self.assertEqual(txt, content)
[ 9, 557, 171, 310 ]
def METHOD_NAME(self):
    if self.spec["iconv"].name == "libc":
        return

    # On Linux systems, iconv is provided by libc. Since CMake finds the
    # symbol in libc, it does not look for libiconv, which leads to linker
    # errors. This makes sure that CMake always looks for the external
    # libconv instead.
    filter_file(
        "check_function_exists(iconv_open ICONV_IN_GLIBC)",
        "set(ICONV_IN_GLIBC FALSE)",
        join_path("cmake", "FindIconv.cmake"),
        string=True,
    )
[ 1575 ]
def METHOD_NAME(self, session): pass
[ 69, 1072 ]
def METHOD_NAME(self, command_args):
    super().METHOD_NAME(command_args)
    return self.build_lro_poller(self._execute_operations, None)
[ 1519 ]
def METHOD_NAME(self):
    new_password = " N3wP@55w0rd "

    self.login_user(self.user)

    response = self.client.post(
        self.link,
        data={"new_password": new_password, "password": self.USER_PASSWORD},
    )
    self.assertEqual(response.status_code, 403)

    self.assertEqual(len(mail.outbox), 0)
[ 9, 194, 2897, 58, 610, 611, 217 ]
def METHOD_NAME(self):
    copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
    meson = Meson(self)
    meson.install()

    # some files extensions and folders are not allowed. Please, read the FAQs to get informed.
    rmdir(self, os.path.join(self.package_folder, "share"))
    rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
    rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))

    # In shared lib/executable files, meson set install_name (macOS) to lib dir absolute path instead of @rpath, it's not relocatable, so fix it
    fix_apple_shared_install_name(self)
[ 360 ]
def METHOD_NAME(description: str) -> str:
    """
    (Lazily) formats description by getting rid of HTML tags,
    random unicode characters and multiple spaces.
    """
    # Get rid of any html tags
    description = re.sub(r"<.+?>", " ", description)
    # Get rid of random unicode characters
    description = re.sub(r"[^\x00-\x7F]+", " ", description)
    # Get rid of double spaces and new lines mixed with spaces
    description = re.sub(r" +", "\n", description)
    description = re.sub(r" *\n *", "\n", description)

    return description.strip()
[ 275, 1067 ]
def METHOD_NAME():
    with expected_protocol(
        Fpu60,
        [("SHUTTER?", "SHUTTER CLOSED")],
    ) as inst:
        assert inst.shutter_open is False
[ 9, 4731, 1462, 203 ]
def METHOD_NAME(mat_m):
[ 16301, 2323, 3992, 988 ]
def METHOD_NAME(self, scope=None):
    """Transpose the coordinate representation in a boxlist.

    Args:
      scope: name scope of the function.
    """
    with tf.name_scope(scope, "transpose_coordinates"):
        y_min, x_min, y_max, x_max = tf.split(
            value=self.get(), num_or_size_splits=4, axis=1
        )
        self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
[ 3386, 4645 ]
def METHOD_NAME(cls):
    if cls._schema_on_200 is not None:
        return cls._schema_on_200

    cls._schema_on_200 = AAZObjectType()

    _schema_on_200 = cls._schema_on_200
    _schema_on_200.primarybytes_in = AAZIntType(
        serialized_name="primarybytesIn",
    )
    _schema_on_200.primarybytes_out = AAZIntType(
        serialized_name="primarybytesOut",
    )
    _schema_on_200.secondarybytes_in = AAZIntType(
        serialized_name="secondarybytesIn",
    )
    _schema_on_200.secondarybytes_out = AAZIntType(
        serialized_name="secondarybytesOut",
    )

    return cls._schema_on_200
[ 56, 135, 69, 1072 ]
def METHOD_NAME():
    with open("runDB.json") as f:
        runDB = json.load(f)
    tier_dir = os.path.expandvars(runDB["tier_dir"])
    meta_dir = os.path.expandvars(runDB["meta_dir"])

    df = pd.read_hdf('{}/t2_run{}.h5'.format(tier_dir, sys.argv[1]))

    m = np.array(df['e_ftp'])

    plt.hist(m, np.arange(0, 9500, 0.5), histtype='step', color='black', label='non-calibrated spectrum')
    plt.xlabel('e_ftp', ha='right', x=1.0)
    plt.ylabel('Counts', ha='right', y=1.0)
    plt.xlim(0, 9500)
    #plt.ylim(0,4000)
    plt.legend(frameon=True, loc='upper right', fontsize='small')
    plt.show()
[ 1288, 772 ]
def METHOD_NAME(x, n): return 2 * dot(x, n) * n - x
[ 6044 ]
def METHOD_NAME(self):
    sys.argv = self.args_list
    parser = self.subject.GpSegStart.createParser()
    options, args = parser.parse_args()

    gpsegstart = self.subject.GpSegStart.createProgram(options, args)
    exitCode = gpsegstart.run()

    self.assertEqual(exitCode, 0)
    for result in gpsegstart.overall_status.results:
        self.assertTrue(result.reasoncode == gp.SEGSTART_SUCCESS)
[ 9, 447, 1690 ]
def METHOD_NAME(dut, asic_index, interfaces, xcvr_skip_list):
    """
    @summary: Check transceiver information of all the specified interfaces in redis DB.
    @param dut: The AnsibleHost object of DUT. For interacting with DUT.
    @param interfaces: List of interfaces that need to be checked.
    """
    check_transceiver_basic(dut, asic_index, interfaces, xcvr_skip_list)
    check_transceiver_details(dut, asic_index, interfaces, xcvr_skip_list)
    check_transceiver_dom_sensor_basic(dut, asic_index, interfaces, xcvr_skip_list)
    check_transceiver_dom_sensor_details(dut, asic_index, interfaces, xcvr_skip_list)
[ 250, 6681, 452 ]
def METHOD_NAME(self) -> IOLoop:
    """Override Adaptive.loop"""
    if self.cluster:
        return self.cluster.METHOD_NAME
    else:
        return IOLoop.current()
[ 1751 ]
def METHOD_NAME(self):
    """Return timer interval in ms"""
    return 1000.0 / self.fps
[ 19, 3223 ]
def METHOD_NAME(registry):
    # This case should not typically happen outside of the testing environment
    with pytest.raises(GirderException, match='Plugin missing is not installed'):
        plugin._loadPlugins(info={}, names=['missing'])
    assert plugin.loadedPlugins() == []
[ 9, 557, 1294, 1038 ]
def METHOD_NAME():
    qrom = cirq_ft.SelectSwapQROM([1, 2, 5, 6, 7, 8])
    assert hash(qrom) is not None
    assert cirq_ft.t_complexity(qrom) == cirq_ft.TComplexity(32, 160, 0)
[ 9, 8440, 6032 ]
def METHOD_NAME(self, pipeline, processor):
[ 56, 5176 ]
def METHOD_NAME(self) -> np.ndarray:
    """Standard spatial mode."""
    Iden = edge2mat(self.self_link, self.num_node)
    In = normalize_digraph(edge2mat(self.inward, self.num_node))
    Out = normalize_digraph(edge2mat(self.outward, self.num_node))
    A = np.stack((Iden, In, Out))
    return A
[ 6580 ]
async def METHOD_NAME(self, url: str, **kwargs) -> aiohttp.ClientResponse: return await self.request('PUT', url, **kwargs)
[ 1276 ]
def METHOD_NAME(cmd, default_rule_group_id, default_rule_group_name, mac_region, azure_monitor_workspace_resource_id, cluster_name, default_rules_template, url, enable_rules, i):
    from azure.cli.core.util import send_raw_request
    body = json.dumps({
        "id": default_rule_group_id,
        "name": default_rule_group_name,
        "type": "Microsoft.AlertsManagement/prometheusRuleGroups",
        "location": mac_region,
        "properties": {
            "scopes": [
                azure_monitor_workspace_resource_id
            ],
            "enabled": enable_rules,
            "clusterName": cluster_name,
            "interval": "PT1M",
            "rules": default_rules_template[i]["properties"]["rulesArmTemplate"]["resources"][0]["properties"]["rules"]
        }
    })
    for _ in range(3):
        try:
            headers = ['User-Agent=azuremonitormetrics.put_rules.' + default_rule_group_name]
            send_raw_request(cmd.cli_ctx, "PUT", url, body=body, headers=headers)
            break
        except CLIError as e:
            error = e
    else:
        raise error
[ 1276, 1634 ]
def METHOD_NAME(aggregator, instance, check):
    with mock.patch('datadog_checks.ibm_was.IbmWasCheck.make_request', return_value=mock_data('server.xml')):
        check = check(instance)
        check.check(instance)

    metrics_in_fixture = ['ibm_was.thread_pools.percent_used']

    for metric_name in common.METRICS_ALWAYS_PRESENT + metrics_in_fixture:
        aggregator.assert_metric(metric_name)
        aggregator.assert_metric_has_tag(metric_name, 'key1:value1')
[ 9, 1341, 1098, 2735, 253 ]
def METHOD_NAME(monkeypatch, dumbalgo):
    """Check whether algorithm will stop with base algorithm max_trials check"""
    monkeypatch.delattr(dumbalgo, "is_done")

    space = Space()
    space.register(Real("yolo1", "uniform", 1, 4))

    algo = dumbalgo(space)
    algo.suggest(5)
    for i in range(1, 5):
        backward.algo_observe(
            algo, [format_trials.tuple_to_trial((i,), space)], [dict(objective=3)]
        )

    assert len(algo.state_dict["registry"]["_trials"]) == 4
    assert not algo.is_done

    dumbalgo.max_trials = 4
    assert algo.is_done
[ 9, 137, 1658, 232, 4231 ]
def METHOD_NAME():
    """
    Regenerate Active Theme CSS file after migration.

    Necessary to reflect possible changes in the imported SCSS files.
    Called at the end of every `bench migrate`.
    """
    website_theme = frappe.db.get_single_value("Website Settings", "website_theme")
    if not website_theme or website_theme == "Standard":
        return

    doc = frappe.get_doc("Website Theme", website_theme)
    doc.save()  # Just re-saving re-generates the theme.
[ 1887, 2744 ]
def METHOD_NAME(self, archq):
    res = packagequery.cmp(int(self.epoch()), int(archq.epoch()))
    if res != 0:
        return res
    res = ArchQuery.rpmvercmp(self.version(), archq.version())
    if res != 0:
        return res
    res = ArchQuery.rpmvercmp(self.release(), archq.release())
    return res
[ -1 ]
def METHOD_NAME(self):
[ 9, 56, 1735 ]
def METHOD_NAME(self):
    self._display_widget = QtWidgets.QFrame(self)
    self._display_layout = QtWidgets.QVBoxLayout()
    self._display_layout.setContentsMargins(0, 0, 0, 0)

    self.img_layout_widget = GraphicsLayoutWidget()
    self.img_widget = MaskImgWidget(self.img_layout_widget)
    self._display_layout.addWidget(self.img_layout_widget)

    self._status_layout = QtWidgets.QHBoxLayout()
    self._status_layout.addSpacerItem(HorizontalSpacerItem())
    self.pos_lbl = LabelAlignRight('')
    self._status_layout.addWidget(self.pos_lbl)
    self._display_layout.addLayout(self._status_layout)

    self._display_widget.setLayout(self._display_layout)
[ 129, 52, 706 ]
def METHOD_NAME(database_name: Optional[str] = None,
                policy_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                server_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLongTermRetentionPolicyResult:
    """
    Gets a database's long term retention policy.

    :param str database_name: The name of the database.
    :param str policy_name: The policy name. Should always be Default.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    """
    __args__ = dict()
    __args__['databaseName'] = database_name
    __args__['policyName'] = policy_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:sql/v20230201preview:getLongTermRetentionPolicy', __args__, opts=opts, typ=GetLongTermRetentionPolicyResult).value

    return AwaitableGetLongTermRetentionPolicyResult(
        id=pulumi.get(__ret__, 'id'),
        make_backups_immutable=pulumi.get(__ret__, 'make_backups_immutable'),
        monthly_retention=pulumi.get(__ret__, 'monthly_retention'),
        name=pulumi.get(__ret__, 'name'),
        type=pulumi.get(__ret__, 'type'),
        week_of_year=pulumi.get(__ret__, 'week_of_year'),
        weekly_retention=pulumi.get(__ret__, 'weekly_retention'),
        yearly_retention=pulumi.get(__ret__, 'yearly_retention'))
[ 19, 524, 3108, 3786, 54 ]
def METHOD_NAME(self, execute_task):
    task = execute_task('test_with_series_qualities')
    assert task.accepted, 'series plugin should have used assumed quality'
[ 9, 41, 4045, 2163 ]
def METHOD_NAME(self):
    t = QGraphicsTextItem()
    t.setDefaultTextColor(QColor(self.color.hexcolor))
    if self.html:
        text = self.text.replace(u'\n', u'<br />')
        t.setHtml(
            u'<div align="center">%s</div>' % text if self.center else text
        )
    else:
        t.setPlainText(self.text)
    mw = self.max_width
    if mw is None:
        mw = self._canvas.width // 2 - self.x
    if self.center:
        mw *= 2
    t.setTextWidth(mw)
    # Register custom fonts that are placed in the file pool
    self._register_custom_font(self.font_family)
    f = QFont(
        self.font_family,
        weight=QFont.Bold if self.font_bold else QFont.Normal,
        italic=self.font_italic
    )
    for family, substitute in font_substitutions:
        f.insertSubstitution(substitute, family)
    f.setPixelSize(self.font_size)
    t.setFont(f)
    return t
[ 24, -1 ]
def METHOD_NAME(self, signum: signal.Signals, frame: FrameType) -> None: ...
[ 8989, 1519 ]
def METHOD_NAME(builder, shape):
    """This method is deprecated. Please switch to AddShape."""
    return AddShape(builder, shape)
[ 768, 238, 555 ]
def METHOD_NAME(ctx: click.Context, experiment_id: str, experiment_name: str):
    """Archive an experiment."""
    client = ctx.obj["client"]
    if (experiment_id is None) == (experiment_name is None):
        raise ValueError('Either experiment_id or experiment_name is required')
    if not experiment_id:
        experiment = client.get_experiment(experiment_name=experiment_name)
        experiment_id = experiment.id
    client.archive_experiment(experiment_id=experiment_id)
[ 1622 ]
def METHOD_NAME(self):
    """
    Ensure that a domain which is not tied to an active Identity Provider
    either by trusting one or through an account returns false
    """
    self.assertFalse(is_domain_using_sso(self.other_domain.name))
[ 9, 256, 5821, 1674, 610, 1168 ]
def METHOD_NAME(seq):
    """Return pitch names and MIDI values

    Given a list of MEI note elements, return the tuple (pnames, midipitch) where
    pnames is a string of the pitch names of the given notes (no octave information)
    and midipitch is a list of the midi values for those same pitches. Music21's
    convertStepToPs function is used to get midi pitch values.
    """
    pnames = []
    midipitch = []
    for note in seq:
        pnames.append(note.pitch_name)
        midipitch.append(int(convertStepToPs(note.pitch_name, note.octave)))
    return "".join(pnames), midipitch
[ 19, 4993 ]
def METHOD_NAME(self):
    transaction = TransactionFactory.create(state=Transaction.States.Settled)

    return_url = 'http://home.com'
    complete_url = "{}?return_url={}".format(get_payment_complete_url(transaction, None),
                                             return_url)
    expected_url = "{}?transaction_uuid={}".format(return_url, transaction.uuid)

    response = self.client.get(complete_url, follow=False)
    self.assertRedirects(response, expected_url, fetch_redirect_response=False)
[ 9, 676, 13, 1179, 41, 1413, 274 ]
def METHOD_NAME():
    """Parse command line options.

    Args:

    Returns:
        argparse object.

    Raises:
        IOError: if dir does not exist.
        IOError: if workflow does not exist.
        IOError: if the metadata file SnapshotInfo.csv does not exist in dir when flat is False.
        ValueError: if adaptor is not phenofront or dbimportexport.
        ValueError: if a metadata field is not supported.
    """
    parser = argparse.ArgumentParser(description='Parallel imaging processing with PlantCV.')
    config_grp = parser.add_argument_group("CONFIG")
    config_grp.add_argument("--template", required=False, help="Create a template configuration file.")
    run_grp = parser.add_argument_group("RUN")
    run_grp.add_argument("--config", required=False,
                         help="Input configuration file (created using the --template option).")
    args = parser.parse_args()

    # Create a config
    config = plantcv.parallel.WorkflowConfig()

    # Create a template configuration file if requested
    if args.template:
        config.save_config(config_file=args.template)
        sys.exit()

    # Import a configuration if provided
    if args.config:
        config.import_config(config_file=args.config)

    if not config.validate_config():
        raise ValueError("Invalid configuration file. Check errors above.")

    return config
[ 1881 ]