Dataset columns:
  text: string, lengths 15 to 7.82k
  ids: sequence, lengths 1 to 7
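Each row below pairs a text value (a Python snippet whose target method name is masked as METHOD_NAME) with an ids value (a short list of integers). A minimal sketch of how such rows could be consumed follows; the file name rows.jsonl, the JSON Lines layout, and the field access are assumptions made only for illustration, not something this dump specifies.

import json

# Hypothetical reader for rows shaped like the ones below: each row has a
# "text" field (code with METHOD_NAME as the masked target) and an "ids"
# field (a short list of integers). The file name and JSONL layout are
# assumptions made only for this sketch.
with open("rows.jsonl", encoding="utf8") as fh:
    for line in fh:
        row = json.loads(line)
        code = row["text"]   # string, roughly 15 to 7.8k characters
        ids = row["ids"]     # list of 1 to 7 integers
        assert "METHOD_NAME" in code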
def METHOD_NAME(msg): """ Processes a Git commit message to ensure it meets the Git commit style guidelines documented at: https://github.com/linkedin/Brooklin/blob/master/CONTRIBUTING.md#git-commit-messages-style-guide Args: msg (str): The entire Git commit message (possibly multiline) Returns: str: The processed message with minor improvements applied if applicable. These improvements are: - Capitalizing first letter of subject line - Removing periods from the end of subject line - Inserting a newline between subject line and description - Wrapping the description at 100 characters Raises: ValueError: if msg contains style violations that cannot be fixed automatically These violations are: 1. A subject line whose length exceeds 120 characters 2. A reference to an internal Jira ticket (DDSDBUS, LISAMZA, or DATAPIPES) """ if not msg: return msg # Filter out comments as well as empty lines added by previous Git hooks msg_lines = [] for msg_line in msg.splitlines(): msg_line = msg_line.strip() if msg_line and not msg_line.startswith('#'): msg_lines.append(msg_line) # If msg is composed entirely of new lines or comments, just return it as is. if not msg_lines: return msg subject = msg_lines[0].strip() # Limit the subject line to 120 characters if len(subject) > MAX_SUBJECT_LENGTH: raise ValueError("Commit message subject line is too long [{} chars max]".format(MAX_SUBJECT_LENGTH)) # Do not make references to internal Jira tickets if re.search('(DDSDBUS|LISAMZA|DATAPIPES)-\d+', "".join(msg_lines), flags=re.IGNORECASE): raise ValueError("Commit messages must not reference internal Jira tickets (DDSDBUS, LISAMZA, DATAPIPES)") # Capitalize the subject line if subject[0].islower(): subject = subject.capitalize() print_warning("Capitalized first letter of commit message subject line") # Do not end the subject line with a period if subject[-1] == '.': subject = subject.rstrip(".") if not subject: raise ValueError("Commit messages cannot have subject lines composed entirely of periods") print_warning("Removed trailing period(s) from commit message subject line") # Wrap the body at 100 characters description = [] for desc_line in msg_lines[1:]: description.append("\n".join(textwrap.wrap(desc_line, MAX_DESCRIPTION_LINE_LENGTH))) if any(len(line) > MAX_DESCRIPTION_LINE_LENGTH for line in msg_lines): print_warning("Wrapping lines longer than {} chars in commit message description".format(MAX_DESCRIPTION_LINE_LENGTH)) # Separate subject from body with a blank line return subject if not description else subject + "\n\n" + "\n".join(description)
[ 356, 1160, 277 ]
def METHOD_NAME(self, query): """ Perform the redirect to the CAS server. :rtype : Response :param query: All query parameters to be added to the return_to URL after successful authentication. :return: A redirect response to the CAS server. """ try: req = parse_qs(query) acr: Optional[str] = req["acr_values"][0] except KeyError: acr = None nonce = uuid.uuid4().urn.encode() service_url = urlencode({self.CONST_SERVICE: self.get_service_url(nonce, acr)}) cas_url = self.cas_server + self.CONST_CASLOGIN + service_url cookie = self.create_cookie( '{"' + self.CONST_NONCE + '": "' + base64.b64encode(nonce).decode() + '", "' + self.CONST_QUERY + '": "' + base64.b64encode(query).decode() + '"}', self.CONST_CAS_COOKIE, self.CONST_CAS_COOKIE, ) return SeeOther(cas_url, headers=[cookie])
[ 129, 1736 ]
def METHOD_NAME(err): errors.append(err)
[ 238, 168 ]
def METHOD_NAME(self) -> str:
    """
    Resource ID.
    """
    return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self):
    return {
        "config-version": 2,
        "models": {
            "test": {
                "users": {"tags": "specified_as_string"},
                "users_rollup": {
                    "tags": ["specified_in_project"],
                },
            }
        },
    }
[ 155, 200, 86 ]
def METHOD_NAME(config):
    # Add a directive to get an alembic configuration.
    config.add_directive("alembic_config", _configure_alembic)

    # Create our SQLAlchemy Engine.
    config.registry["sqlalchemy.engine"] = sqlalchemy.create_engine(
        config.registry.settings["database.url"],
        isolation_level=DEFAULT_ISOLATION,
        pool_size=35,
        max_overflow=65,
        pool_timeout=20,
    )

    # Register our request.db property
    config.add_request_method(_create_session, name="db", reify=True)
[ 9995 ]
def METHOD_NAME(self):
    super(InaccessibleSysPath, self).METHOD_NAME()
    self.setUpPyfakefs()
    self.fs.create_dir(self.site_dir, perm_bits=000)
[ 0, 1 ]
def METHOD_NAME(self, id: int) -> PullRequestComment: ...
[ 19, 4381, 1591 ]
def METHOD_NAME(self, address: bytes) -> EthereumTokenInfo:
    from .tokens import UNKNOWN_TOKEN, token_by_chain_address

    # if we have a built-in definition, use it
    token = token_by_chain_address(self.network.chain_id, address)
    if token is not None:
        return token

    if address in self._tokens:
        return self._tokens[address]

    return UNKNOWN_TOKEN
[ 19, 466 ]
def METHOD_NAME(self, role, is_admin):
    lti_user = factories.LTIUser(lti_roles=[role])

    assert lti_user.is_admin == is_admin
[ 9, 137, 2870 ]
def METHOD_NAME(f: BinaryIO) -> CoffHeader: """Read the Common Object File Format (COFF) Header of the open file at `f`""" # Quote from the "PE Format" article (see [1] in this module's doc string): # "[...] at the file offset specified at offset 0x3c, is a 4-byte signature # that identifies the file as a PE format image file. This signature is # 'PE\0\0' (the letters "P" and "E" followed by two null bytes). [...] # immediately after the signature of an image file, is a standard COFF # file header in the following format." # Our `CoffHeader` embeds the signature inside the CoffHeader. f.seek(0x3c, io.SEEK_SET) buf = f.read(struct.calcsize("I")) (s, ) = struct.unpack_from("I", buf) f.seek(int(s), io.SEEK_SET) buf = f.read(struct.calcsize(CoffFormat)) coff = CoffHeader._make(struct.unpack_from(CoffFormat, buf)) assert coff.Signature == b"PE\0\0", "Not a PE32+ file (missing PE header)" return coff
[ 203, -1, 572 ]
def METHOD_NAME(): ''' State published upload containing multiple entries ''' infrastructure.setup() main_author = infrastructure.user_management.get_user(username='test') data = ExampleData(main_author=main_author) for i in range(1, 11): upload_id = f'dft_upload_{i}' data.create_upload(upload_id=upload_id, published=False, embargo_length=0) entry_id = f'dft_bulk_{i}' data.create_entry( upload_id=upload_id, entry_id=entry_id, mainfile=f'vasp.xml', entry_archive=archive_dft_bulk() ) data.save()
[ 3624, 10281 ]
def METHOD_NAME(self):
[ 129, 3464 ]
def METHOD_NAME():
    with pytest.raises(ValueError) as val_err:  # unknown driver
        intake.open_catalog("", driver="unknown")
    assert "plugin directory" in str(val_err.value).lower()

    with pytest.raises(AttributeError) as attr_err:
        intake.open_not_a_real_plugin()
    assert "plugin directory" in str(attr_err.value).lower()
[ 9, 1068, 1452, -1 ]
def METHOD_NAME(self):
    if not self.writable():
        raise io.UnsupportedOperation("File not open for writing")
[ 250, 1046, 77 ]
def METHOD_NAME(total_agents): for i in range(total_agents): id = i + 1 name = 'Agent-test' + str(id) date = time.time() command = f'global insert-agent {{"id":{id},"name":"{name}","date_add":{date}}}' results = query_wdb(command) assert results == 'ok' command = f'''global set-agent-groups {{"mode":"append","sync_status":"syncreq", "source":"remote","data":[{{"id":{id},"groups":["Test_group{id}"]}}]}}''' results = query_wdb(command) assert results == 'ok'
[ 408, 1849, 409, 846 ]
def METHOD_NAME(self):
    # Compatibility with 2.x
    with test_support.check_py3k_warnings():
        self.test_c_buffer_value(buffer)
        self.test_c_buffer_raw(buffer)
[ 9, 2629, 2376, 2497 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    The URI to fetch the next page of events. Call ListNext() with this URI to fetch the next page of impacted resource.
    """
    return pulumi.get(self, "next_link")
[ 243, 548 ]
def METHOD_NAME(font1, font2, args):
    """Compares fonts assuming font1/2 are font files."""
    print(CompareSize(font1, font2, args))
[ 979, 1537 ]
def METHOD_NAME( self ) :
[ 9, 773 ]
def METHOD_NAME(self, obj, attr):
    """Recurses through an attribute chain to get the ultimate value."""
    return functools.reduce(getattr, attr.split('.'), obj)
[ -1 ]
def METHOD_NAME():
    from iambic.plugins.v0_1_0.google_workspace.group.models import (
        GoogleWorkspaceGroupTemplate,
    )

    return [
        GoogleWorkspaceGroupTemplate,
    ]
[ 19, 3399, 1914 ]
def METHOD_NAME(self, **kwargs):
    if self._hvd:
        from tensorflow.python.distribute import hvd_strategy
        with hvd_strategy.METHOD_NAME() as context:
            yield context
    elif self._hb:
        with self._hb.METHOD_NAME() as context:
            yield context
[ 1632, 913 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    Azure Resource Manager metadata containing createdBy and modifiedBy information.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(x, scale=-1.0): return op('scale_gradient', [x, scale]).as_tensor()
[ 930, 1789 ]
def METHOD_NAME(self):
[ 9, 203, 41, 2376 ]
def METHOD_NAME():
    loader = unittest.defaultTestLoader.loadTestsFromTestCase
    testsuite = unittest.TestSuite()
    testsuite.addTest(loader(TestPreproc))
    return testsuite
[ 482 ]
def METHOD_NAME(search_key, range, search_type):
    """Returns the relative position of an item in a range that matches a specified value."""
    raise NotImplementedError()
[ 590 ]
def METHOD_NAME(self) -> None:
    """Clear all data. Deletes all keys and values."""
    return self.data.METHOD_NAME()
[ 537 ]
def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(client: TestClient) -> DynamicSidecarsScheduler:
    return client.app.state.METHOD_NAME
[ 2111, 5395, 1520 ]
def METHOD_NAME(self): data = [ [1, None] ] schema = t.StructType( [ t.StructField("id", IntegerType(), True), t.StructField("geom", GeometryType(), True), ] ) self.spark.createDataFrame( data, schema ).createOrReplaceTempView("points") count = self.spark.sql("select count(*) from points").collect()[0][0] assert count == 1
[ 9, 1051, 1386 ]
def METHOD_NAME(self, svc, factories):
    user = factories.User.build()

    with pytest.raises(TypeError) as exc:
        svc.update_preferences(user, foo="bar", baz="qux")

    assert "keys baz, foo are not allowed" in str(exc.value)
[ 9, 86, 3958, 45, 43, 1950, 219 ]
def METHOD_NAME(self):
    lines = [" foo", " bar"]
    value = ">"
    y = "\n".join([value] + lines)
    (lines, value) = monkeyYaml.myMultiline(lines, value)
    self.assertEqual(lines, [])
    self.assertEqual(value, yaml.load(y))
[ 9, 3830, 988 ]
def METHOD_NAME():
    operations = {"variables": {"fileA": None, "fileB": None}}
    files_map = {"0": ["variables.fileA", "variables.fileB"]}
    files = {"0": True}

    assert combine_multipart_data(operations, files_map, files) == {
        "variables": {"fileA": True, "fileB": True}
    }
[ 9, 97, 171, 137, 0, 623, 107 ]
def METHOD_NAME() -> None: """Test full workflow.""" # Prepare rounds = 5 client_messages_received: List[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() worker_thread = start_worker(rounds, bridge, client_messages_received) # Execute # Simulate remote client side for _ in range(rounds): try: _ = next(ins_wrapper_iterator) bridge.set_res_wrapper(ResWrapper(client_message=ClientMessage())) except Exception as exception: raise Exception from exception # Wait until worker_thread is finished worker_thread.join(timeout=1) # Assert assert len(client_messages_received) == rounds
[ 9, 3855, 3163 ]
def METHOD_NAME(self): labels = parse_non_spatial_labels(dict(time='2021-06-08'), dims=self.dims, coords=self.coords) self.assertEqual( dict(time=np.array('2021-06-08', dtype='datetime64')), labels )
[ 9, 1080, 1393, 217, 99, 1393 ]
def METHOD_NAME(self, configured_datasets=None): """Automatically determine datasets provided by this file.""" handled_variables = set() # Iterate over dataset contents for var_name, val in self.file_content.items(): # Only evaluate variables if not isinstance(val, netCDF4.Variable): continue if (var_name in handled_variables): logger.debug("Already handled, skipping: %s", var_name) continue handled_variables.add(var_name) new_info = { 'name': var_name, 'file_type': self.filetype_info['file_type'], } yield True, new_info
[ 1272, 4146 ]
def METHOD_NAME(fn): """Return a completed Future whose result is the return of fn. Added to simplify using unittest.Mock in async code. Python 3.8's AsyncMock would be preferable. """ @functools.wraps(fn) def wrapper(*args, **kwargs): result = fn(*args, **kwargs) return get_completed_future(result) return wrapper
[ 503, 623, 3637 ]
def METHOD_NAME(si_pods: Sequence[V1Pod], container_type: str) -> int:
    """Return count of healthy Flink containers with given type"""
    return len(
        [
            pod
            for pod in si_pods
            if pod.metadata.labels["flink.yelp.com/container-type"] == container_type
            and is_pod_ready(pod)
            and container_lifetime(pod).total_seconds() > 60
        ]
    )
[ 5314, 11882, 2954, 10766 ]
def METHOD_NAME(data_category: FidesKey) -> FidesKey:
    if data_category not in defined_data_categories:
        raise common_exceptions.DataCategoryNotSupported(
            f"The data category {data_category} is not supported."
        )
    return data_category
[ 187, 253 ]
def METHOD_NAME(do_fn):
    res = {}
    for testname in enabled_tests:
        res[testname] = do_fn(testname)
    return res
[ 74, 75 ]
def METHOD_NAME(self) -> ssl.SSLContext: ...
[ 198 ]
def METHOD_NAME(mesh, iterations=100): # test function space v = FunctionSpace(mesh, "DG", 1) m = VectorFunctionSpace(mesh, "CG", 1) # advecting velocity if m.shape == (1, ): u0 = as_vector([1]) else: u0 = as_vector([1, 0]) u = Function(m).interpolate(u0) # advection problem dt = 1. / iterations phi = TestFunction(v) D = TrialFunction(v) n = FacetNormal(mesh) un = 0.5 * (dot(u, n) + abs(dot(u, n))) # upwind value a_mass = inner(D, phi) * dx a_int = inner(-u * D, grad(phi)) * dx a_flux = inner(un('+') * D('+') - un('-') * D('-'), jump(phi)) * dS arhs = a_mass - dt * (a_int + a_flux) dD1 = Function(v) D1 = Function(v) x = SpatialCoordinate(mesh) # Initial Conditions D0 = conditional(real(x[0]) < 0.5, 1., 0.) D = Function(v).interpolate(D0) D1.assign(D) t = 0.0 T = iterations * dt problem = LinearVariationalProblem(a_mass, action(arhs, D1), dD1) solver = LinearVariationalSolver(problem, solver_parameters={'ksp_type': 'cg'}) # Make slope limiter limiter = VertexBasedLimiter(v) limiter.apply(D) while t < (T - dt / 2): D1.assign(D) limiter.apply(D1) solver.solve() D1.assign(dD1) limiter.apply(D1) solver.solve() D1.assign(0.75 * D + 0.25 * dD1) limiter.apply(D1) solver.solve() D.assign((1.0 / 3.0) * D + (2.0 / 3.0) * dD1) limiter.apply(D1) t += dt assert np.max(u.dat.data_ro) <= 1.0, "Failed by exceeding max values" assert np.min(u.dat.data_ro) >= 0.0, "Failed by exceeding min values"
[ 9, 367, 559, 1751 ]
def METHOD_NAME(self): return "Rouge"
[ 103 ]
def METHOD_NAME(project: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTunnelIamPolicyResult: """ Retrieves the current IAM policy data for tunnel ## example ```python import pulumi import pulumi_gcp as gcp policy = gcp.iap.get_tunnel_iam_policy(project=google_project_service["project_service"]["project"]) ``` :param str project: The ID of the project in which the resource belongs. If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. """ __args__ = dict() __args__['project'] = project opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('gcp:iap/getTunnelIamPolicy:getTunnelIamPolicy', __args__, opts=opts, typ=GetTunnelIamPolicyResult).value return AwaitableGetTunnelIamPolicyResult( etag=pulumi.get(__ret__, 'etag'), id=pulumi.get(__ret__, 'id'), policy_data=pulumi.get(__ret__, 'policy_data'), project=pulumi.get(__ret__, 'project'))
[ 19, 7134, 1694, 54 ]
def METHOD_NAME(tmp_path):
    assert ruby.get_default_version() == 'system'
    _setup_hello_world(tmp_path)
    ret = run_language(tmp_path, ruby, 'ruby_hook')
    assert ret == (0, b'Hello world from a ruby hook\n')
[ 9, 5889, 1021, 112 ]
def METHOD_NAME(processId): ...
[ 19, 356, 281 ]
def METHOD_NAME(self, value: int, port: int) -> None: self.outl_ptr(value, port)
[ -1 ]
def METHOD_NAME(self, classifier):
[ 294, 1171, 2973, 578 ]
def METHOD_NAME(message): """A simple parser that parses the report of cuda-memcheck. This parser is meant to be simple and it only split the report into separate errors and a summary. Where each error is further splitted into error message and backtrace. No further details are parsed. A report contains multiple errors and a summary on how many errors are detected. It looks like: ========= CUDA-MEMCHECK ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaPointerGetAttributes. ========= Saved host backtrace up to driver entry point at error ========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3] ========= Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaPointerGetAttributes + 0x1a9) [0x428b9] ========= Host Frame:/home/xgao/anaconda3/lib/python3.7/site-packages/torch/lib/libtorch.so [0x5b778a9] ========= ..... ========= ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError. ========= Saved host backtrace up to driver entry point at error ========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3] ========= Host Frame:/usr/local/cuda/lib64/libcudart.so.10.1 (cudaGetLastError + 0x163) [0x4c493] ========= ..... ========= ========= ..... ========= ========= Program hit cudaErrorInvalidValue (error 1) due to "invalid argument" on CUDA API call to cudaGetLastError. ========= Saved host backtrace up to driver entry point at error ========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 [0x38c7b3] ========= ..... ========= Host Frame:python (_PyEval_EvalFrameDefault + 0x6a0) [0x1d0ad0] ========= Host Frame:python (_PyEval_EvalCodeWithName + 0xbb9) [0x116db9] ========= ========= ERROR SUMMARY: 4 errors """ errors = [] HEAD = '=========' headlen = len(HEAD) started = False in_message = False message_lines = [] lines = message.splitlines() for l in lines: if l == HEAD + ' CUDA-MEMCHECK': started = True continue if not started or not l.startswith(HEAD): continue l = l[headlen + 1:] if l.startswith('ERROR SUMMARY:'): return Report(l, errors) if not in_message: in_message = True message_lines = [l] elif l == '': errors.append(Error(message_lines)) in_message = False else: message_lines.append(l) raise ParseError("No error summary found")
[ 214 ]
def METHOD_NAME(self, filename):
    '''
    Load sample keys and entries to current sample instance.
    '''
    if not os.path.isabs(filename):
        filename = os.path.join(qkit.cfg.get('datadir'),filename)
    with open(filename) as filehandle:
        self.__dict__ = json.METHOD_NAME(filehandle, cls = QkitJSONDecoder)
    s = Sample()
    s.__dict__ = self.__dict__['sample']
    self.__dict__['sample'] = s
[ 557 ]
def METHOD_NAME(self): """List series as an authenticated user.""" series = self._create_series() user = create_user() self.client.force_authenticate(user=user) resp = self.client.get(self.api_url()) self.assertEqual(status.HTTP_200_OK, resp.status_code) self.assertEqual(1, len(resp.data)) series_rsp = resp.data[0] self.assertSerialized(series, series_rsp)
[ 9, 245, 218 ]
def METHOD_NAME(self): """Test oldest report + limit:3""" code, out, err = self.t("oldest limit:3") self.assertIn(" one", out) self.assertIn(" two", out) self.assertIn(" three", out) self.assertNotIn(" four", out) self.assertNotIn(" five", out) self.assertNotIn(" six", out) self.assertNotIn(" seven", out) self.assertNotIn(" eight", out) self.assertNotIn(" nine", out) self.assertNotIn(" ten", out) self.assertNotIn(" eleven", out)
[ 9, 10966, 2756 ]
def METHOD_NAME(link: Link, out_dir: Optional[Path]=None, overwrite: Optional[bool]=False) -> bool:
    output_path = wget_output_path(link)
    out_dir = out_dir or Path(link.link_dir)
    if not overwrite and output_path and (out_dir / output_path).exists():
        return False

    return SAVE_WGET
[ 427, 73, 1169 ]
def METHOD_NAME(zz_lst, data, z_spaccing): data_interpol = np.copy(data) for z_hole_start, z_hole_end in list(_list2range(zz_lst)): z_ref_start, z_ref_end = z_hole_start - 1, z_hole_end slice_ref_start, slice_ref_end = data[:, :, z_ref_start], data[:, :, z_ref_end] hole_cur_lst = list(range(z_hole_start, z_hole_end)) lenght_hole = len(hole_cur_lst) + 1 phys_lenght_hole = lenght_hole * z_spaccing denom_interpolation = (lenght_hole + 1) if phys_lenght_hole < 10: logger.warning('Filling a hole in the segmentation around z_slice #:' + str(z_ref_start)) for idx_z, z_hole_cur in enumerate(hole_cur_lst): num_interpolation = (lenght_hole - idx_z - 1) * slice_ref_start # Contribution of the bottom ref slice num_interpolation += (idx_z + 1) * slice_ref_end # Contribution of the top ref slice slice_interpolation = num_interpolation * 1. / denom_interpolation slice_interpolation = (slice_interpolation > 0).astype(int) data_interpol[:, :, z_hole_cur] = slice_interpolation return data_interpol
[ 1917, 2079, 9339 ]
def METHOD_NAME(session, window): query = ( session.query(models.Annotation) .filter(models.Annotation.updated.between(window.start, window.end)) .order_by(models.Annotation.updated.asc()) ) ids = set() for annotation in query: normalized = uri.normalize(annotation.target_uri) if normalized != annotation.target_uri_normalized: annotation._target_uri_normalized = ( # pylint: disable=protected-access normalized ) ids.add(annotation.id) return ids
[ 1137, 5360, 1092 ]
def METHOD_NAME():
    assert len(get_sqs_queue()._messages()) == 0
    q = get_sqs_queue()
    q.send_message("1235", {"attr1": "val1", "attr2": 111})
    assert len(get_sqs_queue()._messages()) == 1
[ 9, 1278, 353, 277 ]
def METHOD_NAME(self):
    if not self.closed_off:
        self.closed_off = True
        self.decr()
[ 2267 ]
def METHOD_NAME(character_set): """Returns a regexp matching any sequence of a set of input characters.""" character_set -= set(range(0x00, 0x20)) # Remove ASCII controls literal_list = [] for code in character_set: char = chr(code) if char in ["\\", "[", "]", "^", "-"]: char = "\\" + char literal_list.append(char) regexp = "[" + "".join(literal_list) + "]+" return re.compile(regexp)
[ 2448, 1120, 280, 0 ]
def METHOD_NAME(args: argparse.Namespace) -> None: """Update default settings with command line arguments and configfiles. Args: args: Arguments returned from parser.parse_args(). """ configfile.parse(args.config) for option, value in args.cmd_settings: try: setting = api.settings.get(option) setting.value = value except KeyError: log.error("Unknown setting %s", option) except ValueError as e: log.error(str(e)) keyfile.parse(args.keyfile) styles.parse()
[ 86, 817 ]
def METHOD_NAME(cursor):
    """Role cannot drop cache.cached"""
    with pytest.raises(pg.ProgrammingError):
        cursor.execute("DROP TABLE cache.cached")
[ 9, 2286, 1050, 596, 773, 410 ]
def METHOD_NAME(self, reddit):
    reddit.read_only = False
    assert len(list(reddit.inbox.all(limit=128))) == 128
[ 9, 75, 41, 1467 ]
def METHOD_NAME(net):
    """Returns the next subnet of the same size as net"""
    return IP(net.int() + net.len()).make_net(net.prefixlen())
[ 19, 243, 1782 ]
def METHOD_NAME(cookie, driver):
    driver.add_cookie(cookie)
    driver.delete_cookie("foo")
    assert not driver.get_cookies()
[ 9, 34, 4177 ]
def METHOD_NAME(self): kerning = self.getKerning_generic() self.assertEqual( kerning.find(('D', 'D')), None )
[ 9, 416, 6634, 6634, 98 ]
def METHOD_NAME(self):
    with self.assertRaisesRegex(XmlRPCFault, "Build matching query does not exist"):
        self.rpc_client.Build.update(-99, {})
[ 9, 56, 86, 41, 256, 1153, 56 ]
def METHOD_NAME(self):
    # arrange
    project_json = get_canned_json("canned_kml_project.json")
    project_dto = DraftProjectDTO(project_json)
    expected = geojson.loads(json.dumps(get_canned_json("2d_multi_polygon.json")))
    aoi_geojson = geojson.loads(json.dumps(project_dto.area_of_interest))

    # act
    result = GridService.merge_to_multi_polygon(aoi_geojson, dissolve=True)

    # assert
    self.assertEqual(str(expected), str(result))
[ 9, 964, 1098, 457, 1117, 41, -1 ]
def METHOD_NAME(fname): """Find and parse conversion table in implementation file `fname`.""" cmds = [] in_rpcs = False with open(fname, "r", encoding="utf8") as f: for line in f: line = line.rstrip() if not in_rpcs: if line == "static const CRPCConvertParam vRPCConvertParams[] =": in_rpcs = True else: if line.startswith("};"): in_rpcs = False elif "{" in line and '"' in line: m = re.search('{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line) assert m, "No match to table expression: %s" % line name = parse_string(m.group(1)) idx = int(m.group(2)) argname = parse_string(m.group(3)) cmds.append((name, idx, argname)) assert not in_rpcs and cmds return cmds
[ 356, 445 ]
def METHOD_NAME(self, _):
    """Process macrotrends command"""
    webbrowser.open(
        f"https://www.macrotrends.net/stocks/charts/{self.ticker}/{self.ticker}/market-cap"
    )
[ 128, -1 ]
def METHOD_NAME(
    cls, inst_data: Optional[ConfigurableClassData], config_value: PostgresStorageConfig
) -> "PostgresScheduleStorage":
    return PostgresScheduleStorage(
        inst_data=inst_data,
        postgres_url=pg_url_from_config(config_value),
        should_autocreate_tables=config_value.get("should_autocreate_tables", True),
    )
[ 280, 200, 99 ]
def METHOD_NAME(cls, data):
    jsonschema.validate(schema=cls.SCHEMA, instance=data)
    shape = Shape(tuple(data["shape"]), sig_dims=data["sig_dims"])
    return PartitionStructure(
        slices=[tuple(item) for item in data["slices"]],
        shape=shape,
        dtype=np.dtype(data["dtype"]),
    )
[ 280, 763 ]
def METHOD_NAME(tmpdir): class NonSequentialSampler(Sampler): def __init__(self, data_source): self.data_source = data_source def __iter__(self): return iter(range(len(self.data_source))) def __len__(self): return len(self.data_source) class TestModel(BoringModel): def train_dataloader(self): dataset = RandomDataset(32, 64) sampler = NonSequentialSampler(dataset) return torch.utils.data.DataLoader(dataset, sampler=sampler) def val_dataloader(self): dataset = RandomDataset(32, 64) sampler = NonSequentialSampler(dataset) return torch.utils.data.DataLoader(dataset, sampler=sampler) model = TestModel() trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2) with pytest.warns(UserWarning, match="requested to overfit but enabled train dataloader shuffling"): trainer.fit(model) assert isinstance(trainer.train_dataloader.sampler, SequentialSampler) assert isinstance(trainer.val_dataloaders.sampler, SequentialSampler)
[ 9, 16508, 1154, 45, 3437, 623, 331 ]
def METHOD_NAME(self, value): """ Cleans up a slug by removing slug separator characters that occur at the beginning or end of a slug. If an alternate separator is used, it will also replace any instances of the default '-' separator with the new separator. """ re_sep = "(?:-|%s)" % re.escape(self.separator) value = re.sub("%s+" % re_sep, self.separator, value) return re.sub(r"^%s+|%s+$" % (re_sep, re_sep), "", value)
[ 1231, 1360 ]
def METHOD_NAME(input_database, release): """ Connect to CellxGene Census or user-provided TileDBSoma object """ if input_database != "CellxGene": raise NotImplementedError( "Custom census database is not implemented yet!" ) logger.info( "Initializing %s release %s", input_database, release ) return cellxgene_census.open_soma( census_version = release )
[ 707, 7401 ]
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None, collection_name: Optional[pulumi.Input[str]] = None, database_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMongoDBResourceMongoDBCollectionResult]: """ Gets the MongoDB collection under an existing Azure Cosmos DB database account. Azure REST API version: 2023-04-15. :param str account_name: Cosmos DB database account name. :param str collection_name: Cosmos DB collection name. :param str database_name: Cosmos DB database name. :param str resource_group_name: The name of the resource group. The name is case insensitive. """ ...
[ 19, 2176, 1267, 191, 2176, 1267, 1098 ]
def METHOD_NAME(self, reader):
    """Initialize the class with the Reader object."""
    self.__reader = reader
[ 0, 781 ]
def METHOD_NAME(fn): f_size = os.path.getsize(fn) if f_size > MAX_SIZE: cloudlog.error(f"Tombstone {fn} too big, {f_size}. Skipping...") return message = "" # One line description of the crash contents = "" # Full file contents without coredump path = "" # File path relative to openpilot directory proc_maps = False with open(fn) as f: for line in f: if "CoreDump" in line: break elif "ProcMaps" in line: proc_maps = True elif "ProcStatus" in line: proc_maps = False if not proc_maps: contents += line if "ExecutablePath" in line: path = line.strip().split(': ')[-1] path = path.replace('/data/openpilot/', '') message += path elif "Signal" in line: message += " - " + line.strip() try: sig_num = int(line.strip().split(': ')[-1]) message += " (" + signal.Signals(sig_num).name + ")" except ValueError: pass stacktrace = get_apport_stacktrace(fn) stacktrace_s = stacktrace.split('\n') crash_function = "No stacktrace" if len(stacktrace_s) > 2: found = False # Try to find first entry in openpilot, fall back to first line for line in stacktrace_s: if "at selfdrive/" in line: crash_function = line found = True break if not found: crash_function = stacktrace_s[1] # Remove arguments that can contain pointers to make sentry one-liner unique crash_function = " ".join(x for x in crash_function.split(' ')[1:] if not x.startswith('0x')) crash_function = re.sub(r'\(.*?\)', '', crash_function) contents = stacktrace + "\n\n" + contents message = message + " - " + crash_function sentry.report_tombstone(fn, message, contents) # Copy crashlog to upload folder clean_path = path.replace('/', '_') date = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S") new_fn = f"{date}_{get_commit(default='nocommit')[:8]}_{safe_fn(clean_path)}"[:MAX_TOMBSTONE_FN_LEN] crashlog_dir = os.path.join(ROOT, "crash") mkdirs_exists_ok(crashlog_dir) # Files could be on different filesystems, copy, then delete shutil.copy(fn, os.path.join(crashlog_dir, new_fn)) try: os.remove(fn) except PermissionError: pass
[ 339, 8843, -1 ]
def METHOD_NAME(): frame1 = Mock(f_lineno=111, f_code=Mock(co_name='CO_NAME1', co_filename='CO_FILENAME1')) frame2 = Mock(f_lineno=222, f_code=Mock(co_name='CO_NAME2', co_filename='CO_FILENAME2')) start_time_1 = time.time() - 2 start_time_2 = time.time() - 1 stack = [(frame1, start_time_1), (frame2, start_time_2)] prev_switch_interval = sys.getswitchinterval() test_switch_interval = 10.0 assert prev_switch_interval != pytest.approx(test_switch_interval, abs=0.01) sys.setswitchinterval(test_switch_interval) with patch('tribler.core.utilities.slow_coro_detection.main_thread_stack_tracking._main_thread_stack', stack): stack_info = _get_main_thread_stack_info() assert stack_info == [StackFrameInfo(func_name='CO_NAME1', file_name='CO_FILENAME1', line_number=111, start_time=start_time_1, is_under_profiling=False), StackFrameInfo(func_name='CO_NAME2', file_name='CO_FILENAME2', line_number=222, start_time=start_time_2, is_under_profiling=False)] assert sys.getswitchinterval() == pytest.approx(test_switch_interval, abs=0.01) sys.setswitchinterval(prev_switch_interval)
[ 9, 19, 57, 600, 1501, 100 ]
def METHOD_NAME(self): item_pool = {} for room in self.__rooms: for location in room.locations: print(room, location.METHOD_NAME(), location.__class__.__name__) for k, v in location.METHOD_NAME().items(): item_pool[k] = item_pool.get(k, 0) + v unmapped_count = item_pool.get(None, 0) del item_pool[None] for item in PRIMARY_ITEMS: if item not in item_pool: item_pool[item] = 1 unmapped_count -= 1 while item_pool[POWER_BRACELET] < 2: item_pool[POWER_BRACELET] = item_pool.get(POWER_BRACELET, 0) + 1 unmapped_count -= 1 while item_pool[SHIELD] < 2: item_pool[SHIELD] = item_pool.get(SHIELD, 0) + 1 unmapped_count -= 1 assert unmapped_count >= 0 for item in SECONDARY_ITEMS: if unmapped_count > 0: item_pool[item] = item_pool.get(item, 0) + 1 unmapped_count -= 1 # Add a heart container per 10 items "spots" left. heart_piece_count = unmapped_count // 10 unmapped_count -= heart_piece_count * 4 item_pool[HEART_PIECE] = item_pool.get(HEART_PIECE, 0) + heart_piece_count * 4 # Add the rest as rupees item_pool[RUPEES_50] = item_pool.get(RUPEES_50, 0) + unmapped_count return item_pool
[ 19, 1024, 1567 ]
def METHOD_NAME(layer, **kwargs): X = np.random.normal(size=(batch_size, N, F)) if "target_shape" in kwargs: target_output_shape = kwargs.pop("target_shape") else: target_output_shape = (batch_size, kwargs.get("channels", F)) X_in = Input(shape=(N, F)) layer_instance = layer(**kwargs) output = layer_instance(X_in) model = Model(X_in, output) output = model(X) assert output.shape == target_output_shape assert output.shape == layer_instance.compute_output_shape(X.shape) _check_output_and_model_output_shapes(output.shape, model.output_shape)
[ 9, 2277, 854 ]
def METHOD_NAME(posture, positions, point=0): """Set the posture positions using a dict of joint positions.""" # XXX: Why have we been doing this? # posture.header.stamp = now posture.joint_names = positions.keys() # Extend the array to be big enough. if len(posture.points) < point + 1: for _ in range(point + 1): posture.points.append(JointTrajectoryPoint()) # Update the point in place jtp = JointTrajectoryPoint() for _, pos in positions.items(): jtp.positions.append(pos) posture.points[point] = jtp
[ 0, -1, 1669 ]
def METHOD_NAME() -> str:
    if sys.platform == "win32":
        return os.path.join(get_windows_local_appdata_dir())
    elif sys.platform == "darwin":
        return os.path.expanduser("~/Library/Caches")
    else:
        return os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
[ 19, 21, 596, 1190 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    Etag of the azure resource
    """
    return pulumi.get(self, "etag")
[ 431 ]
def METHOD_NAME(self, context, layout):
    layout.prop(self, "level", text="Level")
    layout.prop(self, "unwrap", text="UnWrap")
[ 1100, 1409 ]
def METHOD_NAME(self, event=None):
    for _, title, page in self._page_records:
        try:
            page.cancel()
        except Exception:
            get_workbench().report_exception("Error when cancelling options in " + title)

    self.destroy()
[ 608 ]
def METHOD_NAME(): print("USAGE: %s [-i <if_name>]" % argv[0]) print("") print("optional arguments:") print(" -h print this help") print(" -i if_name select interface if_name. Default is eth0") print("") print("examples:") print(" http-parse # bind socket to eth0") print(" http-parse -i wlan0 # bind socket to wlan0") exit()
[ 40 ]
def METHOD_NAME(self, benefactor: User) -> dict: shift_swap = self.get_object() prev_state = shift_swap.insight_logs_serialized try: shift_swap.take(benefactor) except exceptions.ShiftSwapRequestNotOpenForTaking: raise BadRequest(detail="The shift swap request is not in a state which allows it to be taken") except exceptions.BeneficiaryCannotTakeOwnShiftSwapRequest: raise BadRequest(detail="A shift swap request cannot be created and taken by the same user") write_resource_insight_log( instance=shift_swap, author=self.request.user, event=EntityEvent.UPDATED, prev_state=prev_state, new_state=shift_swap.insight_logs_serialized, ) return ShiftSwapRequestSerializer(shift_swap).data
[ 74, 1828 ]
def METHOD_NAME(coco_visiondata_train, coco_visiondata_test):
    # Act
    context = Context(train=coco_visiondata_train, test=coco_visiondata_test)
    # Assert
    assert_that(context, has_properties({'train': instance_of(VisionData), 'test': instance_of(VisionData)}))
[ 9, 9714, 198, 7319, 43, 279, 381 ]
def METHOD_NAME(driver, pages):
    pages.load("scrolling_tests/page_with_frame_out_of_view.html")
    driver.switch_to.frame(driver.find_element(By.NAME, "frame"))
    element = driver.find_element(By.NAME, "checkbox")
    element.click()
    assert element.is_selected()
[ 9, 427, 673, 4871, 24, 212, 669 ]
def METHOD_NAME(self):
    for url in self.start_urls:
        yield JsonRequest(url=url)
[ 447, 311 ]
def METHOD_NAME(self):
    self.set_whitelist([])
    for i in range(10):
        mime_type = get_random_string(6) + "/" + get_random_string(5)
        # If this throws an error, the test fails
        validate_upload(f"test.{mime_type.split('/')[-1]}", None, None, mime_type)
[ 9, 654, 7842 ]
def METHOD_NAME(tmpdir): def touch(path: Path) -> None: if path.is_dir(): raise ValueError("Only files, not folders are supported") path.parent.mkdir(parents=True, exist_ok=True) path.touch() def rlist(input_dir): """Recursively list files in input_dir""" paths = list(sorted(input_dir.rglob("*"))) res = [] for el in paths: if el.is_file(): res.append(str(el.relative_to(input_dir))) return res install_prefix = Path(str(tmpdir / "install")) test_install_prefix = Path(str(tmpdir / "install-tests")) # create the example package touch(install_prefix / "ex1" / "base.py") touch(install_prefix / "ex1" / "conftest.py") touch(install_prefix / "ex1" / "test_base.py") touch(install_prefix / "ex1" / "tests" / "data.csv") touch(install_prefix / "ex1" / "tests" / "test_a.py") n_moved = buildpkg.unvendor_tests(install_prefix, test_install_prefix) assert rlist(install_prefix) == ["ex1/base.py"] assert rlist(test_install_prefix) == [ "ex1/conftest.py", "ex1/test_base.py", "ex1/tests/data.csv", "ex1/tests/test_a.py", ] # One test folder and two test file assert n_moved == 3
[ 9, -1, 450 ]
def METHOD_NAME(self):
    o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
    if o != 0:
        return self._tab.VectorLen(o)
    return 0
[ 773, 2376, 799 ]
def METHOD_NAME(self, servicename, dllpath): svchost_path = r"Software\Microsoft\Windows NT\CurrentVersion\Svchost" parameter_path = rf"System\CurrentControlSet\Services\{servicename}\Parameters" try: log.info("Adding Parameters value: %s -> ServiceDll = %s", parameter_path, dllpath) with CreateKeyEx(HKEY_LOCAL_MACHINE, parameter_path, 0, KEY_ALL_ACCESS) as key: SetValueEx(key, "ServiceDll", 0, REG_EXPAND_SZ, dllpath) CloseKey(key) except Exception as e: log.info("Error setting registry value: %s", e) # Service is not installed return try: log.info("Adding capegroup value: capegroup = %s", servicename) with CreateKeyEx(HKEY_LOCAL_MACHINE, svchost_path, 0, KEY_ALL_ACCESS) as key: SetValueEx(key, "capegroup", 0, REG_MULTI_SZ, [servicename]) CloseKey(key) except Exception as e: log.info("Error setting registry value: %s", e) return
[ 0, 219 ]
def METHOD_NAME(self, using=None, keep_parents=False):
    removed = super().METHOD_NAME(using, keep_parents)
    if self.file.name:
        self.file.METHOD_NAME()
    return removed
[ 34 ]
def METHOD_NAME(params): if not os.path.exists(settings_dir): os.makedirs(settings_dir) working_phil = master_phil_scope.format(python_object = params) diff_phil = master_phil_scope.fetch_diff(source = working_phil) try: f = open(settings_file.encode('utf8'), 'wb') f.write(diff_phil.as_str().encode('utf8')) f.close() except IOError: raise Sorry('Unable to write %s.' % settings_file)
[ 73, 175, 817 ]
def METHOD_NAME(): file = tests.modules.get_test_resources_directory("createbatchfiles/v7.pipeline") with open(file, "r") as fd: data = fd.read() pipeline = cellprofiler_core.pipeline.Pipeline() pipeline.loadtxt(six.moves.StringIO(data)) assert len(pipeline.modules()) == 1 module = pipeline.modules()[0] assert isinstance(module, cellprofiler.modules.createbatchfiles.CreateBatchFiles) assert module.wants_default_output_directory assert module.custom_output_directory.value == r"C:\foo\bar" assert not module.remote_host_is_windows assert not module.distributed_mode assert module.default_image_directory == r"C:\bar\baz" assert module.revision == 0 assert not module.from_old_matlab assert len(module.mappings) == 1 mapping = module.mappings[0] assert mapping.local_directory == r"\\argon-cifs\imaging_docs" assert mapping.remote_directory == r"/imaging/docs"
[ 9, 557, 13542 ]
def METHOD_NAME(self, text): """ Applies text normalization rules. Args: text: input text Returns: clean text """ # Trim whitespace text = text.strip() # Convert all upper case strings to capitalized case return text.capitalize() if text.isupper() else text
[ 1356 ]
def METHOD_NAME(self, str: builtins.str) -> typing.Tuple[builtins.int, Gdk.Atom, builtins.int, builtins.bytes]: ...
[ 144, 24, 5617, 526 ]