text: string (lengths 15 to 7.82k)
ids: sequence (lengths 1 to 7)
def METHOD_NAME(*, db_session, project_id: int, name: str) -> Optional[CaseSeverity]: """Returns a case severity based on the given severity name.""" return ( db_session.query(CaseSeverity) .filter(CaseSeverity.name == name) .filter(CaseSeverity.project_id == project_id) .one_or_none() )
[ 19, 604, 156 ]
def METHOD_NAME(i, loss_sum): rng_key_binarize = random.fold_in(rng_key, i) batch = binarize(rng_key_binarize, test_fetch(i, test_idx)[0]) # FIXME: does this lead to a requirement for an rng_key arg in svi_eval? loss = svi.evaluate(svi_state, batch) / len(batch) loss_sum += loss return loss_sum
[ 2829, 3435 ]
def METHOD_NAME(rpm_publication_api, gen_object_with_cleanup): """A factory to generate an RPM Publication with auto-deletion after the test run.""" def _rpm_publication_factory(pulp_domain=None, **body): # XOR check on repository and repository_version assert bool("repository" in body) ^ bool("repository_version" in body) kwargs = {} if pulp_domain: kwargs["pulp_domain"] = pulp_domain return gen_object_with_cleanup(rpm_publication_api, body, **kwargs) return _rpm_publication_factory
[ 3466, 3973, 1155 ]
def METHOD_NAME(client): """ Checks if current phase is one of the phases in the phase list. """ resp = client.get("/mxcube/api/v0.1/diffractometer/phaselist") data = json.loads(resp.data) phase_list = data["current_phase"] resp = client.get("/mxcube/api/v0.1/diffractometer/phase") data = json.loads(resp.data) phase = data["current_phase"] assert phase in phase_list
[ 9, 19, 3200 ]
def METHOD_NAME(): args = argparse.ArgumentParser() args.add_argument( "--workload", type=str, required=True, ) args.add_argument( "--target", type=str, required=True, ) args.add_argument( "--num-trials", type=int, required=True, ) args.add_argument( "--rpc-host", type=str, required=True, ) args.add_argument( "--rpc-port", type=int, required=True, ) args.add_argument( "--rpc-key", type=str, required=True, ) args.add_argument( "--work-dir", type=str, required=True, ) args.add_argument( "--number", type=int, default=3, ) args.add_argument( "--repeat", type=int, default=1, ) args.add_argument( "--min-repeat-ms", type=int, default=100, ) args.add_argument( "--adaptive-training", type=lambda x: bool(strtobool(x)), required=False, help="example: True / False", default=True, ) args.add_argument( "--cpu-flush", type=lambda x: bool(strtobool(x)), help="example: True / False", required=True, ) parsed = args.parse_args() parsed.target = tvm.target.Target(parsed.target) parsed.rpc_config = ms.runner.RPCConfig( tracker_host=parsed.rpc_host, tracker_port=parsed.rpc_port, tracker_key=parsed.rpc_key, session_timeout_sec=60, ) return parsed
[ 214, 335 ]
def METHOD_NAME(self, context, event): """ Args: context: event: Returns: """ wm = context.window_manager context.area.tag_redraw() if not self.running: from phobos.blender.phoboslog import log bpy.types.SpaceView3D.draw_handler_remove(self._handle2d, 'WINDOW') bpy.types.SpaceView3D.draw_handler_remove(self._handle3d, 'WINDOW') log("Stop drawing Phobos information.", 'DEBUG') return {'FINISHED'} if event.type == 'PAGE_UP' and event.value == 'PRESS': wm.phobos_msg_offset += 1 if event.type == 'PAGE_DOWN' and event.value == 'PRESS': wm.phobos_msg_offset -= 1 if event.shift and event.type == 'LEFTMOUSE' and event.value == 'CLICK': pass return {'PASS_THROUGH'}
[ 12921 ]
def METHOD_NAME(self): iam_client = client("iam") user = "test-user" iam_client.create_user(UserName=user)["User"]["Arn"] current_audit_info = self.set_mocked_audit_info() from prowler.providers.aws.services.iam.iam_service import IAM with mock.patch( "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info", new=current_audit_info, ), mock.patch( "prowler.providers.aws.services.iam.iam_root_mfa_enabled.iam_root_mfa_enabled.iam_client", new=IAM(current_audit_info), ) as service_client: from prowler.providers.aws.services.iam.iam_root_mfa_enabled.iam_root_mfa_enabled import ( iam_root_mfa_enabled, ) service_client.credential_report[0]["user"] = "<root_account>" service_client.credential_report[0]["mfa_active"] = "true" service_client.credential_report[0][ "arn" ] = "arn:aws:iam::123456789012:<root_account>:root" check = iam_root_mfa_enabled() result = check.execute() assert result[0].status == "PASS" assert search("MFA is enabled for root account.", result[0].status_extended) assert result[0].resource_id == "<root_account>" assert result[0].resource_arn == service_client.credential_report[0]["arn"]
[ 9, 1563, 8520, 1111 ]
def METHOD_NAME(tsv_path, rm_stress=True): uid2phns = {} with open(tsv_path) as f: for line in f: uid, phns = line.rstrip().split("\t") phns = phns.split(",") if rm_stress: phns = [re.sub("[0-9]", "", phn) for phn in phns] uid2phns[uid] = phns return uid2phns
[ 203, 7536 ]
METHOD_NAME(self, ra, dec):
[ 3187, -1, 695 ]
def METHOD_NAME( mutate: bool, cookie_session_middleware: SessionMiddleware[ClientSideSessionBackend] ) -> None: """Should load session cookies into session from request and overwrite the previously set cookies with the upcoming response. Session cookies from the previous session should not persist because session is mutable. Once the session is loaded from the cookies, those cookies are redundant. The response sets new session cookies overwriting or expiring the previous ones. """ # Test for large session data. If it works for multiple cookies, it works for single also. _session = create_session(size=4096) @get(path="/test") def handler(request: Request) -> dict: nonlocal _session if mutate: # Modify the session, this will overwrite the previously set session cookies. request.session.update(create_session()) _session = request.session return request.session ciphertext = cookie_session_middleware.backend.dump_data(_session) with create_test_client( route_handlers=[handler], middleware=[cookie_session_middleware.backend.config.middleware], ) as client: # Set cookies on the client to avoid warnings about per-request cookies. client.cookies = { # type: ignore[assignment] f"{cookie_session_middleware.backend.config.key}-{i}": text.decode("utf-8") for i, text in enumerate(ciphertext) } response = client.get("/test") assert response.json() == _session # The session cookie names that were in the request will also be present in its response to overwrite or to expire # them. So, the number of cookies in the response will be at least equal to or greater than the number of cookies # that were in the request. assert response.headers["set-cookie"].count("session") >= response.request.headers["Cookie"].count("session")
[ 9, 557, 240, 880, 61, 7740, 1511 ]
def METHOD_NAME(x, y, coord_mapping, parser, ds_files): precision = {'f': 4, 'd': 8} ds_files.find_ibd_key() ibd_key = ds_files.ibd_key bucket = ds_files.upload_bucket if (x, y) in coord_mapping: index = coord_mapping[(x, y)] mz_offset = parser.mzOffsets[index] mz_length = parser.mzLengths[index] * precision[parser.mzPrecision] mzs = np.frombuffer( ds_files.read_file_partially(mz_offset, mz_length, ibd_key, bucket=bucket), dtype=parser.mzPrecision, ) intensity_offset = parser.intensityOffsets[index] intensity_length = parser.intensityLengths[index] * precision[parser.intensityPrecision] ints = np.frombuffer( ds_files.read_file_partially( intensity_offset, intensity_length, ibd_key, bucket, ), dtype=parser.intensityPrecision, ) else: mzs, ints = np.array([]), np.array([]) return mzs, ints
[ 19, -1, 5841 ]
def METHOD_NAME(): content_sizes = get_globus_dataset_content_sizes( globus_endpoint_id="188a6110-96db-11eb-b7a9-f57b2d55370d", path="/SenzaiY/YutaMouse41/YutaMouse41-150821/originalClu/", ) assert estimate_s3_conversion_cost(total_mb=sum(content_sizes.values()) / 1e6) == 1.756555806400279e-13
[ 9, 918, 607, 1719, 1955, 280, -1 ]
def METHOD_NAME(builder, payloadTransparency): builder.PrependBoolSlot(11, payloadTransparency, 0)
[ 14890, 2247, 238, 288, 6867 ]
def METHOD_NAME( filepath: Union[str, pathlib.Path], *, expected_sha384_hash: str = None, error: bool = True, chunk_size: int = 8192, ): r""" Read the contents of a file, hash the contents, and compare that hash to the one given. Examples -------- Write out ``file.txt`` with the contents of b"Hello World\n". Then check it to see if the SHA 384 hash of the contents matches the SHA 384 hash for b"Hello FeedFace\n". >>> import pathlib >>> import hashlib >>> from dffml import validate_file_hash >>> >>> correct_contents = b"Hello FeedFace\n" >>> expected_sha384_hash = hashlib.sha384(correct_contents).hexdigest() >>> >>> original_path = pathlib.Path("file.txt") >>> original_path.write_text("Hello World\n") 12 >>> >>> validate_file_hash( ... original_path, ... expected_sha384_hash=expected_sha384_hash, ... ) Traceback (most recent call last): ... dffml.util.file.HashValidationError: file.txt hash was acbfd470c22c0d95a1d10a087dc31988b9f7bfeb13be70b876a73558be664e5858d11f9459923e6e5fd838cb5708b969, should be 00d7bdbf0b24d37463bd9d2107926c3fa870537c009cd64dde72c3578160d9e04f63bf487631a2e2e7610f9654cf0f78 >>> >>> validate_file_hash( ... original_path, ... expected_sha384_hash=expected_sha384_hash, ... error=False, ... ) False >>> >>> # Write the correct contents to the file so validation passes >>> original_path.write_bytes(correct_contents) 15 >>> validate_file_hash( ... original_path, ... expected_sha384_hash=expected_sha384_hash, ... ) True """ filepath = pathlib.Path(filepath) if expected_sha384_hash is None: raise NoHashToUseForValidationSuppliedError(filepath) filehash = SECURE_HASH_ALGORITHM() with open(filepath, "rb") as fileobj: bytes_read = fileobj.read(chunk_size) filehash.update(bytes_read) while len(bytes_read) == chunk_size: bytes_read = fileobj.read(chunk_size) filehash.update(bytes_read) filehash = filehash.hexdigest() if filehash != expected_sha384_hash: if error: raise HashValidationError( str(filepath), filehash, expected_sha384_hash ) return False return True
[ 187, 171, 1161 ]
async def METHOD_NAME(bridge): peer = CancellableConnect(bridge, PEER_CONFIG) await peer.start() peer.close() while len(asyncio.all_tasks()) > 1: await asyncio.sleep(0.1) assert peer.was_cancelled
[ 9, 4096, 5514, 707, 176 ]
def METHOD_NAME(): "Gets a character from the keyboard and returns the key code" char = get_raw_chars() if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(char) == KEYMAP["esc"]: combo = get_raw_chars() if ord(combo) == KEYMAP["mod_int"]: key = get_raw_chars() if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(key) + ARROW_KEY_FLAG) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
[ 19, 5859 ]
def METHOD_NAME(self): if os.path.exists(self.vad_threshold_path): # the average-loudness file exists self.vad_threshold = np.load(self.vad_threshold_path)
[ 176 ]
def METHOD_NAME(endpoint_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, workspace_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBatchEndpointResult]: """ Use this data source to access information about an existing resource. :param str endpoint_name: Name for the Batch Endpoint. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str workspace_name: Name of Azure Machine Learning workspace. """ ...
[ 19, 2277, 841, 146 ]
def METHOD_NAME(self, data): self.asynchdl.add(self.termid, data)
[ 6930, 365 ]
def METHOD_NAME(self, command_args): super().METHOD_NAME(command_args) return self.build_lro_poller(self._execute_operations, None)
[ 1519 ]
def METHOD_NAME(self): copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) cmake = CMake(self) cmake.install() rmdir(self, os.path.join(self.package_folder, "CMake")) rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) # TODO: to remove in conan v2 once cmake_find_package* generators removed self._create_cmake_module_alias_targets( os.path.join(self.package_folder, self._module_file_rel_path), {"tlx": "tlx::tlx"} )
[ 360 ]
def METHOD_NAME(self, predictions): # we should not create insight for non existing products ! import_result = self._run_import( predictions, product_store={DEFAULT_PRODUCT_ID: None} ) assert import_result.created_insights_count() == 0 assert import_result.updated_insights_count() == 0 assert import_result.deleted_insights_count() == 0 # no insight created assert ProductInsight.select().count() == 1
[ 9, 512, 1188, 130, 623, 1308 ]
def METHOD_NAME(self, query, from_date=None, to_date=None, page=1): data = { 'q': query, 'api-key': self.credentials.key, 'page': str(page), 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount', 'show-tags': 'all', } if from_date is not None: data['from-date'] = from_date if to_date is not None: data['to-date'] = to_date return data
[ 56, 539 ]
def METHOD_NAME(self): self.assertTrue( JvecAdjointTest( nsem.utils.test_utils.random(1e-2), "All", 0.1, testLocations=True, testSingle=True, ) )
[ 9, 10095, 1330, 708, 97, 75 ]
def METHOD_NAME(self) -> None: self._undo()
[ 2797 ]
def METHOD_NAME( self, mock_generate_skill_score_mapping, mock_get_job_holder_usernames, mock_get_top_skill_categories_for_job ): """ Test that average value is None when users are less than 5. """ mock_get_top_skill_categories_for_job.return_value = DUMMY_CATEGORIES_RESPONSE mock_get_job_holder_usernames.return_value = {"usernames": ['user1', 'user2']} mock_generate_skill_score_mapping.return_value = {'Technology Roadmap': 2, 'Python': 3} self.client.login(username=self.user.username, password=TEST_PASSWORD) response = self.client.get(self.url) assert response.status_code == 200 # check if the response is mutated and scores are appended for skills # for when some skills are learned by user in a category, check if user_score and avg score is appended assert response.data['skill_categories'][0]['user_score'] == 0.8 assert response.data['skill_categories'][0]['edx_average_score'] is None # for when no skill is learned by user in a category, check if user_score and avg score is appended assert response.data['skill_categories'][1]['user_score'] == 0.0 assert response.data['skill_categories'][1]['edx_average_score'] is None
[ 9, 19, 41, 488, 489, 822, 3467 ]
def METHOD_NAME(vd, url, filetype=None): url = urlparse(url.given) dbname = url.path[1:] return MyTablesSheet(dbname+"_tables", sql=SQL(url), schema=dbname)
[ 4908, 4001 ]
def METHOD_NAME(self): invalid_filters = check_filters(self, ["sample_accession_code"]) if invalid_filters: raise InvalidFilters(invalid_filters=invalid_filters) queryset = DownloaderJob.objects.all() sample_accession_code = self.request.query_params.get("sample_accession_code", None) if sample_accession_code: queryset = queryset.filter( original_files__samples__accession_code=sample_accession_code ).distinct() return queryset
[ 19, 2386 ]
async def METHOD_NAME(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response
[ 19, 243 ]
def METHOD_NAME(self, package_name): inc_data = "do_package:append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\" > ${PKGDESTWORK}/${PN}.datestamp\n}" % datetime.datetime.now() self.write_recipeinc(package_name, inc_data) res = bitbake(package_name, ignore_status=True) self.delete_recipeinc(package_name) self.assertEqual(res.status, 0, msg=res.output)
[ 2978, 360, 1933 ]
def METHOD_NAME(self): desc = "this tests foo" test = unittest.FunctionTestCase(lambda: None, description=desc) self.assertEqual(test.shortDescription(), "this tests foo")
[ 9, 1707, 1067, -1, 2573 ]
def METHOD_NAME(kwargs, key, value): if key not in kwargs.keys(): kwargs[key] = value
[ 1475 ]
def METHOD_NAME(): def f1(): """ docstring headline other text """ def f2(): """ docstring headline other text """ def f3(): pass assert util.docstring_headline(f1) == 'docstring headline' assert util.docstring_headline(f2) == 'docstring headline' assert util.docstring_headline(f3) == ''
[ 9, 2573, 15951 ]
def METHOD_NAME(test_cube): """Test town plus region plus country.""" msg = "Requested location Minas Tirith,Gondor can not be found" with pytest.raises(ValueError, match=msg): extract_location(test_cube, scheme='nearest', location='Minas Tirith,Gondor')
[ 9, 256, 1153, 708 ]
def METHOD_NAME(cls, cache_key, project_ids, organization_id, order_by, value_list): cache_result = cache.get(cache_key) if cache_result is None: result = list( cls.objects.filter( project_id__in=project_ids, organization_id=organization_id, ) .order_by(*order_by) .values_list(*value_list) ) cache.set(cache_key, result, PROJECT_TRANSACTION_THRESHOLD_CACHE_TTL) return result else: return cache_result
[ 527, 61, 596 ]
def METHOD_NAME(state, params): print("Replaying forward run") for i in range(adjointer.equation_count): (fwd_var, output) = adjointer.get_forward_solution(i) s = libadjoint.MemoryStorage(output) s.set_compare(0.0) s.set_overwrite(True) adjointer.record_variable(fwd_var, s)
[ 2826 ]
def METHOD_NAME( self, cluster: str, instance_type_class: Type[InstanceConfig_T] ) -> Iterable[InstanceConfig_T]: """Returns an iterator that yields InstanceConfig objects. :param cluster: The cluster name :param instance_type_class: a subclass of InstanceConfig :returns: an iterator that yields instances of MarathonServiceConfig, etc. :raises NotImplementedError: when it doesn't know how to create a config for instance_type_class """ if (cluster, instance_type_class) not in self._framework_configs: self._refresh_framework_config(cluster, instance_type_class) for instance, config in self._framework_configs.get( (cluster, instance_type_class), {} ).items(): try: yield self._create_service_config( cluster, instance, config, instance_type_class ) except NoDeploymentsAvailable: pass
[ 89, 736 ]
def METHOD_NAME(self, path): path = self._realpath(path) try: os.METHOD_NAME(path) except OSError as e: return SFTPServer.convert_errno(e.errno) return SFTP_OK
[ 1275 ]
def METHOD_NAME( ctx: click.Context | None, param: click.Parameter, value: datetime.datetime | None ) -> str: if value is None: return "" return value.strftime("%Y-%m-%d %H:%M:%S")
[ 275, 153, 1076 ]
def METHOD_NAME(project_factory): projects = [project_factory(is_draft=False) for i in range(10)] featured = list(models.Project.objects.featured()) assert featured == list(reversed(projects))[:8]
[ 9, 964, 2847 ]
def METHOD_NAME(self) -> "SetupInitializer": """ Dump setup.py into file and build the python artifacts - wheel and tar.gz """ if Setup.VERSION_KEY in self._setup_data: self._setup_data.pop(Setup.VERSION_KEY) with open(self._setup_path, "w") as f: f.write(Setup.FORMAT.format(**self._setup_data)) self.build("bdist_wheel") with open(self._setup_path, "w") as f: f.write(Setup.FORMAT.format(**self._setup_data)) subprocess.check_output(["python3", self._setup_path, "check"]) self._setup_data[Setup.SETUP_REQUIRES_KEY] = [] self._setup_data.pop(Setup.VCVERSIONER_KEY) Setup.FORMAT = Setup.FORMAT.replace("vcversioner={vcversioner},", 'version="{version}",') with open(os.path.join(self._project_path, self.DEFAULT_VERSION_PATH)) as f: self._setup_data[Setup.VERSION_KEY] = f.read() with open(self._setup_path, "w") as f: f.write(Setup.FORMAT.format(**self._setup_data)) self.build("sdist") return self
[ 278 ]
def METHOD_NAME(cls): return (1.0, 0.4, 0.216, 0.5)
[ 1100, 36, 53 ]
def METHOD_NAME(self, bk_biz_id, begin_time, end_time, shied_type, shied_value, username): dimension_config = self.get_dimension_config(shied_type, shied_value, bk_biz_id, username) request_body = self.build_request_body( begin_time=begin_time, bk_biz_id=bk_biz_id, shied_type=shied_type, dimension_config=dimension_config, end_time=end_time, ) return request_body
[ 19, 377, 2829 ]
def METHOD_NAME(self): objects = self.get_actionable_objects()[0] return self.run_hook("after_delete_snippet", self.request, objects)
[ 22, 1887, 1021 ]
def METHOD_NAME(self): self.assertEqual( "\n".join( [ "Heat pan over a medium heat and add garlic, ground beef and salt and coriander. Cook until the meat is half-way cooked for about 15 to 20 minutes.", "Add black pepper and remove from heat.", "Combine one cup of raw chopped onions to the meat and let it cool down to room temperature", "Soak split peas for 2–3 hours or overnight.", "Heat oil in a frying pan and cook onion and garlic until lightly browned.", "Add tomato and tomato paste. Add split peas and ΒΎ cup water and cook for 30–45 minutes until soft.", "Season with salt and pepper and set aside.", "Combine all of the ingredients for dip in a bowl and mix well.", "Place flour in a large mixing bowl and gradually add water, mixing with hands until it becomes doughy.", "Leave the dough to settle for 15–20 minutes or until it becomes firm.", "Separate dough into small handfuls and roll into individual ball shapes.", "Scatter some flour on the bench surface and using a small rolling pin, roll the balls into circular shapes.", "Roll the dough ball into a very thin (1/16-inch) strips using a pasta machine. Cut the strips into 2-inch squares.", "Place approximately one tablespoon of the cooled ground beef and onion mixture onto each wrap. To make the wraps stick together easily, wet the edges with water (you may use your fingers or a basting brush).", "Fold over first two opposite ends of the egg roll wrap and followed by other two ends to enclose dumplings. Press the edges tightly to seal together. Continue with the remaining wraps.", "Take the racks out of the steamer dish. Add water to the dish, cover and bring to boil.", "Oil the base of steamer to prevent sticking and place dumplings carefully across oil.", "Cover lid and cook for approximately 40 minutes.", "When the dumplings have cooked, add a thin layer of the yogurt mixture to a large serving plate. Place the steamed dumplings on top.", "Pour some more of the yogurt mixture on top of the dumplings and coat everything with the the topping sauce.", "Garnish with dried mint and a little bit of cayenne pepper.", ] ), self.harvester_class.instructions(), )
[ 9, 897 ]
def METHOD_NAME(self): """ Sets the borders to nicely display graphs """ self.fig.subplots_adjust(left=0, bottom=0, right=1, top=0.9, wspace=0, hspace=0)
[ 0, 303, 854 ]
async def METHOD_NAME(client): assert await client.string.get_not_provided() is None
[ 9, 19, 130, 4158 ]
def METHOD_NAME(scheme: str, timeout: int = 0): if timeout <= 0: timeout = 10 * 60 elapsed = 0 while not __has_exit and elapsed < timeout: for k, v in __proxy_list.items(): if v and (not scheme or k == scheme): return True time.sleep(0.1) elapsed += 0.1
[ 618, 43, 865, 127 ]
def METHOD_NAME(self): contest = Contest.objects.get() user = User.objects.get(username='test_user') p = Participant(contest=contest, user=user, status='ACTIVE') p.save() user = User.objects.get(username='test_user2') p = Participant(contest=contest, user=user, status='ACTIVE') p.save()
[ 0, 1 ]
def METHOD_NAME(self) -> "Image.Image": if not PIL_AVAILABLE: raise RuntimeError( "PIL is not available. Please install via 'pip install Pillow'" ) uncompressed = _decompress(self.data) pil_mode: Literal["L", "RGB"] if self.mode is ToifMode.grayscale: pil_mode = "L" raw_data = _to_grayscale(uncompressed, right_hi=False) elif self.mode is ToifMode.grayscale_eh: pil_mode = "L" raw_data = _to_grayscale(uncompressed, right_hi=True) elif self.mode is ToifMode.full_color: pil_mode = "RGB" raw_data = _to_rgb(uncompressed, little_endian=False) else: # self.mode is ToifMode.full_color_le: pil_mode = "RGB" raw_data = _to_rgb(uncompressed, little_endian=True) return Image.frombuffer(pil_mode, self.size, raw_data, "raw", pil_mode, 0, 1)
[ 24, 660 ]
def METHOD_NAME(user, page): """ This predicate checks whether the given user is a member of the page's responsible organization. :param user: The user who's permission should be checked :type user: ~django.contrib.auth.models.User :param page: The requested page :type page: ~integreat_cms.cms.models.pages.page.Page :return: Whether or not ``user`` is a member of ``page.organization`` :rtype: bool """ if not page or not page.id or not page.organization: return False return user in page.organization.members.all()
[ 137, 623, 15517, 1044 ]
def METHOD_NAME(self): with patch("api.job.views.cancel_task"): self.client.put(path=reverse(viewname="v1:tasklog-cancel", kwargs={"task_pk": self.task.pk})) log: AuditLog = AuditLog.objects.order_by("operation_time").last() self.check_log( log=log, operation_name="Task cancelled", operation_result=AuditLogOperationResult.SUCCESS, user=self.test_user, obj=self.adcm, )
[ 9, 608 ]
async def METHOD_NAME(self): await asyncio.sleep(IO_SLEEP_TIME)
[ 248, 249, 128 ]
def METHOD_NAME(): u""" Width of Japanese phrase: コンニチハ, γ‚»γ‚«γ‚€! Given a phrase of 5 and 3 Katakana ideographs, joined with 3 English-ASCII punctuation characters, totaling 11, this phrase consumes 19 cells of a terminal emulator. """ # given, phrase = u'コンニチハ, γ‚»γ‚«γ‚€!' expect_length_each = (2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1) expect_length_phrase = sum(expect_length_each) # exercise, length_each = tuple(map(wcwidth.wcwidth, phrase)) length_phrase = wcwidth.wcswidth(phrase) # verify. assert length_each == expect_length_each assert length_phrase == expect_length_phrase
[ 9, 4117, 10789 ]
def METHOD_NAME(tmp_path): with open(tmp_path / "setup.cfg", "w") as f: f.write( dedent( """ [options] python_requires = 1.234 [metadata] something = other """ ) ) assert get_requires_python_str(tmp_path) == "1.234"
[ 9, 203, 102, 2610 ]
def METHOD_NAME(self): """Adding a protocol with a number >=2**16 raises an exception.""" e = ethernet.EthernetProtocol() try: e.addProto(2 ** 16, MyProtocol([])) except TypeError as e: if e.args == ("Added protocol must fit in 16 bits",): pass else: raise else: raise AssertionError("addProto must raise an exception for bad protocols")
[ 9, 3894, 1068, 6349, 564, 4289 ]
def METHOD_NAME(tool_name: str, organisation=Depends(get_user_organisation)): try: return ToolsHandler(session=db.session, organisation_id=organisation.id).get_tool_usage_by_name(tool_name) except Exception as e: if hasattr(e, 'status_code'): raise HTTPException(status_code=e.status_code, detail=e.detail) else: raise HTTPException(status_code=500, detail="Internal Server Error")
[ 19, 3081, 558 ]
def METHOD_NAME(self): self.traj[15] # index is 0-based and frames are 0-based assert_equal(self.ts.frame, 15, "jumping to frame 15")
[ 9, 8210, 7669 ]
def METHOD_NAME(x, axis, exclusive=False): return op('cumsum', [x, axis, exclusive]).as_tensor()
[ 6173 ]
def METHOD_NAME(): """Check for packages used in code but not in requirements.txt.""" mod_imports = analyze_imports( package_root=PKG_ROOT, package_name=PKG_NAME, req_file=REQS_FILE ) import_errs = {v for s in mod_imports.values() for v in s.unknown} print("re module path:", re.__file__) print("Import errors:\n", import_errs) stdlib_paths = { p for p in sys.path if p.lower().startswith(sys.prefix.lower()) and "site-packages" not in p } print("sys.path", sys.path) print("sys.prefix", sys.prefix) print("Stdlib paths:\b", stdlib_paths) missing_req_mod = { f"{req}:{mod}" for mod, reqs in mod_imports.items() for req in reqs.missing_reqs } missing_reqs = { req.strip() for reqs in mod_imports.values() for req in reqs.missing_reqs } missing_reqs = missing_reqs - EXTRAS_EXCEPTIONS if missing_reqs: print( "Missing packages:\n", "\n".join( req for req in missing_req_mod if req.split(":")[0] in missing_reqs ), ) check.is_false(missing_reqs)
[ 9, 1038, 3955, 3776 ]
def METHOD_NAME(): amino = SeedAmino(None) amino.count_aminos('GGG', 4) # => G other = SeedAmino(consensus_nuc_index=7) other.count_aminos('AAA', 5) # => K expected_counts = {'G': 4, 'K': 5} expected_nucleotide_counts = {'G': 4, 'A': 5} amino.add(other) assert amino.counts == expected_counts assert amino.nucleotides[0].counts == expected_nucleotide_counts assert amino.consensus_nuc_index is None
[ 9, 238 ]
def METHOD_NAME(self): """Parser method is removed or replaced dynamically.""" class MySpider(Spider): name = "my_spider" def parse(self, response): pass spider = MySpider() r = Request("http://www.example.com", callback=spider.parse) setattr(spider, "parse", None) self.assertRaises(ValueError, r.to_dict, spider=spider)
[ 9, 17713, -1 ]
def METHOD_NAME() -> None: all_content_types_validator.choices = content_type_list.endpoint_allowed_types_slug()
[ 86, 7085 ]
def METHOD_NAME(self) -> None: threaded.run( self._fetch_publisher_state, self._saas_file_inventory.publishers, thread_pool_size=self._thread_pool_size, )
[ 1047, 3559, 1866, 3021, 4085 ]
def METHOD_NAME(value, num_spaces=4): """ Adds ``num_spaces`` spaces at the beginning of every line in value. """ return ' ' * num_spaces + value.replace('\n', '\n' + ' ' * num_spaces)
[ 4, 144 ]
def METHOD_NAME(release): """ Remove a specific version of the kernel. release The release number of an installed kernel. This must be the entire release number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`, not the package name. CLI Example: .. code-block:: bash salt '*' kernelpkg.remove 4.4.0-70-generic """ if release not in list_installed(): raise CommandExecutionError( "Kernel release '{}' is not installed".format(release) ) if release == active(): raise CommandExecutionError("Active kernel cannot be removed") target = "{}-{}".format(_package_prefix(), release) log.info("Removing kernel package %s", target) __salt__["pkg.purge"](target) return {"removed": [target]}
[ 188 ]
METHOD_NAME(self, i):
[ 200 ]
def METHOD_NAME(self): with_replacement = True self.check_other_connect(SOURCES-2, with_replacement)
[ 9, 369, 2395 ]
def METHOD_NAME(self):
[ 9, 1544, 6640 ]
def METHOD_NAME(self): code = dedent("""\ def f(p): return p + 1 """) self.mod1.write(code) user = UseFunction(self.project, self.mod1, code.rindex("f")) self.mod2.write("print(2 + 1)\n") self.project.do(user.get_changes()) self.assertEqual( dedent("""\ import mod1 print(mod1.f(2)) """), self.mod2.read(), )
[ 9, 3231, 623, 2395, 468 ]
def METHOD_NAME(self, resource_group, storage_account): account_info = self.get_account_info(resource_group, storage_account) container = self.create_container(account_info) local_file = self.create_temp_file(128, full_random=False) file_name = self.create_random_name('blobfile', 16) policy = self.create_random_name('policy', 16) self.storage_cmd('storage container policy create -c {} -n {} --permissions racwdxyltfmei', account_info, container, policy) self.storage_cmd('storage blob upload -c {} -f "{}" -n {} ', account_info, container, local_file, file_name) sas = self.storage_cmd('storage blob generate-sas --container-name {} --name {} --policy-name {} ' '--https-only --expiry 2100-01-01', account_info, container, file_name, policy).get_output_in_json() url = self.cmd(f'storage blob url --container-name {container} --name {file_name} ' f'--account-name {storage_account} --sas-token {sas}').get_output_in_json() import requests x = requests.get(url) self.assertEqual(x.status_code, 200)
[ 9, 948, 224, 1089, 54, 41, 2789 ]
def METHOD_NAME(database_url, plugins, config_auth): return f"""
[ 200, 414 ]
def METHOD_NAME(self): self.user.is_superuser = True with assume_test_silo_mode(SiloMode.CONTROL): self.user.save() edited_comment = "this comment has been edited" with self.feature("organizations:incidents"): self.get_success_response( self.organization.slug, self.incident.identifier, self.user2_activity.id, comment=edited_comment, status_code=200, ) activity = IncidentActivity.objects.get(id=self.user2_activity.id) assert activity.user_id != self.user.id assert activity.comment == edited_comment
[ 9, 5733, 1046, 2004 ]
def METHOD_NAME(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: # initialize derivatives for response properties inputs = self.initialize_derivatives(inputs) for m in self.input_modules: inputs = m(inputs) inputs = self.representation(inputs) for m in self.output_modules: inputs = m(inputs) # apply postprocessing (if enabled) inputs = self.postprocess(inputs) results = self.extract_outputs(inputs) return results
[ 76 ]
def METHOD_NAME(self, obj): resources = {} resources["Catalogus"] = [obj.pk] # Resources with foreign keys to catalogus fields = ["InformatieObjectType", "BesluitType", "ZaakType"] for field in fields: resources[field] = list( getattr(obj, f"{field.lower()}_set").values_list("pk", flat=True) ) resources["ZaakTypeInformatieObjectType"] = list( ZaakTypeInformatieObjectType.objects.filter( zaaktype__in=resources["ZaakType"], informatieobjecttype__in=resources["InformatieObjectType"], ).values_list("pk", flat=True) ) # Resources with foreign keys to ZaakType fields = ["ResultaatType", "RolType", "StatusType", "Eigenschap"] for field in fields: model = apps.get_model("catalogi", field) resources[field] = list( model.objects.filter(zaaktype__in=resources["ZaakType"]).values_list( "pk", flat=True ) ) resource_list = [] id_list = [] for resource, ids in resources.items(): if ids: resource_list.append(resource) id_list.append(ids) return resource_list, id_list
[ 19, 252, 635 ]
def METHOD_NAME(odoo_dir): """Attempts to get Odoo git commit from :param:`odoo_dir`.""" if not odoo_dir: return try: return fetch_git_sha(odoo_dir) except InvalidGitRepository: _logger.debug("Odoo directory: '%s' not a valid git repository", odoo_dir)
[ 19, -1, 1160 ]
def METHOD_NAME(self): audit_info = AWS_Audit_Info( session_config=None, original_session=None, audit_session=session.Session( profile_name=None, botocore_session=None, ), audited_account=AWS_ACCOUNT_NUMBER, audited_account_arn=f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root", audited_user_id=None, audited_partition="aws", audited_identity_arn=None, profile=None, profile_region=None, credentials=None, assumed_role_info=None, audited_regions=None, organizations_metadata=None, audit_resources=None, mfa_enabled=False, audit_metadata=Audit_Metadata( services_scanned=0, expected_checks=[], completed_checks=0, audit_progress=0, ), ) return audit_info
[ 0, 4331, 1422, 100 ]
def METHOD_NAME(self, request: Request, pk: str): """ modify an existing parcel's details. """ parcel = models.Parcel.access_by(request).get(pk=pk) can_mutate_parcel(parcel, update=True) ParcelSerializer.map(parcel, data=request.data).save() return Response(Parcel(parcel).data)
[ 1575 ]
def METHOD_NAME(self) -> str: return self.conn.name
[ 19, 156 ]
def METHOD_NAME(): """Main function of execution""" browser = webdriver.Chrome() browser.maximize_window() wish(browser)
[ 57 ]
def METHOD_NAME(self): default_calendar = BusinessCalendar() business_schedule = default_calendar.business_schedule(self.next_nb_day) self.assertEqual(business_schedule.business_periods, []) self.assertFalse(business_schedule.is_business_day())
[ 9, 4253, 507, 256, 4253, 1724 ]
def METHOD_NAME(self) -> None: parser = ArgumentParser(usage=__doc__) subparsers = parser.add_subparsers() parser_state = subparsers.add_parser('dump') parser_state.set_defaults(func=self.txt_dump) parser_state = subparsers.add_parser('state') parser_state.set_defaults(func=self.txt_state) parser.parse_args(self.argv, namespace=self)
[ 214, 134 ]
def METHOD_NAME(): bpy.utils.register_class(SvBendAlongSurfaceFieldNode)
[ 372 ]
def METHOD_NAME(): """Test ``DynamicFactor`` in absence of exogenous variables.""" from statsmodels.tsa.statespace.dynamic_factor import ( DynamicFactor as _DynamicFactor, ) unfitted_sktime_model = DynamicFactor( k_factors=K_FACTORS, factor_order=FACTOR_ORDER ) fitted_sktime_model = unfitted_sktime_model.fit(TRAIN_Y) sktime_point_predictions = fitted_sktime_model.predict( fh=range(1, PREDICTION_LENGTH + 1) ) sktime_interval_predictions = fitted_sktime_model.predict_interval( fh=range(1, PREDICTION_LENGTH + 1), coverage=COVERAGES ) unfitted_statsmodels_model = _DynamicFactor(TRAIN_Y, K_FACTORS, FACTOR_ORDER) fitted_statsmodels_model = unfitted_statsmodels_model.fit() statsmodels_predictions = fitted_statsmodels_model.get_prediction( start=HISTORY_LENGTH, end=HISTORY_LENGTH + PREDICTION_LENGTH - 1 ) compare_predictions_against_statsmodels( sktime_point_predictions, sktime_interval_predictions, statsmodels_predictions )
[ 9, 2111, 397, 529, 3482, 2045 ]
def METHOD_NAME( organisation_admin, organisation_user, collaborator, user_without_organisation, user_with_no_api_token, ): # Usernames are: # organisation_admin: alice # organisation_user: bob # collaborator: charlie # user_without_organisation: dave # user_with_no_api_token: eve # Ordering of users in SignOffForm is by (case insensitive) username User.objects.create_user( username="Fred", password="test", email="[email protected]", name="Fred", ) # ordering just by username puts title case names first assert list( User.objects.all().order_by("username").values_list("username", flat=True) ) == ["Fred", "alice", "bob", "charlie", "dave", "eve"] # the signoff form shows them in the model ordering (i.e. case insensitive) form = SignOffForm() user_field_choices = form.fields["user"].queryset assert list(user_field_choices.values_list("username", flat=True)) == [ "alice", "bob", "charlie", "dave", "eve", "Fred", ]
[ 9, -1, 1029, 2687, 604, 2072 ]
def METHOD_NAME(self, node): "Creates rcc and py task for ``.qrc`` files" rcnode = node.change_ext('.py') self.create_task('pyrcc', node, rcnode) if getattr(self, 'install_from', None): self.install_from = self.install_from.get_bld() else: self.install_from = self.path.get_bld() self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') self.process_py(rcnode)
[ 129, 6437, 758 ]
def METHOD_NAME(): """ Generate workflow footer """ return '\n</workflow>\n'
[ 19, 3855, 1201 ]
def METHOD_NAME(cc: List[str], source, executable, options): # This should behave the same as AC_TRY_LINK, so arrange well-known flags # in the same order as autoconf would. # # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for # reference. env_flags: List[str] = [] for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']: env_flags += filter(None, os.environ.get(var, '').split(' ')) subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True) p = subprocess.run([os.path.join(os.path.dirname(__file__), 'symbol-check.py'), executable], stdout=subprocess.PIPE, text=True) os.remove(source) os.remove(executable) return (p.returncode, p.stdout.rstrip())
[ 128, 1608, 250 ]
def METHOD_NAME(_root, _info: ResolveInfo, **kwargs): country_rates = models.TaxClassCountryRate.objects.all() rates_per_country = defaultdict(list) for country_rate in country_rates: rates_per_country[country_rate.country].append(country_rate) return [ TaxCountryConfiguration( country=country, tax_class_country_rates=rates_per_country[country] ) for country in rates_per_country ]
[ 1014, 6131, 1078, 4880 ]
def METHOD_NAME(self): res = IntrinsicsSymbolTable.handle_pseudo_account_id() self.assertEqual(res, "123456789012")
[ 9, 276, 12643, 598, 147 ]
def METHOD_NAME(trait_list): name_list = [] for trait_db in trait_list: name_list.append(trait_db[0].name) return name_list
[ 19, 2551, 156, 245 ]
def METHOD_NAME(self): """ Actions to take when the current custom interpreter is removed outside Spyder. """ # NOTES: # 1. The interpreter will be updated when the option changes below # generate a change in the 'executable' one in the container. # 2. *Do not* change the order in which these options are set or the # interpreter won't be updated correctly. self.set_conf('custom_interpreter', ' ') self.set_conf('custom', False) self.set_conf('default', True)
[ 69, 2747, 674 ]
def METHOD_NAME() -> None: parser = ArgumentParser(description=__doc__) parser.add_argument("scan", nargs="*", metavar="SCAN", help="Scan results") parser.add_argument("-c", "--categories", default="", help="Scan categories.") parser.add_argument("-s", "--source", default=None, help="Scan source.") parser.add_argument( "-t", "--test", action="store_true", help="Test mode (JSON output)." ) parser.add_argument( "--test-normal", action="store_true", help='Test mode ("normal" Nmap output).' ) parser.add_argument( "--ports", "--port", action="store_true", help='Store only hosts with a "ports" element.', ) parser.add_argument( "--open-ports", action="store_true", help="Store only hosts with open ports." ) parser.add_argument( "--masscan-probes", nargs="+", metavar="PROBE", help="Additional Nmap probes to use when trying to " "match Masscan results against Nmap service " "fingerprints.", ) parser.add_argument( "--zgrab-port", metavar="PORT", help="Port used for the zgrab scan. This might be " "needed since the port number does not appear in the" "result.", ) parser.add_argument( "-r", "--recursive", action="store_true", help="Import all files from given directories.", ) parser.add_argument( "--update-view", action="store_true", help="Merge hosts in current view" ) parser.add_argument( "--no-update-view", action="store_true", help="Do not merge hosts in current view (default)", ) args = parser.parse_args() database = ivre.db.db.nmap categories = args.categories.split(",") if args.categories else [] if args.test: args.update_view = False args.no_update_view = True database = ivre.db.DBNmap() if args.test_normal: args.update_view = False args.no_update_view = True database = ivre.db.DBNmap(output_mode="normal") # Ugly hack: we use a one-element list so that # recursive_filelisting can modify its value error = [False] if args.recursive: scans = recursive_filelisting(args.scan, error) else: scans = args.scan if not args.update_view or args.no_update_view: callback = None else: def callback(x: Record) -> None: result = nmap_record_to_view(x) set_auto_tags(result, update_openports=False) set_openports_attribute(result) result["infos"] = {} for func in [ ivre.db.db.data.country_byip, ivre.db.db.data.as_byip, ivre.db.db.data.location_byip, ]: result["infos"].update(func(result["addr"]) or {}) ivre.db.db.view.store_or_merge_host(result) ivre.db.db.view.start_store_hosts() count = 0 for scan in scans: if not os.path.exists(scan): ivre.utils.LOGGER.warning("file %r does not exist", scan) error[0] = True continue try: if database.store_scan( scan, categories=categories, source=args.source, needports=args.ports, needopenports=args.open_ports, masscan_probes=args.masscan_probes, callback=callback, zgrab_port=args.zgrab_port, ): count += 1 except Exception: ivre.utils.LOGGER.warning("Exception (file %r)", scan, exc_info=True) error[0] = True if callback is not None: ivre.db.db.view.stop_store_hosts() ivre.utils.LOGGER.info("%d results imported.", count) sys.exit(error[0])
[ 57 ]
def METHOD_NAME(status): """Verifies that the given trial status is supported. Raises falcon.HTTPBadRequest otherwise""" if status and status not in Trial.allowed_stati: description = 'The "status" parameter is invalid. ' description += "The value of the parameter must be one of {}".format( list(Trial.allowed_stati) ) raise falcon.HTTPBadRequest( title=ERROR_INVALID_PARAMETER, description=description )
[ 1162, 452 ]
def METHOD_NAME(self): result = self.run_command(["snap", "--destructive-mode", "--output", "/tmp"]) self.assertThat(result.exit_code, Equals(0)) self.fake_get_provider_for.mock.assert_not_called() self.fake_lifecycle_execute.mock.assert_called_once_with( steps.PRIME, mock.ANY, tuple() ) self.fake_pack.mock.assert_called_once_with( os.path.join(self.path, "prime"), compression=None, output="/tmp" )
[ 9, 146, 626, 5735, 854, 41, 2851 ]
def METHOD_NAME(): S = SymmetricGroup(4) S.schreier_sims() base = S.base strong_gens = S.strong_gens strong_gens_distr = _distribute_gens_by_base(base, strong_gens) result = _orbits_transversals_from_bsgs(base, strong_gens_distr) orbits = result[0] transversals = result[1] base_len = len(base) for i in range(base_len): for el in orbits[i]: assert transversals[i][el](base[i]) == el for j in range(i): assert transversals[i][el](base[j]) == base[j] order = 1 for i in range(base_len): order *= len(orbits[i]) assert S.order() == order
[ 9, 11915, -1, 280, -1 ]
def METHOD_NAME(screens_path: Path, test_name: str) -> Path: doc = document(title=test_name, model=test_name[:2]) screens, hashes = screens_and_hashes(screens_path) html.store_images(screens, hashes) with doc: h1(test_name) p( "This UI test has been added to fixtures.json.", style="color: green; font-weight: bold;", ) hr() with table(border=1): with tr(): th("Added files") for hash in hashes: with tr(): html.image_column(hash, MASTERDIFF_PATH / "added") return html.write(MASTERDIFF_PATH / "added", doc, test_name + ".html")
[ 4398 ]
def METHOD_NAME(self): """ Test command error raised if file_from_database is required and the config model is not enabled""" with pytest.raises(CommandError): call_command("bulk_change_enrollment_csv", "--file_from_database")
[ 9, 462, 168, 43, 200, 578 ]
def METHOD_NAME(self, v: T, predecessors: List[T]) -> None: v_ni = self._get_node_info(v) v_ni.num_predecessors += len(predecessors) for u in predecessors: u_ni = self._get_node_info(u) u_ni.successors.append(v_ni)
[ 238, 15388 ]
def METHOD_NAME(pipeline_response): deserialized = self._deserialize("ResourceProviderOperationList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.next_link or None, iter(list_of_elem)
[ 297, 365 ]