text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME(self, fields, rng, rational_function):
    phi = fields[-1]
    inv_sqrt_M = self.inv_sqrt_matrix(fields, rational_function)
    eta = g.lattice(phi)
    rng.cnormal(eta, sigma=2.0**-0.5)  # 1/sqrt(2)
    phi @= inv_sqrt_M * eta
    return g.norm2(eta)
[ 1100 ]
def METHOD_NAME(self) -> None:
    realm = get_realm("zulip")
    stream = ensure_stream(realm, "zulip features", acting_user=None)

    UserProfile.objects.filter(email__contains="stage").delete()
    starr = do_create_user(
        "[email protected]", "password", realm, "Ada Starr", acting_user=None
    )
    self.set_avatar(starr, "static/images/characters/starr.png")
    fisher = do_create_user(
        "[email protected]", "password", realm, "Bel Fisher", acting_user=None
    )
    self.set_avatar(fisher, "static/images/characters/fisher.png")
    twitter_bot = do_create_user(
        "[email protected]",
        "password",
        realm,
        "Twitter Bot",
        bot_type=UserProfile.DEFAULT_BOT,
        acting_user=None,
    )
    self.set_avatar(twitter_bot, "static/images/features/twitter.png")

    bulk_add_subscriptions(
        realm, [stream], list(UserProfile.objects.filter(realm=realm)), acting_user=None
    )

    staged_messages: List[Dict[str, Any]] = [
        {
            "sender": starr,
            "content": "Hey @**Bel Fisher**, check out Zulip's Markdown formatting! "
            "You can have:\n* bulleted lists\n * with sub-bullets too\n"
            "* **bold**, *italic*, and ~~strikethrough~~ text\n"
            "* LaTeX for mathematical formulas, both inline -- $$O(n^2)$$ -- and displayed:\n"
            "```math\n\\int_a^b f(t)\\, dt=F(b)-F(a)\n```",
        },
        {
            "sender": fisher,
            "content": "My favorite is the syntax highlighting for code blocks\n"
            "```python\ndef fib(n: int) -> int:\n # returns the n-th Fibonacci number\n"
            " return fib(n-1) + fib(n-2)\n```",
        },
        {
            "sender": starr,
            "content": "I think you forgot your base case there, Bel :laughing:\n"
            "```quote\n```python\ndef fib(n: int) -> int:\n # returns the n-th Fibonacci number\n"
            " return fib(n-1) + fib(n-2)\n```\n```",
        },
        {
            "sender": fisher,
            "content": "I'm also a big fan of inline link, tweet, video, and image previews. "
            "Check out this picture of Çet Whalin[](/static/images/features/whale.png)!",
        },
        {
            "sender": starr,
            "content": "I just set up a custom linkifier, "
            "so `#1234` becomes [#1234](github.com/zulip/zulip/1234), "
            "a link to the corresponding GitHub issue.",
        },
        {
            "sender": twitter_bot,
            "content": "https://twitter.com/gvanrossum/status/786661035637772288",
        },
        {
            "sender": fisher,
            "content": "Oops, the Twitter bot I set up shouldn't be posting here. Let me go fix that.",
        },
    ]

    messages = [
        internal_prep_stream_message(
            message["sender"],
            stream,
            "message formatting",
            message["content"],
        )
        for message in staged_messages
    ]
    message_ids = do_send_messages(messages)

    preview_message = Message.objects.get(
        id__in=message_ids, content__icontains="image previews"
    )
    whale = get_emoji_data(realm.id, "whale")
    do_add_reaction(starr, preview_message, "whale", whale.emoji_code, whale.reaction_type)

    twitter_message = Message.objects.get(id__in=message_ids, content__icontains="gvanrossum")
    # Setting up a twitter integration in dev is a decent amount of work. If you need
    # to update this tweet, either copy the format below, or send the link to the tweet
    # to chat.zulip.org and ask an admin of that server to get you the rendered_content.
    twitter_message.rendered_content = (
        "<p><a>https://twitter.com/gvanrossum/status/786661035637772288</a></p>\n"
        '<div class="inline-preview-twitter"><div class="twitter-tweet">'
        '<a><img class="twitter-avatar" '
        'src="https://pbs.twimg.com/profile_images/424495004/GuidoAvatar_bigger.jpg"></a>'
        "<p>Great blog post about Zulip's use of mypy: "
        "<a>http://blog.zulip.org/2016/10/13/static-types-in-python-oh-mypy/</a></p>"
        "<span>- Guido van Rossum (@gvanrossum)</span></div></div>"
    )
    twitter_message.save(update_fields=["rendered_content"])

    # Put a short pause between the whale reaction and this, so that the
    # thumbs_up shows up second
    thumbs_up = get_emoji_data(realm.id, "thumbs_up")
    do_add_reaction(
        starr, preview_message, "thumbs_up", thumbs_up.emoji_code, thumbs_up.reaction_type
    )
[ 238, 277, 1901, 5348 ]
def METHOD_NAME(self, username, passwd):
    conn = MockVirConnect(vms=self._vms)

    def connect(uri, username, password):
        return conn

    with MonkeyPatchScope([
        (libvirtconnection, 'open_connection', connect),
    ]), make_env() as env:
        vmInfo = {'vmName': self._vms[0].name()}
        kvm = v2v.KVMCommand('qemu+tcp://domain', username, passwd,
                             vmInfo, uuid.uuid4(), None)
        if passwd:
            kvm._passwd_file = env.password
        kvm._source_images = lambda: (['/fake/source'], ['file'])
        kvm._dest_images = lambda: [env.destination]

        kvm2ovirt.main(kvm._command())

        with open(env.destination) as f:
            actual = f.read()
        self.assertEqual(actual, FakeVolume().data())
[ 9, 67, 136, 171, 2072 ]
def METHOD_NAME(*, record: colrev.record.Record, assume_complete: bool) -> str:
    """Create the colrev_id"""
    __check_colrev_id_preconditions(
        record=record,
        assume_complete=assume_complete,
    )
    srep = __get_colrev_id_from_record(record=record)

    # Safeguard against titles that are rarely distinct
    if any(x in srep for x in ["|minitrack-introduction|"]):
        raise colrev_exceptions.NotEnoughDataToIdentifyException(
            msg="Title typically non-distinct", missing_fields=["title"]
        )

    return srep
[ 129, -1, 147 ]
def METHOD_NAME(self, args):
    from fairseq import models

    model = models.METHOD_NAME(args, self)
    model.register_classification_head(
        'sentence_classification_head',
        num_classes=self.args.num_classes,
    )
    return model
[ 56, 578 ]
def METHOD_NAME(self):
    request = self.factory.get("/en/test/?a=1&a=2&b=3&c=OK&d=Hello%2C+World%21")
    params_to_remove = ["only_query_string", "c"]
    params_to_add = {"a": 3, "b": 4}
    path = add_to_query({"request": request}, *params_to_remove, **params_to_add)
    # Then we should get the template names saved in context variables
    self.assertEquals(
        path,
        "?a=1&amp;a=2&amp;a=3&amp;b=3&amp;b=4&amp;d=Hello%2C+World%21",
    )
[ 9, 238, 24, 539, 246, 539, 144 ]
def METHOD_NAME(*, machine_name, object_name, counter_name, instance_name=None, instance_index=0):
    # More info: https://docs.microsoft.com/en-us/windows/win32/perfctrs/specifying-a-counter-path
    #
    # https://docs.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhmakecounterpatha
    # https://mhammond.github.io/pywin32/win32pdh__MakeCounterPath_meth.html
    return win32pdh.MakeCounterPath((machine_name, object_name, instance_name, None, instance_index, counter_name))
[ 363, 2469, 157 ]
def METHOD_NAME(self):
    """This likely fails on non-UTF-8 systems (i.e. MS Win)"""
    run_command("g.search.modules", keyword="Příšerný kůň")
[ 9, 440, 298, -1, 2503 ]
def METHOD_NAME(self):
    """A video recording can be ended if already started."""
    start = timezone.now()
    stop = start + timedelta(minutes=10)
    video = VideoFactory(
        recording_slices=[{"start": to_timestamp(start)}],
    )
    with mock.patch.object(timezone, "now", return_value=stop):
        stop_recording(video)
    self.assertEqual(
        video.recording_slices,
        [
            {
                "start": to_timestamp(start),
                "stop": to_timestamp(stop),
                "status": PENDING,
            }
        ],
    )
[ 9, 3186, 1781, 2104, 631, 2104, 997 ]
def METHOD_NAME(self) -> str:
    """
    The value of the send key.
    """
    return pulumi.get(self, "send_key_value")
[ 353, 59, 99 ]
def METHOD_NAME(self, response, key, number):
    self.checks.append(key == "value")
    self.checks.append(number == 123)
    self.crawler.stats.inc_value("boolean_checks", 2)
    yield response.follow(
        self.mockserver.url("/two"),
        self.parse_second,
        cb_kwargs={"new_key": "new_value"},
    )
[ 214, 865 ]
def METHOD_NAME(self):
    """Test with a valid pending user and invalid password (trigger logout counter)"""
    plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
    plan.context[PLAN_CONTEXT_PENDING_USER] = self.user
    session = self.client.session
    session[SESSION_KEY_PLAN] = plan
    session.save()

    for _ in range(self.stage.failed_attempts_before_cancel - 1):
        response = self.client.post(
            reverse(
                "authentik_api:flow-executor",
                kwargs={"flow_slug": self.flow.slug},
            ),
            # Form data
            {"password": self.user.username + "test"},
        )
        self.assertEqual(response.status_code, 200)
        self.assertStageResponse(
            response,
            flow=self.flow,
            response_errors={"password": [{"string": "Invalid password", "code": "invalid"}]},
        )

    response = self.client.post(
        reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug}),
        # Form data
        {"password": self.user.username + "test"},
    )
    self.assertEqual(response.status_code, 200)
    # To ensure the plan has been cancelled, check SESSION_KEY_PLAN
    self.assertNotIn(SESSION_KEY_PLAN, self.client.session)
    self.assertStageResponse(response, flow=self.flow, error_message="Unknown error")
[ 9, 532, 2897, 15059 ]
def METHOD_NAME(fulfillment):
    sample_fulfillment_payload = generate_sample_payload(
        WebhookEventAsyncType.FULFILLMENT_CREATED
    )[0]
    fulfillment_payload = json.loads(generate_fulfillment_payload(fulfillment))[0]
    order = fulfillment.order

    obj_id = graphene.Node.to_global_id("Fulfillment", fulfillment.id)
    order_id = graphene.Node.to_global_id("Order", order.id)
    assert obj_id == sample_fulfillment_payload["id"]

    # Check anonymized data differ
    assert order_id == sample_fulfillment_payload["order"]["id"]
    assert order.user_email != sample_fulfillment_payload["order"]["user_email"]
    assert (
        order.shipping_address.street_address_1
        != sample_fulfillment_payload["order"]["shipping_address"]["street_address_1"]
    )
    assert order.metadata != sample_fulfillment_payload["order"]["metadata"]
    assert (
        order.private_metadata != sample_fulfillment_payload["order"]["private_metadata"]
    )

    # Remove anonymized data
    sample_fulfillment_payload["order"] = _remove_anonymized_order_data(
        sample_fulfillment_payload["order"]
    )
    fulfillment_payload["order"] = _remove_anonymized_order_data(
        fulfillment_payload["order"]
    )

    # Compare the payloads
    assert sample_fulfillment_payload == fulfillment_payload
[ 9, 567, 734, 288, 14626, 152 ]
def METHOD_NAME(self):
    return []
[ 141, 275 ]
def METHOD_NAME(self):
    """Main process to obtain the hierarchical segmentation of a given track."""
    raise NotImplementedError("This method does not return hierarchical "
                              "segmentations.")
[ 356, 3228 ]
def METHOD_NAME(self):
    self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
    cmake = self._configure_cmake()
    cmake.install()
    tools.rmdir(os.path.join(self.package_folder, "share"))
    tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
    tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
    # Remove MS runtime files
    for dll_pattern_to_remove in ["concrt*.dll", "msvcp*.dll", "vcruntime*.dll"]:
        tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), dll_pattern_to_remove)
[ 360 ]
def METHOD_NAME():
    set_current_org(Organization.system())
[ 0, 24, 112, 3411 ]
def METHOD_NAME(self, sv):
    s = sv.get()
    self.component.compName = s
    self.component.compImgName = s[:1].lower() + s[1:] if s else ''
    self.imgNameVar.set('imageName: %s' % self.component.compImgName)
[ 156, 1180 ]
def METHOD_NAME(self):
    torch.utils.rename_privateuse1_backend("extension_device")
    register_backend_for_device(
        "extension_device", ExtensionScheduling, ExtensionWrapperCodegen
    )
    self.assertTrue(
        get_scheduling_for_device("extension_device") == ExtensionScheduling
    )
    self.assertTrue(
        get_wrapper_codegen_for_device("extension_device") == ExtensionWrapperCodegen
    )

    self.assertFalse(self.module.custom_op_called())
    device = self.module.custom_device()
    x = torch.empty(2, 16).to(device=device).fill_(1)
    self.assertTrue(self.module.custom_op_called())
    y = torch.empty(2, 16).to(device=device).fill_(2)
    z = torch.empty(2, 16).to(device=device).fill_(3)
    ref = torch.empty(2, 16).fill_(5)

    self.assertTrue(x.device == device)
    self.assertTrue(y.device == device)
    self.assertTrue(z.device == device)

    def fn(a, b, c):
        return a * b + c

    metrics.reset()
    opt_fn = torch.compile()(fn)
    code = run_and_get_cpp_code(opt_fn, x, y, z)
    FileCheck().check("void kernel").check("loadu").check("extension_device").run(
        code
    )
    opt_fn(x, y, z)
    res = opt_fn(x, y, z)
    self.assertEqual(ref, res.to(device="cpu"))
[ 9, 1452, 398, 2213 ]
def METHOD_NAME(self):
    rule = self.get_rule(data={"integration": self.integration.id})

    form = rule.get_form_instance()
    assert form.is_valid()
[ 9, 532, 155 ]
def METHOD_NAME(
    wrapped, instance: http.client.HTTPConnection, args, kwargs
):
    result = wrapped(*args, **kwargs)
    trysetip(instance, loglevel=logging.WARNING)
    return result
[ 9161, 707 ]
def METHOD_NAME(self):
    if self.token_has_expired():
        self.get_access_token()
    return self._server
[ 19, 163 ]
def METHOD_NAME():
    support.run_unittest(TestSuper)
[ 9, 57 ]
def METHOD_NAME(self) -> bool:
    return self.async_flag
[ 137, 22, -1 ]
def METHOD_NAME(image):
    base64_str = str(base64.b64encode(image), 'utf-8')
    return base64_str
[ 421, 660, 24, 2426 ]
def METHOD_NAME(self):
    """
    :avocado: tags=machine:pc
    :avocado: tags=accel:tcg
    """
    self.require_accelerator("tcg")
    self.vm.add_args("-accel", "tcg")
    self.launch_and_wait(set_up_ssh_connection=False)
[ 9, 6570, 13127, 6305 ]
def METHOD_NAME(self, *args, **kwargs):
    raise FileNotFoundError(f'{self} is not a file')
[ 203, 526 ]
def METHOD_NAME():
    """
    Make sure info works on xarray.Dataset 1-D inputs with a time column.
    """
    table = xr.Dataset(
        coords={"index": [0, 1, 2, 3, 4]},
        data_vars={
            "z": ("index", [10, 13, 12, 15, 14]),
            "time": ("index", pd.date_range(start="2020-01-01", periods=5)),
        },
    )
    output = info(data=table)
    expected_output = (
        "<vector memory>: N = 5 <10/15> <2020-01-01T00:00:00/2020-01-05T00:00:00>\n"
    )
    assert output == expected_output
[ 9, 100, 7724, 126, 104, 105 ]
def METHOD_NAME(argument: bytes, expected=None):
    _validate_type(argument, expected)
[ 321 ]
def METHOD_NAME(cls, action):
    f = '{}.{}'.format(cls.__name__, action)
    return f if cls.isOld__() else '{} ({})'.format(f, Flow.__name__)
[ 156 ]
def METHOD_NAME():
    testsuite = unittest.TestSuite()
    loader = unittest.defaultTestLoader.loadTestsFromTestCase
    testsuite.addTest(loader(TestErrorModel))
    return testsuite
[ 482 ]
def METHOD_NAME(self):
    # This fixes the damage that test_various___class___pathologies does.
    nonlocal __class__
    __class__ = TestSuper
[ 531, 481 ]
def METHOD_NAME(self, url: URL) -> OntologyElementMetadata:
    """Load an external data type."""
    return async_to_sync(self.inner.METHOD_NAME(url))
[ 557, 751, 365, 44 ]
def METHOD_NAME(
    self, schedule_definition_id: str, id: str, **kwargs: Any
) -> Iterable["_models.AccessReviewContactedReviewer"]:
    """Get access review instance contacted reviewers.

    :param schedule_definition_id: The id of the access review schedule definition. Required.
    :type schedule_definition_id: str
    :param id: The id of the access review instance. Required.
    :type id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either AccessReviewContactedReviewer or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewContactedReviewer]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop(
        "api_version", _params.pop("api-version", self._api_version or "2021-12-01-preview")
    )
    cls: ClsType[_models.AccessReviewContactedReviewerListResult] = kwargs.pop("cls", None)

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_request(
                schedule_definition_id=schedule_definition_id,
                id=id,
                api_version=api_version,
                template_url=self.METHOD_NAME.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        deserialized = self._deserialize("AccessReviewContactedReviewerListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)  # type: ignore
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)
[ 245 ]
def METHOD_NAME(self):
    with self.assertRaises(OasisException):
        create_analysis_settings_json('/tmp/non_existing_dir_{}'.format(uuid.uuid4().hex))
[ 9, 2851, 870, 130, 1985, 2852, 442 ]
def METHOD_NAME(self) -> int:
    return self._ttl
[ 596, 858 ]
def METHOD_NAME(self):
    bot = CandidateBot(self.person.pk)
    with self.assertRaises(ValueError):
        bot.edit_field("linkedin", "https://linkedin.com/CandidateBot")
[ 9, 3438, 2004, 10448 ]
def METHOD_NAME(self, sample: dict[str, Any]) -> dict[str, Any]:
    """Apply the augmentation.

    Args:
        sample: Input sample.

    Returns:
        Augmented sample.
    """
    for key in ["image", "mask"]:
        dtype = sample[key].dtype
        # All inputs must be float
        sample[key] = sample[key].float()
        sample[key] = self.aug(sample[key])
        sample[key] = sample[key].to(dtype)
        # Kornia adds batch dimension
        sample[key] = rearrange(sample[key], "() c h w -> c h w")
    return sample
[ 76 ]
def METHOD_NAME(self) -> str:
    """
    (Required only by `compute.RegionBackendServiceIamPolicy`) The policy data generated by
    a `organizations_get_iam_policy` data source.
    """
    return pulumi.get(self, "policy_data")
[ 54, 365 ]
def METHOD_NAME(
    word: str,
    callback: _Callback,
    triggers: Sequence[str] = ["space"],
    match_suffix: bool = False,
    timeout: float = 2
) -> Callable[[], None]: ...
[ 238, 2236, 4130 ]
def METHOD_NAME(self):
    runSimulation(self, 40., restart=False, interval=10., **params)
    sim1 = runSimulation(self, 80., restart=True, **params)
    sim2 = runSimulation(self, 80., restart=False, **params)
    compareSim(self, sim1, sim2)
[ 74, 9 ]
def METHOD_NAME():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--num_samples", type=int, default=50,
        help="Number of Monte Carlo samples to use to estimate performance.")
    parser.add_argument(
        "--torque_limit", type=float, default=2.0,
        help="Torque limit of the pendulum.")
    args = parser.parse_args()
    if args.torque_limit < 0:
        raise ValueError("Please supply a nonnegative torque limit.")

    # Assemble the Pendulum plant.
    builder = DiagramBuilder()
    pendulum = builder.AddSystem(MultibodyPlant(0.0))
    Parser(pendulum).AddModelsFromUrl(
        url="package://drake/examples/pendulum/Pendulum.urdf")
    pendulum.Finalize()

    # Set the pendulum to start at uniformly random
    # positions (but always zero velocity).
    elbow = pendulum.GetMutableJointByName("theta")
    upright_theta = np.pi
    theta_expression = Variable(
        name="theta", type=Variable.Type.RANDOM_UNIFORM)*2.*np.pi
    elbow.set_random_angle_distribution(theta_expression)

    # Set up LQR, with high position gains to try to ensure the
    # ROA is close to the theoretical torque-limited limit.
    Q = np.diag([100., 1.])
    R = np.identity(1)*0.01
    linearize_context = pendulum.CreateDefaultContext()
    linearize_context.SetContinuousState(
        np.array([upright_theta, 0.]))
    actuation_port = pendulum.get_actuation_input_port()
    actuation_port.FixValue(linearize_context, 0)
    controller = builder.AddSystem(
        LinearQuadraticRegulator(
            pendulum, linearize_context, Q, R,
            np.zeros(0), actuation_port.get_index()))

    # Apply the torque limit.
    torque_limit = args.torque_limit
    torque_limiter = builder.AddSystem(
        Saturation(min_value=np.array([-torque_limit]),
                   max_value=np.array([torque_limit])))

    builder.Connect(controller.get_output_port(0),
                    torque_limiter.get_input_port(0))
    builder.Connect(torque_limiter.get_output_port(0),
                    pendulum.get_actuation_input_port())
    builder.Connect(pendulum.get_state_output_port(),
                    controller.get_input_port(0))
    diagram = builder.Build()

    # Perform the Monte Carlo simulation.
    def make_simulator(generator):
        ''' Create a simulator for the system using the given generator. '''
        simulator = Simulator(diagram)
        simulator.set_target_realtime_rate(0)
        simulator.Initialize()
        return simulator

    def calc_wrapped_error(system, context):
        ''' Given a context from the end of the simulation, calculate an
            error -- which for this stabilizing task is the distance from
            the fixed point. '''
        state = diagram.GetSubsystemContext(
            pendulum, context).get_continuous_state_vector()
        error = state.GetAtIndex(0) - upright_theta
        # Wrap error to [-pi, pi].
        return (error + np.pi) % (2*np.pi) - np.pi

    num_samples = args.num_samples
    results = MonteCarloSimulation(
        make_simulator=make_simulator, output=calc_wrapped_error,
        final_time=1.0, num_samples=num_samples, generator=RandomGenerator())

    # Compute results.
    # The "success" region is fairly large since some "stabilized" trials
    # may still be oscillating around the fixed point. Failed examples are
    # consistently much farther from the fixed point than this.
    binary_results = np.array([abs(res.output) < 0.1 for res in results])
    passing_ratio = float(sum(binary_results)) / len(results)
    # 95% confidence interval for the passing ratio.
    passing_ratio_var = 1.96 * np.sqrt(
        passing_ratio*(1. - passing_ratio)/len(results))

    print("Monte-Carlo estimated performance across %d samples: "
          "%.2f%% +/- %0.2f%%" % (
              num_samples, passing_ratio*100, passing_ratio_var*100))

    # Analytically compute the best possible ROA, for comparison, by
    # calculating where the torque needed to lift the pendulum exceeds
    # the torque limit.
    arm_radius = 0.5
    arm_mass = 1.0
    # torque = r x f = r * (m * 9.81 * sin(theta))
    # theta = asin(torque / (r * m))
    if torque_limit <= (arm_radius * arm_mass * 9.81):
        roa_half_width = np.arcsin(torque_limit
                                   / (arm_radius * arm_mass * 9.81))
    else:
        roa_half_width = np.pi

    roa_as_fraction_of_state_space = roa_half_width / np.pi
    print("Max possible ROA = %0.2f%% of state space, which should"
          " match with the above estimate." % (
              100 * roa_as_fraction_of_state_space))
[ 57 ]
async def METHOD_NAME(self):
    wallet_record = WalletRecord(wallet_id="test", settings={})
    create_profile_stub = asyncio.Future()
    create_profile_stub.set_result("")
    with async_mock.patch(
        "aries_cloudagent.multitenant.askar_profile_manager.AskarProfile"
    ) as AskarProfile:
        sub_wallet_profile = AskarProfile(None, None)
        sub_wallet_profile.context.copy.return_value = InjectionContext()
        sub_wallet_profile.store.create_profile.return_value = create_profile_stub
        self.manager._multitenant_profile = sub_wallet_profile

        await self.manager.get_wallet_profile(
            self.profile.context, wallet_record, provision=True
        )

        sub_wallet_profile.store.create_profile.assert_called_once_with(
            wallet_record.wallet_id
        )
[ 9, 19, 2945, 337, 427, 129, 337 ]
def METHOD_NAME(loops=LOOPS):
    return Proc0(loops)
[ 4750 ]
def METHOD_NAME(
    self,
    service_status: Dict[str, Dict[str, List[str]]],
) -> Optional[Dict[str, Dict[str, List[str]]]]:
    for service, status in service_status.items():
        # was the service not active?
        active_status_first_check = status[SERVICE_ACTIVE][0]
        active_status_second_check = status[SERVICE_ACTIVE][1]
        not_active = (
            active_status_first_check != 'active'
            or active_status_second_check != 'active'
        )
        if not_active:
            status[SERVICE_RESULT].append('not active')

        # was the service restarted?
        start_time_first_check = status[SERVICE_START_TIME][0]
        start_time_second_check = status[SERVICE_START_TIME][1]
        restarted = start_time_first_check != start_time_second_check
        if restarted:
            status[SERVICE_RESULT].append('restarted')

    return {
        service: status
        for service, status in service_status.items()
        if len(status[SERVICE_RESULT]) > 0
    }
[ 19, 1423, 3186 ]
def METHOD_NAME(self, return_obj, response_code, raw_response, limit):
    """
    status progress calculation
    :param return_obj: dict, building return response dict
    :param raw_response: str, Api response,
    :param response_code: int, Api call response code
    :param limit: int, limit for status calculation
    """
    if 199 < response_code < 300:
        response_dict = json.loads(raw_response)
        return_obj['success'] = True
        return_obj['status'] = self.__getStatus(response_dict['status'])
        results = int(response_dict['hit'])
        if return_obj['status'] == 'COMPLETED':
            return_obj['progress'] = 100
        elif return_obj['status'] == 'RUNNING':
            progress = (results / int(limit)) * 100
            progress_floor = math.floor(progress)
            return_obj['progress'] = progress_floor
            if return_obj['progress'] >= 100:
                return_obj['progress'] = 100
                return_obj['status'] = 'COMPLETED'
        else:
            return_obj['progress'] = 0
    # arcsight logger error codes - currently unavailable state
    elif response_code in [500, 503]:
        response_string = raw_response.decode()
        ErrorResponder.fill_error(return_obj, response_string, ['message'], connector=self.connector)
    elif isinstance(json.loads(raw_response), dict):
        response_error = json.loads(raw_response)
        response_dict = response_error['errors'][0]
        ErrorResponder.fill_error(return_obj, response_dict, ['message'], connector=self.connector)
    else:
        raise Exception(raw_response)
[ 452, 3064 ]
def METHOD_NAME(v):
    warnings.warn(
        "The converter `(field2)ustring` is deprecated "
        "and will be removed in Zope 6. "
        "Please use `(field2)string` instead.",
        DeprecationWarning)
    return field2string(v)
[ -1 ]
def METHOD_NAME(self):
    """Variant of test_issue4156_loop_vars_leak.
    Interleaves loops and allocations
    """
    @njit
    def udt(N):
        sum_vec = 0
        for n in range(N):
            vec = np.zeros(7)
            for n in range(N):
                z = np.zeros(7)
            sum_vec += vec[0] + z[0]
        return sum_vec

    got = udt(4)
    expect = udt.py_func(4)
    self.assertPreciseEqual(got, expect)
[ 9, -1, 1751, 1659, 6191, -1 ]
def METHOD_NAME(self):
    """
    Performs self.n_iter EM steps.
    """
    self.initialize_centroids()
    for i in range(self.n_iter):
        try:
            self.step(i)
        except EmptyClusterResolveError:
            break
[ 421 ]
def METHOD_NAME(self) -> None:
    patch_loader(__name__, "fixtures/duplicate")
    with self.assertRaisesRegex(Exception, "found duplicate manifest 'foo'"):
        load_all_manifests(None)
[ 9, 1119, 1220 ]
def METHOD_NAME(arr1, arr2):
    return np.max(np.abs(arr1 - arr2))
[ 16065 ]
def METHOD_NAME():
    bpy.utils.unregister_class(SvDelaunay2DCdt)
[ 2468 ]
def METHOD_NAME(cls):
    """Overridden method. Runs once before all tests in this class."""
    try:
        cls.app = QApplication().processEvents()
    except RuntimeError:
        pass
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
[ 0, 1, 2 ]
def METHOD_NAME(fraction, msg, is_done):
    """Update progress bar from GLib main loop"""
    if msg:
        self.progressbar.set_text(msg)
    self.progressbar.set_fraction(fraction)
    if is_done:
        self.progressbar.hide()
        self.make_button.set_sensitive(True)
[ 69, 3064 ]
def METHOD_NAME(name: Any, parent: Any = None, default_key=None):
    value = None
    if isinstance(name, str):
        try:
            # is key of parent?
            value = parent[name]
        except (KeyError, TypeError, ValueError):
            pass
    else:
        # key = (key, value)?
        try:
            name, value = name
        except (TypeError, ValueError):
            # key = {key: value}?
            try:
                name, value = dict(name).popitem()
            except (TypeError, ValueError, AttributeError, KeyError):
                pass
    if not isinstance(name, str):
        raise ValueError(f'name must be a string')
    if value is None:
        return name, None
    try:
        # noinspection PyUnresolvedReferences
        value.items()
    except AttributeError as e:
        if default_key:
            value = {default_key: value}
        else:
            raise ValueError(f'value of {name!r} must be a dictionary') from e
    return name, value
[ 24, 156, 553, 637 ]
def METHOD_NAME(x, width):
    assert width in (16, 32, 64)
    if width == 64:
        return float(x)
    elif width == 32:
        return reinterpret_bits(float(x), b"!f", b"!f")
    else:
        return reinterpret_bits(float(x), b"!e", b"!e")
[ 1819, 47 ]
def METHOD_NAME(
    self,
    stream_state: Optional[StreamState] = None,
    stream_slice: Optional[StreamSlice] = None,
    next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
    # Pass the stream_slice from the argument, not the cursor because the cursor is updated after processing the response
    return self._get_request_option(RequestOptionType.header, stream_slice)
[ 19, 377, 2131 ]
def METHOD_NAME(s):
    """
    Returns true if argument is the name of a running process.

    s: process name
    returns Boolean and pid
    """
    # find pids of running processes
    pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
    for pid in pids:
        try:
            pid_file = os.path.join('/proc', pid, 'cmdline')
            with open(pid_file, 'rb') as f:
                data = f.read()
                procname = os.path.basename(crmshutils.to_ascii(data).replace('\x00', ' ').split(' ')[0])
                if procname == s or procname == s + ':':
                    return True, int(pid)
        except EnvironmentError:
            # a process may have died since we got the list of pids
            pass
    return False, -1
[ 19, 356, 452 ]
def METHOD_NAME(notification):
    return _generate_unsubscribe_link(
        unsubscribe_pb2.UnsubscribePayload(
            user_id=notification.user_id,
            topic_action=unsubscribe_pb2.UnsubscribeTopicAction(
                topic=notification.topic,
                action=notification.action,
            ),
        )
    )
[ 567, 5002, 39, 1006 ]
def METHOD_NAME(self):
    bad_players = ['fred', None, '', 0, 1, -1, 'xx', 'xo', 'Ox']
    for bad in bad_players:
        with self.assertRaises(ValueError):
            game.other_player(bad)
[ 9, 1068, 2395, 867 ]
def METHOD_NAME(self, start, stop, step, tsteps, quiet=False):
    """Tune the sorting period.

    Args:
        start (int): Start of tuning interval to scan (inclusive).
        stop (int): End of tuning interval to scan (inclusive).
        step (int): Spacing between tuning points.
        tsteps (int): Number of timesteps to run at each tuning point.
        quiet (bool): Quiet the individual run calls.

    Returns:
        int: The optimal sorting period from the scanned range.

    The optimal sorting period for the MPCD particles is determined from
    a sequence of short runs. The sorting period is first set to *start*.
    The TPS value is determined for a run of length *tsteps*. This run is
    repeated 3 times, and the median TPS of the runs is saved. The sorting
    period is then incremented by *step*, and the process is repeated until
    *stop* is reached. The period giving the fastest TPS is determined, and
    the sorter period is updated to this value. The results of the scan are
    also reported as output, and the fastest sorting period is also returned.

    Note:
        A short warmup run is **required** before calling :py:meth:`tune()`
        in order to ensure the runtime autotuners have found optimal kernel
        launch parameters.

    Examples::

        # warmup run
        hoomd.run(5000)

        # tune sorting period
        sorter.tune(start=5, stop=50, step=5, tsteps=1000)

    """
    # scan through range of sorting periods and log TPS
    periods = range(start, stop + 1, step)
    tps = []
    for p in periods:
        cur_tps = []
        self.set_period(period=p)
        for i in range(0, 3):
            hoomd.run(tsteps, quiet=quiet)
            cur_tps.append(hoomd.context.current.system.getLastTPS())

        # save the median tps
        cur_tps.sort()
        tps.append(cur_tps[1])

    # determine fastest period and set it on the sorter
    fastest = tps.index(max(tps))
    opt_period = periods[fastest]
    self.set_period(period=opt_period)

    # output results
    hoomd.context.current.device.cpp_msg.notice(
        2, '--- sort.tune() statistics\n')
    hoomd.context.current.device.cpp_msg.notice(
        2, 'Optimal period = {0}\n'.format(opt_period))
    hoomd.context.current.device.cpp_msg.notice(
        2, ' period = ' + str(periods) + '\n')
    hoomd.context.current.device.cpp_msg.notice(
        2, ' TPS = ' + str(tps) + '\n')

    return opt_period
[ 5958 ]
def METHOD_NAME(self, parser):
    # optional arguments
    parser.add_argument('appnames', nargs='*',
                        help='app names to update')
[ 238, 134 ]
def METHOD_NAME(self):
    LOG.info(f'Init MemPoolExecutor: {self._id}')

    self._event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(self._event_loop)

    self._pickable_data_srv = PipePickableDataSrv(user=self, srv_sock=self._srv_sock)
    self._solana = SolInteractor(self._config, self._config.solana_url)
    self._stat_client = ProxyStatClient(self._config)
    self._stat_client.start()

    self._gas_price_task = MPExecutorGasPriceTask(self._config, self._solana, self._stat_client)
    self._op_res_task = MPExecutorOpResTask(self._config, self._solana, self._stat_client)
    self._elf_params_task = MPExecutorElfParamsTask(self._config, self._solana)
    self._state_tx_cnt_task = MPExecutorStateTxCntTask(self._config, self._solana)
    self._exec_neon_tx_task = MPExecutorExecNeonTxTask(self._config, self._solana)
    self._free_alt_task = MPExecutorFreeALTQueueTask(self._config, self._solana)
    self._stuck_tx_task = MPExecutorStuckTxListTask(self._config, self._solana)
[ 176, 623, 2305 ]
def METHOD_NAME(self) -> str: ...
[ 13107 ]
def METHOD_NAME(origin_func):
    from importlib import import_module
    decorated_path = inspect.getfile(origin_func)
    module_path = __path__[0]
    if not decorated_path.startswith(module_path):
        raise Exception("Decorator can only be used in submodules!")
    manual_path = os.path.join(
        decorated_path[module_path.rfind(os.path.sep) + 1:])
    manual_file_path, manual_file_name = os.path.split(manual_path)
    module_name, _ = os.path.splitext(manual_file_name)
    manual_module = "..manual." + \
        ".".join(manual_file_path.split(os.path.sep) + [module_name, ])
    return getattr(import_module(manual_module, package=__name__), origin_func.__name__)
[ 512, 2663, 559 ]
def METHOD_NAME(self, run_server1, run_server2, run_server3):
    self.qmpc_request.variance(["data_id1"], [1, 2, 3])
[ 9, 2873 ]
def METHOD_NAME(cls, hostnames, data):
    host_stats = [('hostname', 'total', 'per minute')]
    for h in hostnames:
        h_safe = safe_name(h)
        prefix = f'awx_{h_safe}'
        messages_total = data.get(f'{prefix}_messages_received', '0')
        messages_per_minute = data.get(f'{prefix}_messages_received_per_minute', '0')
        host_stats.append((h, str(int(messages_total)), str(int(messages_per_minute))))
    return host_stats
[ 19, 550, 577 ]
def METHOD_NAME(request: HttpRequest) -> str:
    try:
        # request.headers = {"Authorization": "JWT abc123def456"}
        auth_header: str = request.META["HTTP_AUTHORIZATION"]
        return auth_header.split(" ", 1)[1]
    except (KeyError, IndexError):
        raise AtlassianConnectValidationError("Missing/Invalid authorization header")
[ 19, 466 ]
def METHOD_NAME(pattern, str):
    matches = re.findall(pattern, str)
    assert len(matches) > 0, "Pattern not found.\nPattern: " + pattern + "\nString: " + str
[ -1 ]
def METHOD_NAME(self, request, create_scale_pods_and_pvcs_using_kube_job_on_ms_consumers):
    """
    Save the current index and initialize the Sanity instance
    """
    self.orig_index = config.cur_index
    switch_to_correct_cluster_at_setup(request)
    self.sanity_helpers = SanityManagedService(
        create_scale_pods_and_pvcs_using_kube_job_on_ms_consumers
    )
[ 102 ]
def METHOD_NAME(profile_file):
    """Initialize a profiler from profile file."""
    print_mdl.METHOD_NAME(compat.as_bytes(profile_file))
    profiler = model_analyzer.Profiler.__new__(model_analyzer.Profiler)
    yield profiler
    print_mdl.DeleteProfiler()
[ 7275, 280, 171 ]
def METHOD_NAME(modname: str, cmd: code_writer_cmd) -> None:
    cmd.write_status(f"importing module {modname}\n")
    mod = importlib.import_module(modname)
    destination_path = mod.__file__
    assert destination_path is not None
    tempfile = process_module(modname, destination_path, cmd)
    cmd.run_zimports(tempfile)
    cmd.run_black(tempfile)
    cmd.write_output_file_from_tempfile(tempfile, destination_path)
[ 22, 298 ]
def METHOD_NAME(self):
[ 9, 1819 ]
def METHOD_NAME(self, symbol: int, modifiers: int):
    if symbol == arcade.key.SPACE:
        if arcade.timings_enabled():
            arcade.disable_timings()
        else:
            arcade.enable_timings()
[ 69, 59, 2971 ]
def METHOD_NAME(self, app: App):
    logger.info(f'Enable plugins that are scanned. total: {len(self._plugins)}')
    for plugin in self._plugins.values():
        # Try to enable the plugin.
        self.about_to_enable.emit(plugin)
        try:
            plugin.enable(app)
        except Exception:  # noqa
            logger.exception(f'Enable plugin:{plugin.name} failed')
    self.scan_finished.emit(list(self._plugins.values()))
[ 1317, 1294 ]
def METHOD_NAME(self, x, u, dt: float):
    next_x = x['x'] + x['v']*dt
    drag_acc = self.parameters['lumped_param'] * x['v'] * x['v']
    next_v = x['v'] + (self.parameters['g'] - drag_acc*np.sign(x['v']))*dt
    return self.StateContainer(np.array([
        np.atleast_1d(next_x),
        np.atleast_1d(next_v)  # Acceleration of gravity
    ]))
[ 243, 551 ]
def METHOD_NAME(self):
    """Returns the model properties as a dict"""
    result = {}

    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.METHOD_NAME()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].METHOD_NAME())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value

    return result
[ 24, 553 ]
def METHOD_NAME(self):
    # Arrange
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)
    self.fc.update_raw(1.00000, 1.00010, 0.99990, 1.00005)

    # Act
    result_candle = self.fc.value
    result_vector = self.fc.vector

    # Assert
    assert np.array_equal([1, 1, 1, 1, 1], result_vector)
    assert result_candle.direction == CandleDirection.BULL
    assert result_candle.size == CandleSize.VERY_SMALL
    assert result_candle.body_size == CandleBodySize.SMALL
    assert result_candle.upper_wick_size == CandleWickSize.SMALL
    assert result_candle.lower_wick_size == CandleWickSize.SMALL
[ 9, 199, 41, 492, 5636, 610, 391 ]
def METHOD_NAME(self):
    return self.preprocessing_and_model.METHOD_NAME()
[ 8662 ]
def METHOD_NAME(self, data):
    data['server_room'] = ServerRoom.objects.get(
        pk=data['server_room']
    )
    data['orientation'] = RackOrientation.id_from_name(data['orientation'])
    return super(RackBaseSerializer, self).METHOD_NAME(self.instance, data)
[ 86 ]
def METHOD_NAME(matchobj):
[ -1 ]
def METHOD_NAME(self, examples: Dataset) -> Tuple[Dataset, Dataset]:
    """
    Process eval examples into features.

    Args:
        examples: examples to process into features.

    Returns:
        tuple (examples, features) comprising examples adapted into
        standardized format and processed input features for model.
    """
    pass
[ 356, 1171 ]
def METHOD_NAME(self):
    try:
        self.migrate()
    except InterruptedError:
        self.out("Changes not applied")
    else:
        self.out("Changes applied")
    self.out("DONE!")
[ 22 ]
def METHOD_NAME(f, mtxmspecs, mtxm_gen, complex_a=False, complex_b=False, bg=True):
    r = lambda x: x
    if bg:
        r = lambda x: x.replace("double complex", "__complex__ double").replace("cabs", "abs")
    complex_c = (complex_a or complex_b) and "complex" or ""
    complex_a = complex_a and "complex" or ""
    complex_b = complex_b and "complex" or ""
    print(r(_header(bg, complex_c, complex_a, complex_b)), file=f)
    for mtxm in mtxmspecs:
        mtxm_gen(f, *mtxm)
    mtxms = [x[-1] for x in mtxmspecs]
    print(r(_timer(mtxms, complex_c, complex_a, complex_b)), file=f)
    if not (bool(complex_a) ^ bool(complex_b)):
        print(r(_transtimer(mtxms, complex_c, complex_a, complex_b)), file=f)
    print(r(_main(mtxms, complex_c, complex_a, complex_b)), file=f)
[ 4769, 370 ]
def METHOD_NAME(self, pairs):
    """Join clusters using direct ratios given a tuple (i,j) of bin pairs"""
    for i, j in zip(*pairs):
        # Both bins not joined
        if self.bin_assign[i] == -1 and self.bin_assign[j] == -1:
            # Create new cluster
            self.bin_assign[i] = self.cluster_id
            self.bin_assign[j] = self.cluster_id
            self.cluster_contents[self.cluster_id] = {i, j}
            self.cluster_id += 1

            rij = self.ratios[i, j]
            denom = rij + 1.0
            self.bin_data[i] = rij / denom  # relative probability for bin i
            self.bin_data[j] = denom.recip()  # relative probability for bin j

        # Only one bin previously assigned to a cluster
        elif self.bin_assign[i] == -1 or self.bin_assign[j] == -1:
            if self.bin_assign[i] == -1:
                idum, jdum = i, j
            else:
                idum, jdum = j, i

            jclust = self.bin_assign[jdum]
            rik = self.ratios[idum, jdum]
            pk = self.bin_data[jdum]
            # estimate for p_idum / P_cluster based on 'path' through bin k
            # Note that here P_cluster is value before addition of bin idum
            piTmp = rik * pk

            # now, compute relative prob of each bin in *new* cluster (including bin idum)
            denom = piTmp + 1.0
            self.bin_data[idum] = piTmp / denom

            # Update bins already in cluster
            jclust_mid = np.where(self.bin_assign == jclust)  # index of bins in jclust
            self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / denom

            # Move bin idum into cluster jclust
            self.bin_assign[idum] = jclust
            self.cluster_contents[jclust].update({idum})

        # Both bins previously assigned to different cluster; Join clusters
        elif not self.bin_assign[i] == self.bin_assign[j]:
            iclust = self.bin_assign[i]
            jclust = self.bin_assign[j]

            rij = self.ratios[i, j]
            pi = self.bin_data[i]
            pj = self.bin_data[j]

            ij_cluster_ratio = rij * pj / pi
            idenom = ij_cluster_ratio.recip() + 1.0
            jdenom = ij_cluster_ratio + 1.0

            iclust_mid = np.where(self.bin_assign == iclust)
            self.bin_data[iclust_mid] = self.bin_data[iclust_mid] / idenom

            jclust_mid = np.where(self.bin_assign == jclust)
            self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / jdenom

            # Join all bins in cluster j into cluster iclust
            self.bin_assign[jclust_mid] = iclust

            # Move contents of jclust into iclust
            self.cluster_contents[iclust].update(self.cluster_contents[jclust])
            # Clear contents of jclust
            self.cluster_contents[jclust].clear()

            if len(self.cluster_contents[iclust]) == self.nbins:
                break
[ 2831, 53 ]
def METHOD_NAME(self) -> int:
[ 181, 9, 2794 ]
def METHOD_NAME():
    with pytest.raises(ValueError):
        minimize(func, (1.5, 1.7, 1.5), method="foo")
[ 9, 103, 883 ]
def METHOD_NAME() -> Generator:
    METHOD_NAME = [
        System(
            fides_key="database-2",
            organization_fides_key="default_organization",
            name="database-2",
            description="Fides Generated Description for RDS Cluster: database-2",
            fidesctl_meta=SystemMetadata(
                endpoint_address="database-2.cluster-ckrdpkkb4ukm.us-east-1.rds.amazonaws.com",
                endpoint_port="3306",
                resource_id="arn:aws:rds:us-east-1:910934740016:cluster:database-2",
            ),
            system_type="rds_cluster",
            privacy_declarations=[],
        ),
        System(
            fides_key="database-1",
            organization_fides_key="default_organization",
            name="database-1",
            description="Fides Generated Description for RDS Instance: database-1",
            fidesctl_meta=SystemMetadata(
                endpoint_address="database-1.ckrdpkkb4ukm.us-east-1.rds.amazonaws.com",
                endpoint_port="3306",
                resource_id="arn:aws:rds:us-east-1:910934740016:db:database-1",
            ),
            system_type="rds_instance",
            privacy_declarations=[],
        ),
    ]
    yield METHOD_NAME
[ 4909, 5372 ]
def METHOD_NAME(conanfile, remove_lib_prefix=True):
    """remove lib prefix & change extension to .lib in case of cl like compiler"""
    if not conanfile.settings.get_safe("compiler.runtime"):
        return
    from conan.tools.files import rename
    import glob
    libdirs = getattr(conanfile.cpp.package, "libdirs")
    for libdir in libdirs:
        for ext in [".dll.a", ".dll.lib", ".a"]:
            full_folder = os.path.join(conanfile.package_folder, libdir)
            for filepath in glob.glob(os.path.join(full_folder, f"*{ext}")):
                libname = os.path.basename(filepath)[0:-len(ext)]
                if remove_lib_prefix and libname[0:3] == "lib":
                    libname = libname[3:]
                rename(conanfile, filepath, os.path.join(os.path.dirname(filepath), f"{libname}.lib"))
[ 1112, 8745, 15939 ]
def METHOD_NAME(  # pylint: disable=too-many-arguments
    reference_left: ndarray,
    reference_right: ndarray,
    processed_left: ndarray,
    processed_right: ndarray,
    sample_rate: float,
    listener: Listener,
    level: float = 100.0,
) -> float:
    """Better ear HASPI.

    Calculates HASPI for left and right ear and selects the better result.

    Args:
        ref_left (np.ndarray): left channel of reference signal
        ref_right (np.ndarray): right channel of reference signal
        proc_left (np.ndarray): left channel of processed signal
        proc_right (np.ndarray): right channel of processed signal
        sample_rate (int): sampling rate for both signal
        audiogram_left (): left ear audiogram
        audiogram_right (): right ear audiogram
        level: level in dB SPL corresponding to RMS=1

    Returns:
        float: beHASPI score

    Updates:
        Zuzanna Podwinska, March 2022
    """
    score_left, _ = haspi_v2(
        reference_left,
        sample_rate,
        processed_left,
        sample_rate,
        listener.audiogram_left,
        level,
    )
    score_right, _ = haspi_v2(
        reference_right,
        sample_rate,
        processed_right,
        sample_rate,
        listener.audiogram_right,
        level,
    )

    return max(score_left, score_right)
[ -1, 820, 673 ]
def METHOD_NAME(self):
[ 187 ]
def METHOD_NAME():
    q0, q1 = cirq.LineQubit.range(2)

    s00 = cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1)
    np.testing.assert_equal(s00.state_vector(), [1, 0, 0, 0])
    np.testing.assert_equal(s00.state_vector(qubit_order=(q1, q0)), [1, 0, 0, 0])

    s01 = cirq.KET_ZERO(q0) * cirq.KET_ONE(q1)
    np.testing.assert_equal(s01.state_vector(), [0, 1, 0, 0])
    np.testing.assert_equal(s01.state_vector(qubit_order=(q1, q0)), [0, 0, 1, 0])
[ 9, 9302, 551, 798 ]
def METHOD_NAME(self):
    """Test that the spectrum of a circuit is calculated correctly
    in the tf interface."""
    import tensorflow as tf

    dev = qml.device("default.qubit", wires=3)
    qnode = qml.QNode(circuit, dev)

    x = tf.Variable([1.0, 2.0, 3.0])
    w = tf.constant([[-1, -2, -3], [-4, -5, -6]])
    res = circuit_spectrum(qnode)(x, w)

    assert res
    for (k1, v1), (k2, v2) in zip(res.items(), expected_result.items()):
        assert k1 == k2
        assert v1 == v2
[ 9, 1911, 554 ]
def METHOD_NAME(x):
    using_bool_impl = try_call_method(x, "__bool__")

    if '__len__' in x.jit_methods:
        def using_len_impl(x):
            return bool(len(x))
    else:
        using_len_impl = None

    always_true_impl = lambda x: True

    return take_first(using_bool_impl, using_len_impl, always_true_impl)
[ 2, 863 ]
def METHOD_NAME(args):
    with open(args.json) as json_file:
        cb = json.load(json_file)

    if args.token and cb['token'] != args.token:
        print("Token mismatch")
        sys.exit(1)

    if is_infra_error(cb):
        print("Infrastructure error")
        ret = BISECT_SKIP
    elif args.test_case:
        ret = handle_test(cb, args.test_case, args.verbose)
    else:
        ret = handle_boot(cb, args.verbose)

    sys.exit(ret)
[ 57 ]
def METHOD_NAME(builder, asymmetricQuantizeInputs):
    """This method is deprecated. Please switch to AddAsymmetricQuantizeInputs."""
    return AddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs)
[ 9628, 771, 1376, 1881, 238, 5669, 1429 ]
def METHOD_NAME(router: APIRouter, url: str, function_members: list[tuple[str, Any]]) -> None:
    # sourcery skip: merge-nested-ifs
    existing_routes_endpoints: list[tuple[Any, str]] = [
        (route.endpoint, route.path) for route in router.routes if isinstance(route, APIRoute)
    ]
    for name, func in function_members:
        if hasattr(router, name) and not name.startswith("__") and not name.endswith("__"):
            if (func, url) not in existing_routes_endpoints:
                response_model = None
                responses = None
                kwargs = {}
                status_code = 200
                if return_types_func := getattr(func, RETURN_TYPES_FUNC_KEY, None):
                    response_model, status_code, responses, kwargs = return_types_func()
                api_resource = router.api_route(
                    url,
                    methods=[name.capitalize()],
                    response_model=response_model,
                    status_code=status_code,
                    responses=responses,
                    **kwargs,
                )
                api_resource(func)
[ 783, 3968, 604, 103, 156 ]
def METHOD_NAME(deck_slot_location: DeckSlotLocation) -> int:
    return deck_slot_location.slotName.as_int()
[ 3854, 3572, 24, 962 ]
def METHOD_NAME(self):
    x = torch.randn(2, 3, 4)
    linear = Linear2D(4, 5)
    old_linear = nn.Linear(4, 5)
    old_linear.load_state_dict(linear.state_dict())

    y = linear(x)
    self.assertEqual(y.size(), (2, 3, 5))
    y1 = old_linear(x)
    self.assertEqual(y1.size(), (2, 3, 5))
    torch.testing.assert_close(y, y1)

    Y = torch.randn_like(y)
    loss = torch.nn.functional.mse_loss(y, Y)
    loss1 = torch.nn.functional.mse_loss(y1, Y)
    loss.backward()
    loss1.backward()

    name2grad = {}
    for name, p in linear.named_parameters():
        name2grad[name] = p.grad
    for name, p in old_linear.named_parameters():
        torch.testing.assert_close(name2grad[name], p.grad, rtol=1e-4, atol=1e-4)
[ 9, 1085, 1783 ]
def METHOD_NAME():
    good_rule = {
        "id": "good_rule",
        "remediations": {
            "bash": {
                "shared.sh": {}
            }
        }
    }
    bad_rule = {
        "id": "bad_rule",
        "remediations": {
            "bash": {}
        }
    }
    assert not rds.missing_remediation(good_rule, 'bash')
    assert rds.missing_remediation(bad_rule, 'bash')
    assert rds.missing_remediation(bad_rule, 'anaconda')
[ 9, 1038, 9626 ]