text (string, lengths 15 to 7.82k)
ids (sequence, lengths 1 to 7)
def METHOD_NAME(self):
    for i in range(10):
        self.admin.assert_icommand(['itouch', str(i) + ".file"])
        self.admin.assert_icommand(['imkdir', str(i)])
    self.admin.assert_icommand(["itree", "-c"], "STDOUT_SINGLELINE", "Found 10 collections and 0 data objects")
[ 9, 5321, 246 ]
def METHOD_NAME(
    target: Callable[..., None],
    name_prefix: str,
    args: Tuple = (),
    num_workers: int = 2,
[ 22, 1794, 1573 ]
def METHOD_NAME(self): """Get the distance between a port and its anchor particle. If the port has no anchor particle, returns None. """ if self.anchor: return np.linalg.norm(self.center - self.anchor.pos) else: warn( "This port is not anchored to another particle. Returning a " "separation of None" ) return None
[ 11812 ]
def METHOD_NAME(self, mock_time):
    self.handler.reset("a")
    self.assertTrue(self.handler.ready("a"))
    self.assertEqual(self.handler.time_left("a"), 0)
[ 9, 656, 256, 9023 ]
def METHOD_NAME(search_repo, username, org_id, shared_from, not_shared_from):

    # for getting repo type map
    def get_repo_type_map(repo_list, repo_type):
        repo_type_map = {}
        for repo in repo_list:
            repo_type_map[repo.id] = repo_type
        return repo_type_map

    repo_id_map = {}
    repo_type_map = {}

    if search_repo == 'mine':
        repo_list = get_owned_repos(username, org_id=org_id)
        repo_type_map = get_repo_type_map(repo_list, search_repo)
    elif search_repo == 'shared':
        repo_list = get_shared_repos(username, org_id=org_id)
        if shared_from:
            repo_list = [r for r in repo_list if r.user == shared_from]
        if not_shared_from:
            repo_list = [r for r in repo_list if r.user != not_shared_from]
        repo_type_map = get_repo_type_map(repo_list, search_repo)
    elif search_repo == 'group':
        repo_list = get_group_repos(username, org_id=org_id)
        repo_type_map = get_repo_type_map(repo_list, search_repo)
    elif search_repo == 'public':
        repo_list = get_public_repos(username, org_id=org_id)
        repo_type_map = get_repo_type_map(repo_list, search_repo)
    else:
        owned_repos, shared_repos, group_repos, public_repos = get_user_repos(
            username, org_id=org_id)
        repo_list = owned_repos + shared_repos + group_repos + public_repos

        # priority is group > public > mine(or shared)
        repo_type_map.update(get_repo_type_map(owned_repos, 'mine'))
        repo_type_map.update(get_repo_type_map(shared_repos, 'shared'))
        repo_type_map.update(get_repo_type_map(public_repos, 'public'))
        repo_type_map.update(get_repo_type_map(group_repos, 'group'))

    for repo in repo_list:
        subrepo_tag = False
        search_repo_id = repo.id
        if repo.origin_repo_id:
            search_repo_id = repo.origin_repo_id
            subrepo_tag = True

        # search priority: repo > subrepo
        if search_repo_id not in repo_id_map or subrepo_tag is False:
            repo_id_map[search_repo_id] = repo

    return repo_id_map, repo_type_map
[ 19, 1070, 4822, 422 ]
def METHOD_NAME(self, func): self._add_needed_fixtures_from_function(func)
[ 238, 2002, 4003, 280, 559 ]
def METHOD_NAME():
    with pytest.raises(SkipComponent):
        postfix_conf.PostfixMaster(context_wrap(POSTFIXMASTER_ERR))
[ 9, 3444, 2614, 3451 ]
def METHOD_NAME(self):
    # Arrange
    self.dc.update_raw(1.00020, 1.00000)
    self.dc.update_raw(1.00030, 1.00010)
    self.dc.update_raw(1.00040, 1.00020)

    # Act, Assert
    assert self.dc.upper == 1.00040
    assert self.dc.middle == 1.00020
    assert self.dc.lower == 1.00000
[ 9, 99, 41, 2756, 1461, 610, 391 ]
def METHOD_NAME(self):
[ 2054 ]
def METHOD_NAME(variants_all):
    b = mi.load_dict({'type': 'hair'})
    assert b is not None
    assert b.component_count() == 2
    assert b.flags(0) == (
        mi.BSDFFlags.Glossy | mi.BSDFFlags.FrontSide |
        mi.BSDFFlags.Anisotropic | mi.BSDFFlags.NonSymmetric
    )
    assert b.flags(1) == (mi.BSDFFlags.Null | mi.BSDFFlags.BackSide)
    assert b.flags() == (b.flags(0) | b.flags(1))
[ 3243, 129 ]
def METHOD_NAME(self): """Test double escaping an end.""" self.check_markdown( r''' \begin{align}3+2\\\end{align} ''', r''' <div class="arithmatex"> <div class="MathJax_Preview">\begin{align}3+2\\\end{align}</div> <script type="math/tex; mode=display">\begin{align}3+2\\\end{align}</script> </div> ''', True )
[ 9, 2152, 6409, 1798, 573 ]
def METHOD_NAME(self):
    super().METHOD_NAME()
    self.logger.info("%s listening on %s:%s", self, self.host, self.port)
[ 72, 447 ]
def METHOD_NAME(resource_group_name: Optional[str] = None,
                service_name: Optional[str] = None,
                service_registry_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceRegistryResult:
    """
    Get the Service Registry and its properties.

    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str service_name: The name of the Service resource.
    :param str service_registry_name: The name of Service Registry.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    __args__['serviceRegistryName'] = service_registry_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230701preview:getServiceRegistry', __args__, opts=opts, typ=GetServiceRegistryResult).value

    return AwaitableGetServiceRegistryResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
[ 19, 549, 510 ]
def METHOD_NAME(
    self
) -> Tuple[List[Tuple[str, float, float, int, float, float]], int, float]:
[ 93, 339 ]
def METHOD_NAME(self, **fields):
    """Check that 1) the user has selected a valid protocol, 2) the user has
    not left the required fields blank, and 3) the user is not attempting to
    change the protocol of an existing admin auth service."""
    protocol = fields.get("protocol")
    auth_service = fields.get("auth_service")
    id = fields.get("id")

    if protocol:
        if protocol not in ExternalIntegration.ADMIN_AUTH_PROTOCOLS:
            return UNKNOWN_PROTOCOL
        else:
            wrong_format = self.validate_formats()
            if wrong_format:
                return wrong_format

    if auth_service:
        if id and int(id) != auth_service.id:
            return MISSING_SERVICE
        if protocol != auth_service.protocol:
            return CANNOT_CHANGE_PROTOCOL
    else:
        if id:
            return MISSING_SERVICE
[ 187, 1029, 342 ]
def METHOD_NAME(self): """Terminate tasks in a controlled way.""" for worker in self.worker_list: worker.METHOD_NAME() log.info("The workers terminated in a controlled way")
[ 1602 ]
def METHOD_NAME(self, run_time, manual=False):
    """Uses its JobCollection to build new JobRuns. If all_nodes is set,
    build a run for every node, otherwise just builds a single run on a
    single node.
    """
    pool = self.node_pool
    nodes = pool.nodes if self.all_nodes else [pool.next()]
    for n in nodes:
        run = self.runs.build_new_run(self, run_time, n, manual=manual)
        self.watch(run)
        yield run
[ 56, 80, 420 ]
def METHOD_NAME(rating_true, rating_pred):
    with Timer() as t:
        recall_at_k(
            rating_true=rating_true,
            rating_pred=rating_pred,
            col_prediction=DEFAULT_PREDICTION_COL,
            k=10,
        )
    assert t.interval < 50 * (1 + TOL)
[ 9, 440, 2398 ]
def METHOD_NAME(msg):
    sys.stdout.write('==== %s\n' % msg)
    sys.stdout.flush()
[ 1737 ]
def METHOD_NAME(tensor, **kwargs):
    nn.init.normal_(tensor, mean=0, std=embed_dim**-0.5)
    nn.init.constant_(tensor[1], 0)
[ 3259, 176 ]
def METHOD_NAME(self) -> None: self.command.METHOD_NAME()
[ 22 ]
def METHOD_NAME(self, episode: int) -> None:
    if self.disable:
        return
    self.overall_timer.end()
[ 69, 3188, 1798 ]
def METHOD_NAME(layers): return [list(map(toKC, layer)) for layer in layers]
[ 2315, 24, 14845 ]
def METHOD_NAME(self):
[ 176, 7958 ]
def METHOD_NAME(self, block=True, timeout=None):
    """Get an iopub message."""
    return self.iopub_channel.get_msg(block, timeout)
[ 19, 12598, 169 ]
def METHOD_NAME(verbosity=0):
    """Helper to support a clean default uninstall process on Windows

    Note that calling this function may alter os.environ.
    """
    # Nothing to do if pip was never installed, or has been removed
    try:
        import pip
    except ImportError:
        return

    # If the pip version doesn't match the bundled one, leave it alone
    if pip.__version__ != _PIP_VERSION:
        msg = ("ensurepip will only uninstall a matching version "
               "({!r} installed, {!r} bundled)")
        print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr)
        return

    _disable_pip_configuration_settings()

    # Construct the arguments to be passed to the pip command
    args = ["uninstall", "-y", "--disable-pip-version-check"]
    if verbosity:
        args += ["-" + "v" * verbosity]

    return _run_pip(args + [p[0] for p in reversed(_PROJECTS)])
[ 2112, 1087 ]
def METHOD_NAME(self, output_tensors, input_tensor, opts=AllgatherOptions()):
    # NOTE: in general it's not good form to try to make FakePG work with 'real data',
    # but the reasoning here is that we want FakePG to work with DeviceMesh's init
    # code that have the data validation, which makes it worth the tradeoff.
    # In general user should use MTPG or normal PG for cases where they may care about
    # real data from collectives
    for chunk in output_tensors[0]:
        chunk.copy_(input_tensor[0])
    return ret_work(output_tensors)
[ 9467 ]
def METHOD_NAME(
    self, instrument_code: str
) -> futuresAdjustedPrices:
    """

    :param instrument_code:
    :return:
    """
    raise NotImplementedError()
[ 19, -1, 4259, 806 ]
def METHOD_NAME(tree):
    for sub_tree in tree:
        if toolbox.is_tree(sub_tree):
            if sub_tree.label() == 'QI':
                tree.remove(sub_tree)
    return tree
[ 188, -1 ]
def METHOD_NAME(self):
    with self.assertRaises(ValueError) as err:
        PhoneNumber("22234567890")
    self.assertEqual(type(err.exception), ValueError)
    self.assertEqual(err.exception.args[0], "11 digits must start with 1")
[ 9, 532, 1646, 4281, 1583, 870, 130 ]
async def METHOD_NAME(
    client: TestClient,
    storage_s3_client: StorageS3Client,
    storage_s3_bucket: S3BucketName,
):
    assert client.app
    url = client.app.router["get_status"].url_for()
    response = await client.get(f"{url}")
    data, error = await assert_status(response, web.HTTPOk)
    assert data
    assert not error
    app_status_check = AppStatusCheck.parse_obj(data)
    assert app_status_check.services["s3"]["healthy"] == "connected"

    # now delete the bucket
    await storage_s3_client.client.delete_bucket(Bucket=storage_s3_bucket)

    # check again the health
    response = await client.get(f"{url}")
    data, error = await assert_status(response, web.HTTPOk)
    assert data
    assert not error
    app_status_check = AppStatusCheck.parse_obj(data)
    assert app_status_check.services["s3"]["healthy"] == "no access to S3 bucket"
[ 9, 1068, 3393, 452, 217, 2538, 1038 ]
def METHOD_NAME():
    input_variable_name = "input"
    input_variable_value = "value"
    output_variable_name = "output"
    sparkcommand = SendStringToSparkCommand(
        input_variable_name, input_variable_value, output_variable_name
    )
    sparkcommand._r_command = MagicMock(return_value=MagicMock())
    sparkcommand.to_command(
        constants.SESSION_KIND_SPARKR,
        input_variable_name,
        input_variable_value,
        output_variable_name,
    )
    sparkcommand._r_command.assert_called_with(
        input_variable_name, input_variable_value, output_variable_name
    )
[ 9, 353, 24, 3264 ]
def METHOD_NAME(self, client_id: str):
    _stop_site(self.client_properties[client_id])
    super().METHOD_NAME(client_id)
[ 631, 340 ]
def METHOD_NAME(cls, ctx, op: "StatsModelsTrain"):
    endog = ctx[op.endog.key]
    exog = ctx[op.exog.key]

    # code from statsmodels.base.distributed_estimation::_helper_fit_partition
    model = op.model_class(endog, exog, **op.init_kwds)
    results = op.estimation_method(
        model,
        op.partition_id,
        op.num_partitions,
        fit_kwds=op.fit_kwds,
        **op.estimation_kwds
    )

    ctx[op.outputs[0].key] = pickle.dumps(results)
[ 750, 422 ]
def METHOD_NAME(data: UserInfo) -> "User": return User(id=data.id, name=data.name)
[ 280, 5457 ]
def METHOD_NAME(ptr: int, space: str='auto') -> int: _check(_bf.bfFree(ptr, _string2space(space)))
[ 772, 3712 ]
def METHOD_NAME(self):
[ 3498 ]
def METHOD_NAME(field, value): return 16
[ 89, 377, 1318 ]
def METHOD_NAME(self):
    time_spec = trontimespec.TimeSpecification(
        weekdays=[1, 5],
        ordinals=[1, 3],
    )
    gen = time_spec.next_day(14, 2012, 3)
    assert_equal(list(gen), [16, 19])
    gen = time_spec.next_day(1, 2012, 3)
    assert_equal(list(gen), [2, 5, 16, 19])
[ 9, 243, 1724, 9220, 41, -1 ]
def METHOD_NAME(self): """Bounding box in form [x0,y0,z0, x1,y1,z1] (or none if a bounding box does not make sense for this layer) over-ride in derived classes """ return None
[ 2739 ]
def METHOD_NAME(self, var_list, state):
[ 129, 1659 ]
def METHOD_NAME(self) -> 'Heartbeat': return Heartbeat.from_db(db.upsert_heartbeat(self))
[ 129 ]
def METHOD_NAME(self):
    repomd = cr.Repomd(REPO_01_REPOMD)

    types = []
    for rec in repomd:
        types.append(rec.type)
    self.assertEqual(types, ['filelists', 'other', 'primary'])

    rec = repomd["primary"]
    self.assertEqual(rec.type, "primary")

    self.assertRaises(KeyError, repomd.__getitem__, "foobar")
    self.assertTrue("primary" in repomd)
[ 9, -1, 4379, 61, 2478, -1 ]
def METHOD_NAME(self, name, ext=None):
    fname, fext = op.splitext(op.basename(self.inputs.in_file))
    if fext == ".gz":
        fname, fext2 = op.splitext(fname)
        fext = fext2 + fext
    if not isdefined(self.inputs.out_prefix):
        out_prefix = op.abspath(fname)
    else:
        out_prefix = self.inputs.out_prefix
    if ext is None:
        ext = fext
    return out_prefix + "_" + name + ext
[ 370, 1147 ]
def METHOD_NAME(instance):
    """Delete invalid product channel listings.

    Delete product channel listings for channels for which the deleted variant
    was the last available variant.
    """
    channel_ids = set(
        instance.channel_listings.values_list("channel_id", flat=True)
    )
    product_id = instance.product_id
    variants = (
        models.ProductVariant.objects.filter(product_id=product_id)
        .exclude(id=instance.id)
        .values("id")
    )
    available_channel_ids = set(
        models.ProductVariantChannelListing.objects.filter(
            Exists(
                variants.filter(id=OuterRef("variant_id")),
                channel_id__in=channel_ids,
            )
        ).values_list("channel_id", flat=True)
    )
    not_available_channel_ids = channel_ids - available_channel_ids
    models.ProductChannelListing.objects.filter(
        product_id=product_id, channel_id__in=not_available_channel_ids
    ).delete()
[ 34, 1188, 307, 8627, 529, 1272, 7439 ]
def METHOD_NAME(): pass
[ 1278, 2103 ]
def METHOD_NAME(self):
    # Note that the check is skipped when we don't have
    # the "Add portal content" permission.
    self.folder.manage_permission("Add portal content", ["Manager"], acquire=0)
    self.folder._setObject("foo", dummy.Item("foo"))
    for alias in self.folder.getTypeInfo().getMethodAliases().keys():
        r = check_id(self.folder.foo, alias)
        self.assertEqual(r, "%s is reserved." % alias)
[ 9, 935, 103, 533, 2319 ]
def METHOD_NAME(self, other, **kwargs):
    """Fit the landmarks in `self` to those in `other` in a least squares
    sense, i.e., the transformation matrix moves points from `self` to
    `other`.

    kwargs to pass to `fit_matched_points_analytical`
    """
    assert isinstance(other, Montage)
    assert self.landmarks and other.landmarks
    names = set(self.get_landmark_names()).intersection(
        set(other.get_landmark_names())
    )
    return fit_matched_points_analytical(
        self.get_landmark_pos(names),
        other.get_landmark_pos(names),
        **kwargs
    )
[ 90, 24 ]
def METHOD_NAME(field, value): return get_default_field_value(field, value)
[ 89, 2010, 415 ]
def METHOD_NAME(x):
    try:
        if True:
            return 1
        elif 0:
            return 1.0
        else:
            return ''
    except ValueError:
        return set
[ 12088, -1 ]
def METHOD_NAME(self, fake_log):
    user = UserFactory()
    PathForm(user=user)
    fake_log.warning.assert_called_with('Ignoring entry in HIDDEN_FORM_FIELDS: field \'geom\' is required on form PathForm.')
[ 9, 2830, 342, 830, 984, 342 ]
def METHOD_NAME(self): """Test simple, multiple registers""" cr1 = ClassicalRegister(1, "c1") cr2 = ClassicalRegister(1, "c2") qr = QuantumRegister(1, "q") qc = QuantumCircuit(qr, cr1, cr2) qc.measure(0, 1) qc.reset(0) new_qc = ResetAfterMeasureSimplification()(qc) ans_qc = QuantumCircuit(qr, cr1, cr2) ans_qc.measure(0, 1) ans_qc.x(0).c_if(cr2[0], 1) self.assertEqual(new_qc, ans_qc)
[ 9, 53, 457, 739 ]
def METHOD_NAME(self, meshroomFile, node):
    tags = self.DEFAULT_TAGS.copy()  # copy to not modify default tags
    nbFrames = node.size
    arguments = {}
    parallelArgs = ''
    print('node: ', node.name)
    if node.isParallelized:
        blockSize, fullSize, nbBlocks = node.nodeDesc.parallelization.getSizes(node)
        parallelArgs = ' --iteration @start'
        arguments.update({'start': 0, 'end': nbBlocks - 1, 'step': 1})

    tags['nbFrames'] = nbFrames
    tags['prod'] = self.prod
    allRequirements = list()
    allRequirements.extend(self.config['CPU'].get(node.nodeDesc.cpu.name, []))
    allRequirements.extend(self.config['RAM'].get(node.nodeDesc.ram.name, []))
    allRequirements.extend(self.config['GPU'].get(node.nodeDesc.gpu.name, []))

    task = simpleFarm.Task(
        name=node.name,
        command='{exe} --node {nodeName} "{meshroomFile}" {parallelArgs} --extern'.format(
            exe='meshroom_compute' if self.reqPackages else os.path.join(binDir, 'meshroom_compute'),
            nodeName=node.name,
            meshroomFile=meshroomFile,
            parallelArgs=parallelArgs),
        tags=tags,
        rezPackages=self.reqPackages,
        requirements={'service': str(','.join(allRequirements))},
        **arguments)
    return task
[ 129, 758 ]
async def METHOD_NAME(self, sig: Optional[int] = None):
    logger.info("Waiting for gRPC server shutdown")
    # TODO: Read from config
    await self._server.METHOD_NAME(grace=5)
    logger.info("gRPC server shutdown complete")
[ 631 ]
def METHOD_NAME(path):
    ''' Returns the full NOMAD API url for the given api path. '''
    return f'{config.client.METHOD_NAME}/v1/{path}'
[ 274 ]
def METHOD_NAME(self, *, key_id: str) -> Optional[pathlib.Path]:
    """Find snap key asset matching key_id.

    The key asset must be named with the last 8 characters of the key
    identifier, in upper case.

    :param key_id: Key ID to search for.

    :returns: Path of key asset if match found, otherwise None.
    """
    key_file = key_id[-8:].upper() + ".asc"
    key_path = self._key_assets / key_file

    if key_path.exists():
        return key_path

    return None
[ 416, 3455, 41, 59, 147 ]
def METHOD_NAME():
    src = "> > > list()"
    cln = "list()"
    assert text.strip_email_quotes(src) == cln
[ 9, 1360, 15374 ]
def METHOD_NAME(s, labels, table, **unused_kwargs):
    """
    Normalizes string. For example:
    'call me at 8:00 pm!' -> 'call me at eight zero pm'

    Args:
        s: string to normalize
        labels: labels used during model training.

    Returns:
        Normalized string
    """

    def good_token(token, labels):
        s = set(labels)
        for t in token:
            if not t in s:
                return False
        return True

    try:
        text = _clean_text(s, ["english_cleaners"], table).strip()
        return ''.join([t for t in text if good_token(t, labels=labels)])
    except:
        print("WARNING: Normalizing {} failed".format(s))
        return None
[ 1137, 144 ]
def METHOD_NAME(
    self,
    request: pulumi.analyzer_pb2.AnalyzeRequest,
    context: grpc.ServicerContext,
) -> pulumi.analyzer_pb2.AnalyzeResponse:
    """Analyze analyzes a single resource object, and returns any errors
    that it finds.

    Called with the "inputs" to the resource, before it is updated.
    """
[ 902 ]
def METHOD_NAME(self):
    # mktime(t)
    self.assertFailsArgsLengthCheck(
        "exactly 1 argument", time.mktime)  # no args
    self.assertFailsArgsLengthCheck(
        "exactly 1 argument", time.mktime, self.t_struct, "arg 2")
[ 9, -1 ]
def METHOD_NAME(x, Pflat, testName, show_plots):
    numStates = len(x[0,:])-1
    P = np.zeros([len(Pflat[:,0]),numStates,numStates])
    t= np.zeros(len(Pflat[:,0]))
    for i in range(len(Pflat[:,0])):
        t[i] = x[i, 0]*1E-9
        P[i,:,:] = Pflat[i,1:(numStates*numStates+1)].reshape([numStates,numStates])

    plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    plt.subplot(321)
    plt.plot(t , x[:, 1], "b", label='Error Filter')
    plt.plot(t , x[:, 1]+3 * np.sqrt(P[:, 0, 0]), 'r--', label='Covar Filter')
    plt.plot(t , x[:, 1]-3 * np.sqrt(P[:, 0, 0]), 'r--')
    plt.legend(loc='lower right')
    plt.title('First pos component (m)')
    plt.grid()

    plt.subplot(322)
    plt.plot(t , x[:, 4], "b")
    plt.plot(t , x[:, 4]+3 * np.sqrt(P[:, 3, 3]), 'r--')
    plt.plot(t , x[:, 4]-3 * np.sqrt(P[:, 3, 3]), 'r--')
    plt.title('Second rate component (m/s)')
    plt.grid()

    plt.subplot(323)
    plt.plot(t , x[:, 2], "b")
    plt.plot(t , x[:, 2]+3 * np.sqrt(P[:, 1, 1]), 'r--')
    plt.plot(t , x[:, 2]-3 * np.sqrt(P[:, 1, 1]), 'r--')
    plt.title('Second pos component (m)')
    plt.grid()

    plt.subplot(324)
    plt.plot(t , x[:, 5], "b")
    plt.plot(t , x[:, 5]+3 * np.sqrt(P[:, 4, 4]), 'r--')
    plt.plot(t , x[:, 5]-3 * np.sqrt(P[:, 4, 4]), 'r--')
    plt.xlabel('t(s)')
    plt.title('Third rate component (m/s)')
    plt.grid()

    plt.subplot(325)
    plt.plot(t , x[:, 3], "b")
    plt.plot(t , x[:, 3]+3 * np.sqrt(P[:, 2, 2]), 'r--')
    plt.plot(t , x[:, 3]-3 * np.sqrt(P[:, 2, 2]), 'r--')
    plt.xlabel('t(s)')
    plt.title('Third pos component (m)')
    plt.grid()

    plt.subplot(326)
    plt.plot(t , x[:, 6], "b")
    plt.plot(t , x[:, 6]+3 * np.sqrt(P[:, 5, 5]), 'r--')
    plt.plot(t , x[:, 6]-3 * np.sqrt(P[:, 5, 5]), 'r--')
    plt.xlabel('t(s)')
    plt.title('Third rate component (m/s)')
    plt.grid()

    unitTestSupport.writeFigureLaTeX('StatesPlot' + testName, 'State error and covariance', plt,
                                     'height=0.9\\textwidth, keepaspectratio', path)
    if show_plots:
        plt.show()
    plt.close()
[ 551, 9133, 1288 ]
def METHOD_NAME(self) -> int:
    self._completeIfNotSet(self._id)
    return self._id.value
[ 147 ]
def METHOD_NAME(self):
    with self.assertRaises(exceptions.MultipleRegistrationsError):
        register(Restaurant, manager_name="again")
[ 9, 16011 ]
def METHOD_NAME(
    self,
    *,
    base_type: Type,  # type: ignore[type-arg]
    class_info: ClassInfo,
    method_template: str,
    optional_return: bool,
) -> None:
    if not class_info.name.endswith("Manager"):
        return
    mro = class_info.type.mro()

    # The class needs to be derived from GetMixin or we ignore it
    if base_type not in mro:
        return

    obj_cls = class_info.type._obj_cls
    signature = inspect.signature(class_info.type.get)
    filename = inspect.getfile(class_info.type)

    fail_message = (
        f"class definition for {class_info.name!r} in file {filename!r} "
        f"must have defined a 'get' method with a return annotation of "
        f"{obj_cls} but found {signature.return_annotation}\n"
        f"Recommend adding the following method:\n"
    )
    fail_message += method_template.format(obj_cls=obj_cls)
    check_type = obj_cls
    if optional_return:
        check_type = Optional[obj_cls]
    assert check_type == signature.return_annotation, fail_message
[ 19, 250, 1087 ]
def METHOD_NAME(records_fp):
    """Load record files (json lines.)"""
    with jsonlines.open(records_fp, 'r') as reader:
        records = list(reader)
    return records
[ 557, 2530 ]
async def METHOD_NAME(
    _: KnownPackageJsonUserResolveNamesRequest,
    all_projects: AllNodeJSProjects,
    user_chosen_resolves: UserChosenNodeJSResolveAliases,
) -> KnownUserResolveNames:
    names = FrozenOrderedSet(
        user_chosen_resolves.get(
            os.path.join(project.root_dir, project.lockfile_name), project.default_resolve_name
        )
        for project in all_projects
    )
    unmatched_aliases = set(user_chosen_resolves.values()).difference(names)
    if unmatched_aliases:
        projects = pluralize(len(unmatched_aliases), "project", include_count=False)
        lockfiles = ", ".join(
            lockfile
            for lockfile, alias in user_chosen_resolves.items()
            if alias in unmatched_aliases
        )
        paths = pluralize(len(unmatched_aliases), "path", include_count=False)
        raise ValueError(
            softwrap(
                f"""
                No nodejs {projects} could be found for {lockfiles},
                but some are configured under [nodejs].resolves.

                Ensure that a package.json file you intend to manage with pants has a
                corresponding BUILD file containing a `{PackageJsonTarget.alias}` target
                by running `{bin_name()} {TailorGoal.name} ::`.

                Also confirm that {lockfiles} would be generated by your chosen nodejs
                package manager at the specified {paths}.
                """
            )
        )
    return KnownUserResolveNames(
        names=tuple(names),
        option_name="[nodejs].resolves",
        requested_resolve_names_cls=RequestedPackageJsonUserResolveNames,
    )
[ 2982, 360, 763, 21, 7122 ]
def METHOD_NAME(self) -> 'outputs.SubResourceResponse':
    """
    The reference to the subnet used for the outbound endpoint.
    """
    return pulumi.get(self, "subnet")
[ 1782 ]
def METHOD_NAME(config, xslt_version, xml):
    try:
        html_generator = get_htmlgenerator(
            xml,
            config.nonetwork,
            config.nochecks,
            config.css,
            config.print_css,
            config.js,
            config.math_elem_preference,
            config.math_js,
            config.permlink,
            config.url_article_page,
            config.url_download_ris,
            config.gs_abstract,
            config.output_style,
            xslt_version,
            config.bootstrap_css,
            config.article_css,
            config.design_system_static_img_path,
        )
        LOGGER.debug('HTMLGenerator repr: %s' % repr(html_generator))
    except XMLError as e:
        LOGGER.debug(e)
        LOGGER.warning('Error generating %s. Skipping. Run with DEBUG for more info.', xml)
        return

    try:
        abstract_suffix = config.gs_abstract and '.abstract' or ''
        version = xslt_version.replace(".", "_")
        for lang, trans_result in html_generator:
            # name of the file to be created
            fname, fext = xml.rsplit('.', 1)
            if xslt_version == "2.0":
                name_parts = [fname, lang + abstract_suffix, 'html']
            else:
                name_parts = [fname, lang + abstract_suffix, version, 'html']
            out_fname = '.'.join(name_parts)

            # creation of the file
            with open(out_fname, 'wb') as fp:
                fp.write(etree.tostring(trans_result, pretty_print=True,
                                        encoding='utf-8', method='html',
                                        doctype=u"<!DOCTYPE html>"))

            print('Generated HTML file:', out_fname)
    except TypeError as e:
        LOGGER.debug(e)
        LOGGER.warning('Error generating %s. Skipping. Run with DEBUG for more info.', xml)
        return
[ 567, 382, 1537 ]
def METHOD_NAME(self):
    pred_labels = self.clf.predict(self.X_test)
    assert_equal(pred_labels.shape, self.y_test.shape)
[ 9, 2726, 415 ]
def METHOD_NAME(self):
    '''TODO: revisit'''
    if is_cli:
        self.assertEqual(sys.getsizeof(1), sys.getsizeof(1.0))
    else:
        self.assertTrue(sys.getsizeof(1)>sys.getsizeof(1.0))
[ 9, 12300 ]
def METHOD_NAME():
    result = all(uid not in to_expire for uid in sessions.content)
    self.assertTrue(result)
[ 250, 1571 ]
def METHOD_NAME(self):
    delete(self.filename_1)
    self.failUnless(DSF(self.filename_1).tags is None)
[ 9, 298, 34 ]
def METHOD_NAME(self):
    super().METHOD_NAME()
    self._setUpSuperuser()  # creates self.admin

    # save scheduler and connection data
    self._saved_scheduler = admin.scheduler
    # overwrite
    admin.scheduler = self.scheduler

    # fake RQJob
    self.email = EmailTemplate.objects.create(slug="test-1")
    self.trigger = Trigger.objects.create(
        action="new-instructor", template=self.email
    )
    self.rqjob = RQJob.objects.create(job_id="fake-id", trigger=self.trigger)
[ 0, 1 ]
def METHOD_NAME(self, func, *args, **kwargs):
    """Serialize and persist the function and arguments.

    Args:
        func: the python function.
        args: the positional arguments to func.
        kwargs: the keyword arguments to func.

    Returns:
        None
    """
    logger.info(
        f"Serializing function code to {s3_path_join(self.s3_base_uri, FUNCTION_FOLDER)}"
    )
    serialization.serialize_func_to_s3(
        func=func,
        sagemaker_session=self.sagemaker_session,
        s3_uri=s3_path_join(self.s3_base_uri, FUNCTION_FOLDER),
        s3_kms_key=self.s3_kms_key,
        hmac_key=self.hmac_key,
    )

    logger.info(
        f"Serializing function arguments to {s3_path_join(self.s3_base_uri, ARGUMENTS_FOLDER)}"
    )
    serialization.serialize_obj_to_s3(
        obj=(args, kwargs),
        sagemaker_session=self.sagemaker_session,
        s3_uri=s3_path_join(self.s3_base_uri, ARGUMENTS_FOLDER),
        hmac_key=self.hmac_key,
        s3_kms_key=self.s3_kms_key,
    )
[ 73 ]
def METHOD_NAME(tmpdir, fmt):
    """largest fitting dimension is width"""
    base, ext = os.path.splitext(TEST_VIDEO)
    dstfile = str(tmpdir.join(base + "." + fmt))
    settings = create_settings(video_size=(125, 50), video_format=fmt)
    generate_video(SRCFILE, dstfile, settings)

    size_src = video_size(SRCFILE)
    size_dst = video_size(dstfile)

    assert size_dst[1] == 50
    # less than 2% error on ratio
    assert abs(size_dst[0] / size_dst[1] - size_src[0] / size_src[1]) < 2e-2
[ 9, 567, 1781, 90 ]
def METHOD_NAME():
    detector_size = (256, 256)
    pixel_size = (0.0135 * 8, 0.0135 * 8)
    calibrated_center = (256 / 2.0, 256 / 2.0)
    dist_sample = 355.0

    energy = 640  # ( in eV)
    # HC_OVER_E to convert from Energy to wavelength (Lambda)
    hc_over_e = 12398.4
    wavelength = hc_over_e / energy  # (Angstrom )

    ub_mat = np.array(
        [
            [-0.01231028454, 0.7405370482, 0.06323870032],
            [0.4450897473, 0.04166852402, -0.9509449389],
            [-0.7449130975, 0.01265920962, -0.5692399963],
        ]
    )

    # delta=40, theta=15, chi = 90, phi = 30, mu = 10.0, gamma=5.0
    setting_angles = np.array([[40.0, 15.0, 30.0, 25.0, 10.0, 5.0], [90.0, 60.0, 0.0, 30.0, 10.0, 5.0]])

    pdict = {}
    pdict["setting_angles"] = setting_angles
    pdict["detector_size"] = detector_size
    pdict["pixel_size"] = pixel_size
    pdict["calibrated_center"] = calibrated_center
    pdict["dist_sample"] = dist_sample
    pdict["wavelength"] = wavelength
    pdict["ub"] = ub_mat

    # ensure invalid entries for frame_mode actually fail
    # todo test frame_modes 1, 2, and 3
    # test that the values are coming back as expected for frame_mode=4
    hkl = recip.process_to_q(**pdict)

    # Known HKL values for the given six angles)
    # each entry in list is (pixel_number, known hkl value)
    known_hkl = [
        (32896, np.array([-0.15471196, 0.19673939, -0.11440936])),
        (98432, np.array([0.10205953, 0.45624416, -0.27200778])),
    ]

    for pixel, kn_hkl in known_hkl:
        npt.assert_array_almost_equal(hkl[pixel], kn_hkl, decimal=8)

    # smoketest the frame_mode variable
    pass_list = recip.process_to_q.frame_mode
    pass_list.append(None)
    for passes in pass_list:
        recip.process_to_q(frame_mode=passes, **pdict)
[ 9, 356, 24, 1010 ]
def METHOD_NAME(self):
    return self.client_metadata.get(
        'token_endpoint_auth_method',
        'client_secret_basic'
    )
[ 466, 841, 2433, 103 ]
def METHOD_NAME(cls, record: MetricsRecord) -> MetricsTabularRow:
    return cls(
        step=record.step,
        clock_time=record.clock_time,
        relative_time=record.relative_time,
        metrics=record.data,
    )
[ 280, 148 ]
def METHOD_NAME(self):
    self.write_config(self.default_config() + '\nLICENSE:append:pn-bash = " & SomeLicense"')
    error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0-or-later"

    result = bitbake('core-image-minimal', ignore_status=True)
    if error_msg not in result.output:
        raise AssertionError(result.output)
[ 9, 6180, 61, 2130 ]
def METHOD_NAME(self): return "DELETE"
[ 103 ]
def METHOD_NAME() -> int: ...
[ 19, 4773 ]
def METHOD_NAME(self) -> Any:
    ExampleIDatasetFormPlugin.num_times_new_template_called += 1
    return super(ExampleIDatasetFormPlugin, self).METHOD_NAME()
[ 80, 671 ]
def METHOD_NAME(x):
    if x > 0:
        return ()
    else:
        return 1j
[ 4183, 1413, 44 ]
def METHOD_NAME(self, image_obj_list, print_inclusive=False):
    '''Generate a default report'''
    report = formats.disclaimer.format(
        version_info=content.get_tool_version())
    logger.debug('Creating a detailed report of components in image...')
    report_only = False
    for image in image_obj_list:
        if not print_inclusive and image.load_until_layer != 0:
            report_only = True
        report = report + print_full_report(image, print_inclusive)
    if report_only:
        return report
    return report + print_licenses_only(image_obj_list)
[ 567 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""

    _args_schema = cls._args_schema
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.vm_name = AAZStrArg(
        options=["-n", "--name", "--vm-name"],
        help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
        required=True,
        id_part="name",
        configured_default="vm",
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(fee_sub, deployer, request_manager):
    token2 = deployer.deploy(ape.project.MintableToken, request_manager.address)

    fee_sub.setMinimumAmount(token2.address, 5)
    assert token2.allowance(fee_sub.address, request_manager.address) == 2**256 - 1

    fee_sub.setMinimumAmount(token2.address, 0)
    assert token2.allowance(fee_sub.address, request_manager.address) == 0
[ 9, 1317, 193, 466 ]
def METHOD_NAME(
    self, resource_group_name: str, library_name: str, workspace_name: str, **kwargs: Any
) -> _models.LibraryResource:
    """Get library by name.

    Get library by name in a workspace.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :param library_name: Library name. Required.
    :type library_name: str
    :param workspace_name: The name of the workspace. Required.
    :type workspace_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: LibraryResource or the result of cls(response)
    :rtype: ~azure.mgmt.synapse.models.LibraryResource
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-06-01-preview"] = kwargs.pop(
        "api_version", _params.pop("api-version", "2021-06-01-preview")
    )
    cls: ClsType[_models.LibraryResource] = kwargs.pop("cls", None)

    request = build_get_request(
        resource_group_name=resource_group_name,
        library_name=library_name,
        workspace_name=workspace_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.METHOD_NAME.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize("LibraryResource", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
[ 19 ]
def METHOD_NAME():
    test_map = {'default_rhel8.conf': default_rhel8_conf,
                'all_the_things.conf': all_the_things_conf,
                'converted_the_things.conf': converted_the_things_conf,
                'complicated.conf': complicated_conf,
                'no_foreign.conf': no_foreign_conf,
                'allow_usb.conf': allow_usb_conf,
                'no_defaults.conf': no_defaults_conf,
                'two_defaults.conf': two_defaults_conf,
                'empty.conf': empty_conf}
    for config_name, expected_data in test_map.items():
        config = multipathconfread._parse_config(os.path.join(TEST_DIR, config_name))
        assert config
        assert_config(config, expected_data)
[ 9, 214, 200 ]
def METHOD_NAME(self):
    projects = json.loads("""\
[ 9, 97, 1216 ]
def METHOD_NAME(connection: MagicMock, s3_writer: FireboltS3Writer) -> None:
    expected_sql = "DROP TABLE IF EXISTS ex_airbyte_raw_my_table"
    bucket_path = "dummy_bucket/airbyte_output/111_dummy-uuid/my_table"
    s3_writer.cleanup("my_table")
    connection.cursor.return_value.execute.assert_called_once_with(expected_sql)
    s3_writer.fs.delete_dir_contents.assert_called_once_with(bucket_path)
[ 9, 607, 950 ]
def METHOD_NAME():
    hcl.init(hcl.Float())
    A = hcl.placeholder((10,))
    B = hcl.compute(A.shape, lambda x: hcl.select(A[x] > 0.5, A[x], 0.0))
    s = hcl.create_schedule([A, B])
    f = hcl.build(s)

    np_A = np.random.rand(10)
    np_B = np.zeros(10)
    np_C = np.zeros(10)
    for i in range(0, 10):
        np_C[i] = np_A[i] if np_A[i] > 0.5 else 0

    hcl_A = hcl.asarray(np_A, dtype=hcl.Float(32))
    hcl_B = hcl.asarray(np_B, dtype=hcl.Float(32))
    f(hcl_A, hcl_B)
    np_B = hcl_B.asnumpy()
    assert np.allclose(np_B, np_C)
[ 9, 1472 ]
def METHOD_NAME(args, invocations):
    header_title = "API"
    if args.libsel4:
        header_title = "LIBSEL4"
    if args.arch:
        template = Environment(loader=BaseLoader).from_string(ARCH_INVOCATION_TEMPLATE)
    elif args.sel4_arch:
        template = Environment(loader=BaseLoader).from_string(SEL4_ARCH_INVOCATION_TEMPLATE)
    else:
        template = Environment(loader=BaseLoader).from_string(INVOCATION_TEMPLATE)
    data = template.render({'header_title': header_title,
                            'libsel4': args.libsel4,
                            'invocations': invocations,
                            'num_invocations': len(invocations)})
    args.dest.write(data)
    args.dest.close()
[ 567 ]
def METHOD_NAME(self):
    self._generator.compute_layout()
    self.finished.emit(self._id)
[ 22 ]
def METHOD_NAME():
    client = boto3.client("iot", region_name="ap-northeast-1")

    # no such rule
    with pytest.raises(ClientError) as ex:
        client.disable_topic_rule(ruleName=name)
    error_code = ex.value.response["Error"]["Code"]
    assert error_code == "ResourceNotFoundException"

    client.create_topic_rule(ruleName=name, topicRulePayload=payload)
    client.disable_topic_rule(ruleName=name)

    rule = client.get_topic_rule(ruleName=name)
    assert rule["rule"]["ruleName"] == name
    assert rule["rule"]["ruleDisabled"] is True
[ 9, 39, 446, 193 ]
def METHOD_NAME(self, pyramid_config):
    pyramid_config.add_route("account", "/account")
    pyramid_config.add_route("account_profile", "/account/profile")
    pyramid_config.add_route("account_notifications", "/account/notifications")
    pyramid_config.add_route("account_developer", "/account/developer")
    pyramid_config.add_route("activity.search", "/search")
    pyramid_config.add_route("activity.user_search", "/users/{username}")
    pyramid_config.add_route("group_create", "/groups/new")
    pyramid_config.add_route("group_read", "/groups/:pubid/:slug")
    pyramid_config.add_route("logout", "/logout")
[ 3968 ]
def METHOD_NAME(args):
    global _SCRIPT
    _SCRIPT = args[0]

    long_opts = ["backends=", "format=", "help", "list-backends",
                 "check-backends", "group="]
    long_opts += ["binary=", "target-type=", "target-name=", "probe-prefix="]

    try:
        opts, args = getopt.getopt(args[1:], "", long_opts)
    except getopt.GetoptError as err:
        error_opt(str(err))

    check_backends = False
    arg_backends = []
    arg_format = ""
    arg_group = None
    binary = None
    target_type = None
    target_name = None
    probe_prefix = None
    for opt, arg in opts:
        if opt == "--help":
            error_opt()
        elif opt == "--backends":
            arg_backends = arg.split(",")
        elif opt == "--group":
            arg_group = arg
        elif opt == "--format":
            arg_format = arg
        elif opt == "--list-backends":
            public_backends = tracetool.backend.get_list(only_public = True)
            out(", ".join([ b for b,_ in public_backends ]))
            sys.exit(0)
        elif opt == "--check-backends":
            check_backends = True
        elif opt == "--binary":
            binary = arg
        elif opt == '--target-type':
            target_type = arg
        elif opt == '--target-name':
            target_name = arg
        elif opt == '--probe-prefix':
            probe_prefix = arg
        else:
            error_opt("unhandled option: %s" % opt)

    if len(arg_backends) == 0:
        error_opt("no backends specified")

    if check_backends:
        for backend in arg_backends:
            if not tracetool.backend.exists(backend):
                sys.exit(1)
        sys.exit(0)

    if arg_group is None:
        error_opt("group name is required")

    if arg_format == "stap":
        if binary is None:
            error_opt("--binary is required for SystemTAP tapset generator")
        if probe_prefix is None and target_type is None:
            error_opt("--target-type is required for SystemTAP tapset generator")
        if probe_prefix is None and target_name is None:
            error_opt("--target-name is required for SystemTAP tapset generator")
        if probe_prefix is None:
            probe_prefix = ".".join(["qemu", target_type, target_name])

    if len(args) < 1:
        error_opt("missing trace-events filepath")
    events = []
    for arg in args:
        with open(arg, "r") as fh:
            events.extend(tracetool.read_events(fh, arg))

    try:
        tracetool.generate(events, arg_group, arg_format, arg_backends,
                           binary=binary, probe_prefix=probe_prefix)
    except tracetool.TracetoolError as e:
        error_opt(str(e))
[ 57 ]
def METHOD_NAME(filters: dict) -> dict:
    """Turn the filters into the result dictionary when dealing with Sub-Awards

    Note: Due to how the Django ORM joins to the awards table as an INNER JOIN,
    it is necessary to explicitly enforce the aggregations to only count
    Sub-Awards that are linked to a Prime Award.

    Remove the filter and update if we can move away from this behavior.
    """
    queryset = (
        subaward_filter(filters)
        .filter(award_id__isnull=False)
        .values("prime_award_group")
        .annotate(count=Count("broker_subaward_id"))
    )

    results = {}
    results["subgrants"] = sum([sub["count"] for sub in queryset if sub["prime_award_group"] == "grant"])
    results["subcontracts"] = sum([sub["count"] for sub in queryset if sub["prime_award_group"] == "procurement"])

    return results
[ 276, 3077 ]
def METHOD_NAME(protocol):
    """Gathers data on current server/game state from protocol class"""
    players = []
    for player in protocol.players.values():
        player_data = {
            'name': player.name,
            'latency': player.latency,
            'client': player.client_string,
            'kills': player.kills,
            'team': player.team.name
        }
        players.append(player_data)

    dictionary = {
        "serverIdentifier": protocol.identifier,
        "serverName": protocol.name,
        "serverVersion": protocol.version,
        "serverUptime": time.time() - protocol.start_time,
        "gameMode": protocol.game_mode_name,
        "map": {
            "name": protocol.map_info.name,
            "version": protocol.map_info.version,
            "author": protocol.map_info.author
        },
        "scripts": scripts_option.get(),
        "players": players,
        "maxPlayers": protocol.max_players,
        "scores": {
            "currentBlueScore": protocol.blue_team.score,
            "currentGreenScore": protocol.green_team.score,
            "maxScore": protocol.max_score}
    }

    return dictionary
[ 1056, 551 ]
def METHOD_NAME(self):
[ 9, 2054, 3688, 620, 2818, 1210, 24 ]
def METHOD_NAME(self, line: str, cell: str) -> None:
    """Memory profile the code in the cell and display a flame graph."""
    if self.shell is None:
        raise UsageError("Cannot profile code when not in a shell")

    try:
        options = argument_parser().parse_args(shlex.split(line))
    except SystemExit:
        # argparse wants to bail if the options aren't valid.
        # It already printed a message, just return control to IPython.
        return

    results_dir = Path("memray-results")
    results_dir.mkdir(exist_ok=True)

    tempdir = Path(tempfile.mkdtemp(dir=results_dir))
    dump_file = Path(tempdir) / "memray.dump"

    code = TEMPLATE.format(
        dump_file=dump_file,
        native_traces=options.native,
        trace_python_allocators=options.trace_python_allocators,
        follow_fork=options.follow_fork,
        code=indent(cell, " " * 4),
    )
    self.shell.run_cell(code)

    merge_threads = not options.split_threads

    reporter = None
    with FileReader(dump_file, report_progress=True) as reader:
        if reader.metadata.has_native_traces:
            warn_if_not_enough_symbols()

        if options.show_memory_leaks:
            snapshot = reader.get_leaked_allocation_records(
                merge_threads=merge_threads
            )
        elif options.temporary_allocation_threshold >= 0:
            snapshot = reader.get_temporary_allocation_records(
                threshold=options.temporary_allocation_threshold,
                merge_threads=merge_threads,
            )
        else:
            snapshot = reader.get_high_watermark_allocation_records(
                merge_threads=merge_threads
            )
        memory_records = tuple(reader.get_memory_snapshots())
        reporter = FlameGraphReporter.from_snapshot(
            snapshot,
            memory_records=memory_records,
            native_traces=options.native,
            inverted=options.inverted,
        )
    assert reporter is not None

    flamegraph_path = Path(tempdir) / "flamegraph.html"
    with open(flamegraph_path, "w") as f:
        reporter.render(
            outfile=f,
            metadata=reader.metadata,
            show_memory_leaks=options.show_memory_leaks,
            merge_threads=merge_threads,
            inverted=options.inverted,
        )

    dump_file.unlink()
    pprint(f"Results saved to [bold cyan]{flamegraph_path}")
    display(IFrame(flamegraph_path, width="100%", height="600"))  # type: ignore
[ -1, -1 ]