text: string, lengths 15 to 7.82k
ids: sequence of int, lengths 1 to 7
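Each row below pairs a text value (a Python function whose name has been masked as METHOD_NAME) with an ids value (a short list of integers). As a rough illustration only, here is a minimal sketch of how records with this shape could be consumed; the Record type, the summarize helper, and the reading of the length statistics are assumptions for the sketch, not part of the dump itself.

from typing import Iterable, List, TypedDict

class Record(TypedDict):
    text: str        # function source with its name masked as METHOD_NAME
    ids: List[int]   # per-row integer ids (their meaning is not stated in the dump)

def summarize(records: Iterable[Record]) -> None:
    # The header reports text lengths of 15 to 7.82k and 1 to 7 ids per row.
    for record in records:
        print(len(record["text"]), len(record["ids"]))

# Example using one row taken verbatim from the dump below.
summarize([{"text": "def METHOD_NAME(self): return self.__pos", "ids": [19, 195]}])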
def METHOD_NAME(): version_from_file = get_version_from_file() if version_from_file: return version_from_file else: from setuptools_scm import METHOD_NAME version = METHOD_NAME(root='..', relative_to=__file__) return version
[ 19, 281 ]
def METHOD_NAME(): """make standalone HTML files""" return build('html', 'The HTML pages are in {}.')
[ 382 ]
def METHOD_NAME(self) -> str: """ (Computed) The etag of the IAM policy. """ return pulumi.get(self, "etag")
[ 431 ]
def METHOD_NAME(self, *a, **k): pass
[ 22, 2277 ]
def METHOD_NAME( optimizer_class: Type[torch.optim.Optimizer], params: Iterable[torch.nn.Parameter], optimizer_kwargs: Dict[str, Any], register_hook: bool = True, ) -> None: """ Upon ``backward()``, parameters will fire the corresponding optimizer. Note - gradients for these parameters will be set to None after ``backward()``. This means that any other (non applied) optimizer over this parameter will be a no-op. Args: optimizer_class: (Type[torch.optim.Optimizer]): Optimizer to apply to parameter params: (Iterator[nn.Parameter]): parameters to apply optimizer state to optimizer_kwargs: (Dict[str, Any]): kwargs to pass to optimizer constructor register_hook: (bool): whether to register a hook that runs the optimizer after gradient for this parameter is computed. This is the default way that optimizer in backward is implemented, but specific use cases (such as DDP) may wish to override this to implement custom behavior. (Default = True) Example:: params_generator = model.parameters() param_1 = next(params_generator) remainder_params = list(params_generator) apply_optimizer_in_backward(torch.optim.SGD, [param_1], {"lr": .02}) apply_optimizer_in_backward(torch.optim.Adam, remainder_params, {"lr": .04}) model(...).sum().backward() # after backward, parameters will already # have their registered optimizer applied. """ @no_type_check def _apply_optimizer_in_backward_to_param(param: torch.nn.Parameter) -> None: # view_as creates a node in autograd graph that allows us access to the # parameter's AccumulateGrad autograd function object. We register a # hook on this object to fire the optimizer when the gradient for # this parameter is ready (has been accumulated into .grad field) # Don't create a new acc_grad if we already have one # i.e. for shared parameters or attaching multiple optimizers to a param. if param not in param_to_acc_grad_map: param_to_acc_grad_map[param] = param.view_as(param).grad_fn.next_functions[0][0] optimizer = optimizer_class([param], **optimizer_kwargs) if not hasattr(param, "_in_backward_optimizers"): param._in_backward_optimizers = [] # type: ignore[attr-defined] # TODO: investigate whether we really need these attributes. param._optimizer_classes = [] # type: ignore[attr-defined] param._optimizer_kwargs = [] # type: ignore[attr-defined] param._in_backward_optimizers.append(optimizer) # type: ignore[attr-defined] param._optimizer_classes.append(optimizer_class) # type: ignore[attr-defined] param._optimizer_kwargs.append(optimizer_kwargs) # type: ignore[attr-defined] if not register_hook: return def optimizer_hook(*_unused) -> None: for opt in param._in_backward_optimizers: # type: ignore[attr-defined] opt.step() param.grad = None handle = param_to_acc_grad_map[param].register_hook(optimizer_hook) # type: ignore[attr-defined] if param not in param_to_optim_hook_handle_map: param_to_optim_hook_handle_map[param] = [] param_to_optim_hook_handle_map[param].append(handle) for param in params: _apply_optimizer_in_backward_to_param(param)
[ 231, 968, 623, 2955 ]
def METHOD_NAME(self): self.data["extra_fields"]["options"].update(completion_criteria={"model": "time", "threshold": "test"}) serializer = self.serializer serializer.is_valid() with self.assertRaises(serializers.ValidationError): serializer.update(self.node, serializer.validated_data)
[ 9, 1323, 4415, 532 ]
def METHOD_NAME(text_and_events): text, events = text_and_events text.insert("1.0", "abc") assert events.pop().data_class(Changes).change_list == [ Change(start=[1, 0], old_end=[1, 0], new_end=[1, 3], old_text="", new_text="abc") ] text.window_create("1.0", window=tkinter.Button(text)) text.insert("1.0 lineend", "xyz") # Notice that text index says 4 counting button, change event says 3 ignoring button assert text.search("xyz", "1.0") == "1.4" assert events.pop().data_class(Changes).change_list == [ Change(start=[1, 3], old_end=[1, 3], new_end=[1, 6], old_text="", new_text="xyz") ]
[ 9, 2314, 1092 ]
def METHOD_NAME(options_name): '''Given the build options name, return the build directory name. Does not return the full path to the directory - just the base name.''' return 'build_%s' % options_abbrev(options_name)
[ 19, 414, -1, 1190 ]
def METHOD_NAME(self) -> Iterable[tuple[str, str, str, str, str, int]]: return []
[ 19, 635 ]
def METHOD_NAME(self):
[ 19, 1298, -1 ]
def METHOD_NAME(user, semester): if not can_reward_points_be_used_by(user): return None, False if not is_semester_activated(semester): return None, False # does the user have at least one required evaluation in this semester? required_evaluations = Evaluation.objects.filter(participants=user, course__semester=semester, is_rewarded=True) if not required_evaluations.exists(): return None, False # How many points have been granted to this user vs how many should they have (this semester) granted_points = ( RewardPointGranting.objects.filter(user_profile=user, semester=semester).aggregate(Sum("value"))["value__sum"] or 0 ) progress = float(required_evaluations.filter(voters=user).count()) / float(required_evaluations.count()) target_points = max((points for threshold, points in settings.REWARD_POINTS if threshold <= progress), default=0) missing_points = target_points - granted_points if missing_points > 0: granting = RewardPointGranting.objects.create(user_profile=user, semester=semester, value=missing_points) return granting, progress >= 1.0 return None, False
[ 3211, 4181, 182, 217, 11014 ]
def METHOD_NAME(self, node: Node) -> int: ...
[ 447, 224 ]
def METHOD_NAME(self): self.cmdloop()
[ 22 ]
def METHOD_NAME(): parser = OptionParser() parser.add_option("-l", "-L", dest="logDir", help = "creates log files to DIRECTORY", metavar = "DIRECTORY", default = os.getcwd()) parser.add_option("-p", "-P", dest="pickleDir", help = "reads pickle files from DIRECTORY", metavar = "DIRECTORY", default = os.getcwd()) parser.add_option("-c", "-C", dest="htmlDir", help = "creates cmsCRPage.html file to DIRECTORY", metavar = "DIRECTORY", default = os.getcwd()) (options, args) = parser.parse_args() logsDir = options.logDir pickleDir = options.pickleDir htmlDir = options.htmlDir if not os.path.exists(logsDir): print("Error: wrong directory %s"%logsDir) return if not os.path.exists(pickleDir): print("Error: wrong directory %s"%pickleDir) return if not os.path.exists(htmlDir): print("Error: wrong directory %s"%htmlDir) return run(pickleDir, logsDir, htmlDir) return
[ 57 ]
def METHOD_NAME(self, username=None, sudo_path=None, password=None, preserve_env=None, set_home=None, sudo_args=None, **kwargs): super(Stream, self).METHOD_NAME(**kwargs) opts = parse_sudo_flags(sudo_args or []) if username is not None: self.username = username if sudo_path is not None: self.sudo_path = sudo_path if password is not None: self.password = password if (preserve_env or opts.preserve_env) is not None: self.preserve_env = preserve_env or opts.preserve_env if (set_home or opts.set_home) is not None: self.set_home = set_home or opts.set_home
[ 363 ]
def METHOD_NAME(self): # Arrange self.dema.update_raw(1.00000) self.dema.update_raw(2.00000) self.dema.update_raw(3.00000) # Act, Assert assert self.dema.value == pytest.approx(1.904583020285499, rel=1e-9)
[ 9, 99, 41, 2756, 1461, 610, 391 ]
def METHOD_NAME(self):
[ 2222 ]
def METHOD_NAME(event: UIOnChangeEvent): texture_button_with_toggle.disabled = event.new_value
[ 69, 194 ]
def METHOD_NAME(recid): """Test serialisation of record with given recid.""" uuid = PersistentIdentifier.get('recid', recid).object_uuid record = Record.get_record(uuid) experiment = record.get('experiment', None) doi = record['doi'] # serialize record to schema40 doc = DataCiteSerializer().dump(record) schema40.validate(doc) doc = schema40.tostring(doc) click.echo(doc)
[ 9, 3061 ]
def METHOD_NAME(self): return self.__pos
[ 19, 195 ]
def METHOD_NAME(self, number): if number >= self.max_num: raise OverflowError(self.errmsg_toobig % (number, self.max_num)) minus = '' if number < 0: minus = 'min ' float_word = '' n = self.split_by_koma(abs(number)) if len(n) == 2: float_word = self.spell_float(n[1]) return minus + self.join(self.spell(self.split_by_3(n[0])), float_word)
[ 24, 10811 ]
def METHOD_NAME(ctx, grad_output): outputs = lightconv_cuda.METHOD_NAME( grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors ) grad_input, grad_weights = outputs return grad_input, grad_weights, None
[ 2955 ]
async def METHOD_NAME(self, plugin_ctx: PluginCtx): curr_user_id = plugin_ctx.session.get('user', {'id': None})['id'] api_key = self._get_api_key(plugin_ctx) if api_key: hash_key = hashlib.sha256(api_key.encode()).hexdigest() user_info = await self._find_user(hash_key) if self.is_anonymous(curr_user_id): plugin_ctx.session.clear() if user_info is None: plugin_ctx.session['user'] = self.anonymous_user(plugin_ctx) else: plugin_ctx.session['user'] = user_info else: if not self.is_anonymous(curr_user_id): plugin_ctx.session.clear() plugin_ctx.session['user'] = self.anonymous_user(plugin_ctx)
[ 9766 ]
def METHOD_NAME(self): "Convert a coordinate vector to an integer normal"
[ 753, 1576 ]
def METHOD_NAME(self, put_data): """ Invoke exists() with a key and policy. """ key = ("test", "demo", 1) record = {"Name": "Jeff"} policy = { "max_retries": 1, } put_data(self.as_connection, key, record) key, meta = self.as_connection.exists(key, policy) assert meta["gen"] is not None assert meta["ttl"] is not None
[ 9, 934, 954, 41, 59, 61, 54 ]
def METHOD_NAME(encoder): dict_content_ben = {"name": "Ben", "favorite_number": 7, "favorite_color": "red"} encoded_message_content_ben = encoder.encode(dict_content_ben, schema=SCHEMA_STRING) print("Encoded message content is: ", encoded_message_content_ben) return EventData.from_message_content( encoded_message_content_ben["content"], encoded_message_content_ben["content_type"], )
[ 421, 277, 459, 553 ]
def METHOD_NAME(self, *args):
[ 144, 674 ]
def METHOD_NAME( self, ) -> list[tuple[int, DNSAddressFamily, bytes, int, str]] | None: """Return fallback DNS servers including port and server name.""" return self.properties[DBUS_ATTR_FALLBACK_DNS_EX]
[ 1008, 2455, 2258 ]
def METHOD_NAME(self, df: DataFrame, metadata: dict) -> DataFrame: enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]] if enter_long_conditions: df.loc[ reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"] ] = (1, "long") enter_short_conditions = [df["do_predict"] == 1, df["&-s_close"] < df["sell_roi"]] if enter_short_conditions: df.loc[ reduce(lambda x, y: x & y, enter_short_conditions), ["enter_short", "enter_tag"] ] = (1, "short") return df
[ 3914, 475, 8165 ]
def METHOD_NAME(self, response): education_blocks = response.css('li.education__list-item') education_list = [] for block in education_blocks: education = {} education['organisation'] = block.css('h3::text').get(default='').strip() education['organisation_profile'] = block.css('a::attr(href)').get(default='').split('?')[0] try: education['course_details'] = '' for text in block.css('h4 span::text').getall(): education['course_details'] = education['course_details'] + text.strip() + ' ' education['course_details'] = education['course_details'].strip() except Exception as e: print("education --> course_details", e) education['course_details'] = '' education['description'] = block.css('div.education__item--details p::text').get(default='').strip() try: date_ranges = block.css('span.date-range time::text').getall() if len(date_ranges) == 2: education['start_time'] = date_ranges[0] education['end_time'] = date_ranges[1] elif len(date_ranges) == 1: education['start_time'] = date_ranges[0] education['end_time'] = 'present' except Exception as e: print("education --> time_ranges", e) education['start_time'] = '' education['end_time'] = '' education_list.append(education) return education_list
[ 297, 13488, 1287 ]
def METHOD_NAME(self, op, inputs):
[ 250, 441, 220, 1461 ]
def METHOD_NAME(self, fanid):
[ 220, 3466, 6234 ]
def METHOD_NAME(self): controller_name = 'oioioi.teachers.controllers.TeacherContestController' self.assertTrue(self.client.login(username='test_user')) url = reverse('oioioiadmin:contests_contest_add') response = self.client.get(url) self.assertEqual(response.status_code, 200) post_data = make_empty_contest_formset() post_data.update( { 'name': 'Teacher\'s contest', 'id': 'tc', 'start_date_0': '2012-02-03', 'start_date_1': '04:05:06', 'end_date_0': '2012-02-04', 'end_date_1': '05:06:07', 'results_date_0': '2012-02-05', 'results_date_1': '06:07:08', 'controller_name': controller_name, 'problemstatementconfig-0-visible': 'AUTO', 'teamsconfig-0-max_team_size': 3, 'teamsconfig-0-teams_list_visible': 'NO', } ) response = self.client.post(url, post_data, follow=True) self.assertEqual(response.status_code, 200) self.assertContains(response, 'allow a pupil to access this contest') contest = Contest.objects.get() self.assertEqual(controller_name, contest.controller_name)
[ 9, 9530, 238, 8807 ]
def METHOD_NAME(self, model_name: str) -> None: model = ObjectDetectionTask( model=model_name, backbone="resnet18", freeze_backbone=True ) assert not all([param.requires_grad for param in model.model.parameters()])
[ 9, 3125, 2472 ]
def METHOD_NAME(self): response = { 'message': 'Here is a random number courtesy of randint: {0}'.format( random.randint(0, 100000000) ) } return response
[ 19, 236, 106 ]
def METHOD_NAME(self): path = _get_path("doesnotexist.txt") with self.assertRaises(RuntimeError) as ctx: MBTilesSource(path) error = "The file does not exist: {}".format(path) self.assertTrue(error in str(ctx.exception))
[ 9, 256, 1153, 171 ]
def METHOD_NAME(a, axis=None, dtype=None, out=None, keepdims=False, initial=_NoValue): return umr_prod(a, axis, dtype, out, keepdims, initial)
[ 1518 ]
def METHOD_NAME(): recent = apps.METHOD_NAME(50) compat = [get_short_app(f"apps:{appid}") for appid in recent] return [app for app in compat if app]
[ 19, 8910, 3758 ]
def METHOD_NAME(csv_file): """ Parse the fields in the given csv """ csv_data = [] f_csv = open(csv_file) reader = csv.reader(f_csv) for data in reader: if len(data) != len(_csv_fields): sys.stderr.write("Invalid data. Skipping line %s ..\n" % data) continue csv_data.append(data) return csv_data
[ 203, 732, 171 ]
def METHOD_NAME(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
[ 24, 3 ]
def METHOD_NAME(model, temp_dir): data_path = os.path.join(temp_dir, "data.txt") test_util.make_data_file(data_path, ["a a a b b d", "a b b b", "c c"]) dataset = model.examples_inputter.make_inference_dataset(data_path, 1) return dataset
[ 129, 126 ]
def METHOD_NAME(page): # metadata page # general metadata metadata = {} meta_page = page[0:72] _, pgno, magic, version, pagesize, encrypt_alg, pg_type, metaflags, _, free, last_pgno, nparts, key_count, record_count, flags, uid = struct.unpack('QIIIIBBBBIIIIII20s', meta_page) metadata['pgno'] = pgno metadata['magic'] = magic metadata['version'] = version metadata['pagesize'] = pagesize metadata['encrypt_alg'] = encrypt_alg metadata['pg_type'] = pg_type metadata['metaflags'] = metaflags metadata['free'] = free metadata['last_pgno'] = last_pgno metadata['nparts'] = nparts metadata['key_count'] = key_count metadata['record_count'] = record_count metadata['flags'] = flags metadata['uid'] = uid.hex().encode() assert magic == BTREE_MAGIC, 'bdb magic does not match bdb btree magic' assert pg_type == BTREE_META, 'Metadata page is not a btree metadata page' assert version == DB_VERSION, 'Database too new' # btree metadata btree_meta_page = page[72:512] _, minkey, re_len, re_pad, root, _, crypto_magic, _, iv, chksum = struct.unpack('IIIII368sI12s16s20s', btree_meta_page) metadata['minkey'] = minkey metadata['re_len'] = re_len metadata['re_pad'] = re_pad metadata['root'] = root metadata['crypto_magic'] = crypto_magic metadata['iv'] = iv.hex().encode() metadata['chksum'] = chksum.hex().encode() return metadata
[ 278, 1094, 1174 ]
def METHOD_NAME( self: T, fdata: FData, coefs: np.ndarray, ) -> Tuple[T, NDArrayFloat]: """ Create a basis of the subspace generated by the given functions. Args: fdata: The resulting basis will span the subspace generated by these functions. coefs: Coefficients of some functions in the given fdata. These coefficients will be transformed into the coefficients of the same functions in the resulting basis. """ raise ValueError( "Unexpected type of functional data object: {type}.".format( type=type(fdata), ), )
[ 129, 15405, 1189, 8098 ]
def METHOD_NAME(self, uc, access, address, size, value, user_data): print("[ HOOK_MEM_INVALID - Address: %s ]" % hex(address)) if access == UC_MEM_WRITE_UNMAPPED: print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" %(address, size, value)) return True else: print(">>> Missing memory is being READ at 0x%x, data size = %u, data value = 0x%x" %(address, size, value)) return True
[ 1021, 1279, 532 ]
def METHOD_NAME(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None, user_settings_name: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = UserSettingsWithLocationArgs.__new__(UserSettingsWithLocationArgs) if location is None and not opts.urn: raise TypeError("Missing required property 'location'") __props__.__dict__["location"] = location if properties is None and not opts.urn: raise TypeError("Missing required property 'properties'") __props__.__dict__["properties"] = properties __props__.__dict__["user_settings_name"] = user_settings_name alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:portal/v20181001:UserSettingsWithLocation")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(UserSettingsWithLocation, __self__).__init__( 'azure-native:portal:UserSettingsWithLocation', resource_name, __props__, opts)
[ 2026, 176 ]
def METHOD_NAME(self): for source_file in self.get_source_files(): yield source_file.with_suffix('.ll')
[ 19, 9654, 1537 ]
def METHOD_NAME(self): METHOD_NAME = YAML() for arg, value in self.dump_args.items(): setattr(METHOD_NAME, arg, value) return METHOD_NAME
[ 406 ]
def METHOD_NAME(self, dl_manager: tfds.download.DownloadManager): dl_manager.verify_ssl = False dl_paths = dl_manager.download(_URLS['the_pile']) print(dl_paths) return { 'train': self._generate_examples(dl_paths['train']), 'validation': self._generate_examples(dl_paths['validation']), 'test': self._generate_examples(dl_paths['test']), }
[ 265, 942 ]
def METHOD_NAME(self, api_id, api_stage, error_code=None): expected_params = {'restApiId': api_id, 'stageName': api_stage} self._stub_bifurcator( 'create_deployment', expected_params, error_code=error_code)
[ 492, 129, 1503 ]
def METHOD_NAME(request): """ Parametrized fixture giving method parameters 'ffill' and 'bfill' for Series.fillna(method=<method>) testing. """ return request.param
[ 14792, 103 ]
def METHOD_NAME(): hyperbola = Hyperbola(major=1.0, minor=0.5) assert close(hyperbola.minor, 0.5, tol=1e-12) hyperbola._minor = None with pytest.raises(ValueError): hyperbola.minor with pytest.raises(ValueError): hyperbola.minor = -1.0
[ 9, 14714, 525 ]
def METHOD_NAME(self, task, config): if not task.accepted and not task.options.test: return try: result = session.post( 'http://www.pogdesign.co.uk/cat/login', data={ 'username': config['username'], 'password': config['password'], 'sub_login': 'Account Login', }, ) except requests.RequestException as e: logger.error('Error logging in to pog calendar: {}', e) return if 'logout' not in result.text: logger.error('Username/password for pogdesign calendar appear to be incorrect.') return elif task.options.test: logger.verbose('Successfully logged in to pogdesign calendar.') for entry in task.accepted: if not entry.get('series_name') or not entry.get('series_id_type') == 'ep': continue show_id = self.find_show_id(entry['series_name'], task.session) if not show_id: logger.debug('Could not find pogdesign calendar id for `{}`', entry['series_name']) continue if task.options.test: logger.verbose( 'Would mark {} {} in pogdesign calenadar.', entry['series_name'], entry['series_id'], ) continue else: logger.verbose( 'Marking {} {} in pogdesign calenadar.', entry['series_name'], entry['series_id'], ) shid = '{}-{}-{}/{}-{}'.format( show_id, entry['series_season'], entry['series_episode'], datetime.now().month, datetime.now().year, ) try: session.post( 'http://www.pogdesign.co.uk/cat/watchhandle', data={'watched': 'adding', 'shid': shid}, ) except requests.RequestException as e: logger.error( 'Error marking {} {} in pogdesign calendar: {}', entry['series_name'], entry['series_id'], e, )
[ 69, 758, 146 ]
def METHOD_NAME( item, subitem, mask: Incomplete | None = ..., text_buf_size: int = ... ) -> tuple[array[int], list[Incomplete]]: ...
[ 35, -1 ]
def METHOD_NAME(self): self._stop()
[ 950 ]
def METHOD_NAME(self) -> str: """ The unique resource identifier of the ARM resource. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(name, at_time=None, **kwargs): # pylint: disable=unused-argument """ Initiate a reboot if the running kernel is not the latest one installed. .. note:: This state does not install any patches. It only compares the running kernel version number to other kernel versions also installed in the system. If the running version is not the latest one installed, this state will reboot the system. See :py:func:`kernelpkg.upgrade <salt.modules.kernelpkg_linux_yum.upgrade>` and :py:func:`~salt.states.kernelpkg.latest_installed` for ways to install new kernel packages. This module does not attempt to understand or manage boot loader configurations it is possible to have a new kernel installed, but a boot loader configuration that will never activate it. For this reason, it would not be advisable to schedule this state to run automatically. Because this state function may cause the system to reboot, it may be preferable to move it to the very end of the state run. See :py:func:`~salt.states.kernelpkg.latest_wait` for a waitable state that can be called with the `listen` requesite. name Arbitrary name for the state. Does not affect behavior. at_time The wait time in minutes before the system will be rebooted. """ active = __salt__["kernelpkg.active"]() latest = __salt__["kernelpkg.latest_installed"]() ret = {"name": name} if __salt__["kernelpkg.needs_reboot"](): ret["comment"] = "The system will be booted to activate kernel: {}".format( latest ) if __opts__["test"]: ret["result"] = None ret["changes"] = {"kernel": {"old": active, "new": latest}} else: __salt__["system.reboot"](at_time=at_time) ret["result"] = True ret["changes"] = {"kernel": {"old": active, "new": latest}} else: ret["result"] = True ret["comment"] = "The latest installed kernel package is active: {}".format( active ) ret["changes"] = {} return ret
[ 893, 923 ]
def METHOD_NAME(qapp, qWidgetFactory): """Test Axis.setScale('log') method with an empty plot Limits are reset only when negative """ plotWidget = qWidgetFactory(PlotWidget) xaxis = plotWidget.getXAxis() yaxis = plotWidget.getYAxis() y2axis = plotWidget.getYAxis("right") xaxis.setLimits(-1., 1.) yaxis.setLimits(2., 3.) y2axis.setLimits(-2., -1.) xaxis.setScale("log") qapp.processEvents() assert xaxis.getLimits() == (1., 100.) assert yaxis.getLimits() == (2., 3.) assert y2axis.getLimits() == (-2., -1.) xaxis.setLimits(10., 20.) yaxis.setScale("log") qapp.processEvents() assert xaxis.getLimits() == (10., 20.) assert yaxis.getLimits() == (2., 3.) # Positive range is preserved assert y2axis.getLimits() == (1., 100.) # Negative min is reset
[ 9, 2227, 0, 930, 390, 654, 365 ]
def METHOD_NAME(fpath): cols_names = [ "P_PARTKEY", "P_NAME", "P_MFGR", "P_BRAND", "P_TYPE", "P_SIZE", "P_CONTAINER", "P_RETAILPRICE", "P_COMMENT", ] cols = { "P_PARTKEY": np.int64, "P_NAME": str, "P_MFGR": str, "P_BRAND": str, "P_TYPE": str, "P_SIZE": np.int64, "P_CONTAINER": str, "P_RETAILPRICE": np.float64, "P_COMMENT": str, } rel = pd.read_csv(fpath, sep="|", header=None, names=cols_names, dtype=cols) return rel
[ 557, 995 ]
def METHOD_NAME(): with instance_for_test() as instance: with safe_tempfile_path() as skt: server_process = open_server_process(instance.get_ref(), port=None, socket=skt) try: assert DagsterGrpcClient(socket=skt).ping("foobar") == "foobar" finally: interrupt_ipc_subprocess_pid(server_process.pid) server_process.terminate() server_process.wait()
[ 9, 163, 1083 ]
def METHOD_NAME(request): def _fixture_lookup(name): return request.getfixturevalue(name) return _fixture_lookup
[ 1964, 1906, 717 ]
def METHOD_NAME( self, mock_Partitioner, mock_DiskFormat ): partitioner = Mock() mock_Partitioner.return_value = partitioner image_format = Mock() image_format.resize_raw_disk.return_value = False mock_DiskFormat.return_value = image_format self._init_command_args() self.task.command_args['resize'] = True self.task.command_args['--size'] = '42' with self._caplog.at_level(logging.INFO): self.task.process() self.loop_provider.create.assert_called_once_with(overwrite=False) partitioner.resize_table.assert_called_once_with() image_format.resize_raw_disk.assert_called_once_with( 42 ) assert 'Loading XML description' in self._caplog.text assert '--> loaded {0}'.format( os.sep.join([self.abs_root_dir, 'image', 'config.xml']) ) in self._caplog.text assert '--> Selected build type: oem' in self._caplog.text assert '--> Selected profiles: vmxSimpleFlavour' in self._caplog.text assert 'Resizing raw disk to 42 bytes' in self._caplog.text assert 'Raw disk is already at 42 bytes' in self._caplog.text
[ 9, 356, 660, 1128, 130, 2002 ]
def METHOD_NAME(self):
[ 69, 1881, 1180 ]
def METHOD_NAME(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample['net_input'] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample['target'] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model.forward(**net_input) attn = decoder_out[1] if type(attn) is dict: attn = attn.get('attn', None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample['target'] = tgt curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt) probs[idx:end] = tgt_probs.view(-1) idx = end sample['target'] = orig_target probs = probs.view(sample['target'].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None and torch.is_tensor(attn): attn = attn.data if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \ if sample['target'] is not None else None tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i], sample['target'][i], self.pad, self.eos) else: avg_attn_i = alignment = None hypos.append([{ 'tokens': ref, 'score': score_i, 'attention': avg_attn_i, 'alignment': alignment, 'positional_scores': avg_probs_i, }]) return hypos
[ 567 ]
def METHOD_NAME(node: ast.FunctionDef) -> Dict[str, str]: """Extract defaults from function definition node literally, as pieces of source code""" defaults: List[ast.expr] = [] if node.args.defaults: defaults.extend(node.args.defaults) if node.args.kw_defaults: defaults.extend(node.args.kw_defaults) args: List[ast.arg] = [] if node.args.posonlyargs: args.extend(node.args.posonlyargs) if node.args.args: args.extend(node.args.args) if node.args.kwonlyargs: args.extend(node.args.kwonlyargs) # zip args and defaults literal_defaults: Dict[str, str] = {} for arg, default in zip(reversed(args), reversed(defaults)): if default: literal_defaults[str(arg.arg)] = astunparse.unparse(default).strip() return literal_defaults
[ 19, 1479, 1618 ]
def METHOD_NAME(self, _graphene_info: ResolveInfo): return [ GrapheneDaemonStatus(daemon_status) for daemon_status in self._instance.get_daemon_statuses().values() ]
[ 1014, 75, 1687, 4840 ]
def METHOD_NAME(self, project_permission_obj): """测试场景:有项目创建权限""" perm_ctx = ProjectPermCtx(username=roles.ADMIN_USER) assert project_permission_obj.can_create(perm_ctx)
[ 9, 1046, 129 ]
def METHOD_NAME(scene): aabbb = SVGMobject(get_svg_resource("aabbb.svg")) scene.add(aabbb) scene.wait()
[ 9, 1080, 82, 380 ]
def METHOD_NAME( config: ModelParallelConfig, arch, hidden_size, ffn_hidden_size, num_layers, num_attention_heads, apply_query_key_layer_scaling=False, kv_channels=None, init_method=None, scaled_init_method=None, decoder_attn_mask_type=AttnMaskType.causal, pre_process=True, post_process=True, init_method_std=0.02, megatron_amp_O2=False, hidden_dropout=0.1, attention_dropout=0.1, ffn_dropout=0.0, precision=16, fp32_residual_connection=False, activations_checkpoint_method=None, activations_checkpoint_num_layers=1, activations_checkpoint_granularity=None, layernorm_epsilon=1e-5, bias_activation_fusion=True, bias_dropout_add_fusion=True, masked_softmax_fusion=True, persist_layer_norm=False, openai_gelu=False, activation="gelu", onnx_safe=False, bias=True, normalization="layernorm", headscale=False, transformer_block_type="pre_ln", hidden_steps=-1, parent_model_type=ModelType.encoder_or_decoder, layer_type=None, chunk_size=64, layer_number_offset=0, # this is use only for attention norm_factor scaling megatron_legacy=False, normalize_attention_scores=True, sequence_parallel=False, num_moe_experts=1, moe_frequency=1, moe_dropout=0.0, turn_off_rop=False, # turn off the RoP positional embedding version=1, position_embedding_type='learned_absolute', use_flash_attention=False, ): """Build language model and return along with the key to save.""" if kv_channels is None: assert ( hidden_size % num_attention_heads == 0 ), 'hidden_size must be divisible by num_attention_heads if kv_channels is None' kv_channels = hidden_size // num_attention_heads if init_method is None: init_method = init_method_normal(init_method_std) if scaled_init_method is None: scaled_init_method = scaled_init_method_normal(init_method_std, num_layers) if arch == "transformer": # Language model. 
decoder = MegatronTransformerDecoderModule( config=config, init_method=init_method, output_layer_init_method=scaled_init_method, hidden_size=hidden_size, num_layers=num_layers, num_attention_heads=num_attention_heads, apply_query_key_layer_scaling=apply_query_key_layer_scaling, kv_channels=kv_channels, ffn_hidden_size=ffn_hidden_size, decoder_attn_mask_type=decoder_attn_mask_type, pre_process=pre_process, post_process=post_process, megatron_amp_O2=megatron_amp_O2, hidden_dropout=hidden_dropout, attention_dropout=attention_dropout, ffn_dropout=ffn_dropout, precision=precision, fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, bias_activation_fusion=bias_activation_fusion, bias_dropout_add_fusion=bias_dropout_add_fusion, masked_softmax_fusion=masked_softmax_fusion, persist_layer_norm=persist_layer_norm, openai_gelu=openai_gelu, onnx_safe=onnx_safe, activation=activation, bias=bias, normalization=normalization, transformer_block_type=transformer_block_type, headscale=headscale, parent_model_type=parent_model_type, megatron_legacy=megatron_legacy, normalize_attention_scores=normalize_attention_scores, num_moe_experts=num_moe_experts, moe_frequency=moe_frequency, moe_dropout=moe_dropout, position_embedding_type=position_embedding_type, use_flash_attention=use_flash_attention, ) elif arch == "retro": decoder = MegatronRetrievalTransformerDecoderModule( config=config, init_method=init_method, output_layer_init_method=scaled_init_method, hidden_size=hidden_size, num_layers=num_layers, num_attention_heads=num_attention_heads, apply_query_key_layer_scaling=apply_query_key_layer_scaling, kv_channels=kv_channels, layer_type=layer_type, ffn_hidden_size=ffn_hidden_size, pre_process=pre_process, post_process=post_process, megatron_amp_O2=megatron_amp_O2, hidden_dropout=hidden_dropout, attention_dropout=attention_dropout, precision=precision, fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, bias_activation_fusion=bias_activation_fusion, bias_dropout_add_fusion=bias_dropout_add_fusion, masked_softmax_fusion=masked_softmax_fusion, persist_layer_norm=persist_layer_norm, openai_gelu=openai_gelu, onnx_safe=onnx_safe, activation=activation, bias=bias, normalization=normalization, transformer_block_type=transformer_block_type, parent_model_type=parent_model_type, chunk_size=chunk_size, layer_number_offset=layer_number_offset, megatron_legacy=megatron_legacy, normalize_attention_scores=normalize_attention_scores, turn_off_rop=turn_off_rop, version=version, ) else: raise ValueError(f"Unknown decoder arch = {arch}. Available decoder arch = {AVAILABLE_DECODERS}") return decoder
[ 19, 3642, 578 ]
def METHOD_NAME( SignedBlob, MsgAndCertEncodingType, CryptProv: _win32typing.PyCRYPTPROV | None = ..., Flags: int = ... ) -> _win32typing.PyCERTSTORE: ...
[ 7434, 19, 277, 7029 ]
def METHOD_NAME(self, _: rospy.timer.TimerEvent) -> None: """ When timer triggers, update model with time delta """ now = rospy.Time.now() dt = (now - self.last_time).to_sec() self.update(dt) self.last_time = now
[ 2401, 905 ]
def METHOD_NAME(eval_preds): labels = paddle.to_tensor(eval_preds.label_ids, dtype="int64") preds = paddle.to_tensor(eval_preds.predictions) preds = paddle.nn.functional.softmax(preds, axis=-1) labels = paddle.argmax(labels, axis=-1) metric = Accuracy() correct = metric.compute(preds, labels) metric.update(correct) acc = metric.accumulate() return {"accuracy": acc}
[ 226, 1097 ]
def METHOD_NAME(): cstats = ConstructorStats.get(m.Sequence) s = m.Sequence(5) assert cstats.values() == ['of size', '5'] assert "Sequence" in repr(s) assert len(s) == 5 assert s[0] == 0 and s[3] == 0 assert 12.34 not in s s[0], s[3] = 12.34, 56.78 assert 12.34 in s assert isclose(s[0], 12.34) and isclose(s[3], 56.78) rev = reversed(s) assert cstats.values() == ['of size', '5'] rev2 = s[::-1] assert cstats.values() == ['of size', '5'] it = iter(m.Sequence(0)) for _ in range(3): # __next__ must continue to raise StopIteration with pytest.raises(StopIteration): next(it) assert cstats.values() == ['of size', '0'] expected = [0, 56.78, 0, 0, 12.34] assert allclose(rev, expected) assert allclose(rev2, expected) assert rev == rev2 rev[0::2] = m.Sequence([2.0, 2.0, 2.0]) assert cstats.values() == ['of size', '3', 'from std::vector'] assert allclose(rev, [2, 56.78, 2, 0, 2]) assert cstats.alive() == 4 del it assert cstats.alive() == 3 del s assert cstats.alive() == 2 del rev assert cstats.alive() == 1 del rev2 assert cstats.alive() == 0 assert cstats.values() == [] assert cstats.default_constructions == 0 assert cstats.copy_constructions == 0 assert cstats.move_constructions >= 1 assert cstats.copy_assignments == 0 assert cstats.move_assignments == 0
[ 9, 771 ]
def METHOD_NAME(self, parent, page, doc, h_level, **kwargs): for name, pyinfo in doc.items(**kwargs): h = core.Heading(parent, level=h_level, class_='moose-pysyntax-member-heading') fname = name + pyinfo.signature if pyinfo.signature is not None else name core.Monospace(core.Strong(h), string=fname) if pyinfo.documentation is None: msg = "Missing documentation for '%s'.\n%s" LOG.error(msg, name, doc.filename) else: self.reader.tokenize(parent, pyinfo.documentation, page)
[ 238, 1200 ]
def METHOD_NAME(tmpdir, runner): outfile = str(tmpdir.join('out.tif')) result = runner.invoke(main_group, ['calc'] + [ '(+ 125 (* 0.1 (read 1)))', 'tests/data/shade.tif', outfile], catch_exceptions=False) assert result.exit_code == 0 with rasterio.open(outfile) as src: assert src.count == 1 assert src.meta['dtype'] == 'uint8' data = src.read(masked=True) assert data.min() == 125 assert data.data[0][0][0] == 255 assert data.mask[0][0][0]
[ 9, 7779, 1407 ]
def METHOD_NAME(self, path, method): responses = {} if method == "GET": responses["404"] = { "description": "Not found.", "content": { "image/*": { "schema": { "type": "string", "format": "binary", } } }, } responses["400"] = { "description": "Bad request.", "content": { "image/*": { "schema": { "type": "string", "format": "binary", } } }, } responses["200"] = { "description": "Successful retrieval of localization graphic.", "content": { "image/*": { "schema": { "type": "string", "format": "binary", } } }, } return responses
[ 19, 5309 ]
async def METHOD_NAME(self) -> None: self.overrides = await self._get_overrides()
[ 557 ]
def METHOD_NAME(self) -> None: # Evict if there are now too many items while self._max_size and len(self) > self._max_size: _key, value = self._cache.popitem(last=False) if self.iterable: self.metrics.inc_evictions(EvictionReason.size, len(value.value)) else: self.metrics.inc_evictions(EvictionReason.size)
[ 17145 ]
def METHOD_NAME(file_name): if os.path.isfile(os.path.join(self.fldr, file_name)): return "" else: return "file missing"
[ 250, 217, 954 ]
def METHOD_NAME(self): image = paddle.static.data( name='image', shape=[None, 1, 28, 28], dtype='float32') label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') model = MobileNet() out = model.net(input=image, class_dim=10) cost = paddle.nn.functional.loss.cross_entropy(input=out, label=label) avg_cost = paddle.mean(x=cost) acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1) acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5) optimizer = paddle.optimizer.Momentum( momentum=0.9, learning_rate=0.01, weight_decay=paddle.regularizer.L2Decay(4e-5)) optimizer.minimize(avg_cost) main_prog = paddle.static.default_main_program() val_prog = main_prog.clone(for_test=True) place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda( ) else paddle.CPUPlace() exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) def transform(x): return np.reshape(x, [1, 28, 28]) train_dataset = paddle.vision.datasets.MNIST( mode='train', backend='cv2', transform=transform) test_dataset = paddle.vision.datasets.MNIST( mode='test', backend='cv2', transform=transform) train_loader = paddle.io.DataLoader( train_dataset, places=place, feed_list=[image, label], drop_last=True, batch_size=64, return_list=False) valid_loader = paddle.io.DataLoader( test_dataset, places=place, feed_list=[image, label], batch_size=64, return_list=False) def sample_generator_creator(): def __reader__(): for data in test_dataset: image, label = data image = np.expand_dims(image, axis=0) label = np.expand_dims(label, axis=0) yield image, label return __reader__ def train(program): iter = 0 for data in train_loader(): cost, top1, top5 = exe.run( program, feed=data, fetch_list=[avg_cost, acc_top1, acc_top5]) iter += 1 if iter % 100 == 0: print( 'train iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'. format(iter, cost, top1, top5)) def test(program, outputs=[avg_cost, acc_top1, acc_top5]): iter = 0 result = [[], [], []] for data in valid_loader(): cost, top1, top5 = exe.run(program, feed=data, fetch_list=outputs) iter += 1 if iter % 100 == 0: print('eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'. format(iter, cost, top1, top5)) result[0].append(cost) result[1].append(top1) result[2].append(top5) print(' avg loss {}, acc_top1 {}, acc_top5 {}'.format( np.mean(result[0]), np.mean(result[1]), np.mean(result[2]))) return np.mean(result[1]), np.mean(result[2]) train(main_prog) top1_1, top5_1 = test(val_prog) paddle.static.save_inference_model( path_prefix='./test_quant_post_hpo/model', feed_vars=[image, label], fetch_vars=[avg_cost, acc_top1, acc_top5], executor=exe, program=val_prog) quant_post_hpo( exe, place, "./test_quant_post_hpo", "./test_quant_post_hpo_inference", train_sample_generator=sample_generator_creator(), eval_sample_generator=sample_generator_creator(), model_filename="model.pdmodel", params_filename="model.pdiparams", save_model_filename='model.pdmodel', save_params_filename='model.pdiparams', runcount_limit=2) quant_post_prog, feed_target_names, fetch_targets = paddle.static.load_inference_model( path_prefix='./test_quant_post_hpo_inference/model', executor=exe) top1_2, top5_2 = test(quant_post_prog, fetch_targets) print("before quantization: top1: {}, top5: {}".format(top1_1, top5_1)) print("after quantization: top1: {}, top5: {}".format(top1_2, top5_2))
[ 9, 5100 ]
def METHOD_NAME(self): """ get list of selected indices --- """ return self.chart.selected_points()
[ 19, 449, 1894 ]
def METHOD_NAME(contest: Contest, root: ET.Element): for id, key, name in Language.objects.all().values_list('id', 'key', 'name'): language = ET.SubElement(root, 'language') language.tail = '\n' ET.SubElement(language, 'id').text = str(id) ET.SubElement(language, 'key').text = key ET.SubElement(language, 'name').text = name
[ 1917, 2938 ]
def METHOD_NAME(self, message, *args): self._logger.log(self._level, "PMSx003: " + message, *args)
[ 390 ]
def METHOD_NAME( cls, resolved_dict: dict, raw_dict: Optional[dict] = None ) -> "PipelineConfig": config = cls.parse_obj(resolved_dict) config._raw_dict = raw_dict return config
[ 280, 553 ]
async def METHOD_NAME( self, mctx: ModelContext, sources: SourcesContext, feature: Feature ): accuracy: int = 0 async for record in sources.records(): accuracy += int(record.key) return accuracy
[ 747 ]
def METHOD_NAME(self, ioclass, value) -> None: ...
[ 13837, 0 ]
def METHOD_NAME(self, device, results, log): log.info( "Modeler {} processing data for device {}".format( self.name(), device.id)) rm = self.relMap() try: diskdrives = results.get('diskdrives').get('Win32_DiskDrive') except Exception: return rm if not diskdrives: return rm partitions = results.get('diskdrives').get('Win32_DiskDriveToDiskPartition') volumes = results.get('diskdrives').get('Win32_LogicalDiskToPartition') uniqueids_dict = {} signature_uniqueid = results.get('signature_uniqueid') if signature_uniqueid: signature_uniqueid = ''.join(signature_uniqueid.stdout).split('|') for ids in signature_uniqueid: try: key, value = ids.split('=') uniqueids_dict[key] = value except (KeyError, ValueError): pass for drive in diskdrives: utilization = 0 fs_ids = [] instance_name = '{}'.format(drive.Index) try: for partition in partitions[drive.DeviceID]: try: partsize = int(partition.Size) except (TypeError, ValueError): partsize = 0 utilization += partsize for volume in volumes[partition.DeviceID]: fs_ids.append(self.prepId(volume.DeviceID)) instance_name += ' {}'.format(volume.DeviceID) except Exception: log.debug("No partitions for drive {} on {}.".format(instance_name, device.id)) try: size = int(drive.Size) except (TypeError, ValueError): size = 0 freespace = size - utilization if freespace < 0: freespace = 0 try: num_partitions = int(drive.Partitions) except TypeError: num_partitions = 0 # drive.SerialNumber could be None. let's make it '' serialNumber = '' if hasattr(drive, 'SerialNumber'): if drive.SerialNumber is None: drive.SerialNumber = '' serialNumber = drive.SerialNumber.strip() if hasattr(drive, 'Signature'): drive.uniqueId = uniqueids_dict.get(drive.Signature) product_key = MultiArgs(drive.Model, drive.Manufacturer) capabilities = '' if hasattr(drive, 'CapabilityDescriptions') and drive.CapabilityDescriptions: capabilities = drive.CapabilityDescriptions rm.append(self.objectMap({ 'id': self.prepId(drive.PNPDeviceID), 'title': drive.Caption, 'size': size, 'partitions': num_partitions, 'capabilities': capabilities, 'serialNumber': serialNumber, 'freespace': freespace, 'disk_ids': make_disk_ids(drive), 'fs_ids': fs_ids, 'instance_name': instance_name, 'setProductKey': product_key })) return rm
[ 356 ]
def METHOD_NAME(BSP_ROOT, dist_dir): import sys cwd_path = os.getcwd() sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools')) from sdk_dist import dist_do_building dist_do_building(BSP_ROOT, dist_dir)
[ 1260, 276 ]
def METHOD_NAME(self): if self.settings.compiler.get_safe("cppstd"): check_min_cppstd(self, self._min_cppstd) minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) if minimum_version and Version(self.settings.compiler.version) < minimum_version: raise ConanInvalidConfiguration( f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support." ) if is_msvc(self) and check_min_vs(self, "193", raise_invalid=False) and Version(self.version) < "3.21.12": raise ConanInvalidConfiguration(f"{self.ref} is not compatible with Visual Studio 2022, please use version >= 3.21.12")
[ 187 ]
def METHOD_NAME(event): djmail.outbox = [] ActivityLog.objects.create(event=event, content_object=event, action_type="test") event.date_to = now() - dt.timedelta(days=1) event.save() assert not event.settings.sent_mail_cfp_closed task_periodic_event_services(event.slug) event = event.__class__.objects.get(slug=event.slug) assert len(djmail.outbox) == 1 # event created assert not event.settings.sent_mail_event_over task_periodic_event_services(event.slug) event = event.__class__.objects.get(slug=event.slug) assert len(djmail.outbox) == 1
[ 9, 758, 2728, 417, 3217, 654, 7797 ]
def METHOD_NAME(self): """ Test the requestRecord method """ default = {'name': '', 'reqStatus': None, 'SiteWhiteList': [], 'SiteBlackList': [], 'datasets': [], 'campaign': []} self.assertItemsEqual(self.msTransferor.requestRecord({}), default) with open(self.taskChainTempl) as jo: reqData = json.load(jo)['createRequest'] expectedRes = [{'type': 'MCPileup', 'name': '/Neutrino_E-10_gun/RunIISummer17PrePremix-PUAutumn18_102X_upgrade2018_realistic_v15-v1/GEN-SIM-DIGI-RAW'}, {'type': 'MCPileup', 'name': '/Neutrino_E-10_gun/RunIISummer17PrePremix-PUAutumn18_102X_upgrade2018_realistic_v15-v1/GEN-SIM-DIGI-RAW'}] resp = self.msTransferor.requestRecord(reqData)['datasets'] self.assertEqual(len(resp), 2) for idx in range(len(resp)): self.assertItemsEqual(resp[idx], expectedRes[idx]) with open(self.stepChainTempl) as jo: reqData = json.load(jo)['createRequest'] expectedRes = [{'type': 'InputDataset', 'name': '/RelValH125GGgluonfusion_14/CMSSW_10_6_1-106X_mcRun3_2021_realistic_v1_rsb-v1/GEN-SIM'}, {'type': 'MCPileup', 'name': '/RelValMinBias_14TeV/CMSSW_10_6_1-106X_mcRun3_2021_realistic_v1_rsb-v1/GEN-SIM'}, {'type': 'MCPileup', 'name': '/RelValMinBias_14TeV/CMSSW_10_6_1-106X_mcRun3_2021_realistic_v1_rsb-v1/GEN-SIM'}] resp = self.msTransferor.requestRecord(reqData)['datasets'] self.assertEqual(len(resp), 3) for idx in range(len(resp)): self.assertItemsEqual(resp[idx], expectedRes[idx])
[ 15087, 377, 148 ]
def METHOD_NAME( self, text: bytes, entities: List[MessageEntity], offset: Optional[int] = None, length: Optional[int] = None, ) -> Generator[str, None, None]: if offset is None: offset = 0 length = length or len(text) for index, entity in enumerate(entities): if entity.offset * 2 < offset: continue if entity.offset * 2 > offset: yield self.quote(remove_surrogates(text[offset : entity.offset * 2])) start = entity.offset * 2 offset = entity.offset * 2 + entity.length * 2 sub_entities = list( filter(lambda e: e.offset * 2 < (offset or 0), entities[index + 1 :]) ) yield self.apply_entity( entity, "".join(self.METHOD_NAME(text, sub_entities, offset=start, length=offset)), ) if offset < length: yield self.quote(remove_surrogates(text[offset:length]))
[ 4404, 5399 ]
def METHOD_NAME(self, obj, property): type = getattr(obj, "type", "") if type == "project": return None return getattr(obj, property)
[ 98, 217, 155 ]
def METHOD_NAME(input_file: Path, program_trace: ProgramTrace): assert isinstance(program_trace, taint_dag.TDProgramTrace) inputs = list(program_trace.inputs) assert len(inputs) == 1 assert inputs[0].path == str(input_file) # TODO (hbrodin): Should probably not be exposed. Also, the fd is not necessarily unique # per run, which is in the documentation for uid. # stdin, stdout, stderr, tdag-file, input_path assert inputs[0].uid == 4 assert inputs[0].size == 29
[ 9, 1461 ]
def METHOD_NAME( obj ): has_primitives = False for c in obj.children: if c.type == 'MESH': has_primitives = True break primitives = [] if has_primitives: for c in obj.children: if c.type == 'MESH': primitive = {} verts = [] if len( c.data.uv_layers ) > 0: for i in range( len( c.data.vertices ) ): v = c.data.vertices[i] st = c.data.uv_layers[0].data[i] verts.append( { 'xyz': [ v.co[0], v.co[1], v.co[2] ], 'st': [ st.uv[0], 1.0 - st.uv[1] ], 'normal': [ v.normal[0], v.normal[1], v.normal[2] ] } ) else: for v in c.data.vertices: verts.append( { 'xyz': [ v.co[0], v.co[1], v.co[2] ], 'normal': [ v.normal[0], v.normal[1], v.normal[2] ] } ) primitive['verts'] = verts primitives.append( primitive ) polygons = [] for p in c.data.polygons: #print ( p.vertices ) indices = [] for i in p.vertices: indices.append( i ) #for i in range( len( p.vertices ) ): # indices.append( p.vertices[i] ) polygons.append( { 'material': c.data.materials[p.material_index].name, 'indices': indices } ) primitive['polygons'] = polygons return primitives
[ 279, 2189, 24, 3291 ]
def METHOD_NAME( self, defaults: dict[str, dict[str, Any]], targets_defaults: SetDefaultsT, ignore_unknown_fields: bool = False, ignore_unknown_targets: bool = False, ): if not isinstance(targets_defaults, dict): raise ValueError( f"Expected dictionary mapping targets to default field values for {self.address} " f"but got: {type(targets_defaults).__name__}." ) types = self.registered_target_types.aliases_to_types for target, default in targets_defaults.items(): if not isinstance(default, dict): raise ValueError( f"Invalid default field values in {self.address} for target type {target}, " f"must be an `dict` but was {default!r} with type `{type(default).__name__}`." ) targets: Iterable[str] targets = target if isinstance(target, tuple) else (target,) for target_alias in map(str, targets): if target_alias in types: target_type = types[target_alias] elif ignore_unknown_targets: continue else: raise ValueError(f"Unrecognized target type {target_alias} in {self.address}.") # Copy default dict if we may mutate it. raw_values = dict(default) if ignore_unknown_fields else default # Validate that field exists on target valid_field_aliases = set( target_type._get_field_aliases_to_field_types( self._target_type_field_types(target_type) ).keys() ) for field_alias in default.keys(): if field_alias not in valid_field_aliases: if ignore_unknown_fields: del raw_values[field_alias] else: raise InvalidFieldException( f"Unrecognized field `{field_alias}` for target {target_type.alias}. " f"Valid fields are: {', '.join(sorted(valid_field_aliases))}.", ) # Merge all provided defaults for this call. defaults.setdefault(target_type.alias, {}).update(raw_values)
[ 356, 1618 ]
def METHOD_NAME(func): """Wrap systemd dbus methods to handle its specific error types.""" @wraps(func) async def wrapper(*args, **kwds): try: return await func(*args, **kwds) except DBusFatalError as err: if err.type == DBUS_ERR_SYSTEMD_NO_SUCH_UNIT: # pylint: disable=raise-missing-from raise DBusSystemdNoSuchUnit(str(err)) # pylint: enable=raise-missing-from raise err return wrapper
[ 4658, 1096 ]
def METHOD_NAME(self): self.assertTrue(self.top.lookup("Mine").is_namespace()) self.assertTrue(self.Mine.lookup("a_method").is_namespace()) self.assertTrue(self.top.lookup("spam").is_namespace()) self.assertTrue(self.spam.lookup("internal").is_namespace()) self.assertTrue(self.top.lookup("namespace_test").is_namespace()) self.assertFalse(self.spam.lookup("x").is_namespace()) self.assertTrue(self.top.lookup("spam").get_namespace() is self.spam) ns_test = self.top.lookup("namespace_test") self.assertEqual(len(ns_test.get_namespaces()), 2) self.assertRaises(ValueError, ns_test.get_namespace)
[ 9, 6824 ]
def METHOD_NAME( clusters: Iterable[ClusterV1], state: State, slack: Optional[SlackApi] ) -> None: # Send a notification, if a cluster runs a version it was not running in the past # This does not check if an upgrade was successful or not for cluster in clusters: if cluster.spec: state_key = f"{cluster.name}-{cluster.spec.version}" msg = ( f"{cluster_slack_handle(cluster.name, slack)}: " + f"cluster `{cluster.name}` is now running version `{cluster.spec.version}`" ) handle_slack_notification( msg=msg, slack=slack, state=state, state_key=state_key, state_value=cluster.spec.version, )
[ 959, 2059, 80, 281 ]
def METHOD_NAME(self): """ This method is used to extract the kwargs for non-bias computation. For example: The kwargs for conv2d module is {} because the attributes like 'padding' or 'groups' are considered during module initializing. However, we need to consider those attributes as kwargs in F.conv2d. """ pass
[ 297, 1475, 280, 692 ]
def METHOD_NAME(self, resource_name): # type: (str) -> cdk.core.CfnResource return self.sam_template.METHOD_NAME(resource_name)
[ 19, 191 ]