text (stringlengths 15 to 7.82k)
ids (listlengths 1 to 7)
def METHOD_NAME(self) -> "tf.Tensor": """ :return: the optimizer function for the discriminator """ return self._discriminator_optimizer_fct
[ 2254, 968, 3410 ]
def METHOD_NAME(self, instance, request, **kw):
    namespace = super().METHOD_NAME(**kw)
    namespace['request'] = request
    namespace['view'] = instance
    namespace['context'] = context = instance.context
    namespace['views'] = ViewMapper(context, request)
    # get the root
    obj = context
    root = None
    meth = aq_get(obj, 'getPhysicalRoot', None)
    if meth is not None:
        root = meth()
    namespace.update(here=obj,
                     # philiKON thinks container should be the view,
                     # but BBB is more important than aesthetics.
                     container=obj,
                     root=root,
                     modules=SecureModuleImporter,
                     traverse_subpath=[],  # BBB, never really worked
                     user=getSecurityManager().getUser(),
                     )
    return namespace
[ 7353, 19, 198 ]
def METHOD_NAME(self, out):
    sys.stderr.write("--- tap output start ---\n")
    for line in out.splitlines():
        sys.stderr.write(line + "\n")
    sys.stderr.write("--- tap output end ---\n")
[ 4316 ]
def METHOD_NAME(dataset_id, dataset_type, dataset_path, source_path):
    set_folder_status(dataset_path, DatasetStatus.XCHECKING)
    if dataset_type == 'classification':
        ds = ClsDataset(dataset_id, dataset_path)
    elif dataset_type == 'detection':
        ds = DetDataset(dataset_id, dataset_path)
    elif dataset_type == 'segmentation':
        ds = SegDataset(dataset_id, dataset_path)
    elif dataset_type == 'instance_segmentation':
        ds = InsSegDataset(dataset_id, dataset_path)
    p = mp.Process(
        target=_check_and_copy, args=(ds, dataset_path, source_path))
    p.start()
    return p
[ 512, 126 ]
def METHOD_NAME(self, cluster_epoch_length: clusterlib.ClusterLib): """Test the *epochLength* configuration.""" cluster = cluster_epoch_length common.get_test_id(cluster) assert cluster.slot_length == 0.2 assert cluster.epoch_length == 1_500 check_epoch_length(cluster)
[ 9, 1165, 799 ]
def METHOD_NAME(self, attrs_d): attrs_d.setdefault("scheme", "http://search.yahoo.com/mrss/category_schema") self._start_category(attrs_d)
[ 447, 1091, 253 ]
def METHOD_NAME(self, X): return self._wrapped_model.METHOD_NAME(X)
[ 2103, 2550 ]
def METHOD_NAME(f):
    @functools.wraps(f)
    def wrapper(endpoint, uri, *args, **kwargs):
        if not endpoint.available:
            e = TemporarilyUnavailableError()
            log.info('Endpoint unavailable, ignoring request %s.' % uri)
            return {}, e.json, 503

        if endpoint.catch_errors:
            try:
                return f(endpoint, uri, *args, **kwargs)
            except OAuth2Error:
                raise
            except FatalClientError:
                raise
            except Exception as e:
                error = ServerError()
                log.warning(
                    'Exception caught while processing request, %s.' % e)
                return {}, error.json, 500
        else:
            return f(endpoint, uri, *args, **kwargs)
    return wrapper
[ 1057, 1096, 61, 11015 ]
def METHOD_NAME(repo, args): """Returns a list of the files modified by the commit specified in 'args'.""" commit = repo.commit(args.commit) return commit.stats.files
[ 19, 680, 1537 ]
def METHOD_NAME(self) -> Optional['outputs.AlertsDataTypeOfDataConnectorResponse']: """ The available data types for the connector. """ return pulumi.get(self, "data_types")
[ 365, 119 ]
def METHOD_NAME(
    app: Litestar,
    route: HTTPRoute | WebSocketRoute | ASGIRoute,
    route_handler: RouteHandlerType,
) -> ASGIApp:
    """Construct a middleware stack that serves as the point of entry for each route.

    Args:
        app: The Litestar app instance.
        route: The route that is being added.
        route_handler: The route handler that is being wrapped.

    Returns:
        An ASGIApp that is composed of a "stack" of middlewares.
    """
    from litestar.middleware.allowed_hosts import AllowedHostsMiddleware
    from litestar.middleware.compression import CompressionMiddleware
    from litestar.middleware.csrf import CSRFMiddleware

    # we wrap the route.handle method in the ExceptionHandlerMiddleware
    asgi_handler = wrap_in_exception_handler(
        app=route.handle, exception_handlers=route_handler.resolve_exception_handlers()  # type: ignore[arg-type]
    )

    if app.csrf_config:
        asgi_handler = CSRFMiddleware(app=asgi_handler, config=app.csrf_config)

    if app.compression_config:
        asgi_handler = CompressionMiddleware(app=asgi_handler, config=app.compression_config)

    if app.allowed_hosts:
        asgi_handler = AllowedHostsMiddleware(app=asgi_handler, config=app.allowed_hosts)

    for middleware in route_handler.resolve_middleware():
        if hasattr(middleware, "__iter__"):
            handler, kwargs = cast("tuple[Any, dict[str, Any]]", middleware)
            asgi_handler = handler(app=asgi_handler, **kwargs)
        else:
            asgi_handler = middleware(app=asgi_handler)  # type: ignore

    # we wrap the entire stack again in ExceptionHandlerMiddleware
    return wrap_in_exception_handler(
        app=cast("ASGIApp", asgi_handler),
        exception_handlers=route_handler.resolve_exception_handlers(),
    )  # pyright: ignore
[ 56, 2476, 3174, 1501 ]
def METHOD_NAME(self, *args, **kwargs):
    # Cache the available_power property on the instance
    kva = abs(self.voltage) * self.amperage * (self.max_utilization / 100)
    if self.phase == PowerFeedPhaseChoices.PHASE_3PHASE:
        self.available_power = round(kva * 1.732)
    else:
        self.available_power = round(kva)

    super().METHOD_NAME(*args, **kwargs)
[ 73 ]
def METHOD_NAME(): kaka = ( "pyoidc=bjmc::1463043535::upm|" "1463043535|18a201305fa15a96ce4048e1fbb03f7715f86499" ) seed = "" name = "pyoidc" result = parse_cookie(name, seed, kaka) assert result == ("bjmc::1463043535::upm", "1463043535")
[ 9, 214, 4177 ]
def METHOD_NAME(self, name, units=None, limits=(np.nan, np.nan),
                subscripts=None, comp_type=None, comp_subtype=None,
                depends_on={}, other_deps={}):
    """
    This decorators allows assigning metadata to a function.
    """
    def decorator(function):
        function.name = name
        function.units = units
        function.limits = limits
        function.subscripts = subscripts
        function.type = comp_type
        function.subtype = comp_subtype
        function.args = inspect.getfullargspec(function)[0]

        # include component in namespace and dependencies
        self.namespace[name] = function.__name__
        if function.__name__ != "time":
            self.dependencies[function.__name__] = depends_on
            self.dependencies.update(other_deps)

        return function

    return decorator
[ 238 ]
def METHOD_NAME(self): if self._tar: self._create_tar_file() else: self._check_output_folder() self._init = True
[ 176, 146 ]
def METHOD_NAME(self):
[ 9, 399, 146, 171, 345 ]
def METHOD_NAME(self, data): if data["type"] == "subca": if not data.get("parent"): raise ValidationError( "If generating a subca, parent 'authority' must be specified." )
[ 187, 9895 ]
def METHOD_NAME(self): """ Initialization script should create 3 default MarkRange objects. """ for name in ["A", "B", "Pass"]: self.assertTrue(MarkRange.objects.filter(name=name).exists())
[ 9, 235, 1743, 2149, 152 ]
def METHOD_NAME(self) -> Optional[WritableTableStorage]: """ Temporarily support getting the writable storage from an entity. Once consumers/replacers no longer reference entity, this can be removed and entity can have more than one writable storage. """ for storage_connection in self.__storages: if storage_connection.is_writable and isinstance( storage_connection.storage, WritableTableStorage ): return storage_connection.storage return None
[ 19, 8315, 948 ]
def METHOD_NAME(): """Main page.""" banned = check_banned() if banned: return render_template( 'error.min.html', message='You are banned from using this tool! Reason: ' + banned, loggedin=False ) try: auth = dologin() session['language'] = querylanguage(auth) except: # SECURITY: If we cannot login, the session is invalid. app.session_interface.abandon_session(app, session) return render_template( 'main.min.html', loggedin=False ) return render_template( 'main.min.html', loggedin=True )
[ 57 ]
def METHOD_NAME(): """Return useful information about IPython and the system, as a string. Examples -------- :: In [2]: print sys_info() {'commit_hash': '144fdae', # random 'commit_source': 'repository', 'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython', 'ipython_version': '0.11.dev', 'os_name': 'posix', 'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick', 'sys_executable': '/usr/bin/python', 'sys_platform': 'linux2', 'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'} """ return pprint.pformat(get_sys_info())
[ 3709, 100 ]
def METHOD_NAME(request, reason=""): if _is_ajax(request): return JsonResponse( { "detail": pgettext( "api", "Your request was rejected because your browser didn't send the CSRF cookie, or the cookie sent was invalid.", ) }, status=403, ) return render(request, "misago/errorpages/csrf_failure.html", status=403)
[ 9883, 374 ]
def METHOD_NAME(self, dl_manager): downloaded_files = dl_manager.download_and_extract(_URLS) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}), ]
[ 265, 942 ]
def METHOD_NAME(): from sympy import symbols x1,x2 = symbols('x1, x2') Fe = Tuple(sin(pi*x1)*sin(pi*x2), sin(pi*x1)*sin(pi*x2)) f0 = Tuple(2*pi**2*sin(pi*x1)*sin(pi*x2), 2*pi**2*sin(pi*x1)*sin(pi*x2)) Ge = cos(pi*x1)*cos(pi*x2) f1 = cos(pi*x1)*cos(pi*x2) l2_error, h1_error = run_system_1_2d_dir(Fe, Ge, f0, f1, ncells=[2**3,2**3], degree=[2,2]) expected_l2_error = np.asarray([0.00030842129059875065, 0.0002164796555228256]) expected_h1_error = np.asarray([0.018418110343264293, 0.012987988507232278]) assert( np.allclose(l2_error, expected_l2_error, 1.e-13) ) assert( np.allclose(h1_error, expected_h1_error, 1.e-13) )
[ 9, 58, 112, 1170, 1085, 1190, 1170 ]
def METHOD_NAME(x, default): if x is not None: return x return default
[ 9515 ]
def METHOD_NAME(self): self.dstore.close() self.dstore.clear()
[ 531, 481 ]
async def METHOD_NAME(self, hash, params): self.create_async_task(self._restart())
[ 0, 1141 ]
def METHOD_NAME(social_account: SocialAccount) -> OAuth2Session:
    refresh_token_url = FirefoxAccountsOAuth2Adapter.access_token_url
    social_token = social_account.socialtoken_set.first()
    if social_token is None:
        raise NoSocialToken(uid=social_account.uid)

    def _token_updater(new_token):
        update_social_token(social_token, new_token)

    client_id = social_token.app.client_id
    client_secret = social_token.app.secret
    extra = {
        "client_id": client_id,
        "client_secret": client_secret,
    }

    expires_in = (social_token.expires_at - datetime.now(timezone.utc)).total_seconds()
    token = {
        "access_token": social_token.token,
        "refresh_token": social_token.token_secret,
        "token_type": "Bearer",
        "expires_in": expires_in,
    }

    # TODO: find out why the auto_refresh and token_updater is not working
    # and instead we are manually refreshing the token at get_subscription_data_from_fxa
    client = OAuth2Session(
        client_id,
        scope=settings.SOCIALACCOUNT_PROVIDERS["fxa"]["SCOPE"],
        token=token,
        auto_refresh_url=refresh_token_url,
        auto_refresh_kwargs=extra,
        token_updater=_token_updater,
    )
    return client
[ 19, 7692, 240 ]
def METHOD_NAME(self):
    # Run sub commands
    for cmd_name in self.get_sub_commands():
        self.run_command(cmd_name)

    tmp_dir = tempfile.mkdtemp()
    name = self.distribution.metadata.get_name()
    zip_file = os.path.join(tmp_dir, "%s.zip" % name)
    try:
        self.create_zipfile(zip_file)
        self.upload_file(zip_file)
    finally:
        shutil.rmtree(tmp_dir)
[ 22 ]
def METHOD_NAME(self, o: mypy.nodes.SliceExpr) -> object: return UNKNOWN
[ 716, 55, 2078 ]
def METHOD_NAME(self): if self.pooling_type == 'att': kaiming_init(self.conv_mask, mode='fan_in') self.conv_mask.inited = True if self.channel_add_conv is not None: last_zero_init(self.channel_add_conv) if self.channel_mul_conv is not None: last_zero_init(self.channel_mul_conv)
[ 656, 386 ]
def METHOD_NAME(self): """Return the list of presets that this device supports.""" if self._mode_dp is None: return None return self._mode_dp.values(self._device)
[ 1272, 3965 ]
def METHOD_NAME(self)->float: return self._term_angular_velocity
[ 3108, 11916, 5311 ]
def METHOD_NAME(self) -> bool: """Until a validation is final, it is possible to change its state when more information is available.""" return self >= ValidationState.BASIC
[ 137, 1541, 3594, 756 ]
def METHOD_NAME(self): self._test(b'foobar', 'foobar')
[ 9, 75, 1209 ]
def METHOD_NAME(): out = subprocess.check_output(["git", "rev-parse", "HEAD"]) return out.decode().split("\n")[0]
[ 19, 1056, 1160, 1161 ]
def METHOD_NAME(mode): if mode: cronParent = '/var/spool/cron' commandT = 'chmod 755 %s' % (cronParent) ProcessUtilities.executioner(commandT, 'root') if ProcessUtilities.decideDistro() == ProcessUtilities.ubuntu or ProcessUtilities.decideDistro() == ProcessUtilities.ubuntu20: command = 'chmod 755 /var/spool/cron/crontabs' ProcessUtilities.outputExecutioner(command) else: cronParent = '/var/spool/cron' commandT = 'chmod 700 %s' % (cronParent) ProcessUtilities.executioner(commandT, 'root') if ProcessUtilities.decideDistro() == ProcessUtilities.ubuntu or ProcessUtilities.decideDistro() == ProcessUtilities.ubuntu20: command = 'chmod 1730 /var/spool/cron/crontabs' ProcessUtilities.outputExecutioner(command)
[ 5882, -1 ]
def METHOD_NAME(self, **kwargs): """ Validates if an event is already running for that particular bot and also whether the event trigger limit has exceeded. If the data was uploaded, then it is saved in 'training_data' folder. """ training_files = kwargs.get("training_files") is_data_uploaded = kwargs.get("is_data_uploaded", False) DataImporterLogProcessor.is_limit_exceeded(self.bot) DataImporterLogProcessor.is_event_in_progress(self.bot) if is_data_uploaded: is_event_data = asyncio.run(MongoProcessor().validate_and_log(self.bot, self.user, training_files, self.overwrite)) else: Utility.make_dirs(os.path.join("training_data", self.bot, str(uuid.uuid4()))) is_event_data = True return is_event_data
[ 187 ]
def METHOD_NAME(dt, utc=True): if not dt: return "" if utc: dt = to_utc(dt) return fields.Date.to_string(dt)
[ 153, 24, 144 ]
def METHOD_NAME(self, key) -> Optional[str]: if not self.server: return None return self.server.base_url + self.server.root + '/pay?id=' + key
[ 1179, 274 ]
def METHOD_NAME(self): for tag in self.params.objects("source_images"): self.generate_data_file(tag)
[ 567, 2405, 1537 ]
def METHOD_NAME(subj, pred, literal, literal_type=None):
    """
    Constructs data property triple from given subject, predicate and literal.

    Arguments:
        subj {str} - subject of the triple
        pred {str} - predicate of the triple
        literal - literal of the triple
        literal_type - type of the literal, e.g. XSD_STRING (optional)

    Returns:
        SPARQL triple string in the form "<subject> <predicate> 'literal' . "
    """
    # Ensure instances have enclosing < >
    subj = add_enclosing(subj)
    pred = add_enclosing(pred)
    # Cast literal if not string
    if not isinstance(literal, str):
        literal = str(literal)
    if literal_type is None:
        # Return without explicit type declaration (default string)
        triple = f'{subj} {pred} "{literal}" . '
    else:
        literal_type = add_enclosing(literal_type)
        # Return with explicit type declaration
        triple = f'{subj} {pred} "{literal}"^^{literal_type} . '
    return triple
[ 129, 7064, 1479 ]
def METHOD_NAME(self, data): return self.api_client.post(self.reopt_base, format='json', data=data)
[ 19, 17 ]
def METHOD_NAME(message): return ShowMessageBox(message, "Message")
[ 52, 277 ]
def METHOD_NAME(self, env):
    env.prepend_path("PYTHONPATH", self.prefix.python)
    env.prepend_path("LD_LIBRARY_PATH", self.spec["podio"].libs.directories[0])
    if "+sio" in self.spec and self.version >= Version("0.16"):
        # sio needs to be on LD_LIBRARY_PATH for ROOT to be able to
        # dynamicaly load the python bindings library
        env.prepend_path("LD_LIBRARY_PATH", self.spec["sio"].libs.directories[0])
    if self.spec.satisfies("@0.16.1:"):
        # Frame header needs to be available for python bindings
        env.prepend_path("ROOT_INCLUDE_PATH", self.prefix.include)
[ 102, 22, 1027 ]
def METHOD_NAME(filters): """return columns based on filters""" columns = [ _("Item") + ":Link/Item:100", _("Item Name") + "::150", _("Item Group") + ":Link/Item Group:125", _("Brand") + "::100", _("Description") + "::150", _("UOM") + ":Link/UOM:80", _("Last Purchase Rate") + ":Currency:90", _("Valuation Rate") + ":Currency:80", _("Sales Price List") + "::180", _("Purchase Price List") + "::180", _("BOM Rate") + ":Currency:90", ] return columns
[ 19, 1951 ]
def METHOD_NAME(): assert parse_clause("1+some_solid") is None
[ 9, 214, 1177, 532 ]
def METHOD_NAME(version: str, filepath: str) -> None: with open(filepath, encoding="utf-8") as f: content = list(f) file_modified = False with open(filepath, "w", encoding="utf-8") as f: for line in content: if line.startswith("__version__ ="): f.write(f'__version__ = "{version}"\n') file_modified = True else: f.write(line) assert file_modified, f"Version file {filepath} did not get modified"
[ 86, 281, 171 ]
def METHOD_NAME(self) -> Optional[str]: """ Gets or sets the resource id. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(config_file): """Load config file""" with open(config_file) as f: if hasattr(yaml, 'FullLoader'): config = yaml.load(f, Loader=yaml.FullLoader) else: config = yaml.load(f) return config
[ 557, 200 ]
def METHOD_NAME(self): """ This returns a list of message names that would cause this process to start. """ message_names = [] messages = self.xpath("//bpmn:message") message_event_definitions = self.xpath( "//bpmn:startEvent/bpmn:messageEventDefinition") for message_event_definition in message_event_definitions: message_model_identifier = message_event_definition.attrib.get( "messageRef" ) if message_model_identifier is None: raise ValidationException( "Could not find messageRef from message event definition: {message_event_definition}" ) # Convert the id into a Message Name message_name = next((m for m in messages if m.attrib.get('id') == message_model_identifier), None) message_names.append(message_name.attrib.get('name')) return message_names
[ 447, 1107 ]
def METHOD_NAME(self): return ".html"
[ 171, 2916, 235 ]
def METHOD_NAME(self): f = RateLimitingFilter(rate=1, per=2, burst=1) result = self._filter_twenty_records_over_two_seconds(f) self.assertEqual(result.count(True), 1)
[ 9, 1467, 24, 206, 148, 2735, 1603 ]
def METHOD_NAME(mdg, method, data_key=None, coupler=None): """Setup a standard assembler for the flow problem for a given grid bucket. The assembler will be set up with primary variable name 'pressure' on the GridBucket nodes, and mortar_flux for the mortar variables. Parameters: mdg: GridBucket. method (EllipticDiscretization). data_key (str, optional): Keyword used to identify data dictionary for node and edge discretization. Coupler (EllipticInterfaceLaw): Defaults to RobinCoulping. Returns: Assembler, ready to discretize and assemble problem. """ if data_key is None: data_key = "flow" if coupler is None: coupler = pp.RobinCoupling(data_key, method) if isinstance(method, pp.MVEM) or isinstance(method, pp.RT0): mixed_form = True else: mixed_form = False for _, data in mdg.subdomains(return_data=True): if mixed_form: data[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1, "faces": 1}} else: data[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1}} data[pp.DISCRETIZATION] = {"pressure": {"diffusive": method}} for intf, data in mdg.interfaces(return_data=True): g2, g1 = mdg.interface_to_subdomain_pair(intf) data[pp.PRIMARY_VARIABLES] = {"mortar_flux": {"cells": 1}} data[pp.COUPLING_DISCRETIZATION] = { "lambda": { g1: ("pressure", "diffusive"), g2: ("pressure", "diffusive"), intf: ("mortar_flux", coupler), } } data[pp.DISCRETIZATION_MATRICES] = {"flow": {}} assembler = pp.Assembler(mdg) return assembler
[ 102, 233, 9096 ]
def METHOD_NAME(demo_setup: Path) -> None: info = PackageInfo.from_setup_files(demo_setup) demo_check_info(info, requires_dist={"package"})
[ 9, 100, 280, 102, 1739 ]
def METHOD_NAME(base_class): """ Decorates a class so instead of the base class, multiple copies of it are registered, one for each supported version. """ class_dir = os.path.abspath(os.path.dirname( sys.modules[base_class.__module__].__file__)) versions_path = os.path.join(class_dir, '_meta', 'supported-versions.yml') variants = supported_versions(versions_path) decorator = parameterized_class(['COMPOSE_ENV'], variants) decorator(base_class)
[ 3721, 41, 616, 295 ]
def METHOD_NAME(val: A2 | B2 | C2 | Any): if guard2(val): reveal_type(val, expected_text="A2 | B2") else: reveal_type(val, expected_text="C2 | Any")
[ 17549, 490 ]
def METHOD_NAME(): factories.User(name=STANDARD_USER, email='[email protected]') factories.User(name=SYSADMIN_USER, email='[email protected]', sysadmin=True) group = factories.Group(name=LOCATION_NAME) factories.Organization( name=ORG_NAME, title='ORG NAME FOR HDX_REL_URL', users=[ {'name': STANDARD_USER, 'capacity': 'editor'}, ], hdx_org_type=ORGANIZATION_TYPE_LIST[0][1], org_url='https://hdx.hdxtest.org/' ) context = {'model': model, 'session': model.Session, 'user': SYSADMIN_USER} dataset_dict = _get_action('package_create')(context, DATASET_DICT)
[ 102, 365 ]
def METHOD_NAME(self): """ EXAMPLES:: sage: NonNegativeIntegers().some_elements() [0, 1, 3, 42] """ return [Integer(0), Integer(1), Integer(3), Integer(42)]
[ 3368, 1532 ]
def METHOD_NAME(s): '''Parse a string s and return a standard datetime.timedelta object. Handles days, hours, minutes, seconds, and microseconds. Accepts strings in these formats: 2 days 14 days 4:35:00 (hours, minutes and seconds) 4:35:12.087465 (hours, minutes, seconds and microseconds) 7 days, 3:23:34 7 days, 3:23:34.087465 .087465 (microseconds only) :raises ckan.logic.ValidationError: if the given string does not match any of the recognised formats ''' patterns = [] days_only_pattern = '(?P<days>\d+)\s+day(s)?' patterns.append(days_only_pattern) hms_only_pattern = '(?P<hours>\d?\d):(?P<minutes>\d\d):(?P<seconds>\d\d)' patterns.append(hms_only_pattern) ms_only_pattern = '.(?P<milliseconds>\d\d\d)(?P<microseconds>\d\d\d)' patterns.append(ms_only_pattern) hms_and_ms_pattern = hms_only_pattern + ms_only_pattern patterns.append(hms_and_ms_pattern) days_and_hms_pattern = '{0},\s+{1}'.format(days_only_pattern, hms_only_pattern) patterns.append(days_and_hms_pattern) days_and_hms_and_ms_pattern = days_and_hms_pattern + ms_only_pattern patterns.append(days_and_hms_and_ms_pattern) for pattern in patterns: match = re.match('^{0}$'.format(pattern), s) if match: break if not match: raise logic.ValidationError('Not a valid time: {0}'.format(s)) gd = match.groupdict() days = int(gd.get('days', '0')) hours = int(gd.get('hours', '0')) minutes = int(gd.get('minutes', '0')) seconds = int(gd.get('seconds', '0')) milliseconds = int(gd.get('milliseconds', '0')) microseconds = int(gd.get('microseconds', '0')) delta = datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds, milliseconds=milliseconds, microseconds=microseconds) return delta
[ 144, 24, 8227 ]
def METHOD_NAME(self, field: Field) -> dict: """Convert a single field to json schema""" if isinstance(field, Serializer): result = self.to_jsonschema(field) else: try: converter = field_to_converter[field] result = converter.convert(field) except KeyError: if isinstance(field, JSONField): result = {"type": "object", "additionalProperties": True} elif isinstance(field, UUIDField): result = {"type": "string", "format": "uuid"} else: raise if field.label: result["title"] = field.label if field.help_text: result["description"] = field.help_text return self.clean_result(result)
[ 101, 24, 10309 ]
def METHOD_NAME(self, key, data): """ Publishes the data to the event stream. """ publish_data = {key: data} pub = salt.utils.json.dumps(publish_data) + "\n\n" self.handler.write_message(pub)
[ 2411 ]
def METHOD_NAME(self):
    x = X(2)
    y = X(3)

    # singelton x singleton
    self.assertEqual(x.binop(y, lambda x, y: x * y), [6])
    self.assertEqual(x.binop(y, lambda x, y: x * y, list1=False), 6)

    y = X.Alloc(5)
    for i in range(0, 5):
        y[i] = X(i + 1)

    # singelton x non-singleton
    self.assertEqual(x.binop(y, lambda x, y: x * y), [2, 4, 6, 8, 10])
    self.assertEqual(x.binop(y, lambda x, y: x * y, list1=False), [2, 4, 6, 8, 10])

    # non-singelton x singleton
    self.assertEqual(y.binop(x, lambda x, y: x * y), [2, 4, 6, 8, 10])
    self.assertEqual(y.binop(x, lambda x, y: x * y, list1=False), [2, 4, 6, 8, 10])

    # non-singelton x non-singleton
    self.assertEqual(y.binop(y, lambda x, y: x * y), [1, 4, 9, 16, 25])
    self.assertEqual(y.binop(y, lambda x, y: x * y, list1=False), [1, 4, 9, 16, 25])
[ 9, 4504 ]
def METHOD_NAME(self) -> Optional[str]: """ Location of the Guest Usages resource. """ return pulumi.get(self, "location")
[ 708 ]
async def METHOD_NAME( self, attributes: dict[str | int, Any], manufacturer: int | None = None ) -> list: """Write attributes to device with internal 'attributes' validation.""" result = await super().METHOD_NAME(attributes, manufacturer) interval = attributes.get( "detection_interval", attributes.get(DETECTION_INTERVAL) ) _LOGGER.debug("detection interval: %s", interval) if interval is not None: self.endpoint.ias_zone.reset_s = int(interval) return result
[ 77, 177 ]
def METHOD_NAME(self, *args): pass
[ 176 ]
def METHOD_NAME(*args): """Convert from qibolab obj to qibosoq obj, overloaded.""" raise ValueError(f"Convert function received bad parameters ({type(args[0])}).")
[ 197 ]
def METHOD_NAME( acl_name="netmiko_test_large_acl", entries=100, base_cmd="ip access-list extended", base_addr="192.168.0.0", ): cmd = f"{base_cmd} {acl_name}" acl = [cmd] for i in range(1, entries + 1): addr = ip_address(base_addr) cmd = f"permit ip host {addr + i} any" acl.append(cmd) return acl
[ 567, 1918 ]
def METHOD_NAME(self): if self.random_name_enabled: return super(AzureMgmtPreparer, self).METHOD_NAME() return self.test_class_instance.get_preparer_resource_name(self.name_prefix)
[ 129, 236, 156 ]
def METHOD_NAME(self):
    # Given a base object with a value of "test1".
    base = dict(mytest1 = "test1")

    # And a patch that is attempting to modify that base object to have a value of "test2".
    patch = resilient.Patch(base)
    patch.add_value("mytest1", "test2")

    # Confirm that it does indeed have an "old value" of "test1" (this is taken from the base object).
    assert patch.get_old_value("mytest1") == "test1"

    # When we create a patch status that simulates a conflict error from the server (where the
    # value of base changed from "test1" to "blah").
    patch_status = resilient.PatchStatus({
        "success": False,
        "field_failures": [
            {
                "field": "mytest1",
                "your_original_value": "test2",
                "actual_current_value": "blah"
            }
        ],
        "message": "Some message"
    })

    # When I exchange the conflicting value...
    patch.exchange_conflicting_value(patch_status, "mytest1", "test2")

    # The patch's "old value" will be the current server's value.
    assert patch.get_old_value("mytest1") == "blah"
    assert patch.get_new_value("mytest1") == "test2"
[ 9, 2088, 5170, 99 ]
def METHOD_NAME(self, fl_ctx: FLContext, completed: bool): """To persist the FL running components Args: fl_ctx: FLContext completed: flag to indicate where the run is complete Returns: """ pass
[ 6271, 811 ]
def METHOD_NAME(self): pass
[ 1843 ]
def METHOD_NAME(self) -> None: self._test_channel_read(pattern=self.prompt_pattern) self.set_base_prompt( pri_prompt_terminator="#", alt_prompt_terminator=">", delay_factor=1 )
[ 240, 7988 ]
def METHOD_NAME(self, value):
    if not isinstance(value, str):
        raise LookupError()

    # Normalize for case-insensitivity
    value = value.lower()

    # Use indexes first
    for key in self.indices:
        try:
            return self.indices[key][value]
        except LookupError:
            pass

    # Use non-indexed values now. Avoid going through indexed values.
    for candidate in self:
        for k in self.no_index:
            v = candidate._fields.get(k)
            if v is None:
                continue
            if v.lower() == value:
                return candidate

    raise LookupError("Could not find a record for %r" % value)
[ 1906 ]
def METHOD_NAME(clean_speech, denoised_speech):
    """
    computes the word error rate(WER) score for 1 single data point
    """
    def _transcription(clean_speech, denoised_speech):
        # transcribe clean audio
        input_values = wer_tokenizer(clean_speech, return_tensors="pt").input_values
        logits = wer_model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcript_clean = wer_tokenizer.batch_decode(predicted_ids)[0]

        # transcribe
        input_values = wer_tokenizer(denoised_speech, return_tensors="pt").input_values
        logits = wer_model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcript_estimate = wer_tokenizer.batch_decode(predicted_ids)[0]

        return [transcript_clean, transcript_estimate]

    transcript = _transcription(clean_speech, denoised_speech)
    try:
        # if no words are predicted
        wer_val = jiwer.METHOD_NAME(transcript[0], transcript[1])
    except ValueError:
        wer_val = None
    return wer_val
[ 3140 ]
def METHOD_NAME(self):
    # NOTE: we assume that the ordering does not change based on the
    # addition or removal of eos
    return self.dataset.METHOD_NAME()
[ 2687, 1894 ]
def METHOD_NAME(capsys: pytest.CaptureFixture, cli_runner: MetricFlowCliRunner) -> None:  # noqa: D
    # Disabling capsys to resolve error "ValueError: I/O operation on closed file". Better solution TBD.
    with capsys.disabled():
        resp = cli_runner.run(metrics)

    assert "bookings_per_listing: listing__capacity_latest" in resp.output
    assert resp.exit_code == 0
[ 9, 245, 1097 ]
def METHOD_NAME(self, interface, options): keyvalues = self._dict_to_keyvalues(options) cmd = "" if len(keyvalues): cmd = " -- set Interface {} {}".format(interface, keyvalues) return cmd
[ 1090, 1660 ]
def METHOD_NAME(self): self.reset_unittests_results() self.sm = Builder.load_string(screen_manager_app) return self.sm
[ 56 ]
def METHOD_NAME(input_tensor, dim, keepdims=False): return torch.amin(input_tensor, dim, keepdim=keepdims)
[ 1835 ]
def METHOD_NAME(self) -> None: self.num_classes = 1
[ 0, 1 ]
def METHOD_NAME( graph_id: str, client: datastore.Client, config: OperationLogsConfig, last_export_key: datastore.Key, logs_count: int, export_ts: datetime, ): export_log = datastore.Entity( key=last_export_key, exclude_from_indexes=config.EXPORT.EXCLUDE_FROM_INDICES ) export_log[config.EXPORT.LAST_EXPORT_TS] = export_ts export_log[config.EXPORT.LOGS_COUNT] = logs_count this_export_key = client.key( config.EXPORT.KIND, f"{graph_id}_{int(export_ts.timestamp())}", namespace=config.NAMESPACE, ) this_export_log = datastore.Entity( key=this_export_key, exclude_from_indexes=config.EXPORT.EXCLUDE_FROM_INDICES ) this_export_log[config.EXPORT.LAST_EXPORT_TS] = export_ts this_export_log[config.EXPORT.LOGS_COUNT] = logs_count client.put_multi([export_log, this_export_log]) print(f"export time {export_ts}, count {logs_count}")
[ 86, 577 ]
def METHOD_NAME(self, capsys): H = -np.eye(5) g = np.array([0, 0, 0, 0, 1e-6]) trust_radius = 1.1 subprob = KrylovQP_disp(x=0, fun=lambda x: 0, jac=lambda x: g, hess=lambda x: None, hessp=lambda x, y: H.dot(y)) p, hits_boundary = subprob.solve(trust_radius) out, err = capsys.readouterr() assert_(out.startswith(' TR Solving trust region problem'), repr(out))
[ 9, 8905 ]
def METHOD_NAME(self) -> list: METHOD_NAME = [order.filled_price for order in self] return METHOD_NAME
[ 245, 47, 7133, 806 ]
def METHOD_NAME(self, image): """Get a list of all turned on pixels in a binary image Returns a list of tuples of (x,y) coordinates of all matching pixels. See :ref:`coordinate-system`.""" if image.mode != "L": raise Exception("Image must be binary, meaning it must use mode L") return _imagingmorph.METHOD_NAME(image.im.id)
[ 19, 69, 2210 ]
def METHOD_NAME(self): composite_client = oci.logging.LoggingManagementClientCompositeOperations( self.logging_client ) response = composite_client.create_log_and_wait_for_state( log_group_id=self.log_group_id, create_log_details=oci.logging.models.CreateLogDetails( display_name=self.log_stream, is_enabled=True, log_type="CUSTOM", ), wait_for_states=['SUCCEEDED'], ) return response.data.resources[0].identifier
[ 129, 390 ]
def METHOD_NAME(self, state): w = min(int(state[0]), self.maxWidth_) h = min(int(state[1]), self.maxHeight_) c = self.ppm_.getPixel(h, w) return c.red > 127 and c.green > 127 and c.blue > 127
[ 137, 551, 1205 ]
def METHOD_NAME(self): if self.output_dir: files = glob(os.path.join(self.output_dir, 'mass_balance_*.json')) for f in files: os.remove(f)
[ 1356, 146, 1537 ]
def METHOD_NAME(self, request, *args, **kwargs):
    r"""
    Submit :class:`~integreat_cms.cms.forms.users.user_form.UserForm` and save
    :class:`~integreat_cms.cms.models.users.user.User`

    :param request: The current request
    :type request: ~django.http.HttpRequest

    :param \*args: The supplied arguments
    :type \*args: list

    :param \**kwargs: The supplied keyword arguments
    :type \**kwargs: dict

    :return: The rendered template response
    :rtype: ~django.template.response.TemplateResponse
    """
    user_instance = (
        get_user_model().objects.filter(id=kwargs.get("user_id")).first()
    )
    user_form = UserForm(data=request.POST, instance=user_instance)

    if not user_form.is_valid():
        # Add error messages
        user_form.add_error_messages(request)
    elif not (
        user_form.cleaned_data["is_superuser"]
        or user_form.cleaned_data["is_staff"]
        or user_form.cleaned_data["regions"]
    ):
        # Add error message
        messages.error(
            request,
            _(
                "An account has to be either staff/superuser or needs to be restricted to at least one region."
            ),
        )
    elif not request.user.is_superuser and "is_superuser" in user_form.changed_data:
        messages.error(
            request,
            _("Superuser permissions need to be set by another superuser."),
        )
    elif (
        not request.user.is_superuser
        and "passwordless_authentication_enabled" in user_form.changed_data
    ):
        messages.error(
            request,
            _("Only superusers can enable or disable passwordless authentication."),
        )
    elif not user_form.has_changed():
        # Add "no changes" messages
        messages.info(request, _("No changes made"))
    else:
        # Save forms
        user_form.save()
        # Check if user was created
        if not user_instance:
            # Send activation link or welcome mail
            activation = user_form.cleaned_data.get("send_activation_link")
            send_welcome_mail(request, user_form.instance, activation)
            # Add the success message and redirect to the edit page
            messages.success(
                request,
                _('Account "{}" was successfully created.').format(
                    user_form.instance.full_user_name
                ),
            )
        else:
            # Add the success message
            messages.success(
                request,
                _('Account "{}" was successfully saved.').format(
                    user_form.instance.full_user_name
                ),
            )
        return redirect(
            "edit_user",
            user_id=user_form.instance.id,
        )

    return render(
        request,
        self.template_name,
        {
            **self.get_context_data(**kwargs),
            "user_form": user_form,
        },
    )
[ 72 ]
def METHOD_NAME(): saphostctrl = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_R_CASE)) hn = Hostname(HnF(context_wrap(HOSTNAME3)), None, None, None) sap = Sap(hn, saphostctrl) assert sorted(sap.local_instances) == sorted(['W20', 'SMDA98', 'SMDA97']) assert sap['DVEBMGS12'].version == '753, patch 501, changelist 1967207' assert sap['ASCS10'].hostname == 'host_1' assert len(sap.business_instances) == 3 assert sap.is_netweaver is True assert sap.is_hana is False assert sap.is_ascs is True
[ 9, 3264, 331 ]
def METHOD_NAME(self): self.data.linear.linear_system.beam.ss.save(self.base_name + '_beamstatespace.h5') self.data.linear.linear_system.beam.METHOD_NAME(self.base_name + '_struct_matrices.h5')
[ 73, 9181, 2298 ]
def METHOD_NAME(install_root, product): """ Get path to binary release (home) directory. """ return os.path.join(install_root, product)
[ 19, 624, 1190 ]
def METHOD_NAME(): """Test the capture() and restore() methods for the global state snapshot.""" torch.use_deterministic_algorithms(True) torch.backends.cudnn.benchmark = False torch.manual_seed(123) # capture the state of globals snapshot = _GlobalStateSnapshot.capture() # simulate there is a process boundary and flags get reset here torch.use_deterministic_algorithms(False) torch.backends.cudnn.benchmark = True torch.manual_seed(321) # restore the state of globals snapshot.restore() assert torch.are_deterministic_algorithms_enabled() assert not torch.backends.cudnn.benchmark assert torch.initial_seed() == 123
[ 9, 285, 551, 394 ]
def METHOD_NAME(text, box: BoundingBoxDTO, color): img_draw.rectangle(box.xy, outline=color, width=box_line_width) img_draw.text(text=text, xy=(box.x_min, box.y_min - font_size - 1), fill=color, font=_get_font(font_size)) img_draw.text(text=f"{box.probability:.4f}", xy=(box.x_min, box.y_max + 1), fill=color, font=_get_font(font_size_smaller))
[ 1100, 381, 3521 ]
def METHOD_NAME(releases, include_prerelease=False, python_version=None):
    """
    Filters out the newest of all matching releases.

    Tests:

        >>> requires_py2 = ">=2.7.9,<3"
        >>> requires_py23 = ">=2.7.9, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
        >>> requires_py3 = ">=3.6, <4"
        >>> releases = {"1.3.12": [dict(requires_python=requires_py2, upload_time_iso_8601="2019-10-22T10:06:03.190293Z")], "1.4.0rc1": [dict(requires_python=requires_py23, upload_time_iso_8601="2019-11-22T10:06:03.190293Z")], "2.0.0rc1": [dict(requires_python=requires_py3, upload_time_iso_8601="2020-10-22T10:06:03.190293Z")]}
        >>> _filter_out_latest(releases, python_version="2.7.9")
        '1.3.12'
        >>> _filter_out_latest(releases, include_prerelease=True, python_version="2.7.9")
        '1.4.0rc1'
        >>> _filter_out_latest(releases, include_prerelease=True, python_version="3.6.0")
        '2.0.0rc1'
        >>> _filter_out_latest(releases, python_version="3.6.0")
    """
    releases = [{"version": k, "data": v[0]} for k, v in releases.items()]

    # filter out prereleases and versions incompatible to our python
    filter_function = lambda release: not is_prerelease(
        release["version"]
    ) and is_python_compatible(
        release["data"].get("requires_python", ""), python_version=python_version
    )
    if include_prerelease:
        filter_function = lambda release: is_python_compatible(
            release["data"].get("requires_python", ""), python_version=python_version
        )
    releases = list(filter(filter_function, releases))
    if not releases:
        return None

    # sort by upload date
    releases = sorted(
        releases, key=lambda release: release["data"].get("upload_time_iso_8601", "")
    )

    # latest release = last in list
    latest = releases[-1]
    return latest["version"]
[ 527, 1737, 893 ]
def METHOD_NAME(self): self.dlbox = None self.w_track = dict() self.gui_up = False
[ 631 ]
def METHOD_NAME(self): patcher = mock.patch('reference_data.management.commands.update_dbnsfp_gene.DbNSFPReferenceDataHandler', lambda: 'dbnsfp_gene') patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_gene_cn_sensitivity.CNSensitivityReferenceDataHandler', lambda: 'gene_cn_sensitivity') patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_gene_constraint.GeneConstraintReferenceDataHandler', lambda: 'gene_constraint') patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_gencc.GenCCReferenceDataHandler', lambda: 'gencc') patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_clingen.ClinGenReferenceDataHandler', lambda: 'clingen') patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_refseq.RefseqReferenceDataHandler', lambda: 'refseq') patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_mgi.MGIReferenceDataHandler') patcher.start().side_effect = mgi_exception self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_primate_ai.PrimateAIReferenceDataHandler') patcher.start().side_effect = primate_ai_exception self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_all_reference_data.OmimReferenceDataHandler') self.mock_omim = patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_all_reference_data.CachedOmimReferenceDataHandler') self.mock_cached_omim = patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_all_reference_data.update_gencode') self.mock_update_gencode = patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_all_reference_data.update_hpo') self.mock_update_hpo = patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_all_reference_data.update_records') self.mock_update_records = patcher.start() self.addCleanup(patcher.stop) patcher = mock.patch('reference_data.management.commands.update_all_reference_data.logger') self.mock_logger = patcher.start() self.addCleanup(patcher.stop)
[ 0, 1 ]
def METHOD_NAME(self, node_name: str, cd_first: bool = False, cdrom_iso_path: str = None) -> None: pass
[ 0, 1642, 852 ]
def METHOD_NAME(self, *args, **kwargs): result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True) return result
[ 146 ]
def METHOD_NAME(test_dir: str, files: Iterable[str]) -> List[str]: test_files = [] for file in files: file = min( ( os.path.join(test_dir, file_name) for file_name in os.listdir(test_dir) if file_name.startswith(file) ), default=file, ) if not os.path.isfile(file): raise Exception(f"Cannot find a matching file for '{file}' in '{test_dir}'") test_files.append(os.path.abspath(file)) if not test_files: test_files = sorted( glob.glob(os.path.join(test_dir, "*.ts")) + glob.glob(os.path.join(test_dir, "*.js")) ) return test_files
[ 416, 3382, 9, 1537 ]