text: string (lengths 15 to 7.82k)
ids: sequence of int (lengths 1 to 7)
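The two fields above appear to describe the per-row schema of the listing that follows: a text string holding a Python function whose name is masked as METHOD_NAME, and a short ids integer sequence. As a minimal sketch, assuming such rows can be materialized with the Hugging Face datasets library (the library choice is an assumption; only the two column names and the sample values are taken from the listing), one row can be reconstructed and inspected like this:

# Minimal sketch, assuming the listing's rows map to a two-column dataset.
# The sample row is copied from the listing ("return self._orientation");
# using the datasets library here is an assumption, not stated in the source.
from datasets import Dataset

rows = {
    "text": ["def METHOD_NAME(self): return self._orientation"],
    "ids": [[19, 5354]],
}

ds = Dataset.from_dict(rows)
for row in ds:
    # each row pairs one masked function string with a short id sequence
    print(len(row["text"]), row["ids"])

Iterating the in-memory Dataset yields plain dicts, which is enough to check string lengths and sequence lengths against the ranges given in the schema above.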
def METHOD_NAME(self, f): """ Schedule function to run in main wx/Twisted thread. Called by the select() thread. """ if hasattr(self, "wxapp"): wxCallAfter(f) else: # wx shutdown but twisted hasn't self._postQueue.put(f)
[ 22, 623, 57, 600 ]
def METHOD_NAME( self, log_file_path: str, thread_name: str, level: str, content: str ) -> None: """ Assert that the last line in the log file has the thread name, log level name, and content as expected Args: log_file_path: The path to the log file thread_name: The expected thread name level: The expected level content: The expected log message """ latest_log_line = self.get_log_content(log_file_path)[-1] self.assert_log_line(latest_log_line, thread_name, level, content)
[ 638, 893, 390, 534 ]
def METHOD_NAME(self):
[ 9, 5597, 227, 1053 ]
def METHOD_NAME(self): return "source_subfolder"
[ 1458, 3525 ]
def METHOD_NAME(self) -> str: """ Resource type """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(self, model_id, env): if self._table is None: return sql = """ DELETE FROM {0} WHERE model_id = '{1}' AND env = '{2}' """.format( self._table, model_id, env ) conn = self._connect() c = conn.cursor() c.execute(sql) conn.commit() conn.close()
[ 34 ]
def METHOD_NAME(ctx): """List Containers available in Repo.""" #TODO [MV]: add more useful information repo_path = ctx.obj['repo'] if not Repo.is_remote_path(repo_path): if not Repo.exists(repo_path): click.echo(f'\'{repo_path}\' is not a valid aim repo.') exit(1) repo = Repo.from_path(repo_path) container_hashes = repo.container_hashes click.echo('\t'.join(container_hashes)) click.echo(f'Total {len(container_hashes)} containers.')
[ 245, 2954 ]
def METHOD_NAME(self, args=None): """ Parse the arguments passed to the manager and perform the appropriate action. """ options = self.parser.parse_args(args) if options.subparser_name == 'add': self.add_pipeline(options) elif options.subparser_name == 'modify': self.modify_pipeline(options) elif options.subparser_name == 'remove': self.remove_pipeline(options)
[ 22 ]
def METHOD_NAME(gl): out = gl.sidekiq.job_stats() assert isinstance(out, dict) assert "processed" in out["jobs"]
[ 9, 7083, 202, 577 ]
def METHOD_NAME(): assert ( firewall_logging.parse_syslog(b"<4>Jun 6 07:52:38 myhost \xba~\xa6r") is None )
[ 9, 214, 6897, 11769 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(location_id): from corehq.apps.locations.dbaccessors import mobile_user_ids_at_locations user_ids = mobile_user_ids_at_locations([location_id]) for doc in iter_docs(CouchUser.get_db(), user_ids): user = CouchUser.wrap_correctly(doc) user.is_active = False user.save(spawn_task=True)
[ 931, 3467, 1541, 708 ]
def METHOD_NAME(entity): """ Search for NGSIv1-style location, i.e. a "location" metadata in some of the attributes of the given entity. FIXME: currently metadata location doesn't appear in NGSIv2 results. This method could be pretty useless until https://github.com/telefonicaid/fiware-orion/issues/3122 gets solved, but we leave here for the future in any case. :param entity: the entity in which search location :return: the attribute which holds location, 'None' if no one was found """ for field in entity: if field == 'id' or field == 'type': continue for m in entity[field]['metadata']: if m == 'location': return field return None
[ 19, -1, 708 ]
def METHOD_NAME(self): """ :rtype: int """ return self._local_rank
[ 125, 1499 ]
def METHOD_NAME(self, dim_size: int) -> Tensor: return torch.zeros(dim_size, self.output_dim, requires_grad=True, device=self.lamb.device).float()
[ 176, 146 ]
def METHOD_NAME(self): """ Report metrics for the subtask. """ return self.task.METHOD_NAME()
[ 339 ]
def METHOD_NAME( submodule_name: str, module: torch.fx.GraphModule, args:Namespace ) -> TpuMlirModule: c = fx2mlir(submodule_name, args) return c.convert(module)
[ 197, 298, 1430 ]
def METHOD_NAME( input, group, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=None, momentum=None, eps=None, ): # Repeat stored stats and affine transform params if necessary if running_mean is not None: running_mean_orig = running_mean running_mean = running_mean_orig.repeat(b) if running_var is not None: running_var_orig = running_var running_var = running_var_orig.repeat(b) # norm_shape = [1, b * c / group, group] # print(norm_shape) # Apply instance norm input_reshaped = input.contiguous().view( 1, int(b * c / group), group, *input.size()[2:] ) out = F.batch_norm( input_reshaped, running_mean, running_var, weight=weight, bias=bias, training=use_input_stats, momentum=momentum, eps=eps, ) # Reshape back if running_mean is not None: running_mean_orig.copy_( running_mean.view(b, int(c / group)).mean(0, keepdim=False) ) if running_var is not None: running_var_orig.copy_( running_var.view(b, int(c / group)).mean(0, keepdim=False) ) return out.view(b, c, *input.size()[2:])
[ 89, 387 ]
def METHOD_NAME(self): for i in range(5): yield ArrayEventContainer(count=i)
[ 1443 ]
def METHOD_NAME(batch: np.ndarray, device: str): global MODEL if isinstance(batch, list): batch = np.array(batch) batch = einops.rearrange(batch.astype(np.float32) / 127.5 - 1.0, 'n h w c -> n c h w') batch = torch.from_numpy(batch).to(device) with torch.no_grad(): db, mask = MODEL(batch) db = db.sigmoid().cpu().numpy() mask = mask.cpu().numpy() return db, mask
[ 899, 2277, 76, 235 ]
def METHOD_NAME(self, response): """ `parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs. """ links_list = self._get_links(response) location = self._parse_location(response) ids_list = [] start_time = self._parse_time(response) for item in response.css("article p"): start = self._parse_start(item, start_time) if not start: continue meeting = Meeting( title="SSA #73 Chinatown Board", description="", classification=BOARD, start=start, end=None, all_day=False, time_notes="", location=location, links=self._parse_links(item, start, links_list), source=response.url, ) meeting["status"] = self._get_status(meeting) meeting["id"] = self._get_id(meeting) if meeting["id"] in ids_list: continue else: ids_list.append(meeting["id"]) yield meeting
[ 214 ]
def METHOD_NAME(self): callback = lambda a, b: None other_callback = lambda a, b: None register_subscriber("hello")(callback) assert subscriber_registry["hello"] is callback register_subscriber("goodbye")(other_callback) assert subscriber_registry["goodbye"] is other_callback
[ 9, 372 ]
def METHOD_NAME(self): with make_image_file() as image_file: create_profile_images(image_file, get_profile_image_names(self.user.username)) self.user.profile.profile_image_uploaded_at = date.today() self.user.profile.save() actual = cookies_api._get_user_info_cookie_data(self.request, self.user) # pylint: disable=protected-access expected = { 'version': settings.EDXMKTG_USER_INFO_COOKIE_VERSION, 'username': self.user.username, 'email': self.user.email, 'header_urls': self._get_expected_header_urls(), 'user_image_urls': self._get_expected_image_urls(), } self.assertDictEqual(actual, expected)
[ 9, 19, 21, 100, 4177, 365 ]
def METHOD_NAME(packages: Iterable[AnyPackageDataMinimal], allow_ignored: bool = False) -> AnyPackageDataMinimal | None: """Given a collection of packages, return a representative package. Same logic as packageset_to_best_by_repo, but assumes that all packages belong to a single repository. A bit more efficient. """ sorted_packages = packageset_sort_by_version(packages) for package in sorted_packages: if _is_good_representative_package(package, allow_ignored): return package return sorted_packages[0] if sorted_packages else None
[ 8775, 24, 2192 ]
def METHOD_NAME(): # GIVEN app = AppSyncResolver() router = Router() @router.resolver(type_name="Query", field_name="listLocations") def get_locations(name: str): return "get_locations#" + name @app.resolver(field_name="listLocations2") def get_locations2(name: str): return "get_locations2#" + name app.include_router(router) # WHEN mock_event1 = {"typeName": "Query", "fieldName": "listLocations", "arguments": {"name": "value"}} mock_event2 = {"typeName": "Query", "fieldName": "listLocations2", "arguments": {"name": "value"}} result1 = app.resolve(mock_event1, LambdaContext()) result2 = app.resolve(mock_event2, LambdaContext()) # THEN assert result1 == "get_locations#value" assert result2 == "get_locations2#value"
[ 9, 1836, 1872, 1836 ]
def METHOD_NAME(): """Cleanup is supported since borg 1.2.""" return borg.__version_tuple__ >= (1, 2)
[ 1466, 950 ]
def METHOD_NAME(self): return self._orientation
[ 19, 5354 ]
def METHOD_NAME(self) -> torch.Tensor: data = self.corpus.val if self.valid else self.corpus.train # Work out how cleanly we can divide the dataset into bsz parts. nbatch = data.size(0) // self.batch_size # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, nbatch * self.batch_size) # Evenly divide the data across the bsz batches. data = data.view(self.batch_size, -1).t().contiguous() return data
[ 10336 ]
def METHOD_NAME(assemeblyModelString): """This function parses the assembly model strung and provides back information on the building block multiplicity and planarity.""" assemblyModelDict = {} assemblyModelDict['numcorners'] = None assemblyModelDict['numedges'] = None assemblyModelDict['numfaces'] = None assemblyModelDict['modcorners'] = None assemblyModelDict['modedges'] = None assemblyModelDict['modfaces'] = None assemblyModelDict['plancorners'] = None assemblyModelDict['planedges'] = None assemblyModelDict['planfaces'] = None assemeblyModelString = assemeblyModelString.upper() ceflist = assemeblyModelString.split('-') for item in ceflist: mod, num = processCEF(item) if item[0] == 'C': assemblyModelDict['modcorners'] = mod assemblyModelDict['numcorners'] = num assemblyModelDict['plancorners'] = 'pyramidal' elif item[0] == 'F': assemblyModelDict['modfaces'] = mod assemblyModelDict['numfaces'] = num assemblyModelDict['planfaces'] = 'planar' elif item[0] == 'E': assemblyModelDict['modedges'] = mod assemblyModelDict['numedges'] = num else: print('Please put a corrent assembly string') #process corner if assemblyModelDict['numedges'] is not None: if assemblyModelDict['numcorners'] is not None: assemblyModelDict['planedges'] = 'linear' if assemblyModelDict['numfaces'] is not None: assemblyModelDict['planedges'] = 'bent' return assemblyModelDict
[ 214, 683, 578, 144 ]
def METHOD_NAME(self, batch_dim_size: int) -> None: """ initialize batch dim size base on the first input batch size """ if self.batch_dim_size != -1 and self.batch_dim_size != batch_dim_size: raise RuntimeError( f"batch dim size is already initialized! " f"Found new batch size: {batch_dim_size} not " f"matching existing batch dim size: {self.batch_dim_size}!" ) self.batch_dim_size = batch_dim_size
[ 176, 2277, 3014, 1318 ]
def METHOD_NAME(self, name: str, val: Any): """ Append a metadata entry to the index file. Args: name (str): name of the metadata. val (Any): value of the metadata. """ self.metadata[name] = val
[ 1459, 1094, 365 ]
def METHOD_NAME(exc, message=None): Logger.get('flask').exception(str(exc) or 'Uncaught Exception') if not current_app.debug or request.is_xhr or request.is_json: sentry_sdk.capture_exception(exc) if message is None: message = f'{type(exc).__name__}: {str(exc)}' if os.environ.get('INDICO_DEV_SERVER') == '1': # If we are in the dev server, we always want to see a traceback on the # console, even if this was an API request. traceback.print_exc() return render_error(exc, _('Something went wrong'), message, 500) # Let the exception propagate to middleware / the webserver. # This triggers the Flask debugger in development and sentry # logging (if enabled) (via got_request_exception). raise
[ 276, 442 ]
def METHOD_NAME(self): for i in range(3): try: continue finally: yield i
[ 370, 6241 ]
def METHOD_NAME(self, expr: Expr, name: Optional[str] = None, is_dfvar: bool = False) -> None: """ Add a new statement to the DataflowBlock with an automatically generated variable name. Parameters ---------- expr : Expr The expression to add. name : Optional[str], optional Variable name, by default None is_dfvar : bool, optional The variable type, by default False Notes ----- If the variable name is not given, it will be automatically generated in a form of "tmp${COUNTER}". The variable type will be DataflowVar if is_dfvar is True, otherwise it will be Var. Being Var means the variables are output variables of the DataflowBlock. While being DataflowVar means the variables are internal variables of the DataflowBlock. """ _ffi_api.dfb_rewrite_add(self, expr, name, is_dfvar) # type: ignore
[ 238 ]
def METHOD_NAME(quest_template): return quest_template.LimitTime > 0
[ 137, 3516, 9164 ]
def METHOD_NAME(elements): # element_order = _hill_system_sort(elements) element_order = _chnops_sort(elements) ion_formula_parts = [] for elem in element_order: count = elements[elem] if count != 0: ion_formula_parts.append(elem) if count > 1: ion_formula_parts.append(str(count)) return ''.join(ion_formula_parts)
[ 275, 4903 ]
def METHOD_NAME(self, mocked_access_token, mocked_request): json = { "errorDetailType": "com.linkedin.common.error.BadRequest", "message": "Multiple errors occurred during the input validation. Please see errorDetails for more information.", "errorDetails": {"inputErrors": [ {"description": "Invalid argument", "input": { "inputPath": { "fieldPath": "search/account"}}, "code": "ERROR :: /account/values/0 :: Invalid Urn Format. Key long is in invalid format. Urn urn:li:sponsoredAccount:aaa." }, {"description": "Invalid argument", "input": { "inputPath": { "fieldPath": "search/account/values/0"}}, "code": "Invalid value for field; wrong type or other syntax error" }]}, "status": 400} mocked_request.return_value = get_response(400, json = json, raise_error = True) client = _client.LinkedinClient('client_id', 'client_secret', 'refresh_token', 'access_token') try: client.request("GET") except _client.LinkedInBadRequestError as e: self.assertEquals(str(e), "HTTP-error-code: 400, Error: " + str(json.get("errorDetails")))
[ 9, 4157, 168, 4814, 763 ]
def METHOD_NAME(self, type_name: str) -> Optional[DataHandler]: from skytemple_files.common.types.file_types import FileType if hasattr(FileType, type_name): return getattr(FileType, type_name) return None
[ 19, 1519 ]
def METHOD_NAME(request: FixtureRequest) -> bool: return request.param
[ 41, 2813, 1089 ]
def METHOD_NAME(self, test, err): """Called when an expected failure/error occured.""" self.expectedFailures.append( (test, self._exc_info_to_string(err, test)))
[ 238, 391, 374 ]
def METHOD_NAME(self, threshold=30): seg_num = math.ceil((self.end - self.start) / threshold) if seg_num == 1: return [self.segs] avg = (self.end - self.start) / seg_num return_seg = [] start_time = self.start cache_seg = [] for seg in self.segs: cache_time = seg[1] - start_time if cache_time > avg: return_seg.append(cache_seg) start_time = seg[0] cache_seg = [seg] else: cache_seg.append(seg) return_seg.append(cache_seg) return return_seg
[ 265 ]
def METHOD_NAME(self): self.assertTrue(add_participation( self.user.username, self.contest.id, "1.2.3.4", 60, 120, "pwd", "plaintext", True, None, True, True)) self.assertParticipationInDb(self.user.id, self.contest.id, "pwd", delay_time=60, extra_time=120, hidden=True, unrestricted=True, ip=[ipaddress.ip_network("1.2.3.4")])
[ 9, 2395, 199 ]
def METHOD_NAME( df_s: pd.DataFrame, path: str, benchmark_time: float, request: pytest.FixtureRequest ) -> None: with ExecutionTimer(request, data_paths=path) as timer: df_s.to_parquet(path[:-1]) # path[:-1] due to Modin not properly handling S3 prefixes assert timer.elapsed_time < benchmark_time
[ 9, 12538, 607, 77, 4650, 53 ]
def METHOD_NAME(): with freeze_time('2016-9-19'): assert utils.relative_timedelta_str(dt.date(2016, 9, 24)) == '5 days from now' assert utils.relative_timedelta_str(dt.date(2016, 9, 29)) == '~1 week from now' assert utils.relative_timedelta_str(dt.date(2017, 9, 29)) == '~1 year from now' assert utils.relative_timedelta_str(dt.date(2016, 7, 29)) == '~7 weeks ago'
[ 9, 1821, 8227, 3 ]
def METHOD_NAME(script_id: str) -> ExecutionScript: try: script = [s for s in conf.KNOWN_SCRIPTS.values() if s.id == script_id][0] except IndexError: log(f"script {script_id!r} is not known locally") raise IndexError return script
[ 19, 782, 604, 147, 280, 125 ]
def METHOD_NAME(seq): return {'branch_attrs':{'mutations':{}}, 'name':seq['seqName']+"_clades", 'node_attrs':{'clade_membership':{'value':seq['clade']}, 'new_node': {'value': 'Yes'}, 'QCStatus':{'value':seq['QCStatus']}}, 'mutations':{} }
[ 19, 1716, 1755 ]
def METHOD_NAME(): return np.frombuffer(b'foo', dtype=np.uint8)
[ 321, 947, 3402, 877 ]
def METHOD_NAME(self): pairs = self.system.cell_system.get_pairs(1.5) epairs = self.expected_pairs(self.system.periodicity) self.assertSetEqual(set(pairs), set(epairs)) pairs_by_type = self.system.cell_system.get_pairs( 1.5, types=self.types_to_get_pairs) epairs_by_type = self.expected_pairs_with_types( self.system.periodicity) self.assertSetEqual(set(pairs_by_type), set(epairs_by_type))
[ 250, 3151 ]
def METHOD_NAME(self, privacy=False, *args, **kwargs): """ Returns the URL built dynamically based on specified arguments. """ # Prepare our cache value if isinstance(self.cache, bool) or not self.cache: cache = 'yes' if self.cache else 'no' else: cache = int(self.cache) # Define any URL parameters params = { 'encoding': self.encoding, 'cache': cache, } if self.config_format: # A format was enforced; make sure it's passed back with the url params['format'] = self.config_format return 'file://{path}{params}'.format( path=self.quote(self.path), params='?{}'.format(self.urlencode(params)) if params else '', )
[ 274 ]
def METHOD_NAME(tx): """ Make the signature in vin 0 of a tx non-DER-compliant, by adding padding after the S-value. """ scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript)
[ 4939, 5833, 16645 ]
def METHOD_NAME(self): """Override that uses the form field's ``save_object_data()``.""" super().METHOD_NAME() for name, field in self.fields.items(): if not hasattr(field, 'save_object_data'): continue value = self.cleaned_data.get(name, None) if value: field.save_object_data(self.instance, name, value)
[ 72, 1356 ]
def METHOD_NAME(example_dir_root, forced): '''Add examples to the library''' return_value = 0 file_list = [] count = 0 for root, _directories, files in os.walk(example_dir_root): for file in files: if file.endswith(".ino"): if file not in file_list: file_list.append(file) source = os.path.join(root, file) destination = os.path.join("examples", os.path.basename(os.path.splitext(file)[0]), file) return_value = u_arduino_common.copy_file(source, destination, forced) if return_value < 0: break count += return_value else: print(f"Duplicate example sketch file name \"{file}\"") return_value = -2 break if return_value >= 0: return_value = count print(f"{count} example(s) copied.") return return_value
[ 238, 2794 ]
def METHOD_NAME(self): """Get copy of whole data with a shape of (N, 3, H, W).""" return self._images.copy()
[ 3669 ]
def METHOD_NAME(self) -> str:
[ 85 ]
def METHOD_NAME(): import sys filename = os.path.join(ROOT, 'test_fixed.h5') filename = os.path.join(ROOT, 'test_table_no_dc.h5') if len(sys.argv) > 1: filename = sys.argv[1] a_tree = ATree(h5_tree=_hdf5_tree(filename)) return a_tree
[ 57 ]
def METHOD_NAME(Name, Value, Ensure): retval = -1 try: p = Params(Name, Value, Ensure) except Exception, e: LG().Log('ERROR', 'ERROR - Unable to initialize nxNopProvider. ' + str(e)) return [retval] if p.Ensure == 'present': if p.Value == 'fail': LG().Log('ERROR', 'ERROR - Failing Set(present) due to Value = "fail".' + str(e)) else: retval = 0 else: if p.Value == 'fail': LG().Log('ERROR', 'ERROR - Failing Set(absent) due to Value = "fail". ' + str(e)) else: retval = 0 return [retval]
[ 0 ]
def METHOD_NAME(self, loc, scale):
[ 9, 1576, 1116 ]
def METHOD_NAME(self, **kw): """ Checks for the right value for inline macro. Define INLINE_MACRO to 1 if the define is found. If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__) :param define_name: define INLINE_MACRO by default to 1 if the macro is defined :type define_name: string :param features: by default *c* or *cxx* depending on the compiler present :type features: list of string """ self.start_msg('Checking for inline') if not 'define_name' in kw: kw['define_name'] = 'INLINE_MACRO' if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx'] else: kw['features'] = ['c'] for x in INLINE_VALUES: kw['fragment'] = INLINE_CODE % (x, x) try: self.check(**kw) except self.errors.ConfigurationError: continue else: self.end_msg(x) if x != 'inline': self.define('inline', x, quote=False) return x self.fatal('could not use inline functions')
[ 250, 1817 ]
def METHOD_NAME(self): model_state = { **super().METHOD_NAME(), "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False), "label_dictionary": self.label_dictionary, "label_type": self.label_type, "multi_label": self.multi_label, "multi_label_threshold": self.multi_label_threshold, "weight_dict": self.weight_dict, } return model_state
[ 19, 551, 553 ]
def METHOD_NAME(): return ConfiguredAirbyteCatalog.parse_obj( { "streams": [ { "stream": { "name": "example_stream", "json_schema": {"$schema": "http://json-schema.org/draft-07/schema#", "type": "object", "properties": {}}, "supported_sync_modes": ["full_refresh", "incremental"], "source_defined_cursor": False, "default_cursor_field": ["column_name"], }, "primary_key": [["id"]], "sync_mode": "incremental", "destination_sync_mode": "append_dedup", }, { "stream": { "name": "example_stream2", "json_schema": {"$schema": "http://json-schema.org/draft-07/schema#", "type": "object", "properties": {}}, "supported_sync_modes": ["full_refresh", "incremental"], "source_defined_cursor": False, "default_cursor_field": ["column_name"], }, "primary_key": [["id"]], "sync_mode": "full_refresh", "destination_sync_mode": "overwrite", }, ] } )
[ 567, 2824 ]
def METHOD_NAME(self): return True
[ 137, 3272 ]
def METHOD_NAME(self, click): coords = click.coords click.indx = self.click_indx_offset + self.num_pos_clicks + self.num_neg_clicks if click.is_positive: self.num_pos_clicks += 1 else: self.num_neg_clicks += 1 self.clicks_list.append(click) if self.gt_mask is not None: self.not_clicked_map[coords[0], coords[1]] = False
[ 238, 212 ]
def METHOD_NAME(self, fl_ctx: FLContext): (self.train_images, self.train_labels), ( self.test_images, self.test_labels, ) = tf.keras.datasets.mnist.load_data() self.train_images, self.test_images = ( self.train_images / 255.0, self.test_images / 255.0, ) # simulate separate datasets for each client by dividing MNIST dataset in half client_name = fl_ctx.get_identity_name() if client_name == "site-1": self.train_images = self.train_images[: len(self.train_images) // 2] self.train_labels = self.train_labels[: len(self.train_labels) // 2] self.test_images = self.test_images[: len(self.test_images) // 2] self.test_labels = self.test_labels[: len(self.test_labels) // 2] elif client_name == "site-2": self.train_images = self.train_images[len(self.train_images) // 2 :] self.train_labels = self.train_labels[len(self.train_labels) // 2 :] self.test_images = self.test_images[len(self.test_images) // 2 :] self.test_labels = self.test_labels[len(self.test_labels) // 2 :] model = Net() loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"]) _ = model(tf.keras.Input(shape=(28, 28))) self.var_list = [model.get_layer(index=index).name for index in range(len(model.get_weights()))] self.model = model
[ 102 ]
def METHOD_NAME(output, expected): assert type(output) == type(expected) assert output == expected
[ 638, 926, 235 ]
async def METHOD_NAME(self, websocket: web.WebSocketResponse) -> Callable: async def _receive(data: Union[str, bytes]) -> None: self.websocket_received_data = data return _receive
[ 4389, 365 ]
def METHOD_NAME(stub, context=None): return _GetVsanStub(stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT, context=context)
[ 19, 13113, 5586, 492 ]
def METHOD_NAME(bucket_name: str, bucket_path: str) -> pd.DataFrame: """ Given S3 URI, read the file from the S3 bucket and return a pandas dataframe Args: bucket_name (str): name of s3 bucket bucket_path (str): path within s3 bucket where the file resides """ s3 = boto3.client("s3") obj = s3.get_object(Bucket=bucket_name, Key=bucket_path) df = pd.read_csv(io.BytesIO(obj["Body"].read())) return df
[ 203, 2057, 280, 2538 ]
def METHOD_NAME(dataframe: pd.DataFrame, informative: pd.DataFrame, timeframe: str, timeframe_inf: str, ffill: bool = True, append_timeframe: bool = True, date_column: str = 'date', suffix: Optional[str] = None) -> pd.DataFrame: """ Correctly merge informative samples to the original dataframe, avoiding lookahead bias. Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a 1h candle that starts at 15:00 will result in all candles to know the close at 16:00 which they should not know. Moves the date of the informative pair by 1 time interval forward. This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the last candle that's closed at 15:00, 15:15, 15:30 or 15:45. Assuming inf_tf = '1d' - then the resulting columns will be: date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d :param dataframe: Original dataframe :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe :param timeframe: Timeframe of the original pair sample. :param timeframe_inf: Timeframe of the informative pair sample. :param ffill: Forwardfill missing values - optional but usually required :param append_timeframe: Rename columns by appending timeframe. :param date_column: A custom date column name. :param suffix: A string suffix to add at the end of the informative columns. If specified, append_timeframe must be false. :return: Merged dataframe :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe """ minutes_inf = timeframe_to_minutes(timeframe_inf) minutes = timeframe_to_minutes(timeframe) if minutes == minutes_inf: # No need to forwardshift if the timeframes are identical informative['date_merge'] = informative[date_column] elif minutes < minutes_inf: # Subtract "small" timeframe so merging is not delayed by 1 small candle # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073 informative['date_merge'] = ( informative[date_column] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm') ) else: raise ValueError("Tried to merge a faster timeframe to a slower timeframe." "This would create new rows, and can throw off your regular indicators.") # Rename columns to be unique date_merge = 'date_merge' if suffix and append_timeframe: raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.") elif append_timeframe: date_merge = f'date_merge_{timeframe_inf}' informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns] elif suffix: date_merge = f'date_merge_{suffix}' informative.columns = [f"{col}_{suffix}" for col in informative.columns] # Combine the 2 dataframes # all indicators on the informative sample MUST be calculated before this point if ffill: # https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging # merge_ordered - ffill method is 2.5x faster than seperate ffill() dataframe = pd.merge_ordered(dataframe, informative, fill_method="ffill", left_on='date', right_on=date_merge, how='left') else: dataframe = pd.merge(dataframe, informative, left_on='date', right_on=date_merge, how='left') dataframe = dataframe.drop(date_merge, axis=1) # if ffill: # dataframe = dataframe.ffill() return dataframe
[ 411, 969, 637 ]
def METHOD_NAME(package, prop): name = prop.children[0].attributes['name'] values = { "name": name, "values": sorted(set(map(operator.attrgetter("text"), prop.children[0].children))) } assign_and_validate(enumeration_properties, name, values) return name
[ 275, 44, 1042, 6178, 99 ]
def METHOD_NAME( adata: AnnData, log: bool = True, plot: bool = False, copy: bool = False ) -> Optional[AnnData]: """\ Normalization and filtering as of Seurat [Satija15]_. This uses a particular preprocessing. Expects non-logarithmized data. If using logarithmized data, pass `log=False`. """ if copy: adata = adata.copy() pp.filter_cells(adata, min_genes=200) pp.filter_genes(adata, min_cells=3) normalize_total(adata, target_sum=1e4) filter_result = filter_genes_dispersion( adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5, log=not log ) if plot: from ..plotting import ( _preprocessing as ppp, ) # should not import at the top of the file ppp.filter_genes_dispersion(filter_result, log=not log) adata._inplace_subset_var(filter_result.gene_subset) # filter genes if log: pp.log1p(adata) pp.scale(adata, max_value=10) return adata if copy else None
[ 3912, 13521 ]
def METHOD_NAME(qc: Any) -> Dict[int, int]: i = 0 positional_logical_mapping = {} for inst in qc.data: if inst[0].name == "measure": positional_logical_mapping[i] = inst[1][0].index i += 1 return positional_logical_mapping
[ 19, 304, 1692, 445, 280, 5229 ]
def METHOD_NAME(gamemodel): perspective.activate_panel("annotationPanel") if "FEN" in gamemodel.tags: asyncio.create_task( gamemodel.start_analyzer(HINT, force_engine=discoverer.getEngineLearn()) )
[ 69, 2674, 3550 ]
def METHOD_NAME(): """ Returns the node categories the node is part of (used by the device panel). :returns: list of node categories """ return [Node.end_devices]
[ 2065 ]
def METHOD_NAME(self): return self.model.base_model
[ 2472 ]
def METHOD_NAME(self) -> None: super().METHOD_NAME() self.sas_url = 'https://fake.sas.url' self.rc_ingest_url = 'https://fake.rc.url' self.crl_validator_mock = mock.MagicMock(spec=CRLValidator) self.router = RequestRouter( sas_url=self.sas_url, rc_ingest_url=self.rc_ingest_url, cert_path='fake/cert/path', ssl_key_path='fake/key/path', request_mapping=request_mapping, ssl_verify=False, crl_validator=self.crl_validator_mock, )
[ 0, 1 ]
def METHOD_NAME(process): #Adding SimpleMemoryCheck service: process.SimpleMemoryCheck=cms.Service("SimpleMemoryCheck", ignoreTotal=cms.untracked.int32(1), oncePerEventMode=cms.untracked.bool(True)) #Adding Timing service: process.Timing=cms.Service("Timing") #Tweak Message logger to dump G4cout and G4cerr messages in G4msg.log #print process.MessageLogger.__dict__ process.MessageLogger.debugModules=cms.untracked.vstring('g4SimHits') #Configuring the G4msg.log output process.MessageLogger.files = dict(G4msg = cms.untracked.PSet( noTimeStamps = cms.untracked.bool(True) #First eliminate unneeded output ,threshold = cms.untracked.string('INFO') ,INFO = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,DEBUG = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,FwkSummary = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,Root_NoDictionary = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,FwkJob = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,TimeReport = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,TimeModule = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,TimeEvent = cms.untracked.PSet(limit = cms.untracked.int32(0)) ,MemoryCheck = cms.untracked.PSet(limit = cms.untracked.int32(0)) #TimeModule, TimeEvent, TimeReport are written to LogAsbolute instead of LogInfo with a category #so they cannot be eliminated from any destination (!) unless one uses the summaryOnly option #in the Timing Service... at the price of silencing the output needed for the TimingReport profiling # #Then add the wanted ones: ,PhysicsList = cms.untracked.PSet(limit = cms.untracked.int32(-1)) ,G4cout = cms.untracked.PSet(limit = cms.untracked.int32(-1)) ,G4cerr = cms.untracked.PSet(limit = cms.untracked.int32(-1)) ,CaloSim = cms.untracked.PSet(limit = cms.untracked.int32(-1)) ,ForwardSim = cms.untracked.PSet(limit = cms.untracked.int32(-1)) )
[ 5317 ]
def METHOD_NAME(self): data = self.create_data() pc_data = pycuda.gpuarray.to_gpu(data) bf_data = bf.ndarray(pc_data) np_data = bf_data.copy(space='system') np.testing.assert_allclose(np_data, data)
[ 9, 280, -1 ]
def METHOD_NAME(self) -> str: """ Returns: the name of the database the graph is stored in """ return self._graph_info(["database"]) # type: ignore
[ 463 ]
def METHOD_NAME(self, currency): if self.valid_currencies is None: return True return currency in self.valid_currencies
[ 1466, 5251 ]
def METHOD_NAME(self): parameters = { **self.serialize_query_param( "api-version", "2018-09-01", required=True, ), } return parameters
[ 539, 386 ]
def METHOD_NAME(submission, registration_attribute: str, component_attribute: str): for component in submission.form.iter_components(recursive=True): attribute = glom(component, component_attribute, default=None) if attribute == registration_attribute: return component
[ 19, 1007 ]
def METHOD_NAME(self): # Skip this test if running below Splunk 6.2, cookie-auth didn't exist before splver = self.service.splunk_version if splver[:2] < (6, 2): self.skipTest("Skipping cookie-auth tests, running in %d.%d.%d, this feature was added in 6.2+" % splver) event_count = int(self.service.indexes[self.index_name]['totalEventCount']) cookie = "%s=%s" % (list(self.service.http._cookies.items())[0]) service = client.Service(**{"cookie": cookie}) service.login() cn = service.indexes[self.index_name].attach() cn.send(b"Hello Boris!\r\n") cn.close() self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=60)
[ 9, 579, 2499, 645, 41, 4177, 572 ]
def METHOD_NAME(self, flags=0): topic, record = self.subscriber.recv_serialized( deserialize=lambda msg: (msg[0].decode(), cloudpickle.loads(msg[1])), flags=flags ) return topic, record
[ 375 ]
def METHOD_NAME(arch, encoding_type=EncodingType.ADJACENCY_ONE_HOT): if encoding_type == EncodingType.ADJACENCY_ONE_HOT: return encode_adjacency_one_hot(arch) elif encoding_type == EncodingType.PATH: return encode_paths(arch) elif encoding_type == EncodingType.GCN: return encode_gcn_nasbench201(arch) elif encoding_type == EncodingType.BONAS: return encode_bonas_nasbench201(arch) elif encoding_type == EncodingType.SEMINAS: return encode_seminas_nasbench201(arch) else: logger.info(f"{encoding_type} is not yet supported as an architecture encoding for nb201") raise NotImplementedError()
[ 421, 4905 ]
def METHOD_NAME(self): invalid = os.path.join("bin", "file", "does", "not", "exist") for path in self.ROOTS: newpath = os.path.join(path, invalid) dirlist = DirectoryTree(newpath, folders=self.ROOTS) selected = dirlist.get_selected_paths() dirlist.destroy() # select the last valid parent directory self.assertEqual(len(selected), 1) self.assertTrue(selected[0].startswith(path))
[ 9, 1068, 2471 ]
async def METHOD_NAME(port=10000, input_dir: str = None): """Run sim ssh server using the files in the given dir""" keyfile = Path(get_sq_install_dir()) / 'config/etc' / 'ssh_insecure_key' await asyncssh.listen( '127.0.0.1', port, server_factory=lambda: MySSHServer(input_dir=input_dir), server_host_keys=[keyfile])
[ 447, 163 ]
def METHOD_NAME( self, artifact_id: str, *, finalize: bool = True, before_commit: step_upload.PreCommitFn, result_future: "concurrent.futures.Future[None]", ): event = step_checksum.RequestCommitArtifact( artifact_id, finalize, before_commit, result_future ) self._incoming_queue.put(event)
[ 1160, 1831 ]
def METHOD_NAME(symbol, event): component = symbol.getComponent() component.getSymbolByID("PAC_HEADER").setEnabled(event["value"]) component.getSymbolByID("PAC_SOURCE").setEnabled(event["value"]) component.getSymbolByID("PAC_SYS_DEF").setEnabled(event["value"]) Database.setSymbolValue("core", pacInstanceName.getValue() + "_CLOCK_ENABLE", event["value"], 1)
[ 86, 1023, 544, 552, 1042 ]
def METHOD_NAME(MEE, outputfile, Z_max=86, realtype='real_t'): """ Save data to C++ file. """ s = "/**\n" s += " * This file was automatically generated by 'tools/Generate_MeanExcitationEnergy.py'.\n" s += " */\n\n" s += "/* List of mean excitation energies in units of eV */\n" s += "const {0} MEAN_EXCITATION_ENERGY_EXTENDED[{1}][{1}] = {{\n".format(realtype, Z_max) for Z in range(1, Z_max+1): s += "/* {} */ {{".format(element_symbol(Z)) for Z0 in range(1, Z_max+1): s += " {:.1F}".format(MEE[Z][Z0]) if Z0<Z_max: s += ',' if Z < Z_max: s += ' },\n' else: s += ' }\n' s += "};\n\n" if outputfile is not None: # Create directory if it doesn't already exist pathlib.Path(outputfile).parent.mkdir(parents=True, exist_ok=True) # Write C++ file with open(outputfile, 'w') as f: f.write(s) return s
[ 73, 1273, -1 ]
def METHOD_NAME(self): if self._category == 'pass_fail' and self._value_unit == 'boolean': self._initialize_pass_fail_recording(self._add_pass_fail_result_category_pass_fail) return if self._category == 'pass_only': self._initialize_pass_fail_recording(self._add_pass_fail_result_category_pass_only) return if self._category in ('fail_only', 'code_issue'): self._initialize_pass_fail_recording(self._add_pass_fail_result_category_fail_only) return
[ 15, 403, 180, 2104, 217, 2002 ]
def METHOD_NAME(x, shape): return op('reshape', [x, shape]).as_tensor()
[ 3013 ]
def METHOD_NAME(mock_click_echo, mock_list, mock_package): mock_list.return_value = [("dist", "plugin")] check_plugin = NewVersionCheckPlugin() context = build_core_context( check_plugin, { new_version_check_plugin.ConfigKey.PACKAGE_INDEX.value: "https://testing.package.index" }, ) check_plugin.vdk_exit(context) mock_package.assert_any_call( package_index="https://testing.package.index", package_name="vdk-core" ) mock_package.assert_any_call( package_index="https://testing.package.index", package_name="dist" ) # we verify the correctness of the command that is suggested to the user expected_command = "pip install --upgrade-strategy eager -U vdk-core dist --extra-index-url https://testing.package.index" assert any( filter(lambda c: expected_command in str(c), mock_click_echo.mock_calls) ), f"did not get expected substring inside message: {mock_click_echo.mock_calls}"
[ 9, 80, 281, 250 ]
def METHOD_NAME(self): rev = self.fs.repo.get_rev() if self.def_repo.get(self.PARAM_REV_LOCK) is None: self.def_repo[self.PARAM_REV_LOCK] = rev
[ 73 ]
def METHOD_NAME(twth, wavelength, ang_units='degrees'): ''' Converts 2th axis into q (q returned in inverse units of wavelength) q = [(4*PI)/lamda]*sin(2th/2) ang_unit : default in degrees; will convert from 'rad' if given ''' if not ang_units.startswith('rad'): twth = DEG2RAD*twth return ((2*TAU)/wavelength)*sin(twth/2.)
[ 1010, 280, -1 ]
def METHOD_NAME(self): return """\ fill Sets the fill ratio of the `spaceframe` elements. The default fill value is 1 meaning that they are entirely shaded. Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. show Displays/hides tetrahedron shapes between minimum and maximum iso-values. Often useful when either caps or surfaces are disabled or filled with values less than 1. """
[ 1302, 1303 ]
def METHOD_NAME(self): ...
[ 203, 246 ]
def METHOD_NAME(self, bookmarks: Dict = None) -> Generator[List[Dict], None, None]: incremental_range = self.config.get('incremental_range') now = datetime.datetime.now() start_date = None if self.config.get('start_date'): start_date = dateutil.parser.parse(self.config.get('start_date')) else: if incremental_range == 'daily': s_d = now.replace(hour=0, minute=0, second=0, microsecond=0) start_date = s_d + datetime.timedelta(days=-1, hours=0) elif incremental_range == 'hourly': s_d = now.replace(minute=0, second=0, microsecond=0) start_date = s_d + datetime.timedelta(days=0, hours=-1) self.logger.info('start_date: {} '.format(start_date)) end_date = None if self.config.get('end_date'): end_date = dateutil.parser.parse(self.config.get('end_date')).replace(tzinfo=pytz.utc) else: if incremental_range == 'daily': end_date = now.replace(hour=0, minute=0, second=0, microsecond=0).replace(tzinfo=pytz.utc) elif incremental_range == 'hourly': end_date = now.replace(minute=0, second=0, microsecond=0).replace(tzinfo=pytz.utc) self.logger.info('end_date: {} '.format(end_date)) # if the state file has a date_to_resume, we use it as it is. # if it doesn't exist, we overwrite by start date s_d = start_date.strftime('%Y-%m-%d %H:%M:%S') last_date = dateutil.parser.parse((bookmarks or {}).get('start_time', s_d)) self.logger.info('last_date: {} '.format(last_date)) # no real reason to assign this other than the naming # makes better sense once we go into the loop current_date = last_date.replace(tzinfo=pytz.utc) columns = extract_selected_columns(self.stream.metadata) metrics = [c for c in columns if c not in ['start_time', 'end_time']] while current_date < end_date: if incremental_range == 'daily': next_date = current_date + datetime.timedelta(days=1, hours=0) elif incremental_range == 'hourly': next_date = current_date + datetime.timedelta(days=0, hours=1) yield self.get_metrics( metrics, current_date, next_date, ) current_date = next_date
[ 557, 365 ]
def METHOD_NAME(pbox, anchor, downsample): pbox = decode_yolo(pbox, anchor, downsample) pbox = xywh2xyxy(pbox) return pbox
[ 2739, 1053 ]
def METHOD_NAME(self): x, y, h = self.width, self.length, self.height t = self.thickness hf = self.frontheight yg = self.tophole tf = self.fronttop r=self.radius if (hf > h): # Give an error because the result will be wrong with possible unconnected paths raise ValueError("Height at front of shoe must be less than height at back of shoe.") stretch = (self.edges["X"].settings.stretch) self.ctx.save() self.rectangularWall(y, x, "FFFF", move="up", label="Bottom") lf,a=self.shoeside(y,h,hf,yg,tf,r, move="up", label="Side") self.shoeside(y,h,hf,yg,tf,r, move="mirror up", label="Side") self.ctx.restore() self.rectangularWall(y, x, "FFFF", move="right only") self.rectangularWall(x, h, "ffef", move="up", label="Back") self.rectangularWall(x, hf, "ffff", move="up", label="front") dr = a*(r-t)/stretch self.shoelip(x, tf, dr, lf, label="top")
[ 338 ]
def METHOD_NAME(cls, code: str, verify_package=False): try: if verify_package: ScorePackageValidator.execute(code) icon_score: 'IconScoreBase' = cls._get_icon_score(GETAPI_DUMMY_ADDRESS, code) get_api = getattr(icon_score, '_IconScoreBase__get_api') ret = get_api() status = Status.SUCCESS except BaseException as e: status, ret = cls._get_status_from_exception(e) return status, ret
[ 19, 747, 58 ]