Schema: text (string, lengths 15 to 7.82k) and ids (sequence, lengths 1 to 7). Each record below pairs a text entry (source code with its method name masked as METHOD_NAME) with its ids entry.
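A minimal sketch of how records like these could be iterated, assuming the dump comes from a Hugging Face dataset with these two columns; the dataset path below is a hypothetical placeholder, not taken from this document:

    # Hypothetical loader for a dataset with "text" and "ids" columns.
    from datasets import load_dataset

    ds = load_dataset("user/method-name-corpus", split="train")  # placeholder path
    for record in ds.select(range(3)):
        snippet = record["text"]  # code with its method name replaced by METHOD_NAME
        ids = record["ids"]       # sequence of 1-7 integer ids for the record
        print(len(snippet), ids)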
def METHOD_NAME(): run_unittest(GetoptTests)
[ 9, 57 ]
async def METHOD_NAME(user_id):
    with plugins.runtime.DB as db:
        curr_items = await db.get(mk_list_key(user_id))
        await db.set(mk_list_key(user_id), [])
        print('Removed access to any corpus for the user.')
        print(('Removed values:\n\t{0}'.format(', '.join(curr_items))))
[ 188, 75, 11815 ]
def METHOD_NAME(self, x): return {"logits": self.model(x)}
[ 1852 ]
def METHOD_NAME(self) -> typing.Sequence[DataAndMetadata.DataAndMetadata]: ...
[ 1383, 13210 ]
def METHOD_NAME(self):
    # The interior of the toplevel panel
    interior = self.interior()
    mainFrame = Frame(interior)
    self.inputZone = Pmw.Group(mainFrame, tag_text='Offset setting')
    self.inputZone.pack(fill='both', expand=1)
    settingFrame = self.inputZone.interior()
    Label(settingFrame, text=' X ').place(anchor=NW, x=110, y=15)
    Label(settingFrame, text=' Y ').place(anchor=NW, x=205, y=15)
    Label(settingFrame, text=' Z ').place(anchor=NW, x=295, y=15)
    self.move_x = Pmw.EntryField(settingFrame, label_text='Move :', labelpos='w', value='0.0', validate=Pmw.realvalidator)
    self.move_x.component('entry').config(width=10)
    self.move_y = Pmw.EntryField(settingFrame, value='0.0', validate=Pmw.realvalidator)
    self.move_y.component('entry').config(width=10)
    self.move_z = Pmw.EntryField(settingFrame, value='0.0', validate=Pmw.realvalidator)
    self.move_z.component('entry').config(width=10)
    self.move_x.place(anchor=NW, x=50, y=40)
    self.move_y.place(anchor=NW, x=185, y=40)
    self.move_z.place(anchor=NW, x=275, y=40)
    self.rotate_x = Pmw.EntryField(settingFrame, label_text='Rotate:', labelpos='w', value='0.0', validate=Pmw.realvalidator)
    self.rotate_x.component('entry').config(width=10)
    self.rotate_y = Pmw.EntryField(settingFrame, value='0.0', validate=Pmw.realvalidator)
    self.rotate_y.component('entry').config(width=10)
    self.rotate_z = Pmw.EntryField(settingFrame, value='0.0', validate=Pmw.realvalidator)
    self.rotate_z.component('entry').config(width=10)
    self.rotate_x.place(anchor=NW, x=50, y=70)
    self.rotate_y.place(anchor=NW, x=185, y=70)
    self.rotate_z.place(anchor=NW, x=275, y=70)
    self.scale_x = Pmw.EntryField(settingFrame, label_text='Scale :', labelpos='w', value='1.0', validate=Pmw.realvalidator)
    self.scale_x.component('entry').config(width=10)
    self.scale_y = Pmw.EntryField(settingFrame, value='1.0', validate=Pmw.realvalidator)
    self.scale_y.component('entry').config(width=10)
    self.scale_z = Pmw.EntryField(settingFrame, value='1.0', validate=Pmw.realvalidator)
    self.scale_z.component('entry').config(width=10)
    self.scale_x.place(anchor=NW, x=52, y=100)
    self.scale_y.place(anchor=NW, x=185, y=100)
    self.scale_z.place(anchor=NW, x=275, y=100)
    self.numberOfCopy = Pmw.EntryField(settingFrame, label_text='Number of Copy :', labelpos='w', value='1', validate=Pmw.integervalidator)
    self.numberOfCopy.component('entry').config(width=15)
    self.numberOfCopy.place(anchor=NW, x=52, y=150)
    settingFrame.pack(fill=BOTH, expand=1, padx=7, pady=7)
    self.button_ok = Button(mainFrame, text="OK", command=self.ok_press, width=10)
    self.button_ok.pack(fill=BOTH, expand=0, side=RIGHT)
    mainFrame.pack(fill='both', expand=1, padx=7, pady=7)
[ 129, 1090 ]
def METHOD_NAME(self):
    for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
        self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
[ 9, 13662 ]
def METHOD_NAME(self) -> None:
    expected_topic = "student-yqqtag"
    expected_message = "**eeshangarg** extended the deadline for the task [Sails unspread it stopped at kearney](https://codein.withgoogle.com/dashboard/task-instances/6296903092273152/) by 1.0 day(s)."
    self.check_webhook("task_deadline_extended_by_mentor", expected_topic, expected_message)
[ 9, 978, 417, 277 ]
def METHOD_NAME(self, name, value): assert self._values[name] == value
[ 250, 99 ]
def METHOD_NAME():
    reg_dir = os.path.join(CLI_DEMO_DIR, 'regression')
    script = os.path.join(reg_dir, 'mapfeat.py')
    cmd = ['python', script]
    subprocess.check_call(cmd, cwd=reg_dir)
    script = os.path.join(reg_dir, 'mknfold.py')
    cmd = ['python', script, 'machine.txt', '1']
    subprocess.check_call(cmd, cwd=reg_dir)
    exe = os.path.join(DEMO_DIR, os.path.pardir, 'xgboost')
    conf = os.path.join(reg_dir, 'machine.conf')
    subprocess.check_call([exe, conf], cwd=reg_dir)
[ 9, 615, 1399, 2660 ]
def METHOD_NAME( self, index ) :
[ 99, 3432, 43, 724 ]
def METHOD_NAME(self): ...
[ 214, 462, 534 ]
def METHOD_NAME(self):
[ 9, 1399, 56, 667 ]
def METHOD_NAME(
    A: T.Buffer((4,), "int8", offset_factor=1),
    B: T.Buffer((4, 4), "int8", offset_factor=1),
    C: T.Buffer((4,), "int32", offset_factor=1),
[ 16439, 18172, 18173, 1966 ]
def METHOD_NAME(self, session, id=None, **params):
    query = session.query(PromoCode).filter(PromoCode.uses_count == 0)
    if id is not None:
        ids = [s.strip() for s in id.split(',') if s.strip()]
        query = query.filter(PromoCode.id.in_(ids))
    result = query.delete(synchronize_session=False)
    referer = cherrypy.request.headers.get('Referer', 'index')
    page = urllib.parse.urlparse(referer).path.split('/')[-1]
    raise HTTPRedirect(page + '?message={}', '{} promo code{} deleted'.format(result, '' if result == 1 else 's'))
[ 34, 5738, 1114 ]
def METHOD_NAME():
    listdir = MockListDir('/etc/vsftpd', [])
    fileops = MockFileOperations()
    facts = vsftpdconfigread.get_vsftpd_facts(read_func=fileops.read, listdir=listdir.listdir)
    assert facts.default_config_hash is None
    assert not facts.configs
[ 9, 19, 9792, 5035, 35, 1190 ]
def METHOD_NAME(domain):
    if domain in settings.DOMAIN_MODULE_MAP:
        return settings.DOMAIN_MODULE_MAP[domain]
    return custom_domain_module(domain)
[ 19, 343, 1674, 298 ]
def METHOD_NAME(df, columns): return df[columns]
[ 2981, 1951 ]
def METHOD_NAME(models: Union[Dict[str, Any], List[str]], included_model_types: List[str]) -> Union[Dict[str, Any], List[str]]:
    """
    Only include models specified in `included_model_types`; other models will be removed.
    If a model specified in `included_model_types` isn't present in `models`, will warn users and ignore.

    Parameters
    ----------
    models: Union[Dict[str, Any], List[str]]
        A dictionary containing models and their hyperparameters
    included_model_types: List[str]
        List of model types to be included

    Return
    ------
    Union[Dict[str, Any], List[str]]
        Updated dictionary or list with correct models
    """
    if isinstance(models, dict):
        included_models = {model: val for model, val in models.items() if model in included_model_types}
        missing_models = set(included_model_types) - set(included_models.keys())
    elif isinstance(models, list):
        included_models = [model for model in models if model in included_model_types]
        missing_models = set(included_model_types) - set(included_models)
    if included_model_types is not None:
        logger.log(20, f"Included models: {list(included_model_types)} (Specified by `included_model_types`, all other model types will be skipped)")
        if len(missing_models) > 0:
            logger.warning(f"\tThe models types {list(missing_models)} are not present in the model list specified by the user and will be ignored:")
    return included_models
[ 1872, 379 ]
def METHOD_NAME(cls):
    if cls.config.connect:
        return
    path = cls.path_by_nick(cls.os_api, "os_poweroff")
    try:
        cls.curl(path, method='POST', timeout=0.5)
    except:
        pass
    retry = 10
    while cls.os_process.poll() == None:
        retry -= 1
        if retry == 0:
            raise Exception("Fail to shutdown server")
        time.sleep(1)
[ 158 ]
def METHOD_NAME(self): """display the tooltip""" if self.tipwindow: return self.tipwindow = tw = Toplevel(self.anchor_widget) # show no border on the top level window tw.wm_overrideredirect(1) try: # This command is only needed and available on Tk >= 8.4.0 for OSX. # Without it, call tips intrude on the typing process by grabbing # the focus. tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates") except TclError: pass self.position_window() self.showcontents() self.tipwindow.update_idletasks() # Needed on MacOS -- see #34275. self.tipwindow.lift() # work around bug in Tk 8.5.18+ (issue #24570)
[ 6583 ]
def METHOD_NAME(): """Create a log folder for test logs.""" if not os.path.isdir(app.LOG_DIR): os.mkdir(app.LOG_DIR)
[ 129, 9, 390, 451 ]
def METHOD_NAME(self):
    self.dps[MODE_DPS] = "colour"
    self.assertEqual(self.light.color_mode, ColorMode.HS)
    self.dps[MODE_DPS] = "white"
    self.assertEqual(self.light.color_mode, ColorMode.WHITE)
    self.dps[MODE_DPS] = "scene"
    self.assertEqual(self.light.color_mode, ColorMode.HS)
    self.dps[MODE_DPS] = "scene_1"
    self.assertEqual(self.light.color_mode, ColorMode.HS)
    self.dps[MODE_DPS] = "scene_2"
    self.assertEqual(self.light.color_mode, ColorMode.HS)
    self.dps[MODE_DPS] = "scene_3"
    self.assertEqual(self.light.color_mode, ColorMode.HS)
    self.dps[MODE_DPS] = "scene_4"
    self.assertEqual(self.light.color_mode, ColorMode.HS)
[ 9, 4322, 36, 854 ]
def METHOD_NAME(self): """Spawn a new Python interpreter with the same arguments as this one, but running the reloader thread. """ while 1: click.secho('> Restarting (%s mode)' % self.name, fg='yellow') args = _get_args_for_reloading() new_environ = os.environ.copy() new_environ['EMMETT_RUN_MAIN'] = 'true' # a weird bug on windows. sometimes unicode strings end up in the # environment and subprocess.call does not like this, encode them # to latin1 and continue. # if os.name == 'nt' and PY2: # for key, value in iteritems(new_environ): # if isinstance(value, unicode): # new_environ[key] = value.encode('iso-8859-1') exit_code = subprocess.call(args, env=new_environ) if exit_code != 3: return exit_code
[ 1141, 41, 13694 ]
def METHOD_NAME(
    self,
    url: str = "/",
    data: Optional[Dict] = None,
    query_params: Optional[Dict] = None,
) -> Any:
    return self._request("PATCH", url, data=data, query_params=query_params)
[ 1575 ]
def METHOD_NAME(): """Test that metric key names are converted correctly.""" metrics = {"1": 1.0, "2": 2.0, "3": 3.0} prefix = "foo" separator = "." converted_metrics = _prefix_metric_keys(metrics, prefix, separator) assert converted_metrics == {"foo.1": 1.0, "foo.2": 2.0, "foo.3": 3.0}
[ 9, 426, 1341, 219 ]
def METHOD_NAME(self):
    self.login_as(self.admin)
    url = reverse('api-v2.1-admin-groups')
    limit_punctuation = """-'_."""
    group_name = randstring(2) + random.choice(limit_punctuation) + randstring(2)
    data = {
        'group_name': group_name,
        'group_owner': self.user.email
    }
    resp = self.client.post(url, data)
    self.assertEqual(201, resp.status_code)
    json_resp = json.loads(resp.content)
    assert json_resp['name'] == group_name
    assert json_resp['owner'] == self.user.email
    self.remove_group(json_resp['id'])
[ 9, 1046, 129, 604, 1467, 6751 ]
def METHOD_NAME(method, cmd_args, env={}, capture_stderr=False, return_bytes=False, trap=False, input=None):
    # A safe way to execute processes.
    # Some processes like apt-get require being given a sane PATH.
    import subprocess

    env.update({ "PATH": "/sbin:/bin:/usr/sbin:/usr/bin" })
    kwargs = {
        'env': env,
        'stderr': None if not capture_stderr else subprocess.STDOUT,
    }
    if method == "check_output" and input is not None:
        kwargs['input'] = input

    if not trap:
        ret = getattr(subprocess, method)(cmd_args, **kwargs)
    else:
        try:
            ret = getattr(subprocess, method)(cmd_args, **kwargs)
            code = 0
        except subprocess.CalledProcessError as e:
            ret = e.output
            code = e.returncode
    if not return_bytes and isinstance(ret, bytes):
        ret = ret.decode("utf8")
    if not trap:
        return ret
    else:
        return code, ret
[ 2770 ]
def METHOD_NAME(client_server):  # pylint: disable=redefined-outer-name
    with pytest.raises(JsonRpcMethodNotFound):
        client_server._endpoint.request('unknown_method').result(timeout=CALL_TIMEOUT)
[ 9, 1038, 277 ]
def METHOD_NAME(contract, web3, contract_functions, func_name):
    # direct function calls
    if hasattr(contract, func_name):
        return getattr(contract, func_name)

    # contract functions
    def wrap(*args, **kwargs):
        args2 = list(args)
        tx_dict = None

        # retrieve tx dict from either args or kwargs
        if args and isinstance(args[-1], dict):
            tx_dict = args[-1] if args[-1].get("from") else None
            args2 = list(args[:-1])
        if "tx_dict" in kwargs:
            tx_dict = kwargs["tx_dict"] if kwargs["tx_dict"].get("from") else None
            del kwargs["tx_dict"]

        # use addresses instead of wallets when doing the call
        for arg in args2:
            if hasattr(arg, "address"):
                args2 = list(args2)
                args2[args2.index(arg)] = arg.address

        func = getattr(contract_functions, func_name)
        result = func(*args2, **kwargs)

        # view/pure functions don't need "from" key in tx_dict
        if not tx_dict and result.abi["stateMutability"] not in ["view", "pure"]:
            raise Exception("Needs tx_dict with 'from' key.")

        # if it's a view/pure function, just call it
        if result.abi["stateMutability"] in ["view", "pure"]:
            return result.call()
        else:
            # if it's a transaction, build and send it
            wallet = tx_dict["from"]
            tx_dict2 = tx_dict.copy()
            tx_dict2["nonce"] = web3.eth.get_transaction_count(wallet.address)
            tx_dict2["from"] = tx_dict["from"].address
            result = result.build_transaction(tx_dict2)

            # sign with wallet private key and send transaction
            signed_tx = web3.eth.account.sign_transaction(result, wallet._private_key)
            receipt = web3.eth.send_raw_transaction(signed_tx.rawTransaction)
            return web3.eth.wait_for_transaction_receipt(receipt)

    return wrap
[ 559, 291 ]
def METHOD_NAME(obj): """Returns True if the supplied object is a property container (or its Proxy).""" if type(obj) == PropertyBag: return True if hasattr(obj, "Proxy"): return METHOD_NAME(obj.Proxy) return False
[ 137, 1042, 7512 ]
def METHOD_NAME(page): """Returns the number of pages that are children of a particular page""" count = Page.objects.child_of(page).live().count() return "{} {}".format(count, 'result' if count == 1 else 'results')
[ 186, 1174, 29 ]
def METHOD_NAME(cls):
    cls.browser.quit()
    if cls.display:
        cls.display.stop()
    super(SeleniumTestCase, cls).METHOD_NAME()
[ 531, 481, 2 ]
def METHOD_NAME(self, xc=None):
    if xc is not None and xc != self.xc:
        mf = copy.copy(self)
        mf.xc = xc
        mf.converged = False
    return self
[ 24, 9258 ]
def METHOD_NAME(self, test, vcn):
    """
    test adding freeform tag to vcn
    """
    vcn_ocid = self._get_vcn_details(vcn)
    session_factory = test.oci_session_factory()
    policy = test.load_policy(
        {
            "name": "add-tag-freeform-to-vcn",
            "resource": "oci.vcn",
            "filters": [
                {"type": "value", "key": "id", "value": vcn_ocid},
            ],
            "actions": [{"type": "update", "freeform_tags": {"Environment": "Development"}}],
        },
        session_factory=session_factory,
    )
    policy.run()
    resource = self._fetch_instance_validation_data(policy.resource_manager, vcn_ocid)
    test.assertEqual(resource["id"], vcn_ocid)
    test.assertEqual(resource["freeform_tags"]["Environment"], "Development")
[ 9, 86, 10451 ]
def METHOD_NAME(exception: JsonableError) -> MutableJsonResponse:
    """
    This should only be needed in middleware; in app code, just raise.

    When app code raises a JsonableError, the JsonErrorHandler
    middleware takes care of transforming it into a response by
    calling this function.
    """
    response_type = "error"
    if 200 <= exception.http_status_code < 300:
        response_type = "success"
    response = json_response(
        response_type, msg=exception.msg, data=exception.data, status=exception.http_status_code
    )
    for header, value in exception.extra_headers.items():
        response[header] = value
    return response
[ 763, 17, 280, 168 ]
def METHOD_NAME(self):
    self.mc.events.post('show_slide1')
    self.advance_time()
    self.mc.events.post('wipe')
    self.advance_time()
[ 9, 4641 ]
def METHOD_NAME(self, original):
    # pylint: disable=redefined-builtin
    @functools.wraps(original)
    def wrapper(name, globals=None, locals=None, fromlist=(), level=0):
        is_absolute_import = level == 0
        if is_absolute_import:
            self._record_imported_module(name)
        return original(name, globals, locals, fromlist, level)

    return wrapper
[ 503, 512 ]
def METHOD_NAME(f: C) -> C:
    return click.option(
        "--fail-on-quota-errors",
        is_flag=True,
        default=False,
        show_default=True,
        help=(
            "Cause the task to fail if any quota exceeded errors are hit "
            "during the transfer."
        ),
    )(f)
[ 180, 69, 2268, 1096, 1335 ]
def METHOD_NAME(self): """Human readable description of modeling approach. The modeler description is intended for the energy modeler using the measure. It should explain the measure's intent, and include any requirements about how the baseline model must be set up, major assumptions made by the measure, and relevant citations or references to applicable modeling resources """ return "MODELER_DESCRIPTION_TEXT"
[ -1, 1067 ]
def METHOD_NAME(self):
    sf0 = set(self.files)
    f1 = os.listdir(test_support.TESTFN)
    f2 = os.listdir(unicode(test_support.TESTFN, sys.getfilesystemencoding()))
    sf2 = set(os.path.join(unicode(test_support.TESTFN), f) for f in f2)
    self.assertEqual(sf0, sf2)
    self.assertEqual(len(f1), len(f2))
[ 9, 2778 ]
def METHOD_NAME():
    shape = (10, 10, 10, 10)
    x = relay.var("x", shape=shape)
    y = relay.nn.global_avg_pool2d(x)
    func = relay.Function([x], y)
    _construct_model(func)
[ 9, 285, 1654, 6768 ]
def METHOD_NAME(evt: Event, var: GameState, player: User, old_role: Optional[str]):
    if old_role == "vigilante":
        del KILLS[:player:]
        PASSED.discard(player)
[ 69, 80, 1018 ]
def METHOD_NAME(files):
    img_file, inst_file, segm_file = files
    inst_img = mmcv.imread(inst_file, 'unchanged')
    # ids < 24 are stuff labels (filtering them first is about 5% faster)
    unique_inst_ids = np.unique(inst_img[inst_img >= 24])
    anno_info = []
    for inst_id in unique_inst_ids:
        # For non-crowd annotations, inst_id // 1000 is the label_id
        # Crowd annotations have <1000 instance ids
        label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
        label = CSLabels.id2label[label_id]
        if not label.hasInstances or label.ignoreInEval:
            continue
        category_id = label.id
        iscrowd = int(inst_id < 1000)
        mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
        mask_rle = maskUtils.encode(mask[:, :, None])[0]
        area = maskUtils.area(mask_rle)
        # convert to COCO style XYWH format
        bbox = maskUtils.toBbox(mask_rle)
        # for json encoding
        mask_rle['counts'] = mask_rle['counts'].decode()
        anno = dict(
            iscrowd=iscrowd,
            category_id=category_id,
            bbox=bbox.tolist(),
            area=area.tolist(),
            segmentation=mask_rle)
        anno_info.append(anno)
    video_name = osp.basename(osp.dirname(img_file))
    img_info = dict(
        # remove img_prefix for filename
        file_name=osp.join(video_name, osp.basename(img_file)),
        height=inst_img.shape[0],
        width=inst_img.shape[1],
        anno_info=anno_info,
        segm_file=osp.join(video_name, osp.basename(segm_file)))
    return img_info
[ 557, 2029, 100 ]
def METHOD_NAME(self, modulename):
    mpconf = CONF_PATH
    if not os.path.isabs(mpconf):
        mpconf = os.path.join(curdir, mpconf)

    with open(mpconf, 'wb') as f:
        f.write(self.template % {'port': self.port, 'modulename': modulename, 'host': self.host})

    result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
    if result:
        print(result)
[ 447 ]
def METHOD_NAME(self, input):
    runner = CliRunner()
    if 'db_table' in input:
        db = Db()
        data_list = list(db.cfgdb.get_table(input['db_table']))
        input['rc_msg'] = input['rc_msg'].format(",".join(data_list))
    if 'show' in input['cmd']:
        exec_cmd = show.cli.commands["ecn"]
        result = runner.invoke(exec_cmd, input['args'])
        exit_code = result.exit_code
        output = result.output
    elif 'q_cmd' in input['cmd']:
        exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args'])
    else:
        exec_cmd = config.config.commands["ecn"]
        result = runner.invoke(exec_cmd, input['args'])
        exit_code = result.exit_code
        output = result.output
    print(exit_code)
    print(output)
    if input['rc'] == 0:
        assert exit_code == 0
    else:
        assert exit_code != 0
    if 'cmp_args' in input:
        fd = open('/tmp/ecnconfig', 'r')
        cmp_data = json.load(fd)
        if 'cmp_q_args' in input:
            profile, value = self.process_cmp_args(input['cmp_args'][0])
            if 'other_q' in input:
                profile1, value1 = self.process_cmp_args(input['cmp_args'][-1])
            for key in cmp_data:
                queue_idx = ast.literal_eval(key)[-1]
                if queue_idx in input['cmp_q_args']:
                    self.verify_profile(cmp_data[key], profile, value)
                if 'other_q' in input and queue_idx in input['other_q']:
                    self.verify_profile(cmp_data[key], profile1, value1)
        else:
            for args in input['cmp_args']:
                profile, name, value = args.split(',')
                assert(cmp_data[profile][name] == value)
        fd.close()
    if 'rc_msg' in input:
        assert input['rc_msg'] in output
    if 'rc_output' in input:
        assert output == input['rc_output']
[ 5153 ]
def METHOD_NAME(self): return self._data.get("run_uuid")
[ 22, 4977 ]
def METHOD_NAME(f):
    def func(self):
        try:
            f(self)
        finally:
            _teardown_random()

    return func
[ 503, 531, 481 ]
def METHOD_NAME(args, cwd):
    with fs.cd(cwd):
        exit_code = __main__.main(["-c"] + args)
    return (
        PIOCoreRPC.thread_stdout.get_value_and_reset(),
        PIOCoreRPC.thread_stderr.get_value_and_reset(),
        exit_code,
    )
[ 600, 1209, 128 ]
def METHOD_NAME():
    if using_pyside:
        if ProcessCLIInput.instance is None:
            ProcessCLIInput.instance = ProcessCLIInput(0.001)
        ProcessCLIInput.instance.start()
[ 102, 2401 ]
def METHOD_NAME(self) -> int: return sum(len(digests) for digests in self._allowed.values())
[ 5347, 29 ]
def METHOD_NAME(self, session) -> bool:
    issuedToken = IssuedToken.objects.filter(
        session=session
    )
    # TODO: check also ACR
    return (
        (issuedToken.count() - 1) < getattr(
            settings, "OIDCFED_PROVIDER_MAX_REFRESH", 1
        )
    )
[ 137, 466, -1 ]
def METHOD_NAME(packer, button_val, car_fingerprint):
    values = {
        'CRUISE_BUTTONS': button_val,
        'CRUISE_SETTING': 0,
    }
    # send buttons to camera on radarless cars
    bus = 2 if car_fingerprint in HONDA_BOSCH_RADARLESS else get_pt_bus(car_fingerprint)
    return packer.make_can_msg("SCM_BUTTONS", bus, values)
[ 5340, 1409, 462 ]
def METHOD_NAME():
    for doc_type in app_doc_types():
        res = Application.get_db().view(
            'all_docs/by_doc_type',
            startkey=[doc_type],
            endkey=[doc_type, {}],
            reduce=False,
            include_docs=True
        )
        for row in res:
            Application.get_db().delete_doc(row['doc'])
[ 34, 75, 3708 ]
def METHOD_NAME(self, app, datadir):
    if self._host is None or self._port is None or self._ssl is None:
        raise BrokenCoreConnectionException(
            f"Cannot start Spectrum without host ({self._host}), port ({self._port}) or ssl ({self._ssl})"
        )
    try:
        logger.debug(f"Spectrum node is creating a Spectrum instance.")
        self.spectrum = Spectrum(
            self._host,
            self._port,
            self._ssl,
            datadir=datadir,
            app=app,
            proxy_url=app.specter.proxy_url
            if app.specter.tor_type != "none" and app.specter.tor_type != "disabled"
            else None,
        )
        logger.debug(f"{self.name} is instantiating its BridgeRPC.")
        self.bridge = BridgeRPC(self.spectrum, app=app)
        self.spectrum.sync()
    except Exception as e:
        logger.exception(e)
[ 447, 1940 ]
def METHOD_NAME(self, autoExit=False):
    """
    Check if the current application is already running

    Args:
        autoExit (bool): automatically call sys.exit if there is an other
                         instance running

    Returns:
        bool: ``True`` if this is the only application instance
    """
    # check if the pidfile exists
    if not os.path.isfile(self.pidFile):
        return True

    self.pid, self.procname = self.readPidFile()

    # check if the process with specified by pid exists
    if 0 == self.pid:
        return True

    if not tools.processAlive(self.pid):
        return True

    # check if the process has the same procname
    # check cmdline for backwards compatibility
    if self.procname and \
       self.procname != tools.processName(self.pid) and \
       self.procname != tools.processCmdline(self.pid):
        return True

    if autoExit:
        # exit the application
        print("The application is already running !")
        # exit raise an exception so don't put it in a try/except block
        exit(0)

    return False
[ 250 ]
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)

    # define Arg Group ""

    _args_schema = cls._args_schema
    _args_schema.managed_instance_name = AAZStrArg(
        options=["--mi", "--instance-name", "--managed-instance", "--managed-instance-name"],
        help="Name of the managed instance.",
        required=True,
        id_part="name",
    )
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.server_configuration_option_name = AAZStrArg(
        options=["-n", "--name", "--server-configuration-option-name"],
        help="Name of the server configuration option.",
        required=True,
        id_part="child_name_1",
        enum={"allowPolybaseExport": "allowPolybaseExport"},
    )
    return cls._args_schema
[ 56, 134, 135 ]
def METHOD_NAME(*args, **kwargs):
    console_manager = args[0]
    if not console_manager._lock.acquire(False):
        raise ConsoleBusyError("Console is in use.")
    try:
        return function(*args, **kwargs)
    finally:
        console_manager._lock.release()
[ 291 ]
def METHOD_NAME(
    error,
    is_error_cmd,
    logfile_path,
    should_show_stacktraces=False,
):
    """Displays error of appropriate message back to user, prompts user
    to investigate further with `cci error` commands, and writes the
    traceback to the latest logfile.
    """
    error_console = Console(stderr=True)
    if isinstance(error, requests.exceptions.ConnectionError):
        connection_error_message(error_console)
    elif isinstance(error, click.ClickException):
        error_console.print(f"[red bold]Error: {escape(error.format_message())}")
    else:
        # We call str ourselves to make Typeguard shut up.
        error_console.print(f"[red bold]Error: {escape(str(error))}")
    # Only suggest gist command if it wasn't run
    if not is_error_cmd:
        error_console.print(f"[yellow]{SUGGEST_ERROR_COMMAND}")

    # This is None if we're handling an exception for a `cci error` command.
    if logfile_path:
        with open(logfile_path, "a") as log_file:
            traceback.print_exc(file=log_file)  # log stacktrace silently

    if should_show_stacktraces and not isinstance(error, USAGE_ERRORS):
        error_console.print_exception()
[ 276, 442 ]
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
    """
    The system metadata relating to this resource.
    """
    return pulumi.get(self, "system_data")
[ 112, 365 ]
def METHOD_NAME(rrdfile, label, path=""):
    for sched in ['daily', 'weekly', 'monthly', 'hourly']:
        period = sched[0]  # 'w', 'd', 'm', 'h'
        # print("Graphing", sched, label, rrdfile)
        ret = rrdtool.graph("%smetrics-%s.%s.png" % (path, sched, label),
                            "--start", "-1%s" % (period),
                            "--title", label,
                            "--vertical-label=C",
                            "--right-axis-label=%",
                            '--watermark=rtl_433',
                            "-w 800", "-h 200",
                            "DEF:t=%s:temperature:AVERAGE" % (rrdfile),
                            "DEF:h=%s:humidity:AVERAGE" % (rrdfile),
                            "LINE1:t#00FF00:temperature\r",
                            "LINE2:h#0000FF:humidity\r",
                            "GPRINT:t:AVERAGE:Temp avg %6.1lf C",
                            "GPRINT:t:MAX:Temp max %6.1lf C\r",
                            "GPRINT:h:AVERAGE:Hum avg %6.0lf %%",
                            "GPRINT:h:MAX:Hum max %6.0lf %%\r")
    return ret
[ 303, 7218 ]
def METHOD_NAME(self) -> Callable:
    """Select the function to calculate the all events.

    This is separate method to make it easy to overwrite by a subclass.
    """
    raise NotImplementedError()
[ 1472, 75, 417, 381, 103 ]
def METHOD_NAME(self):
    self.pre_operations()
    self.BestPracticesGet(ctx=self.ctx)()
    self.post_operations()
[ 750, 710 ]
def METHOD_NAME(self):
    self.component._rm_client = mock_client = MagicMock()
    mock_client.describe_simulation_job.return_value = {
        "outputLocation": {"s3Bucket": "cool-bucket", "s3Prefix": "fake-key",}
    }

    self.assertEqual(
        self.component._get_job_outputs(), "s3://cool-bucket/fake-key",
    )
[ 9, 19, 202, 141 ]
def METHOD_NAME(self,
                infrastructure_id: int,
                name: str,
                ip: str,
                dns: str,
                **kwargs
                ) -> List[Dict]:
    '''
    Creates a new directory instance.

    Args:
        infrastructure_id (int): The infrastructure object to bind this directory to.
        name (str): Name of the directory instance.
        ip (str): The IP Address of the directory server.
        dns (str): The DNS domain that this directory is tied to.
        directory_type (optional, str): The directory's type.
        ldap_port (optional, str): The port number associated to the LDAP service on the directory server.
        global_catalog_port (optional, str): The port number associated to the Global Catalog service running on the directory server.
        smb_port (optional, str): The port number associated to the Server Messaging Block (SMB) service running on the directory server.

    Returns:
        dict: The created directory instance.

    Examples:
        >>> tad.directories.create(
        ...     infrastructure_id=1,
        ...     name='ExampleServer',
        ...     ip='172.16.0.1',
        ...     directory_type='????',
        ...     dns='company.tld',
        ...     )
    '''
    schema = DirectorySchema(unknown=INCLUDE)
    payload = [schema.dump(schema.load(
        dict_clean({
            'infrastructureId': infrastructure_id,
            'name': name,
            'ip': ip,
            'type': kwargs.get('directory_type'),
            'dns': dns,
            'ldapPort': kwargs.get('ldap_port'),
            'globalCatalogPort': kwargs.get('global_catalog_port'),
            'smbPort': kwargs.get('smb_port')
        })
    ))]
    return schema.load(self._post(json=payload), many=True)
[ 129 ]
def METHOD_NAME(self, load_collection, mock_rod):
    # If this fails, an exception is raised
    # and pytest automatically fails
    load_collection[3] = mock_rod
[ 9, 5719 ]
def METHOD_NAME(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--embed-dim', type=int, metavar='N', help='embedding dimension') parser.add_argument('--num-attention-heads', type=int, metavar='N', help='num attention heads') parser.add_argument('--num-layers', type=int, metavar='N', help='num layers') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability for all fully connected layers ' 'in the embeddings, encoder, and pooler') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') # fmt: on
[ 238, 335 ]
def METHOD_NAME(self, strings: Iterable[str]) -> None:
    for string in strings:
        parts = string.split(SEP)
        node = self._tree
        for part in parts:
            node = node.setdefault(part, Node())
[ 238, 362 ]
def METHOD_NAME(shape): """Return a list containing the placement, and dimensions of the shape. The dimensios are length, width and height of a the parallelepiped, rounded to the value indicated by `precision`. The placement point is the lowest corner of the shape. If it is not a parallelepiped (cuboid), return None. """ if not isCubic(shape): return None # determine lowest face, which will be our base z = [10, 1000000000000] for i in range(len(shape.Faces)): if shape.Faces[i].CenterOfMass.z < z[1]: z = [i, shape.Faces[i].CenterOfMass.z] if z[0] > 5: return None base = shape.Faces[z[0]] basepoint = base.Edges[0].Vertexes[0].Point plpoint = base.CenterOfMass # basenorm = base.normalAt(0.5, 0.5) # getting length and width vx = vec(base.Edges[0]) vy = vec(base.Edges[1]) if round(vx.Length) == round(vy.Length): vy = vec(base.Edges[2]) # getting rotations rotZ = DraftVecUtils.angle(vx) rotY = DraftVecUtils.angle(vx, App.Vector(vx.x, vx.y, 0)) rotX = DraftVecUtils.angle(vy, App.Vector(vy.x, vy.y, 0)) # getting height vz = None rpi = round(math.pi/2, precision()) for i in range(1, 6): for e in shape.Faces[i].Edges: if basepoint in [e.Vertexes[0].Point, e.Vertexes[1].Point]: vtemp = vec(e) # print(vtemp) if round(vtemp.getAngle(vx), precision()) == rpi: if round(vtemp.getAngle(vy), precision()) == rpi: vz = vtemp if not vz: return None mat = App.Matrix() mat.move(plpoint) mat.rotateX(rotX) mat.rotateY(rotY) mat.rotateZ(rotZ) return [App.Placement(mat), round(vx.Length, precision()), round(vy.Length, precision()), round(vz.Length, precision())]
[ 19, 7495, 5164 ]
def METHOD_NAME(self, command_args):
    super().METHOD_NAME(command_args)
    self._execute_operations()
    return self._output()
[ 1519 ]
def METHOD_NAME(vhost_path: str) -> str:
    """Get the Augeas path for a vhost with the file path removed.

    :param str vhost_path: Augeas virtual host path

    :returns: Augeas path to vhost relative to the containing file
    :rtype: str
    """
    return _split_aug_path(vhost_path)[1]
[ 19, 2026, 7527, 157 ]
def METHOD_NAME():
    from . import conditional_assets

    return load_assets_from_modules([conditional_assets])
[ 869, 3407, 1230 ]
def METHOD_NAME(
    self, classes: List[Class], module_namespace: Optional[str]
) -> str:
    """Render the source code of the classes."""
    load = self.env.get_template

    def render_class(obj: Class) -> str:
        """Render class or enumeration."""
        if obj.is_enumeration:
            template = load("enum.jinja2")
        elif obj.is_service:
            template = load("service.jinja2")
        else:
            template = load("class.jinja2")

        return template.render(
            obj=obj,
            module_namespace=module_namespace,
        ).strip()

    return "\n\n\n".join(map(render_class, classes)) + "\n"
[ 338, 393 ]
def METHOD_NAME(self): url = reverse("chapters:glossary") response = self.client.get(url) self.assertEqual(200, response.status_code) self.assertEqual(len(response.context["glossary_terms"]), 0)
[ 9, 15186, 1470, 1179, 41, 654, 2706 ]
def METHOD_NAME(
        device, local_host=None, database_notify_port_num=None,
        database_ack_port_num=None):
    """
    Create a pyNN population which can be included in a network to
    receive spikes from a device connected to the host.

    .. deprecated:: 7.0
        Use :py:class:`spynnaker.pyNN.external_devices` instead.
    """
    moved_in_v7("spynnaker8.external_devices",
                "spynnaker.pyNN.external_devices")
    return moved_code.METHOD_NAME(
        device, local_host, database_notify_port_num,
        database_ack_port_num)
[ 9217, 6234, 5610 ]
def METHOD_NAME(self, tempfile: str) -> None:
    self._run_console_script(
        str(tempfile),
        {
            "entrypoint": "black",
            "options": f"--config {self.pyproject_toml_path}",
        },
    )
[ 22, 5879 ]
def METHOD_NAME(self):
    TestController.METHOD_NAME(self)
    self.delete_all_policies()
    self.delete_all_token()
    self.delete_all_realms()
    self.delete_all_resolvers()
    self.create_common_resolvers()
    self.create_common_realms()
[ 0, 1 ]
def METHOD_NAME(fl): """ Subsets the flow list for all freshwater resource flows, excluding resource/air :param fl: df in standard flowlist format :return: df in standard flowlist format """ flows = fl[fl["Flowable"]=="Water, fresh"] flows = flows[flows["Context"].str.startswith("resource")] flows = flows[~flows["Context"].str.startswith("resource/air")] return flows
[ 19, -1, 191, 7923 ]
def METHOD_NAME(self):
    map_list = []
    for i in range(self.row):
        tmp = []
        for j in range(self.col):
            tmp.append(State(i, j))
        map_list.append(tmp)
    return map_list
[ 176, 422 ]
def METHOD_NAME(cls): """ This method returns a dictionary of allowed actions and possible options in this handler module. :return: dict with actions """ # The event handler has just one action. Maybe we can hide action select for the clarity of the UI METHOD_NAME = {ACTION_TYPE.POST_WEBHOOK: { "URL": { "type": "str", "required": True, "description": _("The URL the WebHook is posted to") }, "content_type": { "type": "str", "required": True, "description": _("The encoding that is sent to the WebHook, for example json"), "value": [ CONTENT_TYPE.JSON, CONTENT_TYPE.URLENCODED] }, "replace": { "type": "bool", "required": True, "description": _("You can replace placeholder like {logged_in_user}") }, "data": { "type": "str", "required": True, "description": _("The data posted in the WebHook") } }} return METHOD_NAME
[ 1116 ]
def METHOD_NAME(filename): """Read in constants file, which must be output in every language.""" constant_defs = read_json_file(filename); constants_text = '\n' for key in constant_defs: value = constant_defs[key] value = value.replace('"', '\\"') constants_text += '\nBlockly.Msg["{0}"] = "{1}";'.format(key, value) return constants_text
[ 557, 891 ]
def METHOD_NAME(self, file: IO[str]) -> None: ...
[ 203, 1159, 171 ]
def METHOD_NAME(self):
[ 9, 654, 2277, 1997 ]
def METHOD_NAME(self) -> Optional['outputs.PrivateEndpointResponse']:
    """
    The resource of private end point.
    """
    return pulumi.get(self, "private_endpoint")
[ 547, 841 ]
def METHOD_NAME(self):
    # These offxml files are located in package data path, which is automatically installed and searched
    file_paths = [smirnoff99Frosst_offxml_file_path, tip3p_offxml_file_path]
    # Create a forcefield from multiple offxml files
    ForceField(file_paths)
[ 9, 129, 3139, 280, 171, 245 ]
def METHOD_NAME(self) -> None:
    self._set_traffic()
    self.generator.start(self._traffic)
[ 447, 2219 ]
def METHOD_NAME(): """ Test element tree interface. >>> element = ET.Element("tag", key="value") >>> tree = ET.ElementTree(element) Make sure all standard element methods exist. >>> check_method(element.append) >>> check_method(element.insert) >>> check_method(element.remove) >>> check_method(element.getchildren) >>> check_method(element.find) >>> check_method(element.findall) >>> check_method(element.findtext) >>> check_method(element.clear) >>> check_method(element.get) >>> check_method(element.set) >>> check_method(element.keys) >>> check_method(element.items) >>> check_method(element.getiterator) Basic method sanity checks. >>> serialize(ET, element) # 1 '<tag key="value" />' >>> subelement = ET.Element("subtag") >>> element.append(subelement) >>> serialize(ET, element) # 2 '<tag key="value"><subtag /></tag>' >>> element.insert(0, subelement) >>> serialize(ET, element) # 3 '<tag key="value"><subtag /><subtag /></tag>' >>> element.remove(subelement) >>> serialize(ET, element) # 4 '<tag key="value"><subtag /></tag>' >>> element.remove(subelement) >>> serialize(ET, element) # 5 '<tag key="value" />' >>> element.remove(subelement) Traceback (most recent call last): ValueError: list.remove(x): x not in list >>> serialize(ET, element) # 6 '<tag key="value" />' """
[ 1090 ]
def METHOD_NAME(
    config: BaseConfig,
    chdir: Path,
    id: str,
    filename: str,
    cls: type[VersionProvider],
    content: str,
    expected: str,
):
    file = chdir / filename
    file.write_text(dedent(content))
    config.settings["version_provider"] = id
    provider = get_provider(config)
    assert isinstance(provider, cls)
    assert provider.get_version() == "0.1.0"
    provider.set_version("42.1")
    assert file.read_text() == dedent(expected)
[ 9, 171, 2190 ]
def METHOD_NAME(self): """convert data images to the convert images""" for idx, tag in enumerate(self._source_images): convert_target = self._convert_images[idx] convert_params = self.params.object_params(convert_target) convert_params["convert_target"] = convert_target img_obj = self.source_disk_define_by_params(self.params, tag) img_obj.convert(convert_params, img_obj.root_dir)
[ 197, 365, 3669 ]
def METHOD_NAME(self) -> None:
    result = self.invoke(["config", "--help"])
    self.assertEqual(0, result.exit_code)
    self.assertFalse(result.exception)
[ 9, 200, 40 ]
def METHOD_NAME(self):
    if 'context_id' in self.lti_params:
        return self.lti_params.get('context_id')
[ 1122, 198 ]
def METHOD_NAME(
    pred: torch.Tensor,
    gt: torch.Tensor,
    crop: int = 1,
    mask: Optional[torch.Tensor] = None,
    get_best_scale: bool = True,
    mask_thr: float = 0.5,
    best_scale_clamp_thr: float = 1e-4,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Evaluate the depth error between the prediction `pred` and the ground
    truth `gt`.

    Args:
        pred: A tensor of shape (N, 1, H, W) denoting the predicted depth maps.
        gt: A tensor of shape (N, 1, H, W) denoting the ground truth depth maps.
        crop: The number of pixels to crop from the border.
        mask: A mask denoting the valid regions of the gt depth.
        get_best_scale: If `True`, estimates a scaling factor of the predicted depth
            that yields the best mean squared error between `pred` and `gt`.
            This is typically enabled for cases where predicted reconstructions
            are inherently defined up to an arbitrary scaling factor.
        mask_thr: A constant used to threshold the `mask` to specify the valid
            regions.
        best_scale_clamp_thr: The threshold for clamping the divisor in best
            scale estimation.

    Returns:
        mse_depth: Mean squared error between `pred` and `gt`.
        abs_depth: Mean absolute difference between `pred` and `gt`.
    """
    # chuck out the border
    if crop > 0:
        gt = gt[:, :, crop:-crop, crop:-crop]
        pred = pred[:, :, crop:-crop, crop:-crop]

    if mask is not None:
        # mult gt by mask
        if crop > 0:
            mask = mask[:, :, crop:-crop, crop:-crop]
        gt = gt * (mask > mask_thr).float()

    dmask = (gt > 0.0).float()
    dmask_mass = torch.clamp(dmask.sum((1, 2, 3)), 1e-4)

    if get_best_scale:
        # mult preds by a scalar "scale_best"
        # s.t. we get best possible mse error
        scale_best = estimate_depth_scale_factor(pred, gt, dmask, best_scale_clamp_thr)
        pred = pred * scale_best[:, None, None, None]

    df = gt - pred

    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
    mse_depth = (dmask * (df**2)).sum((1, 2, 3)) / dmask_mass
    abs_depth = (dmask * df.abs()).sum((1, 2, 3)) / dmask_mass

    return mse_depth, abs_depth
[ 1171, 3144 ]
def METHOD_NAME(self, *args, **kwargs):
    result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
    return result
[ 146 ]
def METHOD_NAME(args, pacu_main):
    session = pacu_main.get_active_session()
    args = parser.parse_args(args)
    print = pacu_main.print
    get_regions = pacu_main.get_regions

    regions = get_regions('waf-regional') if args.regions is None else args.regions.split(',')

    waf_regional_data = {}
    waf_global_data = {}
    for key, val in METHODS:
        waf_regional_data[val] = []
        waf_global_data[val] = []

    for region in regions:
        print('  Staring enumeration of region: {}...'.format(region))
        client = pacu_main.get_boto3_client('waf-regional', region)
        for func, key in METHODS:
            items = grab_data(client, 'list_' + func, key)
            for index, item in enumerate(items):
                param_key = key[:-1] + 'Id'
                param = {param_key: item[param_key]}
                new_data = grab_id_data(client, 'get_' + func[:-1], param)
                new_data['region'] = region
                items[index] = new_data
            waf_regional_data[key].extend(items)

    # Grab additional data specifically for RuleGroups.
    for rule_group in waf_regional_data['RuleGroups']:
        region = rule_group['region']
        client = pacu_main.get_boto3_client('waf-regional', region)
        group_id = rule_group['RuleGroupId']
        rule_group['ActivatedRules'] = grab_data(
            client, 'list_activated_rules_in_rule_group', 'ActivatedRules', RuleGroupId=group_id
        )

    waf_region_data = deepcopy(session.WAFRegional)
    waf_region_data.update(waf_regional_data)
    session.update(pacu_main.database, WAFRegional=waf_region_data)

    if args.global_region:
        client = pacu_main.get_boto3_client('waf')
        print('  Starting enumeration for global WAF...')
        for func, key in METHODS:
            items = grab_data(client, 'list_' + func, key)
            for index, item in enumerate(items):
                param_key = key[:-1] + 'Id'
                param = {param_key: item[param_key]}
                new_data = grab_id_data(client, 'get_' + func[:-1], param)
                items[index] = new_data
            waf_global_data[key].extend(items)

        # Grab additional data specifically for RuleGroups.
        for rule_group in waf_global_data['RuleGroups']:
            group_id = rule_group['RuleGroupId']
            rule_group['ActivatedRules'] = grab_data(
                client, 'list_activated_rules_in_rule_group', 'ActivatedRules', RuleGroupId=group_id
            )

        waf_data = deepcopy(session.WAF)
        waf_data.update(waf_global_data)
        session.update(pacu_main.database, WAF=waf_data)

    summary_data = {}
    for func, key in METHODS:
        summary_data[key] = len(waf_global_data[key]) + len(waf_regional_data[key])
    return summary_data
[ 57 ]
def METHOD_NAME(client, scope, filter=None, top=None, skip_token=None): return client.list(scope=scope, filter=filter, top=top, skiptoken=skip_token)
[ 2268, 377, 452, 245 ]
def METHOD_NAME(self, button):
    self.stuck_record_clear()
    self.click_record_add(button)
    self.click_record_check()
[ 276, 401, 250 ]
def METHOD_NAME(self): self.assertEqual(b"", modhex_decode("")) self.assertEqual(b"\x2d\x34\x4e\x83", modhex_decode("dteffuje")) self.assertEqual( b"\x69\xb6\x48\x1c\x8b\xab\xa2\xb6\x0e\x8f\x22\x17\x9b\x58\xcd\x56", modhex_decode("hknhfjbrjnlnldnhcujvddbikngjrtgh"), )
[ 9, -1, 1268 ]
def METHOD_NAME(job_name):
    envs = []
    if "ps" == job_name:
        envs.append("OMP_NUM_THREADS=1")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    elif "worker" == job_name:
        envs.append("OMP_NUM_THREADS=6")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    elif "evaluator" == job_name or "chief" == job_name:
        envs.append("OMP_NUM_THREADS=1")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    else:
        envs.append("OMP_NUM_THREADS=1")
        envs.append("KMP_BLOCKTIME=0")
        envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
    return envs
[ 0, 5, 6 ]
def METHOD_NAME(self):
    self.cv_SYSTEM.goto_state(OpSystemState.OS)
    exist_cfg = self.cv_HMC.get_lpar_cfg()
    self.des_hp = int(exist_cfg.get('desired_num_huge_pages', 0))
    self.min_hp = int(exist_cfg.get('min_num_huge_pages', 0))
    self.max_hp = int(exist_cfg.get('max_num_huge_pages', 0))
    self.os_level = self.cv_SYSTEM.cv_HOST.host_get_OS_Level()
    self.obj = OpTestInstallUtil.InstallUtil()
    self.obj.update_kernel_cmdline(self.os_level,
                                   "default_hugepagesz=16G hugepagesz=16G hugepages=%s" % str(self.des_hp + 2),
                                   "",
                                   reboot=True,
                                   reboot_cmd=True)
    self.cv_SYSTEM.goto_state(OpSystemState.OFF)
    self.cv_HMC.poweroff_system()
    self.cv_FSP.cv_ASM.configure_hugepages(self.des_hp + 5)
[ 102, 15827 ]
def METHOD_NAME(data_file_name):
    f_ext = pathlib.Path(data_file_name).suffix
    if f_ext == ".pvd":
        return paraview.simple.PVDReader(FileName=data_file_name)
    else:
        return paraview.simple.ExodusIIReader(FileName=data_file_name)
[ 1452, 365, 171 ]
def METHOD_NAME(self):
    field = random.choice(['name', 'price'])
    order = random.choice(['asc', 'desc'])
    return self.request('/store-api/search', name='search-sorting', parameters={'search': random.choice(self.context.keywords), 'order': field + '-' + order})
[ 1070, 41, 3053 ]