text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
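Each row below pairs a text field (a code snippet in which the target identifier has been replaced by the placeholder METHOD_NAME) with an ids field (a short sequence of integers). As a minimal sketch only, assuming the rows are available in memory (the datasets library call is an illustrative assumption, not part of this dump; the two sample rows echo entries that appear further down), data with this schema could be wrapped and iterated like this:

```python
from datasets import Dataset

# Minimal sketch: two rows mirroring the schema above
# (string column "text", integer-sequence column "ids").
rows = {
    "text": [
        'def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")',
        "def METHOD_NAME(self): return self.file_path",
    ],
    "ids": [[156], [19, 171, 157]],
}

ds = Dataset.from_dict(rows)

for example in ds:
    # Each example is a dict holding one masked snippet and its id sequence.
    print(len(example["ids"]), example["text"])
```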
def METHOD_NAME(self) -> LLMMetadata: return get_llm_metadata(self._llm)
[ 773 ]
def METHOD_NAME(name): '''Checks if a package has been registered with five:registerPackage.''' from OFS.metaconfigure import has_package return has_package(name)
[ 220, 360 ]
def METHOD_NAME(self): return { 'type': 'hivealerter', 'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '') }
[ 19, 100 ]
async def METHOD_NAME(ep): if not self.args.enable_am: if self.args.reuse_alloc: recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1")) assert recv_msg.nbytes == self.args.n_bytes for i in range(self.args.n_iter + self.args.n_warmup_iter): if self.args.enable_am: recv = await ep.am_recv() await ep.am_send(recv) else: if not self.args.reuse_alloc: recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1")) await ep.recv(recv_msg) await ep.send(recv_msg) await ep.close() lf.close()
[ 163, 1519 ]
def METHOD_NAME(self): """Test that mlb_transform adds new transformed columns when column is empty""" # Arrange test_df = pd.DataFrame( { "id": [1, 2], "mapping_types": [ [], [], ], } ) # Act transformed_df = self.service.mlb_transform( test_df, "mapping_types", "mapping_types_" ) # Assert self.assertEqual(transformed_df.shape, (2, 1)) self.assertEqual(transformed_df["id"].tolist(), [1, 2])
[ 9, 12810, 1053, 150, 80, 514, 1951 ]
async def METHOD_NAME(job): while not job.done(): await asyncio.sleep(0.2)
[ 618 ]
def METHOD_NAME(self): self.worldNP = render.attach_new_node('World') # World self.debugNP = self.worldNP.attach_new_node(BulletDebugNode('Debug')) self.debugNP.show() self.debugNP.node().show_wireframe(True) self.debugNP.node().show_constraints(True) self.debugNP.node().show_bounding_boxes(False) self.debugNP.node().show_normals(False) self.world = BulletWorld() self.world.set_gravity((0, 0, -9.81)) self.world.set_debug_node(self.debugNP.node()) # Box A shape = BulletBoxShape((0.5, 0.5, 0.5)) bodyA = BulletRigidBodyNode('Box A') bodyNP = self.worldNP.attach_new_node(bodyA) bodyNP.node().add_shape(shape) bodyNP.set_collide_mask(BitMask32.all_on()) bodyNP.set_pos(-1, 0, 4) visNP = loader.load_model('models/box.egg') visNP.clear_model_nodes() visNP.reparent_to(bodyNP) self.world.attach(bodyA) # Box B shape = BulletBoxShape((0.5, 0.5, 0.5)) bodyB = BulletRigidBodyNode('Box B') bodyNP = self.worldNP.attach_new_node(bodyB) bodyNP.node().add_shape(shape) bodyNP.node().set_mass(1.0) bodyNP.node().set_deactivation_enabled(False) bodyNP.node().setLinearDamping(0.6) bodyNP.node().setAngularDamping(0.6) bodyNP.set_collide_mask(BitMask32.all_on()) bodyNP.set_pos(2, 0, 0) visNP = loader.load_model('models/box.egg') visNP.clear_model_nodes() visNP.reparent_to(bodyNP) self.world.attach(bodyB) # Spherical Constraint pivotA = (2, 0, 0) pivotB = (0, 0, 4) joint = BulletSphericalConstraint(bodyA, bodyB, pivotA, pivotB) joint.set_debug_draw_size(2.0) self.world.attach(joint)
[ 102 ]
def METHOD_NAME(dataset, config, tuner, log): """ Using the given tuner, tune a random forest within the given time constraint. This function uses cross validation score as the feedback score to the tuner. The search space on which tuners search on is defined above empirically as a global variable. """ limit_type, trial_limit = config.framework_params['limit_type'], None if limit_type == 'ntrials': trial_limit = int(config.framework_params['trial_limit']) X_train, X_test = preprocess_mlp(dataset, log) y_train, y_test = dataset.train.y, dataset.test.y is_classification = config.type == 'classification' estimator = MLPClassifier if is_classification else MLPRegressor best_score, best_params, best_model = None, None, None score_higher_better = True tuner.update_search_space(SEARCH_SPACE) start_time = time.time() trial_count = 0 intermediate_scores = [] intermediate_best_scores = [] # should be monotonically increasing while True: try: param_idx, cur_params = tuner.generate_parameters() if cur_params is not None and cur_params != {}: trial_count += 1 train_params = cur_params.copy() if 'TRIAL_BUDGET' in cur_params: train_params.pop('TRIAL_BUDGET') log.info("Trial {}: \n{}\n".format(param_idx, train_params)) cur_model = estimator(random_state=config.seed, **train_params) # Here score is the output of score() from the estimator cur_score = cross_val_score(cur_model, X_train, y_train) cur_score = np.mean(cur_score) if np.isnan(cur_score): cur_score = 0 log.info("Score: {}\n".format(cur_score)) if best_score is None or (score_higher_better and cur_score > best_score) or (not score_higher_better and cur_score < best_score): best_score, best_params, best_model = cur_score, cur_params, cur_model intermediate_scores.append(cur_score) intermediate_best_scores.append(best_score) tuner.receive_trial_result(param_idx, cur_params, cur_score) if limit_type == 'time': current_time = time.time() elapsed_time = current_time - start_time if elapsed_time >= config.max_runtime_seconds: break elif limit_type == 'ntrials': if trial_count >= trial_limit: break except: break # This line is required to fully terminate some advisors tuner.handle_terminate() log.info("Tuning done, the best parameters are:\n{}\n".format(best_params)) # retrain on the whole dataset with Timer() as training: best_model.fit(X_train, y_train) predictions = best_model.predict(X_test) probabilities = best_model.predict_proba(X_test) if is_classification else None return probabilities, predictions, training, y_test, intermediate_scores, intermediate_best_scores
[ 22, 7184 ]
def METHOD_NAME(self) -> str: """ The provisioned state of the resource. """ return pulumi.get(self, "provisioning_state")
[ 1994, 551 ]
def METHOD_NAME(self): le1, re1 = self.dobj1.METHOD_NAME() if self.op == "NOT": return le1, re1 else: le2, re2 = self.dobj2.METHOD_NAME() return np.minimum(le1, le2), np.maximum(re1, re2)
[ 19, 2739 ]
def METHOD_NAME(cli_ctx, aks, resource_group_name): from msrestazure.tools import is_valid_resource_id aks_is_id = is_valid_resource_id(aks) if aks_is_id: return aks if aks and not aks_is_id: from msrestazure.tools import resource_id from azure.cli.core.commands.client_factory import get_subscription_id return resource_id( subscription=get_subscription_id(cli_ctx), resource_group=resource_group_name, namespace='Microsoft.ContainerService', type='managedClusters', name=aks) raise ArgumentUsageError('Usage error: --aks')
[ 187, 3796, 147 ]
def METHOD_NAME(axis, item_length, loc, replace): data = test_data_values[0] def post_fn(df): return ( (df.iloc[:, :-item_length], df.iloc[:, -item_length:]) if axis else (df.iloc[:-item_length, :], df.iloc[-item_length:, :]) ) def get_loc(frame, loc): locs_dict = { "first": 0, "first + 1": 1, "middle": len(frame.axes[axis]) // 2, "penult": len(frame.axes[axis]) - 1, "last": len(frame.axes[axis]), } return locs_dict[loc] def get_reference(df, value, loc): if axis == 0: first_mask = df.iloc[:loc] if replace: loc += 1 second_mask = df.iloc[loc:] else: first_mask = df.iloc[:, :loc] if replace: loc += 1 second_mask = df.iloc[:, loc:] return pandas.concat([first_mask, value, second_mask], axis=axis) md_frames, pd_frames = create_test_dfs(data, post_fn=post_fn) md_item1, md_item2 = md_frames pd_item1, pd_item2 = pd_frames index_loc = get_loc(pd_item1, loc) pd_res = get_reference(pd_item1, loc=index_loc, value=pd_item2) md_res = md_item1._query_compiler.insert_item( axis=axis, loc=index_loc, value=md_item2._query_compiler, replace=replace ).to_pandas() df_equals( md_res, pd_res, # This test causes an empty slice to be generated thus triggering: # https://github.com/modin-project/modin/issues/5974 check_dtypes=axis != 0, ) index_loc = get_loc(pd_item2, loc) pd_res = get_reference(pd_item2, loc=index_loc, value=pd_item1) md_res = md_item2._query_compiler.insert_item( axis=axis, loc=index_loc, value=md_item1._query_compiler, replace=replace ).to_pandas() df_equals( md_res, pd_res, # This test causes an empty slice to be generated thus triggering: # https://github.com/modin-project/modin/issues/5974 check_dtypes=axis != 0, )
[ 9, 408, 1024 ]
async def METHOD_NAME(self) -> bool: # TODO: Log info message model_uri = await get_model_uri(self._settings) self._model = mlflow.pyfunc.load_model(model_uri) self._input_schema = self._model.metadata.get_input_schema() self._signature = self._model.metadata.signature self._sync_metadata() return True
[ 557 ]
def METHOD_NAME(self, METHOD_NAME=None): self._norm_action_space.METHOD_NAME(METHOD_NAME) self._observation_space.METHOD_NAME(METHOD_NAME)
[ 484 ]
def METHOD_NAME(backend): """Check measurements when simulating noise with repeated execution.""" thetas = np.random.random(5) c = models.Circuit(5) c.add((gates.RX(i, t) for i, t in enumerate(thetas))) c.add( gates.PauliNoiseChannel(i, list(zip(["Y", "Z"], [0.2, 0.4]))) for i in range(5) ) c.add(gates.M(*range(5))) backend.set_seed(123) result = backend.execute_circuit(c, nshots=20) samples = result.samples() backend.set_seed(123) target_samples = [] for _ in range(20): noiseless_c = models.Circuit(5) noiseless_c.add((gates.RX(i, t) for i, t in enumerate(thetas))) for i in range(5): if backend.np.random.random() < 0.2: noiseless_c.add(gates.Y(i)) if backend.np.random.random() < 0.4: noiseless_c.add(gates.Z(i)) noiseless_c.add(gates.M(*range(5))) result = backend.execute_circuit(noiseless_c, nshots=1) target_samples.append(backend.to_numpy(result.samples())) target_samples = np.concatenate(target_samples, axis=0) backend.assert_allclose(samples, target_samples)
[ 9, 6529, 41, 15641, 802 ]
def METHOD_NAME(self, field, lookup, value): return models.Q( **{field.get_attname(self.queryset.model) + "__" + lookup: value} )
[ 356, 1906 ]
def METHOD_NAME(self):
[ 9, 539, 106, 47, 6820 ]
def METHOD_NAME(self): model_id = 'damo/multi-modal_efficient-diffusion-tuning-control-lora' model_revision = 'v1.0.2' def cfg_modify_fn(cfg): cfg.train.max_epochs = self.max_epochs cfg.train.lr_scheduler.T_max = self.max_epochs cfg.model.inference = False return cfg kwargs = dict( model=model_id, model_revision=model_revision, work_dir=self.tmp_dir, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, cfg_modify_fn=cfg_modify_fn) trainer = build_trainer( name=Trainers.efficient_diffusion_tuning, default_args=kwargs) trainer.train() result = trainer.evaluate() print( f'Efficient-diffusion-tuning-control-lora train output: {result}.') results_files = os.listdir(self.tmp_dir) self.assertIn(f'{trainer.timestamp}.log.json', results_files) for i in range(self.max_epochs): self.assertIn(f'epoch_{i+1}.pth', results_files)
[ 9, 5579, 5212, 3290, 401, 402, 849 ]
def METHOD_NAME(): """Main Foreman thread that helps manage the Batch job queue. Will find jobs that failed, hung, or got lost and requeue them. Also will queue up Janitor jobs regularly to free up disk space. It does so on a loop forever that won't spin faster than MIN_LOOP_TIME, but it may spin slower than that. """ # last_janitorial_time = timezone.now() last_dbclean_time = timezone.now() while True: # Perform two heartbeats, one for the logs and one for Monit: logger.info("The Foreman's heart is beating, but he does not feel.") # Write the health file for Monit to check now_secs = int(time.time()) with open("/tmp/foreman_last_time", "w") as timefile: timefile.write(str(now_secs)) start_time = timezone.now() # Requeue jobs of each failure class for each job type. # The order of processor -> downloader -> surveyor is intentional. # Processors go first so we process data sitting on disk. # Downloaders go first so we actually queue up the jobs in the database. # Surveyors go last so we don't end up with tons and tons of unqueued jobs. requeuing_functions_in_order = [ retry_failed_processor_jobs, retry_hung_processor_jobs, retry_lost_processor_jobs, retry_unqueued_processor_jobs, retry_failed_downloader_jobs, retry_hung_downloader_jobs, retry_lost_downloader_jobs, retry_unqueued_downloader_jobs, retry_failed_survey_jobs, retry_hung_survey_jobs, retry_lost_survey_jobs, retry_unqueued_survey_jobs, ] for function in requeuing_functions_in_order: try: function() except Exception: logger.exception("Caught exception in %s: ", function.__name__) if settings.RUNNING_IN_CLOUD: # Disable this for now because this will trigger regardless of # whether or not we have an instance to clean up, which means that # we could spin up an instance just to run a janitor job. # if timezone.now() - last_janitorial_time > JANITOR_DISPATCH_TIME: # send_janitor_jobs() # last_janitorial_time = timezone.now() if timezone.now() - last_dbclean_time > DBCLEAN_TIME: clean_database() last_dbclean_time = timezone.now() loop_time = timezone.now() - start_time if loop_time < MIN_LOOP_TIME: remaining_time = MIN_LOOP_TIME - loop_time if remaining_time.seconds > 0: time.sleep(remaining_time.seconds)
[ 1863, 494 ]
def METHOD_NAME(self, model, path): """The function is used by tune strategy class for saving model. Args: model (object): The model to saved. path (string): The path where to save. """ raise NotImplementedError
[ 73 ]
def METHOD_NAME(level, msg, to_stdout=True): with manage_handler("console", to_stdout): logger.log(level, msg)
[ 169, 772 ]
def METHOD_NAME( wav_scp, data_root="MISP_121h", output_root="MISP_121h_WPE_", processing_id=None, processing_num=None, ): sampling_rate = 16000 iterations = 5 stft_options = dict( size=512, shift=128, window_length=None, fading=True, pad=True, symmetric_window=False, ) with codecs.open(wav_scp, "r") as handle: lines_content = handle.readlines() wav_lines = [*map(lambda x: x[:-1] if x[-1] in ["\n"] else x, lines_content)] for wav_idx in range(len(wav_lines)): if processing_id is None: processing_token = True else: if wav_idx % processing_num == processing_id: processing_token = True else: processing_token = False if processing_token: file_list = wav_lines[wav_idx].split(" ") name, wav_list = file_list[0], file_list[1:] file_exist = True for wav_path in wav_list: file_exist = file_exist and os.path.exists( wav_path.replace(data_root, output_root) ) if not file_exist: break if not file_exist: print("wait to process {} : {}".format(wav_idx, wav_list[0])) signal_list = [] for f in wav_list: _, data = wf.read(f) if data.dtype == np.int16: data = np.float32(data) / 32768 signal_list.append(data) min_len = len(signal_list[0]) max_len = len(signal_list[0]) for i in range(1, len(signal_list)): min_len = min(min_len, len(signal_list[i])) max_len = max(max_len, len(signal_list[i])) if min_len != max_len: for i in range(len(signal_list)): signal_list[i] = signal_list[i][:min_len] y = np.stack(signal_list, axis=0) Y = stft(y, **stft_options).transpose(2, 0, 1) Z = wpe(Y, iterations=iterations, statistics_mode="full").transpose( 1, 2, 0 ) z = istft(Z, size=stft_options["size"], shift=stft_options["shift"]) for d in range(len(signal_list)): store_path = wav_list[d].replace(data_root, output_root) if not os.path.exists(os.path.split(store_path)[0]): os.makedirs(os.path.split(store_path)[0], exist_ok=True) tmpwav = np.int16(z[d, :] * 32768) wf.write(store_path, sampling_rate, tmpwav) else: print("file exist {} : {}".format(wav_idx, wav_list[0])) return None
[ 10481, 1794 ]
def METHOD_NAME(config: Any, load_site_user: bool = True): conf = _get_config(config) assert 'ckan' not in dir() # otherwise loggers would be disabled # We have now loaded the config. Now we can import ckan for the # first time. from ckan.config.environment import load_environment load_environment(conf) # Set this internal test request context with the configured environment so # it can be used when calling url_for from the CLI. global _cli_test_request_context app = make_app(conf) flask_app = app.apps['flask_app']._wsgi_app _cli_test_request_context = flask_app.test_request_context() registry = Registry() registry.prepare() site_user = None if model.user_table.exists() and load_site_user: site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {}) ## give routes enough information to run url_for parsed = urlparse( cast(str, conf.get('ckan.site_url', 'http://0.0.0.0'))) request_config = routes.request_config() request_config.host = parsed.netloc + parsed.path request_config.protocol = parsed.scheme return site_user
[ 557, 200 ]
def METHOD_NAME(signal, frame): print('Pressed Ctrl+C!') sys.exit(0)
[ 900, 1519 ]
def METHOD_NAME(request): """Renders the report subscriptions""" return render(request, 'business/frag-report-items.html')
[ 338, 339, 8614 ]
def METHOD_NAME(self): kf = 1e4 self.solve(kf, "permeable")
[ 9, 9035, -1 ]
def METHOD_NAME(self) -> None: if not self.root_widget: return bui.containerwidget( edit=self.root_widget, transition=( 'out_left' if self._transition_out is None else self._transition_out ), ) if self._action is not None: self._action()
[ 1217 ]
def METHOD_NAME(self, direct_sampling_mode): self.direct_sampling_mode = direct_sampling_mode
[ 0, 4234, 5046, 854 ]
def METHOD_NAME(setup): data = np.random.rand(10, 20) a = tensor(data, chunk_size=6) b = a.view() b[:5] = 10 npa = data.copy() npb = npa.view() npb[:5] = 10 np.testing.assert_array_equal(b.execute(), npb) np.testing.assert_array_equal(a.execute(), npa) data = np.random.rand(10, 20) a = tensor(data.copy(), chunk_size=6) b = a.copy() b[:5] = 10 npa = data.copy() npb = npa.copy() npb[:5] = 10 np.testing.assert_array_equal(b.execute(), npb) np.testing.assert_array_equal(a.execute(), npa) a = tensor(data.copy(), chunk_size=6) b = a[:5, :4] c = b.copy() c[0, 0] = 10 npa = data.copy() npb = npa[:5, :4] npc = npb.copy() npc[0, 0] = 10 np.testing.assert_array_equal(c.execute(), npc) np.testing.assert_array_equal(a.execute(), npa)
[ 9, 215, 61, 1179 ]
def METHOD_NAME(self, libraries): """Ensure that the list of libraries is valid. `library` is presumably provided as a command option 'libraries'. This method checks that it is a list of 2-tuples, where the tuples are (library_name, build_info_dict). Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise. """ if not isinstance(libraries, list): raise DistutilsSetupError( "'libraries' option must be a list of tuples") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: raise DistutilsSetupError( "each element of 'libraries' must a 2-tuple") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError("bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "must be a dictionary (build info)")
[ 250, 3106, 245 ]
def METHOD_NAME(dummy_request): from encoded.batch_download import is_cart_search dummy_request.environ['QUERY_STRING'] = ( 'type=Experiment&limit=all' ) assert not is_cart_search(dummy_request) dummy_request.environ['QUERY_STRING'] = ( 'type=Experiment&cart=/carts/abc/&field=@id' ) assert is_cart_search(dummy_request)
[ 9, 2277, 136, 137, 9697, 1070 ]
def METHOD_NAME(): '''Call the setenv procedure via a user defined procedure''' code = ''' pro USER_SETENV, key, val SETENV, key + '=' + val print, GETENV(key) end ''' s = 'blabla' with GDLFile(code) as name: GDL.pro(name, 'T1', s)
[ 9, 12235, 21 ]
def METHOD_NAME(self): """Test JWKS request with HS256""" provider = OAuth2Provider.objects.create( name="test", client_id="test", authorization_flow=create_test_flow(), redirect_uris="http://local.invalid", ) app = Application.objects.create(name="test", slug="test", provider=provider) response = self.client.get( reverse("authentik_providers_oauth2:jwks", kwargs={"application_slug": app.slug}) ) self.assertJSONEqual(response.content.decode(), {})
[ 9, 1198 ]
def METHOD_NAME(self, entry_str, report): mapping, ctype = self.get_mapping_and_type(report.get('feed.url')) document = ElementTree.fromstring(entry_str) event = self.new_event(report) extra = {} for entry in document.iter(tag='entry'): for item in entry: key = item.tag value = item.text if not value: continue if value == 'undef': continue if key is None: self.logger.warning('Value without key found, skipping the' ' value: %r', value) continue key_orig = key key = mapping[key] if key == "__IGNORE__": continue if key == "source.fqdn" and event.is_valid('source.ip', value): continue if key == "source.ip" and event.is_valid('source.fqdn', value): continue if key == "time.source": try: value = (datetime.utcfromtimestamp(int(value)).isoformat() + " UTC") except TypeError as e: self.logger.warning( 'No valid "first" field epoch time found, skipping ' 'timestamp. Got {} {}'.format(value, e)) continue if key == "source.asn": if value.upper().startswith("ASNA"): continue for asn in value.upper().split(','): if asn.startswith("AS"): value = asn.split("AS")[1] break if key == "status": if value == 'down': value = 'offline' elif value == 'up': value = 'online' if key == 'extra': extra[key_orig] = value continue event.add(key, value) if extra: event.add('extra', extra) event.add('classification.type', ctype) event.add("raw", entry_str) return event
[ 214, 534 ]
def METHOD_NAME(self): self.delay = True if self.stream: if self.stream.tell() > 0: self.doRollover()
[ 538, 14243 ]
def METHOD_NAME(self):
[ 9, 554, 8609, 303, 3149, 554, 8609 ]
def METHOD_NAME(context, sequence): """ Just a mapper function for the internal _internal_check_array_additions """ if sequence.array_type not in ('list', 'set'): # TODO also check for dict updates return NO_VALUES return _internal_check_array_additions(context, sequence)
[ 250, 877, 9630 ]
def METHOD_NAME(self):
[ 34, 1537, 61, 1190 ]
def METHOD_NAME(self): return str(self.options["binutils"].target_arch)
[ 1030, 2837 ]
def METHOD_NAME(self, name): """ Return a class for the requested provider. :rtype: provider class or ``None`` :return: A class corresponding to the requested provider or ``None`` if the provider was not found. """ log.debug("Returning a class for the %s provider", name) impl = self.list_providers().get(name) if impl: log.debug("Returning provider class for %s", name) return impl["class"] else: log.debug("Provider with the name: %s not found", name) return None
[ 19, 2275, 2 ]
def METHOD_NAME(tau, amp, pha, ngood=20): """Function to minimize for FFTFIT (Taylor 1992), eqn. A7. Input ----- tau: float Trial phase shift amp: array of floats Absolute value of the product between the Fourier transform of the pulse profile and of the template (|P S|) pha: array of floats Difference between the angles of P and S Results ------- res: float Result of the function """ good = slice(1, ngood + 1) idx = np.arange(1, ngood + 1, dtype=int) res = np.sum(idx * amp[good] * np.sin(-pha[good] + TWOPI * idx * tau)) return res
[ 2192, 3200, 717 ]
def METHOD_NAME(self, updates, msg=None): """ Asserts that self.get_updates() matches updates, ignoring elapsed time data """ my_updates = [] for update in self.get_updates(): try: if "elapsed" in update: continue except Exception: pass my_updates.append(update) self.assertEqual(my_updates, updates, msg)
[ 638, 682 ]
def METHOD_NAME(self): # create the TracedCache instance for a Flask app Cache = get_traced_cache(self.tracer, service=self.SERVICE) app = Flask(__name__) config = {"CACHE_TYPE": "redis", "CACHE_REDIS_PORT": 2230, "CACHE_REDIS_HOST": "127.0.0.1"} cache = Cache(app, config=config) # use a wrong redis connection with pytest.raises(ConnectionError) as ex: cache.get(u"á_complex_operation") # ensure that the error is not caused by our tracer assert "127.0.0.1:2230. Connection refused." in ex.value.args[0] spans = self.pop_spans() # an error trace must be sent assert len(spans) == 1 span = spans[0] assert span.service == self.SERVICE assert span.resource == "get" assert span.name == "flask_cache.cmd" assert span.span_type == "cache" assert span.get_tag(CACHE_BACKEND) == "redis" assert span.get_tag(net.TARGET_HOST) == "127.0.0.1" assert span.get_tag("component") == "flask_cache" assert span.get_metric("network.destination.port") == 2230 assert span.error == 1
[ 9, 2485, 596, 3184, 41, 385, 909 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(test, checks=None): """TrunkedNetwork delete operation""" if checks is None: checks = [] test.cmd( "az networkcloud trunkednetwork delete --name {name} --resource-group {rg} -y" )
[ 367, 34 ]
def METHOD_NAME(skale, dutils): app = Flask(__name__) app.register_blueprint(schains_bp) def handler(sender, **kwargs): g.docker_utils = dutils g.wallet = skale.wallet g.config = NodeConfig() g.config.id = 1 with appcontext_pushed.connected_to(handler, app): SChainRecord.create_table() yield app.test_client() SChainRecord.drop_table()
[ 2854, 4848 ]
def METHOD_NAME( patched_run_command, _instance, role_file, _patched_ansible_galaxy_has_requirements_file, ): _instance.execute() assert _instance._sh_command is not None assert patched_run_command.call_count == 1
[ 9, 750, 6392 ]
def METHOD_NAME(self): djangocms_blog.admin.register_extension(PostPlaceholderExtension) djangocms_blog.admin.unregister_extension(PostPlaceholderExtension)
[ 9, 372, 4126, 2916 ]
def METHOD_NAME(self, line): 'Magic that set perminant options for sos and sospaste' # do not return __builtins__ beacuse it is too long... if line.strip(): print('sos options set to "{}"'.format(line.strip())) self.options = line.strip() + ' ' else: return runfile(script=None, code=None)
[ -1 ]
def METHOD_NAME(self) -> int: """Generate random passport number. :return: Number. :Example: 560430 """ return self.random.randint(100000, 999999)
[ 14365, 106 ]
def METHOD_NAME(context): rv3d = context.space_data.region_3d view_inv = rv3d.view_matrix.to_3x3() return view_inv.normalized()
[ 1192, 1179, 5354 ]
def METHOD_NAME(self): try: customer = Customer.create({"first_name":"Waldo"}).customer PayPalAccount.delete("../../{}".format(customer.id)) except NotFoundError: pass found_customer = Customer.find(customer.id) self.assertNotEqual(None, found_customer) self.assertEqual("Waldo", found_customer.first_name)
[ 9, 34, 1487, 41, 157, 4623 ]
def METHOD_NAME(metric, value, kwargs, region_name: Optional[str] = None): # publish metric only if CloudWatch service is available if not config.service_port("cloudwatch"): return cw_client = connect_to(region_name=region_name).cloudwatch try: cw_client.put_metric_data( Namespace="AWS/Lambda", MetricData=[ { "MetricName": metric, "Dimensions": dimension_lambda(kwargs), "Timestamp": datetime.utcnow().replace(tzinfo=timezone.utc), "Value": value, } ], ) except Exception as e: LOG.info('Unable to put metric data for metric "%s" to CloudWatch: %s', metric, e)
[ 2411, 1778, 1341 ]
def METHOD_NAME(self): kq = select.kqueue() a, b = socket.socketpair() a.send(b'foo') event1 = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) event2 = select.kevent(b, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) r = kq.control([event1, event2], 1, 1) self.assertTrue(r) self.assertFalse(r[0].flags & select.KQ_EV_ERROR) self.assertEqual(b.recv(r[0].data), b'foo') a.close() b.close() kq.close()
[ 9, 637 ]
def METHOD_NAME(self, option): "Get font attribute" return self._call("font", "config", self.name, "-"+option)
[ 13860 ]
def METHOD_NAME(self, pk, sig, m): M = group.hash(m) if pk['PP2'] == (pair(sig['s1'],pk['gd11']*(pk['gd21']**M)) * pair(sig['s2'],pk['gd12']*(pk['gd22']**M)) * pair(sig['s3'],pk['gd13']*(pk['gd23']**M)) * pair(sig['s4'],pk['gd14']*(pk['gd24']**M)) ): return True return False
[ 1162 ]
def METHOD_NAME(self, input, index=0, factor=1): # fixing the quantization bits based on the training steps # when reducing 1 bit at each period, we increase the period # to go slowly toward the target quantization bits # the period and starting bit can be configured if input.start_bits != input.target_bits: if self.qsteps >= input.q_period: self.quantize_real_ratio = 1.0 input.q_period <<= 1 input.q_period *= factor input.start_bits -= 1 if self.q_verbose: logger.info( f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}' ) assert (input.start_bits >= input.target_bits), \ 'Quantization bit is lower than target precision bits!' if self.use_quantizer_kernel: if input.start_bits <= 2: raise ValueError('Quantization bit is too low, please do it without quantization kernel!') input_q = ds_quantizer(input.data.clone(), self.q_groups, input.start_bits, asym=False if self.q_type == 'symmetric' else True, sr=False if self.q_rounding == 'nearest_neighbor' else True) else: if input.start_bits >= 3: input_flat = self.quantize_highbit(input.data, input.start_bits) elif input.start_bits == 2: assert self.q_type == 'symmetric', 'Quantization type is not symmetric!' assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!' input_flat = self.quantize_tenary(input.data) elif input.start_bits == 1: assert self.q_type == 'symmetric', 'Quantization type is not symmetric!' assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!' input_flat = self.quantize_binary(input.data) if self.use_quantizer_kernel: return self.mixed_fp16_quantize(input.data, input_q, index) else: if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1: input_flat = self.quantize_real_ratio * input.data + \ (1 - self.quantize_real_ratio) * input_flat return input_flat
[ 226, 9293 ]
def METHOD_NAME(self): """Generate tasks.""" self.site.scan_posts() yield self.group_task() for lang in self.site.config['TRANSLATIONS'].keys(): destination = os.path.join(self.site.config['OUTPUT_FOLDER'], 'sidebar-{0}.inc'.format(lang)) template = 'sidebar.tmpl' yield self._prepare_sidebar(destination, lang, template)
[ 370, 620 ]
def METHOD_NAME(self): return "POST"
[ 103 ]
def METHOD_NAME(self): with self._isolate_repo("single_tool"): self._check_exit_code(["lint", "--skip", "citations"]) with self._isolate_repo("single_tool"): self._check_exit_code(["lint"], exit_code=1)
[ 9, 3060, 235 ]
def METHOD_NAME(cls, mode): if mode == 'reference': return 0b01 elif mode == 'measure': return 0b10 else: raise ValueError('Trying to set warm amplifier board switch to ' 'invalid mode: {}'.format(mode))
[ 2284, 385, -1 ]
async def METHOD_NAME( *, session: Session = Depends(get_session), current_user: User = Depends(get_current_active_user),
[ 136, 171 ]
def METHOD_NAME( self, _, posterior_storage, w_container, run_id, progress_callback=None ): w_container.iteration_nr += 1
[ 3201, 15165, 86 ]
def METHOD_NAME(self): """Assert a SMTPHandler is added to the anitya logger when ``EMAIL_ERRORS=True``.""" config = { "DB_URL": "sqlite://", "SOCIAL_AUTH_USER_MODEL": "anitya.db.models.User", "EMAIL_ERRORS": True, "SMTP_SERVER": "smtp.example.com", "ADMIN_EMAIL": "[email protected]", } anitya_logger = logging.getLogger("anitya") anitya_logger.handlers = [] app.create(config) self.assertEqual(1, len(anitya_logger.handlers)) self.assertEqual("smtp.example.com", anitya_logger.handlers[0].mailhost) self.assertEqual(["[email protected]"], anitya_logger.handlers[0].toaddrs)
[ 9, 487, 200 ]
def METHOD_NAME(self, tb): length = 0 while tb and not self._is_relevant_tb_level(tb): length += 1 tb = tb.tb_next return length
[ 29, 8816, 6345, 1043 ]
def METHOD_NAME(): global mol, mf mol = gto.Mole() mol.verbose = 0 mol.output = None mol.atom = [ ["C", (-0.65830719, 0.61123287, -0.00800148)], ["C", ( 0.73685281, 0.61123287, -0.00800148)], ["C", ( 1.43439081, 1.81898387, -0.00800148)], ["C", ( 0.73673681, 3.02749287, -0.00920048)], ["C", (-0.65808819, 3.02741487, -0.00967948)], ["C", (-1.35568919, 1.81920887, -0.00868348)], ["H", (-1.20806619, -0.34108413, -0.00755148)], ["H", ( 1.28636081, -0.34128013, -0.00668648)], ["H", ( 2.53407081, 1.81906387, -0.00736748)], ["H", ( 1.28693681, 3.97963587, -0.00925948)], ["H", (-1.20821019, 3.97969587, -0.01063248)], ["H", (-2.45529319, 1.81939187, -0.00886348)],] mol.basis = {'H': '6-31g', 'C': '6-31g',} mol.max_memory = 20 mol.build() mf = scf.RHF(mol) mf.conv_tol = 1e-12 mf.scf() mo = mf.mo_coeff.copy() mo[:,[15,16,17,18]] = mf.mo_coeff[:,[17,18,15,16]]
[ 0, 1, 298 ]
def METHOD_NAME( datetime, collection_shortname, version, sort_key, time_key ): """ Validates the collection datetime against the datetime of the last granule in the collection Args: datetime (str): datetime string collection_shortname (str): ShortName of the parent collection sort_key (str): choice of start_date and end_date time_key (str): choice of time_end and time_start Returns: (dict) An object with the validity of the check and the instance """ cmr_prms = set_cmr_prms( { "short_name": collection_shortname, "version": version, "sort_key[]": sort_key, }, "json", "granules", ) granules = cmr_request(cmr_prms) validity = True last_granule_datetime = None if len(granules["feed"]["entry"]) > 0: last_granule = granules["feed"]["entry"][0] last_granule_datetime = last_granule.get(time_key) validity = datetime == last_granule_datetime return {"valid": validity, "value": (datetime, last_granule_datetime)}
[ 187, 884, 5784, -1 ]
def METHOD_NAME(self): nt = self.id_data node_name = self.name tree_name = nt.name grease_pencil_name = tree_name + "_grease" # get grease pencil data gp = nt.grease_pencil if (node_name in gp.layers): layer = gp.layers[node_name] layer.frames[0].clear()
[ 2712, -1 ]
def METHOD_NAME(): if random_bool(): otp_secret = random_chars(16, string.ascii_uppercase + "234567") else: otp_secret = None is_totp = random_bool() if is_totp: hotp_counter = 0 if random_bool() else None else: hotp_counter = random.randint(0, 10000) if random_bool() else None last_token = random_chars(6, string.digits) if random_bool() else None params = { "username": random_username(), "pw_salt": random_bytes(1, 64, nullable=True), "pw_hash": random_bytes(32, 64, nullable=True), "is_admin": bool_or_none(), "otp_secret": otp_secret, "is_totp": is_totp, "hotp_counter": hotp_counter, "last_token": last_token, "created_on": random_datetime(nullable=True), "last_access": random_datetime(nullable=True), } sql = """INSERT INTO journalists (username, pw_salt, pw_hash, is_admin, otp_secret, is_totp, hotp_counter, last_token, created_on, last_access) VALUES (:username, :pw_salt, :pw_hash, :is_admin, :otp_secret, :is_totp, :hotp_counter, :last_token, :created_on, :last_access); """ db.engine.execute(text(sql), **params)
[ 238, 9345 ]
def METHOD_NAME(sess2, trans): # pylint: disable=unused-variable # Detecting whether this is indeed the nested transaction of the test if trans.nested and not trans._parent.nested: # pylint: disable=protected-access # Handle where test DOESN'T session.commit(), sess2.expire_all() sess.begin_nested()
[ 1141, 10484 ]
def METHOD_NAME(self): """Shortcut to the receipt of the receipt line.""" return extracted_data_from_ref(self.get('acq_receipt'), data='record')
[ 6157 ]
def METHOD_NAME(self, dev): """ Is an unused watchdog device """ if dev in self._watchdog_info_dict and self._verify_watchdog_device(dev): return True return False
[ 1205, 398 ]
def METHOD_NAME(self): """Validate studio domain id and user profile Get and validate studio domain id and user profile from NOTEBOOK_METADATA_FILE in studio environment. Set _valid_domain_and_user to True if validation succeeded. """ if not os.path.isfile(NOTEBOOK_METADATA_FILE): return with open(NOTEBOOK_METADATA_FILE, "rb") as f: metadata = json.loads(f.read()) self._domain_id = metadata.get("DomainId") self._user_profile_name = metadata.get("UserProfileName") if self._validate_domain_id() is True and self._validate_user_profile_name() is True: self._valid_domain_and_user = True else: logger.warning( "NOTEBOOK_METADATA_FILE detected but failed to get" "valid domain and user from it." )
[ 19, 1674, 61, 21 ]
def METHOD_NAME(): modified_notebook_path = os.path.join(local_notebooks_dir, "exercise_4_test.ipynb") nb = nbformat.read( os.path.join(local_notebooks_dir, "exercise_4.ipynb"), as_version=nbformat.NO_CONVERT, ) s3_path_cell = f's3_path = "{test_dataset_path}"\n' + download_taxi_dataset _replace_str( nb, 's3_path = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"', s3_path_cell, ) nbformat.write(nb, modified_notebook_path) _execute_notebook(modified_notebook_path)
[ 9, 3446, 1842 ]
def METHOD_NAME(self): """blockdev-add source image: protocol and format nodes only""" self.create_source_image() self._blockdev_add_image(self.params["source_images"])
[ 238, 1458, 660 ]
def METHOD_NAME() -> Set[utils.Connector]: """Find connectors that have modified bypass_reasons. Returns: Set[str]: Set of connector names e.g {"source-github"}: The set of GA connectors that have changed bypass_reasons. """ bypass_reason_changes = utils.get_changed_acceptance_test_config(diff_regex="bypass_reason") return bypass_reason_changes.intersection(find_changed_important_connectors())
[ 19, 8808, 2293, 1103 ]
def METHOD_NAME(): from moler.cmd.unix.grep import COMMAND_OUTPUT_with_file_path, COMMAND_RESULT_with_file_path data = COMMAND_OUTPUT_with_file_path result = COMMAND_RESULT_with_file_path return data, result
[ 462, 146, 61, 391, 1571, 41, 157 ]
def METHOD_NAME(self) -> tuple[str, int, str]: ...
[ 679 ]
def METHOD_NAME(node): """Returns whether a particular AST node is an ancestor path expression e.g. - `parent/host/thing` - `parent/host` """ if not isinstance(node, BinaryExpression) or node.op != '/': return False if isinstance(node.left, BinaryExpression): return METHOD_NAME(node.left) return isinstance(node.left, Step)
[ 137, 4490, 157, 1120 ]
def METHOD_NAME(dork): """ This method performs the effective search on Google providing the google dork and the Google session cookie """ if not dork: return None page = None data = None requestHeaders = {} responseHeaders = {} requestHeaders[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT) requestHeaders[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE requestHeaders[HTTP_HEADER.COOKIE] = GOOGLE_CONSENT_COOKIE try: req = _urllib.request.Request("https://www.google.com/ncr", headers=requestHeaders) conn = _urllib.request.urlopen(req) except Exception as ex: errMsg = "unable to connect to Google ('%s')" % getSafeExString(ex) raise SqlmapConnectionException(errMsg) gpage = conf.googlePage if conf.googlePage > 1 else 1 logger.info("using search result page #%d" % gpage) url = "https://www.google.com/search?" # NOTE: if consent fails, try to use the "http://" url += "q=%s&" % urlencode(dork, convall=True) url += "num=100&hl=en&complete=0&safe=off&filter=0&btnG=Search" url += "&start=%d" % ((gpage - 1) * 100) try: req = _urllib.request.Request(url, headers=requestHeaders) conn = _urllib.request.urlopen(req) requestMsg = "HTTP request:\nGET %s" % url requestMsg += " %s" % _http_client.HTTPConnection._http_vsn_str logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) page = conn.read() code = conn.code status = conn.msg responseHeaders = conn.info() responseMsg = "HTTP response (%s - %d):\n" % (status, code) if conf.verbose <= 4: responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) elif conf.verbose > 4: responseMsg += "%s\n%s\n" % (responseHeaders, page) logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) except _urllib.error.HTTPError as ex: try: page = ex.read() responseHeaders = ex.info() except Exception as _: warnMsg = "problem occurred while trying to get " warnMsg += "an error page information (%s)" % getSafeExString(_) logger.critical(warnMsg) return None except (_urllib.error.URLError, _http_client.error, socket.error, socket.timeout, socks.ProxyError): errMsg = "unable to connect to Google" raise SqlmapConnectionException(errMsg) page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE)) page = getUnicode(page) # Note: if decodePage call fails (Issue #4202) retVal = [_urllib.parse.unquote(match.group(1) or match.group(2)) for match in re.finditer(GOOGLE_REGEX, page, re.I)] if not retVal and "detected unusual traffic" in page: warnMsg = "Google has detected 'unusual' traffic from " warnMsg += "used IP address disabling further searches" if conf.proxyList: raise SqlmapBaseException(warnMsg) else: logger.critical(warnMsg) if not retVal: message = "no usable links found. What do you want to do?" 
message += "\n[1] (re)try with DuckDuckGo (default)" message += "\n[2] (re)try with Bing" message += "\n[3] quit" choice = readInput(message, default='1') if choice == '3': raise SqlmapUserQuitException elif choice == '2': url = "https://www.bing.com/search?q=%s&first=%d" % (urlencode(dork, convall=True), (gpage - 1) * 10 + 1) regex = BING_REGEX else: url = "https://html.duckduckgo.com/html/" data = "q=%s&s=%d" % (urlencode(dork, convall=True), (gpage - 1) * 30) regex = DUCKDUCKGO_REGEX try: req = _urllib.request.Request(url, data=getBytes(data), headers=requestHeaders) conn = _urllib.request.urlopen(req) requestMsg = "HTTP request:\nGET %s" % url requestMsg += " %s" % _http_client.HTTPConnection._http_vsn_str logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) page = conn.read() code = conn.code status = conn.msg responseHeaders = conn.info() page = decodePage(page, responseHeaders.get("Content-Encoding"), responseHeaders.get("Content-Type")) responseMsg = "HTTP response (%s - %d):\n" % (status, code) if conf.verbose <= 4: responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) elif conf.verbose > 4: responseMsg += "%s\n%s\n" % (responseHeaders, page) logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) except _urllib.error.HTTPError as ex: try: page = ex.read() page = decodePage(page, ex.headers.get("Content-Encoding"), ex.headers.get("Content-Type")) except socket.timeout: warnMsg = "connection timed out while trying " warnMsg += "to get error page information (%d)" % ex.code logger.critical(warnMsg) return None except: errMsg = "unable to connect" raise SqlmapConnectionException(errMsg) page = getUnicode(page) # Note: if decodePage call fails (Issue #4202) retVal = [_urllib.parse.unquote(match.group(1).replace("&amp;", "&")) for match in re.finditer(regex, page, re.I | re.S)] if not retVal and "issue with the Tor Exit Node you are currently using" in page: warnMsg = "DuckDuckGo has detected 'unusual' traffic from " warnMsg += "used (Tor) IP address" if conf.proxyList: raise SqlmapBaseException(warnMsg) else: logger.critical(warnMsg) return retVal
[ 1070 ]
def METHOD_NAME(self, id=None, arches=None, *args, **kw): try: osversion = OSVersion.by_id(id) except InvalidRequestError: flash(_(u"Invalid OSVersion ID %s" % id)) redirect(".") arch_objects = [Arch.by_id(arch) for arch in arches] if osversion.arches != arch_objects: osversion.arches = arch_objects flash(_(u"Changes Saved for %s" % osversion)) else: flash(_(u"No Changes for %s" % osversion)) redirect(".")
[ 73 ]
def METHOD_NAME(self): return self.file_path
[ 19, 171, 157 ]
def METHOD_NAME(name): """ Looks up a formatter class given a prefix to it. The names are sorted, and the first matching class is returned. """ for k in sorted(_FORMATTERS): if k.startswith(name): return _FORMATTERS[k]
[ 19, 2931 ]
def METHOD_NAME(fmt, *args): print("panic:", fmt % args, file=sys.stderr) sys.exit(1)
[ 2382 ]
def METHOD_NAME(bin_name, append_item): """Create an append operation dictionary. The append operation appends `append_item` to the value in bin_name. Args: bin (str): The name of the bin to be used. append_item: The value which will be appended to the item contained in the specified bin. Returns: A dictionary to be passed to operate or operate_ordered. """ return {"op": aerospike.OPERATOR_APPEND, "bin": bin_name, "val": append_item}
[ 1459 ]
def METHOD_NAME(self, offset: int, size: int, value: int): if offset == self.struct.ISR.offset: for bitmask in [ RTC_ISR.TAMP1F, RTC_ISR.TSOVF, RTC_ISR.TSF, RTC_ISR.WUTF, RTC_ISR.ALRBF, RTC_ISR.ALRAF, RTC_ISR.RSF ]: if value & bitmask == 0: self.instance.ISR &= ~bitmask self.instance.ISR = (self.instance.ISR & ~RTC_ISR.INIT) | (value & RTC_ISR.INIT) return data = (value).to_bytes(size, 'little') ctypes.memmove(ctypes.addressof(self.instance) + offset, data, size)
[ 77 ]
def METHOD_NAME(self, operation):
[ 74, 3081, 2206 ]
def METHOD_NAME(self, bidsdir): bidsdir = Path(bidsdir) if bidsdir.is_symlink(): raise ValueError("Will not write to symlink") bidsdir.mkdir(parents=True, exist_ok=True) bids_paths = set() dataset_description_path = bidsdir / "dataset_description.json" bids_paths.add(dataset_description_path) dataset_description = { "Name": self.database.sha1, "BIDSVersion": bids_version, "DatasetType": "raw", } with open(dataset_description_path, "w") as f: json.dump(dataset_description, f, indent=4) # image files for bids_path, file_path in self.file_paths.items(): assert bids_path is not None bids_path = Path(bidsdir) / bids_path bids_paths.add(bids_path) bids_path.parent.mkdir(parents=True, exist_ok=True) if bids_path.is_file(): continue # ignore real files elif bids_path.is_symlink(): if bids_path.resolve() == Path(file_path).resolve(): continue # nothing to be done else: bids_path.unlink() # symlink points to different file relative_file_path = relpath(file_path, start=bids_path.parent) bids_path.symlink_to(relative_file_path) # sidecar files for bids_path in self.file_paths.keys(): metadata = self._metadata.get(bids_path) if metadata is not None and len(metadata) > 0: basename, _ = split_ext(bids_path) sidecar_path = ( Path(bidsdir) / Path(bids_path).parent / f"{basename}.json" ) bids_paths.add(sidecar_path) jsonstr = json.dumps(metadata, indent=4, sort_keys=False) if sidecar_path.is_file(): with open(sidecar_path, "r") as f: if jsonstr == f.read(): continue with open(sidecar_path, "w") as f: f.METHOD_NAME(jsonstr) # remove unnecessary files files_to_keep = set() for bids_path in bids_paths: relative_bids_path = relpath(bids_path, start=bidsdir) # use relative paths to limit parents to bidsdir files_to_keep.add(relative_bids_path) files_to_keep.update(map(str, Path(relative_bids_path).parents)) for file_path in rlistdir(bidsdir): relative_file_path = relpath(file_path, start=bidsdir) if relative_file_path not in files_to_keep: p = Path(file_path) if not p.is_dir(): p.unlink() else: rmtree(p)
[ 77 ]
def METHOD_NAME(source: dict[Any, Any]) -> dict[Any, Any]: """clean source of all Models that would interfere with the JSONField. Models are replaced with a dictionary of { app: str, name: str, pk: Any }""" final_dict = {} for key, value in source.items(): new_value = sanitize_item(value) if new_value is not ...: final_dict[key] = new_value return final_dict
[ 1702, 553 ]
def METHOD_NAME(cortical_area): """ Returns number of Neurons in the connectome """ data = runtime_data.brain[cortical_area] synapse_count = 0 for neuron in data: for _ in data[neuron]['neighbors']: synapse_count += 1 return synapse_count
[ -1, 690, 629, 29 ]
def METHOD_NAME(self):
[ 9, 1719, 280, 820, 529, 6215, 1210 ]
def METHOD_NAME(config): # Add our tween just before the default exception handler config.add_tween(DD_TWEEN_NAME, over=pyramid.tweens.EXCVIEW) # ensure we only patch the renderer once. if not isinstance(pyramid.renderers.RendererHelper.render, wrapt.ObjectProxy): wrapt.wrap_function_wrapper("pyramid.renderers", "RendererHelper.render", trace_render)
[ 9995 ]
def METHOD_NAME(self): include_paths = self._include_paths() relative_includes = [] for node in self.inputs: for inc in include_paths: if node.is_child_of(inc): relative_includes.append(node.path_from(inc)) break else: self.generator.bld.fatal("could not resolve {}".format(node)) return relative_includes
[ 1461, 947, 1821, 3403 ]
def METHOD_NAME() -> Plugin: return Plugin( start=False, spec=Spec( module='tracardi.process_engine.action.v1.if_action', className='IfAction', inputs=["payload"], outputs=["true", "false"], init={ "condition": "", "trigger_once": False, "pass_payload": True, "ttl": 0 }, form=Form(groups=[ FormGroup( name="Condition statement", fields=[ FormField( id="condition", name="If condition statement", description="Provide condition for IF statement. If the condition is met then the payload " "will be returned on TRUE port if not then FALSE port is triggered.", component=FormComponent(type="textarea", props={"label": "condition"}) ), FormField( id="trigger_once", name="Return value only once per condition change", description="It will trigger the relevant port only once per condition change. Otherwise " "the flow will be stopped.", component=FormComponent(type="bool", props={"label": "Trigger once per condition change"}) ), FormField( id="ttl", name="Expire trigger again after", description="If the value is set to 0, the event will only occur once and will not be " "triggered again unless the conditions change. However, if a value greater " "than 0 is set, the event will be triggered again after the specified " "number of seconds, regardless of whether the conditions have changed or not.", component=FormComponent(type="text", props={"label": "Suppression time to live"}) ), FormField( id="pass_payload", name="Return input payload instead of True/False", description="It will return input payload on the output ports if enabled " "otherwise True/False.", component=FormComponent(type="bool", props={"label": "Return input payload"}) ) ] ), ]), manual="if_action", version='0.7.4', license="MIT", author="Risto Kowaczewski" ), metadata=MetaData( name='If', desc='This a conditional action that conditionally runs a branch of workflow.', tags=['condition'], purpose=['collection', 'segmentation'], type="condNode", icon='if', group=['Flow control'], documentation=Documentation( inputs={ "payload": PortDoc(desc="This port takes payload object.") }, outputs={ "true": PortDoc(desc="Returns payload if the defined condition is met."), "false": PortDoc(desc="Returns payload if the defined condition is NOT met.") } ) ) )
[ 372 ]
def METHOD_NAME(self): super(PeriodicService, self).METHOD_NAME() if self._interval is not None: self.call_soon()
[ 447, 549 ]
def METHOD_NAME(self, request: "http.HttpRequest", *args, **kwargs) -> "http.HttpResponse": if request.htmx: # This is an HTMX request and we are only interested in the items list. items = self.get_items() paginated_items = self.paginate_items( items=items, page=request.GET.get("page"), ) return self.render_items(request=request, items=paginated_items) return super().METHOD_NAME(request, *args, **kwargs)
[ 3124 ]
def METHOD_NAME(self, current_idx, end_idx): child_one_idx = current_idx * 2 + 1 # Calculate the index of the first child while child_one_idx <= end_idx: child_two_idx = -1 # Initialize the index of the second child if current_idx * 2 + 2 <= end_idx: child_two_idx = current_idx * 2 + 2 # Calculate the index of the second child if it exists index_to_swap = child_one_idx # Assume the first child is the one to swap with if child_two_idx > -1 and self.heap[child_one_idx] > self.heap[child_two_idx]: # If the second child exists and is smaller, update the index to swap with index_to_swap = child_two_idx if self.heap[current_idx] > self.heap[index_to_swap]: # If the current element is greater than the one to swap with, perform the swap self.swap(current_idx, index_to_swap) current_idx = index_to_swap child_one_idx = current_idx * 2 + 1 # Update the index of the first child else: return
[ 7051, 481 ]
def METHOD_NAME(self): return [torch.float, torch.half, torch.bfloat16]
[ 616, 4303 ]
def METHOD_NAME(self): df = pd.DataFrame({'start_time': [1., 2., 3.], 'label': ['a', 'b', 'c']}) with self.assertRaises(ValueError): TimeIntervals.from_dataframe(df, name='ti_name')
[ 9, 280, 1616, 1038, 984, 1959 ]
def METHOD_NAME(self): self.webhook_last_ping = datetime.utcnow()
[ 276, 12, 857 ]