content : string (length 35 – 762k)
sha1 : string (length 40)
id : int64 (0 – 3.66M)
def fetch_gene_id(gene_id, ENSEMBL_REST_SERVER = GRCH37_ENSEMBL_REST_SERVER):
    """
    Get gene details from name
    * string
    Returntype: Gene
    """
    server = ENSEMBL_REST_SERVER
    ext = "/lookup/id/%s?content-type=application/json" % (gene_id)
    try:
        hash = postgap.REST.get(server, ext)
        return Gene(
            name = hash['display_name'],
            id = hash['id'],
            chrom = hash['seq_region_name'],
            tss = int(hash['start']) if hash['strand'] > 0 else int(hash['end']),
            biotype = hash['biotype']
        )
    except:
        return None
5ffda8458a1d60e6c6b28e4168919332e342217d
2,900
def allCategoriesJSON():
    """ Generates JSON for all categories """
    categories = db_session.query(Category).all()
    return jsonify(categories=[c.serialize for c in categories])
cd07bdff7d7d69be152fc2ebb3f248ad77eaacd4
2,901
def get_rrule(rule, since, until):
    """
    Compute an RRULE for the execution scheduler.

    :param rule: A dictionary representing a scheduling rule.
        Rules are of the following possible formats (e.g.):
            {'recurrence': '2 weeks', 'count': 5, 'weekdays': ['SU', 'MO', 'TH']}
                = run every 2 weeks, 5 times totally, only on sun. mon. or thu.
            {'count': 1'} = run exactly once, at the `since` time
            {'rrule': 'RRULE:FREQ=DAILY;INTERVAL=3'} = pass RRULE directly
    :param since: A datetime string representing the earliest time to schedule
    :param until: A datetime string representing the latest time to schedule
    :return: an iCalendar RRULE object
    """
    since = _get_timestamp(since)
    until = _get_timestamp(until)

    if rule.get('rrule'):
        parsed_rule = rrule.rrulestr(rule['rrule'], dtstart=since, cache=True)
        if not parsed_rule._until:
            parsed_rule._until = until
        return parsed_rule

    if not rule.get('recurrence'):
        if rule.get('count') == 1:
            frequency = rrule.DAILY
            interval = 0
        else:
            return
    else:
        interval, recurrence = parse_recurrence(rule['recurrence'])
        if not recurrence:
            return
        freqs = {'sec': rrule.SECONDLY, 'second': rrule.SECONDLY,
                 'min': rrule.MINUTELY, 'minute': rrule.MINUTELY,
                 'h': rrule.HOURLY, 'hour': rrule.HOURLY,
                 'd': rrule.DAILY, 'day': rrule.DAILY,
                 'w': rrule.WEEKLY, 'week': rrule.WEEKLY,
                 'mo': rrule.MONTHLY, 'month': rrule.MONTHLY,
                 'y': rrule.YEARLY, 'year': rrule.YEARLY}
        frequency = freqs[recurrence]

    weekdays = None
    if rule.get('weekdays'):
        weekdays = _get_weekdays(rule['weekdays'])
    if not weekdays:
        return rrule.rrule(freq=frequency, interval=interval, dtstart=since,
                           until=until, count=rule.get('count'), cache=True)

    count = rule.get('count')
    rule_set = _get_rule_set_by_weekdays(
        frequency, interval, since, until, weekdays)
    return _cap_rule_set_by_occurrence_count(rule_set, count)
e4e6548f23a832b7bd5e23174b08b9134821134f
2,902
def site_id(request):
    """Site id of the site to test."""
    return request.param if hasattr(request, 'param') else None
98f6e5af07f7c2b70397cbd1ee54fc2df66e7809
2,903
def parse(limit_string):
    """
    parses a single rate limit in string notation (e.g. '1/second' or '1 per second'

    :param string limit_string: rate limit string using :ref:`ratelimit-string`
    :raise ValueError: if the string notation is invalid.
    :return: an instance of :class:`RateLimitItem`
    """
    return list(parse_many(limit_string))[0]
2d5a3c618bd70693c1e296b77185387b302cffe0
2,904
def change_balance(email):
    """Change a user's balance."""
    if not isinstance(request.json.get('change'), int):
        abort(400, {'message': 'The change in innopoints must be specified as an integer.'})

    user = Account.query.get_or_404(email)
    if request.json['change'] != 0:
        new_transaction = Transaction(account=user, change=request.json['change'])
        db.session.add(new_transaction)
        try:
            db.session.commit()
        except IntegrityError as err:
            db.session.rollback()
            log.exception(err)
            abort(400, {'message': 'Data integrity violated.'})

        notify(user.email, NotificationType.manual_transaction, {
            'transaction_id': new_transaction.id,
        })

    return NO_PAYLOAD
4500a192a51eaa548a6d2ca7807c66ac0042b75c
2,905
def contact_infectivity_asymptomatic_40x70():
    """
    Real Name: b'contact infectivity asymptomatic 40x70'
    Original Eqn: b'contacts per person normal 40x70*infectivity per contact'
    Units: b'1/Day'
    Limits: (None, None)
    Type: component

    b''
    """
    return contacts_per_person_normal_40x70() * infectivity_per_contact()
8b0ffa0b2d3b54d6802881d14524cd8a10d5329a
2,906
def generate_resource_link(pid, resource_path, static=False, title=None):
    """
    Returns a valid html link to a public resource within an autogenerated instance.

    Args:
        pid: the problem id
        resource_path: the resource path
        static: boolean whether or not it is a static resource
        title: the displayed text. Defaults to the path

    Returns:
        The html link to the resource.
    """
    return '<a target=_blank href="/api/autogen/serve/{}?static={}&pid={}">{}</a>'.format(
        resource_path,
        "true" if static else "false",
        pid,
        resource_path if not title else title
    )
c2523e254d93ecc36198ffea6f2f54c48dfe529d
2,907
def make_cointegrated(seed, n_samples, gamma):
    """
    cointegrated pair:
        - x0_t = x0_t-1 + gauss[:, 0]
        - x1_t = gamma * x0_t + gauss[:, 1]
    for various gamma.

    cf: Hamilton [19.11.1, 19.11.2]
    """
    np.random.seed(seed)
    x0 = np.random.randn(n_samples).cumsum()
    x1 = gamma * x0 + np.random.randn(n_samples)
    return np.stack([x0, x1], axis=1)
8b7ee2b414a19a9e2dc73ab5cdb98f44c3d75ddf
2,908
import random


def web_index():
    """Home page."""
    news = db.session.query(HotHomeNews).to_dicts
    home = list()
    hot = list()
    temp = 30
    for index, i in enumerate(news):
        temp -= random.randint(0, 2)
        i['date'] = '2021-04' + '-' + str(temp)
        if i['hot'] == 1:
            hot.append(i)
        else:
            home.append(i)
    return render_template('index.html', hot=hot, home=home)
24a39493a8b864fa7789a455fa292e13a665ba3f
2,909
def test_parametrize():
    """Tests parametrizing a function"""
    @arg.parametrize(val=arg.val('vals'))
    def double(val):
        return val * 2

    assert double(vals=[1, 2, 3]) == [2, 4, 6]

    # This should result in a lazy bind error
    with pytest.raises(arg.BindError):
        double(val=1)

    # Partial runs should be able to ignore parametrization
    assert double.partial(val=1) == 2
82202cb99f48f47ac21c4350008b3bf54f43666a
2,910
def F_to_C(Tf):
    """Convert a temperature from Fahrenheit to Celsius."""
    Tc = (Tf-32)*5/9
    return Tc
9264ac7b0d03bc5d44e716656bafac8a1f112978
2,911
def generate_mdn_sample_from_ouput(output, test_size, distribution='Normal', params=None):
    """
    Using the output layer from the prediction on a fitted mdn model
    generate test_size number of samples. (Note output corresponds to a
    one-dimensional output).

    Parameters
    ----------
    output : array
        layer of neural network ordered mixture weights (unscaled), variance
        (unscaled) and means
    test_size : int
        number of samples to draw from fitted mdn. deprecated.
    distribution: string
        distribution of output. Can be Normal, Gamma or Beta.

    Returns
    ----------
    result : array
        sample from mixture distribution.
    """
    ec.check_distribution(distribution)

    num_components = int(output.shape[1] / 3)
    out_mu = output[:, :num_components]
    out_sigma = output[:, num_components:2 * num_components]
    out_pi = output[:, 2 * num_components:]
    result = np.zeros(output.shape[0])
    mu = 0
    std = 0
    idx = 0
    for i, _ in enumerate(result):
        idx = np.random.choice(num_components, 1, p=out_pi[i])
        if distribution == 'Normal':
            mu = out_mu[i, idx]
            std = np.sqrt(out_sigma[i, idx])
            result[i] = mu + np.random.randn() * std
        elif distribution == 'Gamma':
            alpha = out_mu[i, idx]
            beta = out_sigma[i, idx]
            result[i] = np.random.gamma(alpha, 1 / beta)
        elif distribution == 'Beta':
            alpha = out_mu[i, idx]
            beta = out_sigma[i, idx]
            result[i] = np.random.beta(alpha, beta)
        elif distribution == 'Poisson':
            rate = out_mu[i, idx]
            result[i] = np.random.poisson(rate)
        elif distribution == 'Binomial':
            p = out_mu[i, idx]
            n = out_sigma[i, idx]
            result[i] = np.random.binomial(params['binomial_n'], p)
        else:
            raise NameError('{} not a distribution'.format(distribution))
    return result
6851b03b1877e0d0fdc24edca98f386db29a8733
2,912
def infer_folding_rates(clusters, activation_energies, prefactors, G, temperatures): """ Takes Arrenius parameters and uses detailed balance to compute folding rates """ print('Inferring unknown folding rates from detailed balance...') Nclusters = len(clusters) folding_rates=np.nan*np.zeros((Nclusters, Nclusters, len(temperatures) )) unfolding_rates = np.nan*np.zeros((Nclusters, Nclusters, len(temperatures))) for b in range(Nclusters): for a in range(Nclusters): unfolding_rates[a, b,:] = prefactors[a,b]*np.exp(-activation_energies[a,b]/temperatures) for t, temp in enumerate(temperatures): if -np.log(unfolding_rates[a,b,t]) < (G[t,b] - G[t,a]): #barrier height is lower than free energy difference...typically this implies Arrhenius approximation is failing unfolding_rates[a,b,t] = np.exp(-( G[t,b] - G[t,a]) ) #Then we use the barrier height folding_rates[b,a,:]= unfolding_rates[a,b,:] * np.exp(G[:,b] - G[:,a]) #detailed balance! return folding_rates, unfolding_rates, temperatures
0c986bdb7c05f8aef973598dd01913fc35f1cd75
2,913
def create_cry_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
    """Create control Control-RY state

    Args:
        - qc (qiskit.QuantumCircuit): init circuit
        - thetas (np.ndarray): parameters

    Returns:
        - qiskit.QuantumCircuit
    """
    for i in range(0, qc.num_qubits - 1, 2):
        qc.cry(thetas[i], i, i + 1)
    for i in range(1, qc.num_qubits - 1, 2):
        qc.cry(thetas[i], i, i + 1)
    qc.cry(thetas[qc.num_qubits - 1], qc.num_qubits - 1, 0)
    return qc
9238d6a6d4e3ed7c66f065ed502cc3236c71abd7
2,914
def get_region(h5_dset, reg_ref_name): """ Gets the region in a dataset specified by a region reference Parameters ---------- h5_dset : h5py.Dataset Dataset containing the region reference reg_ref_name : str / unicode Name of the region reference Returns ------- value : np.ndarray Data specified by the region reference. Note that a squeeze is applied by default. """ warn('pyUSID.io.reg.ref.get_region will be moved to pyNSID in the next ' 'pyUSID version.', FutureWarning) if not isinstance(reg_ref_name, (str, unicode)): raise TypeError('reg_ref_name should be a string') if not isinstance(h5_dset, h5py.Dataset): raise TypeError('h5_dset should be of type h5py.Dataset') # this may raise KeyErrors. Let it reg_ref = h5_dset.attrs[reg_ref_name] return np.squeeze(h5_dset[reg_ref])
189fab43233d58f734e6ed616aa0d198c16bc21e
2,915
import time import os def agent(game, n_ep, n_mcts, max_ep_len, lr, c, gamma, data_size, batch_size, temp, n_hidden_layers, n_hidden_units): """ Outer training loop """ seed_best = None a_best = None episode_returns = [] # storage timepoints = [] # environments env = make_game(game) is_atari = is_atari_game(env) mcts_env = make_game(game) if is_atari else None database = Database(max_size=data_size, batch_size=batch_size) model = Model(env=env, lr=lr, n_hidden_layers=n_hidden_layers, n_hidden_units=n_hidden_units) t_total = 0 # total steps r_best = -np.Inf for ep in range(n_ep): start = time.time() s = env.reset() r2 = 0.0 # Total return counter a_store = [] seed = np.random.randint(1e7) # draw some env seed env.seed(seed) if is_atari: mcts_env.reset() mcts_env.seed(seed) mcts = MCTS(root_index=s, model=model, na=model.action_dim, gamma=gamma) # the object responsible for MCTS searches for t in range(max_ep_len): # MCTS step mcts.search(n_mcts=n_mcts, c=c, env=env, mcts_env=mcts_env) # perform a forward search state, pi, v = mcts.return_results( temp) # extract the root output database.store((state, v, pi)) # Make the true step a = np.random.choice(len(pi), p=pi) a_store.append(a) s1, r, terminal, _ = env.step(a) r2 += r # total number of environment steps (counts the mcts steps) t_total += n_mcts if terminal: break else: mcts.forward(a, s1) # Finished episode episode_returns.append(r2) # store the total episode return timepoints.append( t_total) # store the timestep count of the episode return store_safely(os.getcwd(), 'result', {'r': episode_returns, 't': timepoints}) if r2 > r_best: a_best = a_store seed_best = seed r_best = r2 print( 'Finished episode {}, total return: {}, total time: {} sec'.format( ep, np.round(r2, 2), np.round((time.time() - start), 1))) # Train database.reshuffle() for epoch in range(1): for sb, v_batch, pi_batch in database: model.train(sb, v_batch, pi_batch) # return results return episode_returns, timepoints, a_best, seed_best, r_best
2dab9af74a6712bc52bd59192372d3363da7e40b
2,916
def butter_bandpass_filter(data, lowcut, highcut, sample_rate, order):
    """
    Bandpass filter the data using Butterworth IIR filters.

    Two digital Butterworth IIR filters with the specified order are created,
    one highpass filter for the lower critical frequency and one lowpass
    filter for the higher critical frequency. Both filters use second-order
    sections (SOS). Then first the highpass filter is applied on the given
    data and on its result the lowpass filter is applied. Both filters are
    applied as forward-backward digital filters to correct the non-linear
    phase.

    Parameters
    ----------
    data : ndarray
        The data to be filtered; format (n_samples,)
    lowcut : float
        The lower critical frequency
    highcut : float
        The higher critical frequency
    sample_rate : float
        The sampling rate of the given data
    order : int
        The order of the used filters

    Returns
    -------
    data : ndarray
        the bandpass filtered data; format (n_samples,)
    """
    sos_high = butter(order, lowcut, btype='hp', fs=sample_rate, output='sos')
    sos_low = butter(order, highcut, btype='lp', fs=sample_rate, output='sos')
    return sosfiltfilt(sos_low, sosfiltfilt(sos_high, data, padlen=3 * order),
                       padlen=3 * order)
52f9a400e3027223a8370c966cf88e74e878ebf3
2,917
import torch def _check_tensor_info(*tensors, size, dtype, device): """Check if sizes, dtypes, and devices of input tensors all match prescribed values.""" tensors = list(filter(torch.is_tensor, tensors)) if dtype is None and len(tensors) == 0: dtype = torch.get_default_dtype() if device is None and len(tensors) == 0: device = torch.device("cpu") sizes = [] if size is None else [size] sizes += [t.shape for t in tensors] dtypes = [] if dtype is None else [dtype] dtypes += [t.dtype for t in tensors] devices = [] if device is None else [device] devices += [t.device for t in tensors] if len(sizes) == 0: raise ValueError(f"Must either specify `size` or pass in `W` or `H` to implicitly define the size.") if not all(i == sizes[0] for i in sizes): raise ValueError(f"Multiple sizes found. Make sure `size` and `W` or `H` are consistent.") if not all(i == dtypes[0] for i in dtypes): raise ValueError(f"Multiple dtypes found. Make sure `dtype` and `W` or `H` are consistent.") if not all(i == devices[0] for i in devices): raise ValueError(f"Multiple devices found. Make sure `device` and `W` or `H` are consistent.") # Make sure size is a tuple (not a torch.Size) for neat repr-printing purposes. return tuple(sizes[0]), dtypes[0], devices[0]
1a00aa0e09e520a23591d9fd461422f7b0acf0e2
2,918
def generate_dataset(df, n_past, n_future): """ df : Dataframe n_past: Number of past observations n_future: Number of future observations Returns: X: Past steps Y: Future steps (Sequence target) Z: Sequence category""" # Split the dataframe with respect to IDs series_ids = dict(tuple(df.groupby('ID'))) # Dict of ids as keys and x,y,id as values train_data, target_data, target_category = list(), list(), list() for id in series_ids.keys(): X, Y, Z= list(), list(), list() # Drop the column ids and convert the pandas into arrays series = series_ids[id].drop(columns = ['ID']).to_numpy() for window_start in range(len(series)): past_end = window_start + n_past future_end = past_end + n_future if not future_end > len(series): # slicing the past and future parts of the window past, future = series[window_start:past_end, :], series[past_end:future_end, :] X.append(past) Y.append(future) # For each sequence length set target category Z.append(int(id)) train_data.extend(np.array(X)) target_data.extend(np.array(Y)) target_category.extend(np.array(Z)) return train_data, target_data, target_category
f11e769499576223f778b669ccff8d973f4a8039
2,919
from ray import tune
from ray.tune.registry import register_env


def setup_exps_rllib(flow_params,
                     n_cpus,
                     n_rollouts,
                     reward_specification=None,
                     policy_graphs=None,
                     policy_mapping_fn=None,
                     policies_to_train=None):
    """Return the relevant components of an RLlib experiment.

    Parameters
    ----------
    flow_params : dict
        flow-specific parameters (see flow/utils/registry.py)
    n_cpus : int
        number of CPUs to run the experiment over
    n_rollouts : int
        number of rollouts per training iteration
    policy_graphs : dict, optional
        TODO
    policy_mapping_fn : function, optional
        TODO
    policies_to_train : list of str, optional
        TODO

    Returns
    -------
    str
        name of the training algorithm
    str
        name of the gym environment to be trained
    dict
        training configuration parameters
    """
    try:
        from ray.rllib.agents.agent import get_agent_class
    except ImportError:
        from ray.rllib.agents.registry import get_agent_class

    horizon = flow_params['env'].horizon
    alg_run = "PPO"
    agent_cls = get_agent_class(alg_run)
    config = deepcopy(agent_cls._default_config)

    config["seed"] = 17
    config["num_workers"] = 7  # n_cpus - 1
    config["train_batch_size"] = horizon * n_rollouts
    config["sgd_minibatch_size"] = min(16 * 1024, config["train_batch_size"])
    config["gamma"] = GAMMA  # discount rate
    # fcnet_hiddens = [int(sys.argv[5])] * int(sys.argv[6])
    config["model"].update({"fcnet_hiddens": tune.grid_search(
        [[], [4, 4], [16, 16], [64, 64], [256, 256]])})
    # config["model"].update({"fcnet_hiddens": tune.grid_search(
    #     [[4], [8], [8, 8], [16, 16], [64, 64]])})  # [32, 32, 32]
    config["use_gae"] = True
    config["lambda"] = 0.97
    config["kl_target"] = 0.02
    config["vf_clip_param"] = 10000
    config["num_sgd_iter"] = 10
    config["horizon"] = horizon
    config["framework"] = "torch"
    config["callbacks"] = RewardCallback
    config["log_level"] = "ERROR"

    # save the flow params for replay
    flow_json = json.dumps(
        flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)
    config['env_config']['flow_params'] = flow_json
    config['env_config']['run'] = alg_run

    # multiagent configuration
    if policy_graphs is not None:
        config['multiagent'].update({'policies': policy_graphs})
    if policy_mapping_fn is not None:
        config['multiagent'].update(
            {'policy_mapping_fn': tune.function(policy_mapping_fn)})
    if policies_to_train is not None:
        config['multiagent'].update({'policies_to_train': policies_to_train})

    create_env, gym_name = make_create_env(params=flow_params,
                                           reward_specification=reward_specification)

    # Register as rllib env
    register_env(gym_name, create_env)
    return alg_run, gym_name, config
36f1e212b8358589d5f6f54df9983c94bcfe940a
2,920
from typing import Optional
from typing import Tuple
from typing import Literal
from typing import Union


def upsampling2D(
    inputs: Optional[tf.Tensor] = None,
    size: Tuple[int, int] = (2, 2),
    mode: Literal['pad', 'nearest', 'bilinear'] = 'nearest',
    name: Optional[str] = None,
) -> Union[tf.Tensor, Resampling2D]:
    """ Upsampling"""
    layer = Resampling2D(size, mode, name=name)
    if inputs is None:
        return layer
    return layer(inputs)
5f2a7f642442abcbf1075d8296fa026e2e639744
2,921
import json def sns_msg_body_user_notify_topic(message, autoscale_group, instance_id, details=None): """ Purpose: To prepare dict with correct values for user topic Parameters: message, group name, instance_id, details Returns: dict Raises: """ # Constructing a JSON object as per AWS SNS requirement sns_message = { "description": message, "autoscale_group": autoscale_group, "instance_id": instance_id, "details": details } logger.debug("Prepared message body: " + json.dumps(sns_message, separators=(',', ':'))) return sns_message
5af8b384139e099b291ba97b613af94a322ccb78
2,922
def moderator_name():
    """Return the name of the test game moderator."""
    return 'Hannah'
55132bc74510ee9c3c2a74048bf35bae94b9a6ef
2,923
import shutil
import os


def delete_downloads():
    """Delete all downloaded examples to free space or update the files."""
    shutil.rmtree(geoist.EXAMPLES_PATH)
    os.makedirs(geoist.EXAMPLES_PATH)
    return True
104df410e6bb4e5ee97c8dd522e0cfa01b3036ea
2,924
def reversebits2(max_bits, num):
    """ Like reversebits1, plus small optimization regarding bit index
    calculation. """
    rev_num = 0
    high_shift = max_bits - 1
    low_shift = 0
    for _ in range(0, (max_bits + 1) // 2):
        low_bit = (num & (1 << low_shift)) >> low_shift
        high_bit = (num & (1 << high_shift)) >> high_shift
        rev_num |= low_bit << high_shift
        rev_num |= high_bit << low_shift
        high_shift -= 1
        low_shift += 1
    return rev_num
cbc41754928f758d689ea6b0241205a9a1c02ccd
2,925
from typing import Tuple from typing import Type from typing import Dict import typing def _get_builder_cls( ds_to_build: str, ) -> Tuple[Type[tfds.core.DatasetBuilder], Dict[str, str]]: """Infer the builder class to build. Args: ds_to_build: Dataset argument. Returns: builder_cls: The dataset class to download and prepare kwargs: """ # 1st case: Requested dataset is a path to `.py` script path = _search_script_path(ds_to_build) if path is not None: # Dynamically load user dataset script with tfds.core.utils.add_sys_path(path.parent): builder_cls = tfds.core.community.builder_cls_from_module(path.stem) return builder_cls, {} # 2nd case: Dataset is registered through imports. # Extract `name/config:version` extract_name_and_kwargs = tfds.core.naming.dataset_name_and_kwargs_from_name_str builder_name, builder_kwargs = extract_name_and_kwargs(ds_to_build) builder_cls = tfds.builder_cls(builder_name) builder_kwargs = typing.cast(Dict[str, str], builder_kwargs) return builder_cls, builder_kwargs
4f655c7df8d205683d295987c719eb7d4909df83
2,926
def keypoints_to_bbox(keypoints_list, image): """Prepare bboxes from keypoints for object tracking. args: keypoints_list (np.ndarray): trtpose keypoints list return: bboxes (np.ndarray): bbox of (xmin, ymin, width, height) """ bboxes = [] img_h, img_w = image.shape[:2] for idx, keypoints in enumerate(keypoints_list): keypoints = np.where(keypoints[:, 1:] !=0, keypoints[:, 1:], np.nan) keypoints[:, 0] *= img_w keypoints[:, 1] *= img_h xmin = np.nanmin(keypoints[:,0]) ymin = np.nanmin(keypoints[:,1]) xmax = np.nanmax(keypoints[:,0]) ymax = np.nanmax(keypoints[:,1]) bbox = expand_bbox(xmin, xmax, ymin, ymax, img_w, img_h) # discard bbox with width and height == 0 if bbox[2] < 1 or bbox[3] < 1: continue bboxes.append(bbox) return np.asarray(bboxes)
b144fecaf4c2996a945240c62a93e2a3e6dafd04
2,927
def view(): """ WIP: View admins. """ if current_user.is_admin(): admins = UserMetadata.select().where(UserMetadata.key == 'admin') postcount = SubPost.select(SubPost.uid, fn.Count(SubPost.pid).alias('post_count')).group_by(SubPost.uid).alias( 'post_count') commcount = SubPostComment.select(SubPostComment.uid, fn.Count(SubPostComment.cid).alias('comment_count')).group_by( SubPostComment.uid).alias('j2') users = User.select(User.name, User.status, User.uid, User.joindate, postcount.c.post_count.alias('post_count'), commcount.c.comment_count) users = users.join(postcount, JOIN.LEFT_OUTER, on=User.uid == postcount.c.uid) users = users.join(commcount, JOIN.LEFT_OUTER, on=User.uid == commcount.c.uid) users = users.where(User.uid << [x.uid for x in admins]).order_by(User.joindate.asc()).dicts() return render_template('admin/users.html', users=users, admin_route='admin.view') else: abort(404)
cd2e265b83cdf0028c6b0798b87e7bd26cd799f5
2,928
def get_market_offers(session, ids, base_market_url=BASE_MARKET_URL): """\nMain function for interaction with this library. \nProvided a sequence of Character Ids, returns a dictionary of offers for each. \ Requires a session which has already authenticated with Urban Rivals. \nOptional: provide a base market URL for proxy. Must end with a "?" \ Ex: "http://example.com?" >>>get_market_offers(session, [1400, 1423, 1764]) {1400: Offer, 1423: Offer, 1764: Offer} >>>get_market_offers(session, ["1301", "1543"]) {"1301": Offer, "1543": Offer} """ if len(ids) < 1: raise ValueError("Ids cannot be empty") if not base_market_url.endswith("?"): raise ValueError("URL must end with a question mark") market = { char_id: _html_to_soup( session.get( _get_offer_list(char_id, base_market_url) )) for char_id in map(_clean_input, ids) } return {char_id :_find_offers(market[char_id]) for char_id in map(_clean_input, ids) }
a526a6f79d95f8ebc5228ebac7367c5e846cfcfc
2,929
def band_listing(request):
    """A view of all bands."""
    bands = Band.objects.all()
    return render(request, 'bands/band_listing.html', {'bands': bands})
11f9305784f812b481dcbb908086feedd87dd618
2,930
from typing import Optional


def check_mismatched_bracket_type(path: str) -> Optional[BracketErrorType]:
    """
    Check for miss matched brackets
    :param path: path to file
    :return: Type of miss match or None if there is none
    """
    file_as_string = utils.read_file(path)
    brackets_count = utils.count_brackets(file_as_string)
    normal_brackets_are_even = brackets_count[0] % 2 == 0
    square_brackets_are_even = brackets_count[1] % 2 == 0
    curly_brackets_are_even = brackets_count[2] % 2 == 0

    if not normal_brackets_are_even and not square_brackets_are_even:
        return BracketErrorType.NORMAL_SQUARE
    elif not normal_brackets_are_even and not curly_brackets_are_even:
        return BracketErrorType.NORMAL_CURLY
    elif not curly_brackets_are_even and not square_brackets_are_even:
        return BracketErrorType.CURLY_SQUARE
    return None
edd10b89865f9c17cc915875bb7ab557cc85d5b7
2,931
from typing import Union from typing import Tuple import requests import re def get_rank( day: int = day_idx, year: int = year ) -> Union[None, Tuple[str]]: """ Returns the rank for the current day. Arguments --------- day -- The day to get the rank for. year -- The year to get the rank for. Returns ------- The rank for the specified day and time for completion. """ # Get the leaderboard ranking r = requests.get( f'https://adventofcode.com/{year}/leaderboard/self', headers=headers, cookies=cookies ) data = r.text # Parse for the time/rank data = data.replace('&gt;', '>') ranks = re.findall( r'(\d+) +(\d\d:\d\d:\d\d|>24h) +(\d+) +(\d+)( +(\d\d:\d\d:\d\d|>24h) +(\d+) +(\d+))?', data ) rank_info = [t for t in ranks if t[0] == str(day)] if rank_info: rank_info = rank_info[0] else: return None # Reformat and grab the results time_1, rank_1 = rank_info[1:3] time_2, rank_2 = rank_info[5:7] if rank_1: rank_1 = int(rank_1) if rank_2: rank_2 = int(rank_2) return RankInfo(time_1, rank_1, time_2, rank_2)
c9d50857a0eb5574b971c8cf7a4e5458eb1320fc
2,932
def get_taste(dm): """ Get the classification of a matrix defining a tangent vector field of the form: | R | t | | - - - | | 0 | 0 | :param dm: input tangent matrix :return: number from 1 to 6 corresponding to taste. see randomgen_linear_by_taste. """ rot = dm[:2, :2] v, w = np.linalg.eig(rot) if v[0].imag < np.spacing(0) and v[1].imag < np.spacing(0): # Eigenvalues both real: l1 = v[0].real l2 = v[1].real if l1 > 0 and l2 > 0: # Taste 1 return 1 elif l1 < 0 and l2 < 0: # Taste 2 return 2 else: # Taste 3 return 3 else: # Complex conjugate eigenvalues if v[0].real > np.spacing(0): # Taste 4 return 4 elif v[0].real < np.spacing(0): # Taste 5 return 5 else: # Taste 6 - never get there in practice. return 6
38b986478564b118d97126b451af514b14c0e155
2,933
import time import logging def wait_for_sge_jobs(jids, wait_timeout=None, run_timeout=None): """ Wait for all sge job ids {jids} to complete before exiting. Return sge job ids that have been killed by qdel. If wait_timeout is set, qdel all jobs regardless job status after {wait_timeout} seconds have passed. If wait_timeout is None, jobs can qw or held for a long time when cluster is busy. If sge died and restarted, jobs will no longer be active and wait_for_sge_jobs should be OK to exit, however, in this case, upstream calls may not be aware of jobs are not completed. If run_timeout is set, qdel a job after it has been running for {run_timeout} seconds. If run_timeout is None, jobs can run forever unless wait_timeout is set. Note that if both wait_timeout and run_timeout are set, qdel a job when the earliest time out is reached. Parameters: jids - sge job ids that we are waiting for wait_timeout - maximum time in seconds waiting for sge jobs, regardless of their statuses. qdel it otherwise. If is None, no cap. run_timeout - maximum time in seconds that a sge job can be running, not counting qw or hold time. qdel it otherwise. If is None, no cap. """ count = 0 check_sge_every_n_seconds = 10 # check sge every n seconds. time_passed = 0 runtime_passed = dict({jid: 0 for jid in jids}) killed_jobs = [] # jobs that have been killed. while True: active_d = get_active_sge_jobs() not_done_jids = list(set(jids).intersection(set(active_d.keys()))) if len(not_done_jids) != 0: # some sge jobs are still running or qw, or held time.sleep(check_sge_every_n_seconds) time_passed += check_sge_every_n_seconds count += 1 if count % 100 == 0: logging.debug("Waiting for sge job to complete: %s.", ",".join(not_done_jids)) if wait_timeout is not None and time_passed >= wait_timeout: kill_sge_jobs(jids=not_done_jids) killed_jobs.extend(not_done_jids) break if run_timeout is not None: # update runtime_passed for jid in not_done_jids: if active_d[jid].startswith('r'): runtime_passed[jid] += check_sge_every_n_seconds to_kill_jids = [jid for jid in not_done_jids if runtime_passed[jid] >= run_timeout] kill_sge_jobs(jids=to_kill_jids) killed_jobs.extend(to_kill_jids) else: break return list(set(killed_jobs))
f104f7d30145f733d1454008089236faa1ba4bfd
2,934
from typing import Tuple


def _check_removal_required(submission: Submission, cfg: Config) -> Tuple[bool, bool]:
    """
    Check whether the submission has to be removed and whether this is reported.

    Note that this function returns a Tuple of booleans, where the first
    is to signify whether the submission is to be removed and the latter
    whether a relevant report was issued for this decision.
    """
    for item in submission.user_reports:
        if item[0] and any(
            reason in item[0]
            for reason in (
                reports.original_post_deleted_or_locked,
                reports.post_violates_rules,
            )
        ):
            return True, True
    linked_submission = cfg.r.submission(submission.id_from_url(submission.url))
    if is_removed(linked_submission):
        return True, False
    return False, False
a00ae21d233add5fd36b0343a3ef45bd0a11632b
2,935
def subjects(request, unique_id,form=None): """ Enlists all the subjects of a classroom , subjects can be added by admins """ classroom = get_object_or_404(Classroom,unique_id=unique_id) #querysets members = classroom.members.all() subjects = Subject.objects.filter(classroom=classroom) admin_check = classroom.special_permissions.filter(username = request.user.username).exists() # Admins can add a subject and assign a teacher to it if admin_check and request.method=="POST": form = SubjectForm(request.POST) teacher = get_object_or_404(User,username=request.POST.get('teacher')) if form.is_valid(): subject=form.save(commit=False) subject.classroom=classroom subject.teacher = teacher subject.save() subject.upload_permission.add(teacher) recipients=User.objects.filter(username__in=classroom.members.values_list('username', flat=True)) url = reverse('subjects',kwargs={'unique_id':classroom.unique_id}) notify.send(sender=request.user,verb=f"subject {subject.subject_name} added in {classroom.class_name}", recipient=recipients,url=url) messages.add_message(request,messages.INFO,f"A new Subject {subject.subject_name} added") classroom.teacher.add(teacher) return redirect(url) else: form = SubjectForm() params = { 'subjects':subjects, 'form':form, 'classroom':classroom, 'is_admin':admin_check, 'members':members } return render(request,'subjects_list.html',params)
322dd5e4c31225e66db9dcaa3cb7c6ad337ba963
2,936
from typing import Dict def retrieve_settings(skill_id: str) -> JSONStructure: """Retrieves skill's settings by leveraging the mycroft-api skill Send `skillmanager.list` message and wait for `mycroft.skills.list` message to appear on the bus. :param skill_id: Skill ID to retrieve the settings :type skill_id: str :return: Return the sanitized skill settings :rtype: JSONStructure """ status_code: int = status.HTTP_400_BAD_REQUEST msg: str = "unable to retrieve skill settings" try: skills: Skills = retrieve_list() for key in skills["results"]: if skills["results"][key]['id'] == skill_id: payload: Dict = { "type": "mycroft.api.skill_settings", "data": { "app_key": settings.app_key, "skill": skill_id } } info: JSONStructure = ws_send( payload, "mycroft.api.skill_settings.answer") if requirements(): if info["context"]["authenticated"]: return sanitize({"results": info["data"]}) status_code = status.HTTP_401_UNAUTHORIZED msg = "unable to authenticate with mycroft-api skill" raise Exception status_code = status.HTTP_401_UNAUTHORIZED msg = "mycroft-api skill is not installed on mycroft core" raise Exception status_code = status.HTTP_404_NOT_FOUND msg = f"skill {skill_id} not found" raise Exception except Exception as err: raise HTTPException( status_code=status_code, detail=msg) from err
5b6ec4b5d52563ab05005967ee21a6acfdaed9c3
2,937
def make_project(alias='project', root=None, **kwargs):
    """Initialize a project for testing purposes

    The initialized project has a few operations and a few jobs that are in
    various points in the workflow defined by the project.
    """
    init(alias=alias, root=root, template='testing')
    project = signac.init_project(name=alias, root=root)
    signac.testing.init_jobs(project, **kwargs)
    return project
0e0e5eb9a4ceaf780fee0072811bd161bb362af8
2,938
def _get_sensors_data(driver_info): """Get sensors data. :param driver_info: node's driver info :raises: FailedToGetSensorData when getting the sensor data fails. :returns: returns a dict of sensor data group by sensor type. """ try: ipmicmd = ipmi_command.Command(bmc=driver_info['address'], userid=driver_info['username'], password=driver_info['password']) ret = ipmicmd.get_sensor_data() except Exception as e: LOG.error(_LE("IPMI get sensor data failed for node %(node_id)s " "with the following error: %(error)s"), {'node_id': driver_info['uuid'], 'error': e}) raise exception.FailedToGetSensorData( node=driver_info['uuid'], error=e) if not ret: return {} sensors_data = {} for reading in ret: # ignore the sensor data which has no sensor reading value if not reading.value: continue sensors_data.setdefault( reading.type, {})[reading.name] = { 'Sensor Reading': '%s %s' % (reading.value, reading.units), 'Sensor ID': reading.name, 'States': str(reading.states), 'Units': reading.units, 'Health': str(reading.health)} return sensors_data
7268d7a700dc4aede1e7cddc0be978168d4f0b79
2,939
def dominates(lhs, rhs):
    """Weak strict domination relation: lhs =] rhs and lhs [!= rhs."""
    lhs_rhs = try_decide_less(lhs, rhs)
    rhs_lhs = try_decide_less(rhs, lhs)
    return rhs_lhs is True and lhs_rhs is False
80cc4af907b393b0e07d34de3549524ec33ed8ba
2,940
def complex_to_xy(complex_point):
    """turns complex point (x+yj) into cartesian point [x,y]"""
    xy_point = [complex_point.real, complex_point.imag]
    return xy_point
2984b70c3015cb69a0f7dfd62bd022bb26310852
2,941
def setup_mock_accessory(controller):
    """Add a bridge accessory to a test controller."""
    bridge = Accessories()

    accessory = Accessory.create_with_info(
        name="Koogeek-LS1-20833F",
        manufacturer="Koogeek",
        model="LS1",
        serial_number="12345",
        firmware_revision="1.1",
    )
    accessory.aid = 1

    service = accessory.add_service(ServicesTypes.LIGHTBULB)
    on_char = service.add_char(CharacteristicsTypes.ON)
    on_char.value = 0

    bridge.add_accessory(accessory)

    return controller.add_device(bridge)
5be787d14b17b4bdd79c4550131aa4ca48362056
2,942
def match_array_placeholder(loc, term, element):
    """Determine if the JSPEC array placeholder matches the JSON element.

    Args:
        loc (str): The current location in the JSON
        term (JSPECArrayPlaceholder): The JSPEC array placeholder.
        element (obj): The Python native object representing a JSON element

    Returns:
        Result: The result of whether the JSPEC array placeholder matches the
        JSON element
    """
    if isinstance(element, list):
        return GoodMatch()
    return BadMatch(loc, "expected an array")
1f00c51ba4e6b7de5d6675ed12a97b4b63b98781
2,943
def get_mask_index(timeDict, mask='Spine', use_B=False, noise_th=None): """ :param timeDict: timeDict to use :param mask: options are 'Spine' and 'Dendrite' :param use_B: Make masksB etc. :param noise_th: if None will return all mask index if float will return mean noise < then threshold :return: index of masks """ if use_B: b = 'B' else: b = '' masks = timeDict['Masks' + b] exclude = timeDict['excludeIndex' + b] indexs_all = np.where(masks.MaskType == mask)[0] indexs_good = np.setdiff1d(indexs_all, exclude) if noise_th is not None: noise = np.nanmean(timeDict['TCNoise' + b], axis=1) good_noise = np.where(noise < noise_th)[0] return np.intersect1d(indexs_good, good_noise) else: return indexs_good
2a7c2c7546091a549ff0a48035889432c96a7554
2,944
def coverageSection(*coverItems): """Combine multiple coverage items into a single decorator. Args: *coverItems ((multiple) :class:`CoverItem`): coverage primitives to be combined. Example: >>> my_coverage = coverage.coverageSection( ... coverage.CoverPoint("x", ...), ... coverage.CoverPoint("y", ...), ... coverage.CoverCross("z", ...), ... ... ... ) >>> >>> @my_coverage >>> def decorated_fun(self, arg): ... ... """ def _nested(*decorators): def _decorator(f): for dec in reversed(*decorators): f = dec(f) return f return _decorator return _nested(coverItems)
f0430c64d8e3c09e8b2ea2c12c43cb1c61ce5cce
2,945
from typing import List
from typing import Type


def get_operator_metatypes() -> List[Type[OperatorMetatype]]:
    """
    Returns a list of the operator metatypes.

    :return: List of operator metatypes .
    """
    return list(PT_OPERATOR_METATYPES.registry_dict.values())
880bdef8e7b015af99eb75f48b46189918e823d1
2,946
def fnl_fix_first_line(preprocessor: Preprocessor, string: str) -> str: """final action to ensures file starts with a non-empty non-whitespace line (if it is not empty)""" while string != "": pos = string.find("\n") if pos == -1: if string.isspace(): return preprocessor.replace_string(0, len(string), string, "", []) return string if string[:pos+1].isspace(): string = preprocessor.replace_string(0, pos+1, string, "", []) else: break return string
40526f43538e99adc3ea42e6cab00284193fb927
2,947
def rf_rasterize(geometry_col, bounds_col, value_col, num_cols_col, num_rows_col):
    """Create a tile where cells in the grid defined by cols, rows, and bounds are filled with the given value."""
    jfcn = RFContext.active().lookup('rf_rasterize')
    return Column(jfcn(_to_java_column(geometry_col), _to_java_column(bounds_col),
                       _to_java_column(value_col), _to_java_column(num_cols_col),
                       _to_java_column(num_rows_col)))
0281830fed2c556656e84270f2cf8289d779ade1
2,948
from typing import Tuple def balance_generic(array: np.ndarray, classes: np.ndarray, balancing_max: int, output: int, random_state:int=42)->Tuple: """Balance given arrays using given max and expected output class. arrays: np.ndarray, array to balance classes: np.ndarray, output classes balancing_max: int, maximum numbers per balancing maximum output: int, expected output class. """ output_class_mask = np.array(classes == output) retain_mask = np.bitwise_not(output_class_mask) n = np.sum(output_class_mask) if n > balancing_max: datapoints_to_remove = n - balancing_max mask = np.ones(shape=n) mask[:datapoints_to_remove] = 0 np.random.seed(random_state) np.random.shuffle(mask) output_class_mask[np.where(output_class_mask)] = mask array = array[np.logical_or( output_class_mask, retain_mask).reshape(-1)] return array
7912f5d5aa98ccab58e8ae0605d4af929e9501ee
2,949
def jsexternal(args, result, **kwds): """Decorator to define stubbed-out external javascript functions. This decorator can be applied to a python function to register it as the stubbed-out implementation of an external javascript function. The llinterpreter will run the python code, the compiled interpreter will link to the javascript function of the same name. """ def do_register(func): kwds.setdefault('_callable', func) kwds.setdefault('random_effects_on_gcobjs', False) kwds.setdefault('compilation_info', compilation_info) return rffi.llexternal(func.__name__, args, result, **kwds) return do_register
b9bb7fd801fb600fd1b81c0b7392c1c67401f4fc
2,950
def get_icon(filename):
    """ """
    icon = get_image_path(filename)
    if icon:
        return QIcon(icon)
    else:
        return QIcon()
0db1c20776939d0a57c00a45987b607bb5df7f4b
2,951
from typing import Union


def permission_confirm(perm_key_pair: list) -> Union[bool, str, None]:
    """Converts string versions of bool inputs to raw bool values."""
    if perm_key_pair[1].strip() == 'true':
        pi = True
    elif perm_key_pair[1].strip() == 'false':
        pi = False
    elif perm_key_pair[1].strip() == 'none':
        pi = None
    else:
        pi = 'None'
    return pi
c1827694019dd999f71d54be148dfe2abf5aeb4e
2,952
def _parse_policy_controller(configmanagement, msg): """Load PolicyController with the parsed config-management.yaml. Args: configmanagement: dict, The data loaded from the config-management.yaml given by user. msg: The Hub messages package. Returns: policy_controller: The Policy Controller configuration for MembershipConfigs, filled in the data parsed from configmanagement.spec.policyController Raises: Error, if Policy Controller `enabled` is missing or not a boolean """ if ('spec' not in configmanagement or 'policyController' not in configmanagement['spec']): return None spec_policy_controller = configmanagement['spec']['policyController'] # Required field if configmanagement['spec'][ 'policyController'] is None or 'enabled' not in spec_policy_controller: raise exceptions.Error( 'Missing required field .spec.policyController.enabled') enabled = spec_policy_controller['enabled'] if not isinstance(enabled, bool): raise exceptions.Error( 'policyController.enabled should be `true` or `false`') policy_controller = msg.ConfigManagementPolicyController() # When the policyController is set to be enabled, policy_controller will # be filled with the valid fields set in spec_policy_controller, which # were mapped from the config-management.yaml for field in spec_policy_controller: if field not in [ 'enabled', 'templateLibraryInstalled', 'auditIntervalSeconds', 'referentialRulesEnabled', 'exemptableNamespaces', 'logDeniesEnabled', 'mutationEnabled' ]: raise exceptions.Error( 'Please remove illegal field .spec.policyController.{}'.format(field)) setattr(policy_controller, field, spec_policy_controller[field]) return policy_controller
ceeedffe2dc1f484b32cd11c6e983c733687f349
2,953
from typing import Any import json def _type_cast(type_cast: Any, content_to_typecast: bytes, func_dict: dict) -> Any: """ Basis for type casting on the server If testing, replace `func_dict` with a dummy one Currently NOT guarenteed to return, please remember to change this API """ if type_cast == bytes: return content_to_typecast if type_cast == str: try: typecasted_content = content_to_typecast.decode() return typecasted_content # Remember to change this, but I"m lazy rn except UnicodeDecodeError as e: raise TypeError( f"Type casting from bytes to string failed for function " f"\"{func_dict['name']}\"\n{str(e)}" ) from UnicodeDecodeError elif type_cast == int: try: typecasted_content = int(content_to_typecast) return typecasted_content # Remember to change this, but I"m lazy rn except ValueError as e: raise TypeError( f"Type casting from bytes to int failed for function " f"\"{func_dict['name']}\":\n {e}" ) from ValueError elif type_cast == float: try: typecasted_content = float(content_to_typecast) return typecasted_content # Remember to change this, but I"m lazy rn except ValueError as e: raise TypeError( f"Type casting from bytes to float failed for function " f"\"{func_dict['name']}\":\n {e}" ) from ValueError elif type_cast is None: return content_to_typecast for _type in [list, dict]: if type_cast == _type: try: typecasted_content = json.loads(content_to_typecast) return typecasted_content except UnicodeDecodeError: raise TypeError( f"Cannot decode message data during " f"bytes->{_type.__name__} type cast" "(current implementation requires string to " "type cast, not bytes)" ) from UnicodeDecodeError except ValueError: raise TypeError( f"Type casting from bytes to {_type.__name__} " f"failed for function \"{func_dict['name']}\"" f":\n Message is not a {_type.__name__}" ) from ValueError except Exception as e: raise TypeError( f"Type casting from bytes to {_type.__name__} " f"failed for function \"{func_dict['name']}\"" f":\n {e}" ) from type(e)
de7121ea1f29448bcd7ab44d60d6a64bbdba59d0
2,954
import os def paster_create(package, tempdir, user, template, email, fullname): """ Run paster to create a new package given a template and user info. """ dist_root = os.path.join(tempdir, package) name = get_name(user) email = get_email(user) url = '%s/%s/%s' % (config.GITHUB_URL, user, package) conffile = os.path.join(tempdir, 'pastescript.ini') paster_config = config.PASTER_CONFIG % (name, email, url) conf = open(conffile, 'w') # XXX Kill me try: conf.write(paster_config.encode('utf-8')) except: paster_config = config.PASTER_CONFIG % ('', email, url) conf.write(paster_config) conf.close() os.chdir(tempdir) # Support pyramid's pcreate if template in ('alchemy', 'starter', 'zodb'): out = pbs.pcreate('-t', template, package) else: out = pbs.paster('create', '-t', template, '--config=%s' % conffile, '--no-interactive', package) manifest = open(os.path.join(dist_root, 'MANIFEST.in'), 'w') try: # Handle namespace packages parts = package.split('.') parent_dir = parts[0] except: parent_dir = package manifest.write(config.MANIFEST_IN % parent_dir) manifest.close() return dist_root, out._stdout
d94d72a82265fbaa5105cf0d0a508fc5c98a271d
2,955
from pathlib import Path

import appdirs


def get_data_dir() -> Path:
    """
    Get the pda data dir
    """
    app_name = "pda"
    app_author = "StarrFox"

    cache_dir = Path(appdirs.user_data_dir(app_name, app_author))
    cache_dir.mkdir(parents=True, exist_ok=True)

    return cache_dir
183aff585c0208bb5e7c2a4bfd5810c378a948e2
2,956
import os


def directory_structure_to_front_matter(file_path: str) -> dict[str, str]:
    """
    Converts the directory structure of a recipe into a front matter.
    """
    # Make sure the path is well-formed and normalised
    path_to_recipe = os.path.normpath(file_path)
    # Unpack the directory structure into variable names
    *_, meal, difficulty, recipe_filename = path_to_recipe.split(os.sep)
    # Set some front matter using the extracted data
    return {
        "layout": "recipe",
        "difficulties": difficulty,
        "meals": meal,
        "originalfilename": recipe_filename,
        "originalpath": os.path.join(meal, difficulty, recipe_filename),
    }
93e940356e7527dad003525e6b3a8edfffc2fb63
2,957
def is_gradle_build_python_test(file_name):
    """
    Return True if file_name matches a regexp for one of the python tests run during gradle build. False otherwise.
    :param file_name: file to test
    """
    return file_name in ["gen_all.py", "test_gbm_prostate.py", "test_rest_api.py"]
27b683a9062e09aec89be23f5f8e9dd41e9b870d
2,958
def ppis_as_cxs(ppis, cxs): """ Use the complex number to both prefix the protein id and add as an attribute. Copy the original ids to the end for mapping in cytoscape. """ ppis = ppis_label_cxs(ppis, cxs) # Requires that the last column be a list of complex ids. Replace that. def pfx(id, cxnum): return str(cxnum) + '_' + id return [[pfx(p[0],cx), pfx(p[1],cx)] + p[2:-1] + [cx] for p in ppis for cx in p[-1]]
e245e0b4bba1a2c59242f1de2c0205fed5331a67
2,959
import requests def patch_get(monkeypatch, mockresponse): """monkeypatch the requests.get function to return response dict for API calls. succesful API responses come from Tradier website. :param mockresponse: [description] :type mockresponse: [type] :return: [description] :rtype: [type] :yield: [description] :rtype: [type] """ class PatchGet: def __init__(self, status, response_json_path): self.mocked = mockresponse(status, response_json_path) self.setter() def mock_get(self, url, params, headers): return self.mocked def setter(self): monkeypatch.setattr(requests, "get", self.mock_get) yield PatchGet
54c927b421fe0e26023b4020a0fadc489e134429
2,960
import textwrap def _split_out_parameters(initdoc): """Split documentation into (header, parameters, suffix) Parameters ---------- initdoc : string The documentation string """ # TODO: bind it to the only word in the line p_res = __parameters_str_re.search(initdoc) if p_res is None: return initdoc, "", "" else: # Could have been accomplished also via re.match # where new line is after :Parameters: # parameters header index ph_i = p_res.start() # parameters body index pb_i = p_res.end() # end of parameters try: pe_i = initdoc.index('\n\n', pb_i) except ValueError: pe_i = len(initdoc) result = initdoc[:ph_i].rstrip('\n '), \ initdoc[pb_i:pe_i], initdoc[pe_i:] # XXX a bit of duplication of effort since handle_docstring might # do splitting internally return handle_docstring(result[0], polite=False).strip('\n'), \ textwrap.dedent(result[1]).strip('\n'), \ textwrap.dedent(result[2]).strip('\n')
d95d00f24f9066522468cf8d79df4b59c0ab1db9
2,961
def get_context() -> RequestContext:
    """
    See GlobalContextManager.get_context()
    """
    return global_context_manager.get_context()
4427202db724e62a45e5701a0376498e5ea39954
2,962
def sched_yield(space):
    """ Voluntarily relinquish the CPU"""
    while True:
        try:
            res = rposix.sched_yield()
        except OSError as e:
            wrap_oserror(space, e, eintr_retry=True)
        else:
            return space.newint(res)
310efda027b47c41cd0a9e33357824de148219a0
2,963
def preformatted(s):
    """Return preformatted text."""
    return _tag(s, "pre")
29385a10c72fe38628077c81760e251dd2f25e72
2,964
def jitter_colors(rgb, d_brightness=0, d_contrast=0, d_saturation=0): """ Color jittering by randomizing brightness, contrast and saturation, in random order Args: rgb: Image in RGB format Numpy array of shape (h, w, 3) d_brightness, d_contrast, d_saturation: Alpha for blending drawn from [1 - d, 1 + d] Nonnegative float Optional; defaults to 0, i.e., no randomization Returns: rgb_out: Color-jittered image in RGB format Numpy array of the same shape as input """ attrs = ['brightness', 'contrast', 'saturation'] ds = [d_brightness, d_contrast, d_saturation] # In random order ind = np.array(range(len(attrs))) np.random.shuffle(ind) # in-place rgb_out = deepcopy(rgb) for idx in ind: rgb_out = adjust_image_attribute( rgb_out, attrs[idx], ds[idx], random=True) return rgb_out
7e447bf7670ba234856a42dfb81f8664bb2d4fa2
2,965
def split_numpy_array(array, portion=None, size=None, shuffle=True): """ Split numpy array into two halves, by portion or by size. Args: array (np.ndarray): A numpy array to be splitted. portion (float): Portion of the second half. Ignored if `size` is specified. size (int): Size of the second half. shuffle (bool): Whether or not to shuffle before splitting? Returns: tuple[np.ndarray]: Splitted two halves of the array. """ (a,), (b,) = split_numpy_arrays((array,), portion=portion, size=size, shuffle=shuffle) return a, b
cf956ed9dd4855a3785280bd92d9552cd19145ea
2,966
from typing import Sequence from typing import Optional from typing import Any def constraint_layer( stencils: Sequence[np.ndarray], method: Method, derivative_orders: Sequence[int], constrained_accuracy_order: int = 1, initial_accuracy_order: Optional[int] = 1, grid_step: float = None, dtype: Any = np.float32, ) -> tf.keras.layers.Layer: """Create a Keras layer for enforcing polynomial accuracy constraints.""" if constrained_accuracy_order: return PolynomialAccuracy( stencils, method, derivative_orders, accuracy_order=constrained_accuracy_order, bias_accuracy_order=initial_accuracy_order, grid_step=grid_step, dtype=dtype, ) else: if constrained_accuracy_order != 0: raise ValueError('invalid constrained_accuracy_order') return PolynomialBias( stencils, method, derivative_orders, initial_accuracy_order, grid_step, )
6194edb56db15c1a8a46d5c7947fe7784966666c
2,967
def parse_excel_xml(xml_file=None, xml_string=None): """Return a list of the tables (2D arrays) in the Excel XML. Provide either the path to an XML file, or a string of XML content. """ handler = ExcelHandler() if xml_file is not None: parse(xml_file, handler) elif xml_string is not None: parseString(xml_string, handler) else: raise ValueError("At least one of xml_file or xml_string should be" " provided.") return handler.tables
8fef6b38576281421e51da1d7bc47750b62e6316
2,968
def load_stt_plugin(module_name):
    """Wrapper function for loading stt plugin.

    Arguments:
        module_name (str): Mycroft stt module name from config
    Returns:
        class: STT plugin class
    """
    return load_plugin(module_name, PluginTypes.STT)
e321d65af7ba2c04dbd3be79321ced26a0622cc6
2,969
def predict_label(model, data, as_prob=False):
    """Predicts the data target

    Assumption: Positive class label is at position 1

    Parameters
    ----------
    name : Tensorflow or PyTorch Model
        Model object retrieved by :func:`load_model`
    data : DataCatalog
        Dataset used for predictions

    Returns
    -------
    predictions : 2d numpy array with predictions
    """
    print(f"Predicting label '{data.target}' of {data.name} dataset.")
    features = data.encoded_normalized.drop(data.target, axis=1)
    predictions = model.predict(features)
    predictions = predictions[:, 1]

    if not as_prob:
        predictions = predictions.round()

    acc = accuracy_score(data.raw[data.target], predictions.round())
    print(f"Model accuracy is: {(100 * acc).round(2)}%.")

    return predictions
440aa695a281aeac83afbfe96cf0925fdf24faf1
2,970
def show_subpath(subpath):
    """Use a converter: the variable follows the `path` rule
    (like `string`, but it may contain slashes)."""
    # show the subpath after /path/
    return 'Subpath %s' % escape(subpath)
a8f924d77f6c6b3b759897f4b22ee8b14aafd7d7
2,971
def backward_propagation(parameters, cache, X, Y):
    """
    Implement the backward propagation using the instructions above.

    Arguments:
    parameters -- python dictionary containing our parameters
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
    X -- input data of shape (2, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)

    Returns:
    grads -- python dictionary containing your gradients with respect to different parameters
    """
    m = X.shape[1]

    # First, retrieve W1 and W2 from the dictionary "parameters".
    W1 = parameters['W1']
    W2 = parameters['W2']

    # Retrieve also A1 and A2 from dictionary "cache".
    A1 = cache['A1']
    A2 = cache['A2']

    # Backward propagation: calculate dW1, db1, dW2, db2.
    dZ2 = A2 - Y
    dW2 = (1 / m) * np.dot(dZ2, A1.T)
    db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))
    dW1 = (1 / m) * np.dot(dZ1, X.T)
    db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)

    grads = {'dW1': dW1,
             'db1': db1,
             'dW2': dW2,
             'db2': db2}

    return grads
567f6a087854e19e3049b53773a272c7c13409f2
2,972
def MakeCdfFromHist(hist, name=''):
    """Makes a CDF from a Hist object.

    Args:
        hist: Pmf.Hist object
        name: string name for the data.

    Returns:
        Cdf object
    """
    return MakeCdfFromItems(hist.Items(), name)
52fd379383b3099b9764fe6e3590b5909ba04edc
2,973
def box_iou(boxes, clusters):
    """
    Introduction
    ------------
        Compute the distance (IoU) between each box and each cluster center.
    Parameters
    ----------
        boxes: all of the box data
        clusters: the cluster centers
    """
    box_num = boxes.shape[0]
    cluster_num = clusters.shape[0]
    box_area = boxes[:, 0] * boxes[:, 1]
    # Repeat each box area 9 times, once per cluster center
    box_area = box_area.repeat(cluster_num)
    box_area = np.reshape(box_area, [box_num, cluster_num])

    cluster_area = clusters[:, 0] * clusters[:, 1]
    cluster_area = np.tile(cluster_area, [1, box_num])
    cluster_area = np.reshape(cluster_area, [box_num, cluster_num])

    # Compute the IoU of the two rectangles. All rectangles are assumed to have
    # their top-left corner at the origin, so the overlap area is just the
    # product of the minimum width and the minimum height.
    boxes_width = np.reshape(boxes[:, 0].repeat(cluster_num), [box_num, cluster_num])
    clusters_width = np.reshape(np.tile(clusters[:, 0], [1, box_num]), [box_num, cluster_num])
    min_width = np.minimum(clusters_width, boxes_width)

    boxes_high = np.reshape(boxes[:, 1].repeat(cluster_num), [box_num, cluster_num])
    clusters_high = np.reshape(np.tile(clusters[:, 1], [1, box_num]), [box_num, cluster_num])
    min_high = np.minimum(clusters_high, boxes_high)

    iou = np.multiply(min_high, min_width) / (box_area + cluster_area - np.multiply(min_high, min_width))
    return iou
bffdab02c3746be5a7ade1b86bf39d03bce4c3c5
2,974
def addr(arr):
    """ Get address of numpy array's data """
    return arr.__array_interface__['data'][0]
910c893dc47e3f864e915cdf114c3ed127f3ea43
2,975
def rank_five_cards(cards): """Returns an (array) value that represents a strength for a hand. These can easily be compared against each other.""" # List of all card values values = sorted([card.number for card in cards]) # Checks if hand is a straight is_straight = all([values[i] == values[0] + i for i in range(5)]) # Additional straight check if not is_straight: # Weakest straight is_straight = all(values[i] == values[0] + i for i in range(4)) and values[4] == 12 # Rotate values as the ace is weakest in this case values = values[1:] + values[:1] # Checks if hand is a flush is_flush = all([card.suit == cards[0].suit for card in cards]) # Get card value counts value_count = {value: values.count(value) for value in values} # Sort value counts by most occuring sorted_value_count = sorted([(count, value) for value, count in value_count.items()], reverse=True) # Get all kinds (e.g. four of a kind, three of a kind, pair) kinds = [value_count[0] for value_count in sorted_value_count] # Get values for kinds kind_values = [value_count[1] for value_count in sorted_value_count] # Royal flush if is_straight and is_flush and values[0] == 8: return [ROYAL_FLUSH] + [str(value) for value in values] # Straight flush if is_straight and is_flush: return [STRAIGHT_FLUSH] + kind_values # Four of a kind if kinds[0] == 4: return [FOUR_OF_A_KIND] + kind_values # Full house if kinds[0] == 3 and kinds[1] == 2: return [FULL_HOUSE] + kind_values # Flush if is_flush: return [FLUSH] + kind_values # Straight if is_straight: return [STRAIGHT] + kind_values # Three of a kind if kinds[0] == 3: return [THREE_OF_A_KIND] + kind_values # Two pair if kinds[0] == 2 and kinds[1] == 2: return [TWO_PAIR] + kind_values # Pair if kinds[0] == 2: return [PAIR] + kind_values # No pair return [HIGH_CARD] + kind_values
912625b50d33dd7c4fef41e15e018eb3f86a0911
2,976
def _get_wmi_wbem():
    """Returns a WMI client connected to localhost ready to do queries."""
    client, _ = _get_win32com()
    if not client:
        return None
    wmi_service = client.Dispatch('WbemScripting.SWbemLocator')
    return wmi_service.ConnectServer('.', 'root\\cimv2')
2b888b391f3bf148e4b13abc8b54c1ad3f97cfff
2,977
from typing import Dict
from typing import Any

import pandas as pd


def get_skmtea_instances_meta(version, group_instances_by=None) -> Dict[str, Any]:
    """
    Args:
        group_by (str, optional): How to group detection labels.
            Currently only supports grouping by "supercategory".
    """
    assert group_instances_by in [None, "supercategory"], f"group_by={group_instances_by}"

    path_manager = env.get_path_manager()

    if group_instances_by is None:
        thing_ids = [k["id"] for k in SKMTEA_DETECTION_CATEGORIES]
        thing_classes = [k["name"] for k in SKMTEA_DETECTION_CATEGORIES]
        thing_colors = [k["color"] for k in SKMTEA_DETECTION_CATEGORIES]
    elif group_instances_by == "supercategory":
        things = {
            k["supercategory_id"]: (k["supercategory"], k["color"])
            for k in SKMTEA_DETECTION_CATEGORIES
        }
        thing_ids = list(things.keys())
        thing_classes = [v[0] for v in things.values()]
        thing_colors = [v[1] for v in things.values()]
    else:
        raise ValueError(f"{group_instances_by} not supported")

    # Mapping from the incontiguous qDESS category id to an id in [0, N]
    # N=15 generally, N=4 if group by supercategory
    thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}

    # Segmentation classes
    # TODO: Add support for subselecting classes.
    # seg_classes = [k["name"] for k in QDESS_SEGMENTATION_CATEGORIES]
    # seg_colors = [k["color"] for k in QDESS_SEGMENTATION_CATEGORIES]
    # seg_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
    # seg_abbrevs = [k["abbrev"] for k in QDESS_SEGMENTATION_CATEGORIES]

    paths = get_paths(version)
    ret = {
        # Detection
        "group_instances_by": group_instances_by,
        "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
        "thing_classes": thing_classes,
        "thing_colors": thing_colors,
        "scan_metadata": pd.read_csv(path_manager.get_local_path(paths.metadata_csv), index_col=0),
        # This mask path is temporary. In the future, the segmentations will be made
        # available directly through the recon h5 file.
        "mask_gw_corr_dir": path_manager.get_local_path(paths.mask_gradwarp_corrected),
        "version": version,
    }
    return ret
88beb9eb0dfb9fceb1d9e74bf0cca53846ac1d7f
2,978
def join_arrays(a, b):
    """
    Joining Arrays Row-wise

    Parameters
    ----------
    a : array
        One of the arrays
    b : array
        Second of the arrays

    Returns
    -------
    arr : array
        Joined two arrays row wise
    """
    return np.r_[a, b]
7bc3b5824573a8323834280ce709cd7ebe6f9639
2,979
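A brief usage sketch (assumes numpy imported as np), showing the row-wise stacking performed by np.r_:
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
join_arrays(a, b)   # array([[1, 2], [3, 4], [5, 6]]), shape (3, 2)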
def psvd(a: np.ndarray):
    """Photonic SVD architecture

    Args:
        a: The matrix for which to perform the svd

    Returns:
        A tuple of singular values and the two corresponding SVD architectures
        :math:`U` and :math:`V^\\dagger`.
    """
    l, d, r = svd(a)
    return rectangular(l), d, rectangular(r)
ae59e40f4ad45f97d1770b904619f4066030d3af
2,980
def vmatrix(vma):
    """ write a variable zmatrix (bohr/radian) to a string (angstroms/degree)
    """
    assert automol.zmatrix.v.is_valid(vma)
    vma_str = automol.zmatrix.v.string(vma)
    return vma_str
12132cfab4c06716836ca7834a14ac5525fc663c
2,981
def quote_etag(etag_str):
    """
    If the provided string is already a quoted ETag, return it. Otherwise, wrap
    the string in quotes, making it a strong ETag.
    """
    if ETAG_MATCH.match(etag_str):
        return etag_str
    else:
        return '"%s"' % etag_str
114734b88502194050fa10ff13b8a20bdae60b4e
2,982
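A brief usage sketch; ETAG_MATCH is a module-level regex in the original source, so the already-quoted case depends on that pattern:
quote_etag('abc')     # -> '"abc"' (wrapped into a strong ETag)
quote_etag('"abc"')   # -> '"abc"' (returned unchanged when ETAG_MATCH accepts it)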
def dummy_func_1(input_array):
    """
    a sample fitness function that uses the closeness of fit to a polynomial
    with random coefficients to calculate fitness (loss)

    Args:
        input_array(array): iterable of 16 floats between 0 and 1

    Returns:
        loss(float): an approximation of how close the polynomial with
            coefficients determined by input is to the target polynomial (Ben)
    """
    n_samples = 10_000
    test_range = np.linspace(0, 1, n_samples)
    target = polyval(test_range, TARGET, tensor=False)
    output = polyval(test_range, input_array, tensor=False)
    loss = np.sum(abs(target - output)) / n_samples
    return -1 * loss
eb89a86107f763b0f6c166f2b430ae62a8e68227
2,983
import logging
from datetime import datetime

import numpy as np
import tensorflow as tf


def validation_loop(sess, model, ops, handles, valid_summary_writer, external=False):
    """ Iterates over the validation data, calculating a trained model's cross-entropy. """
    # Unpack OPs
    batch_loss_op, sentence_losses_op = ops
    # Initialize metrics
    valid_losses = list()
    sentence_losses = list()
    valid_global_step = 0

    # Unpack iterator variables
    if handles is not None:
        handle, valid_handle = handles
        feed_dict = {handle: valid_handle, model.training: False}
    else:
        feed_dict = {model.training: False}

    logging.info('Estimating validation loss ... ')
    while True:
        try:
            # Run a forward pass through the model
            # Note, per-sentence losses used by the model are already length-normalized
            fetches = sess.run([model.global_step, batch_loss_op, sentence_losses_op],
                               feed_dict=feed_dict)
            if fetches is not None:
                valid_losses += [fetches[1]]
                sentence_losses += fetches[2].tolist()
                valid_global_step = fetches[0]
                if len(sentence_losses) > 0:
                    logging.info('Evaluated {:d} sentences'.format(len(sentence_losses)))
        except tf.errors.OutOfRangeError:
            break

    # Report
    total_valid_loss = sum(valid_losses)
    mean_valid_loss = total_valid_loss / len(valid_losses)
    valid_perplexity = np.exp(mean_valid_loss)

    if not external:
        current_time = datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
        logging.info('-' * 20)
        logging.info('{:s}[VALID] Loss/ word {:.4f} | Perplexity: {:.4f} | Sentence total {:d}'
                     .format(current_time, mean_valid_loss, valid_perplexity, len(sentence_losses)))

        # Write summaries
        if valid_summary_writer:
            valid_loss_summary = \
                tf.Summary(value=[tf.Summary.Value(tag='validation_loss', simple_value=mean_valid_loss)])
            valid_perplexity_summary = \
                tf.Summary(value=[tf.Summary.Value(tag='validation_perplexity', simple_value=valid_perplexity)])
            valid_summary_writer.add_summary(valid_loss_summary, global_step=valid_global_step)
            valid_summary_writer.add_summary(valid_perplexity_summary, global_step=valid_global_step)

    return mean_valid_loss, valid_perplexity, sentence_losses, valid_global_step
72f532bccc1f184dcc3d7ab91d88e0a293d9914b
2,984
import datetime
import os
import time

from PIL import Image, ImageColor


def tile_image(layer, z, x, y, start_time, again=False, trybetter=True, real=False):
    """
    Returns asked image.
    again - is this a second pass on this tile?
    trybetter - should we try to combine this tile from better ones?
    real - should we return the tile even in not good quality?
    """
    x = x % (2 ** (z - 1))
    if y < 0 or y >= (2 ** (z - 1)):
        return None
    if not bbox.bbox_is_in(projections.bbox_by_tile(z, x, y, layer["proj"]),
                           layer.get("data_bounding_box", config.default_bbox), fully=False):
        return None
    global cached_objs, cached_hist_list
    if "prefix" in layer:
        if (layer["prefix"], z, x, y) in cached_objs:
            return cached_objs[(layer["prefix"], z, x, y)]
    if layer.get("cached", True):
        local = config.tiles_cache + layer["prefix"] + "/z%s/%s/x%s/%s/y%s." % (z, x // 1024, x, y // 1024, y)
        ext = layer["ext"]
        if "cache_ttl" in layer:
            for ex in [ext, "dsc." + ext, "ups." + ext, "tne"]:
                f = local + ex
                if os.path.exists(f):
                    if os.stat(f).st_mtime < (time.time() - layer["cache_ttl"]):
                        os.remove(f)
        gpt_image = False
        try:
            # trying to create local cache directory, if it doesn't exist
            os.makedirs("/".join(local.split("/")[:-1]))
        except OSError:
            pass
        if not os.path.exists(local + "tne") and not os.path.exists(local + "lock"):
            if os.path.exists(local + ext):  # First, look for tile in cache
                try:
                    im1 = Image.open(local + ext)
                    im1.is_ok = True
                    return im1
                except IOError:
                    if os.path.exists(local + "lock"):
                        return None
                    else:
                        os.remove(local + ext)  # Cached tile is broken - remove it
        if layer["scalable"] and (z < layer.get("max_zoom", config.default_max_zoom)) and trybetter:
            # Second, try to glue image of better ones
            if os.path.exists(local + "ups." + ext):
                try:
                    im = Image.open(local + "ups." + ext)
                    im.is_ok = True
                    return im
                except IOError:
                    pass
            ec = ImageColor.getcolor(layer.get("empty_color", config.default_background), "RGBA")
            ec = (ec[0], ec[1], ec[2], 0)
            im = Image.new("RGBA", (512, 512), ec)
            im1 = tile_image(layer, z + 1, x * 2, y * 2, start_time)
            if im1:
                im2 = tile_image(layer, z + 1, x * 2 + 1, y * 2, start_time)
                if im2:
                    im3 = tile_image(layer, z + 1, x * 2, y * 2 + 1, start_time)
                    if im3:
                        im4 = tile_image(layer, z + 1, x * 2 + 1, y * 2 + 1, start_time)
                        if im4:
                            im.paste(im1, (0, 0))
                            im.paste(im2, (256, 0))
                            im.paste(im3, (0, 256))
                            im.paste(im4, (256, 256))
                            im = im.resize((256, 256), Image.ANTIALIAS)
                            if layer.get("cached", True):
                                try:
                                    im.save(local + "ups." + ext)
                                except IOError:
                                    pass
                            im.is_ok = True
                            return im
        if not again:
            if "fetch" in layer:
                delta = (datetime.datetime.now() - start_time)
                delta = delta.seconds + delta.microseconds / 1000000.
                if (config.deadline > delta) or (z < 4):
                    im = fetchers.fetch(z, x, y, layer)  # Try fetching from outside
                    if im:
                        im.is_ok = True
                        return im
        if real and (z > 1):
            im = tile_image(layer, z - 1, int(x / 2), int(y / 2), start_time,
                            again=False, trybetter=False, real=True)
            if im:
                im = im.crop((128 * (x % 2), 128 * (y % 2), 128 * (x % 2) + 128, 128 * (y % 2) + 128))
                im = im.resize((256, 256), Image.BILINEAR)
                im.is_ok = False
                return im
    else:
        if "fetch" in layer:
            delta = (datetime.datetime.now() - start_time)
            delta = delta.seconds + delta.microseconds / 1000000.
            if (config.deadline > delta) or (z < 4):
                im = fetchers.fetch(z, x, y, layer)  # Try fetching from outside
                if im:
                    im.is_ok = True
                    return im
a37b10319c221455e6ff1e224d96900f15dc8114
2,985
import requests
import json
import urllib


def register_geoserver_db(res_id, db):
    """ Attempts to register a GeoServer layer """

    geoserver_namespace = settings.DATA_SERVICES.get("geoserver", {}).get('NAMESPACE')
    geoserver_url = settings.DATA_SERVICES.get("geoserver", {}).get('URL')
    geoserver_user = settings.DATA_SERVICES.get("geoserver", {}).get('USER')
    geoserver_pass = settings.DATA_SERVICES.get("geoserver", {}).get('PASSWORD')
    geoserver_directory = settings.DATA_SERVICES.get("geoserver", {}).get('IRODS_DIR')
    geoserver_auth = requests.auth.HTTPBasicAuth(
        geoserver_user,
        geoserver_pass
    )

    workspace_id = f"{geoserver_namespace}-{res_id}"

    headers = {"content-type": "application/json"}

    if any(i in db['layer_name'] for i in [".", ","]):
        return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"],
                "message": "Error: Unable to register GeoServer layer."}

    rest_url = f"{geoserver_url}/workspaces/{workspace_id}/{db['store_type']}/{db['layer_name'].replace('/', ' ')}/external.{db['file_type']}"
    data = f"file://{geoserver_directory}/{db['hs_path']}"
    response = requests.put(rest_url, data=data, headers=headers, auth=geoserver_auth)

    if response.status_code != 201:
        return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"],
                "message": "Error: Unable to register GeoServer layer."}

    rest_url = f"{geoserver_url}/workspaces/{workspace_id}/{db['store_type']}/{db['layer_name'].replace('/', ' ')}/{db['layer_group']}/{db['file_name']}.json"
    response = requests.get(rest_url, headers=headers, auth=geoserver_auth)

    try:
        if json.loads(response.content.decode('utf-8'))[db["verification"]]["enabled"] is False:
            return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"],
                    "message": "Error: Unable to register GeoServer layer."}
    except:
        return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"],
                "message": "Error: Unable to register GeoServer layer."}

    bbox = json.loads(response.content)[db["verification"]]["nativeBoundingBox"]

    data = response.content.decode('utf-8').replace('"name":"' + db["file_name"] + '"',
                                                    '"name":"' + db["layer_name"].replace("/", " ") + '"')
    response = requests.put(rest_url, headers=headers, auth=geoserver_auth, data=data)

    if response.status_code != 200:
        return {"success": False, "type": db["layer_type"], "layer_name": db["layer_name"],
                "message": "Error: Unable to register GeoServer layer."}

    if db["layer_type"] == "GeographicRaster":
        try:
            hydroshare_url = "/".join(settings.HYDROSHARE_URL.split("/")[:-1])
            layer_vrt_url = f"{hydroshare_url}/resource/{'.'.join(db['hs_path'].split('.')[:-1])}.vrt"
            response = requests.get(layer_vrt_url)
            vrt = etree.fromstring(response.content.decode('utf-8'))
            layer_max = None
            layer_min = None
            layer_ndv = None
            for element in vrt.iterfind(".//MDI"):
                if element.get("key") == "STATISTICS_MAXIMUM":
                    layer_max = element.text
                if element.get("key") == "STATISTICS_MINIMUM":
                    layer_min = element.text
            try:
                layer_ndv = vrt.find(".//NoDataValue").text
            except:
                layer_ndv = None
            if layer_max is not None and layer_min is not None and layer_min < layer_max and layer_ndv is not None:
                layer_style = get_layer_style(layer_max, layer_min, layer_ndv, db["layer_name"].replace("/", " "))
                rest_url = f"{geoserver_url}/workspaces/{workspace_id}/styles"
                headers = {"content-type": "application/vnd.ogc.sld+xml"}
                response = requests.post(rest_url, data=layer_style, auth=geoserver_auth, headers=headers)
                if response.status_code == 201:
                    rest_url = f"{geoserver_url}/layers/{workspace_id}:{db['layer_name'].replace('/', ' ')}"
                    headers = {"content-type": "application/json"}
                    body = '{"layer": {"defaultStyle": {"name": "' + db["layer_name"].replace("/", " ") + '", "href":"https:\/\/geoserver.hydroshare.org\/geoserver\/rest\/styles\/' + db["layer_name"].replace("/", " ") + '.json"}}}'
                    response = requests.put(rest_url, data=body, auth=geoserver_auth, headers=headers)
        except Exception as e:
            pass

    return {"success": True, "type": db["layer_type"], "layer_name": db["layer_name"],
            "message": f"{'/'.join((geoserver_url.split('/')[:-1]))}/{workspace_id}/wms?service=WMS&version=1.1.0&request=GetMap&layers={workspace_id}:{urllib.parse.quote(db['layer_name'].replace('/', ' '))}&bbox={bbox['minx']}%2C{bbox['miny']}%2C{bbox['maxx']}%2C{bbox['maxy']}&width=612&height=768&srs={bbox['crs']}&format=application/openlayers"}
42bc90904a305964a9d37779e85004843b03c8d1
2,986
def move_file(source, destination):
    """perform mv command to move a file from source to destination

    Returns True if move is successful
    """
    # print("MOV:" + source + "-->" + destination)
    mv_cmd = ['mv', source, destination]
    if not getReturnStatus(mv_cmd):
        return False
    return True
d79e559fa988da8e1adfe215684187148733e352
2,987
def _freedman_diaconis_bins(a):
    """Calculate number of hist bins using Freedman-Diaconis rule."""
    # From http://stats.stackexchange.com/questions/798/
    a = np.asarray(a)
    iqr = stats.scoreatpercentile(a, 75) - stats.scoreatpercentile(a, 25)
    h = 2 * iqr / (len(a) ** (1 / 3))
    bins = int(np.ceil((a.max() - a.min()) / h)) if h != 0 else int(np.sqrt(a.size))
    return bins
f3e7ebc5da021ac6518a1e501eefbfa3b06c14e6
2,988
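A brief usage sketch (assumes numpy as np and scipy.stats as stats, as the function itself does):
data = np.linspace(0.0, 10.0, 101)   # IQR = 5, so h = 2 * 5 / 101 ** (1 / 3) ≈ 2.15
_freedman_diaconis_bins(data)        # -> 5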
def to_ascii_bytes(string):
    """Convert unicode to ascii byte string."""
    return bytes(string, 'ascii') if PY3 else bytes(string)
adca8c49f27f53334ae19a393d5dc00a7592f7db
2,989
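A brief usage sketch; with PY3 truthy this is equivalent to str.encode('ascii'):
to_ascii_bytes('hello')   # -> b'hello'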
def get_user(user_id):
    """ get a user """
    app.logger.debug("get_user({0})".format(user_id))
    try:
        response = app.usersClient.get_user(user_id)
        return jsonify(response)
    except OktaError as e:
        message = {
            "error_causes": e.error_causes,
            "error_summary": e.error_summary,
            "error_id": e.error_id,
            "error_code": e.error_code
        }
        return make_response(jsonify(message), e.status_code)
a259f70a83ecc1a55ef4b1c467fef4fdd29da00f
2,990
def matrix_conv_both_methods_from_avg(n_realz, input_folder, mapping, v_tuple, t_tuple,
                                      prefix='real_', numbered=True, verbose=False):
    """
    Convergence of the aggregate transition matrix, both considering the frequency and not
    considering the frequency, corresponding to the stencil method and the extended stencil method.

    :param lag_array:
    :param n_realz:
    :param input_folder:
    :param mapping:
    :param time_step:
    :param prefix:
    :param numbered:
    :param verbose:
    :return:
    """
    if (not numbered) and n_realz > 1:
        raise ValueError('Expecting only one file when no numbers are used for the input data')
    # unpack final transition matrices
    v_mat, v_mat_extend = v_tuple
    t_mat, t_mat_extend = t_tuple
    v_log_edges = mapping.v_log_edges
    n_v_class = mapping.n_abs_v_classes
    n_theta_class = mapping.n_theta_classes
    theta_edges = mapping.theta_edges
    v_output = np.zeros((n_v_class, n_v_class))
    theta_output = np.zeros((n_theta_class, n_theta_class))
    v_output_nofreq = np.zeros((n_v_class, n_v_class))
    theta_output_nofreq = np.zeros((n_theta_class, n_theta_class))
    total_transitions = 0
    lag = 1
    v_norm, v_norm_nofreq, t_norm, t_norm_nofreq, n_transition = [[] for _ in range(5)]
    for j in range(n_realz):
        print('realization number ' + str(j))
        start_idx = 0
        # load the polar coordinates file
        data_path = os.path.join(input_folder, 'avg_polar_' + str(j) + '.npz')
        data = np.load(data_path)
        big_v, big_theta, big_f, ptr_list = data['V'], data['Theta'], data['F'], data['ptr']
        for i in ptr_list:
            new_v, new_theta, new_f = big_v[start_idx:i], big_theta[start_idx:i], big_f[start_idx:i]
            length = i - start_idx
            start_idx = i
            if len(new_v) > lag:
                class_v = np.array(mapping.find_1d_class_idx(np.log(new_v), v_log_edges), dtype=int)
                class_theta = np.array(mapping.find_1d_class_idx(new_theta, theta_edges), dtype=int)
                count_matrix_with_freq_one_trajectory(v_output, lag, class_v, new_f)
                count_matrix_with_freq_one_trajectory(theta_output, lag, class_theta, new_f)
                v_current = normalize_columns(v_output)
                t_current = normalize_columns(theta_output)
                v_norm.append(np.linalg.norm(v_current - v_mat))
                t_norm.append(np.linalg.norm(t_current - t_mat))
                # get the transition matrices for the extended method (v, theta, f) -> input (v, theta)
                count_matrix_one_trajectory(v_output_nofreq, lag, class_v)
                count_matrix_one_trajectory(theta_output_nofreq, lag, class_theta)
                v_current = normalize_columns(v_output_nofreq)
                t_current = normalize_columns(theta_output_nofreq)
                v_norm_nofreq.append(np.linalg.norm(v_current - v_mat_extend))
                t_norm_nofreq.append(np.linalg.norm(t_current - t_mat_extend))
                total_transitions += length
                n_transition.append(total_transitions)
    return v_norm, t_norm, v_norm_nofreq, t_norm_nofreq, n_transition
f67885b779bde4477de655ba5b4b00bde6a597eb
2,991
def zipper(sequence):
    """Given a sequence return a list that has the same length as the original
    sequence, but each element is now a list with an integer and the original
    element of the sequence."""
    n = len(sequence)
    rn = range(n)
    data = list(zip(rn, sequence))
    return data
af7f0c495d920e54ea033696aefc27379b667102
2,992
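A brief usage sketch, pairing each element with its index:
zipper(['a', 'b', 'c'])   # -> [(0, 'a'), (1, 'b'), (2, 'c')]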
def normalizeRows(x):
    """
    Implement a function that normalizes each row of the matrix x (to have unit length)

    Argument:
    x -- A numpy matrix of shape (n, m)

    Returns:
    x -- The normalized (by row) numpy matrix
    """
    x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True)
    x = x / x_norm
    return x
f305aafa614121c0948725bc064e255ab44595f3
2,993
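A brief usage sketch (assumes numpy imported as np); every row is scaled to unit L2 norm:
x = np.array([[0.0, 3.0], [4.0, 4.0]])
normalizeRows(x)
# array([[0.        , 1.        ],
#        [0.70710678, 0.70710678]])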
def weat_p_value(X, Y, A, B, embd, sample=1000):
    """Computes the one-sided P value for the given list of association and target word pairs

    Arguments
        X, Y : List of association words
        A, B : List of target words
        embd : Dictionary of word-to-embedding for all words
        sample : Number of random permutations used.

    Returns
        The one-sided p value of the permutation test.
    """
    size_of_permutation = min(len(X), len(Y))
    X_Y = X + Y
    test_stats_over_permutation = []
    Xmat = np.array([embd[w.lower()] for w in X if w.lower() in embd])
    Ymat = np.array([embd[w.lower()] for w in Y if w.lower() in embd])
    Amat = np.array([embd[w.lower()] for w in A if w.lower() in embd])
    Bmat = np.array([embd[w.lower()] for w in B if w.lower() in embd])

    if not sample:
        permutations = combinations(X_Y, size_of_permutation)
    else:
        permutations = [random_permutation(X_Y, size_of_permutation) for s in range(sample)]

    for Xi in permutations:
        Yi = filterfalse(lambda w: w in Xi, X_Y)
        Ximat = np.array([embd[w.lower()] for w in Xi if w.lower() in embd])
        Yimat = np.array([embd[w.lower()] for w in Yi if w.lower() in embd])
        test_stats_over_permutation.append(test_statistic(Ximat, Yimat, Amat, Bmat))

    unperturbed = test_statistic(Xmat, Ymat, Amat, Bmat)
    is_over = np.array([o > unperturbed for o in test_stats_over_permutation])
    return is_over.sum() / is_over.size
7c66eaa825d9e2b84ff1a49fa81de9a872ce3271
2,994
def toggle_nullclines():
    """Make an interactive plot of nullclines and fixed points of the
    Gardner-Collins synthetic toggle switch.
    """
    # Set up sliders
    params = [
        dict(
            name="βx",
            start=0.1,
            end=20,
            step=0.1,
            value=10,
            long_name="beta_x_slider",
        ),
        dict(
            name="βy",
            start=0.1,
            end=20,
            step=0.1,
            value=10,
            long_name="beta_y_slider",
        ),
        dict(name="n", start=1, end=10, step=0.1, value=4, long_name="n_slider"),
    ]
    sliders = [
        bokeh.models.Slider(
            start=param["start"],
            end=param["end"],
            value=param["value"],
            step=param["step"],
            title=param["name"],
            width=150,
        )
        for param in params
    ]

    # Build base plot with starting parameters
    beta = 10
    n = 4

    # Compute nullclines
    x_y = np.linspace(0, 20, 400)
    y_x = np.linspace(0, 20, 400)
    x_x = beta / (1 + y_x ** n)
    y_y = beta / (1 + x_y ** n)

    cds = bokeh.models.ColumnDataSource(data=dict(x_x=x_x, x_y=x_y, y_x=y_x, y_y=y_y))

    # Make the plot
    p = bokeh.plotting.figure(
        frame_height=250,
        frame_width=250,
        x_axis_label="x",
        y_axis_label="y",
        x_range=[-1, 20],
        y_range=[-1, 20],
    )
    p.line(x="x_x", y="y_x", source=cds, line_width=2, legend_label="x nullcline")
    p.line(
        x="x_y",
        y="y_y",
        source=cds,
        line_width=2,
        color="orange",
        legend_label="y nullcline",
    )

    cds_stable = bokeh.models.ColumnDataSource(
        dict(x=[0.0009999, 9.99999999999], y=[9.99999999999, 0.0009999])
    )
    cds_unstable = bokeh.models.ColumnDataSource(
        dict(x=[1.533012798623252], y=[1.533012798623252])
    )
    p.circle(source=cds_stable, x="x", y="y", color="black", size=10)
    p.circle(
        source=cds_unstable,
        x="x",
        y="y",
        line_color="black",
        fill_color="white",
        line_width=2,
        size=10,
    )

    # Callback (uses JavaScript)
    js_code = jsfuns["rootfinding"] + jsfuns["toggle_nullclines"] + "callback()"
    callback = bokeh.models.CustomJS(
        args=dict(cds=cds, cdsStable=cds_stable, cdsUnstable=cds_unstable), code=js_code
    )

    # We use the `js_on_change()` method to call the custom JavaScript code.
    for param, slider in zip(params, sliders):
        callback.args[param["long_name"]] = slider
        slider.js_on_change("value", callback)

    # Return layout
    return bokeh.layouts.row(
        p,
        bokeh.models.Spacer(width=30),
        bokeh.layouts.column(bokeh.models.Spacer(height=40), *sliders),
    )
613303946b3abff9902e060dee952e9303fa3b52
2,995
def is_chinese_word_add_number(s):
    """Check whether the string is Chinese text mixed with digits."""
    if len(s) == 0:
        return False
    else:
        for w in s:
            if not is_chinese(w) and not is_number(w):
                return False
        return True
a6524d31c4fbeb866406eec0617fd99f88ba40a0
2,996
def get_rack_id_by_label(rack_label):
    """ Find the rack id for the rack label

    Returns:
        rack_id or None
    """
    rack_id = None
    session = persistent_mgr.create_database_session()
    rack = persistent_mgr.get_rack_by_label(session, rack_label)
    if rack:
        rack_id = rack.rack_id
    session.close()
    return rack_id
332df032e05fd8e3dde47dd1513bc6f5c381cfa3
2,997
import torch


def cat(xs: torch.Tensor, lx: torch.Tensor) -> torch.Tensor:
    """Cat the padded xs via lengths lx

    Args:
        xs (torch.FloatTensor): of size (N, T, V)
        lx (torch.LongTensor): of size (N, ), whose elements are (lx0, lx1, ...)

    Return:
        x_gather (torch.FloatTensor): size (lx0+lx1+..., V)
    """
    assert xs.dtype in [torch.float, torch.float16, torch.float64], \
        f"expect xs to be torch.float type, instead of {xs.dtype}"
    assert xs.size(0) == lx.size(0)

    return _GatherCat.apply(xs.contiguous(), lx.to(device=xs.device, dtype=torch.int32))
60e27c001b39c6b3afe0bbe3c3743bcd817e9fbf
2,998
def rule(n: int) -> dict:
    """Implement one of the 256 rules of elementary cellular automata.

    Args:
        n: The id of the rule (1-256).

    Returns:
        A mapping from a tuple of 3 cell values to a single cell value.
    """
    assert n > 0 and n < 257, "must choose a rule between 1 and 256"
    values = to_bin(n)
    return {s: v for s, v in zip(STATES, values)}
e423675cb3fba18b62e42a7509274b13ee8eeb0f
2,999
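A hedged illustration only: STATES and to_bin are module-level helpers in the original source, so hypothetical stand-ins are defined here to show the intended mapping:
STATES = [(1, 1, 1), (1, 1, 0), (1, 0, 1), (1, 0, 0),
          (0, 1, 1), (0, 1, 0), (0, 0, 1), (0, 0, 0)]

def to_bin(n):
    # 8-bit big-endian expansion of the rule id (assumed encoding)
    return [int(b) for b in format(n, '08b')]

rule(110)[(1, 1, 0)]   # -> 1 under these assumed helpers (110 == 0b01101110)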