content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import numpy as np
import scipy.linalg
import scipy.sparse.linalg

def sortedEig(X, M=None, k=None, lambda0=0):
    """
    Return the k largest eigenvalues and the corresponding eigenvectors
    of the solution to X*u = b*M*u

    Inputs:
        X: matrix
        M: matrix
        k: (int) if k is None, return all but one.
        lambda0: (float) regularization parameter to ensure positive definiteness
            so that the Cholesky decomposition works
    Outputs:
        b: vector of eigenvalues
        U: matrix of eigenvectors; each column is an eigenvector
    """
    if k is None:
        k = X.shape[0]
        if M is None:
            (b, U) = scipy.linalg.eig(X)
        else:
            (b, U) = scipy.linalg.eig(X, M + lambda0 * np.eye(M.shape[0]))
        idx = b.argsort()[-k:][::-1]
        return b[idx], U[:, idx]
    else:
        if M is None:
            (b, U) = scipy.sparse.linalg.eigsh(X, k)
        else:
            (b, U) = scipy.sparse.linalg.eigsh(X, k, M + lambda0 * np.eye(M.shape[0]))
        return b, U
957c36b625d2e7676123547e49b9a8d3e2ba48d2
34,803
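A minimal usage sketch for the sortedEig row above (the small symmetric test matrix is made up for illustration; assumes NumPy and SciPy are installed):

import numpy as np

# Build a small symmetric matrix so the eigenvalues are real.
A = np.array([[2.0, 1.0],
              [1.0, 3.0]])

# Dense path: k is None and M is None, so scipy.linalg.eig runs on A alone.
vals, vecs = sortedEig(A)
print(vals)   # eigenvalues sorted from largest to smallest
print(vecs)   # corresponding eigenvectors as columns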
from typing import Dict
from typing import List

import numpy as np

def _fourier_transform_multi_fermionic_mode(
        n: int, amplitude: complex, modes: List[int]) -> np.ndarray:
    """Fermionic Fourier transform of a multi Fermionic mode base state.

    Args:
        n: State length, number of qubits used.
        amplitude: State amplitude. Absolute value must be equal to 1.
        modes: List of mode numbers which should appear in the resulting
            state. List order defines the sequence of applied creation
            operators which does not need to be normally ordered.

    Return:
        List representing a new, Fourier transformed amplitudes of the
        input modes.
    """
    def fourier_transform_mode(k):
        unit = np.exp(-2j * np.pi * k / n)
        return [unit**j / np.sqrt(n) for j in range(n)]

    def append_in_normal_order(index, mode):
        phase = 1
        mode = n - 1 - mode
        for i in range(n):
            bit = 1 << i
            if i == mode:
                if index & bit != 0:
                    return None, None
                return index | bit, phase
            elif index & bit:
                phase *= -1

    state = {0: amplitude}
    for m in modes:
        transform = fourier_transform_mode(m)
        new_state = {}  # type: Dict[int, complex]
        for index in state:
            for mode in range(len(transform)):
                new_index, new_phase = append_in_normal_order(index, mode)
                if new_index is not None:
                    if new_index not in new_state:
                        new_state[new_index] = 0
                    new_amplitude = state[index] * transform[mode] * new_phase
                    new_state[new_index] += new_amplitude
        state = new_state

    result = np.zeros(1 << n, dtype=complex)
    for i in range(len(result)):
        if i in state:
            result[i] = state[i]
    return result / np.linalg.norm(result)
a4983387f622f450d31e25ba1a30027eda9d99c5
34,804
def get_metric_parser(op):
    """Return a function which can parse a line with this operator."""
    return {
        OP_START_TIMER: parse_timer,
        OP_STOP_TIMER: parse_timer,
        OP_NAMED_EVENT: parse_named_event,
        OP_GAUGE: parse_gauge,
    }[op]
b78272c9f70318bc76d43ccaf03df294cc2de4ea
34,805
def parse_division(l, c, line, root_node, last_section_node):
    """
    Extracts a division node from a line

    :param l: The line number (starting from 0)
    :param c: The column number
    :param line: The line string (without indentation)
    :param root_node: The document root node.
    :return: tuple(last_div_node, last_section_node)
    """
    name = line
    name = name.replace(".", "")
    # trim whitespaces/tabs between XXX and DIVISION
    tokens = [t for t in name.split(' ') if t]
    node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
    root_node.add_child(node)
    last_div_node = node
    # do not take previous sections into account
    if last_section_node:
        last_section_node.end_line = l
        last_section_node = None
    return last_div_node, last_section_node
376322fa8678f5440a96caaa7f3b4f968e4f24de
34,806
def diff_eqs(INP, t):
    """The main set of equations"""
    Y = np.zeros((2))
    V = INP
    Y[0] = gamma * (N0 - V[0]) - tau * V[1]
    Y[1] = (
        tau * (n - 1) * (n * V[0] - V[1]) * V[1] / (n * V[0])
        + gamma * (n * N0 - n * V[0] - V[1])
        - tau * V[1]
        - tau * (n - 1) * V[1] * V[1] / (n * V[0])
        - gamma * V[1]
    )
    return Y
6736fab5d0f1c54a0d44cb64af85fca31f4b18dd
34,807
from datetime import datetime

def register():
    """
    User registration function
    :return: the Login or Register page
    """
    if request.method == "POST":
        # Extract the user data from the form
        username = request.form["FirstName"]
        lastname = request.form["LastName"]
        email = request.form["Email"]
        password = request.form["Password"]
        repeat_password = request.form["RepeatPassword"]
        # Check whether a user with the same email address
        # has already been registered
        row = db_session.query(User).filter(User.email == email).all()
        # If no user with this email address exists, register them
        if not row:
            # Check the password
            if password != repeat_password:
                flash("Ошибка при вводе паролей")
            else:
                # Hash the password
                h_password = hash_password(password)
                user = User(username, lastname, email, datetime.now(), h_password)
                db_session.add(user)
                db_session.commit()
                # Send the user to the login page
                return redirect("login")
        else:
            # If a user with the same email address already exists, report an error
            flash("Такой пользователь уже зарегистрирован. Используйте другой адрес электронной почты.")
            return redirect("register")
    return render_template("register.html")
6663508e40746484c96bb789ae091ba2c7a6662b
34,808
from pathlib import Path
import re

def get_detectron2_current_version():
    """Version is not available for import through Python since it is
    above the top level of the package. Instead, we parse it from the
    file with a regex."""
    # Get version info from detectron2 __init__.py
    version_source = (Path(__file__).parents[2] / "detectron2" / "__init__.py").read_text()
    version_number = re.findall(r'__version__ = "([0-9\.]+)"', version_source)[0]
    return version_number
52b7717fdee1fc64b7e8c3d4d3aa074373fcffb6
34,809
def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float If ``normalize == True``, return the fraction of correctly classified samples (float), else returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- jaccard_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equal to the ``jaccard_score`` function. Examples -------- >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> import numpy as np >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) check_consistent_length(y_true, y_pred, sample_weight) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize)
306b7095ec02e4192cf7de76aa31fb8503ce0a9f
34,810
def aspect_ratio(bbox, ratios):
    """
    Enumerate box for each aspect ratio.

    Args:
        bbox (:py:class:`BBox2D`): 2D bounding box.
        ratios (:py:class:`list`): list of int/float values.
    """
    cx, cy = bbox.center()
    w, h = bbox.w, bbox.h
    size = w * h

    ratios = np.asarray(ratios, dtype=np.float64)
    size_ratios = size / ratios

    ws = np.round(np.sqrt(size_ratios))
    hs = np.round(ws * ratios)

    stack = np.vstack((cx - 0.5*(ws-1), cy - 0.5*(hs-1),
                       cx + 0.5*(ws-1), cy + 0.5*(hs-1)))
    boxes = BBox2DList(stack.T, mode=XYXY)
    return boxes
73ce16b3ed755bb07e680ab5626fb2b160b4aa53
34,811
from argparse import ArgumentTypeError

def str2bool(value):
    """
    Convert CLI args to boolean
    :param str value:
    :return bool:
    """
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif value.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError("Boolean value expected.")
5b204bb20913f0214048a8332229b80017c7a701
34,812
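A short usage sketch for the str2bool row above, wiring it into argparse as a type converter (the flag name is made up for illustration):

import argparse

parser = argparse.ArgumentParser()
# argparse calls str2bool on the raw string; ArgumentTypeError becomes a usage error.
parser.add_argument("--verbose", type=str2bool, default=False)

args = parser.parse_args(["--verbose", "yes"])
print(args.verbose)  # True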
def layernorm_cx(cx, w_in):
    """Accumulates complexity of layernorm into cx = (h, w, flops, params, acts)."""
    h, w, flops, params, acts = cx["h"], cx["w"], cx["flops"], cx["params"], cx["acts"]
    params += 2 * w_in
    return {"h": h, "w": w, "flops": flops, "params": params, "acts": acts}
97ffd7678c3d4d3fbbf63b6717fe3cc11823c230
34,814
async def _get_privileges(user_id: int, chat_id: int) -> Privileges:
    """
    Check user access
    :param user_id: user id
    :param chat_id: access chat id
    :return: privileges
    """
    bot = Bot.get_current()
    chat_member = await bot.get_chat_member(chat_id, user_id)
    if chat_member.is_chat_admin():
        return Privileges.admin
    if chat_member.is_chat_member():
        return Privileges.user
    return Privileges.nothing
421cfeb48c2a1a4bfd8d0240d4f7c3acc25bb856
34,815
def sso_login_url(service):
    """Get SSO login url for service"""
    app_config = current_app.config
    login_host = app_config.get('SSO').get('HOST')
    sso_redirect_url = app_config.get(f'SERVER_{service}_DOMAIN_NAME')
    query = urlencode({
        'service': service,
        'redirect_url': f'https://{sso_redirect_url}',
        'language': get_language(),
    })
    login_url = f'{login_host}/login?{query}'
    return login_url
28828ab734d95304e60b13812de4550587f577d9
34,817
def get_config(): """Default configuration for the Harvest level.""" config = config_dict.ConfigDict() # Basic configuration. config.individual_observation_names = ["RGB"] config.global_observation_names = ["WORLD.RGB"] # Lua script configuration. config.lab2d_settings = { "levelName": # "harvest", "levelDirectory": "examples//tutorial/harvest_stella/levels", "maxEpisodeLengthFrames": 100, "numPlayers": 1, "spriteSize": 8, "simulation": { "map": """ ******* *,,,,,* * _ * *,,,,,* *,,,,,* ******* """, "prefabs": { "avatar": AVATAR, "spawn_point": SPAWN_POINT, "wall": WALL, ",": create_ground_prefab() }, "charPrefabMap": {"_": "spawn_point", "*": "wall", ",": "ground"}, "playerPalettes": [], }, } return config
8cfbac7ebe371278332e7d71f7f89b02f701b59b
34,819
def evaluate_regression_error(predicted_output, true_output, norm=norms.euclidean_2):
    """Calculate the error with respect to a norm of regression output.

    Parameters
    ----------
    predicted_output : numpy.ndarray
        The predictions made by the classifier.
    true_output : int
        The true response values.
    norm : func
        The choice of norm to use to measure error.
        Default is the Euclidean L_2 norm.

    Returns
    -------
    error : float
        Measurement of error of regression model. (squared norm)
    """
    error = norm(predicted_output - true_output)**2
    return error
243bc61234f20995a8db765653bd37cf7d3f8e40
34,820
def classify(character: str) -> int:
    """String classifier."""
    if character.isupper():
        return StringType.UPPER
    if character.islower():
        return StringType.LOWER
    if character.isnumeric():
        return StringType.NUMERIC
    return StringType.OTHER
a17af3771bc2d1dd51ea735bcc78f856a01df5cd
34,821
import json

def getGold(session, city):
    """
    Parameters
    ----------
    session : ikabot.web.session.Session
    city : dict

    Returns
    -------
    gold : int
    """
    url = 'view=finances&backgroundView=city&currentCityId={}&templateView=finances&actionRequest={}&ajax=1'.format(city['id'], actionRequest)
    data = session.post(url)
    json_data = json.loads(data, strict=False)
    gold = json_data[0][1]['headerData']['gold']
    gold = gold.split('.')[0]
    gold = int(gold)
    gold_production = json_data[0][1]['headerData']['scientistsUpkeep'] + json_data[0][1]['headerData']['income'] + json_data[0][1]['headerData']['upkeep']
    return gold, int(gold_production)
e312f7bda229340ec3709f025ae33194909e82ee
34,822
from typing import Iterable
from typing import Mapping

def _iter_but_not_str_or_map(maybe_iter):
    """Helper function to differ between iterables and iterables that are
    strings or mappings. This is used for pynads.concrete.List to determine
    if an iterable should be consumed or placed into a single value tuple.
    """
    return (isinstance(maybe_iter, Iterable)
            and not isinstance(maybe_iter, (str, Mapping)))
3dab46cfd2d2d19bd0fa744370b9059d6a0683bc
34,823
def interrogate_decision_tree(wxtree): """ Obtain a list of necessary inputs from the decision tree as it is currently defined. Return a formatted string that contains the diagnostic names, the thresholds needed, and whether they are thresholded above or below these values. This output is used to create the CLI help, informing the user of the necessary inputs. Args: wxtree (str): The weather symbol tree that is to be interrogated. Returns: list of str: Returns a formatted string descring the diagnostics required, including threshold details. """ # Get current weather symbol decision tree and populate a list of # required inputs for printing. if wxtree == "high_resolution": queries = wxcode_decision_tree() elif wxtree == "global": queries = wxcode_decision_tree_global() else: raise ValueError("Unknown decision tree name provided.") # Diagnostic names and threshold values. requirements = {} for query in queries.values(): diagnostics = get_parameter_names( expand_nested_lists(query, "diagnostic_fields") ) thresholds = expand_nested_lists(query, "diagnostic_thresholds") for diagnostic, threshold in zip(diagnostics, thresholds): requirements.setdefault(diagnostic, set()).add(threshold) # Create a list of formatted strings that will be printed as part of the # CLI help. output = [] for requirement, uniq_thresh in sorted(requirements.items()): (units,) = set(u for (_, u) in uniq_thresh) # enforces same units thresh_str = ", ".join(map(str, sorted(v for (v, _) in uniq_thresh))) output.append("{} ({}): {}".format(requirement, units, thresh_str)) n_files = len(output) formatted_string = "{}\n" * n_files formatted_output = formatted_string.format(*output) return formatted_output
14ac92c25bccd8c549de6d7a5bbf696e18c1e47e
34,824
def script_resolve_name(script_name, name):
    """
    Name resolver for scripts. Supports ROS_NAMESPACE. Does not
    support remapping arguments.
    @param name: name to resolve
    @type  name: str
    @param script_name: name of script. script_name must not
        contain a namespace.
    @type  script_name: str
    @return: resolved name
    @rtype: str
    """
    if not name:  # empty string resolves to namespace
        return roslib.names.get_ros_namespace()
    # Check for global name: /foo/name resolves to /foo/name
    if roslib.names.is_global(name):
        return name
    # Check for private name: ~name resolves to /caller_id/name
    elif roslib.names.is_private(name):
        return roslib.names.ns_join(roslib.names.make_caller_id(script_name), name[1:])
    return roslib.names.get_ros_namespace() + name
38e0be53417719bc9521b80a4621b3127aab1749
34,825
def offbyKExtra(s1, s2, k):
    """Input: two strings s1,s2 and integer k
    Process: to check if number of extra characters in s2 as compared to s1
             (or vice versa) is equal to k
    Output: return True when above condition is met otherwise return False"""
    flag = 0
    extra1 = ''
    if len(s1) > len(s2):
        for x in s1:
            if x not in s2:
                extra1 = extra1 + x
            elif s2.count(x) < s1.count(x) and x not in extra1:
                extra1 = extra1 + x * (s1.count(x) - s2.count(x))
            elif s2.count(x) > s1.count(x):
                flag = -2
                break
    if len(s1) <= len(s2):
        for y in s2:
            if y not in s1:
                extra1 = extra1 + y
            elif s1.count(y) < s2.count(y) and y not in extra1:
                extra1 = extra1 + y * (s2.count(y) - s1.count(y))
            elif s1.count(y) > s2.count(y):
                flag = -2
                break
    if flag == -2:
        return False
    elif len(extra1) == k:
        return True
    else:
        return False
10cb2480c95a729aceb219e14999dcbcf0cad1eb
34,826
def Optimizer(Efunc, x0, method='L-BFGS-B', jac=None, optns={}):
    """
    Efunc(x,count)
    See the scipy.optimize.minimize manual for options
    """
    const.CONSOLEMESSAGE('Entered in lib.Optimizer')
    methlst = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'Anneal',
               'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP']  # scipy.optimize.minimize in scipy ver0.16?
    if method not in methlst:
        mess = 'Not found method=' + method
        MessageBoxOK(mess, 'lib.Optimizer')
        return None
    nvar = len(x0)
    const.CONSOLEMESSAGE('lib.Optimizer nvar=' + str(nvar))
    if nvar <= 0:
        mess = 'No variables'
        MessageBoxOK(mess, 'lib.Optimizer')
        return None
    optcg = {'maxiter': False,  # default value.
             'disp': True,      # non-default value.
             'gtol': 1e-2,      # 1e-5, # default value.
             'eps': False}      # 1.4901161193847656e-08} # default value.
    optpw = {'maxiter': False,  # default value.
             'disp': True,      # non-default value.
             }
    count = 0
    opts = optcg
    if method == 'Powell':
        opts = optpw
    const.CONSOLEMESSAGE('lib.Optimizer #1')
    if int(scipy.__version__.split('.')[1]) >= 11:
        # for Scipy 0.11.x or later
        const.CONSOLEMESSAGE('lib.Optimizer scipy version >11')
        result = minimize(Efunc, x0=x0, method=method, options=optns)  # args=(count)
    else:
        # for Scipy 0.10.x
        const.CONSOLEMESSAGE('lib.Optimizer scipy version old')
        if method == 'CG':
            result = optimize.fmin_cg(Efunc, x0,  # args=(count),
                                      gtol=optcg['gtol'],
                                      epsilon=optcg['eps'],
                                      maxiter=optcg['maxiter'],
                                      disp=optcg['disp'])
        elif method == 'Powell':
            result = optimize.fmin_powell(Efunc, x0,  # args=(count),
                                          maxiter=optpw['maxiter'],
                                          disp=optpw['disp'])
        else:
            print('Error unknown minimization method. method =', method)
            return None
    return result
2fe2ff9263a0cb0662c30a47b90e089b92988ad2
34,828
def plot_feature_calibrator(model_graph, feature_name, plot_submodel_calibration=True, font_size=12, axis_label_font_size=14, figsize=None): """Plots feature calibrator(s) extracted from a TFL canned estimator. ``` model_graph = estimators.get_model_graph(saved_model_path) visualization.plot_feature_calibrator(model_graph, "feature_name") ``` Args: model_graph: `model_info.ModelGraph` object that includes model nodes. feature_name: Name of the feature to plot the calibrator for. plot_submodel_calibration: If submodel calibrators should be included in the output plot, when more than one calibration node is provided. These are individual calibration layers for each lattice in a lattice ensemble constructed from `configs.CalibratedLatticeEnsembleConfig`. font_size: Font size for values and labels on the plot. axis_label_font_size: Font size for axis labels. figsize: The figsize parameter passed to `pyplot.figure()`. Returns: Pyplot figure object containing the visualisation. """ input_feature_node = [ input_feature_node for input_feature_node in _input_feature_nodes(model_graph) if input_feature_node.name == feature_name ] if not input_feature_node: raise ValueError( 'Feature "{}" not found in the model_graph.'.format(feature_name)) input_feature_node = input_feature_node[0] calibrator_nodes = _output_nodes(model_graph, input_feature_node) return plot_calibrator_nodes(calibrator_nodes, plot_submodel_calibration, font_size, axis_label_font_size, figsize)
16576f1497937fb04be2d7fea17b578177564f65
34,829
def calc_reg_cdd( temperatures, t_base_cooling, model_yeardays, crit_temp_min_max=False ): """Calculate CDD for every day and daily yd shape of cooling demand Arguments ---------- temperatures : array Temperatures t_base_cooling : array Base temperature cooling model_yeardays : list Modelled yeardays Return ------ shape_yd : array Fraction of heat for every day. Array-shape: nr_of_days, 1 Note ---- - Based on temperatures of a year, the CDD are calculated for every day in a year. Based on the sum of all CDD of all days, the relative share of heat used for any day is calculated. - The Cooling Degree Days are calculated based on assumptions of the base temperature of the current year. """ if not crit_temp_min_max: cdd_d = calc_cdd( t_base_cooling, temperatures, nr_day_to_av=1, crit_temp_min_max=crit_temp_min_max) shape_cdd_d = load_profile.abs_to_rel(cdd_d) # Select only modelled yeardays shape_cdd_d_selection = shape_cdd_d[model_yeardays] cdd_d_selection = cdd_d[model_yeardays] # If no calc_provide flat curve if np.sum(cdd_d_selection) == 0: shape_cdd_d_selection = np.full( (len(model_yeardays)), 1 / len(model_yeardays)) else: cdd_d = calc_cdd( t_base_cooling, temperatures, nr_day_to_av=1, crit_temp_min_max=crit_temp_min_max) shape_cdd_d = load_profile.abs_to_rel(cdd_d) # Select only modelled yeardays shape_cdd_d_selection = shape_cdd_d[model_yeardays] cdd_d_selection = cdd_d[model_yeardays] # If no calc_provide flat curve if np.sum(cdd_d_selection) == 0: shape_cdd_d_selection = np.full( (len(model_yeardays)), 1 / len(model_yeardays)) return cdd_d_selection, shape_cdd_d_selection
62699977be16efbdd511e987e736822fca6a82c3
34,830
def get_number_of_polymorphic_sites(pileup):
    """
    # ========================================================================
    GET NUMBER OF POLYMORPHIC SITES

    PURPOSE
    -------
    Returns the number of polymorphic sites.

    INPUT
    -----
    [PILEUP] [pileup]
        A Pileup object, which represents the pileup of aligned reads.

    RETURN
    ------
    [INT] (pileup.count_polymorphic_sites())
        The number of polymorphic sites in pileup.
    # ========================================================================
    """
    return pileup.count_polymorphic_sites()
e388b20f500b141da0eedc54616703c6e444de8a
34,831
def test(sess, evaluate, ph, dataset, testmodel): """Apply the models.""" ## word model acc = Accuracies() out_sentences = [] results_w = [] for f_word in dataset.batches: batch_values, out_logits_w_out_w = sess.run( [testmodel.predictions_w, testmodel.out_logits_w], feed_dict={ph.inputs_words: f_word}) results_w.extend(out_logits_w_out_w) for a in batch_values: out_sentences.append([w for w in a]) acc.word = evaluate.simple_eval(out_sentences) ## char model out_sentences = [] results_c = [] for f_char, i_start, i_end in zip( dataset.batches_ch, dataset.index_batches_start, dataset.index_batches_end): feed = {ph.inputs_chars: f_char, ph.idx_start: i_start, ph.idx_end: i_end} batch_values, ch_out = sess.run( [testmodel.predictions_c, testmodel.out_logits_c], feed_dict=feed) results_c.extend(ch_out) for a in batch_values: out_sentences.append([w for w in a]) acc.char = evaluate.simple_eval(out_sentences) ## join models out_sentences = [] index_step = 0 for batch_w in dataset.batches: cout, wout = ([], []) for _ in batch_w: wout.append(results_w[index_step]) lsc = results_c[index_step] w_shape = results_w[index_step].shape pad_c = np.zeros(w_shape) if w_shape[0] <= lsc.shape[0]: pad_c[:w_shape[0], :w_shape[1]] = lsc[:w_shape[0], :w_shape[1]] else: pad_c[:lsc.shape[0], :lsc.shape[1]] = lsc cout.append(pad_c) index_step += 1 feed = {ph.inputs_words: batch_w, ph.lout_w: wout, ph.lout_c: cout} batch_values_joint = sess.run(testmodel.predictions_m, feed_dict=feed) for a in batch_values_joint: out_sentences.append([w for w in a]) test_stringio_joint = evaluate.write_string(out_sentences) acc.meta = evaluate.simple_eval(out_sentences) return acc, test_stringio_joint
03f2035e90f1facce191548b061c7407d9715ab7
34,832
def _compile_rules(rules_file, externals, cur_logger):
    """
    Saves Yara rule content to file, validates the content with Yara Validator,
    and uses Yara python to compile the rule set.

    Args:
        rules_file: Yara rule file content.

    Returns:
        Compiled rules, compiled rules md5.
    """
    try:
        validate = YaraValidator(externals=externals, logger=cur_logger)
        validate.validate_rules(rules_file)
    except Exception as e:
        raise e
    return True
dcad814fc91ae577952a926021dbfb71ae638080
34,833
async def hello(request):
    """Test webserver request."""
    return web.Response(text="Hello, world")
d8a68eb12fd094a5beed0610d8601ea63334aaab
34,834
def user_save(form, is_patient=False, is_office=False):
    """Function saving the user to the database."""
    user = form.save(commit=False)
    # The account is not active until the user activates it.
    user.is_active = False
    user.is_patient = is_patient
    user.is_office = is_office
    user.save()
    return user
8ce0a7af24bc72da98c015d0e9f7545069bbce19
34,836
def int_divmod(context, builder, ty, x, y):
    """
    Integer divmod(x, y). The caller must ensure that y != 0.
    """
    if ty.signed:
        return int_divmod_signed(context, builder, ty, x, y)
    else:
        return builder.udiv(x, y), builder.urem(x, y)
93fa13c703a9419ea9a5926f32a69a7269d57fd9
34,837
def initial_state(layer, dimensions=None):
    """
    Initializes the recurrence relation with an initial hidden state
    if needed, else replaces with a "None" to tell Theano that
    the network **will** return something, but it does not need
    to send it to the next step of the recurrence
    """
    if dimensions is None:
        return layer.initial_hidden_state if has_hidden(layer) else None
    else:
        return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
3788adbe14604d50b5aa9701616f3bdaa5af94a7
34,838
def lin_smooth(x, window_len=15, window='hanning'): """ Smooth the data using a window with requested size. This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. Parameters ---------- x : numpy.ndarray The input signal. Should be a 1D numpy array window_len : int The dimension of the smoothing window; should be an odd integer window : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} The type of window. A 'flat' window will produce a moving average smoothing. Returns ------- smoothed : numpy.ndarray The smoothed signal (same dimension as `x`) Example ------- >>> t = linspace(-2,2,0.1) >>> x = sin(t)+randn(len(t))*0.1 >>> y = lin_smooth(x) See also -------- numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve scipy.signal.lfilter TO-DO ----- The window parameter could be the window itself if an array instead of a string Notes ----- - length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. - Copied from: http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html """ if x.ndim != 1: raise ValueError("smooth only accepts one-dimensional arrays.") if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.") if window_len < 3: return x if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise ValueError("'Window' should be 'flat', 'hanning', 'hamming', 'bartlett', or 'blackman'") if window == 'flat': # moving average w = np.ones(window_len, 'd') else: w = eval('np.'+window+'(window_len)') y = np.convolve(w/w.sum(), x, mode='same') return y
a1a5a5c0ab76a4b38007947cc161fbcf0bc90dd9
34,839
def product_details(request, id):
    """
    The view rendering the page for one selected product and all of its details
    """
    selected_product = get_object_or_404(Product, id=id)
    # get existing number of views, increment and update model
    number = selected_product.num_of_views + 1
    Product.objects.filter(pk=id).update(num_of_views=number)
    # filtering all the relevant ratings from the Rating model:
    ratings_data = get_the_ratings_for(selected_product)
    # filtering all the sizes from the Size model:
    sizes = Size.objects.filter(format_name=selected_product.aspect_ratio)
    technologies = Technology.objects.all()
    # Bundling the data for the template to a dictionary:
    pass_to_template = {
        "selected_prod": selected_product,
        "ratings_data": ratings_data,
        'sizes': sizes,
        'technologies': technologies,
    }
    return render(
        request, "product_details.html", {"pass_to_template": pass_to_template}
    )
d567b378286a19bdc70442f18e65922912aa7c67
34,840
def compute_colors_for_labels(labels):
    """Simple function that adds fixed colors depending on the class"""
    palette = np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1, 1])
    colors = labels[:, None] * palette
    colors = (colors % 255).astype("float")
    colors /= 255
    colors[:, -1] = 1
    return colors
5ce1dc5a43d94a7bd316137b217c32cec9fab355
34,841
def vms_ajax_revoke_ip(request, vm_id, template_name='generic/form.html', form_class=RevokeIPForm):
    """
    Ajax view for detaching an elastic IP from a virtual machine.
    """
    rest_data = prep_data({'vm': ('user/vm/get_by_id/', {'vm_id': vm_id})}, request.session)

    if request.method == 'POST':
        form = form_class(data=request.POST, rest_data=rest_data)
        if form.is_valid():
            prep_data(('user/public_ip/unassign/', {'public_ip_id': form.cleaned_data['public_ip_id']}),
                      request.session)
            return messages_ajax.success(_('You have successfully revoked IP address.'))
    else:
        form = form_class(rest_data=rest_data)
    return messages_ajax.success(render_to_string(template_name,
                                                  {'form': form,
                                                   'text': _('Select an IP address to revoke:'),
                                                   'confirmation': _('Revoke')},
                                                  context_instance=RequestContext(request)),
                                 status=1)
8ae82abbde4a883673b376af05820dd882813f96
34,843
import hashlib

def make_message_hash(msg, include=(), exclude=()):
    """
    Returns hashcode for ROS message, as a hex digest.

    @param   include  message fields to include if not all, as
                      [((nested, path), re.Pattern())]
    @param   exclude  message fields to exclude, as
                      [((nested, path), re.Pattern())]
    """
    hasher = hashlib.md5()

    def walk_message(obj, top=()):
        fieldmap = get_message_fields(obj)
        fieldmap = filter_fields(fieldmap, include=include, exclude=exclude)
        for k, t in fieldmap.items():
            v, path = get_message_value(obj, k, t), top + (k, )
            if is_ros_message(v):
                walk_message(v, path)
            elif isinstance(v, (list, tuple)) and scalar(t) not in ROS_BUILTIN_TYPES:
                for x in v:
                    walk_message(x, path)
            else:
                s = "%s=%s" % (path, v)
                hasher.update(s.encode("utf-8", errors="backslashreplace"))
        if not hasattr(obj, "__slots__"):
            s = "%s=%s" % (top, obj)
            hasher.update(s.encode("utf-8", errors="backslashreplace"))

    walk_message(msg)
    return hasher.hexdigest()
325d5d0bd5d8d5f05ecfcc6983425ac121fbbea3
34,844
import pandas as pd

def non_overlap_df(input_df: pd.DataFrame) -> pd.DataFrame:
    """
    Args:
        input_df: DataFrame with possibly overlapping calls

    Returns:
        a DataFrame object with non-overlapping calls (after merge).
    """
    non_overlap = []
    for file_name, file_df in input_df.groupby(by='filename'):
        file_df.sort_values(by='begin_time', inplace=True)
        merged = merge_calls(file_df)
        non_overlap.extend(merged)
    non_overlap = pd.DataFrame(non_overlap)
    non_overlap['call_length'] = non_overlap['end_time'] - non_overlap['begin_time']
    return non_overlap
8753f0c3f28d3b9e31b415c7a4d05aab22001df1
34,845
import prometheus_client

def benchmark_last_result(project, benchmark):
    """
    Get latest benchmark result from Victoria Metrics
    Returns "-1" if result not found
    """
    query = f"last_over_time(parity_benchmark_common_result_ns{{project=\"{project}\",benchmark=\"{benchmark}\"}}[1y])"
    query_result = prometheus_client.custom_query(query=query)
    if len(query_result) > 0:
        last_benchmark_result = int(query_result[0]['value'][1])
    else:
        last_benchmark_result = -1
    return last_benchmark_result
9e7db0dda645f977fab85a98e9efdfbc7c6683be
34,846
def get_lambda_config(module, aws):
    """
    Returns the lambda function configuration if it exists.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return:
    """
    client = aws.client('lambda')

    # set API parameters
    api_params = dict(FunctionName=module.params['function_name'])
    if module.params['version'] > 0:
        api_params.update(Qualifier=str(module.params['version']))

    # check if function exists and get facts, including sha256 hash
    try:
        results = client.get_function_configuration(**api_params)
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            results = None
        else:
            module.fail_json(msg='Error retrieving function configuration: {0}'.format(e))
    return results
509db0f745a3aeaea07722f1135a339d720cabdc
34,847
from math import sqrt

def standard_deviation(lst, population=True):
    """Calculates the standard deviation for a list of numbers."""
    num_items = len(lst)
    mean = sum(lst) / num_items
    differences = [x - mean for x in lst]
    sq_differences = [d ** 2 for d in differences]
    ssd = sum(sq_differences)
    if population is True:
        # print('This is POPULATION standard deviation.')
        variance = ssd / num_items
    else:
        # print('This is SAMPLE standard deviation.')
        variance = ssd / (num_items - 1)
    sd = sqrt(variance)
    return sd
7f15f6d80adf722912cba49c3dfad9b40e2f3b2a
34,848
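A quick usage sketch for the standard_deviation row above, contrasting the population and sample variants (the data values are chosen arbitrarily for illustration):

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]

print(standard_deviation(data))                    # population: 2.0
print(standard_deviation(data, population=False))  # sample: ~2.138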
def solve(global_step): """add solver to losses""" # learning reate lr = _configure_learning_rate(82783, global_step) optimizer = _configure_optimizer(lr) tf.summary.scalar('learning_rate', lr) # compute and apply gradient losses = tf.get_collection(tf.GraphKeys.LOSSES) loss = tf.add_n(losses) regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) regular_loss = tf.add_n(regular_losses) total_loss = loss + regular_loss tf.summary.scalar('total_loss', total_loss) tf.summary.scalar('loss', loss) tf.summary.scalar('regular_loss', regular_loss) # update_ops = [] # variables_to_train = _get_variables_to_train() # update_op = optimizer.minimize(total_loss) # gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train) # grad_updates = optimizer.apply_gradients(gradients, # global_step=global_step) # update_ops.append(grad_updates) ## update moving mean and variance # if FLAGS.update_bn: # update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # update_bn = tf.group(*update_bns) # # update_ops.append(update_bn) # total_loss = control_flow_ops.with_dependencies([update_bn], total_loss) # train_op = slim.learning.create_train_op(total_loss, optimizer) if FLAGS.update_bn: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = slim.learning.create_train_op(total_loss, optimizer, global_step=global_step) else: train_op = slim.learning.create_train_op(total_loss, optimizer, global_step=global_step) return train_op
e26ccdcfef8eadc7f0335ff4d36e29158c85134b
34,849
def group_points_into_lines(edges, x_coords, y_coords, x_size=10, y_size=3):
    """
    Groups individual points into lines using the connect_line function
    :param edges:
    :param x_coords:
    :param y_coords:
    :param x_size:
    :param y_size:
    :return:
    """
    point_dict = {(x_, y_): i for i, (x_, y_) in enumerate(zip(x_coords, y_coords))}
    point = next(iter(point_dict.keys()))
    lines = []
    line = []
    while True:
        next_point = connect_line(edges, point[0], point[1], x_size, y_size)
        try:
            point_dict.pop(point)
        except KeyError:
            pass
        if next_point is not None:
            line.append(point)
            point = next_point
        else:
            lines.append(line)
            line = []
            try:
                point = next(iter(point_dict.keys()))
            except StopIteration:
                break
    return lines
dd505f739b83aef2bf91f0eb816e86c1bb7df997
34,850
def start(image):
    """Find the first row of the image that contains a non-white pixel."""
    for i in range(image.shape[0]):
        if 0 in image[i]:
            return i
504b3cfc90610f34ce1ee6791dcf4c8139f06fe8
34,851
def ip4_bytes_to_str(ip_bytes):
    """Convert ip address from byte representation to 127.0.0.1."""
    return "%d.%d.%d.%d" % unpack_ipv4(ip_bytes)
feb662972ff8d808dfeb0794cc9bfb443234fbe6
34,856
def BuildTelemax(x: int, c: int) -> str:
    """ utility fct to build Telemax Pointage """
    msg = '123456MAC:4c24e9870203PROT005170817100*Q:'
    msg += str(x + c + 1).zfill(6)
    msg += '9103'
    msg += '0071'
    msg += '0093'
    msg += '2 '
    msg += '0'
    # msg += 'xx'
    msg += '10020100^1*'
    msg += '\r\n'
    return msg
ba5d51cc6e7463d693f74eb618471fb23a62b6a9
34,857
from typing import Tuple
from typing import cast

def create_ephemeral_key_pair(curve_type: str) -> Tuple[PublicKey, SharedKeyGenerator]:
    """Facilitates ECDH key exchange."""
    if curve_type != "P-256":
        raise NotImplementedError()
    key_pair = create_new_key_pair(curve_type)

    def _key_exchange(serialized_remote_public_key: bytes) -> bytes:
        private_key = cast(ECCPrivateKey, key_pair.private_key)
        remote_point = ECCPublicKey.from_bytes(serialized_remote_public_key, curve_type)
        secret_point = remote_point.impl * private_key.impl
        secret_x_coordinate = secret_point.x
        byte_size = int_bytelen(secret_x_coordinate)
        return secret_x_coordinate.to_bytes(byte_size, byteorder="big")

    return key_pair.public_key, _key_exchange
ada825897fec2c818835f4d70298cf571ab79183
34,859
from typing import Dict
from typing import Any

def get_unlisted_livestreams_by_username(username: str) -> Dict[str, Any]:
    """Get a user's unlisted livestreams from their username.

    Args:
        username (str): The user's username.

    Returns:
        Dict[str, Any]: The unlisted livestream.
    """
    items = "user_snowflake, start_date, title, description, category, unique_id"
    res = query_one("SELECT snowflake FROM user WHERE username = ?", [username])
    stream = query_one(
        f"SELECT {items} FROM stream WHERE user_snowflake = ? AND unlisted = 1 AND end_date IS NULL AND start_date IS NOT NULL",  # noqa: E501
        [res["snowflake"]],
    )
    if stream:
        stream["username"] = username
    return stream
53a9ac945457eaa4b4418605670b13db90e201b7
34,860
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Add a weather entity from mapped location."""
    fcst_coordinator = hass.data[DOMAIN][entry.entry_id]["fcst_coordinator"]
    if not fcst_coordinator.data:
        return

    cur_coordinator = hass.data[DOMAIN][entry.entry_id]["cur_coordinator"]
    if not cur_coordinator.data:
        return

    weather_entity = WeatherbitWeather(
        fcst_coordinator,
        cur_coordinator,
        entry.data,
        hass.config.units.is_metric,
    )

    async_add_entities([weather_entity], True)
    return True
526bea80afeaa4f6c91db6d2de91f625bc5e4bcd
34,861
def parse_sync_agent_forwarder_id(json):
    """
    Extract the sync agent forwarder id from the get response of LearningLocker.

    :param json: JSON statement from the get response.
    :type json: dict(str, list(dict(str, str))
    :return: The statement forwarder id from the sync agent.
    :rtype: str
    """
    temp_forwarder_id = 0
    if len(json['edges']) > 0:
        temp_forwarder_id = json['edges'][0]['node']['_id']
    return temp_forwarder_id
4b07dc13ca978cfc3fad46e432c8c21d46ee53fa
34,862
import numpy

def generate_lineal_parameter(parameter_values):
    """Generate parameters list for lineal parameter type."""
    initial_value = parameter_values['initial_value']
    final_value = parameter_values["final_value"]
    interval = parameter_values["interval"]
    param_options = numpy.arange(
        initial_value, final_value, interval)
    return param_options.tolist()
6359a0c93c07aa3dfeba096501b73f69fb3b02f9
34,863
def get_plugin_arguments(plugin_name):
    """Gets plugin arguments, as a dict of version to argument list."""
    plugin = plugins_base.PLUGINS.get_plugin(plugin_name)
    versions = plugin.get_versions()
    return {version: plugin.get_image_arguments(version) for version in versions}
ad056ecccc5ac40120493245709235b6587955b5
34,864
import re

def to_snake_case(s):
    """Convert a string to snake-case format

    Parameters
    ----------
    s: String
        String to convert to snake-case

    Returns
    -------
    String
        Snake-case formatted string

    Notes
    -----
    Adapted from https://gist.github.com/jaytaylor/3660565

    Examples
    --------
    >>> to_snake_case("snakesOnAPlane") == "snakes_on_a_plane"
    True
    >>> to_snake_case("SnakesOnAPlane") == "snakes_on_a_plane"
    True
    >>> to_snake_case("snakes_on_a_plane") == "snakes_on_a_plane"
    True
    >>> to_snake_case("IPhoneHysteria") == "i_phone_hysteria"
    True
    >>> to_snake_case("iPhoneHysteria") == "i_phone_hysteria"
    True
    """
    s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
cf3ca065c471ed526ab15de5d6c07e9be74ddb59
34,865
def get_var(name, program=None):
    """
    Get a variable by name from the global block of a program

    Args:
        name(str): name of the variable
        program(Program|None): program object.
            If None, default_main_program() will be used.

    Returns:
        Variable
    """
    if program is None:
        program = default_main_program()
    assert isinstance(name, str)
    assert isinstance(program, Program)

    return program.global_block().var(name)
7f6bca1482834b1688175b6508fa269cfcee882a
34,866
def _update_cluster_config(cluster, session=None, user=None, **kwargs):
    """Update a cluster config."""
    check_cluster_editable(cluster, user=user)
    return utils.update_db_object(
        session, cluster, **kwargs
    )
fe7e2f099f2719385ccde62aa94c71f2c91fbf85
34,867
def read_file(file_path="data/short.list"):
    """
    Reads file, short.list by default.
    """
    data = ""
    with open(file_path, "r", encoding="utf8", errors="ignore") as file:
        data = file.read().split("\n")[14:-1]
    return tuple(set(data))
f2ced72bfa6328c6794b629d043b837144304716
34,868
import torch def sample(lnprobs, temperature=1.0): """ Sample an element from a categorical distribution :param lnprobs: Outcome log-probabilities :param temperature: Sampling temperature. 1.0 follows the given distribution, 0.0 returns the maximum probability element. :return: The index of the sampled element. """ if temperature == 0.0: return lnprobs.argmax() if temperature != 1.0: lnprobs = lnprobs / temperature # Top-p/top-k filtering next_token_logits = modeling_utils.top_k_top_p_filtering(lnprobs[None, :], top_p=NUCLEUS_P) # Sample probs = F.softmax(next_token_logits, dim=-1) if contains_inf(probs) or contains_nan(probs): raise Exception(f'probs {probs}, logits {next_token_logits}') next_token = torch.multinomial(probs, num_samples=1).squeeze(1) return next_token # modeling_utils.top_k_top_p_filtering(lnprobs[None, :], top_p=NUCLEUS_P) # # p = F.softmax(lnprobs / temperature, dim=0) # cd = dist.Categorical(p) # # return cd.sample()
62ef43b30ffd9c6fac4254074d7b740cbfc01987
34,870
import pprint import logging def get_submit_input_metadata(body): """ Extract relevant metadata from the body message included with a submit input action. :param body: (dict) the body of the json payload, including details about the message and the button clicked :returns: user_id (str), the user who clicked the button :returns: ts (str), the timestamp of the original message :returns: image_url (str), the image_url of the message if it exists :returns: question (str), the question for the button :returns: response (str), the response from the button :returns: action_ts (str), the timestamp of the button click. """ pprint.pprint(body) try: user_id = body['user']['id'] except KeyError as e: logging.info("No user_id in submit input body %s" % body) logging.info("Error %s." % e) user_id = None try: ts = body['message']['ts'] except KeyError as e: logging.info("No message_ts in submit input body %s" % body) logging.info("Error %s." % e) ts = None try: for block in body["message"]["blocks"]: if "type" not in block: continue if block["type"] == "image": if "image_url" not in block: continue image_url = block["image_url"] break except KeyError as e: logging.info("No image_url in button click body %s" % body) logging.info("Error %s." % e) image_url = None try: question = body["message"]['blocks'][3]['label']['text'] except KeyError as e: logging.info("No question in button click body %s" % body) logging.info("Error %s." % e) question = None try: response = body['state']['values'][body["message"]['blocks'][3]["block_id"]]['plain_input']['value'] except KeyError as e: logging.info("No response in button click body %s" % body) logging.info("Error %s." % e) response = None try: action_ts = body['actions'][0]['action_ts'] except KeyError as e: logging.info("No action_ts in button click body %s" % body) logging.info("Error %s." % e) action_ts = None return user_id, ts, image_url, question, response, action_ts
7d26392d54c9996a48965fa696a9243f5ea85716
34,871
def connection_is_established():
    """
    Function to check if a connection to the remote server has been established
    """
    return _remote_client is not None
8ea9583b6585e8af57f775d8ce974dbdaf85197f
34,872
def getLanguage(langcode):
    """Returns the full name of the language referred to by its
    two-letter language code.
    """
    if langcode in languages:
        return languages[langcode]
    else:
        raise RuntimeError("cannot find language with code " + str(langcode))
777c069f24f92ff9b2f65413535bc38c4be57865
34,873
import base64
import json

def get_images(username):
    """
    Retrieves image history for given username
    :return: resp: (json) All the pathways to the images the user has previously uploaded
    """
    resp = []
    for image in ImPath.query.filter_by(username=username):
        if is_valid_image_path(image.impath):
            with open(image.impath, 'rb') as f:
                im = f.read()
            im = 'data:image/png;base64,' + base64.b64encode(im).decode('ascii')
            pred = image.pred
            resp.append({'image': im, 'prediction': pred})
        else:
            return 'Invalid image pathway'
    return json.dumps(resp)
cdf5ed91d0c88065d07e0c992dc460f6555ee8ef
34,874
def otf2psf(otf, shape): """ Convert optical transfer function (OTF) to point-spread function (PSF). Compute the Inverse Fast Fourier Transform (ifft) of the OTF array and creates the PSF array that is not influenced by the OTF off-centering. By default, the PSF array is the same size as the OTF array. Adapted from MATLAB otf2psf function. Parameters ---------- otf : ndarray OTF array. shape : list (int) Output shape of the OTF array. Returns ------- psf : ndarray PSF array. """ if np.all(otf == 0): return np.zeros_like(otf) inshape = otf.shape # Compute the PSF psf = np.fft.ifft2(otf) # Estimate the rough number of operations involved in the FFT # and discard the PSF imaginary part if within roundoff error. n_ops = np.sum(otf.size * np.log2(otf.shape)) psf = np.real_if_close(psf, tol=n_ops) psf = np.real(psf) # Circularly shift PSF so that the 'center' of the PSF is # [0,0] element of the array for axis, axis_size in enumerate(shape): psf = np.roll(psf, int(axis_size / 2), axis=axis) # Crop output array psf = psf[0:shape[0], 0:shape[1]] return psf
b3a142d26b15951c32e5c2e10aaba69ac9552fc8
34,875
def inventreeInstanceName():
    """ Returns the InstanceName settings for the current database """
    return InvenTreeSetting.get_setting("InstanceName", "")
7c57f2156b69b8a0155ce168921132863483824f
34,876
import nltk
from nltk.stem import WordNetLemmatizer

def predict(sentence: str) -> [str]:
    """
    Lemmatize a given sentence
    :param sentence: sentence to lemmatize
    :return: lemmatized sentence
    """
    lemmatizer = WordNetLemmatizer()
    word_list = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(w) for w in word_list]
b9b11cbb7d540fc517d9888f9e2449e3773617f4
34,877
def is_nominal_tolerance_met(data : pd.DataFrame, criteria_nominal : list = [], nominal_tolerance : list = []): """ Checks that the tolerances for categorical columns are met. The tolerance is defined as the sum of the maximum frequency deviation between groups for each categorical column passed in criteria_nominal. Parameters ---------- data : pd.DataFrame Input data criteria_nominal : list(str) Names of the columns to use for minimizing differences between groups. Those columns must be categorical. nominal_tolerance : list(int) Maximum accepted frequency deviation between groups for categorical columns. Must be the same length as criteria_nominal and passed in the same order as the column names. Returns ------- bool Wether the grouping satisfies the tolerance for categorical variables """ if len(criteria_nominal) == 0: return True else: for column_criteria, tol in zip(criteria_nominal, nominal_tolerance): nominal_check = data.groupby([column_criteria, 'subset']).size() nominal_diff_values = [max(nominal_check[i].values) - min(nominal_check[i].values) for i in data[column_criteria].unique()] if max(nominal_diff_values) > tol: return False return True
03a432010f1ca46b6d80d491f76fa9acf3e2aa91
34,878
def test_limit_by_resource_and_method(): """ Test using a custom key_func - one which creates different buckets by resource and method """ def get_key(req, resp, resource, params) -> str: user_key = get_remote_addr(req, resp, resource, params) return f"{user_key}:{resource.__class__.__name__}:{req.method}" limiter = Limiter( key_func=get_key, default_limits=["10 per hour", "1 per second"] ) @limiter.limit() class ThingsResource: def on_get(self, req, resp): resp.body = 'Hello world!' def on_post(self, req, resp): resp.body = 'Hello world!' app = API(middleware=limiter.middleware) app.add_route('/things', ThingsResource()) client = testing.TestClient(app) r = client.simulate_get('/things') assert r.status == HTTP_200 r = client.simulate_get('/things') assert r.status == HTTP_429 # but a different endpoint can still be hit r = client.simulate_post('/things') assert r.status == HTTP_200
b20492c686411a5d7db13959a7745bade605d1d1
34,880
def pdoo_wrap(doo_obj, total_budget, nu_max=1.0, rho_max=0.9, K=2, C_init=0.8, tol=1e-3, POO_mult=0.5, Randomize=False, return_history=False): """ Wrapper for running PDOO optimisation. """ # pylint: disable=too-many-locals total_budget = total_budget * doo_obj.eval_cost_single_point_normalised([1.0]) opt_tree = OptTree(doo_obj, nu_max, rho_max, total_budget, K, C_init, tol, Randomize) results, index = opt_tree.run_PDOO(POO_mult) max_pt = doo_obj.get_unnormalised_coords(results[index][2]) # IF not return history if not return_history: return results[index][0], max_pt, None history = Namespace() max_iter = int(total_budget) query_pts, query_vals = opt_tree.get_queried_pts() max_val = max(query_vals) history.query_step_idxs = [i for i in range(max_iter)] history.query_send_times = list(range(0, max_iter)) history.query_receive_times = list(range(1, max_iter + 1)) history.query_points = [doo_obj.get_unnormalised_coords(x) for x in query_pts] history.query_vals = query_vals history.query_true_vals = query_vals history.curr_opt_vals = [] history.curr_opt_points = [] curr_max = -np.inf for idx, qv in enumerate(history.query_vals): if qv >= curr_max: curr_max = qv curr_opt_point = history.query_points[idx] history.curr_opt_vals.append(curr_max) history.curr_opt_points.append(curr_opt_point) history.query_eval_times = [1 for _ in range(max_iter)] history.curr_true_opt_vals = history.curr_opt_vals return max_val, max_pt, history
0d06a400a62021cf4c6f1f91ed3bc547ce822ca9
34,881
def chi2(sp, pars):
    """
    Given a spectrum and some parameters, calculate the chi^2 value
    """
    pars = list(pars) + [0]
    return ((sp.specfit.get_model_frompars(sp.xarr, pars) -
             sp.specfit.spectofit)**2 /
            (sp.specfit.errspec**2)).sum()
ce8c6362eb88dfce17dc56414e3bed4f67ec6ffa
34,882
def cartesian2spherical(xyz):
    """
    Transform cartesian coordinates (x, y, z) in spherical coordinates.
    The function only returns the (theta, phi) pair since
    the magnetisation is fixed at zero Temperature
    (the r-component is constant) and is fully characterised by
    two degrees of freedom.
    (We use this to specifically transform M coordinates)

    xyz is assumed to be flattened as:
    [x1, x2, .... , y1, y2, ... , z1, z2 ...]
    """
    # Transform to a 3 x -- array
    xyz.shape = (3, -1)
    r_xy = np.sqrt(xyz[0, :] ** 2 + xyz[1, :] ** 2)
    theta = np.arctan2(r_xy, xyz[2, :])
    phi = np.arctan2(xyz[1, :], xyz[0, :])
    xyz.shape = (-1,)

    theta_phi = np.concatenate((theta, phi))
    # Return [theta1, theta2, ... , phi1, phi2, ... ]
    return theta_phi
8f1ed7021da943082ae336f6d52bed8df09ee52b
34,884
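A minimal usage sketch for the cartesian2spherical row above (assumes NumPy is imported as np; the input layout [x..., y..., z...] follows the docstring and the sample vectors are illustrative):

import numpy as np

# Two unit vectors, one along +z and one along +x, stored as [x1, x2, y1, y2, z1, z2].
m = np.array([0.0, 1.0,   # x components
              0.0, 0.0,   # y components
              1.0, 0.0])  # z components

theta_phi = cartesian2spherical(m)
print(theta_phi)  # [theta1, theta2, phi1, phi2] -> [0, pi/2, 0, 0]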
from typing import Mapping def dict_list_select(dict_list, keys, default_value='', include_conditions={}, exclude_conditions={}): """ Transforms a list of dictionaries into a new list of dictionaries that only includes the specified keys. List entries that are missing key(s) will get the default value assigned. """ # Make sure dict_list is a list if not isinstance(dict_list, list): raise AnsibleFilterError("dictlistfilter requires a list, got %s instead." % type(dict_list)) # Make sure include_conditions is a mapping if not isinstance(include_conditions, Mapping): raise AnsibleFilterError("dictlistfilter requires include_conditions to be a mapping, got %s instead." % type(include_conditions)) # Make sure exclude_conditions is a mapping if not isinstance(exclude_conditions, Mapping): raise AnsibleFilterError("dictlistfilter requires exclude_conditions to be a mapping, got %s instead." % type(exclude_conditions)) # If keys not a list then make it a list if not isinstance(keys, list): keys = list(keys) # Build filtered dict_list dict_list_s = [] for d in dict_list: d_s = {} included = len(include_conditions) == 0 excluded = False for key in keys: d_s[key] = d[key] if key in d else default_value # Check for inclusion if key in include_conditions and d_s[key] == include_conditions[key]: included = True # Check for exclusion if key in exclude_conditions and d_s[key] == exclude_conditions[key]: excluded = True if included and not excluded: dict_list_s += [d_s] # Return list of package items ready for Ansible iteration return dict_list_s
472f1c14eea0b40fa438aa5e0d13f42a95dd33c7
34,885
import inspect def route_hardware_function(api_version: str, function: str): """Can be used to execute standard UOS IO functions.""" if api_version not in API_VERSIONS: return jsonify( ComResult( False, exception=f"'{function}' not supported in api version {api_version}.", ) ) try: arguments = inspect.signature(getattr(UOSDevice, function)) except AttributeError as exception: return jsonify( ComResult( False, exception=f"API call on '{function}' threw error {exception.__str__()}.", ) ) possible_args = { argument.name: util.APIargument( argument.default == inspect.Parameter.empty, argument.annotation, None ) for argument in arguments.parameters.values() if argument.name != "self" and argument.name != "kwargs" } response, required_args = util.check_required_args( possible_args, request.args, add_device=True ) if response.status: device = UOSDevice( identity=required_args["identity"].arg_value, address=required_args["address"].arg_value, ) if function in dir(UOSDevice): instr_response = getattr(device, function)( *[ required_args[parameter.name].arg_value for parameter in inspect.signature( getattr(device, function) ).parameters.values() if parameter.name in required_args and required_args[parameter.name].arg_value is not None ] ) response.status = instr_response.status response.com_data = instr_response else: # dunno how to handle that function response.exception = f"function '{function}' has not been implemented." response.status = False return jsonify(response)
0be2fbe888b516dde6337ef532b05ace82217152
34,886
def calculate_ci(patICDList, version=ELIXHAUSER):
    """ Calculate comorbidity index """
    if version not in CI_MAP:
        raise ValueError("Unsupported comorbidity index")
    patCCMap = _calculate_comorbidity_score(patICDList, CI_MAP[version])
    return sum(patCCMap.values())
69d37081466f2564e3334bb60742cece19bcdc89
34,887
def _map_spectrum_weight(map, spectrum=None): """Weight a map with a spectrum. This requires map to have an "energy" axis. The weights are normalised so that they sum to 1. The mean and unit of the output image is the same as of the input cube. At the moment this is used to get a weighted exposure image. Parameters ---------- map : `~gammapy.maps.Map` Input map with an "energy" axis. spectrum : `~gammapy.modeling.models.SpectralModel` Spectral model to compute the weights. Default is power-law with spectral index of 2. Returns ------- map_weighted : `~gammapy.maps.Map` Weighted image """ if spectrum is None: spectrum = PowerLawSpectralModel(index=2.0) # Compute weights vector energy_edges = map.geom.get_axis_by_name("energy_true").edges weights = spectrum.integral( emin=energy_edges[:-1], emax=energy_edges[1:], intervals=True ) weights /= weights.sum() shape = np.ones(len(map.geom.data_shape)) shape[0] = -1 return map * weights.reshape(shape.astype(int))
0b7571bbc50aa7fed154951d2fc433e2952c0ab2
34,888
import torch
from torch import nn

def predict(processed_input):
    """Function to predict dog breed using the available model

    Args:
        processed_input (torch 4D tensor): transformed and preprocessed image

    Returns:
        breed_pred (str): name of the predicted breed
        class_prob (float): probability of the predicted breed
    """
    model.eval()
    with torch.no_grad():
        output = model(processed_input)
        all_probs = nn.functional.softmax(output, dim=-1)
        class_pred = torch.argmax(all_probs).item()
        class_prob = torch.max(all_probs).item()
        breed_pred = class_id_to_breed[str(class_pred)]
        class_prob = round(class_prob*100, 2)
    return breed_pred, class_prob
4af217d05b6734de67b99c9171f1e06d9d11dfd3
34,889
def getChannels(server: utils.HikVisionServer):
    """
    It is used to get the properties of streaming channels for the device
    """
    return utils.getXML(server, "Streaming/channels")
032718216dfba1c7011a29c4d298a16599a0c873
34,891
def is_help_command(command):
    """
    Checks that the user inputted command is a help command, which will not go over the wire.
    This is a command with -h or --help.
    The help functionality is triggered no matter where the -h appears in the command (arg ordering)

    :param command: a list of strings representing the command, for example, ['node', 'list', '-h']
    :return: True if it is a help command. False otherwise.
    """
    for segment in command:
        if segment in ('-h', '--help'):
            return True
    return False
e68142c38d734e492f9f65dfdf04ac87f79bd666
34,893
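A tiny usage sketch for the is_help_command row above (the example commands are illustrative):

print(is_help_command(['node', 'list', '-h']))      # True
print(is_help_command(['node', '--help', 'list']))  # True
print(is_help_command(['node', 'list']))            # False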
def _holoviews_chart(): """## Dashboard Orders Chart generated by HoloViews""" data = _get_chart_data() line_plot = data.hvplot.line( x="Day", y="Orders", width=None, height=500, line_color="#007BFF", line_width=6, ) scatter_plot = data.hvplot.scatter(x="Day", y="Orders", height=300).opts( marker="o", size=10, color="#007BFF" ) fig = line_plot * scatter_plot gridstyle = {"grid_line_color": "black", "grid_line_width": 0.1} fig = fig.opts( responsive=True, toolbar=None, yticks=list(range(12000, 26000, 2000)), ylim=(12000, 26000), gridstyle=gridstyle, show_grid=True, ) return fig
4ceb6e712b72a247ead5a6e5f0b442f704e40ab8
34,894
from functools import wraps

from flask import Response

# `are_logged_in` is assumed to be a helper defined elsewhere in the application;
# Response is assumed to be flask.Response (werkzeug's Response works equally).


def api_user_required(f):
    """A decorator for APIs that require a logged-in user."""

    @wraps(f)
    def decorated(*args, **kwargs):
        if not are_logged_in():
            return Response("API requires logged-in user", 401)
        return f(*args, **kwargs)

    return decorated
b4439868dc1401203a50052c430f3213352b1cbe
34,895
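A usage sketch for the api_user_required decorator above on a Flask view; the app and route are illustrative, and are_logged_in() is assumed to come from the same module.

from flask import Flask, jsonify

app = Flask(__name__)   # illustrative app wiring

@app.route("/api/profile")
@api_user_required
def profile():
    # Reached only when are_logged_in() is truthy; otherwise a 401 Response is returned.
    return jsonify({"ok": True})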
import warnings

import numpy as np

# `img2histogram_samples`, `fit_2dGMM` and `image_fn` are helpers from the surrounding package.


def fitGMM_patch_post_process(centre_patch_intensity, n_samples=1000, max_dist_thresh=10, min_area_pair=0, max_area_pair=10000):
    """ Fits an n-component mixture to a 2d image to resolve closely overlapping centroids

    This function simplifies the calling and wraps `fit_2dGMM` so we directly give the input image.

    Parameters
    ----------
    centre_patch_intensity : numpy array
        input gray-image
    n_samples : int
        the maximum number of samples to draw if the corresponding normalised image intensity was 1.
    max_dist_thresh : int
        the upper bound on the expected distance if it was a true pair.
    min_area_pair : int
        lower bound on the component area kept when re-filtering the binary mask.
    max_area_pair : int
        upper bound on the component area kept when re-filtering the binary mask.

    Returns
    -------
    filt_peaks : numpy array
        (2, 2) array with the two resolved centriole peak coordinates, used for distance measurements.

    """
    # threshold at mean + std.
    thresh = np.mean(centre_patch_intensity) + np.std(centre_patch_intensity)

    # draw samples according to intensity
    xy_samples = img2histogram_samples(centre_patch_intensity, thresh=thresh, samples=n_samples)

    if np.sum(xy_samples >= thresh) == 0:
        warnings.warn('not enough points with mean + std, trying mean threshold')
        thresh = np.mean(centre_patch_intensity)
        xy_samples = img2histogram_samples(centre_patch_intensity, thresh=thresh, samples=n_samples)

    # fit the samples with a 2-component GMM.
    (weights, means_, covars_), fitted_y, gmm = fit_2dGMM(xy_samples[:, 0], xy_samples[:, 1], n_components=2)

    # get the centroids.
    coords1 = means_[0]
    coords2 = means_[1]

    cand_pair_peaks = np.vstack([coords1, coords2])
    cand_pair_peaks = cand_pair_peaks[:, [1, 0]]
    cand_pair_dist = np.linalg.norm(cand_pair_peaks[0] - cand_pair_peaks[1])

    # keep the candidate peaks (not the distance) as a fallback in case the refit below fails.
    filt_peaks_backup = cand_pair_peaks.copy()

    if cand_pair_dist <= max_dist_thresh:
        filt_peaks = cand_pair_peaks.copy()
    else:
        binary = centre_patch_intensity >= thresh
        binary_filt = image_fn.filter_masks(binary, min_area=min_area_pair, max_area=max_area_pair, keep_centre=True, dist_thresh=1., min_max_area_cutoff=20)
        centre_patch_intensity_new = binary_filt * centre_patch_intensity

        # now do the detection on the filtered intensity image
        xy_samples = img2histogram_samples(centre_patch_intensity_new, thresh=thresh, samples=n_samples)
        (weights, means_, covars_), fitted_y, gmm = fit_2dGMM(xy_samples[:, 0], xy_samples[:, 1], n_components=2)

        coords1 = means_[0]
        coords2 = means_[1]

        cand_pair_peaks = np.vstack([coords1, coords2])
        cand_pair_peaks = cand_pair_peaks[:, [1, 0]]
        filt_peaks = cand_pair_peaks.copy()

    if len(filt_peaks) < 2:
        # return the original 2 peaks.
        filt_peaks = filt_peaks_backup.copy()

    return filt_peaks
5bacc80660a144572b3fbd599e2ca07e2c1f8aaa
34,896
def StartOfInterval(intervalVar:NexVar) -> NexVar: """Creates the new event. Copies the start of each interval of the specified interval variable to the result.""" return NexRun("StartOfInterval", locals())
a272a09944ccc5433182ca58fdc7c085212e8706
34,898
import ipaddress def is_global(host: str) -> bool: """ >>> assert not is_global("127.0.0.1") >>> assert not is_global("192.168.20.168") >>> assert is_global("211.13.20.168") >>> assert is_global("google.com") """ if host == "localhost": return False try: address = ipaddress.ip_address(host) except ValueError: return True return address.is_global
1e68b762a279eb7b54f32339c783a631bedfa2c9
34,899
def make_tweet(quantity, ticker, asset_description, price, tx_date, tx_time, instrument, trade_type): """Create and return a new instance of a Tweet object.""" tweet = Tweet(quantity, ticker, asset_description, price, tx_date, tx_time, instrument, trade_type) return tweet
f35f25bcbd9235d9c13864106952554a5cc4a163
34,900
import warnings

import scipy.sparse as sp
from scipy.sparse.linalg import ArpackNoConvergence, eigsh


def rescale_laplacian(laplacian):
    """
    Scale graph Laplacian by the largest eigenvalue of normalized graph
    Laplacian, so that the eigenvalues of the scaled Laplacian are <= 1.

    Args:
        laplacian: Laplacian matrix of the graph

    Returns:
        Return a scaled Laplacian matrix.
    """
    try:
        print("Calculating largest eigenvalue of normalized graph Laplacian...")
        largest_eigval = eigsh(laplacian, 1, which="LM", return_eigenvectors=False)[0]
    except ArpackNoConvergence:
        warnings.warn(
            "Eigenvalue calculation did not converge! Using largest_eigval=2 instead.",
            RuntimeWarning,
            stacklevel=2,
        )
        largest_eigval = 2

    scaled_laplacian = (2.0 / largest_eigval) * laplacian - sp.eye(laplacian.shape[0])
    return scaled_laplacian
cf43000542c9c8b23001b0df0d157077a8a84d98
34,901
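A usage sketch for rescale_laplacian above on a small sparse graph; building the normalised Laplacian by hand as L = I - D^{-1/2} A D^{-1/2} is an assumption about the expected input.

import numpy as np
import scipy.sparse as sp

# Undirected triangle graph; build its symmetrically normalised Laplacian by hand.
A = sp.csr_matrix(np.array([[0., 1., 1.],
                            [1., 0., 1.],
                            [1., 1., 0.]]))
deg = np.asarray(A.sum(axis=1)).ravel()
d_inv_sqrt = sp.diags(1.0 / np.sqrt(deg))
laplacian = sp.eye(A.shape[0]) - d_inv_sqrt @ A @ d_inv_sqrt

scaled = rescale_laplacian(laplacian)
print(scaled.toarray())   # eigenvalues of the scaled matrix lie in [-1, 1]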
def get_function_signature(uplevels=0): """ RETURNS the calling function signature, at runtime. """ uplevels += 1 funcname = get_function_name(uplevels) params = get_function_parameters_and_values(uplevels) params = ", ".join(["{}={}".format(*i) for i in params]) sig = "{}({})".format(funcname, params) return sig
62b23ca9f644b46ebbe4ba122ccd7c11291efbcd
34,903
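A hypothetical usage of get_function_signature above; it assumes get_function_name and get_function_parameters_and_values from the same module are importable, since the helper inspects the caller's frame.

def greet(name, polite=True):
    print(get_function_signature())   # prints something like: greet(name='Ada', polite=True)

greet("Ada")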
import numpy as np


def multivariate_multiply(m1, c1, m2, c2):
    """
    Multiplies the two multivariate Gaussians together and returns the
    results as the tuple (mean, covariance).

    Examples
    --------

    .. code-block:: Python

        m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
                                     [3.2, 0], [[8.0, 1.1], [1.1,8.0]])

    Parameters
    ----------

    m1 : array-like
        Mean of first Gaussian. Must be convertible to a 1D array via
        numpy.asarray(), for example 6, [6], [6, 5], np.array([3, 4, 5, 6])
        are all valid.

    c1 : matrix-like
        Covariance of first Gaussian. Must be convertible to a 2D array via
        numpy.asarray().

    m2 : array-like
        Mean of second Gaussian. Must be convertible to a 1D array via
        numpy.asarray(), for example 6, [6], [6, 5], np.array([3, 4, 5, 6])
        are all valid.

    c2 : matrix-like
        Covariance of second Gaussian. Must be convertible to a 2D array via
        numpy.asarray().

    Returns
    -------

    m : ndarray
        mean of the result

    c : ndarray
        covariance of the result
    """

    C1 = np.asarray(c1)
    C2 = np.asarray(c2)
    M1 = np.asarray(m1)
    M2 = np.asarray(m2)

    sum_inv = np.linalg.inv(C1 + C2)
    C3 = np.dot(C1, sum_inv).dot(C2)

    M3 = (np.dot(C2, sum_inv).dot(M1) +
          np.dot(C1, sum_inv).dot(M2))

    return M3, C3
59cba92949533e4ec001ffa758bc125fd74960d5
34,905
def make_initial_state(): """ Create an initial state dictionary. """ return { 'geogrid' : 'waiting', 'ingest' : 'waiting', 'ungrib' : 'waiting', 'metgrid' : 'waiting', 'real' : 'waiting', 'wrf' : 'waiting', 'output': 'waiting' }
7a9e2bccb52c1a75ce2ef1d313177fd573433461
34,906
def xliff_import_confirm(request, xliff_dir):
    """
    Confirm the XLIFF import and write changes to database

    :param request: The current request (used for error messages)
    :type request: ~django.http.HttpRequest

    :param xliff_dir: The directory containing the xliff files
    :type xliff_dir: str

    :return: Whether all XLIFF files were imported successfully
    :rtype: bool
    """
    success = True
    # Acquire linkcheck lock to avoid race conditions between post_save signal and links.delete()
    with update_lock:
        # Iterate over all xliff files
        for xliff_file, deserialized_objects in xliffs_to_pages(
            request, xliff_dir
        ).items():
            # Iterate over all objects of one xliff file
            # (typically, one xliff file contains exactly one page translation)
            for deserialized in deserialized_objects:
                page_translation = deserialized.object
                errors, has_changed = get_xliff_import_errors(request, page_translation)
                if errors:
                    logger.warning(
                        "XLIFF import of %r not possible because validation of %r failed with the errors: %r",
                        xliff_file,
                        page_translation,
                        errors,
                    )
                    error_list = "<ul>"
                    for error in errors:
                        error_list += f"<li><i data-feather='alert-triangle' class='pb-1'></i> {error['message']}</li>"
                    error_list += "</ul>"
                    messages.error(
                        request,
                        _(
                            "Page {} could not be imported successfully because of the errors: {}"
                        ).format(page_translation.readable_title, error_list),
                    )
                    success = False
                elif not has_changed:
                    # Update existing translation
                    existing_translation = page_translation.latest_version
                    existing_translation.currently_in_translation = False
                    existing_translation.save()
                    logger.info(
                        "%r of XLIFF file %r was imported without changes by %r",
                        existing_translation,
                        xliff_file,
                        request.user,
                    )
                    messages.info(
                        request,
                        _("Page {} was imported without changes.").format(
                            page_translation.readable_title
                        ),
                    )
                else:
                    # Check if previous version already exists
                    existing_translation = page_translation.latest_version
                    if existing_translation:
                        # Delete link objects of existing translation
                        existing_translation.links.all().delete()
                    # Confirm import and write changes to the database
                    page_translation.save()
                    logger.info(
                        "%r of XLIFF file %r was imported successfully by %r",
                        page_translation,
                        xliff_file,
                        request.user,
                    )
                    messages.success(
                        request,
                        _("Page {} was imported successfully.").format(
                            page_translation.readable_title
                        ),
                    )
    return success
aa022adc6c4a471c3fd1f7b9896f9b3aa5c1ddcf
34,907
import numpy as np
from scipy.stats import norm

# `noises`, `gen_correlated_gaps` and `gen_random_gaps` are helpers from the surrounding module.


def generate_gaps_time_series(nt, nx, ny, pct, gaps_type):
    """
    Generate gaps on time series of modeled fields.
    Input:
        - pct: percentage of gaps to be generated
        - gaps_type: type of gaps, can be:
            - 'random': randomly distributed gaps across time series
            - 'correlated': spatio-temporally correlated gaps (gaps with shapes)
    Output:
        - (p, nt) mask flagging the entries where values are missing (p = nx*ny)
    """
    p = nx*ny

    # 1. Generate spatio-temporally correlated gaps
    if gaps_type == 'correlated':
        t_start, t_end = 8, 18
        x, y = np.meshgrid(np.linspace(-1, 1, nx), np.linspace(-1, 1, ny))
        rad = np.sqrt(x**2 + y**2)
        b = np.random.normal(0, 1, (nx, ny))
        e = np.linspace(1.3, 1.4, nt)
        corr = [noises.geo(rad, e[i]) for i in range(nt)]
        gaps = noises.gen_noise_series2(corr, b, nt)
        # 'seuil' (threshold) is chosen so that the requested percentage of values is masked
        seuil = norm.ppf(pct/100., np.mean(gaps), np.std(gaps))
        print('seuil: %0.2f' % seuil)
        mask = gen_correlated_gaps(gaps, seuil, t_start, t_end)
        mask = np.reshape(mask, (nt, p)).T

    # 2. Generate random gaps
    elif gaps_type == 'random':
        nb_of_gaps = np.arange(int(p*nt*pct/100.))
        zeros_mask = np.zeros((p, nt), dtype=bool)
        mask = gen_random_gaps(zeros_mask, p, nt, nb_of_gaps)

    return mask
20267e0043efbf40f1779592961012f8999ac249
34,908
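A call sketch for generate_gaps_time_series above; the 'random' branch is used so only the module's gen_random_gaps helper is needed, and the grid sizes are illustrative.

mask = generate_gaps_time_series(nt=20, nx=16, ny=16, pct=30, gaps_type='random')
print(mask.shape)   # expected (nx*ny, nt) = (256, 20)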
import datetime  # plain `import datetime` so that datetime.datetime.now() below resolves correctly

from flask import current_app, jsonify, request

# Feedback, Notification, User, Telegram, Trello, errors and get_user_from_request
# are assumed to come from the surrounding application package.


def leave_feedback():
    """Leave feedback."""
    json = request.get_json()

    if "text" in json:
        Feedback.create(text=json["text"], user=get_user_from_request())

        # Message text: "User %s left feedback: %s"
        Telegram(current_app.config).notify_admin_channel(
            "Пользователь %s оставил отзыв: %s"
            % (get_user_from_request().username, json["text"])
        )

        success = Trello(current_app.config).create_card(json["text"])
        if not success:
            return errors.feedback_trello_error()

        for user in User.get_admins():
            Notification.create(
                user=user,
                created_date=datetime.datetime.now(),
                text="Пользователь %s оставил отзыв: %s"
                % (get_user_from_request().username, json["text"]),
                object_type="feedback",
            )

        return jsonify({"success": 1})
    else:
        return errors.wrong_payload("text")
cdc1c5898c1f68ebcb1d4b34dd05ff2fab6c450f
34,909
from typing import Optional def get_control_panel(control_panel_arn: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetControlPanelResult: """ AWS Route53 Recovery Control Control Panel resource schema . :param str control_panel_arn: The Amazon Resource Name (ARN) of the cluster. """ __args__ = dict() __args__['controlPanelArn'] = control_panel_arn if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:route53recoverycontrol:getControlPanel', __args__, opts=opts, typ=GetControlPanelResult).value return AwaitableGetControlPanelResult( control_panel_arn=__ret__.control_panel_arn, default_control_panel=__ret__.default_control_panel, name=__ret__.name, routing_control_count=__ret__.routing_control_count, status=__ret__.status)
d8dd1280505b70ee25166941f9822827f10850c4
34,910
from typing import Sequence from typing import Optional from typing import Type from typing import Union def zeros(shape: Sequence[int], dtype: Optional[Type[np.number]] = None, backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor: """Return a Tensor of shape `shape` of all zeros. The Tensor has one dangling Edge per dimension. Args: shape : Shape of the array. dtype, optional: dtype of array (default np.float64). backend (optional): The backend or its name. Returns: the_tensor : Tensor of shape `shape`. Represents an array of all zeros. """ the_tensor = initialize_tensor("zeros", shape, backend=backend, dtype=dtype) return the_tensor
e913c5fa24095842492cec670fde2ea61afc24c6
34,911
def staff_required( function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url="login" ): """Decorator for views that checks that the logged in user is a staff/admin, redirects to the log-in page if necessary.""" actual_decorator = user_passes_test( lambda user: user.is_active and user.is_staff, login_url=login_url, redirect_field_name=redirect_field_name, ) if function: return actual_decorator(function) return actual_decorator
804ba1021993507796fd7009ffea880fcebc2edc
34,912
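A usage sketch for the staff_required decorator above on a plain Django view; the view itself is illustrative.

from django.http import HttpResponse

@staff_required
def admin_dashboard(request):
    # Non-staff (or anonymous) users are redirected to the "login" URL instead.
    return HttpResponse("staff only")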
def from_Peppercorn_restingset_reaction(reaction, restingsets): """ Converts a condensed Peppercorn PepperReaction object to an equivalent DNAObjects RestingSetReaction object. This function requires as an argument a dict mapping Peppercorn RestingSet objects to equivalent DNAObject RestingSets. """ if reaction.rtype != 'condensed': print("KinDA: WARNING: Attempted to convert non-condensed Peppercorn reaction into a RestingSetReaction") reactants = [restingsets[r] for r in reaction.reactants] products = [restingsets[p] for p in reaction.products] return dna.RestingSetReaction(reactants = reactants, products=products)
12f368f7c2a2e5f20456c37131f28a76573cebe3
34,913
from unittest.mock import Mock

import pytest
import stripe


@pytest.fixture  # the docstring identifies this as a fixture; the decorator is assumed to match
def mock_stripe_checkout(monkeypatch):
    """Fixture to monkeypatch stripe.checkout.* methods"""
    mock = Mock()
    mock.Session.create.return_value.url = "https://example.net/stripe_checkout/"
    monkeypatch.setattr(stripe, "checkout", mock)
    return mock
78c8bf08bb41c831f2e0a602a889b937ea1534c5
34,914
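A test sketch that consumes the mock_stripe_checkout fixture above; the call arguments are illustrative, and only the fixture's patched behaviour is taken from the source.

import stripe

def test_checkout_session_url(mock_stripe_checkout):
    # stripe.checkout has been replaced by the fixture's Mock, so no network call is made.
    session = stripe.checkout.Session.create(mode="payment")   # arguments are illustrative
    assert session.url == "https://example.net/stripe_checkout/"
    mock_stripe_checkout.Session.create.assert_called_once()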
import time def milliseconds(): """ Current time in milliseconds (UTC). """ return int(time.time() * 1000)
cc829688be8742423a8306b0ffae14f318541687
34,915
from unittest.mock import MagicMock

# MessageSenderMixin comes from the package under test; the `.should` assertions use the `sure` library.
# Note: the "transcation" spelling asserted below matches the message produced by the code under test.


def test_dispatch_responses():
    """
    MessageSenderMixin._dispatch_responses()

    Test plan:
        -Ensure False returned if pre_dispatch_responses hook returns false
        -Ensure False returned if post_dispatch_responses hook returns false
        -Catch invalid response from HPIT on [message][id]
        -Catch invalid response from HPIT on [response]
        -Catch no callback exception
        -Catch not callable error
        -Ensure True returned on completion
    """
    bad_response = [{"bad_response": "boo"}]
    bad_response2 = [{"message":{"message_id":"4"}}]
    good_response = [{"message": {"message_id":"4"},"response":{"data":"2"}}]

    def returnFalse():
        return False
    def returnTrue():
        return True
    def callback1(payload):
        return True

    test_message_sender_mixin = MessageSenderMixin()
    test_message_sender_mixin.send_log_entry = MagicMock()
    test_message_sender_mixin.outstanding_responses["4"] = 1
    test_message_sender_mixin.response_callbacks["4"] = callback1

    setattr(test_message_sender_mixin,"pre_dispatch_responses",returnFalse)
    setattr(test_message_sender_mixin,"post_dispatch_responses",returnTrue)
    test_message_sender_mixin._dispatch_responses(good_response).should.equal(False)

    setattr(test_message_sender_mixin,"pre_dispatch_responses",returnTrue)
    setattr(test_message_sender_mixin,"post_dispatch_responses",returnFalse)
    test_message_sender_mixin._dispatch_responses(good_response).should.equal(False)

    setattr(test_message_sender_mixin,"pre_dispatch_responses",returnTrue)
    setattr(test_message_sender_mixin,"post_dispatch_responses",returnTrue)

    test_message_sender_mixin._dispatch_responses(bad_response)
    test_message_sender_mixin.send_log_entry.assert_called_once_with('Invalid response from HPIT. No message id supplied in response.')

    test_message_sender_mixin.send_log_entry.reset_mock()
    test_message_sender_mixin._dispatch_responses(bad_response2)
    test_message_sender_mixin.send_log_entry.assert_called_once_with('Invalid response from HPIT. No response payload supplied.')

    del test_message_sender_mixin.response_callbacks["4"]
    test_message_sender_mixin.send_log_entry.reset_mock()
    test_message_sender_mixin._dispatch_responses(good_response)
    test_message_sender_mixin.send_log_entry.assert_called_once_with('No callback registered for message id: 4')

    test_message_sender_mixin.response_callbacks["4"] = 5
    test_message_sender_mixin.send_log_entry.reset_mock()
    test_message_sender_mixin._dispatch_responses(good_response)
    test_message_sender_mixin.send_log_entry.assert_called_once_with("Callback registered for transcation id: 4 is not a callable.")

    test_message_sender_mixin.outstanding_responses["4"] = 1
    test_message_sender_mixin.response_callbacks["4"] = callback1
    test_message_sender_mixin._dispatch_responses(good_response).should.equal(True)
    test_message_sender_mixin.outstanding_responses.should.be.empty
c2d25ccde706246f89eb74ff11dbf673ca07f792
34,916
import numpy as np

# `_build_edges` is a helper from the surrounding module.


def fit_wave_interval(wave, old_sampling, new_sampling, new_size=None):
    """
    Produces an array of wavelengths between two values and with a given
    number of elements.

    Parameters
    ----------
    wave : np.ndarray, list
        List or ndarray with the initial and final wavelengths,
        e.g. wave = [first_wave, last_wave].
    old_sampling : string
        Sampling type of the input interval: 'linear' for equal linear
        spacing, 'ln' for equal spacing in powers of e (Euler number) or
        'log' for equal spacing in powers of base 10.
    new_sampling : string
        Sampling type of the output array, with the same options as
        old_sampling.
    new_size : integer, optional
        Number of pixels in the output wavelength array. Defaults to len(wave).

    Returns
    -------
    wave_array : np.ndarray
        Array with wavelength values

    Examples
    --------
    (The examples below were written for an earlier signature that took a
    single sampling-type argument and are kept for illustration.)

    To produce an array of wavelengths between 3000 and 3100 (arbitrary
    units) with 10 elements and equally spaced.

    >>> sc.util.fit_wave_interval([3000,3100], 'linear', 10)
    array([3000.        , 3011.11111111, 3022.22222222, 3033.33333333,
       3044.44444444, 3055.55555556, 3066.66666667, 3077.77777778,
       3088.88888889, 3100.        ])

    To produce the same array but equally spaced in base 10 logarithms.

    >>> sc.util.fit_wave_interval([3000,3100], 'log', 10)
    array([3000.        , 3010.94987574, 3021.93971808, 3032.96967289,
       3044.03988657, 3055.15050608, 3066.30167889, 3077.49355302,
       3088.72627702, 3100.        ])
    """
    assert old_sampling in ['linear', 'log', 'ln'] and new_sampling in ['linear', 'log', 'ln']

    if new_size is None:
        new_size = len(wave)

    old_edge = _build_edges(wave=wave, sampling_type=old_sampling)
    lower_edge = old_edge[0]
    upper_edge = old_edge[-1]

    if new_sampling == 'linear':
        step = (upper_edge - lower_edge) / (new_size)
        wave_array = np.linspace(lower_edge + step/1.99, upper_edge - step/1.99,
                                 num=new_size)
    elif new_sampling == 'log':
        lower_edge = np.log10(lower_edge)
        upper_edge = np.log10(upper_edge)
        step = (upper_edge - lower_edge) / (new_size)
        wave_array = np.logspace(lower_edge + step/1.99, upper_edge - step/1.99,
                                 num=new_size, base=10.)
    elif new_sampling == 'ln':
        lower_edge = np.log(lower_edge)
        upper_edge = np.log(upper_edge)
        step = (upper_edge - lower_edge) / (new_size)
        wave_array = np.logspace(lower_edge + step/1.99, upper_edge - step/1.99,
                                 num=new_size, base=np.e)

    return wave_array
0e2473fa707f704f60ecf5a4dcf0966ab5f041be
34,917
import pandas as pd


def make_network_from_structural_and_functional(structural_edges, functional_edges):
    """
    Combine structural and functional dataframes to get a deduplicated dataframe of edges
    :param structural_edges: pandas DataFrame including columns ['source_content_id', 'destination_content_id', 'weight']
    :param functional_edges: pandas DataFrame including columns ['source_content_id', 'destination_content_id', 'weight']
    :return: pandas DataFrame with columns ['source_content_id', 'destination_content_id', 'weight']
    """
    all_edges = pd.concat([structural_edges, functional_edges], ignore_index=True, sort=True)
    network_edges = all_edges[
        ['source_content_id', 'destination_content_id', 'weight']].drop_duplicates().reset_index(drop=True)
    return network_edges
fdea72dab9da4cab05d7a998e543476b761dd74a
34,918
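A runnable sketch of make_network_from_structural_and_functional above with toy DataFrames; the column values are illustrative.

import pandas as pd

structural = pd.DataFrame({
    "source_content_id": ["a", "a"],
    "destination_content_id": ["b", "c"],
    "weight": [1, 1],
})
functional = pd.DataFrame({
    "source_content_id": ["a", "d"],
    "destination_content_id": ["b", "e"],
    "weight": [1, 2],
})

edges = make_network_from_structural_and_functional(structural, functional)
print(edges)   # the duplicated a -> b edge appears only once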
import math def num_k_of_n(n: int, k: int) -> int: """Return number of combinations of k elements out of n.""" if k > n: return 0 if k == n: return 1 return math.factorial(n) // (math.factorial(k) * math.factorial((n - k)))
de99dd88fc6e747421e36c698a525b7e58b1e4de
34,920
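A quick sanity check of num_k_of_n above against math.comb (Python 3.8+); purely illustrative.

import math

assert num_k_of_n(5, 2) == math.comb(5, 2) == 10
assert num_k_of_n(3, 5) == 0   # asking for more elements than available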
def new_graph(): """Make graph to play with.""" return Graph()
eac2a6a9e65db733cc744e391fbbb6f2906cf744
34,921
def subtract_mean_batch_reward(population): """Returns new Population where each batch has mean-zero rewards.""" df = population.to_frame() mean_dict = df.groupby('batch_index').reward.mean().to_dict() def reward_for_sample(sample): return sample.reward - mean_dict[sample.batch_index] shifted_samples = [ sample.copy(reward=reward_for_sample(sample)) for sample in population ] return Population(shifted_samples)
73813c3f165dc933983b0abe48392505cc202e6e
34,922