Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def make_term_structure(rates, dt_obs):
    """
    rates is a dictionary-like structure with labels as keys
    and rates (decimal) as values.

    TODO: Make it more generic
    """
    settlement_date = pydate_to_qldate(dt_obs)

    rate_helpers = []
    for label in rates.keys():
        r = rates[label]
        h = make_rate_helper(label, r, settlement_date)
        rate_helpers.append(h)

    ts_day_counter = ActualActual(ISDA)
    tolerance = 1.0e-15

    ts = PiecewiseYieldCurve.from_reference_date(
        BootstrapTrait.Discount,
        Interpolator.LogLinear,
        settlement_date,
        rate_helpers,
        ts_day_counter,
        tolerance
    )

    return ts
ccef79d5f409d6dc061e546daa8b3eba1aa6d070
15,100
def skip_after_postgres(*ver):
    """Skip a test on PostgreSQL after (including) a certain version."""
    ver = ver + (0,) * (3 - len(ver))

    def skip_after_postgres_(f):
        @wraps(f)
        def skip_after_postgres__(self):
            if self.conn.server_version >= int("%d%02d%02d" % ver):
                return self.skipTest("skipped because PostgreSQL %s"
                                     % self.conn.server_version)
            else:
                return f(self)
        return skip_after_postgres__
    return skip_after_postgres_
075aecad4bcdd2340ec57089124143cc3642a38b
15,101
def validateParameters(options): """ Who needs documentation TODO: Add some... """ #options.identity should be a valid file if os.path.isfile(options.identity): try: f = open(options.identity, "r") except IOError as err: print "Could not open the identity file %s for reading, exiting." % options.identity sys.exit(1) finally: f.close() else: print "Could not find the identity file %s, exiting." % options.identity sys.exit(1) #options.rport, options.lport, and options.port should be numeric if not options.rport.isdigit() or not options.lport.isdigit() or not options.port.isdigit(): print "rport:%s lport:%s port:%s" % (options.rport, options.lport, options.port) print "rport, lport, and port options must all be numbers, exiting." sys.exit(1) #options.host should be an IP or a hostname validIpAddressRegex = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" validHostnameRegex = "^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$" if not re.match(validIpAddressRegex, options.host) and not re.match(validHostnameRegex, options.host): print "Supplied host: %s" % options.host print "Host appears to not be a valid host, exiting." sys.exit(1) #If we made it this far, we can return True return True
6ecfa0e919a5f54687ab27d652dcf6e4c8b56106
15,102
def make_order_embeddings(max_word_length, order_arr):
    """
    Generate stroke-order embeddings constrained by the maximum word length,
    based on the stroke-order table.
    :param max_word_length:
    :param order_arr:
    :return:
    """
    order_arr = [
        row + [0] * (max_word_length - len(row))
        if len(row) <= max_word_length
        else row[:max_word_length - 1] + [row[-1]]
        for row in order_arr
    ]
    order_arr = np.array(order_arr)
    order_embeddings = tf.convert_to_tensor(order_arr)

    return order_embeddings
a2f2ac2d0576b2a22145e583cc1e5b8fa9c1cc77
15,103
import numpy


def agg_double_list(l):
    """
    @param l:
    @type l:
    @return:
    @rtype:
    """
    # l: [ [...], [...], [...] ]
    # l_i: result of each step in the i-th episode
    s = [numpy.sum(numpy.array(l_i), 0) for l_i in l]
    s_mu = numpy.mean(numpy.array(s), 0)
    s_std = numpy.std(numpy.array(s), 0)
    return s_mu, s_std
82b67e70caccb1f5d430e8e9f0a9c75348d3bc7a
15,104
def get_string_from_bytes(byte_data, encoding="ascii"):
    """Decodes a string from DAT file byte data.

    Note that in byte form these strings are 0 terminated and this 0 is removed

    Args:
        byte_data (bytes) : the binary data to convert to a string
        encoding (string) : optional, the encoding type to use when converting
    """
    string_bytes = byte_data[0:(len(byte_data) - 1)]  # strip off the 0 at the end of the string
    string = string_bytes.decode(encoding)
    return string
c07523139e2509fcc19b2ce1d9a933fcb648abfd
15,105
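A minimal usage sketch for the get_string_from_bytes snippet above (editor illustration, not a dataset row; the byte literal is made up): the trailing zero byte is stripped and the rest is decoded as ASCII.

    example = get_string_from_bytes(b"HERO\x00")
    assert example == "HERO"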
def default_component():
    """Return a default component."""
    return {
        'host': '192.168.0.1',
        'port': 8090,
        'name': 'soundtouch'
    }
780dd84ff613f2bccb56f560e5de77e9d57d9d5a
15,106
from pathlib import Path


def check_series_duplicates(patches_dir, series_path=Path('series')):
    """
    Checks if there are duplicate entries in the series file

    series_path is a pathlib.Path to the series file relative to the patches_dir

    returns True if there are duplicate entries; False otherwise.
    """
    entries_seen = set()
    for entry in _read_series_file(patches_dir, series_path):
        if entry in entries_seen:
            get_logger().warning('Patch appears more than once in series: %s', entry)
            return True
        entries_seen.add(entry)
    return False
58a5b6fbcf6867d770693938a2fc8308d644d54b
15,107
def is_free(board: list, pos: int) -> bool:
    """checks if pos is free or filled"""
    return board[pos] == " "
64b75aa5d5b22887495e631e235632e080646422
15,108
def rc_from_blocks(blocks):
    """
    Computes the x and y dimensions of each block
    :param blocks:
    :return:
    """
    dc = np.array([np.diff(b[:, 0]).max() for b in blocks])
    dr = np.array([np.diff(b[:, 1]).max() for b in blocks])

    return dc, dr
0837367eca7a7668a3f0b0078cf8699f5e5bc4d6
15,109
import argparse def _create_parser(): """ Creates argparser for SISPO which can be used for CLI and options """ parser = argparse.ArgumentParser(usage="%(prog)s [OPTION] ...", description=__file__.__doc__) parser.add_argument("-i", "--inputdir", action="store", default=None, type=str, help="Path to 'definition.json' file") parser.add_argument("-o", "--outputdir", action="store", default=None, type=str, help="Path to results directory") parser.add_argument("-n", "--name", action="store", default=None, type=str, help="Name of simulation scenario") parser.add_argument("--verbose", action="store_true", help="Verbose output, displays log also on STDOUT") parser.add_argument("--with-sim", action="store_true", dest="with_sim", help="If set, SISPO will simulate the scenario") parser.add_argument("--with-render", action="store_true", dest="with_render", help="If set, SISPO will render the scenario") parser.add_argument("--with-compression", action="store_true", dest="with_compression", help="If set, SISPO will compress images") parser.add_argument("--with-reconstruction", action="store_true", dest="with_reconstruction", help="If set, SISPO will attempt reconstruction.") parser.add_argument("--restart", action="store_true", help="Use cProfiler and write results to log.") parser.add_argument("--profile", action="store_true", help="Use cProfiler and write results to log.") parser.add_argument("-v", "--version", action="store_true", help="Prints version number.") parser.add_argument("--with-plugins", action="store_true", dest="with_plugins", help="Plugins that are run before rendering.") return parser
f1f62b8be37139c8c73293b376e0b9bd0540e5c5
15,110
def serialize_measurement(measurement):
    """Serializes a `openff.evaluator.unit.Measurement` into a dictionary of the form
    `{'value', 'error'}`.

    Parameters
    ----------
    measurement : openff.evaluator.unit.Measurement
        The measurement to serialize

    Returns
    -------
    dict of str and str
        A dictionary representation of a openff.evaluator.unit.Measurement
        with keys of {"value", "error"}
    """
    return {"value": measurement.value, "error": measurement.error}
69eedd9006c63f5734c762d6113495a913d5a8c4
15,111
from exifpy.objects import Ratio def nikon_ev_bias(seq): """ http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp First digit seems to be in steps of 1/6 EV. Does the third value mean the step size? It is usually 6, but it is 12 for the ExposureDifference. Check for an error condition that could cause a crash. This only happens if something has gone really wrong in reading the Nikon MakerNote. """ if len(seq) < 4: return "" # if seq == [252, 1, 6, 0]: return "-2/3 EV" if seq == [253, 1, 6, 0]: return "-1/2 EV" if seq == [254, 1, 6, 0]: return "-1/3 EV" if seq == [0, 1, 6, 0]: return "0 EV" if seq == [2, 1, 6, 0]: return "+1/3 EV" if seq == [3, 1, 6, 0]: return "+1/2 EV" if seq == [4, 1, 6, 0]: return "+2/3 EV" # Handle combinations not in the table. a = seq[0] # Causes headaches for the +/- logic, so special case it. if a == 0: return "0 EV" if a > 127: a = 256 - a ret_str = "-" else: ret_str = "+" b = seq[2] # Assume third value means the step size whole = a / b a = a % b if whole != 0: ret_str = ret_str + str(whole) + " " if a == 0: ret_str += "EV" else: r = Ratio(a, b) ret_str = ret_str + r.__repr__() + " EV" return ret_str
09a91fc3d82851bb6411b549c282a16f02470e88
15,112
import json


def process_message(schema, publisher, data):
    """
    Method to process messsages for all the bases that uses Google's Pub/Sub.

    Args:
        schema (:obj:`dict`, required): A JSON schema for contract validation.
            JSON Schema is a vocabulary that allows you to annotate and validate
            JSON documents.
        publisher (:obj:`PubSub`, optional): Instance of the '.manager.PubSub'.
        data (:obj: `dict`, required): A dictionary representing the message body.
    """
    try:
        data = json.loads(request.data)
        validate(data, schema, format_checker=FormatChecker())
        publisher.publish(data)
        return data, 202
    except ValidationError as validate_error:
        return str(validate_error), 400
01e396355e6f7fd6913eaff786af39c95da64718
15,113
def rename_record_columns(records, columns_to_rename):
    """
    Renames columns for better desc and to match Socrata column names

    :param records: list - List of record dicts
    :param columns_to_rename: dict - Dict of Hasura columns and matching Socrata columns
    """
    for record in records:
        for column, rename_value in columns_to_rename.items():
            if column in record.keys():
                record[rename_value] = record.pop(column)
    return records
41d5cc90a368f61e8ce138c54e9f5026bacd62b9
15,114
import requests
import json


def request_similar_resource(token, data_):
    """If a similar resource to the data_ passed exists, this method gets and returns it"""
    headers = {'Authorization': 'Token {}'.format(token.token)}
    # get the resource endpoint
    url_check_res = URL.DB_URL + 'getSimilarResource/'
    # only res code if shadow id not passed
    resource_code = data_['resource_accessing'].split('/')[1]
    url_check_res += '{}/'.format(resource_code)
    if "shadow_id" in data_:
        url_check_res += "{}/".format(data_['shadow_id'])

    req = requests.get(url=url_check_res, headers=headers)

    code_to_return = HTTPStatus.NOT_FOUND
    data_to_return = {"success": False}
    if req.status_code == HTTPStatus.OK:
        code_to_return = HTTPStatus.OK
        data_to_return = json.loads(req.text)

    return code_to_return, data_to_return
84981ff2520050651b0cb83b11198a2fc1117582
15,115
def total(initial, *positionals, **keywords):
    """ Simply sums up all the passed numbers. """
    count = initial
    for n in positionals:
        count += n
    for n in keywords:
        count += keywords[n]
    return count
2df0b37ddec7e4bcdd30d302d1b7297cec0ef3cc
15,116
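A quick usage sketch for the total snippet above (editor illustration with made-up values): positional and keyword arguments are all added to the initial value.

    assert total(1, 2, 3, four=4) == 10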
def login_required(f): """Ensures user is logged in before action Checks of token is provided in header decodes the token then returns current user info """ @wraps(f) def wrap(*args, **kwargs): token = None if 'x-access-token' in request.headers: token = request.headers['x-access-token'] if not token: return jsonify({ 'warning': 'Missing token. Please register or login' }), 401 is_token_valid = versions.v2.models.AuthToken.query.filter_by(token=token).first() is_token_valid = is_token_valid.valid if is_token_valid else True if not is_token_valid: return jsonify({ 'warning': 'Login again'}), 401 try: data = jwt.decode(token, app.config['SECRET_KEY']) current_user = data['id'] except jwt.ExpiredSignatureError: return jsonify({ 'warning': 'Expired token. Please login to get a new token' }), 401 except ValueError: return jsonify({ 'warning': 'Invalid token. Please register or login' }), 401 return f(current_user, *args, **kwargs) return wrap
68b36213830f9fad7f6bcf7ec5951534331c5507
15,117
def loop_to_unixtime(looptime, timediff=None):
    """Convert event loop time to standard Unix time."""
    if timediff is None:
        timediff = _get_timediff()
    return looptime + timediff
c2da70e961a5802c2da37f04094baec2c6c88f3c
15,118
def groups(column: str) -> "pli.Expr":
    """
    Syntactic sugar for `pl.col("foo").agg_groups()`.
    """
    return col(column).agg_groups()
30fd3eae7abb4c47ce5d12d0c5d17184d5c25770
15,119
from authentek.internal import app
import os


def create_app(testing=False, cli=False) -> Flask:
    """Application factory, used to create application
    """
    app.config.from_object(os.getenv('APP_SETTINGS', 'authentek.server.config.DevelopmentConfig'))

    if testing is True:
        app.config["TESTING"] = True

    app = configure_extensions(app, cli)
    register_blueprints(app)

    return app
044ad7531f8ca1410f2d2f232856715a9c12e0d3
15,120
def filter_roidb(roidb, config): """ remove roidb entries without usable rois """ def is_valid(entry): """ valid images have at least 1 fg or bg roi """ overlaps = entry['max_overlaps'] fg_inds = np.where(overlaps >= config.TRAIN.FG_THRESH)[0] bg_inds = np.where((overlaps < config.TRAIN.BG_THRESH_HI) & (overlaps >= config.TRAIN.BG_THRESH_LO + 0.0001))[0] valid = len(fg_inds) > 0 or len(bg_inds) > 0 return valid num = len(roidb) filtered_roidb = [entry for entry in roidb if is_valid(entry)] num_after = len(filtered_roidb) print 'filtered %d roidb entries: %d -> %d' % (num - num_after, num, num_after) return filtered_roidb
e93c4e2236c1febd773e216f109cd2657c94084e
15,121
def get_res(url):
    """
    Fetch the result with requests
    :param url:
    :return:
    """
    try:
        requests.adapters.DEFAULT_RETRIES = 5
        res = requests.get(url)
        time.sleep(random.randint(0, 3))
        if res.status_code == 200:
            return res
        return None
    except Exception, e:
        time.sleep(20)
        log.debug(str(e) + ' error')
        return None
ed643029be33ff3e76822f74060b0b3588f97f50
15,122
def seebeck_thermometry(T_Kelvin): """ This function returns the Seebeck coefficient of the thermocouple concerned (by default type "E") at a certain temperature. The input of the function is a temperature in Kelvin, but the coefficient below are for a polynomial function with T in Celsius. The output is S in [V / K] """ coeff_E_below_270K = np.array([ 0, 5.8665508708E1, 4.5410977124E-2, -7.7998048686E-4, -2.5800160843E-5, -5.9452583057E-7, -9.3214058667E-9, -1.0287605534E-10, -8.0370123621E-13, -4.3979497391E-15, -1.6414776355E-17, -3.9673619516E-20, -5.5827328721E-23, -3.4657842013E-26 ])[::-1] # Reverse for poly1d coeff_E_above_270K = np.array([ 0, 5.8665508710E1, 4.5032275582E-2, 2.8908407212E-5, -3.3056896652E-7, 6.5024403270E-10, -1.9197495504E-13, -1.2536600497E-15, 2.1489217569E-18, -1.4388041782E-21, 3.5960899481E-25 ])[::-1] # Reverse for poly1d T_Celsius = T_Kelvin - 273.15 ## Selection of coefficients for temperature regime index_below = np.where(T_Celsius <= 0) index_above = np.where(T_Celsius > 0) S_values = np.zeros(np.size(T_Kelvin)) E_below = np.poly1d(coeff_E_below_270K) # is a poly1d object in microVolt S_below = np.polyder(E_below) # is a poly1d object in microVolt / Celsius S_values[index_below] = S_below(T_Celsius[index_below])*1e-6 # is in Volt / K E_above = np.poly1d(coeff_E_above_270K) # is a poly1d object in microVolt S_above = np.polyder(E_above) # is a poly1d object in microVolt / Celsius S_values[index_above] = S_above(T_Celsius[index_above])*1e-6 # is in Volt / K return S_values
8fca07e7e6488a98c96cc76c68d4ab1b656951e5
15,123
def correlation_permutation_test( x, y, f, side, n=10000, confidence=0.99, plot=None, cores=1, seed=None ): """This function carries out Monte Carlo permutation tests comparing whether the correlation between two variables is statistically significant :param x: An iterable of X values observed :param y: An iterable of Y values observed :param f: The function for calculating the relationship strength between X and Y :param side: The side to use for hypothesis testing :param n: The number of permutations to sample, defaults to 10000 :type n: int, optional :param confidence: The probability that the true p-value is contained in the intervals returned, defaults to 0.99 :type confidence: float, optional :param plot: The name of a file to draw a plot of permuted correlations to, defaults to None :type plot: str, optional :param cores: The number of logical CPUs to use, defaults to 1 :type cores: int, optional :param seed: The seed for randomisation, defaults to None :type seed: int, optional :return: Named tuple containing upper and lower bounds of p-value at the given confidence """ if seed: rng = _rd.Random(seed) else: rng = _rd.Random() if callable(f): _f = f elif f == "pearsonr": _f = _pearsonr elif f == "spearmanr": _f = _spearmanr else: raise ValueError( "{} not valid for f -- must be a function, 'pearsonr', or 'spearmanr'".format( f ) ) _x = list(x) _y = list(y) if side in _GT: stat_0 = _f(_x, _y) elif side in _LT: stat_0 = _f(_x, _y) elif side in _BOTH: stat_0 = abs(_f(_x, _y)) else: raise ValueError( "{} not valid for side -- should be 'greater', 'lower', or 'both'".format( side ) ) jobs = ((_x[:], _y[:], stat_0, _f, rng.randint(0, 1e100)) for _ in range(n)) if side in _GT: result = _job_hander(_correlation_greater, jobs, cores) elif side in _LT: result = _job_hander(_correlation_lower, jobs, cores) else: result = _job_hander(_correlation_both, jobs, cores) v = [] p = 0 for truth, val in result: p += truth v.append(val) p /= n if plot: plot_histogram(x=v, x0=stat_0, outfile=plot, side=side) lower, upper = wilson(p, n, confidence) return _RESULT(lower, upper, confidence)
bc6667985d3046f5b97dd01f109c94449f044bf9
15,124
def value_iteration(model, maxiter=100): """ Solves the supplied environment with value iteration. Parameters ---------- model : python object Holds information about the environment to solve such as the reward structure and the transition dynamics. maxiter : int The maximum number of iterations to perform. Return ------ val_ : numpy array of shape (N, 1) Value function of the environment where N is the number of states in the environment. pi : numpy array of shape (N, 1) Optimal policy of the environment. """ # initialize the value function and policy pi = np.ones((model.num_states, 1)) val_ = np.zeros((model.num_states, 1)) for i in range(maxiter): # initialize delta delta = 0 # perform Bellman update for each state for state in range(model.num_states): # store old value tmp = val_[state].copy() # compute the value function val_[state] = np.max( np.sum((model.R[state] + model.gamma * val_) * model.P[state,:,:], 0) ) # find maximum change in value delta = np.max( (delta, np.abs(tmp - val_[state])) ) # stopping criteria if delta <= EPS * (1 - model.gamma) / model.gamma: print("Value iteration converged after %d iterations." % i) break # compute the policy for state in range(model.num_states): pi[state] = np.argmax(np.sum(val_ * model.P[state,:,:],0)) return val_, pi
e04ffc27be47470f466832a14f9ecf9910d18f27
15,125
from typing import Callable
import inspect
import abc


def node(function: Callable):
    """A decorator that registers a function to execute when a node runs"""
    sig = inspect.signature(function)
    args = []
    for (name, param) in sig.parameters.items():
        value = param.default
        if value is inspect.Parameter.empty:
            raise TypeError(f"{name} must have a type (e.g. {name}=InputTable)")
        if inspect.isclass(value) and issubclass(value, _NodeInterfaceEntry):
            if value.__class__ in (type, abc.ABCMeta):
                value = value()
            # noinspection PyCallingNonCallable
            args.append(value(name))
        else:
            raise TypeError(f"{name} is not a valid node parameter type")
    return NodeFunction(function, args)
65ed2c383e1354a0663daef98b78b73382ea65ea
15,126
def mg_refractive(m, mix):
    """Maxwell-Garnett EMA for the refractive index.

    Args:
        m: Tuple of the complex refractive indices of the media.
        mix: Tuple of the volume fractions of the media, len(mix)==len(m)
            (if sum(mix)!=1, these are taken relative to sum(mix))

    Returns:
        The Maxwell-Garnett approximation for the complex refractive index of
        the effective medium

    If len(m)==2, the first element is taken as the matrix and the second
    as the inclusion. If len(m)>2, the media are mixed recursively so that
    the last element is used as the inclusion and the second to last as the
    matrix, then this mixture is used as the last element on the next
    iteration, and so on.
    """
    if len(m) == 2:
        cF = float(mix[1]) / (mix[0]+mix[1]) * \
            (m[1]**2-m[0]**2) / (m[1]**2+2*m[0]**2)
        er = m[0]**2 * (1.0+2.0*cF) / (1.0-cF)
        m = np.sqrt(er)
    else:
        m_last = mg_refractive(m[-2:], mix[-2:])
        mix_last = mix[-2] + mix[-1]
        m = mg_refractive(m[:-2] + (m_last,), mix[:-2] + (mix_last,))
    return m
57712b6abd9b6a5a642767fa91b6212729b697dc
15,127
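An illustrative call of the mg_refractive snippet above (editor sketch, not a dataset row; assumes numpy imported as np, as the snippet itself does, and the index values are placeholders): a two-component mixture with 20% inclusions in a matrix of index 1.0.

    m_eff = mg_refractive((complex(1.0, 0.0), complex(1.78, 0.004)), (0.8, 0.2))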
def locateObjLocation(data, questionDict, questionIdict):
    """
    Locate the object of where questions.
    Very naive heuristic: take the noun immediately after "where".
    """
    where = questionDict['where']
    for t in range(data.shape[0] - 1):
        if data[t, 0] == where:
            for u in range(t + 1, data.shape[0]):
                word = questionIdict[data[u, 0] - 1]
                lexname = lookupLexname(word)
                if (lexname is not None and \
                    lexname.startswith('noun')) or \
                    (lexname is None):
                    return data[u, 0]
    print 'not found'
    return data[-1, 0]
4b0b8ff892e7d6fdbd9b1cf9d7a9ce7a50ba90c2
15,128
def main(config, model, stid, forecast_date): """ Produce a Forecast object from bufkit data. """ # Get parameters from the config try: bufr = config['BUFKIT']['BUFR'] except KeyError: raise KeyError('bufkit: missing BUFR executable path in config BUFKIT options') try: bufkit_directory = config['BUFKIT']['BUFKIT_directory'] except KeyError: bufkit_directory = '%s/site_data' % config['THETAE_ROOT'] if config['debug'] > 50: print('bufkit warning: setting bufkit file directory to %s' % bufkit_directory) try: run_time = config['Models'][model]['run_time'] except KeyError: raise KeyError('bufkit: no run_time parameter defined for model %s in config!' % model) try: bufr_name = config['Models'][model]['bufr_name'] except KeyError: raise KeyError('bufkit: no bufr_name parameter defined for model %s in config!' % model) if 'bufr_stid' in config['Stations'][stid]: bufr_stid = config['Stations'][stid]['bufr_stid'] else: bufr_stid = str(stid) # Delete yesterday's bufkit files try: if not(config['BUFKIT']['archive']): bufr_delete_yesterday(bufkit_directory, bufr_stid, forecast_date - timedelta(days=1)) except KeyError: bufr_delete_yesterday(bufkit_directory, bufr_stid, forecast_date - timedelta(days=1)) # Get bufkit forecasts forecast = get_bufkit_forecast(config, bufr, bufkit_directory, model, bufr_name, run_time, bufr_stid, forecast_date) forecast.set_stid(str(stid)) return forecast
4a9dccc58ac96f7e3c4b90d3d94c12d602d67d15
15,129
from typing import Union
from typing import List


def mkshex(shapes: Union[CSVShape, List[CSVShape]]) -> Schema:
    """Convert list of csv2shape Shapes to ShExJSG Schema object."""
    # pylint: disable=invalid-name
    # One- and two-letter variable names do not conform to snake-case naming style

    if isinstance(shapes, CSVShape):
        shapes = [shapes]

    schema_shexjsg = Schema()
    for s in shapes:
        shape_id = IRIREF(s.shapeID)
        if s.start:
            if schema_shexjsg.start:
                print(f"Multiple start shapes: <{schema_shexjsg.start}>, <{shape_id}>")
            else:
                schema_shexjsg.start = shape_id
        shape = Shape(id=shape_id)
        for csv_tc in s.tc_list:
            add_triple_constraint(shape, csv_tc)
        if not schema_shexjsg.shapes:
            schema_shexjsg.shapes = [shape]
        else:
            schema_shexjsg.shapes.append(shape)
    return schema_shexjsg
3cc83d3a23ca982f30c6b4b64553801e404ef1b3
15,130
def get_unsigned_js_val(abs_val: int, max_unit: int, abs_limit: int) -> int:
    """Get unsigned remaped joystick value in reverse range (For example if
    the limit is 2000, and the input valueis also 2000, the value returned
    will be 1. And with the same limit, if the input value is 1, the output
    value wwill be 2000. The same applies to the values in between). This
    evenly devides the value so that the maximum js range is remapped to a
    value in the range of the specified limit.

    abs_val - The current joystick value
    max_unit - The maximum value to remap the joystick value
    abs_limit - The maximum range of the joystick
    """
    inc = abs_limit / max_unit

    # ignoring signs to keep results positive
    if abs_val > 0:
        abs_val *= -1

    val = int((abs_val / inc) + max_unit)

    # if the value is zero, return 1 (maximum range)
    if val == 0:
        val = 1

    return val
6e77d76423ffeef756291924d00cbdbb2c03cc07
15,131
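A small usage sketch for the get_unsigned_js_val snippet above (editor illustration with made-up numbers): with the limit and unit both set to 2000, the extreme input 2000 maps to 1 while the midpoint maps onto itself.

    assert get_unsigned_js_val(2000, 2000, 2000) == 1
    assert get_unsigned_js_val(1000, 2000, 2000) == 1000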
def to_xyz(struct, extended_xyz: bool = True, print_stds: bool = False, print_forces: bool = False, print_max_stds: bool = False, print_energies: bool = False, predict_energy=None, dft_forces=None, dft_energy=None, timestep=-1, write_file: str = '', append: bool = False, labels=None) -> str: """ Function taken from the FLARE python package by Vandermause et al. at: https://github.com/mir-group/flare Reference: Vandermause, J., Torrisi, S. B., Batzner, S., Xie, Y., Sun, L., Kolpak, A. M. & Kozinsky, B. On-the-fly active learning of interpretable Bayesian force fields for atomistic rare events. npj Comput Mater 6, 20 (2020). https://doi.org/10.1038/s41524-020-0283-z Convenience function which turns a structure into an extended .xyz file; useful for further input into visualization programs like VESTA or Ovito. Can be saved to an output file via write_file. :param print_stds: Print the stds associated with the structure. :param print_forces: :param extended_xyz: :param print_max_stds: :param write_file: :return: """ species_list = [Z_to_element(x) for x in struct.coded_species] xyz_str = '' xyz_str += f'{len(struct.coded_species)} \n' # Add header line with info about lattice and properties if extended # xyz option is called. if extended_xyz: cell = struct.cell xyz_str += f'Lattice="{cell[0,0]} {cell[0,1]} {cell[0,2]}' xyz_str += f' {cell[1,0]} {cell[1,1]} {cell[1,2]}' xyz_str += f' {cell[2,0]} {cell[2,1]} {cell[2,2]}"' if timestep > 0: xyz_str += f' Timestep={timestep}' if predict_energy: xyz_str += f' PE={predict_energy}' if dft_energy is not None: xyz_str += f' DFT_PE={dft_energy}' xyz_str += ' Properties=species:S:1:pos:R:3' if print_stds: xyz_str += ':stds:R:3' stds = struct.stds if print_forces: xyz_str += ':forces:R:3' forces = struct.forces if print_max_stds: xyz_str += ':max_std:R:1' stds = struct.stds if labels: xyz_str += ':tags:R:1' clustering_labels = struct.local_energy_stds if print_energies: if struct.local_energies is None: print_energies = False else: xyz_str += ':local_energy:R:1' local_energies = struct.local_energies if dft_forces is not None: xyz_str += ':dft_forces:R:3' xyz_str += '\n' else: xyz_str += '\n' for i, pos in enumerate(struct.positions): # Write positions xyz_str += f"{species_list[i]} {pos[0]} {pos[1]} {pos[2]}" # If extended XYZ: Add in extra information if print_stds and extended_xyz: xyz_str += f" {stds[i,0]} {stds[i,1]} {stds[i,2]}" if print_forces and extended_xyz: xyz_str += f" {forces[i,0]} {forces[i,1]} {forces[i,2]}" if print_energies and extended_xyz: xyz_str += f" {local_energies[i]}" if print_max_stds and extended_xyz: xyz_str += f" {np.max(stds[i,:])} " if labels and extended_xyz: xyz_str += f" {clustering_labels[i]} " if dft_forces is not None: xyz_str += f' {dft_forces[i, 0]} {dft_forces[i,1]} ' \ f'{dft_forces[i, 2]}' if i < (len(struct.positions) - 1): xyz_str += '\n' # Write to file, optionally if write_file: if append: fmt = 'a' else: fmt = 'w' with open(write_file, fmt) as f: f.write(xyz_str) f.write("\n") return xyz_str
729a5429d2c6b4cc0c63462577b91a582bf197ed
15,132
def load_file(filename: str):
    """Load the .xls file and return as a dataframe object."""
    df = pd.read_csv(filename, delimiter='\t')
    return df
09a7f6abc67bf80651dffe5d7698798f5dfc5be8
15,133
def loadRegexList(regexListFile):
    """Returns regexList, registries, internetSources"""
    regexList = []
    registries = set()
    internetSourceTypes = set()
    libLF.log('Loading regexes from {}'.format(regexListFile))
    with open(regexListFile, 'r') as inStream:
        for line in inStream:
            line = line.strip()
            if len(line) == 0:
                continue

            try:
                # Build the Regex
                regex = libLF.Regex()
                regex.initFromNDJSON(line)

                regexList.append(regex)
                registries = registries.union(regex.registriesUsedIn())
                internetSourceTypes = internetSourceTypes.union(regex.internetSourcesAppearedIn())
            except KeyboardInterrupt:
                raise
            except BaseException as err:
                libLF.log('Exception parsing line:\n  {}\n  {}'.format(line, err))

    libLF.log('Loaded {} Regex\'es'.format(len(regexList)))
    return regexList, list(registries), list(internetSourceTypes)
7a3fe4c269aa4c868684384417f3c1d0229fcad8
15,134
def _decode_and_center_crop(image_bytes, image_size, resize_method=None):
    """Crops to center of image with padding then scales image_size."""
    shape = tf.shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]

    padded_center_crop_size = tf.cast(
        ((image_size / (image_size + CROP_PADDING)) *
         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
        tf.int32)

    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    image = tf.image.crop_to_bounding_box(image_bytes, offset_height, offset_width,
                                          padded_center_crop_size, padded_center_crop_size)
    image = _resize_image(image, image_size, resize_method)
    return image
933bfb91a84f9fe403adf9cbfc9efeb57d1e50f0
15,135
import typing import numpy def match_beacons_translate_only( sensor_a_beacons: typing.Set[typing.Tuple[int, int, int]], sensor_b_beacons: numpy.ndarray, min_matching: int, ) -> typing.Optional[numpy.ndarray]: """ Search for matching beacons between `sensor_a_beacons` and `sensor_b_beacons`, assuming their orientation matches. Returns either the offset of sensor_b relative to sensor_a, or None if no 12 matching beacons were found. """ # naive approach: full search for beacon_a in sensor_a_beacons: for beacon_b_num in range(sensor_b_beacons.shape[0]): # assume sensor_a_beacons[beacon_a_num] is the same beacon as # sensor_b_beacons[beacon_b_num] sensor_b_relative_to_sensor_a = beacon_a - sensor_b_beacons[beacon_b_num] sensor_b_beacons_relative_to_sensor_a = sensor_b_beacons + sensor_b_relative_to_sensor_a m = num_matching_beacons(sensor_a_beacons, sensor_b_beacons_relative_to_sensor_a) if m >= min_matching: return sensor_b_relative_to_sensor_a return None
0fd12c05d9ee159e301c7bad9d1ddcaa3009a960
15,136
def txm_log():
    """
    Return the logger.
    """
    return __log__
3b03daf2075549dc4d333e5a47d8e9a1cef21152
15,137
def remove_list_by_name(listslist, name):
    """
    Finds a list in a lists of lists by it's name, removes and returns it.

    :param listslist: A list of Twitter lists.
    :param name: The name of the list to be found.
    :return: The list with the name, if it was found. None otherwise.
    """
    for i in range(len(listslist)):
        if listslist[i].name == name:
            return listslist.pop(i)
356a7d12f3b2af9951327984ac6d55ccb844bf72
15,138
import math def song_clicks_metric(ranking): """ Spotify p :param ranking: :return: """ if 1 in ranking: first_idx = ranking.index(1) return math.floor(first_idx / 10) return 51 @staticmethod def print_subtest_results(sub_test_names, metric_names, results): (num_subtest, num_metrics) = results.shape print('{0: <15}'.format("Subtest"),"\t", end="") for i in range(num_metrics): print(metric_names[i], "\t", end="") print() for st in range(num_subtest): print('{0: <15}'.format(sub_test_names[st]), "\t", end="") for m in range(num_metrics): print(np.round(results[st][m],decimals=3), "\t", end="") print() @staticmethod def print_overall_results(metric_names, results): print('{0: <15}'.format(""),"\t", end="") for i in range(len(metric_names)): print(metric_names[i], "\t", end="") print() print('{0: <15}'.format("Overall"),"\t", end="") for m in range(len(metric_names)): print(np.round(results[m],decimals=3), "\t", end="") print()
ec6400e7929a2ab0f7f691fffa0ecb3be039b012
15,139
import copy


def merge_reports(master: dict, report: dict):
    """
    Merge classification reports into a master list
    """
    keys = master.keys()
    ret = copy.deepcopy(master)
    for key in keys:
        scores = report[key]
        for score, value in scores.items():
            ret[key][score] += [value]
    return ret
3ac633c38a8bb73a57841138cba8cbb80091cf04
15,140
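A minimal usage sketch for the merge_reports snippet above (editor illustration with hypothetical report dicts): each per-class score from the new report is appended to the master's list for that score.

    master = {"cat": {"precision": [0.50]}}
    report = {"cat": {"precision": 0.75}}
    assert merge_reports(master, report) == {"cat": {"precision": [0.50, 0.75]}}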
import os import logging def parse_xeasy_peaks(peak_file): """ Parse Xeasy3D peakfile to Peaks object Xeasy file format stores a column labled 'unused' to indicate rather the peak has been used in a structure calculation procedure (0 or 1). This column is, however, not assigned automatically and may not be set at all by the user. :param peak_file: Xeasy3D peak file path :type peak_file: string """ assert os.path.exists(peak_file), 'Xeasy3D peakfile {0} does not exist.'.format(peak_file) logging.debug('Parsing Xeasy3D peakfile {0}'.format(peak_file)) peaks = Peaks() with open(peak_file) as peakfile: for line in peakfile.readlines(): if not line.startswith('#') and len(line): line = line.strip().split() if len(line) > 10: peak = { 'id':int(line[0]), 'w1':float(line[1]), 'w2':float(line[2]), 'w3':float(line[3]), 'spec_type':line[5], 'vol':float(line[6]), 'vol_err':float(line[7]), 'intm':line[8], 'unused':int(line[9]), 'ass1':int(line[10]), 'ass2':int(line[11]), 'ass3':int(line[12]) } peaks.add(peak) return peaks
3c17e6e6da4591e96ac5313ca3374894da96eff5
15,141
def multi_layer_images(): """ Returns complex images (with sizes) for push and pull testing. """ # Note: order is from base layer down to leaf. layer1_bytes = layer_bytes_for_contents( "layer 1 contents", mode="", other_files={"file1": "from-layer-1",} ) layer2_bytes = layer_bytes_for_contents( "layer 2 contents", mode="", other_files={"file2": "from-layer-2",} ) layer3_bytes = layer_bytes_for_contents( "layer 3 contents", mode="", other_files={"file1": "from-layer-3", "file3": "from-layer-3",} ) layer4_bytes = layer_bytes_for_contents( "layer 4 contents", mode="", other_files={"file3": "from-layer-4",} ) layer5_bytes = layer_bytes_for_contents( "layer 5 contents", mode="", other_files={"file4": "from-layer-5",} ) return [ Image( id="layer1", bytes=layer1_bytes, parent_id=None, size=len(layer1_bytes), config={"internal_id": "layer1"}, ), Image( id="layer2", bytes=layer2_bytes, parent_id="layer1", size=len(layer2_bytes), config={"internal_id": "layer2"}, ), Image( id="layer3", bytes=layer3_bytes, parent_id="layer2", size=len(layer3_bytes), config={"internal_id": "layer3"}, ), Image( id="layer4", bytes=layer4_bytes, parent_id="layer3", size=len(layer4_bytes), config={"internal_id": "layer4"}, ), Image( id="someid", bytes=layer5_bytes, parent_id="layer4", size=len(layer5_bytes), config={"internal_id": "layer5"}, ), ]
08b35fa4202a7d25ec415ed3b6d1ae6a9f37fd9c
15,142
from typing import Iterable


def user_teams(config: Config, email: str) -> Iterable[Team]:
    """Return the teams a user member is expected to be a member of.

    Only the teams in which the user is a direct member are return. The
    ancestors of these teams are not returned.
    """
    names = config.by_member.get(email)
    if not names:
        return []
    return (_get_team_exists(config, x) for x in names)
669ec82b68e8e530dafeee4e272c85743bde7db1
15,143
import torch


def se3_transform(g, a, normals=None):
    """ Applies the SE3 transform

    Args:
        g: SE3 transformation matrix of size ([1,] 3/4, 4) or (B, 3/4, 4)
        a: Points to be transformed (N, 3) or (B, N, 3)
        normals: (Optional). If provided, normals will be transformed

    Returns:
        transformed points of size (N, 3) or (B, N, 3)
    """
    R = g[..., :3, :3]  # (B, 3, 3)
    p = g[..., :3, 3]  # (B, 3)

    if len(g.size()) == len(a.size()):
        b = torch.matmul(a, R.transpose(-1, -2)) + p[..., None, :]
    else:
        raise NotImplementedError
        b = R.matmul(a.unsqueeze(-1)).squeeze(-1) + p  # No batch. Not checked

    if normals is not None:
        rotated_normals = normals @ R.transpose(-1, -2)
        return b, rotated_normals
    else:
        return b
9d8ca31dd6df6382e6a45fb80f30b61e9902da5c
15,144
def summarize_single_OLS(regression, col_dict, name, is_regularized=False): """Return dataframe aggregating over-all stats from a dictionary-like object containing OLS result objects.""" reg = regression try: col_dict['rsquared'][name] = reg.rsquared except AttributeError: col_dict['rsquared'][name] = 'NA' try: col_dict['rsquared_adj'][name] = reg.rsquared_adj except AttributeError: col_dict['rsquared_adj'][name] = 'NA' col_dict['f_pvalue'][name] = reg.f_pvalue col_dict['condition_number'][name] = reg.condition_number col_dict['regularized'][name] = is_regularized if not is_regularized: outliers = reg.outlier_test(method='fdr_bh')['fdr_bh(p)'] <= 0.05 col_dict['n_outliers'][name] = (outliers).sum() col_dict['outliers'][name] = ','.join(outliers.index[outliers].values) else: col_dict['n_outliers'][name] = "NA" col_dict['outliers'][name] = "NA" col_dict['aic'][name] = reg.aic return col_dict
b7dd8dfac6cf1b743491ae4e1abfc20fb73e8f31
15,145
def simplify(polynom):
    """Simplifies a function with binary variables
    """
    polynom = Poly(polynom)
    new_polynom = 0
    variables = list(polynom.free_symbols)
    for var_i in variables:
        coefficient_i = polynom.as_expr().coeff(var_i) / 2
        coefficient_i += polynom.as_expr().coeff(var_i ** 2)
        new_polynom += coefficient_i.as_coefficients_dict()[1] * var_i
        for var_j in variables:
            if var_j != var_i:
                coefficient_j = coefficient_i.coeff(var_j)
                new_polynom += coefficient_j.as_coefficients_dict()[1] * \
                    var_i * var_j
    return new_polynom + polynom.as_expr().as_coefficients_dict()[1]
62647c9a7530df8b73644e7af96b77b06bfb5285
15,146
def process_login(): """Log user into site. Find the user's login credentials located in the 'request', look up the user, and store them in the session. """ user_login = request.get_json() if crud.get_user_by_email(user_login['email']): current_user = crud.get_user_by_email(user_login['email']) print(current_user) if current_user.password == user_login['password']: session['user'] = current_user.user_name flash("You've logged in successfully. Welcome to your Shelve-It account.") return(jsonify({'status': "ok. you are logged in!", "user" : current_user.user_name})) else: session['user'] = 'unknown' return (jsonify({'status': "incorrect password"})) else: session['user'] = 'needs_to_register' flash("No account with that email exists. Please create one or try again") return(jsonify({'status': "no user with that email"}))
39e0498370e06ca3203c1212552ce435b1d047e0
15,147
def is_int(var):
    """ is this an integer (ie, not a float)? """
    return isinstance(var, int)
09924c6ea036fc7ee1add6ccbefc3fb0c9696345
15,148
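Usage sketch for the is_int snippet above (editor illustration): an int passes, a float does not.

    assert is_int(3) is True
    assert is_int(3.0) is False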
def returnstringpacket(pkt):
    """Returns a packet as hex string"""
    myString = ""
    for c in pkt:
        myString += "%02x" % c
    return myString
866ef7c69f522d4a2332798bdf97a966740ea0e4
15,149
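Usage sketch for the returnstringpacket snippet above (editor illustration; assumes Python 3 semantics, where iterating a bytes object yields ints, and the packet bytes are made up): each byte is rendered as two hex digits.

    assert returnstringpacket(b"\x01\xab\xff") == "01abff"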
def GetIndicesMappingFromTree( tree ): """ GetIndicesMappingFromTree ========================= reuse bill's idea to gives the indexes of all nodes (may they be a sub tree or a single leaf) gives a list of indices of every sublist. To do that, I add one thing: the last element of an index is the length of the present list. e.g. - get_indices_mapping_from_tree([1,2,3,4,5,6,7,8,9]) gives: [([0], 9)] - get_indices_mapping_from_tree([1,[2,3],4,5,6,7,8,9]) gives: [([0], 8), ([1], 2)] - get_indices_mapping_from_tree([1,[2,3,7],4,5,6,7,8,9]) gives: [([0], 8), ([1], 3)] - get_indices_mapping_from_tree([1,[2,3,7],4,[5,[6,[7,8,9]]]]) gives: [([0], 4), ([1], 3), ([3], 2), ([3, 1], 2), ([3, 1, 1], 3)] @param tree: a nested list representing a tree @return: a nested list representing the indexes of the nested lists by depth """ q = deque([ ([],tree) ]) list_of_index_lists = [([0],len(tree))] while q: (indices, sub_tree) = q.popleft() list_of_index_lists.append((indices,len(sub_tree))) for (ordinal, sst) in enumerate( sub_tree[1:] ): if isinstance( sst, list ): idxs = indices[:] idxs.append(ordinal+1) q.append( (idxs, sst) ) list_of_index_lists.pop(1) return list_of_index_lists
d18e85943273a1f4a75951f3f3fda176853b06e0
15,150
import warnings import os def preprocess_labs(lab_df: pd.DataFrame, material_to_include: list = ['any_blood'], verbose: bool = True) -> pd.DataFrame: """ Preprocess the labs dataframe :param lab_df: :param material_to_include: list of materials to include where material is one of the following: 'any_blood', 'urine' :param verbose: print preprocessing safety details :return: """ lab_df = lab_df.copy() lab_df['patient_admission_id'] = lab_df['patient_id'].astype(str) + '_' + lab_df['begin_date'].apply( lambda bd: ''.join(bd.split(' ')[0].split('.'))) lab_df.drop(columns_to_drop, axis=1, inplace=True) lab_names = set([c.split('_')[0] for c in lab_df.columns if c not in identification_columns]) new_lab_column_headers = set( ['_'.join(c.split('_')[1:]) for c in lab_df.columns if c not in identification_columns]) print('Labs measured:', lab_names) # split lab df into individual lab dfs for every lab name lab_df_split_by_lab_name = [] for _, lab_name in enumerate(lab_names): selected_columns = identification_columns + [c for c in lab_df.columns if c.split('_')[0] == lab_name] individual_lab_df = lab_df[selected_columns].dropna(subset=[f'{lab_name}_value']) individual_lab_df.columns = identification_columns + ['_'.join(c.split('_')[1:]) for c in individual_lab_df.columns if c.startswith(lab_name)] individual_lab_df['lab_name'] = lab_name lab_df_split_by_lab_name.append(individual_lab_df) reorganised_lab_df = pd.concat(lab_df_split_by_lab_name, ignore_index=True) equalized_reorganised_lab_df = reorganised_lab_df.copy() for equivalence_list in equivalence_lists: equalized_reorganised_lab_df.loc[ reorganised_lab_df['dosage_label'].isin(equivalence_list[1:]), 'dosage_label'] = equivalence_list[0] equalized_reorganised_lab_df = equalized_reorganised_lab_df[ ~equalized_reorganised_lab_df['dosage_label'].isin(dosage_labels_to_exclude)] # check that units correspond for dosage_label in equalized_reorganised_lab_df['dosage_label'].unique(): units_for_dosage_label = \ equalized_reorganised_lab_df[equalized_reorganised_lab_df['dosage_label'] == dosage_label][ 'unit_of_measure'].unique() print(dosage_label, units_for_dosage_label) if len(units_for_dosage_label) > 1: warnings.warn(f'{dosage_label} has different units: {units_for_dosage_label}') raise ValueError(f'{dosage_label} has different units: {units_for_dosage_label}') # fixing material equivalents and materials to exclude # raise error if pO2, pCO2 or pH come from arterial and venous blood for dosage_label in ['pO2', 'pCO2', 'pH']: dosage_label_materials = \ equalized_reorganised_lab_df[equalized_reorganised_lab_df['dosage_label'].str.contains(dosage_label)][ 'material_label'].unique() if 'sga' in dosage_label_materials and len(dosage_label_materials) > 1: raise ValueError(f'{dosage_label} has arterial and other materials: {dosage_label_materials}') equalized_reorganised_lab_df.loc[ reorganised_lab_df['material_label'].isin(blood_material_equivalents), 'material_label'] = 'any_blood' equalized_reorganised_lab_df = equalized_reorganised_lab_df[ ~equalized_reorganised_lab_df['material_label'].isin(material_to_exclude)] equalized_reorganised_lab_df = equalized_reorganised_lab_df[ equalized_reorganised_lab_df['material_label'].isin(material_to_include)] # correct non numeric values equalized_reorganised_lab_df = correct_non_numerical_values(equalized_reorganised_lab_df) # remove non numerical values in value column equalized_reorganised_lab_df = equalized_reorganised_lab_df[ ~equalized_reorganised_lab_df['value'].isin(non_numerical_values_to_remove)] 
equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True) remaining_non_numerical_values = \ equalized_reorganised_lab_df[pd.to_numeric(equalized_reorganised_lab_df['value'], errors='coerce').isnull()][ 'value'].unique() print('Remaining non-numerical values:', remaining_non_numerical_values) if len(remaining_non_numerical_values) > 0: raise ValueError(f'Remaining non-numerical values: {remaining_non_numerical_values}') equalized_reorganised_lab_df['value'] = pd.to_numeric(equalized_reorganised_lab_df['value'], errors='coerce') # correct negative values # set negative values for dosage label 'hémoglobine' to NaN (NaN values will be removed later) equalized_reorganised_lab_df.loc[(equalized_reorganised_lab_df['dosage_label'] == 'hémoglobine') & ( equalized_reorganised_lab_df['value'] < 0), 'value'] = np.NAN # set negative values for dosage label 'glucose' to NaN (NaN values will be removed later) equalized_reorganised_lab_df.loc[(equalized_reorganised_lab_df['dosage_label'] == 'glucose') & ( equalized_reorganised_lab_df['value'] < 0), 'value'] = np.NAN equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True) # warn if negative values are still present if len(equalized_reorganised_lab_df[equalized_reorganised_lab_df['value'] < 0]) > 0: warnings.warn('Negative values are present. Check data.') # remove all french accents and cedillas equalized_reorganised_lab_df = remove_french_accents_and_cedillas_from_dataframe(equalized_reorganised_lab_df) # restrict to possible value ranges possible_value_ranges_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'possible_ranges_for_variables.xlsx') possible_value_ranges = pd.read_excel(possible_value_ranges_file) for variable in possible_value_ranges['variable_label'].dropna().unique(): possible_value_ranges_for_variable = possible_value_ranges[ possible_value_ranges['variable_label'] == variable] equalized_reorganised_lab_df.loc[(equalized_reorganised_lab_df['dosage_label'] == variable) & (~equalized_reorganised_lab_df['value'].between( possible_value_ranges_for_variable['Min'].values[0], possible_value_ranges_for_variable['Max'].values[0])), 'value'] = np.NAN if verbose: print(f'Excluding {equalized_reorganised_lab_df["value"].isna().sum()} observations because out of range') equalized_reorganised_lab_df.dropna(subset=['value'], inplace=True) # get mean number of values per dosage label patient admission id median_observations_per_patient_admission_id = \ equalized_reorganised_lab_df.groupby(['patient_admission_id', 'dosage_label'])['value'].count().reset_index() if verbose: print(median_observations_per_patient_admission_id.groupby('dosage_label').median()) print(equalized_reorganised_lab_df.groupby('dosage_label')['value'].describe()) return equalized_reorganised_lab_df
2f90d6e5a89fc1a0873b0878293186d657c42eef
15,151
def find_option(command, name):
    """
    Helper method to find command option by its name.
    :param command: string
    :param name: string
    :return: CommandOption
    """
    # TODO: use all_options
    if command in COMMAND_OPTIONS:
        if name == 'help':
            return OPTION_HELP
        for opt in COMMAND_OPTIONS[command]:
            if name in [opt.short_name, opt.long_name]:
                return opt
    return None
229505b909c9b42d6f1e686763db3422cd1249cb
15,152
import random def generate_pairwise(params, n_comparisons=10): """Generate pairwise comparisons from a Bradley--Terry model. This function samples comparisons pairs independently and uniformly at random over the ``len(params)`` choose 2 possibilities, and samples the corresponding comparison outcomes from a Bradley--Terry model parametrized by ``params``. Parameters ---------- params : array_like Model parameters. n_comparisons : int Number of comparisons to be returned. Returns ------- data : list of (int, int) Pairwise-comparison samples (see :ref:`data-pairwise`). """ n = len(params) items = tuple(range(n)) params = np.asarray(params) data = list() for _ in range(n_comparisons): # Pick the pair uniformly at random. a, b = random.sample(items, 2) if compare((a, b), params) == a: data.append((a, b)) else: data.append((b, a)) return tuple(data)
96bea4a192d81eaf9a43f8ae493187d826dcdb21
15,153
def render_json(fun):
    """
    Decorator for views which return a dictionary that encodes the
    dictionary into a JSON string and sets the mimetype of the response to
    application/json.
    """
    @wraps(fun)
    def wrapper(request, *args, **kwargs):
        response = fun(request, *args, **kwargs)
        try:
            return JSONResponse(response)
        except TypeError:
            # The response isn't JSON serializable.
            return response
    return wrapper
15984f0fe7a6a5fbc5a6c9b360bb2780854868b4
15,154
from typing import List


def count_smileys_concise(arr: List[str]) -> int:
    """
    Another person's implementation.
    Turns the list into an string, then uses findall() on that string.
    Turning the result into a list makes it possible to return the length of that list.
    So this version is more concise, but uses more space.

    O(n) where n is length of arr.
    """
    return len(list(findall(r"[:;][-~]?[)D]", " ".join(arr))))
8fbd353422cbac9840294af3d0a6022d8a45e4e1
15,155
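Usage sketch for the count_smileys_concise snippet above (editor illustration; assumes `from re import findall`, which the snippet's body relies on, and the example faces are made up): only ":)" and ":-D" match the pattern.

    assert count_smileys_concise([":)", ";(", ";}", ":-D"]) == 2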
def transform_dead_op_vars(graph, translator=None):
    """Remove dead operations and variables that are passed over a link
    but not used in the target block. Input is a graph."""
    return transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
                                            [graph], translator)
c10fde9cca58732bf6bd17018e963ee629a1796d
15,156
import torch def test_reconstruction_torch(): """Test that input reconstruction via backprop has decreasing loss.""" if skip_all: return None if run_without_pytest else pytest.skip() if cant_import('torch'): return None if run_without_pytest else pytest.skip() device = 'cuda' if torch.cuda.is_available() else 'cpu' J = 6 Q = 8 N = 1024 n_iters = 30 jtfs = TimeFrequencyScattering1D(J, N, Q, J_fr=4, average_fr=False, frontend='torch', out_type='array', sampling_filters_fr=('exclude', 'resample'), max_pad_factor=1, max_pad_factor_fr=2, pad_mode_fr='conj-reflect-zero', ).to(device) y = torch.from_numpy(echirp(N, fmin=1).astype('float32')).to(device) Sy = jtfs(y) div = Sy.max() Sy /= div torch.manual_seed(0) x = torch.randn(N, device=device) x /= torch.max(torch.abs(x)) x.requires_grad = True optimizer = torch.optim.SGD([x], lr=140000, momentum=.9, nesterov=True) loss_fn = torch.nn.MSELoss() losses, losses_recon = [], [] for i in range(n_iters): optimizer.zero_grad() Sx = jtfs(x) Sx /= div loss = loss_fn(Sx, Sy) loss.backward() optimizer.step() losses.append(float(loss.detach().cpu().numpy())) xn, yn = x.detach().cpu().numpy(), y.detach().cpu().numpy() losses_recon.append(float(rel_l2(yn, xn))) # unsure why CPU's worse th = 1e-5 if device == 'cuda' else 2e-5 th_end_ratio = 50 if device == 'cuda' else 30 th_recon = 1.05 end_ratio = losses[0] / losses[-1] assert end_ratio > th_end_ratio, end_ratio assert min(losses) < th, "{:.2e} > {}".format(min(losses), th) assert min(losses_recon) < th_recon, "{:.2e} > {}".format(min(losses_recon), th_recon) if metric_verbose: print(("\nReconstruction (torch):\n(end_start_ratio, min_loss, " "min_loss_recon) = ({:.1f}, {:.2e}, {:.2f})").format( end_ratio, min(losses), min(losses_recon)))
070e7e52ce44c2a875a7ad418ffb985a1827d8c6
15,157
def to_pixels(Hinv, loc):
    """
    Given H^-1 and (x, y, z) in world coordinates, returns (c, r) in image
    pixel indices.
    """
    loc = to_image_frame(Hinv, loc).astype(int)
    return (loc[1], loc[0])
09dff4d2045c64d753aa8229f44f049f1a6936c3
15,158
from io import StringIO import os import shutil def temporary_upload(request): """ Accepts an image upload to server and saves it in a temporary folder. """ if not 'image' in request.FILES: return HttpResponse(simplejson.dumps({'status': 'no image uploaded'})) filename = request.FILES['image']._get_name().strip().lower() imgdata = StringIO(request.FILES['image'].read()) imgdata.seek(0) try: im = Image.open(imgdata) im.size if im.size < (480, 250): return HttpResponse(simplejson.dumps({'status': "Image size should be minimum 480 width and 250 height.\n\nYours is %dx%d." % (im.size[0], im.size[1]) })) except Exception: return HttpResponse(simplejson.dumps({'status': 'couldn\'t open the image'})) local_dir = os.path.join(settings.MEDIA_ROOT, 'tmp', request.session.session_key) try: shutil.rmtree(local_dir, onerror=lambda f, p, e: None) os.makedirs(local_dir) except IOError: pass local_filename = os.path.join(local_dir, filename) url = os.path.join(settings.MEDIA_URL, 'tmp', request.session.session_key, filename) f = open(local_filename, 'wb') f.write(imgdata.getvalue()) f.close() request.session['temporary_filename'] = local_filename ret = simplejson.dumps({'status': 'ok', 'link': url, 'filename': local_filename}) return HttpResponse(ret)
432ce9fe26b9ea09c2399418e7d6027ba17ce1d2
15,159
import os def adf_test(path): """ Takes a csv file path as input (as a string) This file must have one heading as Dates and the other as Close This csv file will be converted into a series and then the ADF test will be completed using data from that csv file (Optional: will plot the data using matplotlib as a line graph) """ if not os.path.exists(path): raise Exception("The path specified does not exist") df = pd.read_csv(path, parse_dates=['Time']) series = df.loc[:, 'Close'].values # # Plotting the graph of the date against the close # df.plot(figsize=(14,8), label="Close Price", title='Series', marker=".") # plt.ylabel("Close Prices") # plt.legend() # plt.show() # ADF test result = adfuller(series, autolag="AIC") print(f"ADF Statistic = {result[0]}") print(f"p_value = {result[1]}") print(f"usedlags = {result[2]}") # Result 4 is a dictionary that contains the critical values for k, v in result[4].items(): print(f"Critical Values are:\n {k}, {v}") print(result) return result[0], result[1], result[2]
dd29d22f9739c6a4b17d7c53d968591cf575182a
15,160
def calc_distance_between_points_two_vectors_2d(v1, v2): """calc_distance_between_points_two_vectors_2d [pairwise distance between vectors points] Arguments: v1 {[np.array]} -- [description] v2 {[type]} -- [description] Raises: ValueError -- [description] ValueError -- [description] ValueError -- [description] Returns: [type] -- [description] testing: >>> v1 = np.zeros((2, 5)) >>> v2 = np.zeros((2, 5)) >>> v2[1, :] = [0, 10, 25, 50, 100] >>> d = calc_distance_between_points_two_vectors_2d(v1.T, v2.T) """ # Check dataformats if not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray): raise ValueError("Invalid argument data format") if not v1.shape[1] == 2 or not v2.shape[1] == 2: raise ValueError("Invalid shape for input arrays") if not v1.shape[0] == v2.shape[0]: raise ValueError("Error: input arrays should have the same length") # Calculate distance if v1.shape[1] < 20000 and v1.shape[0] < 20000: # For short vectors use cdist dist = distance.cdist(v1, v2, "euclidean") dist = dist[:, 0] else: dist = [ calc_distance_between_points_2d(p1, p2) for p1, p2 in zip(v1, v2) ] return dist
75d00fae9dbe8353e1b53d12428de054e267a528
15,161
import heapq import random def get_nearest_list_index(node_list, guide_node): """ Finds nearest nodes among node_list, using the metric given by weighted_norm and chooses one of them at random. Parameters ---------- node_list : list list of nodes corresponding to one of the two search trees growing towards each other. guide_node : dict node that has been randomly chosen to expand towards Returns ------- min_ind : int index of the chosen node min_dist_choice : float distance between the chosen node and the guide_node """ k_nearest = int(len(node_list) / 100) + 1 dlist = [weighted_norm(node, guide_node) for node in node_list] k_min_dist_list = heapq.nsmallest(k_nearest, dlist) min_dist_choice = random.choice(k_min_dist_list) min_ind = dlist.index(min_dist_choice) return min_ind, min_dist_choice
7d8a373a589e87dc04f72150424685c088b535fb
15,162
def get_extensions_from_dir(path: str) -> list[str]:
    """Gets all files that end with ``.py`` in a directory and returns a python dotpath."""
    dirdotpath = ".".join(path.split(sep)[1:])  # we ignore the first part because we don't want to add the ``./``.
    return [f"{dirdotpath}.{file}" for file in listdir(path) if file.endswith(".py")]
c5a12241270f970733c055493534c7f5e8548fd2
15,163
def to_normalized_exacta_dividends(x, scr=-1):
    """ Convert 2-d representation of probabilities to dividends
    :param x:
    :param scr:
    :return:
    """
    fx = to_normalized_dividends(to_flat_exacta(x), scr=scr)
    return from_flat_exacta(fx, diag_value=scr)
1c216908752326333185da7d21e7657f722e20f1
15,164
def CSVcreation():
    """This functions allows to access to page for the creation of csv"""
    if "logged_in" in session and session["logged_in"] == True:
        print("User login", session["username"])
        try:
            count1 = managedb.getCountLoginDB(session["username"])
            if count1 == 0:
                return redirect(url_for('index'))
            return render_template('CSVcreation.html', data=data)
        except Exception as e:
            print("Error DB:", str(e))
            return redirect(url_for('index'))
    return redirect(url_for('index'))
33af7221ab77d8d0d40b60d220ce8e59ba728f0f
15,165
from pathlib import Path def filter_perm(user, queryset, role): """Filter a queryset. Main authorization business logic goes here. """ # Called outside of view if user is None: # TODO: I think this is used if a user isn't logged in and hits our endpoints which is a problem return queryset # Must be logged in if not user.is_active or user.is_anonymous: return queryset.none() # Superusers can see all (not staff users) if user.is_active and user.is_superuser: return queryset # Check permissions conditions = [] model = queryset.model paths_to_checksumfile = [*get_paths(model, models.ChecksumFile)] if model == models.Collection: # Add custom reverse relationships field = model._meta.get_field('checksumfiles') path = Path(field) paths_to_checksumfile.append(path) for path in paths_to_checksumfile: # A user can read/write a file if they are the creator is_creator = path.q(created_by=user) conditions.append(is_creator) if ( getattr(settings, 'RGD_GLOBAL_READ_ACCESS', False) and role == models.CollectionPermission.READER ): # A user can read any file by default has_no_owner = path.q(created_by__isnull=True) conditions.append(has_no_owner) for path in get_paths(model, models.Collection): # Check collection permissions has_permission = path.q(collection_permissions__user=user) has_role_level = path.q(collection_permissions__role__gte=role) conditions.append(has_permission & has_role_level) whitelist = ( queryset.none() .union(*(queryset.filter(condition) for condition in conditions), all=True) .values('pk') ) return queryset.filter(pk__in=whitelist)
9489253f19af9bb0d0d4fdbcc00f349037f7b85a
15,166
import types def generate_copies(func, phis): """ Emit stores to stack variables in predecessor blocks. """ builder = Builder(func) vars = {} loads = {} # Allocate a stack variable for each phi builder.position_at_beginning(func.startblock) for block in phis: for phi in phis[block]: vars[phi] = builder.alloca(types.Pointer(phi.type)) # Generate loads in blocks containing the phis for block in phis: leaders = list(block.leaders) last_leader = leaders[-1] if leaders else block.head builder.position_after(last_leader) for phi in phis[block]: loads[phi] = builder.load(vars[phi]) # Generate copies (store to stack variables) for block in phis: for phi in phis[block]: preds, args = phi.args var = vars[phi] phi_args = [loads.get(arg, arg) for arg in args] for pred, arg in zip(preds, phi_args): builder.position_before(pred.terminator) builder.store(arg, var) # Replace phis for block in phis: for phi in phis[block]: phi.replace_uses(loads[phi]) phi.delete() return vars, loads
5ee76907970dea569c34d3bd4a5f57456bed7eb4
15,167
from typing import Sequence

import numpy as np


def calculate_dv(wave: Sequence):
    """
    Given a wavelength array, calculate the minimum ``dv`` of the array.

    Parameters
    ----------
    wave : array-like
        The wavelength array

    Returns
    -------
    float
        delta-v in units of km/s
    """
    # C is assumed to be a module-level constants object exposing c_kms (speed of light in km/s)
    return C.c_kms * np.min(np.diff(wave) / wave[:-1])
8e29af2644a97948330a4a5fcaaeb2e49ddad831
15,168
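A usage sketch for calculate_dv above, assuming the constants object C it relies on provides c_kms (speed of light, roughly 299792.458 km/s).

import numpy as np

wave = np.linspace(5000.0, 5010.0, 1001)  # evenly spaced wavelengths, 0.01 per step
print(calculate_dv(wave))  # about 0.6 km/s: c * min(dlambda / lambda)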
def check_link_errors(*args, visit=(), user="user", **kwargs):
    """
    Crawl site starting from the given base URL and raise an error if the
    resulting error dictionary is not empty.

    Notes:
        Accepts the same arguments as the :func:`crawl` function.
    """
    errors, visited = crawl(*args, **kwargs)
    for url in visit:
        if url not in visited:
            errors[url] = f"URL was not visited by {user}"
    if errors:
        for url, code in errors.items():
            if isinstance(code, int):
                print(f"URL {url} returned invalid status code: {code}")
            else:
                print(f"Invalid URL {url} encountered at {code}")
        raise AssertionError(errors, visited)
    return visited
571b03e555894560128530c6e751c50a4aed0e21
15,169
import numpy as np


def cart3_to_polar2(xyz_array):
    """
    Convert 3D cartesian coordinates into 2D polar coordinates.

    This is a simple routine for converting a set of 3D cartesian vectors into
    spherical coordinates, where the position (0, 0) lies along the x-direction.

    Parameters
    ----------
    xyz_array : ndarray of float
        Cartesian coordinates, need not be of unit vector length. Shape is
        (3, coord_shape).

    Returns
    -------
    lon_array : ndarray of float
        Longitude coordinates, which increase in the counter-clockwise direction.
        Units of radians, shape is (coord_shape,).
    lat_array : ndarray of float
        Latitude coordinates, where 0 falls on the equator of the sphere.
        Units of radians, shape is (coord_shape,).
    """
    if not isinstance(xyz_array, np.ndarray):
        raise ValueError("xyz_array must be an ndarray.")
    if xyz_array.ndim == 0:
        raise ValueError("xyz_array must have ndim > 0")
    if xyz_array.shape[0] != 3:
        raise ValueError("xyz_array must be length 3 across the zeroth axis.")

    # The longitude coord is relatively easy to calculate, just take the X and Y
    # components and find the arctan of the pair.
    lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)

    # If we _knew_ that xyz_array was always of length 1, then this call could be a much
    # simpler one to arcsin. But to make this generic, we'll use the length of the XY
    # component along with arctan2.
    lat_array = np.arctan2(
        xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
    )

    # Return the two arrays
    return lon_array, lat_array
37220bd026ae48bf5a9914117075a10a51efba5a
15,170
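A small check of cart3_to_polar2 above on the three cartesian unit vectors, which land on (lon, lat) = (0, 0), (pi/2, 0) and (0, pi/2).

import numpy as np

xyz = np.array([[1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0]])  # shape (3, N): rows are x, y, z; columns are vectors
lon, lat = cart3_to_polar2(xyz)
print(lon)  # [0.        1.5707963 0.       ]
print(lat)  # [0.        0.        1.5707963]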
import requests


def deploy_release(rel_id, env_id):
    """deploy_release will start deploying a release to a given environment"""
    uri = config.OCTOPUS_URI + "/api/deployments"
    r = requests.post(uri, headers=config.OCTOPUS_HEADERS, verify=False,
                      json={'ReleaseId': rel_id, 'EnvironmentId': env_id})
    return r.json()
08eae9366a3233704f65a6f952801cdd3ffbe867
15,171
def create_static_route(dut, next_hop=None, static_ip=None, shell="vtysh", family='ipv4', interface = None, vrf = None): """ To create static route Author: Prudvi Mangadu ([email protected]) :param dut: :param next_hop: :param static_ip: :param shell: sonic|vtysh :param family: ipv4|ipv6 :return: """ if not static_ip: st.log("Provide static_ip") return False if shell == "vtysh": if family.lower() == "ipv4" or family.lower() == "": if next_hop: command = "ip route {} {}".format(static_ip, next_hop) else: command = "ip route {}".format(static_ip) elif family.lower() == "ipv6": command = "ipv6 route {} {}".format(static_ip, next_hop) if interface: command +=" {}".format(interface) if vrf: command +=" vrf {}".format(vrf) st.config(dut, command, type='vtysh') else: if family.lower() == "ipv4" or family.lower() == "": if next_hop: command = "ip route add {} via {}".format(static_ip, next_hop) else: command = "ip route add {}".format(static_ip) elif family.lower() == "ipv6": if next_hop: command = "ip -6 route add {} via {}".format(static_ip, next_hop) else: command = "ip -6 route add {}".format(static_ip) if interface: command +=" dev {}".format(interface) st.config(dut, command)
9097f016eaeb85e9b84351d50cac71d88779b1c1
15,172
def _markfoundfiles(arg, initargs, foundflags):
    """Mark file flags as found."""
    try:
        pos = initargs.index(arg) - 1
    except ValueError:
        pos = initargs.index("../" + arg) - 1

    # In cases where there is a single input file as the first parameter. This
    # should cover cases such as:
    # exec input.file
    # exec input.file > output.file
    if arg == initargs[0]:
        foundflags.append("<")

    # Other cases should pretty much be formats like:
    # exec -flag file -flag file -flag file
    elif (len(initargs) > 1 and initargs[pos][0] == "-"
          and initargs[pos] not in foundflags):
        foundflags.append(initargs[pos])

    # Or cases like exec -flag file -flag file inputfile > outputfile
    elif (len(initargs) > 1 and initargs[pos][0] != "-"
          and initargs[pos] not in foundflags):
        foundflags.append("<")

    return foundflags
e27ca91de403a6364cbebc8ee4ee835a9335dccc
15,173
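Two small illustrative calls of _markfoundfiles above, one for a flag-style argument list and one for a positional input file.

print(_markfoundfiles("input.file", ["exec", "-i", "input.file", "-o", "other.file"], []))  # ['-i']
print(_markfoundfiles("input.file", ["input.file"], []))  # ['<']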
def part_a(puzzle_input):
    """
    Calculate the answer for part_a.

    Args:
        puzzle_input (list): Formatted as the provided input from the website.

    Returns:
        string: The answer for part_a.
    """
    recipes_to_make = int(''.join(puzzle_input))
    elf_index_1 = 0
    elf_index_2 = 1
    recipies = [3, 7]
    while len(recipies) < recipes_to_make + 10:
        new_recipes = recipies[elf_index_1] + recipies[elf_index_2]
        if new_recipes >= 10:
            recipies.append(1)
            recipies.append(new_recipes - 10)
        else:
            recipies.append(new_recipes)
        elf_index_1 = (elf_index_1 + (recipies[elf_index_1] + 1)) % len(recipies)
        elf_index_2 = (elf_index_2 + (recipies[elf_index_2] + 1)) % len(recipies)
    return ''.join(map(str, recipies[recipes_to_make:recipes_to_make + 10]))
50e1cf923184a15747322528a47bad248c03dfa2
15,174
def _CompareFields(field, other_field):
    """Checks if two ProtoRPC fields are "equal".

    Compares the arguments, rather than the id of the elements (which is
    the default __eq__ behavior) as well as the class of the fields.

    Args:
      field: A ProtoRPC message field to be compared.
      other_field: A ProtoRPC message field to be compared.

    Returns:
      Boolean indicating whether the fields are equal.
    """
    field_attrs = _GetFieldAttributes(field)
    other_field_attrs = _GetFieldAttributes(other_field)
    if field_attrs != other_field_attrs:
        return False
    return field.__class__ == other_field.__class__
d6ce0b7f7caafd17dff188679800dee2dbe8e791
15,175
import os


def classpath_dest_filename(coord: str, src_filename: str) -> str:
    """Calculates the destination filename on the classpath for the given source filename and coord.

    TODO: This is duplicated in `COURSIER_POST_PROCESSING_SCRIPT`.
    """
    dest_name = coord.replace(":", "_")
    _, ext = os.path.splitext(src_filename)
    return f"{dest_name}{ext}"
e14a86897ebe6f4e75e4d7ac16737ffda83d632f
15,176
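A usage sketch for classpath_dest_filename above with a typical Maven-style coordinate (the coordinate and jar name are illustrative).

print(classpath_dest_filename("org.slf4j:slf4j-api:1.7.36", "slf4j-api-1.7.36.jar"))
# -> 'org.slf4j_slf4j-api_1.7.36.jar'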
import numpy as np


def loadtxt_rows(filename, rows, single_precision=False):
    """
    Load only certain rows
    """
    # Open the file
    f = open(filename, "r")

    # Storage
    results = {}

    # Row number
    i = 0

    # Number of columns
    ncol = None

    while(True):
        # Read the line and split by commas
        line = f.readline()
        cells = line.split(",")

        # Quit when you see a different number of columns
        if ncol is not None and len(cells) != ncol:
            break

        # Non-comment lines
        if cells[0] != "#":
            # If it's the first one, get the number of columns
            if ncol is None:
                ncol = len(cells)

            # Otherwise, include in results
            if i in rows:
                if single_precision:
                    results[i] = np.array([float(cell) for cell in cells],
                                          dtype="float32")
                else:
                    results[i] = np.array([float(cell) for cell in cells])
            i += 1

    results["ncol"] = ncol
    return results
9393cf7df8f24910a81e7d55128164a9bb467d91
15,177
from numpy import arange, zeros, sin, pi


def create_signal(frequencies, amplitudes, number_of_samples, sample_rate):
    """Create a signal of given frequencies and their amplitudes.
    """
    timesamples = arange(number_of_samples) / sample_rate
    signal = zeros(len(timesamples))
    for frequency, amplitude in zip(frequencies, amplitudes):
        signal += amplitude * sin(2*pi*frequency*timesamples)
    return signal, timesamples
58876fd45e96d221220ccc4ad0129cf48912d691
15,178
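A usage sketch for create_signal above: a one-second mix of 50 Hz and 120 Hz tones sampled at 1 kHz.

signal, t = create_signal(frequencies=[50, 120], amplitudes=[1.0, 0.5],
                          number_of_samples=1000, sample_rate=1000)
print(signal.shape, t.shape)  # (1000,) (1000,)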
import logging import requests from datetime import datetime def serp_goog(q, cx, key, c2coff=None, cr=None, dateRestrict=None, exactTerms=None, excludeTerms=None, fileType=None, filter=None, gl=None, highRange=None, hl=None, hq=None, imgColorType=None, imgDominantColor=None, imgSize=None, imgType=None, linkSite=None, lowRange=None, lr=None, num=None, orTerms=None, relatedSite=None, rights=None, safe=None, searchType=None, siteSearch=None, siteSearchFilter=None, sort=None, start=None): """Query Google and get search results in a DataFrame. For each parameter, you can supply single or multiple values / arguments. If you pass multiple arguments, all the possible combinations of arguments (the product) will be requested, and you will get one DataFrame combining all queries. See examples below. :param q: The search expression. :param cx: The custom search engine ID to use for this request. :param key: The API key of your custom search engine. :param c2coff: Enables or disables Simplified and Traditional Chinese Search. The default value for this parameter is 0 (zero), meaning that the feature is enabled. Supported values are:1: Disabled0: Enabled (default) :param cr: Restricts search results to documents originating in a particular country. You may use Boolean operators in the cr parameter's value.Google Search determines the country of a document by analyzing:the top- level domain (TLD) of the document's URLthe geographic location of the Web server's IP addressSee the Country Parameter Values page for a list of valid values for this parameter. :param dateRestrict: Restricts results to URLs based on date. Supported values include:d[number]: requests results from the specified number of past days. - d[number]: requests results from the specified number of past days. - w[number]: requests results from the specified number of past weeks. - m[number]: requests results from the specified number of past months. - y[number]: requests results from the specified number of past years. :param exactTerms: Identifies a phrase that all documents in the search results must contain. :param excludeTerms: Identifies a word or phrase that should not appear in any documents in the search results. :param fileType: Restricts results to files of a specified extension. A list of file types indexable by Google can be found in Search Console Help Center. :param filter: Controls turning on or off the duplicate content filter.See Automatic Filtering for more information about Google's search results filters. Note that host crowding filtering applies only to multi-site searches.By default, Google applies filtering to all search results to improve the quality of those results. Acceptable values are: "0": Turns off duplicate content filter. "1": Turns on duplicate content filter. :param gl: Geolocation of end user. The gl parameter value is a two-letter country code. The gl parameter boosts search results whose country of origin matches the parameter value. See the Country Codes page for a list of valid values.Specifying a gl parameter value should lead to more relevant results. This is particularly true for international customers and, even more specifically, for customers in English- speaking countries other than the United States. :param highRange: Specifies the ending value for a search range.Use lowRange and highRange to append an inclusive search range of lowRange...highRange to the query. :param hl: Sets the user interface language. 
Explicitly setting this parameter improves the performance and the quality of your search results.See the Interface Languages section of Internationalizing Queries and Results Presentation for more information, and Supported Interface Languages for a list of supported languages. :param hq: Appends the specified query terms to the query, as if they were combined with a logical AND operator. :param imgColorType: Returns black and white, grayscale, or color images: mono, gray, and color. Acceptable values are: "color": color "gray": gray "mono": mono :param imgDominantColor: Returns images of a specific dominant color. Acceptable values are: "black": black "blue": blue "brown": brown "gray": gray "green": green "orange": orange "pink": pink "purple": purple "red": red "teal": teal "white": white "yellow": yellow :param imgSize: Returns images of a specified size. Acceptable values are: "huge": huge "icon": icon "large": large "medium": medium "small": small "xlarge": xlarge "xxlarge": xxlarge :param imgType: Returns images of a type. Acceptable values are: "clipart": clipart "face": face "lineart": lineart "news": news "photo": photo :param linkSite: Specifies that all search results should contain a link to a particular URL :param lowRange: Specifies the starting value for a search range. Use lowRange and highRange to append an inclusive search range of lowRange...highRange to the query. :param lr: Restricts the search to documents written in a particular language (e.g., lr=lang_ja). Acceptable values are: "lang_ar": Arabic "lang_bg": Bulgarian "lang_ca": Catalan "lang_cs": Czech "lang_da": Danish "lang_de": German "lang_el": Greek "lang_en": English "lang_es": Spanish "lang_et": Estonian "lang_fi": Finnish "lang_fr": French "lang_hr": Croatian "lang_hu": Hungarian "lang_id": Indonesian "lang_is": Icelandic "lang_it": Italian "lang_iw": Hebrew "lang_ja": Japanese "lang_ko": Korean "lang_lt": Lithuanian "lang_lv": Latvian "lang_nl": Dutch "lang_no": Norwegian "lang_pl": Polish "lang_pt": Portuguese "lang_ro": Romanian "lang_ru": Russian "lang_sk": Slovak "lang_sl": Slovenian "lang_sr": Serbian "lang_sv": Swedish "lang_tr": Turkish "lang_zh- CN": Chinese (Simplified) "lang_zh-TW": Chinese (Traditional) :param num: Number of search results to return.Valid values are integers between 1 and 10, inclusive. :param orTerms: Provides additional search terms to check for in a document, where each document in the search results must contain at least one of the additional search terms. :param relatedSite: Specifies that all search results should be pages that are related to the specified URL. :param rights: Filters based on licensing. Supported values include: cc_publicdomain, cc_attribute, cc_sharealike, cc_noncommercial, cc_nonderived, and combinations of these. :param safe: Search safety level. Acceptable values are: "active": Enables SafeSearch filtering. "off": Disables SafeSearch filtering. (default) :param searchType: Specifies the search type: image. If unspecified, results are limited to webpages. Acceptable values are: "image": custom image search. :param siteSearch: Specifies all search results should be pages from a given site. :param siteSearchFilter: Controls whether to include or exclude results from the site named in the siteSearch parameter. Acceptable values are: "e": exclude "i": include :param sort: The sort expression to apply to the results. :param start: The index of the first result to return.Valid value are integers starting 1 (default) and the second result is 2 and so forth. 
For example &start=11 gives the second page of results with the default "num" value of 10 results per page.Note: No more than 100 results will ever be returned for any query with JSON API, even if more than 100 documents match the query, so setting (start + num) to more than 100 will produce an error. Note that the maximum value for num is 10. The following function call will produce two queries: "hotel" in the USA, and "hotel" in France >>> serp_goog(q='hotel', gl=['us', 'fr'], cx='YOUR_CX', key='YOUR_KEY') The below function call will prouce four queries and make four requests: "fligts" in UK "fligts" in Australia "tickets" in UK "tickets" in Australia 'cr' here refers to 'country restrict', which focuses on content originating from the specified country. >>> serp_goog(q=['flights', 'tickets'], cr=['countryUK', 'countryAU'], cx='YOUR_CX', key='YOUR_KEY') """ params = locals() supplied_params = {k: v for k, v in params.items() if params[k] is not None} for p in supplied_params: if isinstance(supplied_params[p], (str, int)): supplied_params[p] = [supplied_params[p]] for p in supplied_params: if p in SERP_GOOG_VALID_VALS: if not set(supplied_params[p]).issubset(SERP_GOOG_VALID_VALS[p]): raise ValueError('Please make sure you provide a' ' valid value for "{}", valid values:\n' '{}'.format(p, sorted(SERP_GOOG_VALID_VALS[p]))) params_list = _dict_product(supplied_params) base_url = 'https://www.googleapis.com/customsearch/v1?' specified_cols = ['searchTerms', 'rank', 'title', 'snippet', 'displayLink', 'link', 'queryTime', 'totalResults'] responses = [] for param in params_list: param_log = ', '.join([k + '=' + str(v) for k, v in param.items()]) logging.info(msg='Requesting: ' + param_log) resp = requests.get(base_url, params=param) if resp.status_code >= 400: raise Exception(resp.json()) responses.append(resp) result_df = pd.DataFrame() for i, resp in enumerate(responses): request_metadata = resp.json()['queries']['request'][0] del request_metadata['title'] search_info = resp.json()['searchInformation'] if int(search_info['totalResults']) == 0: df = pd.DataFrame(columns=specified_cols, index=range(1)) df['searchTerms'] = request_metadata['searchTerms'] # These keys don't appear in the response so they have to be # added manually for missing in ['lr', 'num', 'start', 'c2coff']: if missing in params_list[i]: df[missing] = params_list[i][missing] else: df = pd.DataFrame(resp.json()['items']) df['cseName'] = resp.json()['context']['title'] start_idx = request_metadata['startIndex'] df['rank'] = range(start_idx, start_idx + len(df)) for missing in ['lr', 'num', 'start', 'c2coff']: if missing in params_list[i]: df[missing] = params_list[i][missing] meta_columns = {**request_metadata, **search_info} df = df.assign(**meta_columns) df['queryTime'] = datetime.datetime.now(tz=datetime.timezone.utc) df['queryTime'] = pd.to_datetime(df['queryTime']) if 'image' in df: img_df = json_normalize(df['image']) img_df.columns = ['image.' 
+ c for c in img_df.columns] df = pd.concat([df, img_df], axis=1) result_df = result_df.append(df, sort=False, ignore_index=True) ordered_cols = (list(set(params_list[i]).difference({'q', 'key', 'cx'})) + specified_cols) non_ordered = result_df.columns.difference(set(ordered_cols)) final_df = result_df[ordered_cols + list(non_ordered)] if 'pagemap' in final_df: pagemap_df = pd.DataFrame() for p in final_df['pagemap']: try: temp_pagemap_df = json_normalize(p) pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False) except Exception as e: temp_pagemap_df = pd.DataFrame({'delete_me': None}, index=range(1)) pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False) pagemap_df = pagemap_df.reset_index(drop=True) if 'delete_me' in pagemap_df: del pagemap_df['delete_me'] for col in pagemap_df: if col in final_df: pagemap_df = pagemap_df.rename(columns={col: 'pagemap_' + col}) final_df = pd.concat([final_df, pagemap_df], axis=1) if 'metatags' in pagemap_df: metatag_df = pd.DataFrame() for m in pagemap_df['metatags']: try: temp_metatags_df = json_normalize(m) metatag_df = metatag_df.append(temp_metatags_df, sort=False) except Exception as e: temp_metatags_df = pd.DataFrame({'delete_me': None}, index=range(1)) metatag_df = metatag_df.append(temp_metatags_df, sort=False) metatag_df = metatag_df.reset_index(drop=True) if 'delete_me' in metatag_df: del metatag_df['delete_me'] for col in metatag_df: if col in final_df: metatag_df = metatag_df.rename(columns={col: 'metatag_' + col}) final_df = pd.concat([final_df, metatag_df], axis=1) return final_df
ca1b32d2795c035aab8578f0dc36f4a8dd503bec
15,179
import os
import subprocess


def get_git_revision():
    """
    Get the number of revisions since the beginning.
    """
    # basedir is assumed to be a module-level path pointing at the project root
    revision = "0"
    if os.path.isdir(os.path.join(basedir, '.git')):
        try:
            proc = subprocess.Popen(
                ['git', '-C', basedir, 'rev-list', '--count', 'HEAD'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            rev, err = proc.communicate()
            if proc.returncode == 0:
                revision = rev.strip().decode('ascii')
        except OSError:
            pass
    return revision
0aa0d132ac79698f418c1200db5a730a60300d4c
15,180
def get_wiki_modal_data(term):
    """
    Runs the Wikipedia helper functions and creates the Wikipedia data
    ready for the modal.
    """
    return_data = False
    summary_data = get_wiki_summary(term=term)
    related_terms = get_similar_search(term=term)
    if summary_data:
        return_data = {
            'wiki_term': term,
            'summary_data': summary_data,
            'related_terms': related_terms
        }
    return return_data
2d19c8ac1b3d261b3866b69a6a70d78ddac0ad0c
15,181
def format_taxa_to_js(otu_coords, lineages, prevalence, min_taxon_radius=0.5, max_taxon_radius=5, radius=1.0): """Write a string representing the taxa in a PCoA plot as javascript Parameters ---------- otu_coords : array_like Numpy array where the taxa is positioned lineages : array_like Label for each of these lineages prevalence : array_like Score of prevalence for each of the taxa that is drawn min_taxon_radius : float, optional Smallest radius for a sphere. max_taxon_radius : float, optional Largest radius for a spehere. radius : float, optional Base radius for a sphere. Outputs ------- str JavaScript string where the taxa information is written to create the spheres representing each of these, will return only the variable declaration if the inputs are empty. Notes ----- These parameters should work more as constants and once we find out that there's a value that is too big to be presented, the proper checks should be put into place. Currently we haven't found such cases in any study* min_taxon_radius: minimum value for the radius of the spheres on the plot max_taxon_radious: maximum value for the radius of the spheres on the plot radius: default value size """ js_biplots_string = [] js_biplots_string.append('\nvar g_taxaPositions = new Array();\n') # if we have prevalence scores, calculate the taxa radii values if len(prevalence): taxa_radii = radius * (min_taxon_radius + (max_taxon_radius - min_taxon_radius) * prevalence) else: taxa_radii = [] index = 0 # write the data in the form of a dictionary for taxa_label, taxa_coord, t_radius in zip(lineages, otu_coords, taxa_radii): js_biplots_string.append("g_taxaPositions['%d'] = { 'lineage': '%s', " "'x': %f, 'y': %f, 'z': %f, 'radius': %f};\n" % (index, taxa_label, taxa_coord[0], taxa_coord[1], taxa_coord[2], t_radius)) index += 1 js_biplots_string.append('\n') # join the array of strings as a single string return ''.join(js_biplots_string)
46052620ee7d4092761e728d78d6ab7b6abb6b45
15,182
import argparse from typing import List def _get_symbols_from_args(args: argparse.Namespace) -> List[icmsym.Symbol]: """ Get list of symbols to extract. """ # If all args are specified to extract only one symbol, return this symbol. if args.symbol and args.exchange and args.asset_class and args.currency: return [ icmsym.Symbol( ticker=args_symbol, exchange=args.exchange, asset_class=args.asset_class, contract_type=args.contract_type, currency=args.currency, ) for args_symbol in args.symbol ] # Find all matched symbols otherwise. file_path_generator = iasfil.FilePathGeneratorFactory.get_file_path_generator( args.provider ) latest_symbols_file = file_path_generator.get_latest_symbols_file() symbol_universe = iassym.SymbolUniverseFactory.get_symbol_universe( args.provider, symbols_file=latest_symbols_file ) if args.symbol is None: args_symbols = [args.symbol] else: args_symbols = args.symbol symbols: List[icmsym.Symbol] = [] for symbol in args_symbols: symbols.extend( symbol_universe.get( ticker=symbol, exchange=args.exchange, asset_class=args.asset_class, contract_type=args.contract_type, currency=args.currency, is_downloaded=True, frequency=args.frequency, path_generator=file_path_generator, ) ) return symbols
7257d2b43d242ee552b320826c0a417d2a508d74
15,183
from math import atan2, pi


def compute_heading(mag_read):
    """
    Computes the compass heading from the magnetometer X and Y.

    Returns a float in degrees between 0 and 360.
    """
    return ((atan2(mag_read[1], mag_read[0]) * 180) / pi) + 180
c160e7a69aa0d4bdfe232f45094e863d0d8dd478
15,184
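Two example readings for compute_heading above, showing the offset convention where a reading along +x maps to 180 degrees.

print(compute_heading((1.0, 0.0)))  # 180.0
print(compute_heading((0.0, 1.0)))  # 270.0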
def ConvertTrieToFlatPaths(trie, prefix=None):
    """Flattens the trie of paths, prepending a prefix to each."""
    result = {}
    for name, data in trie.items():
        if prefix:
            name = prefix + '/' + name
        if len(data) != 0 and not 'results' in data:
            result.update(ConvertTrieToFlatPaths(data, name))
        else:
            result[name] = data
    return result
c226f3c9d72ca04d5dfe3267a92888bc6255d649
15,185
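A usage sketch for ConvertTrieToFlatPaths above on a small trie of test results.

trie = {
    "fast": {
        "canvas": {"bug.html": {"results": [1, 2]}},
        "css": {"results": [3]},
    },
}
print(ConvertTrieToFlatPaths(trie))
# {'fast/canvas/bug.html': {'results': [1, 2]}, 'fast/css': {'results': [3]}}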
from typing import List
from pathlib import Path


def get_root_version_for_subset_version(root_dataset_path: str,
                                        sub_dataset_version: str,
                                        sub_dataset_path: MetadataPath
                                        ) -> List[str]:
    """
    Get the versions of the root that contains the given
    sub_dataset_version at the given sub_dataset_path, if any exists.
    If the configuration does not exist return an empty iterable.
    """
    root_path = Path(root_dataset_path).resolve()
    current_path = (root_path / sub_dataset_path).resolve()

    # Ensure that the sub-dataset path is under the root-dataset path
    current_path.relative_to(root_path)

    current_version = sub_dataset_version
    current_path = current_path.parent
    while len(current_path.parts) >= len(root_path.parts):
        # Skip intermediate directories, i.e. check only on git
        # repository roots.
        if len(tuple(current_path.glob(".git"))) == 0:
            current_path = current_path.parent
            continue
        current_version = find_version_containing(current_path, current_version)
        if current_version == "":
            return []
        current_path = current_path.parent
    return [current_version]
b77da6b9f35e50e463dfba8cd2d710c357615d36
15,186
def subject() -> JsonCommandTranslator:
    """Get a JsonCommandTranslator test subject."""
    return JsonCommandTranslator()
eed4b66f06a0257b2070e17b7cffa9f9005b6b0d
15,187
import torch
import torch.nn.functional as F


def accuracy(output, target, cuda_enabled=True):
    """
    Compute accuracy.

    Args:
        output: [batch_size, 10, 16, 1] The output from DigitCaps layer.
        target: [batch_size] Labels for dataset.

    Returns:
        accuracy (float): The accuracy for a batch.
    """
    batch_size = target.size(0)

    v_length = torch.sqrt((output**2).sum(dim=2, keepdim=True))
    softmax_v = F.softmax(v_length, dim=1)
    assert softmax_v.size() == torch.Size([batch_size, 10, 1])

    _, max_index = softmax_v.max(dim=1)
    assert max_index.size() == torch.Size([batch_size, 1])

    pred = max_index.view(batch_size)  # max_index.squeeze()
    # assert pred.size() == torch.Size([batch_size])

    if cuda_enabled:
        target = target.cuda()
        pred = pred.cuda()

    correct_pred = torch.eq(target, pred.data)  # tensor
    # correct_pred_sum = correct_pred.sum()  # scalar. e.g: 6 correct out of 128 images.
    acc = correct_pred.float().mean()  # e.g: 6 / 128 = 0.046875

    return acc
fc795bf54bfccfeea6bb3e8f1f81aa7282499d39
15,188
import sqlite3


def save_message(my_dict):
    """
    Saves a message if it is not a duplicate.
    """
    conn = sqlite3.connect(DB_STRING)
    # Create a query cursor on the db connection
    queryCurs = conn.cursor()

    if my_dict.get('message_status') == None:
        my_dict['message_status'] = "Unconfirmed"

    queryCurs.execute(
        "SELECT rowid FROM Messages WHERE sender = ? and destination = ? and stamp = ? and hash = ?",
        (my_dict.get('sender'), my_dict.get('destination'), my_dict.get('stamp'),
         my_dict.get('hash'),))
    data = queryCurs.fetchone()

    if data == None:
        queryCurs.execute(
            '''INSERT INTO Messages (sender, destination, message, stamp, markdown, encoding,
            encryption, hashing, hash, decryption_key, file, filename, content_type, message_status)
            VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
            (my_dict.get('sender'), my_dict.get('destination'), my_dict.get('message'),
             my_dict.get('stamp'), my_dict.get('markdown'), my_dict.get('encoding'),
             my_dict.get('encryption'), my_dict.get('hashing'), my_dict.get('hash'),
             my_dict.get('decryptionKey'), my_dict.get('file'), my_dict.get('filename'),
             my_dict.get('content_type'), my_dict.get('message_status')))

    conn.commit()
    conn.close()
    return True
57f65f949c731b8120dec1b0f1b77b3d29505497
15,189
from typing import List

import pandas as pd


def getKFolds(train_df: pd.DataFrame, seeds: List[str]) -> List[List[List[int]]]:
    """Generates len(seeds) folds for train_df

    Usage:
        # 5 folds
        folds = getKFolds(train_df, [42, 99, 420, 120, 222])
        for fold, (train_idx, valid_idx, test_idx) in enumerate(folds):
            train_fold = train.iloc[train_idx]
            valid_fold = train.iloc[valid_idx]
            ...

    Returns:
        folds: list of [train, val, test] indices for each
    """
    folds = []
    for seed in seeds:
        train, val, test = split_into_train_val_test(train_df, seed=seed)
        folds.append([list(train), list(val), list(test)])
    return folds
adc25fad4530bf0f134033d95a1d936fb7eb2653
15,190
def redownload_window() -> str:
    """The number of days for which the performance data will be redownloaded"""
    return '30'
d5cc816f426f26586870def4797b91a05e37825a
15,191
import numpy as np


def clean_flight_probs(flight_probs: np.ndarray, rng: np.random.Generator) -> np.ndarray:
    """
    Round off probabilities in flight_probs to 0 or 1 with random bias of the current probability

    :param flight_probs: a vector of inclusion probabilities after the landing phase
    :param rng: a random number generator
    :returns: a vector of inclusion probabilities that have been rounded off
    """
    # tol is assumed to be a small module-level tolerance constant
    for i in range(len(flight_probs)):
        if flight_probs[i] - 0 > tol and flight_probs[i] < 1 - tol:
            flight_probs[i] = 1 if rng.random() < flight_probs[i] else 0
    return flight_probs
f7127433781df86dabe699575be740f775310194
15,192
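A usage sketch for clean_flight_probs above; tol is assumed to be a small module-level constant, so a value is supplied here only for illustration.

import numpy as np

tol = 1e-9  # assumed module-level tolerance
rng = np.random.default_rng(0)
probs = np.array([0.0, 0.3, 0.75, 1.0])
print(clean_flight_probs(probs, rng))  # endpoints untouched, middle entries rounded to 0 or 1 at random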
async def question(session: AskSession):
    """
    Ask user for his answer on which LeetCode problem he wants to anticipate.
    """
    return await session.prompt(
        message="Enter the problem URL from LeetCode site: ",
        validator=LeetCodeUrlValidator(session)
    )
a4ac5fd194736d2850e70ee5ac89e3569abf4410
15,193
import copy


def mongo_instance(instance_dict, ts_dt):
    """An instance as a model."""
    # Instance is assumed to be the model class imported elsewhere in the module
    dict_copy = copy.deepcopy(instance_dict)
    dict_copy["status_info"]["heartbeat"] = ts_dt
    return Instance(**dict_copy)
32b0547cad0d84400a879814790eed6219ddb84a
15,194
import os def remove(path, force=False): """ Remove the named file or directory Args: path (str): The path to the file or directory to remove. force (bool): Remove even if marked Read-Only. Default is False Returns: bool: True if successful, False if unsuccessful CLI Example: .. code-block:: bash salt '*' file.remove C:\\Temp """ # This must be a recursive function in windows to properly deal with # Symlinks. The shutil.rmtree function will remove the contents of # the Symlink source in windows. path = os.path.expanduser(path) if not os.path.isabs(path): raise HubbleInvocationError("File path must be absolute: {0}".format(path)) # Does the file/folder exists if not os.path.exists(path) and not is_link(path): raise CommandExecutionError("Path not found: {0}".format(path)) # Remove ReadOnly Attribute if force: # Get current file attributes file_attributes = win32api.GetFileAttributes(path) win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL) try: if os.path.isfile(path): # A file and a symlinked file are removed the same way os.remove(path) elif is_link(path): # If it's a symlink directory, use the rmdir command os.rmdir(path) else: for name in os.listdir(path): item = "{0}\\{1}".format(path, name) # If its a normal directory, recurse to remove it's contents remove(item, force) # rmdir will work now because the directory is empty os.rmdir(path) except (OSError, IOError) as exc: if force: # Reset attributes to the original if delete fails. win32api.SetFileAttributes(path, file_attributes) raise CommandExecutionError("Could not remove '{0}': {1}".format(path, exc)) return True
5ff9b29999de614e24f80f5ac737ba8ae5f8445e
15,195
def get_conversion_option(shape_records):
    """Prompts user for conversion options"""
    print("1 - Convert to a single zone")
    print("2 - Convert to one zone per shape (%d zones) (this can take a while)" % (len(shape_records)))
    import_option = int(input("Enter your conversion selection: "))
    return import_option
7608c588960eb3678970e0d4467c67ff9f17a331
15,196
def base_conditional(Kmn, Lm, Knn, f, *, full_cov=False, q_sqrt=None, white=False): """ Given a g1 and g2, and distribution p and q such that p(g2) = N(g2;0,Kmm) p(g1) = N(g1;0,Knn) p(g1|g2) = N(g1;0,Knm) And q(g2) = N(g2;f,q_sqrt*q_sqrt^T) This method computes the mean and (co)variance of q(g1) = \int q(g2) p(g1|g2) :param Kmn: M x N :param Kmm: M x M :param Knn: N x N or N :param f: M x R :param full_cov: bool :param q_sqrt: None or R x M x M (lower triangular) :param white: bool :return: N x R or R x N x N """ # compute kernel stuff num_func = tf.shape(f)[1] # R # Compute the projection matrix A A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False) # construct the conditional mean fmean = tf.matmul(A, f, transpose_a=True) if q_sqrt is not None: if q_sqrt.get_shape().ndims == 2: LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N elif q_sqrt.get_shape().ndims == 3: L = q_sqrt A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1])) LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N else: # pragma: no cover raise ValueError("Bad dimension for q_sqrt: %s" % str(q_sqrt.get_shape().ndims)) if full_cov: fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # R x N x N else: fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # R x N if not full_cov: fvar = tf.transpose(fvar) # N x R return fmean, fvar
a6ddc7d2904836d7fa83557057dc42e25a8b8a9b
15,197
import networkx as nx


def find_shortest_dijkstra_route(graph, journey):
    """
    all_pairs_dijkstra_path() and all_pairs_dijkstra_path_length() both
    return a generator, hence the use of dict().
    """
    all_paths = dict(nx.all_pairs_dijkstra_path(graph))
    all_lengths = dict(nx.all_pairs_dijkstra_path_length(graph))
    if len(all_paths) != len(all_lengths):
        print("Path count is not equal to path length count, "
              "maybe some links are missing a weight?")
        return False

    shortest_path = []
    for destination, path in all_paths[journey[0]].items():
        # If all nodes in our journey are in the current path being checked
        if all(node in path for node in journey):
            if (len(shortest_path) == 0) or (len(path) < len(shortest_path)):
                shortest_path = path

    total = 0
    for section in shortest_path:
        total += len(section) - 1
    print("\nShortest dijkstra journey: {} connection(s)".format(total))

    if len(shortest_path) < 1:
        print("No shortest dijkstra path found!\n")
        return False
    else:
        print("{} hop(s) {}\n".format(len(shortest_path) - 1, shortest_path))
        return shortest_path
49689cc3f4b03fa6589369bf0d085ee2dbe64d5d
15,198
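A usage sketch for find_shortest_dijkstra_route above on a small weighted graph; the journey lists nodes that must all appear in one shortest path from the first node.

import networkx as nx

g = nx.Graph()
g.add_edge("A", "B", weight=1)
g.add_edge("B", "C", weight=1)
g.add_edge("A", "C", weight=5)
print(find_shortest_dijkstra_route(g, ["A", "C"]))  # ['A', 'B', 'C']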
from functools import reduce
import operator


def product_consec_digits(number, consecutive):
    """
    Returns the largest product of "consecutive"
    consecutive digits from number
    """
    digits = [int(dig) for dig in str(number)]
    max_start = len(digits) - consecutive
    return [reduce(operator.mul, digits[i:i + consecutive])
            for i in range(max_start + 1)]
2df16f7445e6d579b632e86904b77ec93e52a1f3
15,199
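A usage note for product_consec_digits above: despite the docstring, it returns all of the window products, so take max() of the result for the largest one.

print(product_consec_digits(123456, 3))       # [6, 24, 60, 120]
print(max(product_consec_digits(123456, 3)))  # 120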