Dataset columns: content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
def dequote(s):
    """
    Remove outer quotes from string

    If a string has single or double quotes around it, remove them.
    todo: Make sure the pair of quotes match.
    If a matching pair of quotes is not found, return the string unchanged.
    """
    if s.startswith(("'", '"', '<')):
        return s[1:-1]
    return s
ab56306fd9f21e2f43cd2325182e2cae202aae6f
5,500
def display_ordinal_value(glyph: str):
    """Displays the integer value of the given glyph

    Examples:
    >>> display_ordinal_value('🐍')\n
    128013
    >>> display_ordinal_value('G')\n
    71
    >>> display_ordinal_value('g')\n
    103
    """
    return ord(glyph)
7daa53180023bfec2968308d463ac615a83a4e55
5,501
def mixture_HPX(gases, Xs):
    """
    Given a mixture of gases and their mole fractions,
    this method returns the enthalpy, pressure, and composition string
    needed to initialize the mixture gas in Cantera.

    NOTE: The method of setting enthalpy usually fails, b/c Cantera
    uses a Newton iterator to find the temperature that yields the
    specified enthalpy, and it isn't very robust.
    Instead, approximate constant Cp's and find T_mix manually,
    as with the mixture_TPX() method above.
    """
    # --------------
    # X
    mixture_d = {}
    for gas, wx_i in zip(gases, Xs):
        for sp in gas.species_names:
            if sp in mixture_d:
                mixture_d[sp] += wx_i * gas.mole_fraction(sp)
            elif gas.mole_fraction(sp) != 0.0:
                mixture_d[sp] = wx_i * gas.mole_fraction(sp)
            else:
                pass
    mixture_s = convert_composition_dict_to_string(mixture_d)

    # --------------
    # H
    # Compute Tmix with molar heat capacities
    #
    # Define:
    # h_mix = sum_i n_i h_i
    #
    # where h is molar enthalpy

    # compute H_mix
    H_mix = 0
    for gas, wx_i in zip(gases, Xs):
        H_mix += wx_i * gas.enthalpy_mole

    # --------------
    # P
    press = 0.0
    for gas, wx_i in zip(gases, Xs):
        press += wx_i * gas.P

    # -------------------
    # Return HPX
    return H_mix, press, mixture_s
a04a2bdcd1a58eaf26facf2f542d2c1aaba6e611
5,502
import json


def authorize(config):
    """Authorize in GSheets."""
    json_credential = json.loads(config['credentials']['gspread']['credential'])
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(json_credential, scope)
    return gspread.authorize(credentials)
fd54e0df5a71d2896f925dbd9d4e7536659906f9
5,503
def _without_command(results):
    """A helper to tune up results so that they lack 'command'
    which is guaranteed to differ between different cmd types
    """
    out = []
    for r in results:
        r = r.copy()
        r.pop('command')
        out.append(r)
    return out
67927cf56884e0e3b22d0daf37e6c02eaef3849b
5,504
def b(k, a):
    """
    Optimal discretisation of TBSS to minimise error, p. 9.
    """
    return ((k**(a+1) - (k-1)**(a+1)) / (a+1))**(1/a)
d563a39710aec05334f38af704371db1dc7f94fc
5,505
from typing import Tuple from typing import Dict def changepoint_loc_and_score( time_series_data_window: pd.DataFrame, kM_variance: float = 1.0, kM_lengthscale: float = 1.0, kM_likelihood_variance: float = 1.0, k1_variance: float = None, k1_lengthscale: float = None, k2_variance: float = None, k2_lengthscale: float = None, kC_likelihood_variance=1.0, #TODO note this seems to work better by resetting this # kC_likelihood_variance=None, kC_changepoint_location=None, kC_steepness=1.0, ) -> Tuple[float, float, float, Dict[str, float], Dict[str, float]]: """For a single time-series window, calcualte changepoint score and location as detailed in https://arxiv.org/pdf/2105.13727.pdf Args: time_series_data_window (pd.DataFrame): time-series with columns X and Y kM_variance (float, optional): variance initialisation for Matern 3/2 kernel. Defaults to 1.0. kM_lengthscale (float, optional): lengthscale initialisation for Matern 3/2 kernel. Defaults to 1.0. kM_likelihood_variance (float, optional): likelihood variance initialisation for Matern 3/2 kernel. Defaults to 1.0. k1_variance (float, optional): variance initialisation for Changepoint kernel k1, if None uses fitted variance parameter from Matern 3/2. Defaults to None. k1_lengthscale (float, optional): lengthscale initialisation for Changepoint kernel k1, if None uses fitted lengthscale parameter from Matern 3/2. Defaults to None. k2_variance (float, optional): variance initialisation for Changepoint kernel k2, if None uses fitted variance parameter from Matern 3/2. Defaults to None. k2_lengthscale (float, optional): lengthscale initialisation for for Changepoint kernel k2, if None uses fitted lengthscale parameter from Matern 3/2. Defaults to None. kC_likelihood_variance ([type], optional): likelihood variance initialisation for Changepoint kernel. Defaults to None. kC_changepoint_location ([type], optional): changepoint location initialisation for Changepoint, if None uses midpoint of interval. Defaults to None. kC_steepness (float, optional): changepoint location initialisation for Changepoint. Defaults to 1.0. Returns: Tuple[float, float, float, Dict[str, float], Dict[str, float]]: changepoint score, changepoint location, changepoint location normalised by interval length to [0,1], Matern 3/2 kernel parameters, Changepoint kernel parameters """ time_series_data = time_series_data_window.copy() Y_data = time_series_data[["Y"]].values time_series_data[["Y"]] = StandardScaler().fit(Y_data).transform(Y_data) # time_series_data.loc[:, "X"] = time_series_data.loc[:, "X"] - time_series_data.loc[time_series_data.index[0], "X"] try: (kM_nlml, kM_params) = fit_matern_kernel( time_series_data, kM_variance, kM_lengthscale, kM_likelihood_variance ) except BaseException as ex: # do not want to optimise again if the hyperparameters # were already initialised as the defaults if kM_variance == kM_lengthscale == kM_likelihood_variance == 1.0: raise BaseException( "Retry with default hyperparameters - already using default parameters." 
) from ex ( kM_nlml, kM_params, ) = fit_matern_kernel(time_series_data) is_cp_location_default = ( (not kC_changepoint_location) or kC_changepoint_location < time_series_data["X"].iloc[0] or kC_changepoint_location > time_series_data["X"].iloc[-1] ) if is_cp_location_default: # default to midpoint kC_changepoint_location = ( time_series_data["X"].iloc[-1] + time_series_data["X"].iloc[0] ) / 2.0 if not k1_variance: k1_variance = kM_params["kM_variance"] if not k1_lengthscale: k1_lengthscale = kM_params["kM_lengthscales"] if not k2_variance: k2_variance = kM_params["kM_variance"] if not k2_lengthscale: k2_lengthscale = kM_params["kM_lengthscales"] if not kC_likelihood_variance: kC_likelihood_variance = kM_params["kM_likelihood_variance"] try: (changepoint_location, kC_nlml, kC_params) = fit_changepoint_kernel( time_series_data, k1_variance=k1_variance, k1_lengthscale=k1_lengthscale, k2_variance=k2_variance, k2_lengthscale=k2_lengthscale, kC_likelihood_variance=kC_likelihood_variance, kC_changepoint_location=kC_changepoint_location, kC_steepness=kC_steepness, ) except BaseException as ex: # do not want to optimise again if the hyperparameters # were already initialised as the defaults if ( k1_variance == k1_lengthscale == k2_variance == k2_lengthscale == kC_likelihood_variance == kC_steepness == 1.0 ) and is_cp_location_default: raise BaseException( "Retry with default hyperparameters - already using default parameters." ) from ex ( changepoint_location, kC_nlml, kC_params, ) = fit_changepoint_kernel(time_series_data) cp_score = changepoint_severity(kC_nlml, kM_nlml) cp_loc_normalised = (time_series_data["X"].iloc[-1] - changepoint_location) / ( time_series_data["X"].iloc[-1] - time_series_data["X"].iloc[0] ) return cp_score, changepoint_location, cp_loc_normalised, kM_params, kC_params
ffa6c41ea9b7403130908e22fd82b396dc1a1af7
5,506
def get_network_interfaces(properties):
    """ Get the configuration that connects the instance to an existing network
        and assigns to it an ephemeral public IP if specified.
    """
    network_interfaces = []
    networks = properties.get('networks', [])
    if len(networks) == 0 and properties.get('network'):
        network = {
            "network": properties.get('network'),
            "subnetwork": properties.get('subnetwork'),
            "networkIP": properties.get('networkIP'),
        }
        networks.append(network)
        if (properties.get('hasExternalIp')):
            network['accessConfigs'] = [{
                "type": "ONE_TO_ONE_NAT",
            }]
            if properties.get('natIP'):
                network['accessConfigs'][0]["natIp"] = properties.get('natIP')

    for network in networks:
        if not '.' in network['network'] and not '/' in network['network']:
            network_name = 'global/networks/{}'.format(network['network'])
        else:
            network_name = network['network']

        network_interface = {
            'network': network_name,
        }

        netif_optional_props = ['subnetwork', 'networkIP', 'aliasIpRanges', 'accessConfigs']
        for prop in netif_optional_props:
            if network.get(prop):
                network_interface[prop] = network[prop]
        network_interfaces.append(network_interface)
    return network_interfaces
0f8db05c0c8b95f8bde7752a9e9766e479db098f
5,507
def MATCH(*args) -> Function:
    """
    Returns the relative position of an item in a range that matches a specified value.

    Learn more: https://support.google.com/docs/answer/3093378
    """
    return Function("MATCH", args)
aa03f558e0948fac023622b6569bb6f504e92cba
5,508
def set_dict_to_zero_with_list(dictionary, key_list):
    """ Set dictionary keys from given list value to zero

    Args:
        dictionary (dict): dictionary to filter
        key_list (list): keys to turn zero in filtered dictionary

    Returns:
        dictionary (dict): the filtered dictionary with keys from input list turned to zero
    """
    # Generate list of unwanted keys
    unwanted = (set(dictionary.keys()) - set(key_list))
    # Delete keys from dictionary
    for unwanted_key in unwanted:
        dictionary[unwanted_key] = 0
    return dictionary
498c0c4a7444c0bbb33168c2f17bfcf2bd8e805e
5,509
def terminal_condition_for_minitaur_extended_env(env):
    """Returns a bool indicating that the extended env is terminated.

    This predicate checks whether 1) the legs are bent inward too much or
    2) the body is tilted too much.

    Args:
        env: An instance of MinitaurGymEnv
    """
    motor_angles = env.robot.motor_angles
    leg_pose = minitaur_pose_utils.motor_angles_to_leg_pose(motor_angles)
    swing_threshold = np.radians(35.0)
    if (leg_pose[0] > swing_threshold or leg_pose[2] > swing_threshold or  # Front
            leg_pose[1] < -swing_threshold or leg_pose[3] < -swing_threshold):  # Rear
        return True
    roll, _, _ = env.robot.base_roll_pitch_yaw
    if abs(roll) > np.radians(30.0):
        return True
    return False
be80901777bc7d5c03b152e3c9af9a30c3526d1e
5,510
from os import path


def data_range(dt_start, dt_end):
    """read raw VP data between datetimes"""
    filepath_fmt = path.join(DATA_DIR, DATA_FILE_FMT)
    fnames = strftime_date_range(dt_start, dt_end, filepath_fmt)
    pns = map(vprhimat2pn, fnames)
    pns_out = []
    for pn in pns:
        if not pn.empty:
            pns_out.append(pn)
    return pd.concat(pns_out, axis=2, sort=True).loc[:, :, dt_start:dt_end]
d71047a4c08dbfff4827c9baac026e981f975a38
5,511
import lt_shiso import logparser import lt_import import lt_crf import lt_shiso import lt_misc import os def init_ltmanager(conf, db, table, reset_db): """Initializing ltmanager by loading argument parameters.""" lt_alg = conf.get("log_template", "lt_alg") ltg_alg = conf.get("log_template", "ltgroup_alg") post_alg = conf.gettuple("log_template", "post_alg") sym = conf.get("log_template", "variable_symbol") ltm = LTManager(conf, db, table, reset_db, lt_alg, ltg_alg, post_alg) if lt_alg == "shiso": ltgen = lt_shiso.LTGenSHISO(ltm._table, sym, threshold = conf.getfloat( "log_template_shiso", "ltgen_threshold"), max_child = conf.getint( "log_template_shiso", "ltgen_max_child") ) elif lt_alg == "import": fn = conf.get("log_template_import", "def_path") mode = conf.get("log_template_import", "mode") lp = logparser.LogParser(conf, sep_variable = True) ltgen = lt_import.LTGenImport(ltm._table, sym, fn, mode, lp) elif lt_alg == "crf": ltgen = lt_crf.LTGenCRF(ltm._table, sym, conf) #elif lt_alg == "va": # import lt_va # ltm = lt_va.LTManager(conf, self.db, self.table, # self._reset_db, ltg_alg) else: raise ValueError("lt_alg({0}) invalid".format(lt_alg)) ltm._set_ltgen(ltgen) if ltg_alg == "shiso": ltgroup = lt_shiso.LTGroupSHISO(table, ngram_length = conf.getint( "log_template_shiso", "ltgroup_ngram_length"), th_lookup = conf.getfloat( "log_template_shiso", "ltgroup_th_lookup"), th_distance = conf.getfloat( "log_template_shiso", "ltgroup_th_distance"), mem_ngram = conf.getboolean( "log_template_shiso", "ltgroup_mem_ngram") ) elif ltg_alg == "ssdeep": ltgroup = lt_misc.LTGroupFuzzyHash(table) elif ltg_alg == "none": ltgroup = LTGroup() else: raise ValueError("ltgroup_alg({0}) invalid".format(ltg_alg)) ltm._set_ltgroup(ltgroup) ltspl = LTPostProcess(conf, ltm._table, ltm._lttable, post_alg) ltm._set_ltspl(ltspl) if os.path.exists(ltm.filename) and not reset_db: ltm.load() return ltm
c11ab3426ac8abd3ea0128934a47ac14f69ea5ac
5,512
def print_tree(tree, level=0, current=False):
    """Pretty-print a dictionary configuration `tree`"""
    pre = ' ' * level
    msg = ''
    for k, v in tree.items():
        if k == 'self':
            msg += print_tree(v, level)
            continue
        # Detect subdevice
        if isinstance(v, dict) and 'self' in v:
            msg += pre + '|++> ' + k + '\n'
            msg += print_tree(v, level + 1)
            continue
        if not current:
            continue
        v = repr(v['current'])
        if len(v) > 50:
            v = v[:46] + ' ...'
        msg += '{}|: {} = {}\n'.format(pre, k, v)
    return msg
f9697b506e9254b4982a037bdfbeb8a1d27f35bb
5,513
def chaine_polynome(poly):
    """Return the string representation of the polynomial _poly_ (simple version)"""
    tab_str = [str(coef) + "*X^" + str(i) if i != 0 else str(coef) for i, coef in enumerate(poly)]
    return " + ".join(tab_str[::-1])
79fd59afe84c1bd12e3417b9195514664d1bce20
5,514
def get_opr_from_dict(dict_of_opr_vals):
    """Takes in a dictionary where the keys are temperatures and values are optical rotation values.
    The dictionary is for all the temperatures and optical rotation values extracted for one molecule.
    This function determines which of the values in the dictionary to keep.

    Args:
        dict_of_opr_vals ([dict]): Keys are temperature and values are optical rotation vals.

    Returns:
        [String]: Final optical rotation value for a molecule
    """
    if len(dict_of_opr_vals) > 0:
        dict_keys = list(dict_of_opr_vals.keys())
        if dict_keys.count("") == len(dict_keys):
            return dict_of_opr_vals[""]
        if "" in dict_keys:
            dict_keys.remove("")
        if dict_keys.count("X") == len(dict_keys):
            return dict_of_opr_vals["X"]
        else:
            try:
                dict_keys.remove("X")
            except:
                pass
        return dict_of_opr_vals[dict_keys[abs_distance(dict_keys)]]
    else:
        return dict_of_opr_vals[0]
c0c688835ffb38fe4fb1a88fd91f8374d854d75a
5,515
import re


def tokens(s):
    """Return a list of strings containing individual words from string s.

    This function splits on whitespace transitions, and captures apostrophes
    (for contractions).

    >>> tokens("I'm fine, how are you?")
    ["I'm", 'fine', 'how', 'are', 'you']
    """
    words = re.findall(r"\b[\w']+\b", s)
    return words
aee0b6fad2f9107c893496f1f3807e80c9d2e44b
5,516
def get_variable_value(schema, definition_ast, input):
    """Given a variable definition, and any value of input, return a value which adheres to
    the variable definition, or throw an error."""
    type = type_from_ast(schema, definition_ast.type)
    if not type or not is_input_type(type):
        raise GraphQLError(
            'Variable ${} expected value of type {} which cannot be used as an input type.'.format(
                definition_ast.variable.name.value,
                print_ast(definition_ast.type),
            ),
            [definition_ast]
        )
    if is_valid_value(type, input):
        if is_nullish(input):
            default_value = definition_ast.default_value
            if default_value:
                return coerce_value_ast(type, default_value, None)
        return coerce_value(type, input)
    raise GraphQLError(
        'Variable ${} expected value of type {} but got: {}'.format(
            definition_ast.variable.name.value,
            print_ast(definition_ast.type),
            repr(input)
        ),
        [definition_ast]
    )
09c3fa10dcb25704c6323f78d244b27605a393ed
5,517
def _convert_3d_crop_window_to_2d(crop_window):
    """Converts a 3D crop window to a 2D crop window.

    Extracts just the spatial parameters of the crop window and assumes that those
    apply uniformly across all channels.

    Args:
        crop_window: A 3D crop window, expressed as a Tensor in the format
            [offset_height, offset_width, offset_channel, crop_height, crop_width,
            crop_channels].

    Returns:
        A 2D crop window as a Tensor in the format [offset_height, offset_width,
        crop_height, crop_width].
    """
    with tf.name_scope('3d_crop_window_to_2d'):
        return tf.gather(crop_window, [0, 1, 3, 4])
e5eb7d97c55c0ab18caf135728bb1daa6e5b2d8c
5,518
def apply_along_axis(func1d, mat, axis):
    """Numba utility to apply reduction to a given axis."""
    assert mat.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        result = np.empty(mat.shape[1], mat.dtype)
        for i in range(len(result)):
            result[i] = func1d(mat[:, i])
    else:
        result = np.empty(mat.shape[0], mat.dtype)
        for i in range(len(result)):
            result[i] = func1d(mat[i, :])
    return result
87f1dcd3ed04e8626a59aaff1caabba6c52ce8d3
5,519
def get_all_article():
    """
    Get all articles
    ---
    tags:
      - Article API
    responses:
      200:
        description: Articles retrieved successfully
      404:
        description: Resource does not exist
      500:
        description: Server error
    """
    articles = ArticleLibrary.get_all()
    return jsonify(articles)
7304e862351730ace03ad8e784665cf844d1c94f
5,520
def cut_fedora_prefix(uri):
    """
    Cut the Fedora URI prefix from a URI.
    """
    return uri[len(FEDORA_URI_PREFIX):]
617b00bc34f4ad69b82858496ecc19bc2a5e6fd2
5,521
def get_database_login_connection(user, password, host, database):
    """ Return database connection object based on user and database details provided """
    connection = psycopg2.connect(user=user,
                                  password=password,
                                  host=host,
                                  port="5432",
                                  database=database,
                                  sslmode="prefer")
    set_auto_commit(connection)
    return connection
55b8cd2fb7e9e2acc00ce76c660f709920d59eb8
5,522
def getaddrinfo(host,port,family=0,socktype=socket.SOCK_STREAM,proto=0,allow_cname=True): """Resolve host and port into addrinfo struct. Does the same thing as socket.getaddrinfo, but using `pyxmpp.resolver`. This makes it possible to reuse data (A records from the additional section of DNS reply) returned with SRV records lookup done using this module. :Parameters: - `host`: service domain name. - `port`: service port number or name. - `family`: address family. - `socktype`: socket type. - `proto`: protocol number or name. - `allow_cname`: when False CNAME responses are not allowed. :Types: - `host`: `unicode` or `str` - `port`: `int` or `str` - `family`: `int` - `socktype`: `int` - `proto`: `int` or `str` - `allow_cname`: `bool` :return: list of (family, socktype, proto, canonname, sockaddr). :returntype: `list` of (`int`, `int`, `int`, `str`, (`str`, `int`))""" ret=[] if proto==0: proto=socket.getprotobyname("tcp") elif type(proto)!=int: proto=socket.getprotobyname(proto) if type(port)!=int: port=socket.getservbyname(port,proto) if family not in (0,socket.AF_INET): raise NotImplementedError,"Protocol family other than AF_INET not supported, yet" if ip_re.match(host): return [(socket.AF_INET,socktype,proto,host,(host,port))] host=idna.ToASCII(host) try: r=dns.resolver.query(host, 'A') except dns.exception.DNSException: r=dns.resolver.query(host+".", 'A') if not allow_cname and r.rrset.name!=dns.name.from_text(host): raise ValueError,"Unexpected CNAME record found for %s" % (host,) if r: for rr in r: ret.append((socket.AF_INET,socktype,proto,r.rrset.name,(rr.to_text(),port))) return ret
1e65eb69a2d23dd93b0676be5e739545674aa021
5,523
import copy
import json


def get_layout_for_dashboard(available_pages_list):
    """
    Makes the dictionary that determines the dashboard layout page.
    Displays the graphic title to represent the graphic.
    :param available_pages_list:
    :return:
    """
    available_pages_list_copy = copy.deepcopy(available_pages_list)
    for available_page_dict in available_pages_list_copy:
        graphic_list = available_page_dict[GRAPHIC_CONFIG_FILES]
        for graphic_index, graphic_path in enumerate(graphic_list):
            graphic_json = json.loads(load_graphic_config_dict(graphic_path))
            graphic_list[graphic_index] = {
                GRAPHIC_PATH: graphic_path,
                GRAPHIC_TITLE: graphic_json[GRAPHIC_TITLE],
            }
    return available_pages_list_copy
a391a93a70c0fc755657a6b93ef90bd4811b6d4c
5,524
def median(list_in):
    """
    Calculates the median of the data

    :param list_in: A list
    :return: float
    """
    list_in.sort()
    half = int(len(list_in) / 2)
    if len(list_in) % 2 != 0:
        return float(list_in[half])
    elif len(list_in) % 2 == 0:
        value = (list_in[half - 1] + list_in[half]) / 2
        return float(value)
261487551098b80986cbfb8e4cd28279649ac456
5,525
def search_file(expr, path=None, abspath=False, follow_links=False):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.

    Can specify the following options:
       path - The directory that is searched recursively
       executable_extension - This string is used to see if there is an
           implicit extension in the filename
       executable - Test if the file is an executable (default=False)
       isfile - Test if the file is file (default=True)
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError, "Unknown directory '"+path+"'"
    for root, dirs, files in os.walk(path, topdown=True):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root, name)
                if follow_links and os.path.islink(name):
                    ans.append(os.path.abspath(os.readlink(name)))
                elif abspath:
                    ans.append(os.path.abspath(name))
                else:
                    ans.append(name)
    return ans
f3d2501f535865455646168ecf81a4a12e66fcfa
5,526
import logging


def delete_gwlbe(gwlbe_ids):
    """
    Deletes VPC Endpoint (GWLB-E).

    Accepts:
    - gwlbe_ids (list of str): ['vpce-svc-xxxx', 'vpce-svc-yyyy']

    Usage:
    - delete_gwlbe(['vpce-xxxx', 'vpce-yyyy'])
    """
    logging.info("Deleting VPC Endpoint Service:")
    try:
        response = ec2.delete_vpc_endpoints(
            VpcEndpointIds=gwlbe_ids
        )
        return response
    except ClientError as e:
        logging.error(e)
        return None
854b9991dda8198de87895ddf7dbc65fbb6746e8
5,527
def subdivide_loop(surface: SurfaceData, number_of_iterations: int = 1) -> SurfaceData:
    """Make a mesh more detailed by subdividing in a loop.
    If iterations are high, this can take very long.

    Parameters
    ----------
    surface:napari.types.SurfaceData
    number_of_iterations:int

    See Also
    --------
    ..[0] http://www.open3d.org/docs/0.12.0/tutorial/geometry/mesh.html#Mesh-subdivision
    """
    mesh_in = to_mesh(surface)
    mesh_out = mesh_in.subdivide_loop(number_of_iterations=number_of_iterations)
    return to_surface(mesh_out)
85fda9f2626f3fdd48c0a2eecbc4d3dffc49919a
5,528
def register():
    """Register a new user.

    Validates that the username is not already taken. Hashes the
    password for security.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        phone = request.form['full_phone']
        channel = request.form['channel']
        db = get_db()
        error = None

        if not username:
            error = 'Username is required.'
        elif not phone:
            error = 'Phone number is required'
        elif not password:
            error = 'Password is required.'
        elif db.execute(
            'SELECT id FROM user WHERE username = ?', (username,)
        ).fetchone() is not None:
            error = 'User {0} is already registered.'.format(username)

        if error is None:
            session['phone'] = phone
            vsid = start_verification(phone, channel)
            if vsid is not None:
                # the verification was sent to the user and the username is valid
                # redirect to verification check
                db.execute(
                    'INSERT INTO user (username, password, phone_number) VALUES (?, ?, ?)',
                    (username, generate_password_hash(password), phone)
                )
                db.commit()
                return redirect(url_for('auth.verify'))

        flash(error)

    return render_template('auth/register.html')
9e1b2c86a20710d56cf5cf737ab1a35d67970179
5,529
import os
from random import choice
from string import ascii_letters, digits


def gen_key():
    """Function to generate a new access key which does not exist already"""
    key = ''.join(choice(ascii_letters + digits) for _ in range(16))
    folder = storage + key
    # Repeat until key generated does not exist
    while (os.path.exists(folder)):
        key = ''.join(choice(ascii_letters + digits) for _ in range(16))
        folder = storage + key
    return key
42b4619c2d61d465ec2141625954d7410e411c05
5,530
import requests
import logging


def get_url(url, headers=None):
    """ get content from specified URL """
    reply = requests.get(url, headers=headers)
    if reply.status_code != 200:
        logging.debug('[get_attribute] Failed to open {0}'.format(url))
        return None
    else:
        return reply.content
faf182c2dc162f25abab5875e0c4253ca98df8a6
5,531
def Read_FImage(Object, Channel, iFlags=0):
    """
    Read_FImage(Object, Channel, iFlags=0) -> bool
    Read_FImage(Object, Channel) -> bool
    """
    return _Channel.Read_FImage(Object, Channel, iFlags)
06af43adbbfaf94e9f26b1ad41d6ba6f7ae5cfe7
5,532
import json
import yaml


def Export(message, stream=None, schema_path=None):
    """Writes a message as YAML to a stream.

    Args:
        message: Message to write.
        stream: Output stream, None for writing to a string and returning it.
        schema_path: JSON schema file path. If None then all message fields are
            written, otherwise only fields in the schema are written.

    Returns:
        Returns the return value of yaml.dump(). If stream is None then the
        return value is the YAML data as a string.
    """
    result = _ProtoJsonApiTools.Get().encode_message(message)
    message_dict = json.loads(
        encoding_helper._IncludeFields(result, message, None))
    if schema_path:
        _FilterYAML(message_dict, schema_path)
    return yaml.dump(message_dict, stream=stream)
53f74ff11dfe46eab0549ea466c5ea80c6876bd7
5,533
def user_locale_get(handle, user_name, name, caller="user_locale_get"):
    """
    gets locale for the user

    Args:
        handle (UcsHandle)
        user_name (string): username
        name (string): locale name

    Returns:
        AaaUserLocale: managed object

    Raises:
        UcsOperationError: if AaaUserLocale is not present

    Example:
        user_locale_get(handle, user_name="test", name="testlocale")
    """
    user_dn = _base_dn + "/user-" + user_name
    dn = user_dn + "/locale-" + name
    mo = handle.query_dn(dn)
    if mo is None:
        raise UcsOperationError(caller, "User locale '%s' does not exist" % dn)
    return mo
a748d8fd2e349c43dfabd07108943005be95729e
5,534
from typing import Dict
from typing import List
from typing import Set
from typing import Any
from functools import reduce


def get_set_from_dict_from_dict(
    instance: Dict[str, Dict[str, List]], field: str
) -> Set[Any]:
    """
    Format of template field within payload

    Function gets field from instance-dict, which is a dict again.
    The values of these dicts have to be joined in a set.
    """
    cml = instance.get(field)
    if cml:
        return reduce(lambda i1, i2: i1 | i2, [set(values) for values in cml.values()])
    else:
        return set()
75ee6f4d46a4f57012e76b0f02fb20f629b6bf60
5,535
def initiate_os_session(unscoped: str, project: str) -> keystoneauth1.session.Session:
    """
    Create a new openstack session with the unscoped token and project id.

    Params:
        unscoped: str
        project: str
    Returns:
        A usable keystone session object for OS client connections
    Return type:
        object(keystoneauth1.session.Session)
    """
    os_auth = v3.Token(
        auth_url=setd["auth_endpoint_url"], token=unscoped, project_id=project
    )
    return keystoneauth1.session.Session(
        auth=os_auth,
        verify=True,
    )
ab96af612721a5043c60e9a76c512301b0b1de6f
5,536
def delete_topic_collection_items(request_ctx, collection_item_id, topic_id, **request_kwargs):
    """
    Deletes the discussion topic. This will also delete the assignment, if it's
    an assignment discussion.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param collection_item_id: (required) ID
    :type collection_item_id: string
    :param topic_id: (required) ID
    :type topic_id: string
    :return: Delete a topic
    :rtype: requests.Response (with void data)
    """
    path = '/v1/collection_items/{collection_item_id}/discussion_topics/{topic_id}'
    url = request_ctx.base_api_url + path.format(collection_item_id=collection_item_id, topic_id=topic_id)
    response = client.delete(request_ctx, url, **request_kwargs)
    return response
3c45e9f0b65e731480c8a81163be01b5cd5fbd83
5,537
from typing import List


def xml_section_extract_elsevier(section_root, element_list=None) -> List[ArticleElement]:
    """ Depth-first search of the text in the sections """
    if element_list is None:
        element_list = list()
    for child in section_root:
        if 'label' in child.tag or 'section-title' in child.tag or 'para' in child.tag:
            target_txt = get_xml_text_iter(child)
            element_type = None
            if 'label' in child.tag:
                element_type = ArticleElementType.SECTION_ID
            elif 'section-title' in child.tag:
                element_type = ArticleElementType.SECTION_TITLE
            elif 'para' in child.tag:
                element_type = ArticleElementType.PARAGRAPH
            element = ArticleElement(type=element_type, content=target_txt)
            element_list.append(element)
        elif 'section' in child.tag:
            xml_section_extract_elsevier(section_root=child, element_list=element_list)
    return element_list
919e1bb7f1ae96b857776f6c81c3e032cfbba4a9
5,538
def get_data_from_string(string, data_type, key=None):
    """
    Get data from a string; the type can be "int", "float" or "str".

    The key is basically the required starting string. It is needed only
    when we parse strings from execution file output (not from test.txt).
    """
    data = []
    if data_type in ("int", "float"):
        data = Text.get_numbers(string, type_=data_type)
    elif data_type == "str":
        if key is None:
            data = Text.get_strings_from_tests(string)
        else:
            data = Text.get_strings_from_exec(string, key)
    return data
10135d96bd0cdb37d38268a795f92d80be294adc
5,539
import requests


def greenline(apikey, stop):
    """
    Return processed green line data for a stop.
    """
    # Only green line trips
    filter_route = "Green-B,Green-C,Green-D,Green-E"
    # Include vehicle and trip data
    include = "vehicle,trip"
    # API request
    p = {"filter[route]": filter_route, "include": include, "filter[stop]": stop}
    result = requests.get("https://api-v3.mbta.com/predictions", params=p).json()
    return processGreenlinePredictions(result)
254a7cb43cf0789b1437b6ee3ea2262b4d22b4ca
5,540
def Get_Unread_Messages(service, userId):
    """Retrieves all unread messages with attachments, returns list of message ids.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me" can be used to
            indicate the authenticated user.
    """
    message_list = []
    message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt="json",
                                                  q='is:unread has:attachment').execute()
    if message_ids['resultSizeEstimate'] > 0:
        for message in message_ids['messages']:
            message_list.append(message['id'])
    return message_list
2aa28ff1aa093754bd293a831be2dada0e629801
5,541
def rmse(y_true, y_pred):
    """
    rmse
    description:
        computes RMSE
    """
    return sqrt(mean_squared_error(y_true, y_pred))
377849b692190ae880221676eb898bbe84e466e5
5,542
from pathlib import Path
from typing import Type


def read_model_json(path: Path, model: Type[ModelT]) -> ModelT:
    """
    Reading routine. Only keeps Model data
    """
    return model.parse_file(path=path)
0b0fb327efdc1acaff2adce3e5b738a1cabbf30a
5,543
def details(request, id=None):
    """
    Show details about alert

    :param request:
    :param id: alert ID
    :return:
    """
    alert = get_object_or_404(Alert, id=id)
    context = {
        "user": request.user,
        "alert": alert,
    }
    return render(request, "alerts/details.html", context)
9522a69fc69eb80da301541073bfc320e991fae8
5,544
def get_class_id_map():
    """Get mapping between class_id and class_name"""
    sql = """
        SELECT class_id
             , class_name
        FROM classes
    """
    cur.execute(f"{sql};")
    result = [dict(x) for x in cur.fetchall()]
    class_map = {}
    for r in result:
        class_map[r["class_id"]] = r["class_name"]
    return class_map
d72df95f3f27cbfb04fe32b09d672ea1cff3cbc6
5,545
def epsilon_nfa_to_nfa(e_nfa: automata.nfa.EpsilonNFA)->automata.nfa.NFA: # todo: add tests """ Casts epsilon NFA to NFA. :param EpsilonNFA e_nfa: original epsilon NFA :return NFA: cast NFA that takes the same languages. """ assert type(e_nfa) is automata.nfa.EpsilonNFA work = e_nfa.deepcopy() closure = work.start_state.epsilon_closure # NOT the same as state.indirect_reach #setting start state as accepting if its' epsilon closure contains an accepting state for state in closure: if state.value: work.start_state.value = 1 break structure = work.deepcopy() #hold a structure, but use references from work. for state in work.states.values(): for single_input in work.inputs - {state.epsilon}: closure = structure.states[state.name].epsilon_closure caught_states = set() for state_of_closure in closure: # forward = state_of_closure.forward(single_input) # new_forward = set() # for one_state in forward: # new_forward.add(work.states[one_state.name]) caught_states |= state_of_closure.forward(single_input) state.transitions[single_input] = work.e_closures(*caught_states) if state.epsilon in state.transitions: state.transitions.pop(state.epsilon) for event in work.inputs: if not state.transitions.get(event, True): state.transitions.pop(event) work.inputs.remove(work.epsilon) for state in work.states: for event, end_states in work.states[state].transitions.items(): transitions = set() for end_state in end_states: transitions.add(work.states[end_state.name]) work.states[state].transitions[event] = transitions # print(work.states) return automata.nfa.NFA(work.states, work.inputs, work.start_state)
4ac75c1c7d4356e6ceb47b64273dea596988372b
5,546
def potatoes(p0, w0, p1):
    """
    - p1/100 = water1 / (water1 + (1 - p0/100) * w0)
      => water1 = w0 * (1 - p0/100) * (p1/100) / (1 - p1/100)
    - dry = w0 * (1 - p0/100)
    - w1 = water1 + dry = w0 * (100 - p0) / (100 - p1)

    Example:
        98/100 = water1 / (water1 + (1 - 99/100) * 100)
        water1 = 49
        w1 = 49 + 1 = 50
    """
    w1 = w0 * (100 - p0) / (100 - p1)
    return int(w1)
f2955a58db3a48c64b6acc4980e663f33332aeea
5,547
def calc_Cinv_CCGT(CC_size_W, CCGT_cost_data):
    """
    Annualized investment costs for the Combined cycle

    :type CC_size_W : float
    :param CC_size_W: Electrical size of the CC

    :rtype InvCa : float
    :returns InvCa: annualized investment costs in CHF

    ..[C. Weber, 2008] C.Weber, Multi-objective design and optimization of district energy systems
    including polygeneration energy conversion technologies., PhD Thesis, EPFL
    """
    # if the Q_design is below the lowest capacity available for the technology, then it is replaced
    # by the least capacity for the corresponding technology from the database
    if CC_size_W < CCGT_cost_data['cap_min'][0]:
        CC_size_W = CCGT_cost_data['cap_min'][0]
    CCGT_cost_data = CCGT_cost_data[
        (CCGT_cost_data['cap_min'] <= CC_size_W) & (CCGT_cost_data['cap_max'] > CC_size_W)]

    # costs of connection
    connection_costs = ngas.calc_Cinv_gas(CC_size_W)

    Inv_a = CCGT_cost_data.iloc[0]['a']
    Inv_b = CCGT_cost_data.iloc[0]['b']
    Inv_c = CCGT_cost_data.iloc[0]['c']
    Inv_d = CCGT_cost_data.iloc[0]['d']
    Inv_e = CCGT_cost_data.iloc[0]['e']
    Inv_IR = (CCGT_cost_data.iloc[0]['IR_%']) / 100
    Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']
    Inv_OM = CCGT_cost_data.iloc[0]['O&M_%'] / 100

    InvC = Inv_a + Inv_b * (CC_size_W) ** Inv_c + (Inv_d + Inv_e * CC_size_W) * log(CC_size_W)

    Capex_a_CCGT_USD = (InvC + connection_costs) * (Inv_IR) * (1 + Inv_IR) ** Inv_LT / ((1 + Inv_IR) ** Inv_LT - 1)
    Opex_fixed_CCGT_USD = InvC * Inv_OM
    Capex_CCGT_USD = InvC

    return Capex_a_CCGT_USD, Opex_fixed_CCGT_USD, Capex_CCGT_USD
92ea26dcfc66996dd564da9df73a117a57b308bd
5,548
def get_middle_slice_tiles(data, slice_direction):
    """Create a strip of intensity-normalized, square middle slices.
    """
    slicer = {"ax": 0, "cor": 1, "sag": 2}
    all_data_slicer = [slice(None), slice(None), slice(None)]
    num_slices = data.shape[slicer[slice_direction]]
    slice_num = int(num_slices / 2)
    all_data_slicer[slicer[slice_direction]] = slice_num
    middle_slices = data[tuple(all_data_slicer)]
    num_slices = middle_slices.shape[2]
    slice_tiles = [square_and_normalize_slice(middle_slices[..., mid_slice])
                   for mid_slice in range(num_slices)]
    return slice_tiles
7ab60139c38fd79a866ed14f065a3333c532162a
5,549
import re


def glewIsSupported(var):
    """
    Return True if var is valid extension/core pair

    Usage: glewIsSupported("GL_VERSION_1_4 GL_ARB_point_sprite")

    Note: GLEW API was not well documented and this function was written
    in haste so the actual GLEW format for glewIsSupported might be different.

    TODO:
    - Only the extension parameter is currently checked. Validate the core
      as well. Will likely require scraping opengl docs for supported functionality
    """
    var = re.sub(' +', ' ', var)
    variables = var.split(' ')
    for v in variables:
        #if v in GLEW_OGL_INFO[GL_VERSIONS]:
        #    return True
        if v in GLEW_OGL_INFO[GL_EXTENSIONS]:
            return True
    return False
114d4eb9f308f15f9169f24b418e4d78d4b792d8
5,550
def example_two():
    """Serve example two page."""
    return render_template('public/examples/two.j2')
759721686f0411d1ee5ad75f76ed5a0158067bae
5,551
def omegaTurn(r_min, w_row, rows): """Determines a path (set of points) representing a omega turn. The resulting path starts at 0,0 with a angle of 0 deg. (pose = 0,0,0). It will turn left or right depending on if rows is positive (right turn) or negative (left turn). Path should be translated and rotated to its proper position in the field by the calling function. Parameters ---------- r_min : float Turning radius of the vehicle. w_row : float The width of a row in the field. rows : int The number of rows between the current row and the target row e.g. Vehicle is turning from the mid-point of row i into the mid-point of row i+N Returns ---------- path : np.array [[x1, x2, x3,...] [y1, y2, y3,...]] The path that the vehicle is to follow. It is defined by a set of x,y points. distance : float The length of the path that accomplishes the requested pi-turn. """ # First check if a omega turn is possible d = rows * w_row # distance from start path to end path if rows * w_row > 2 * r_min: path = np.zeros((0, 0)) # Turn is not possible. Path is empty distance = np.nan # Distance cannot be calculated return (path, distance) if d > 0: # Turn to the right # Create the starting arc for leaving the path (60 points+endpoint) # Arc starts at pi/2 and rotates up/back toward 0, angle will be alpha alpha = np.arccos((r_min + d / 2) / (2 * r_min)) a = np.linspace(np.pi / 2, np.pi / 2 - alpha, 61) x_start = 0 + r_min * np.cos(a) y_start = r_min - r_min * np.sin(a) # Create the final arc for entering the path (60 points+endpoint) a = np.linspace(-1 * np.pi / 2 + alpha, -1 * np.pi/2, 61) x_end = 0 + r_min * np.cos(a) y_end = -1 * d - r_min - r_min * np.sin(a) # Create bulb section bulb_center_x = 2 * r_min * np.sqrt(1 - np.float_power((r_min + d / 2) / (2 * r_min), 2)) bulb_center_y = -1 * d / 2 a = np.linspace(-1 * np.pi/2 - alpha, np.pi / 2 + alpha, 61) x_bulb = bulb_center_x + r_min * np.cos(a) y_bulb = bulb_center_y - r_min * np.sin(a) else: # Create the starting arc for leaving the path (60 points+endpoint) d = d * -1 # Arc starts at pi/2 and rotates up/back toward 0, angle will be alpha alpha = np.arccos((r_min + d / 2) / (2 * r_min)) a = np.linspace(-1 * np.pi/2, -1 * np.pi / 2 + alpha, 61) x_start = 0 + r_min * np.cos(a) y_start = -1 * r_min - r_min * np.sin(a) # Create the final arc for entering the path (60 points+endpoint) a = np.linspace(np.pi / 2 - alpha, np.pi / 2, 61) x_end = 0 + r_min * np.cos(a) y_end = d + r_min - r_min * np.sin(a) # Create bulb section bulb_center_x = 2 * r_min * np.sqrt(1 - np.float_power((r_min + d / 2) / (2 * r_min), 2)) bulb_center_y = d / 2 a = np.linspace(np.pi / 2 + alpha, -1 * np.pi/2 - alpha, 61) x_bulb = bulb_center_x + r_min * np.cos(a) y_bulb = bulb_center_y - r_min * np.sin(a) # Connect segments. Each segment repeats the start and end. x = np.hstack((x_start, x_bulb[1:], x_end[1:])) y = np.hstack((y_start, y_bulb[1:], y_end[1:])) path = np.array((x, y)) distance = (4 * alpha + np.pi) * r_min return path, distance
39d3203d26199c585371e0208228c8b2839a8cd0
5,552
def sparse_ones(indices, dense_shape, dtype=tf.float32, name="sparse_ones"):
    """ Creates a new `SparseTensor` with the given indices having value 1

    Args:
        indices (`Tensor`): a rank 2 tensor with the `(row,column)` indices for the resulting sparse tensor
        dense_shape (`Tensor` or `TensorShape`): the output dense shape
        dtype (`tf.DType`): the tensor type for the values
        name (`str`): sparse_ones op

    Returns:
        sp_tensor (`SparseTensor`): a new sparse tensor with values set to 1
    """
    with tf.name_scope(name=name):
        indices = as_tensor(indices, tf.int64)
        dense_shape = as_tensor(dense_shape, tf.int64)
        indices_shape = indices.shape
        values = tf.ones([indices_shape[0]], dtype)
        return tf.SparseTensor(indices, values, dense_shape)
1dad9ce8d1f1ab1950f744fbfa084884732ea8de
5,553
from typing import Any


def ask(*args: Any, **kwargs: Any) -> Any:
    """Ask a modular question in the statusbar (blocking).

    Args:
        message: The message to display to the user.
        mode: A PromptMode.
        default: The default value to display.
        text: Additional text to show
        option: The option for always/never question answers.
                Only available with PromptMode.yesno.
        abort_on: A list of signals which abort the question if emitted.

    Return:
        The answer the user gave or None if the prompt was cancelled.
    """
    question = _build_question(*args, **kwargs)  # pylint: disable=missing-kwoa
    global_bridge.ask(question, blocking=True)
    answer = question.answer
    question.deleteLater()
    return answer
f5c65a4cdc83b5c22c4de97e41ed8a740f94ec3d
5,554
import re


def get_sale(this_line, cattle, category):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.
    """
    cattle = cattle.replace("MARKET", "")
    cattle = cattle.replace(":", "")
    cattle = cattle.strip().title()
    sale = {'cattle_cattle': cattle}
    if bool(re.search("TOWN", str(category))):
        for idx, title in enumerate(category):
            if title == "TOWN":
                sale['consignor_city'] = this_line[idx].strip().title()
            if title == "HEAD":
                head = this_line[idx]
                if '-' in head:
                    head = head.split('-')[0]
                if '/' in head:
                    head = head.split('/')[0]
                sale['cattle_head'] = head
            if title == "KIND":
                cattle = cattle + ' ' + this_line[idx].title()
                sale['cattle_cattle'] = cattle
            if title == "WEIGHT":
                sale['cattle_avg_weight'] = this_line[idx].replace(",", "")
            if title == "PRICE":
                price = this_line[idx].replace("$", "")
                price = price.replace(",", "")
                if bool(re.search("Pairs", cattle)):
                    sale['cattle_price'] = price
                else:
                    sale['cattle_price_cwt'] = price
    else:
        sale = {}
    sale = {k: v.strip() for k, v in sale.items() if v}
    return sale
f75e949558c9938a44f64ccce11bacce8d116e9f
5,555
import os import requests def get_request(term, destination, days_input, price_limit, food_preference): """ Fetches restaurant information from the Yelp API for a given meal term, meal attribute, destination, number of days of vacation, price limit, and food preference. Params: term (str) the specific meal, like "breakfast" destination (str) the requested destination, like "New York" days_input (int) the number of days of the vacation, like 3 price_limit (list) the requested list of prices to search going up to the price limit, like [1,2,3] (for $$$) food_preference (str) the requested food cuisine preferences, like "American, Chinese" Example: breakfast_list, lunch_list, dinner_list = get_request(term="breakfast",destination="New York", days_input=3, price_limit=[1,2,3], food_preference="American, Chinese") Returns the request for a specific meal through "meal_response". """ #ACQUIRE API KEY API_KEY = os.environ.get("YELP_API_KEY") #Endpoint and headers using API Key link_endpoint = 'https://api.yelp.com/v3/businesses/search' link_headers = {'Authorization': 'bearer %s' % API_KEY} #Read in the inputted parameters for a given meal meal_parameters = {'term': term, 'limit': days_input, # 1 breakfast per vacation day 'offset': 50, #basically lets you do pages 'price': price_limit, #can change this later 'radius': 10000, #Change later? 'categories': food_preference, 'location': destination, 'attributes': "good_for_" + term, } #Make a request to the Yelp API using the correct parameters meal_response = requests.get(url = link_endpoint, params = meal_parameters, headers = link_headers) print(meal_response) #Return the request return meal_response
df9b5a2534278963dc5fa0719db3f915ce8fcb8d
5,556
def eig_min(a, eps=1e-7, kmax=1e3, log=False):
    """
    :param a: matrix to find min eigenvalue of
    :param eps: desired precision
    :param kmax: max number of iterations allowed
    :param log: whether to log the iterations
    """
    mu_1 = eig_max_abs(a, eps, kmax, log)
    return mu_1 - eig_max_abs(mu_1 * np.eye(a.shape[0]) - a, eps, kmax, log)
0c990207fe2b3a77aba636918bf78d9a138b718d
5,557
def relacao(lista):
    """Create a function that receives a list of real numbers and returns another
    list of size 3 in which (i) the first element is the count of numbers greater
    than zero, (ii) the second element is the count of numbers less than zero and
    (iii) the last element is the count of zeros in the initial list.

    Args:
        lista (list): list received to be processed by the function

    Returns:
        list: list of size three in the order (greater than, less than and equal to zero)
    """
    maior = menor = igual = 0
    for i in lista:
        if i > 0:
            maior += 1
        elif i < 0:
            menor += 1
        else:
            igual += 1
    return f'[{maior},{menor},{igual}]'
39e45d8221d5d5b7322ebec5aa3f761d9e2ef413
5,558
def _input_to_dictionary(input_):
    """Convert.

    Args:
        input_: GraphQL "data" dictionary structure from mutation

    Returns:
        result: Dict of inputs
    """
    # 'column' is a dict of DB model 'non string' column names and their types
    column = {
        'idx_user': DATA_INT,
        'enabled': DATA_INT
    }
    result = utils.input_to_dictionary(input_, column=column)
    return result
263eb2449e8d272ef6c7e147ca7286f70e5cdbf9
5,559
def validate(request):
    """Validate an authentication request."""
    email_token = request.GET.get('a')
    client_token = request.GET.get('b')
    user = authenticate(email_token=email_token, counter_token=client_token)
    if user:
        login(request, user)
        return redirect(request.GET.get('success', '/'))
    else:
        return HttpResponseForbidden()
5a6fbf9d67a048f973126248c3a5dfcf596e5370
5,560
def strip_chr(bt):
    """Strip 'chr' from chromosomes for BedTool object

    Parameters
    ----------
    bt : pybedtools.BedTool
        BedTool to strip 'chr' from.

    Returns
    -------
    out : pybedtools.BedTool
        New BedTool with 'chr' stripped from chromosome names.
    """
    try:
        df = pd.read_table(bt.fn, header=None, dtype=str)
    # If the try fails, I assume that's because the file has a trackline. Note
    # that I don't preserve the trackline (I'm not sure how pybedtools keeps
    # track of it anyway).
    except pd.parser.CParserError:
        df = pd.read_table(bt.fn, header=None, skiprows=1, dtype=str)
    df[0] = df[0].apply(lambda x: x[3:])
    s = '\n'.join(df.astype(str).apply(lambda x: '\t'.join(x), axis=1)) + '\n'
    out = pbt.BedTool(s, from_string=True)
    return out
1382a71799f6de081c3ff3092792012ebac25f01
5,561
def fit_slice(fitter, sliceid, lbda_range=[5000, 8000], nslices=5, **kwargs):
    """ """
    fitvalues = fitter.fit_slice(lbda_ranges=lbda_range, metaslices=nslices,
                                 sliceid=sliceid, **kwargs)
    return fitvalues
2d2b4b91b0ba3b0dca908d56e8b5184e5ae36b9e
5,562
import functools


def execute_sync(function, sync_type):
    """
    Synchronize with the disassembler for safe database access.

    Modified from https://github.com/vrtadmin/FIRST-plugin-ida
    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        output = [None]

        #
        # this inline function definition is technically what will execute
        # in the context of the main thread. we use this thunk to capture
        # any output the function may want to return to the user.
        #

        def thunk():
            output[0] = function(*args, **kwargs)
            return 1

        if is_mainthread():
            thunk()
        else:
            idaapi.execute_sync(thunk, sync_type)

        # return the output of the synchronized execution
        return output[0]

    return wrapper
54034aa9853c1b04e7bfc2416a34019b87556518
5,563
def mk_living_arrangements(data_id, data):  # measurement group 11
    """
    transforms a f-living-arrangements.json form into the triples used by insertMeasurementGroup to
        store each measurement that is in the form
    :param data_id: unique id from the json form
    :param data: data array from the json form
    :return: The list of (typeid,valType,value) triples that are used by insertMeasurementGroup to add the measurements
    """
    return [(220, 2, data_id),
            (95, 6, lwh.mk_category(data['alone'], ['Alone', 'With someone'])),
            (96, 5, lwh.mk_category(data['arrange'], ['House', 'Apartment', 'Independent living unit', 'Other'])),
            (97, 2, data['othertext'])]
d4a327c3fc22facf3c4e21fe0b9fd3ce600beebc
5,564
def ids_in(table):
    """Returns the ids in the given dataframe, either as a list of ints or a single int."""
    entity, id_colname = get_entity_and_id_colname(table)

    # Series.to_list() converts to a list of Python int rather than numpy.int64
    # Conversion to the list type and the int type are both necessary for the shared functions
    ids = table[id_colname].to_list()
    ids = process_singleton_ids(ids, entity)
    return ids
5bb4a912c88bc7fc7e47cd14be5520c8cce32faf
5,565
def transformer_ae_base_tpu():
    """Base config adjusted for TPU."""
    hparams = transformer_ae_base()
    transformer.update_hparams_for_tpu(hparams)
    hparams.batch_size = 512
    return hparams
a71bb88b10400c867e0ac8fd35c7c3e79a95a119
5,566
def attribute_volume(tree, altitudes, area=None):
    """
    Volume of each node of the given tree.
    The volume :math:`V(n)` of a node :math:`n` is defined recursively as:

    .. math::

        V(n) = area(n) * | altitude(n) - altitude(parent(n)) | + \sum_{c \in children(n)} V(c)

    :param tree: input tree
    :param altitudes: node altitudes of the input tree
    :param area: area of the nodes of the input hierarchy (provided by :func:`~higra.attribute_area` on `tree`)
    :return: a 1d array
    """
    if area is None:
        area = hg.attribute_area(tree)

    height = np.abs(altitudes[tree.parents()] - altitudes)
    height = height * area
    volume_leaves = np.zeros(tree.num_leaves(), dtype=np.float64)
    return hg.accumulate_and_add_sequential(tree, height, volume_leaves, hg.Accumulators.sum)
91c884bcdcd4fde616870258f5d3f1582c420868
5,567
import os def save_plot( fig, filepath=None, format="png", interactive=False, return_filepath=False ): """Saves fig to filepath if specified, or to a default location if not. Args: fig (Figure): Figure to be saved. filepath (str or Path, optional): Location to save file. Default is with filename "test_plot". format (str): Extension for figure to be saved as. Ignored if interactive is True and fig is of type plotly.Figure. Defaults to 'png'. interactive (bool, optional): If True and fig is of type plotly.Figure, saves the fig as interactive instead of static, and format will be set to 'html'. Defaults to False. return_filepath (bool, optional): Whether to return the final filepath the image is saved to. Defaults to False. Returns: String representing the final filepath the image was saved to if return_filepath is set to True. Defaults to None. """ plotly_ = import_or_raise("plotly", error_msg="Cannot find dependency plotly") graphviz_ = import_or_raise( "graphviz", error_msg="Please install graphviz to visualize trees." ) matplotlib = import_or_raise( "matplotlib", error_msg="Cannot find dependency matplotlib" ) plt_ = matplotlib.pyplot axes_ = matplotlib.axes is_plotly = False is_graphviz = False is_plt = False is_seaborn = False format = format if format else "png" if isinstance(fig, plotly_.graph_objects.Figure): is_plotly = True elif isinstance(fig, graphviz_.Source): is_graphviz = True elif isinstance(fig, plt_.Figure): is_plt = True elif isinstance(fig, axes_.SubplotBase): is_seaborn = True if not filepath: extension = "html" if interactive and is_plotly else format filepath = os.path.join(os.getcwd(), f"test_plot.{extension}") filepath = _file_path_check( filepath, format=format, interactive=interactive, is_plotly=is_plotly ) if is_plotly and interactive: fig.write_html(file=filepath) elif is_plotly and not interactive: fig.write_image(file=filepath, engine="kaleido") elif is_graphviz: filepath_, format_ = os.path.splitext(filepath) fig.format = "png" filepath = f"{filepath_}.png" fig.render(filename=filepath_, view=False, cleanup=True) elif is_plt: fig.savefig(fname=filepath) elif is_seaborn: fig = fig.figure fig.savefig(fname=filepath) if return_filepath: return filepath
1b631548d7ba475e1b176032b512d39c45435516
5,568
def populate_canary(canary_id, protocol, domain, dns, filename, rdir, settings):
    """Create actual canary URI / URL."""
    if protocol not in ['unc', 'http', 'https']:
        raise ValidationError('Unknown protocol specified')

    if dns:
        domain = f"{canary_id}.{domain}"
    else:
        domain = f"{settings.nginx_domain}.{domain}"

    if protocol == 'unc':
        if not rdir:
            canary = f"\\\\{domain}\\templates\\{filename}"
        else:
            canary = f"\\\\{domain}\\templates\\{rdir}\\{filename}"
    else:
        if not rdir:
            canary = f"{protocol}://{domain}/images/{filename}"
        else:
            canary = f"{protocol}://{domain}/images/{rdir}/{filename}"

    return canary
48a4a75cd65cd4d555a14d6c06363e46e0ced3f5
5,569
import pkg_resources


def get_wastewater_location_data():
    """Read in data of wastewater facility location data.

    :return: dataframe of wastewater location values
    """
    data = pkg_resources.resource_filename('interflow', 'input_data/WW_Facility_Loc.csv')
    # return dataframe
    return pd.read_csv(data, dtype={'CWNS_NUMBER': str})
23f0c425eccdf173e8c8563c8d80e5e7b6a9ead1
5,570
def generate_accounts(seeds):
    """Create private keys and addresses for all seeds.
    """
    return {
        seed: {
            'privatekey': encode_hex(sha3(seed)),
            'address': encode_hex(privatekey_to_address(sha3(seed))),
        }
        for seed in seeds
    }
b10b9616b6d4826262c9296bfe389f001e098939
5,571
def get_annotation_df( state: State, piece: Piece, root_type: PitchType, tonic_type: PitchType, ) -> pd.DataFrame: """ Get a df containing the labels of the given state. Parameters ---------- state : State The state containing harmony annotations. piece : Piece The piece which was used as input when creating the given state. root_type : PitchType The pitch type to use for chord root labels. tonic_type : PitchType The pitch type to use for key tonic annotations. Returns ------- annotation_df : pd.DataFrame[type] A DataFrame containing the harmony annotations from the given state. """ labels_list = [] chords, changes = state.get_chords() estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int) for chord, start, end in zip(chords, changes[:-1], changes[1:]): estimated_chord_labels[start:end] = chord keys, changes = state.get_keys() estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int) for key, start, end in zip(keys, changes[:-1], changes[1:]): estimated_key_labels[start:end] = key chord_label_list = hu.get_chord_label_list(root_type, use_inversions=True) key_label_list = hu.get_key_label_list(tonic_type) prev_est_key_string = None prev_est_chord_string = None for duration, note, est_chord_label, est_key_label in zip( piece.get_duration_cache(), piece.get_inputs(), estimated_chord_labels, estimated_key_labels, ): if duration == 0: continue est_chord_string = chord_label_list[est_chord_label] est_key_string = key_label_list[est_key_label] # No change in labels if est_chord_string == prev_est_chord_string and est_key_string == prev_est_key_string: continue if est_key_string != prev_est_key_string: labels_list.append( { "label": est_key_string, "mc": note.onset[0], "mc_onset": note.mc_onset, "mn_onset": note.onset[1], } ) if est_chord_string != prev_est_chord_string: labels_list.append( { "label": est_chord_string, "mc": note.onset[0], "mc_onset": note.mc_onset, "mn_onset": note.onset[1], } ) prev_est_key_string = est_key_string prev_est_chord_string = est_chord_string return pd.DataFrame(labels_list)
19cf82dc77708099dc5c21695d30fd1c5d63ceb4
5,572
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    rough_string = ET.tostring(elem, "utf-8")
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent="  ")
4469a4e5683dd3196ae188bd09517406ca8276bc
5,573
def parse_new_multipart_upload(data):
    """
    Parser for new multipart upload response.

    :param data: Response data for new multipart upload.
    :return: Returns a upload id.
    """
    root = S3Element.fromstring('InitiateMultipartUploadResult', data)
    return root.get_child_text('UploadId')
02c83634a02ec94de698735b41424e9e53a2576f
5,574
def mech_name_for_species(mech1_csv_str, mech2_csv_str, ich):
    """ build dictionaries to get the name for a given InCHI string
    """
    mech1_inchi_dct = mechparser.mechanism.species_inchi_name_dct(
        mech1_csv_str)
    mech2_inchi_dct = mechparser.mechanism.species_inchi_name_dct(
        mech2_csv_str)

    if ich in mech1_inchi_dct:
        mech1_name = mech1_inchi_dct[ich]
    else:
        mech1_name = 'Not in Mechanism'

    if ich in mech2_inchi_dct:
        mech2_name = mech2_inchi_dct[ich]
    else:
        mech2_name = 'Not in Mechanism'

    return mech1_name, mech2_name
fe173853dd7b9460a016b370c60fbc6f4eeaac93
5,575
def get_api(api, cors_handler, marshal=None, resp_model=None,
            parser=None, json_resp=True):
    """Returns default API decorator for GET request.

    :param api: Flask rest_plus API
    :param cors_handler: CORS handler
    :param marshal: The API marshaller, e.g. api.marshal_list_with
    :param resp_model: The API response model
    """
    funcs = [
        cors_handler,
        no_cache,
        log_header(),
    ]
    if json_resp:
        funcs.append(as_json)
    funcs.append(
        api.doc(responses={
            403: 'Not Authorized',
            404: 'Resource does not exist',
        }),
    )
    if parser:
        funcs.insert(-1, api.doc(parser=parser))
    if marshal and resp_model:
        funcs.insert(-1, marshal(resp_model))
    return utils.compose(*funcs)
d4774ec394a7365418b60cc0ef7665e702c0da28
5,576
import math


def fetch_total_n_items(num_items, uniform_distribution=False):
    """Get num_items files from internet archive in our dirty categories list"""
    logger.info(f"Fetching info for {num_items} internetarchive items...")
    categories_weights = CATEGORIES_WEIGHTS
    if uniform_distribution:
        categories_weights = [1/len(DIRTY_CATEGORIES) for x in range(len(DIRTY_CATEGORIES))]
    how_many_of_each_cat = [math.ceil(w * num_items) for w in categories_weights]
    logger.info(" ".join([f"{cat}:{quant}" for cat, quant in zip(DIRTY_CATEGORIES, how_many_of_each_cat)]))
    total_items = []
    for amount, category in zip(how_many_of_each_cat, DIRTY_CATEGORIES):
        query = make_category_query(category)
        try:
            total_items.extend(fetch_items_in_query(query, amount))
        except Exception as e:
            logger.error(f"Failed to fetch info for \"{query}\" from internetarchive")
    return total_items
6b661c4c83c6d7766cb0a57a7f20eaa03ce44ed9
5,577
def german_weekday_name(date): """Return the german weekday name for a given date.""" days = [u'Montag', u'Dienstag', u'Mittwoch', u'Donnerstag', u'Freitag', u'Samstag', u'Sonntag'] return days[date.weekday()]
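# --- Added usage sketch (not part of the original snippet) ---
# The argument is a datetime.date (or anything with a .weekday() method).
import datetime

print(german_weekday_name(datetime.date(2021, 1, 1)))  # -> Freitag (2021-01-01 was a Friday)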
7d2919c61438ec913abe38cccd924bb69f866655
5,578
import pandas as pd
from sqlalchemy import create_engine


def load_data(database_filepath):
    """
    Input: database_filepath - path of the cleaned data file
    Output: X and Y for model training
            Category names
    """
    # load data from database
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql("SELECT * FROM df_clean", engine)
    X = df['message']
    Y = df.iloc[0:, 4:]
    category_names = Y.columns
    return X, Y, category_names
8647722a0b97a8130bfadfa6dec45fb71c9e6fe3
5,579
import numpy as np
from scipy.special import lpmn, factorial


def real_spherical_harmonics(phi, theta, l, m):
    """Real spherical harmonics, also known as tesseral spherical harmonics,
    with the Condon-Shortley phase.

    Only for scalar phi and theta!
    """
    if m == 0:
        y = np.sqrt(
            (2 * l + 1) / (4 * np.pi)
        ) * lpmn(m, l, np.cos(theta))[0][-1][-1]
    elif m < 0:
        y = (-1)**m * np.sqrt(2) * np.sqrt(
            (2 * l + 1) / (4 * np.pi) * \
            factorial(l - np.abs(m)) / factorial(l + np.abs(m))
        ) * lpmn(np.abs(m), l, np.cos(theta))[0][-1][-1] * np.sin(np.abs(m) * phi)
    elif m > 0:
        y = (-1)**m * np.sqrt(2) * np.sqrt(
            (2 * l + 1) / (4 * np.pi) * \
            factorial(l - np.abs(m)) / factorial(l + np.abs(m))
        ) * lpmn(np.abs(m), l, np.cos(theta))[0][-1][-1] * np.cos(np.abs(m) * phi)

    return y
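# --- Added usage sketch (not part of the original snippet) ---
# Sanity check against the closed form Y_{1,0}(theta) = sqrt(3/(4*pi)) * cos(theta).
theta, phi = 0.7, 1.3
expected = np.sqrt(3.0 / (4.0 * np.pi)) * np.cos(theta)
assert np.isclose(real_spherical_harmonics(phi, theta, 1, 0), expected)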
5c12cf5263676fccc2dee40c54670ea5150e2cfc
5,580
from typing import Callable

import distend.modifier


def get_replace_function(replace_multiple: bool) -> Callable:
    """Given the replace_multiple flag, return the corresponding replace
    function from distend.modifier.
    """
    if replace_multiple:
        return distend.modifier.replace_multiple
    else:
        return distend.modifier.replace_single
6bb05bb4dd8b28f8581e576aa0f086b55eb7cae6
5,581
def accuracy(X, Y, w):
    """
    Evaluate the classifier: return the fraction of examples in X whose
    prediction under the weights w matches the corresponding label in Y.
    """
    n_correct = 0
    for i in range(len(X)):
        if predict(w, X[i]) == Y[i]:
            n_correct += 1
    return n_correct * 1.0 / len(X)
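# --- Added usage sketch (not part of the original snippet) ---
# `predict` is not defined in this snippet; a hypothetical sign-based linear
# classifier is stubbed in here purely to illustrate the call.
def predict(w, x):
    return 1 if sum(wi * xi for wi, xi in zip(w, x)) >= 0 else -1

X_toy = [(1.0, 2.0), (-1.0, -3.0), (0.5, -0.2)]
Y_toy = [1, -1, -1]
print(accuracy(X_toy, Y_toy, w=(1.0, 1.0)))  # 2 of 3 correct -> 0.666...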
bdc68859ec7d1f011dc04f641565e44aaeffe908
5,582
from math import log2
from typing import List

import numpy as np


def reduce_matrix(indices_to_remove: List[int], matrix: np.ndarray) -> np.ndarray:
    """
    Removes the bit positions listed in indices_to_remove from the binary
    indexing of matrix, producing a smaller transition matrix. Entries of the
    new matrix are taken from the original with the removed bits fixed to
    state 0. This is an assumption on the noise made because it is likely
    that unmeasured qubits will be in that state.

    :param indices_to_remove: Binary index of state matrix is mapping to be removed.
    :type indices_to_remove: List[int]
    :param matrix: Transition matrix where indices correspond to some binary state, to have some dimension removed.
    :type matrix: np.ndarray

    :return: Transition matrix with removed entries.
    :rtype: np.ndarray
    """
    # int_to_binary / binary_to_int are helpers assumed to be provided
    # elsewhere in the surrounding package.
    new_n_qubits = int(log2(matrix.shape[0])) - len(indices_to_remove)
    if new_n_qubits == 0:
        return np.array([])
    bin_map = dict()
    mat_dim = 1 << new_n_qubits
    for index in range(mat_dim):
        # get current binary
        bina = list(int_to_binary(index, new_n_qubits))
        # add 0's to fetch old binary to set values from
        for i in sorted(indices_to_remove):
            bina.insert(i, 0)
        # get index of values
        bin_map[index] = binary_to_int(tuple(bina))

    new_mat = np.zeros((mat_dim,) * 2, dtype=float)
    for i in range(len(new_mat)):
        old_row_index = bin_map[i]
        for j in range(len(new_mat)):
            old_col_index = bin_map[j]
            new_mat[i, j] = matrix[old_row_index, old_col_index]

    return new_mat
dac7755b63593044a7df1658d3205572a935e64d
5,583
import pandas as pd


def kdj(df, n=9):
    """
    Stochastic oscillator KDJ.

    N-day RSV = (close on day N - lowest low over N days)
                / (highest high over N days - lowest low over N days) * 100%
    K (today) = 2/3 * K (previous day) + 1/3 * RSV (today) = SMA(RSV, M1)
    D (today) = 2/3 * D (previous day) + 1/3 * K (today)   = SMA(K, M2)
    J (today) = 3 * K (today) - 2 * D (today)
    """
    # sma() is a smoothing helper defined elsewhere in the surrounding library
    _kdj = pd.DataFrame()
    _kdj['date'] = df['date']
    rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100
    _kdj['k'] = sma(rsv, 3)
    _kdj['d'] = sma(_kdj.k, 3)
    _kdj['j'] = 3 * _kdj.k - 2 * _kdj.d
    return _kdj
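# --- Added usage sketch (not part of the original snippet) ---
# `sma(series, n)` is not defined above; the recursive smoothing used by this
# indicator family is stubbed here with an exponentially weighted mean
# (alpha = 1/n) purely for illustration.
def sma(series, n):
    return series.ewm(alpha=1.0 / n, adjust=False).mean()


ohlc = pd.DataFrame({
    'date': pd.date_range('2021-01-01', periods=30),
    'close': range(10, 40),
    'high': range(11, 41),
    'low': range(9, 39),
})
print(kdj(ohlc, n=9).tail())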
7aa88cd6ee972063a2bd45b1b5b83da0255b336c
5,584
def identity_func(x):
    """The identity (a.k.a. transparent) function that returns its input as is."""
    return x
06e0296c338d68663aa87d08b21f84919be3f85e
5,585
def make_choice_validator(
        choices, default_key=None, normalizer=None):
    """
    Returns a callable that accepts the choices provided.

    Choices should be provided as a list of 2-tuples, where the first
    element is a string that should match user input (the key); the
    second being the value associated with the key.

    The callable matches user input against the keys; on a complete match,
    the value associated with that key is returned.  Partial (prefix)
    matches are also supported, as long as they are unambiguous.

    If a default_key (an index into choices) is provided, the value of that
    choice will be returned when the user input is empty.

    Finally, a normalizer function can be passed; it is applied to all keys
    and to the value being validated before comparison.
    """

    def normalize_all(_choices):
        # normalize all the keys for easier comparison
        if normalizer:
            _choices = [(normalizer(key), value) for key, value in _choices]
        return _choices

    choices = normalize_all(choices)

    def choice_validator(value):
        if normalizer:
            value = normalizer(value)
        if not value and default_key:
            value = choices[default_key][0]
        results = []
        for choice, mapped in choices:
            if value == choice:
                return mapped
            if choice.startswith(value):
                results.append((choice, mapped))
        if len(results) == 1:
            return results[0][1]
        elif not results:
            raise ValueError('Invalid choice.')
        else:
            raise ValueError(
                'Choice ambiguous between (%s)' % ', '.join(
                    k for k, v in normalize_all(results))
            )

    return choice_validator
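# --- Added usage sketch (not part of the original snippet) ---
# Illustrates exact, prefix and ambiguous matches; the choices below are
# hypothetical.
validator = make_choice_validator(
    [('yes', True), ('no', False), ('none', None)],
    normalizer=lambda s: s.lower(),
)
print(validator('YES'))  # -> True (exact match after normalization)
print(validator('ye'))   # -> True (unambiguous prefix match)
try:
    validator('n')
except ValueError as exc:
    print(exc)           # -> Choice ambiguous between (no, none)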
65ac672f16a1031a9051bc4f6769c6b1b88db727
5,586
import time

import numpy as np


def find_best_polycomp_parameters(samples, num_of_coefficients_range,
                                  samples_per_chunk_range, max_error,
                                  algorithm, delta_coeffs=1, delta_samples=1,
                                  period=None, callback=None, max_iterations=0):
    """Performs an optimized search of the best configuration in the
    parameter space given by "num_of_coefficients_range" and
    "samples_per_chunk_range"."""

    optimization_start_time = time.perf_counter()

    x_range = num_of_coefficients_range
    y_range = samples_per_chunk_range

    midpoint_x, midpoint_y = [int(np.mean(k)) for k in (x_range, y_range)]
    param_points = PointCache(samples=samples,
                              max_allowable_error=max_error,
                              algorithm=algorithm,
                              period=period)

    # The logic of this code is the following:
    #
    # 1. Start from a point (x, y)
    # 2. Sample the point and all its neighbours
    # 3. Move to the best point among the nine that have been sampled
    # 4. Repeat from point 2. until the best point is the current one
    #
    # Many points will be sampled more than once, but we use a
    # "PointCache" object to do all the sampling, so that only newer
    # points need to be recalculated every time.

    num_of_steps = 1
    dx = delta_coeffs
    dy = delta_samples
    while True:
        ring_of_points = [(-dx, -dy), (0, -dy), (dx, -dy),
                          (-dx, 0),   (0, 0),   (dx, 0),
                          (-dx, dy),  (0, dy),  (dx, dy)]
        ring_of_configurations = []
        for dx, dy in ring_of_points:
            cur_x = midpoint_x + dx
            cur_y = midpoint_y + dy

            if cur_x < x_range[0] or cur_x > x_range[1]:
                continue
            if cur_y < y_range[0] or cur_y > y_range[1]:
                continue

            chunks, params = param_points.get_point(cur_x, cur_y)
            if callback is not None:
                callback(cur_x, cur_y, params, num_of_steps)
            ring_of_configurations.append((cur_x, cur_y, chunks, params))

        ring_of_configurations.sort(key=lambda p: p[3].compr_data_size)
        best_x, best_y, best_chunks, best_params = ring_of_configurations[0]

        # If we have run too many iterations, stop bothering and exit the loop
        num_of_steps += 1
        if (max_iterations > 0) and num_of_steps > max_iterations:
            break

        # If we're centered on the best value, let's explore a
        # narrower space around it
        if (best_x, best_y) == (midpoint_x, midpoint_y):
            repeat = False
            # Can the ring be shrunk any further? If so, shrink it and
            # keep iterating
            if (dx > 1) or (dy > 1):
                # If dx == dy, we prefer to reduce dy first
                if dy > dx:
                    dy = dy // 2
                else:
                    dx = dx // 2

                repeat = True

            if repeat:
                continue
            else:
                break

        midpoint_x, midpoint_y = best_x, best_y

    return (best_params,
            list(param_points.parameter_space.values()),
            num_of_steps)
47f076634c50cc18c760b7c60909a2d63a19fd3e
5,587
import numpy as np


def moving_average(data, window_size=100):
    # based on https://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python
    """
    Calculates a moving average over the data

    Args:
        data: set of values
        window_size: number of data points to consider in window
    Returns:
        Moving average of the data
    """
    cumsum_vec = np.cumsum(np.insert(data, 0, 0))
    ma_vec = (cumsum_vec[window_size:] - cumsum_vec[:-window_size]) / window_size
    return ma_vec
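# --- Added usage sketch (not part of the original snippet) ---
# With a window of 3, each output value is the mean of 3 consecutive inputs.
print(moving_average([1, 2, 3, 4, 5], window_size=3))  # -> [2. 3. 4.]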
8f04d659081a68c4287024e2b6567f257f7b9d92
5,588
import re

import numpy as np


def _change_TRAVDV_to_TRAVdashDV(s: str):
    """
    Reconciles a mixcr name like TRAV29DV5*01 to the tcrdist2 name TRAV29/DV5*01

    Parameters
    ----------
    s : str

    Examples
    --------
    >>> _change_TRAVDV_to_TRAVdashDV('TRAV29DV5*01')
    'TRAV29/DV5*01'
    >>> _change_TRAVDV_to_TRAVdashDV('TRAV38-2DV8*01')
    'TRAV38-2/DV8*01'
    >>> _change_TRAVDV_to_TRAVdashDV('TRDV*01')
    'TRDV*01'

    Notes
    -----
    This reconciles such gene names to match the tcrdist2 reference db.

    see database for more details:
        repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv").all_genes
    """
    if isinstance(s, str):
        m = re.match(pattern="(TRAV[0-9]+)(DV.*)", string=s)
        m2 = re.match(pattern="(TRAV[0-9]+-[1-2])(DV.*)", string=s)
        if m:
            new_s = "/".join(m.groups())
            return new_s
        elif m2:
            new_s = "/".join(m2.groups())
            return new_s
        else:
            return s
    else:
        return np.nan
b5df8b51c96ca9695aecc0fcae4589f35b692331
5,589
def gen_event_type_entry_str(event_type_name, event_type, event_config): """ return string like: {"cpu-cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES}, """ return '{"%s", %s, %s},\n' % (event_type_name, event_type, event_config)
ca89c19b45f182b8a7ae74ab76f3f42bddf46811
5,590
def encode_rotate_authentication_key_script(new_key: bytes) -> Script: """# Summary Rotates the transaction sender's authentication key to the supplied new authentication key. May be sent by any account. # Technical Description Rotate the `account`'s `DiemAccount::DiemAccount` `authentication_key` field to `new_key`. `new_key` must be a valid ed25519 public key, and `account` must not have previously delegated its `DiemAccount::KeyRotationCapability`. # Parameters | Name | Type | Description | | ------ | ------ | ------------- | | `account` | `&signer` | Signer reference of the sending account of the transaction. | | `new_key` | `vector<u8>` | New ed25519 public key to be used for `account`. | # Common Abort Conditions | Error Category | Error Reason | Description | | ---------------- | -------------- | ------------- | | `Errors::INVALID_STATE` | `DiemAccount::EKEY_ROTATION_CAPABILITY_ALREADY_EXTRACTED` | `account` has already delegated/extracted its `DiemAccount::KeyRotationCapability`. | | `Errors::INVALID_ARGUMENT` | `DiemAccount::EMALFORMED_AUTHENTICATION_KEY` | `new_key` was an invalid length. | # Related Scripts * `Script::rotate_authentication_key_with_nonce` * `Script::rotate_authentication_key_with_nonce_admin` * `Script::rotate_authentication_key_with_recovery_address` """ return Script( code=ROTATE_AUTHENTICATION_KEY_CODE, ty_args=[], args=[TransactionArgument__U8Vector(value=new_key)], )
6235409d0232e29d42de22a7bec2285adfd0db38
5,591
def retrieval_visualizations(model, savefig=True): """ Plots incremental retrieval contexts and supports, as heatmaps, and prints recalled items. **Required model attributes**: - item_count: specifies number of items encoded into memory - context: vector representing an internal contextual state - experience: adding a new trace to the memory model - activations: function returning item activations given a vector probe - outcome_probabilities: function returning item supports given a set of activations **Also** uses savefig: boolean deciding whether figures are saved (True) or displayed """ retrieval_contexts, retrieval_supports, recall = retrieval_states(model) plot_states(retrieval_contexts, 'Retrieval Contexts', savefig=savefig) plot_states(retrieval_supports, 'Supports For Each Item At Each Increment of Retrieval', savefig=savefig) return recall
96c4534a5e3057fb1bfd15068eec8cc61767c01d
5,592
from pathlib import Path def get_force_charge() -> str: """ Gets the command object for the force charge command Returns: The command object as a json string """ force_charge = Path('force_charge.json').read_text() return force_charge
c67277c62664419c3b4a19ae57ea6de027c60416
5,593
def prune_motifs(ts, sorted_dic_list, r): """ :param ts: 1-dimensional time-series either resulting from the PCA method or the original 1-dimensional time-series :type ts: 1d array :param sorted_dic_list: list of motif dictionaries returned from the emd algorithm, ordered by relevance :type sorted_dic_list: list of dic :param r: maximum distance to the center of the motif :type r: float :return: list of dictionaries with the most relevant motifs :rtype: list of dic """ pruned_motif_dic_list = [sorted_dic_list[0]] first_center_ts = extract_ts_from_pointers(ts, sorted_dic_list[0]['center_ts_pointers']) pruned_center_ts_list = [first_center_ts] for motif_dic in sorted_dic_list[1:]: cur_center_ts = extract_ts_from_pointers(ts, motif_dic['center_ts_pointers']) dist_list = dtwdist.compute_dwt_dist_between_ts_and_list(cur_center_ts, pruned_center_ts_list, 2 * r) dist_test_list = [dist <= 2 * r for dist in dist_list] if sum(dist_test_list) == 0: pruned_motif_dic_list.append(motif_dic) pruned_center_ts_list.append(cur_center_ts) else: continue return pruned_motif_dic_list
4fef0a51da25503548f6df59e09705c731a7fc6c
5,594
def xor_columns(col, parity): """ XOR a column with the parity values from the state """ result = [] for i in range(len(col)): result.append(col[i] ^ parity[i]) return result
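# --- Added usage sketch (not part of the original snippet) ---
# XORs each bit of a column with the corresponding parity bit.
print(xor_columns([1, 0, 1, 1], [0, 0, 1, 1]))  # -> [1, 0, 0, 0]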
2eff4dbf3edf2b97410e7bef17c043a30b1f3aa8
5,595
def initiate_default_resource_metadata(aws_resource): """ :type aws_resource: BaseAWSObject """ if not isinstance(aws_resource, BaseAWSObject): raise TypeError try: metadata = aws_resource.Metadata if not isinstance(metadata, dict): raise TypeError("`troposphere.BaseAWSObject.Metadata` is not a dict!") except Exception as e: if "is not a dict!" in str(e): raise e metadata = {} metadata.setdefault(TROPOSPHERE_METADATA_FIELD_NAME, {}) aws_resource.Metadata = metadata return metadata
4a510dd5a69f2499b407396f34818c79eead7c6a
5,596
def token_vault_single(chain, team_multisig, token, freeze_ends_at, token_vault_balances) -> Contract: """Another token vault deployment with a single customer.""" total = 1000 args = [ team_multisig, freeze_ends_at, token.address, total, 0 # Disable the tap ] contract, hash = chain.provider.deploy_contract('TokenVault', deploy_args=args) return contract
b42857cb7becacde9d5638f18f6dd7625eabb182
5,597
import json

import numpy


def pixel_pick():
    """Pick the value from a pixel.

    Args:
        body parameters:
            catalog (str): catalog to query
            asset_id (str): asset id to query
            lng (float): longitude coordinate
            lat (float): latitude coordinate

    Returns:
        {'val': val, 'x': x, 'y': y} if the pixel is in the valid range,
        otherwise {'val': 'out of range', 'x': x, 'y': y}.
    """
    try:
        picker_data = json.loads(flask.request.get_data())
        LOGGER.debug(str(picker_data))
        catalog_entry = queries.find_catalog_by_id(
            picker_data["catalog"], picker_data["asset_id"])
        r = gdal.OpenEx(catalog_entry.local_path, gdal.OF_RASTER)
        b = r.GetRasterBand(1)
        gt = r.GetGeoTransform()
        inv_gt = gdal.InvGeoTransform(gt)

        # transform lat/lng to raster coordinate space
        wgs84_srs = osr.SpatialReference()
        wgs84_srs.ImportFromEPSG(4326)
        raster_srs = osr.SpatialReference()
        raster_srs.ImportFromWkt(r.GetProjection())

        # put in x/y order
        raster_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
        wgs84_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

        # Create a coordinate transformation
        wgs84_to_raster_trans = osr.CoordinateTransformation(
            wgs84_srs, raster_srs)
        point = ogr.Geometry(ogr.wkbPoint)
        point.AddPoint(picker_data['lng'], picker_data['lat'])
        error_code = point.Transform(wgs84_to_raster_trans)
        if error_code != 0:  # error
            return "error on transform", 500

        # convert to raster space
        x_coord, y_coord = [
            int(p) for p in gdal.ApplyGeoTransform(
                inv_gt, point.GetX(), point.GetY())]
        if (x_coord < 0 or y_coord < 0 or
                x_coord >= b.XSize or y_coord >= b.YSize):
            response_dict = {
                'val': 'out of range',
                'x': x_coord,
                'y': y_coord
            }
        else:
            # must cast the right type for json
            val = r.ReadAsArray(x_coord, y_coord, 1, 1)[0, 0]
            if numpy.issubdtype(val, numpy.integer):
                val = int(val)
            else:
                val = float(val)
            # create the response
            response_dict = {
                'val': val,
                'x': x_coord,
                'y': y_coord
            }

            # and replace with no-data if set
            nodata = b.GetNoDataValue()
            if nodata is not None:
                if numpy.isclose(val, nodata):
                    response_dict['val'] = 'nodata'

        response = flask.jsonify(response_dict)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    except Exception as e:
        LOGGER.exception('something bad happened')
        return str(e), 500
c6cab95092da6b9a1f088a0bb565ee1973729112
5,598
import re
from collections import OrderedDict

import numpy as np
from astropy.table import Column, Table


def read_eep_track(fp, colnames=None):
    """ read MIST eep tracks """
    # read lines
    with open(fp, "r") as f:
        s = f.readlines()

    # get info
    MIST_version = re.split(r"\s+", s[0].strip())[-1]
    MESA_revision = re.split(r"\s+", s[1].strip())[-1]

    Yinit, Zinit, FeH, aFe, vvcrit = re.split(r"\s+", s[4].strip())[1:]
    Yinit = float(Yinit)
    Zinit = float(Zinit)
    FeH = float(FeH)
    aFe = float(aFe)
    vvcrit = float(vvcrit)

    initial_mass, N_pts, N_EEP, N_col, phase, type_ = \
        re.split(r"\s+", s[7].strip())[1:]
    initial_mass = float(initial_mass)
    N_pts = int(N_pts)
    N_EEP = int(N_EEP)
    N_col = int(N_col)

    # get eep info
    EEPs = [int(_) for _ in re.split(r"\s+", s[8].strip())[2:]]
    eep = np.arange(EEPs[0], EEPs[-1] + 1)

    # add eep column
    # _eep
    t = Table.read(s[11:], format="ascii.commented_header")
    t.add_column(Column(eep, "_eep"))
    # _lgmass
    t.add_column(Column(np.ones(len(t), ) * np.log10(initial_mass), "_lgmass"))
    # _lgage
    t.add_column(Column(np.log10(t["star_age"].data), "_lgage"))
    # _feh
    t.add_column(Column(np.ones(len(t), ) * FeH, "_feh"))

    # add meta info
    meta = OrderedDict(
        MIST_version=MIST_version,
        MESA_revision=MESA_revision,
        Yinit=Yinit,
        Zinit=Zinit,
        FeH=FeH,
        aFe=aFe,
        vvcrit=vvcrit,
        initial_mass=initial_mass,
        N_pts=N_pts,
        N_EEP=N_EEP,
        N_col=N_col,
        phase=phase,
        type_=type_,
        EEPs=EEPs,
        INTERP=("_INTERP" in fp)
    )
    t.meta = meta

    if colnames is None:
        return t
    else:
        for colname in colnames:
            try:
                assert colname in t.colnames
            except AssertionError as ae:
                raise AssertionError(
                    "{} not in track.colnames!!!".format(colname)) from ae
        return t
551c8e5ba05aec5f32d9184398427fb003db78ba
5,599