content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def soft_crossentropy(predicted_logprobs, target_probs):
    """ Cross-entropy loss capable of handling soft target probabilities. """
    return -(target_probs * predicted_logprobs).sum(1).mean(0)
8f6f0168c67cd0b3f432a5c91c7f4069c54de7c8
7,396
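A quick usage sketch for soft_crossentropy above (assumes PyTorch as the array backend; the function itself only needs .sum(1).mean(0)):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)                    # batch of 4 samples, 3 classes
logprobs = F.log_softmax(logits, dim=1)       # log-probabilities over classes
targets = torch.full((4, 3), 1.0 / 3.0)       # uniform soft targets
loss = soft_crossentropy(logprobs, targets)   # 0-dim loss tensor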
def tflops_per_second(flops, dt):
    """ Computes an effective processing rate in TFLOPS per second.

    TFLOP/S = flops / (dt * 1E12)

    Args:
        flops: Estimated FLOPS in the computation.
        dt: Elapsed time in seconds.

    Returns:
        The estimate.
    """
    return flops / (1E12 * dt)
f244632e1378a69ea55d4a994a9711bd3a2dca2a
7,399
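A usage sketch for tflops_per_second above with made-up numbers: a 10-TFLOP workload finishing in 2.5 seconds runs at 4 TFLOP/s.

flops = 10e12   # hypothetical total FLOPs of the computation
dt = 2.5        # elapsed seconds
print(tflops_per_second(flops, dt))  # 4.0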
import os
import json


def get_hosts(host_file):
    """ Fetches host data from a specified host_file

    Args:
        host_file: Device host file in JSON format

    Returns:
        A dict mapping keys to the corresponding host data, as follows:

        {u'Group-1': {u'example-device-2': {u'username': u'username',
                                            u'password': u'password',
                                            u'ipAddress': u'10.0.0.2',
                                            u'port': u'22'},
                      u'example-device-3': {u'username': u'username',
                                            u'password': u'password',
                                            u'ipAddress': u'10.0.0.3',
                                            u'port': u'22'}
                      }
        }
    """
    with open(os.path.join('./host_files', host_file), 'r') as host_file:
        return json.load(host_file)
1d0e58c5cd7d0e9fbe4ea3db6c4e91690afffbd0
7,400
import torch


def huber_fn_gradient(x, mu):
    """ Huber function gradient """
    y = torch.zeros_like(x)
    with torch.no_grad():
        mask = torch.abs(x) <= mu
        y[mask] = x[mask] / mu
        y[~mask] = x[~mask] / torch.abs(x[~mask])
    return y
1bcbe697a76c06afd49e7bbf106a3c1be0a47481
7,401
import os
import binascii


def generate_id(hksess):
    """
    Generates a unique session id based on the start_time, process_id,
    and hksess description.

    Args:
        hksess (so3g.HKSessionHelper)
    """
    # Maybe this should go directly into HKSessionHelper
    elements = [
        (int(hksess.start_time), 32),
        (os.getpid(), 14),
        (binascii.crc32(bytes(hksess.description, 'utf8')), 14)
    ]
    agg_session_id = 0
    for i, b in elements:
        agg_session_id = (agg_session_id << b) | (i % (1 << b))
    return agg_session_id
2bf52c38b26e9a65071b0db0a7bb0d3edaba17ca
7,402
import os
import json
import logging


def read_settings(path=os.getenv('MOASTROCONFIG',
                                 os.path.expandvars('$HOME/.moastro.json'))):
    """Read the Mo'Astro JSON configurations file.

    Parameters
    ----------
    path : str
        Path to the ``.moastro.json`` file.

    Returns
    -------
    settings : dict
        The settings, as a ``dict``. If the settings file is not found,
        an empty dictionary is returned.
    """
    try:
        with open(path, 'r') as f:
            return json.loads(f.read())
    except IOError:
        log = logging.getLogger('moastro')
        log.warning("{path} config file not found".format(path=path))
        return {}
ef0d8f0dcb176bcca98be9b604c382e3111ec7e7
7,403
def is_valid(value, cast_fn, expected_data_type, allow_none=False):
    """
    Checks whether a value can be converted using the cast_fn function.

    Args:
        value: Value to be considered
        cast_fn: Function used to determine the validity, should throw an
                 exception if it cannot
        expected_data_type: string name of the expected data
        allow_none: Boolean determining if none is valid

    Returns:
        tuple:
            **valid (boolean)**: whether it could be cast
            **msg (string)**: Msg reporting what happened
    """
    try:
        # Check for a non-none value
        if str(value).lower() != 'none':
            value = cast_fn(value)
            valid = True
            msg = None
        # Handle the error for none when not allowed
        elif not allow_none and str(value).lower() == 'none':
            valid = False
            msg = 'Value cannot be None'
        # Report all good for nones when they are allowed
        else:
            valid = True
            msg = None
    # Report an exception when the casting goes bad.
    except Exception:
        valid = False
        msg = "Expecting {0} received {1}".format(expected_data_type,
                                                  type(value).__name__)
    return valid, msg
ee1a2aca4ba7d437692f5025901f9bf94031434a
7,404
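A usage sketch for is_valid above, using int as the cast function:

print(is_valid('42', int, 'integer'))                    # (True, None)
print(is_valid('abc', int, 'integer'))                   # (False, 'Expecting integer received str')
print(is_valid(None, int, 'integer'))                    # (False, 'Value cannot be None')
print(is_valid(None, int, 'integer', allow_none=True))   # (True, None)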
def run_model(network, nodes, demand_per_person_kw_peak, mg_gen_cost_per_kw,
              mg_cost_per_m2, cost_wire_per_m, grid_cost_per_m2):
    """Calculate off-grid costs for unconnected settlements and iteratively
    extend the grid wherever connection is cheaper than a mini-grid."""
    # First calculate the off-grid cost for each unconnected settlement
    for node in nodes:
        if node[5] == 0:
            node[7] = (node[4] * demand_per_person_kw_peak * mg_gen_cost_per_kw
                       + node[3] * mg_cost_per_m2)

    # Then we're ready to calculate the optimum grid extension.
    # This is done by expanding out from each already connected node,
    # finding the optimum connection of nearby nodes.
    # This is then compared to the off-grid cost and if better,
    # these nodes are marked as connected.
    # Then the loop continues until no new connections are found.

    # This function recurses through the network, dragging the current c_ values
    # along with it. These aren't returned, so are left untouched by aborted
    # side-branch explorations. The best b_ values are returned, and are updated
    # whenever a better configuration is found. Thus these will remember the
    # best solution including all side meanders.
    def find_best(nodes, network, index, prev_arc, b_pop, b_length, b_nodes,
                  b_arcs, c_pop, c_length, c_nodes, c_arcs):
        if nodes[index][6] == 0:  # don't do anything with already connected nodes
            c_pop += nodes[index][4]
            c_length += network[prev_arc][8]
            c_nodes = c_nodes[:] + [index]
            c_arcs = c_arcs[:] + [prev_arc]

            if c_pop / c_length > b_pop / b_length:
                b_pop = c_pop
                b_length = c_length
                b_nodes[:] = c_nodes[:]
                b_arcs[:] = c_arcs[:]

            connected_arcs = [network[arc_index] for arc_index in nodes[index][8]]
            for arc in connected_arcs:
                if arc[9] == 0 and arc[0] != prev_arc:
                    # make sure we look at the other end of the arc
                    goto = 6 if arc[5] == index else 5
                    nodes, network, b_pop, b_length, b_nodes, b_arcs = find_best(
                        nodes, network, arc[goto], arc[0],
                        b_pop, b_length, b_nodes, b_arcs,
                        c_pop, c_length, c_nodes, c_arcs)

        return nodes, network, b_pop, b_length, b_nodes, b_arcs

    while True:  # keep looping until no further connections are added
        to_be_connected = []
        for node in nodes:
            if node[6] == 1:  # only start searches from currently connected nodes
                connected_arcs = [network[arc_index] for arc_index in node[8]]
                for arc in connected_arcs:
                    if arc[9] == 0:
                        goto = 6 if arc[5] == node[0] else 5

                        # function call a bit of a mess with all the c_ and b_ values
                        nodes, network, b_pop, b_length, b_nodes, b_arcs = find_best(
                            nodes, network, arc[goto], arc[0],
                            0, 1e-9, [], [], 0, 1e-9, [], [])

                        # calculate the mg and grid costs of the resultant configuration
                        best_nodes = [nodes[i] for i in b_nodes]
                        best_arcs = [network[i] for i in b_arcs]
                        mg_cost = sum([node[7] for node in best_nodes])
                        grid_cost = (cost_wire_per_m * sum(arc[8] for arc in best_arcs)
                                     + grid_cost_per_m2 * sum([node[3] for node in best_nodes]))

                        if grid_cost < mg_cost:
                            # check if any nodes are already in to_be_connected
                            add = True
                            for index, item in enumerate(to_be_connected):
                                if set(b_nodes).intersection(item[1]):
                                    if b_pop / b_length < item[0]:
                                        del to_be_connected[index]
                                    else:
                                        # if the existing one is better, we don't add the new one
                                        add = False
                                    break
                            if add:
                                to_be_connected.append((b_pop / b_length, b_nodes, b_arcs))

        # mark all to_be_connected as actually connected
        if len(to_be_connected) >= 1:
            print(len(to_be_connected))
            for item in to_be_connected:
                for node in item[1]:
                    nodes[node][6] = 1
                for arc in item[2]:
                    network[arc][9] = 1
        else:
            break  # exit the loop once nothing is added

    return network, nodes
02b386363ac2b18bde7e5773da91109fd234353b
7,405
def numero_lista(df):
    """
    Gets the number of class lists.

    :parameter: dataframe
    :return: number of class lists
    """
    # Select the 'clase' column of the dataframe and clean it up
    df['clase'] = df['clase'].str.replace('[', '').str.replace(']', '').str.replace("''", "")
    # Get a list of the unique classes
    clase_uniq = df['clase'].unique().tolist()
    # Create an empty list
    lista = []
    # Iterate to build a list of lists,
    # taking each value of the list in turn
    for j in range(len(clase_uniq)):
        # split the corresponding element
        x = clase_uniq[j].split(',')
        # convert str to int
        x = [int(i) for i in x]
        # append to the new list
        lista.append(x)
    return len(lista)
86523b6f9eae835de6dfc960e8930cf7659cb2a7
7,406
def v_relative(v, met):
    """Estimates the relative air speed which combines the average air speed
    of the space plus the relative air speed caused by the body movement.
    Vag is assumed to be 0 for metabolic rates equal and lower than 1 met
    and otherwise equal to Vag = 0.3 (M - 1) (m/s)

    Parameters
    ----------
    v : float
        air speed measured by the sensor, [m/s]
    met : float
        metabolic rate, [met]

    Returns
    -------
    vr : float
        relative air speed, [m/s]
    """
    if met > 1:
        return round(v + 0.3 * (met - 1), 3)
    else:
        return v
6dceae6ec076dc800d2aa3e80d7d491d94830580
7,407
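A usage sketch for v_relative above: at 1.1 met the body-movement term adds 0.3 * (1.1 - 1) = 0.03 m/s.

print(v_relative(0.1, 1.1))  # 0.13
print(v_relative(0.1, 1.0))  # 0.1 (no correction at or below 1 met)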
def fully_qualified_name(entry):
    """
    Calculates the fully qualified name for an entry by walking the path
    to the root node.

    Args:
        entry: a BeautifulSoup Tag corresponding to an <entry ...> XML node,
               or a <clone ...> XML node.

    Raises:
        ValueError: if entry does not correspond to one of the above XML nodes

    Returns:
        A string with the full name, e.g. "android.lens.info.availableApertureSizes"
    """
    filter_tags = ['namespace', 'section']
    parents = [i['name'] for i in entry.parents if i.name in filter_tags]

    if entry.name == 'entry':
        name = entry['name']
    elif entry.name == 'clone':
        name = entry['entry'].split(".")[-1]  # "a.b.c" => "c"
    else:
        raise ValueError("Unsupported tag type '%s' for element '%s'"
                         % (entry.name, entry))

    parents.reverse()
    parents.append(name)

    fqn = ".".join(parents)
    return fqn
68119b640509cd972770f810b80ba1a2ad54f688
7,408
def sort_tasks_by_exec_time(tasks):
    """
    Sort tasks in descending order by the execution time

    Args:
        tasks (list(Node))

    Returns:
        list(Node)
    """
    n = len(tasks)
    # Simple bubble sort on execution time, largest first
    for i in range(n):
        for j in range(0, n - i - 1):
            if tasks[j].get_exec_time() < tasks[j + 1].get_exec_time():
                tasks[j], tasks[j + 1] = tasks[j + 1], tasks[j]
    return tasks
44f24408803c851ae7f1dd021ec19f99efc3feda
7,409
import re


def shorten_int_name(interface_name):
    """
    Returns the Cisco shortened interface name from a full one.
    If the full interface name is invalid, this will return None
    """
    short = None
    regex = r"(\w{2}).*?(\d+(?:/\d+)?(?:/\d+)?)"
    match = re.match(regex, interface_name)
    if match is not None:
        short = ""
        for group in match.groups():
            short += group
    return short
48a6f730c8d3d2f0abaec299385b5d558cf06a00
7,410
import re


def read_version():
    """Read version from the first line starting with digit"""
    regex = re.compile(r'^(?P<number>\d.*?) .*$')
    with open('../CHANGELOG.rst') as f:
        for line in f:
            match = regex.match(line)
            if match:
                return match.group('number')
7188470ab1a794b6e72a1fe8bcd804f7290be4a4
7,411
def RefDefaults():
    """
    Returns dictionary of default values for all properties.
    These are used to provide defaults to fields that we do not want to
    automatically calculate dimensions for
    """
    return {
        'phi':  {'min': -180, 'max': 180, 'stepsize': 10},
        'L7':   {'ref': 1.465, 'stepsize': '', 'min': '', 'max': ''},
        'ome':  {'min': -180, 'max': 180, 'stepsize': 10},
        'chi1': {'min': -180, 'max': 180, 'stepsize': 10},
        'chi2': {'min': -180, 'max': 180, 'stepsize': 10},
        'chi3': {'min': -180, 'max': 180, 'stepsize': 10},
        'chi4': {'min': -180, 'max': 180, 'stepsize': 10},
        'chi5': {'min': -180, 'max': 180, 'stepsize': 10},
        'psi':  {'min': -180, 'max': 180, 'stepsize': 10},
        'zeta': {'min': -180, 'max': 180, 'stepsize': 10}
    }
e6e87fd7c3b3a05f83808bebb410a6d5c687d3d3
7,412
import json


def load_json(path: str):
    """ Load the contents of a json file into a python dictionary """
    with open(path) as f:
        content = json.load(f)
    return content
b35ae26ca303347a98ea3dd3ca42370279d19a2a
7,413
def enc_backbuffer(backbuffer):
    """Helper function for RLE compression, encodes a string of
    incompressible data."""
    compdata = []
    if len(backbuffer) == 0:
        return compdata
    # Literal runs are capped at 128 bytes; a count byte of 127 means
    # "128 literal bytes follow".
    while len(backbuffer) > 128:
        compdata.append(127)
        compdata.extend(backbuffer[0:128])
        backbuffer = backbuffer[128:]
    compdata.append(len(backbuffer) - 1)
    compdata.extend(backbuffer)
    return compdata
75e9860cd0a8563f3e5655b998b4d0dfa1658e9c
7,416
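A usage sketch for enc_backbuffer above: three literal bytes are prefixed with the count byte len - 1 = 2.

print(enc_backbuffer([10, 20, 30]))  # [2, 10, 20, 30]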
import os


def output_path(inid=None, ftype='data', format='json', site_dir='_site',
                must_work=False):
    """Convert an ID into a data, edge, headline, json, or metadata path

    Args:
        inid: str. Indicator ID with no extensions or paths, eg '1-1-1'.
            Can also be "all" for all data. If it is None then return the
            directory path for this ftype.
        ftype: str. Which file related to this ID? One of:
            1. data: Main indicator data
            2. meta: Indicator metadata
            3. edges: The edge file generated from data
            4. headline: The headline data generated from data
            5. comb: combined data and edge data
            6. stats: Statistics on all indicators
            7. translations: Translations by language and group
        format: str. What data type. One of:
            1. json
            2. csv
        site_dir: str. Location to build the site to.
        must_work: bool. If True an IOError is thrown if the file is not found.

    Returns:
        path to the file. If the site_dir is set this will form the base.
    """
    # Check that the input makes sense
    expected_ftypes = ['data', 'meta', 'edges', 'headline', 'comb', 'stats',
                       'translations']
    if ftype not in expected_ftypes:
        raise ValueError("ftype must be one of: " + ", ".join(expected_ftypes))
    expected_formats = ['csv', 'json']
    if format not in expected_formats:
        raise ValueError("format must be one of: " + ", ".join(expected_formats))

    ext = '.csv' if format == 'csv' else '.json'
    path = os.path.join(site_dir, ftype)
    prefix = ''

    # Get the directory path
    if inid is None:
        f = path
    else:
        f = os.path.join(path, prefix + inid + ext)

    if must_work:
        if not os.path.exists(f):
            raise IOError(f + ' not found.')

    return f
39a0bd9dab42eae7c2ff9fa6fb407d291b5ecdbf
7,417
def film_availability_keys():
    """
    FilmAvailability definition

    Optional keys: "id"
    """
    return ["service", "displayName", "country", "url"]
829ec5ea5492b58c19639f0f83245a88aedb6cc8
7,418
def V_tank_Reflux(Reflux_mass, tau, rho_Reflux_20, dzeta_reserve):
    """
    Calculates the tank for waste.

    Parameters
    ----------
    Reflux_mass : float
        The mass flowrate of Reflux, [kg/s]
    tau : float
        The time, [s]
    rho_Reflux_20 : float
        The density of waste at 20 degrees Celsius, [kg/m**3]
    dzeta_reserve : float
        The coefficient of reserve, [dimensionless]

    Returns
    -------
    V_tank_Reflux : float
        The tank for Reflux, [m**3]

    References
    ----------
    &&&&&&&&&&&&
    """
    return Reflux_mass * tau * dzeta_reserve / rho_Reflux_20
3e1adc446bbe2dd936663af895c59222cd000a48
7,419
def angular_travel(angle_array):
    """
    Takes in an array of angular change and returns the angles travelled.
    This is sensitive to changes in direction. The angles travelled are
    returned as an array of stepwise values
    """
    travelled = [0]
    for a, angle in enumerate(angle_array):
        travelled.append(travelled[a] + angle)
    return travelled
0c9eca97614a38c7400796ae3930f18707bc948d
7,420
import os


def check_output_dir(output_dir):
    """
    Checks the output directory for files generated in previous runs; these
    can be skipped later by detect_trs().
    Checking is done quite naively, only looking for files ending in
    '.pickle' (so no support for .pcl, .pkl ...)

    Parameters:
        output_dir (str): Directory to check for output from previous runs

    Returns:
        finished_sequences (set): Set of genomic regions that can be skipped
            by detect_trs()
    """
    finished_sequences = {i.replace(".pickle", "")
                          for i in os.listdir(output_dir)
                          if i.endswith(".pickle")}
    return finished_sequences
30e9945cabced00a65d4d5dc9d4194656d0083dc
7,421
import torch


def attention_aggregator(embedding_lists, weights, embed_dim=0) -> torch.Tensor:
    """
    Returns a weighted sum of embeddings

    :param embedding_lists: list of n tensors of shape (l, K) embedding tensors (l can vary)
    :param weights: list of n tensors of shape (l,) weights (l can vary, but matches embeddings)
    :param embed_dim: K, used if the embedding_lists is empty (n = 0) to return a (0, K) empty tensor
    :return: weighted sum of embeddings, shape (n, K)
    """
    assert len(embedding_lists) == len(weights), \
        f"aggregation weights different length to embeddings! " \
        f"weights len {len(weights)}, embeds len {len(embedding_lists)}"
    if len(embedding_lists):
        if len(embedding_lists) == 1:
            # (1, K) tensor with the single embedding (ie: no aggregation necessary)
            return embedding_lists[0]
        aggregated = torch.stack([torch.sum(emb * w.view(-1, 1), dim=0)
                                  for emb, w in zip(embedding_lists, weights)])
        return aggregated  # (n, K) tensor of aggregated embeddings
    else:
        return torch.tensor([]).view(-1, embed_dim)
88fe01d8baea23321593bf88fd522eb0ef379be9
7,422
def _calculate_application_risk(module):
    """
    Function to calculate Software risk due to application type.

    This function uses a similar approach as RL-TR-92-52 for baseline fault
    density estimates. The baseline application is Process Control software.
    Every other application is ranked relative to Process Control using the
    values in RL-TR-92-52, Worksheet 0 for average fault density.

    Baseline (low) application risk (A) is assigned a 1.
    Medium risk is assigned a 2.
    High risk is assigned a 3.

    Application risks are defined as:

    +-------+------------------------------+----------+
    | Index |         Application          | Relative |
    |       |                              |   Risk   |
    +-------+------------------------------+----------+
    |   1   | Batch (General)              |   Low    |
    |   2   | Event Control                |   Low    |
    |   3   | Process Control              |   Low    |
    |   4   | Procedure Control            |  Medium  |
    |   5   | Navigation                   |   High   |
    |   6   | Flight Dynamics              |   High   |
    |   7   | Orbital Dynamics             |   High   |
    |   8   | Message Processing           |  Medium  |
    |   9   | Diagnostics                  |  Medium  |
    |  10   | Sensor and Signal Processing |  Medium  |
    |  11   | Simulation                   |   High   |
    |  12   | Database Management          |  Medium  |
    |  13   | Data Acquisition             |  Medium  |
    |  14   | Data Presentation            |  Medium  |
    |  15   | Decision and Planning Aids   |  Medium  |
    |  16   | Pattern and Image Processing |   High   |
    |  17   | System Software              |   High   |
    |  18   | Development Tools            |   High   |
    +-------+------------------------------+----------+

    :param module: the :py:class:`rtk.software.CSCI.Model` or
                   :py:class:`rtk.software.Unit.Model` data model to calculate.
    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    if module.application_id == 0:
        module.a_risk = 0.0
    elif module.application_id in [5, 6, 7, 11, 16, 17, 18]:
        module.a_risk = 3.0
    elif module.application_id in [4, 8, 9, 10, 12, 13, 14, 15]:
        module.a_risk = 2.0
    else:
        module.a_risk = 1.0

    return False
703aaf086aecf717be5c13694a8f1dae9f70a86c
7,423
import os


def get_annotations(directory):
    """ Returns rel path for all anvil files in a directory"""
    return (
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if f.endswith(".anvil")
    )
606fce3064f7fca95c0860d07829dc9f3e121be8
7,425
def bytesToBits(numBytes):
    """
    Converts number of bytes to bits.

    :param numBytes: The number of bytes to convert.
    :returns: Number of bits.
    """
    return numBytes * 8
6dc14c9d9f5829337e826c63a7772ea8d3c6962c
7,426
def get_model_field(model, name):
    """
    Gets a field from a Django model.

    :param model: A Django model, this should be the class itself.
    :param name: A Django model's field.
    :return: The field from the model, an instance of django.db.models.fields.Field
    """
    return model._meta.get_field(name)
e0f692aff82c20c7817d7de5d1fbeec1b69d3a3d
7,427
import os


def _get_answer_files(request):
    """
    Gets the path to where the hashed and raw answers are saved.
    """
    answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.yaml"
    raw_answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.h5"
    # Add the local-dir aspect of the path. If there's a command line value,
    # have that override the ini file value
    clLocalDir = request.config.getoption("--local-dir")
    iniLocalDir = request.config.getini("local-dir")
    if clLocalDir is not None:
        answer_file = os.path.join(os.path.expanduser(clLocalDir), answer_file)
        raw_answer_file = os.path.join(os.path.expanduser(clLocalDir), raw_answer_file)
    else:
        answer_file = os.path.join(os.path.expanduser(iniLocalDir), answer_file)
        raw_answer_file = os.path.join(os.path.expanduser(iniLocalDir), raw_answer_file)
    # Make sure we don't overwrite unless we mean to
    overwrite = request.config.getoption("--force-overwrite")
    storing = request.config.getoption("--answer-store")
    raw_storing = request.config.getoption("--raw-answer-store")
    raw = request.config.getoption("--answer-raw-arrays")
    if os.path.exists(answer_file) and storing and not overwrite:
        raise FileExistsError(
            "Use `--force-overwrite` to overwrite an existing answer file."
        )
    if os.path.exists(raw_answer_file) and raw_storing and raw and not overwrite:
        raise FileExistsError(
            "Use `--force-overwrite` to overwrite an existing raw answer file."
        )
    # If we do mean to overwrite, do so here by deleting the original file
    if os.path.exists(answer_file) and storing and overwrite:
        os.remove(answer_file)
    if os.path.exists(raw_answer_file) and raw_storing and raw and overwrite:
        os.remove(raw_answer_file)
    print(os.path.abspath(answer_file))
    return answer_file, raw_answer_file
4d1faed155090f329c4d5efb78a606e25e9aca0f
7,428
def inert_masses(m_1, H, z_m, E_1):
    """First stage inert masses.

    Arguments:
        m_1 (scalar): First stage wet mass [units: kilogram].
        H (scalar): Fraction of the recovery vehicle dry mass which is added
            recovery hardware [units: dimensionless].
        z_m (scalar): Fraction of baseline dry mass which is to be recovered
            [units: dimensionless].
        E_1 (scalar): Structural mass ratio w/o reuse hardware
            [units: dimensionless].

    Returns:
        scalar: First stage inert mass
        scalar: Recovery vehicle inert mass
    """
    chi_r = H * z_m / (1 - H)
    m_inert_1 = m_1 * ((1 + chi_r)
                       / (1 + chi_r + (1 - E_1) / E_1))
    m_inert_recov_1 = m_1 * ((z_m + chi_r)
                             / (1 + chi_r + (1 - E_1) / E_1))
    return m_inert_1, m_inert_recov_1
5698fcb36ef1f532cc8bc1dc0c86a25adc5bcab8
7,429
def commnets(milestone):
    """Filters the milestone's events to find the number of comments for
    each individual one"""
    comments = milestone.event_set.filter(event_kind="K")
    size = comments.count()
    return size
9c3654911fe993c359bc593433b6fde1c467a504
7,430
def buildList(pdList, matrix):
    """Takes a list of primary datasets (PDs) and the AlCaRecoMatrix (a
    dictionary) and returns a string with all the AlCaRecos for the selected
    PDs separated by the '+' character without duplicates."""
    alCaRecoList = []
    for pd in pdList:
        alCaRecoList.extend(matrix[pd].split("+"))
    # remove duplicates converting to a set
    alCaRecoList = set(alCaRecoList)
    stringList = ''
    for alCaReco in alCaRecoList:
        if stringList == '':
            stringList += alCaReco
        else:
            stringList += '+' + alCaReco
    return stringList
7e9351f115aac1064068e16f12276ed5506217e4
7,431
import os


def has_image_extension(uri) -> bool:
    """Check that file has image extension.

    Args:
        uri (Union[str, pathlib.Path]): the resource to load the file from

    Returns:
        bool: True if file has image extension, False otherwise
    """
    _, ext = os.path.splitext(uri)
    return ext.lower() in {".bmp", ".png", ".jpeg", ".jpg", ".tif", ".tiff"}
e9f338ecda0fa5842fd1aeb15362718d6f026fe6
7,432
import re


def _get_ip_addr_num(file_path):
    """Get the next IPADDR index num to use for adding an ip addr to an
    ifcfg file.
    """
    num = ''
    with open(file_path, 'r') as f:
        data = f.read()
    data = data.splitlines()
    for line in data:
        found = re.search(r'IPADDR(\d?)=', line)
        if found:
            if found.group(1) == '':
                num = '0'
            else:
                num = str(int(found.group(1)) + 1)
    return num
09dfd6bc8a9da240d3044bd6f5b974c69cbebf76
7,433
from numpy import nan


def tobool(value):
    """Convert value to boolean or Not a Number if not possible"""
    if value is None:
        value = nan
    else:
        value = bool(value)
    return value
9517d817381111c55e73e03256d516ccbbd940a2
7,434
def tokenize(obj, tokenizer, max_seq_len):
    """Recursively convert to tokens."""
    if isinstance(obj, str):
        toks = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)[:max_seq_len])
        # all(toks < len(tokenizer.encoder))
        assert all([t < len(tokenizer.encoder) for t in toks])
        return toks
    if isinstance(obj, dict):
        return dict((n, tokenize(o, tokenizer, max_seq_len)) for n, o in obj.items())
    return list(tokenize(o, tokenizer, max_seq_len) for o in obj)
9e8ae8a4c1af0df29d95afcef183851792af2b43
7,437
def manifest_to_file_list(manifest_fn):
    """
    Open a manifest file and read it into a list. Entries in the list are
    relative, i.e. no leading slash.

    manifest_fn -- the manifest file to read
    """
    image_manifest_list = []
    with open(manifest_fn) as image:
        image_manifest_list = [x[1:] for x in image.read().splitlines()]
    return image_manifest_list
982f02e0b00fad20af8d50d44673d65d9bba5a37
7,438
def intersect(sequence_a, sequence_b):
    """Return true if the two sequences contain items in common

    If sequence_a is a non-sequence then return false.
    """
    try:
        for item in sequence_a:
            if item in sequence_b:
                return 1
    except TypeError:
        return 0
    return 0
f28c3b6258584fdc1821c65867b4d89a1c33373a
7,439
def get_location_dictionary(deployment):
    """ Construct location dictionary from ui deployment information. """
    try:
        have_location_dict = False
        latitude = None
        longitude = None
        location = None
        depth = None
        orbitRadius = None
        if 'depth' in deployment:
            depth = deployment['depth']
            if depth is None:
                depth = 0.0
            else:
                have_location_dict = True
        if 'orbitRadius' in deployment:
            orbitRadius = deployment['orbitRadius']
            if orbitRadius is None:
                orbitRadius = 0.0
            else:
                have_location_dict = True
        if 'latitude' in deployment:
            latitude = deployment['latitude']
        if 'longitude' in deployment:
            longitude = deployment['longitude']
        if latitude is not None and longitude is not None:
            location = [longitude, latitude]
            have_location_dict = True
        else:
            if latitude is None:
                latitude = 0.0
            if longitude is None:
                longitude = 0.0
        if have_location_dict:
            location_dict = {}
            location_dict['latitude'] = latitude
            location_dict['longitude'] = longitude
            location_dict['location'] = location
            location_dict['depth'] = depth
            location_dict['orbitRadius'] = orbitRadius
        else:
            location_dict = None
        return location_dict
    except Exception as err:
        message = str(err)
        raise Exception(message)
3c2b00c97c681d89628ca8c6708545b73bf84649
7,440
from typing import List


def select_dominoes(dominoes: List[str], order: int, num_players: int) -> str:
    """Randomly generate a set of dominoes for a player

    - If the number of players is 3, each player gets 9 dominoes, plus a 10th
      domino that is to be played first (all players will get it)
    - If the number of players is 4, each player gets 7 dominoes

    Keyword arguments:
    dominoes -- a list containing a shuffled full set of dominoes
    order -- the player order (player 0, 1, 2, ...)
    num_players -- the total number of players (3 or 4)
    """
    dominoes_text = ""
    num_dominoes = 0
    if num_players == 3:
        num_dominoes = 9
    elif num_players == 4:
        num_dominoes = 7
    for i in range(num_dominoes):
        dominoes_text = dominoes_text + '[' + dominoes[(order * num_dominoes) + i] + ']'
    if num_players == 3:
        dominoes_text = dominoes_text + '<<<' + dominoes[27] + '>>>'
    return dominoes_text
c96d1a5b54e3623807ca57a3684f63fa3ff6737c
7,442
import dis


def _opcode(name):
    """Return the opcode by name from the dis module."""
    return dis.opmap[name]
d2c8612138c94da68adcc1b8979395987090157c
7,444
import re


def remove_citation(paragraph: str) -> str:
    """Remove all citations (numbers inside square brackets) in paragraph"""
    return re.sub(r'\[\d+\]', '', paragraph)
dc88606e69187143d767215ddc098affdbd185d5
7,445
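A usage sketch for remove_citation above:

print(remove_citation('Water boils at 100 degrees.[1][23]'))
# 'Water boils at 100 degrees.'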
def get_model_field(model):
    """ get the verbose_name of all fields in the model """
    field_dict = dict()
    for field in model._fields:
        attr = getattr(model, field)
        if hasattr(attr, 'verbose_name'):
            verbose_name = attr.verbose_name
            if verbose_name:
                field_dict[field] = verbose_name
    return field_dict
8ec9a4f9b571483c13d5a23330e98691279c12ad
7,446
def job_metadata_filename(metadata):
    """Construct relative filename to job metadata."""
    return "data/{metadata}".format(metadata=metadata)
bb5e8dc6c0ec50fed6801b9c67f8234d9115372a
7,447
def get_db_cols(cur, table_name, schema='public', type_map=True):
    """
    Gets the column names of a given table.
    If type_map is true, returns also a dictionary mapping each column name
    to the corresponding postgres column type.
    """
    db_cols_sql = """SELECT column_name, data_type
                     FROM information_schema.columns
                     WHERE table_schema = '{}'
                     AND table_name = '{}';
                  """.format(schema, table_name)
    cur.execute(db_cols_sql)
    res_rows = [row for row in cur][1:]
    cols = [row[0] for row in res_rows]
    if type_map:
        return cols, dict(res_rows)
    return cols
936952ea0bbc0c165f089e700828ea876d30ec16
7,448
def make_modbusmap_channel(i, chan, device_type_name):
    """Make a channel object for a row in the CSV."""
    json_obj = {
        "ah": "",
        "bytary": None,
        "al": "",
        "vn": chan['subTitle'],  # Name
        "ct": "number",  # ChangeType
        "le": "16",  # Length (16 or 32)
        "grp": str(chan['guaranteedReportPeriod']),  # GuaranteedReportPeriod
        "la": None,
        "chn": chan['name'],  # ChannelName
        "un": "1",  # DeviceNumber
        "dn": device_type_name,  # deviceName
        "vm": None,
        "lrt": "0",
        "da": "300",  # DeviceAddress
        "a": chan['helpExplanation'],  # TagName
        "c": str(chan['change']),  # Change
        "misc_u": str(chan['units']),  # Units
        "f": "1",  # FunctionCode
        "mrt": str(chan['minReportTime']),  # MinimumReportTime
        "m": "none",  # multiplier
        "m1ch": "2-{}".format(i),
        "mv": "0",  # MultiplierValue
        "s": "On",
        "r": "{}-{}".format(chan['min'], chan['max']),  # range
        "t": "int"  # type
    }
    return json_obj
cd53f32f47653e791893b90bbbf27e540d741268
7,451
def CleanData_CB(X, drop_feature):
    """
    This function is to clean the data for training and testing with CatBoost.

    X : dataframe to clean
    drop_feature : feature name(s) to be dropped, e.g., ['Time']
    """
    x_new = X.drop(drop_feature, axis=1)
    return x_new
b6c77e29147dfbdf012f787e29ba4ac364b37fa7
7,453
def split_indexes(indexes):
    """Split indexes list like 1 2 5 into 1 2 and 5."""
    left, right = [indexes[0], ], []
    left_now = True
    for i in range(1, len(indexes)):
        prev = indexes[i - 1]
        curr = indexes[i]
        if curr > prev + 1 and left_now:
            left_now = False
        if left_now:
            left.append(curr)
        else:
            right.append(curr)
    return left, right
1bdb3b57226737280b83dbdfa3226dc344eb47c0
7,455
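A usage sketch for split_indexes above: the gap between 2 and 5 is where the list splits.

print(split_indexes([1, 2, 5, 6]))  # ([1, 2], [5, 6])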
def get_exact_match(user_input, groups):
    """Return an exact match from the groups
    """
    lower_groups = [group.lower() for group in groups]
    if user_input.lower() in lower_groups:
        return groups[lower_groups.index(user_input.lower())]
8d28c05106f308bc3f65e07b011003e968cee99d
7,456
def compute_perc_id(aln):
    """ Compute percent identity of aligned region on read """
    length = len(aln.query_alignment_sequence)
    edit = dict(aln.tags)['NM']
    return 100 * (length - edit) / float(length)
7bc172649a452fc0c26d4e40d3240d709fb76534
7,457
def pipeline_dict() -> dict:
    """Pipeline config dict. Updating the labels is needed"""
    pipeline_dict = {
        "name": "german_business_names",
        "features": {
            "word": {
                "embedding_dim": 16,
            },
        },
        "head": {
            "type": "TextClassification",
            "labels": [
                "Unternehmensberatungen",
                "Friseure",
                "Tiefbau",
                "Dienstleistungen",
                "Gebrauchtwagen",
                "Restaurants",
                "Architekturbüros",
                "Elektriker",
                "Vereine",
                "Versicherungsvermittler",
                "Sanitärinstallationen",
                "Edv",
                "Maler",
                "Physiotherapie",
                "Werbeagenturen",
                "Apotheken",
                "Vermittlungen",
                "Hotels",
                "Autowerkstätten",
                "Elektrotechnik",
                "Allgemeinärzte",
                "Handelsvermittler Und -vertreter",
            ],
            "pooler": {
                "type": "gru",
                "num_layers": 1,
                "hidden_size": 16,
                "bidirectional": True,
            },
            "feedforward": {
                "num_layers": 1,
                "hidden_dims": [16],
                "activations": ["relu"],
                "dropout": [0.1],
            },
        },
    }
    return pipeline_dict
9505692f13759f392b930dff33ecf7ff781dcd9c
7,458
def get_name_from_filename(filename):
    """Gets the partition and name from a filename"""
    partition = filename.split('_', 1)[0]
    name = filename.split('_', 1)[1][:-4]
    return partition, name
606cfcc998c4a8405c9ea84b95b2c63f683dd114
7,459
def process_html(html_page, this_parser):
    """ extract links from an html page """
    this_parser.feed(html_page)
    return {
        "int_links": this_parser.int_links,
        "ext_links": this_parser.ext_links,
        "static_links": this_parser.static_links
    }
abd380ae2738bb98fdab4b5026d5cb9bdaa76efa
7,461
import torch


def _set_device(disable_cuda=False):
    """Set device to CPU or GPU.

    Parameters
    ----------
    disable_cuda : bool (default=False)
        Whether to use CPU instead of GPU.

    Returns
    -------
    device : torch.device object
        Device to use (CPU or GPU).
    """
    # XXX we might also want to use CUDA_VISIBLE_DEVICES if it is set
    if not disable_cuda and torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    return device
1d7d448dd4e4a844201b73c8da4939009e70eb5f
7,462
from pathlib import Path
import yaml
import json


def print_results_from_evaluation_dirs(work_dir_path: Path, run_numbers: list,
                                       print_results_only: bool = False) -> None:
    """Print the aggregated results from multiple evaluation runs."""
    def float_representer(dumper, value):
        text = '{0:.4f}'.format(value)
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)

    yaml.add_representer(float, float_representer)

    for run_number in run_numbers:
        eval_dir_path = work_dir_path / f'evaluation_{run_number}'
        eval_file_name = f'evaluation_results_{run_number}.json'
        print(f'--- Evaluation summary run {run_number} ---')
        with open(eval_dir_path / eval_file_name, 'r') as infile:
            results = json.load(infile)
            test_set_name = results['test_set_name']
            if print_results_only:
                results = {key: val for key, val in results.items() if 'result' in key}
                results['test_set_name'] = test_set_name
            print(yaml.dump(results))
4be2d893da5f321390c4b49cd4283c0b6f98b4d5
7,463
import os


def safe_quote_string(text):
    """
    safe_quote_string(text)
    returns the text in quotes, with escapes for any quotes in the text itself

    text - input text to quote

    returns: text in quotes with escapes
    """
    if os.sep != '\\':
        text2 = text.replace('\\', '\\\\')
        text3 = text2.replace('"', '\\"')
    else:
        # windows does not allow " in file names anyway
        text3 = text.replace('\\', '/')
    return '"' + text3 + '"'
bcbf74e8b27ab9a76564c82fbb64110c400f5493
7,464
def find_max_burst(burst_list: list, offset_start, offset_end):
    """Return the highest burst level that overlaps the given offset window.

    Args:
        burst_list (list): list of (level, start, end) burst tuples
        offset_start: start of the window of interest
        offset_end: end of the window of interest

    Returns:
        The maximum burst level found in the window (0 if none).
    """
    burst_levels = set()
    burst_levels.add(0)
    for burst in burst_list:
        if burst[2] < offset_start or offset_end < burst[1]:
            # offset_start < burst[1] < offset_end or offset_start < burst[2] < offset_end:
            pass
        else:
            burst_levels.add(burst[0])
    return max(burst_levels)
75a15acf96324cafc806a1664e89054f6ade74d2
7,465
def center_text(baseline, text):
    """Return a string with the centered text over a baseline"""
    gap = len(baseline) - (len(text) + 2)
    a1 = int(gap / 2)
    a2 = gap - a1
    return '{} {} {}'.format(baseline[:a1], text, baseline[-a2:])
c5683198cf1f28a38d307555943253bd71fe76de
7,466
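A usage sketch for center_text above over a 20-character baseline:

print(center_text('-' * 20, 'TITLE'))  # '------ TITLE -------'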
def _compute_teleport_distribution_from_ratings(user_rating, all_movies):
    """
    returns the teleporting distribution as explained in the homework.
    if a movie M has been rated, its probability is: RATE_M / SUM_OF_ALL_RATINGS
    else, its probability is: 0

    :param user_rating: a dict of (movie_id, rating)
    :param all_movies: a set of movie ids, either rated or not. It is used to
        filter the movies that have no rating, and then their probability
        will be set to 0.
    :return:
    """
    distribution = {}
    rating_sum = sum(user_rating.values())
    for movie_id, rating in user_rating.items():
        distribution[movie_id] = rating / rating_sum
    for not_rated_movie in filter(lambda x: x not in distribution, all_movies):
        distribution[not_rated_movie] = 0
    return distribution
7a88cf8a69c9fafc70e14d9337f0af25829bfb20
7,471
import ntpath


def path_base_and_leaf(path):
    """
    Splits path to a base part and a file or directory name, as in the
    following example: path: '/a/b'; base: '/a'; leaf: 'b'
    """
    head, tail = ntpath.split(path)
    if not tail:  # in case there is trailing slash at the end of path
        return {'base': ntpath.split(head)[0], 'leaf': ntpath.basename(head)}
    return {'base': head, 'leaf': tail}
956daa06f87cc60c8e304fa129fb86e49c4776ce
7,472
import re
import zipfile


def instance_name_from_zip(path):
    """Determines the instance filename within a SEC EDGAR zip archive."""
    re_instance_name = re.compile(r'.+-\d{8}\.xml')
    for name in zipfile.ZipFile(path).namelist():
        if re_instance_name.fullmatch(name):
            return name
    raise RuntimeError('Zip archive does not contain a valid SEC instance file.')
59b2154d433e500e9b0cdf39ee70d4c058da1d06
7,475
import argparse


def get_arguments():
    """Obtains command-line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--inputs', required=True, nargs='+', metavar='INPUT',
        help='read Q2 inputs from Feather files %(metavar)ss')
    parser.add_argument(
        '--output', required=True, metavar='OUTPUT',
        help='write the output to Feather file %(metavar)s')
    parser.add_argument(
        '--log', metavar='LOG',
        help='write logging information to %(metavar)s')
    return parser.parse_args()
118e76d633bf5ea37b85a53dcd5ba07cf7e46e04
7,476
import re


def headers_ok(meta):
    """check that headers are 'name' or end with c/d/ll"""
    meta_fh = open(meta)
    headers = meta_fh.readline().rstrip().split('\t')
    return headers[0] == 'name' and \
        all(map(lambda s: re.search(r'\.(c|d|ll)$', s), headers[1:]))
408975c795de8bf22529cf917ca881ca98ede4f9
7,477
import re


def has_number(name):
    """Check whether any digit (including Chinese numerals) appears in name"""
    if bool(re.search(r'\d', name)):
        return True
    num_str = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十']
    for s in num_str:
        if s in name:
            return True
    return False
56dec9664e945d852cbfee4791f386aaab15f215
7,478
def cohort_to_int(year, season, base=16):
    """cohort_to_int(year, season[, base])

    Converts cohort tuple to a unique sequential ID.

    Positional arguments:
        year (int) - 2-digit year
        season (int) - season ID

    Keyword arguments:
        base (int) - base year to treat as 0

    Returns:
        (int) - integer representing the number of seasons since the
            beginning of the base year
    """
    return 3 * (year - base) + season
1f1981eb6c43ab6f77abf6d04ba3b92d9053953d
7,479
def dict_of_transition_matrix(mat):
    """ Convert a transition matrix (list of list or numpy array) to a
    dictionary mapping (state, state) to probabilities (as used by
    :class:`pykov.Chain`)."""
    if isinstance(mat, list):
        return {(i, j): mat[i][j]
                for i in range(len(mat))
                for j in range(len(mat[i]))}
    else:
        return {(i, j): mat[i, j]
                for i in range(len(mat))
                for j in range(len(mat[i]))}
b823ff496a751f4ffe305a31f1c1d019f7a25d33
7,481
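A usage sketch for dict_of_transition_matrix above with a 2-state chain:

P = [[0.9, 0.1],
     [0.4, 0.6]]
print(dict_of_transition_matrix(P))
# {(0, 0): 0.9, (0, 1): 0.1, (1, 0): 0.4, (1, 1): 0.6}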
def save_file(filename, contents):
    """Save a file from the editor"""
    if not filename:
        return 0, 0
    with open(filename, 'w') as f:
        f.write(contents)
    return len(contents), hash(contents)
8e973f67a22a2e7b0836f8db25090c65238492e3
7,482
from typing import Tuple
import hashlib


def hashfile(path: str, blocksize: int = 65536) -> Tuple[str, str]:
    """Calculate the MD5 hash of a given file

    Args:
        path (str, os.path): Path to the file to generate a hash for
        blocksize (int, optional): Memory size to read in the file
            Default: 65536

    Returns:
        hash (str): The HEX digest hash of the given file
        path (str): The filepath that generated the hash
    """
    # Instantiate the hashlib module with md5
    hasher = hashlib.md5()

    # Open the file and instantiate the buffer
    f = open(path, "rb")
    buf = f.read(blocksize)

    # Continue to read in the file in blocks
    while len(buf) > 0:
        hasher.update(buf)  # Update the hash
        buf = f.read(blocksize)  # Update the buffer

    f.close()
    return hasher.hexdigest(), path
e38e6622534f27bed109a2e2b71373503ca4e7b0
7,483
import pathlib


def package_data() -> pathlib.Path:
    """ Returns the absolute path to the circe/data directory. """
    return pathlib.Path(__file__).parents[1].joinpath("data")
19d8fa28ba872f8633e6efddb310d30264d831e6
7,484
import re


def changeFileNoInFilePath(path: str, fileNo: int) -> str:
    """replaces the number in the path with the given number."""
    separator = r"[0-9]+\."
    splitted_path = re.split(separator, path, 1)
    new_path = splitted_path[0] + str(fileNo) + "." + splitted_path[1]
    return new_path
070fbe30d2937b57ef601fb764cf68ec219b9c95
7,485
def secret_view(request):
    """Dummy view with redirect to login."""
    return {}
9b0c3a6d2fe0b6aef2328a97d6407b72f8bc3c16
7,487
def construct_organisation_role_dict(organisation_roles):
    """Return a dict with 3 keys: organisations, roles, and organisation_roles.

    Args:
        organisation_roles: an iterable of OrganisationRoles.
    """
    data = {}
    # Defensive programming: make sure we have a unique set of
    # organisation_roles. At the moment of writing,
    # models.UserProfile.all_organisation_roles() does not...
    organisation_roles = set(organisation_roles)
    organisations = set(obj.organisation for obj in organisation_roles)
    roles = set(obj.role for obj in organisation_roles)
    data["organisation_roles"] = [
        [obj.organisation.unique_id, obj.role.unique_id]
        for obj in organisation_roles
    ]
    data["organisations"] = [obj.as_dict() for obj in organisations]
    data["roles"] = [obj.as_dict() for obj in roles]
    return data
b1105832eab9ee89dfa5d1e43b51c05fb569b953
7,489
async def is_dark_theme(monitor=None, app=None):
    """Return whether or not iTerm2 theme is dark"""
    theme = None
    if monitor:
        theme = await monitor.async_get()
    elif app:
        theme = await app.async_get_variable("effectiveTheme")
    else:
        raise ValueError('Need a monitor or app instance to detect theme')
    # Themes have space-delimited attributes, one of which will be light or dark.
    parts = theme.split(" ")
    return "dark" in parts
8c356514d19219af83a36f422d78e688351c9f09
7,490
import json


def load_json(path):
    """Load json from file"""
    with open(path) as f:
        json_object = json.load(f)
    return json_object
17db7327b6dac16aaeaff2354f828646eff695b2
7,491
import os


def module_path():
    """Figures out the full path of the directory containing this file.

    `PACKAGE_DIR` becomes the parent of that directory, which is the root
    of the solvertools package."""
    return os.path.dirname(__file__)
f4576fbdcca394f525b9419a7e9d8a81ed1ff223
7,492
def _bin_labels_to_segments(bin_labels: list) -> list[tuple]:
    """
    Convert bin labels (time-axis list data) to segment data

    >>> _bin_labels_to_segments(['female'] * 5 + ['male'] * 10 + ['noise'] * 5)
    [('female', 0, 5), ('male', 5, 15), ('noise', 15, 20)]
    """
    if len(bin_labels) == 0:
        return []
    current_label = None
    segment_start = -1
    ret = []
    i = 0
    for i, e in enumerate(bin_labels):
        if e != current_label:
            if current_label is not None:
                ret.append((current_label, segment_start, i))
            current_label = e
            segment_start = i
    ret.append((current_label, segment_start, i + 1))
    return ret
6b0eafdaf6affee33a3b655ba8ae7aebf2b38746
7,493
def build_efficiencies(efficiencies, species_names, default_efficiency=1.0):
    """Creates line with list of third-body species efficiencies.

    Parameters
    ----------
    efficiencies : dict
        Dictionary of species efficiencies
    species_names : list of str
        List of all species names
    default_efficiency : float, optional
        Default efficiency for all species; will be 0.0 for reactions with
        explicit third body

    Returns
    -------
    str
        Line with list of efficiencies
    """
    # Reactions with a default_efficiency of 0 and a single entry in the
    # efficiencies dict have an explicit third body specified.
    if len(efficiencies) == 1 and not default_efficiency:
        return ''
    reduced_efficiencies = {s: efficiencies[s] for s in efficiencies if s in species_names}
    return ' '.join([f'{s}:{v}' for s, v in reduced_efficiencies.items()])
a8f8912cd290b86697c67465b4aed18220a8c889
7,494
def generate_county_dcids(countyfips):
    """
    Args:
        countyfips: a county FIPS code

    Returns:
        the matching dcid for the FIPS code
    """
    if countyfips != 59:
        dcid = "dcid:geoId/" + str(countyfips).zfill(5)
    else:
        dcid = "dcid:country/USA"
    return dcid
ae294e5467b9c735e175d4a69ff30f8ca189c71f
7,495
def _inverse_lookup(dictionary, value):
    """Does an inverse lookup of key from value"""
    return [key for key in dictionary if dictionary[key] == value]
4ad34b27fbc35b3bae95bcb8442d1a2f7df94e9f
7,496
import os


def get_expected_returncode(filename):
    """
    Reads expectrc file to determine what the expected return code is
    """
    expected_rc = 0
    expected_rc_file = filename + '.expectrc'
    if os.path.isfile(expected_rc_file):
        with open(expected_rc_file) as f:
            expected_rc = int(f.read())
    return expected_rc
ba52ccaa3e34ed823a9a5f55c3afdcb9c59abc6f
7,497
def isentropic_beta(tab, spec, *XYf):
    """Isentropic bulk modulus"""
    return XYf[0] * tab.q['Cs2', spec](*XYf)
3592e31612c94e0000d80f8be13369338fb6b329
7,498
import os


def urllist():
    """Load image URLs
    """
    list_file = os.path.join('piclist/baidu.txt')
    url_list = []
    with open(list_file, 'r') as f:
        url_list = [line.strip() for line in f]
    return url_list[:50]
38c87fb70fc303bf49f09dac2e5f37b9c7ca03ce
7,499
def checkdeplaid(incidence):
    """
    Given an incidence angle, select the appropriate deplaid method.

    Parameters
    ----------
    incidence : float
        incidence angle extracted from the campt results.
    """
    if incidence >= 95 and incidence <= 180:
        return 'night'
    elif incidence >= 90 and incidence < 95:
        return 'night'
    elif incidence >= 85 and incidence < 90:
        return 'day'
    elif incidence >= 0 and incidence < 85:
        return 'day'
    else:
        return False
806ef360e7b5b3d7138d88be2f83267e7668d71e
7,501
def error_test(true, predict):
    """ Function for classification of errors. """
    if true == predict:
        return 1
    else:
        return 0
87c1a56ffb52e1cec61a9ce3cab870b7c70fa059
7,502
def determine_high_cor_pair(correlation_row, sorted_correlation_pairs):
    """Select highest correlated variable given a correlation row with
    columns: ["pair_a", "pair_b", "correlation"]. For use in a pandas.apply().

    Parameters
    ----------
    correlation_row : pandas.core.series.Series
        Pandas series of the specific feature in the pairwise_df
    sorted_correlation_pairs : pandas.DataFrame.index
        A sorted object by total correlative sum to all other features

    Returns
    -------
    The feature that has a lower total correlation sum with all other features
    """
    pair_a = correlation_row["pair_a"]
    pair_b = correlation_row["pair_b"]
    if sorted_correlation_pairs.get_loc(pair_a) > sorted_correlation_pairs.get_loc(pair_b):
        return pair_a
    else:
        return pair_b
36eccfe0ffb0ac43caf49fe4db8c35c58d0fa29c
7,503
def get_file_content(file_name):
    """Get the contents of a file"""
    try:
        f = open(file_name, 'rb')
        file_content = f.read()
        f.close()
    except Exception as e:
        print("Exception while opening the file")
    else:
        return file_content
329eb747a6513899b7ccfaf70754fad721f8d88d
7,504
def create_nonlocal_gateway_cluster_name(namespace: str) -> str:
    """Create the cluster name for the non-local namespace that uses a gateway."""
    return "remote-{0}-gateway".format(namespace)
9ca9758a7ee68ede6e57a7f50f2d772b45ee844b
7,507
def get_go2parents_isa(go2obj):
    """Get set of immediate parents GO IDs"""
    go2parents = {}
    for goid_main, goterm in go2obj.items():
        parents_goids = set(o.id for o in goterm.parents)
        if parents_goids:
            go2parents[goid_main] = parents_goids
    return go2parents
1a7d79e1233e497dce109690d3e2105f442bc3b9
7,508
from typing import Union
from typing import Dict
from typing import List


def flatten_errors(error_message: Union[Dict, List, str]) -> str:
    """Flatten Cerberus' error messages."""
    def flatten_dict(error_dict: Dict) -> str:
        """Return a string version of the dict."""
        return ", ".join([f"{key}: {flatten_errors(value)}"
                          for key, value in error_dict.items()])

    def flatten_list(error_list: List) -> str:
        """Return a string version of the list."""
        return ", ".join([flatten_errors(item) for item in error_list])

    if isinstance(error_message, dict):
        return flatten_dict(error_message)
    if isinstance(error_message, list):
        return flatten_list(error_message)
    return error_message
ab6f359cc214a5a1929254e6066fdc6740938f09
7,509
import os
import pickle


def get_best_estimators(classification):
    """
    Loads the estimators that are pickled in `grid` folder

    Note that if you want to use different or more estimators, you can fine
    tune the parameters in `grid_search.py` script and run it again (may
    take hours)
    """
    grid_dir = os.path.join(os.path.dirname(__file__), 'grid')
    if classification:
        return pickle.load(open(os.path.join(grid_dir, 'best_classifiers.pickle'), "rb"))
    else:
        return pickle.load(open(os.path.join(grid_dir, 'best_regressors.pickle'), "rb"))
ac35183c83ce90c0f13c5237634735f08a5d644f
7,510
def get_subset(container, subset_bounds):
    """Returns a subset of the given list with respect to the list of bounds"""
    subset = []
    for bound in subset_bounds:
        subset += container[bound[0]: bound[1]]
    return subset
4932ecba987c4936f9f467f270c6c07fd8681840
7,511
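A usage sketch for get_subset above: each bound is a half-open (start, end) slice and the slices are concatenated.

print(get_subset(list(range(10)), [(0, 3), (7, 9)]))  # [0, 1, 2, 7, 8]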
def _encode_decimal128(name, value, dummy0, dummy1):
    """Encode bson.decimal128.Decimal128."""
    return b"\x13" + name + value.bid
f0d72f1fdef51559eb66dd2ac65ba6a43a91bf85
7,512
from typing import OrderedDict


def _get_exp_uri():
    """Return expected basic result for OpenUri action."""
    return OrderedDict(
        (
            ("@type", "OpenUri"),
            ("name", "Open URL"),
            (
                "targets",
                [OrderedDict((("os", "default"), ("uri", "http://www.python.org")))],
            ),
        )
    )
a3e527a52769083fd7adb8cf04a27ebcfc272922
7,514
import getpass


def get_html_connector_kwargs_options_from_args(args):
    """Take a parsed ArgumentParser and return a dict of arguments."""
    if not args.password:
        args.password = getpass.getpass(
            "Please enter the password for {} with login {}:\n".format(args.url, args.login)
        )
    return {
        "url": args.url,
        "login": args.login,
        "password": args.password,
        "use_ftml": args.use_ftml,
        "render_cover_page": args.render_cover_page,
    }
d96f070363c6db33632974028b6363cbad739537
7,516
def yesnoquery(message):
    """
    Displays `message` and waits for user Y/N input.
    Returns Boolean where true means Y.
    """
    useryn = None
    while useryn is None:
        if not isinstance(message, str):
            raise ValueError("Must pass a valid string to query")
        useryn = input(message).lower()
        if useryn != "y" and useryn != "n":
            print("Must enter either a 'Y' or 'N'", useryn)
            useryn = None
    if useryn == "y":
        return True
    elif useryn == "n":
        return False
    else:
        return -1
87ec3cb01e4a2e52ce1cd900e5446cbab9a05373
7,518
def formatProccessingTime(ss, verbose: int = 1, estimate: bool = True, keep_seconds=False):
    """ Format processing time to string

    Args:
        ss: Time in seconds or a string
    """
    if isinstance(ss, (str, bytes)):
        res = ss
    else:
        if ss < 0:
            res = '-1'
        elif ss < 60:
            if keep_seconds:
                res = '%.1f seconds' % (float(ss))
            else:
                res = '&lt; 1 minute'
        elif ss < 3600:
            res = '%.1f minutes' % (float(ss) / 60.)
            if estimate:
                res += ' (estimate)'
        else:
            res = '%.1f hours' % (float(ss) / 3600.)
            if estimate:
                res += ' (estimate)'
    return res
a2bea60365530169013322f8a767c9da3cc44c31
7,519
def lorenz(xyz, t, sigma, beta, rho):
    """The most famous of the strange attractors."""
    x, y, z = xyz
    dx = sigma * (y - x)    # dx/dt
    dy = x * (rho - z) - y  # dy/dt
    dz = x * y - beta * z   # dz/dt
    return dx, dy, dz
4241c36b8d4b924289edaa522a49855949208327
7,520
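A usage sketch for lorenz above, integrating with SciPy's odeint (assumes scipy is available; sigma=10, beta=8/3, rho=28 are the classic chaotic parameters):

import numpy as np
from scipy.integrate import odeint

t = np.linspace(0, 40, 4001)
trajectory = odeint(lorenz, (1.0, 1.0, 1.0), t, args=(10.0, 8.0 / 3.0, 28.0))
print(trajectory.shape)  # (4001, 3)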
import copy


def add_classes_to_geojson(geojson, class_map):
    """Add missing class_names and class_ids from label GeoJSON."""
    geojson = copy.deepcopy(geojson)
    features = geojson['features']
    for feature in features:
        properties = feature.get('properties', {})
        if 'class_id' not in properties:
            if 'class_name' in properties:
                properties['class_id'] = \
                    class_map.get_by_name(properties['class_name']).id
            elif 'label' in properties:
                # label is considered a synonym of class_name for now in order
                # to interface with Raster Foundry.
                properties['class_id'] = \
                    class_map.get_by_name(properties['label']).id
                properties['class_name'] = properties['label']
            else:
                # if no class_id, class_name, or label, then just assume
                # everything corresponds to class_id = 1.
                class_id = 1
                class_name = class_map.get_by_id(class_id).name
                properties['class_id'] = class_id
                properties['class_name'] = class_name
        feature['properties'] = properties
    return geojson
9aadf15fbe64995e7e52b2f6182e76ab722f06b5
7,521
from pathlib import Path
import errno
import argparse


def is_file_ro(filename: Path) -> Path:
    """Verifies file exists and can be open for read-only

    Args:
        filename (Path): path/filename to check
    """
    try:
        with open(filename) as f:
            f.read()
        return filename
    except IOError as x:
        if x.errno == errno.ENOENT:
            raise argparse.ArgumentTypeError(f"filename: {filename} does not exist")
        elif x.errno == errno.EACCES:
            raise argparse.ArgumentTypeError(f"filename: {filename} cannot be read")
        else:
            raise argparse.ArgumentTypeError(f"error accessing {filename}")
48082137363ac60b8c6666b0236d5f4314daf47d
7,522
import re


def replace_urls(text, filler='<url>'):
    """Replaces URLs in text with `f' {filler}'`.

    Potentially induces duplicate whitespaces.
    Includes punctuation in websites (which is not really a problem, because
    URLs on Twitter are rendered as https://t.co/randomnum).
    The regex doesn't account for what's behind.
    """
    # url_regex = re.compile(
    #     r'((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))')
    twitter_url_regex = re.compile(
        # r'https?://t.co/[A-Za-z0-9]+')
        r'https?://t.co(?:/[0-9a-zA-Z]+)?')
    # Replace other urls by filler
    # Added space to separate the mention from non-space characters behind
    text = re.sub(twitter_url_regex, f' {filler} ', text)
    return text
0556120c0b8ab8a888acad550cb4cad24c5961ae
7,523
def solution(X, A):
    """Find the earliest time that a frog can jump to position X.

    In order to reach X, a leaf must be present at every position from 1 to X.

    Args:
        X (int): The position that the frog must reach.
        A (list): A list of integers from 1 to X, where A[k] represents a leaf
            falling at minute k into position A[k].

    Returns:
        int: The number of minutes that the frog must wait.

    Complexity:
        Time: O(N)
        Space: O(X)
    """
    counter = [False] * X
    total = 0
    for i, val in enumerate(A):
        if (val < 1) or (val > X):
            raise ValueError
        if not counter[val - 1]:
            counter[val - 1] = True
            total += 1
            if total == X:
                return i
    else:
        return -1
d1fec5a3ec4c6dc06cd0feab295c90cb4c920ced
7,524
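A usage sketch for solution above, the classic FrogRiverOne example: positions 1..5 are all covered once the leaf at minute 6 lands.

print(solution(5, [1, 3, 1, 4, 2, 3, 5, 4]))  # 6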