Dataset columns:
content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import inspect

def get_calling_module(point=2):
    """
    Return a module at a different point in the stack.

    :param point: the number of calls backwards in the stack.
    :return: "module:line" string for that stack frame.
    """
    frm = inspect.stack()[point]
    function = str(frm[3])
    line = str(frm[2])
    modulepath = str(frm[1]).split('/')
    module = str(modulepath.pop())
    return "%s:%s" % (module, line)
638006a14fb062810db34beefcd906b898ba45a5
8,089
def _pad_keys_tabular(data, sort):
    """Pad only the key fields in data (i.e. the strs) in a tabular way,
    such that they all take the same amount of characters

    Args:
        data: list of tuples. The first member of the tuple must be str,
            the rest can be anything.

    Returns:
        list with the strs padded with space chars in order to align in
        tabular way
    """
    if sort:
        data = sorted(data, key=lambda tup: tup[0])
    sizes = [len(t[0]) for t in data]
    pad = max(sizes) + 2
    data = [(t[0].ljust(pad), *t[1:]) for t in data]
    return data
eba58694354a89e0a6d808c08f964755f3b11822
8,090
import os

def get_file_names(path):
    """
    Given a dir path, return a list of the files directly inside it,
    not including files in its child dirs.

    :param path:
    :return:
    """
    names = []
    if os.path.exists(path):
        for (dirpath, dirnames, filenames) in os.walk(path):
            names.extend(filenames)
            break
    else:
        print('path: %s does not exist, please check.' % path)
    return names
574cbe862f16347809675895bddeb1d5c3b6cfff
8,091
def timecoverage():
    """
    Time intervals of GLDAS data
    """
    return [
        ('All Available Times', 'alltimes'),
        (2019, 2019),
        (2018, 2018),
        (2017, 2017),
        (2016, 2016),
        (2015, 2015),
        (2014, 2014),
        (2013, 2013),
        (2012, 2012),
        (2011, 2011),
        (2010, 2010),
        (2009, 2009),
        (2008, 2008),
        (2007, 2007),
        (2006, 2006),
        (2005, 2005),
        (2004, 2004),
        (2003, 2003),
        (2002, 2002),
        (2001, 2001),
        (2000, 2000),
    ]
f6f279c8f223a361785cc3a72cebfa02101f099f
8,093
import bz2
import base64

def passx_decode(passx):
    """decode the obfuscated plain text password, returns plain text password"""
    return bz2.decompress(base64.b64decode(passx.encode("ascii"))).decode("ascii")
b8b2138c55dd28734661484a231128e6f3ccbbb7
8,094
def checksum_file(summer, path):
    """
    Calculates the checksum of the file 'path' using the provided hashlib
    digest implementation. Returns the hex form of the digest.
    """
    with open(path, "rb") as f:
        # Read the file in 4KB chunks until EOF.
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            summer.update(chunk)
    return summer.hexdigest()
729b8f895fe74856e83046d0fd5177e584f835f2
8,097
def camel_case(value):
    """Convert an identifier to CamelCase"""
    return "".join(ele.title() for ele in value.split("_"))
e7c74ebe7611eb567f3eae8f16fb01aadd201252
8,098
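A quick usage sketch for the function above (illustrative value, not from the dataset):

# Splits on underscores and title-cases each part.
print(camel_case("my_variable_name"))  # -> "MyVariableName"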
def format_directory_path(path: str) -> str:
    """Replaces Windows-style path separators with forward slashes and
    adds a trailing slash to the end of the string.
    """
    if path == ".":
        return path
    formatted_path = path.replace('\\', '/')
    # `is not` compares identity, not equality; use != for string comparison.
    if formatted_path[-1] != '/':
        formatted_path += '/'
    return formatted_path
0daa0cc65e50bd29c76da64d302c0e01c1bb333b
8,101
def get_metagen_search_body(self):
    """ Get the MetaGenSearchView view body.

    Attributes
    ----------
    measure: str
        the genomic measure label.
    gene: str
        the gene name.

    Returns
    -------
    html: str
        the MetaGenSearchView view body.
    """
    # Get parameters
    measure = self._cw.form["measure"]
    gene = self._cw.form["gene"]

    # Get the HTML body code
    view = self._cw.vreg["views"].select("metagen-search", req=self._cw, rset=None)
    html = view.render(measure=measure, gene=gene, export_type="data", subjects="all")
    return html
9b1e8c6072991765cf566f1bccccb38b178141a4
8,102
def _toCamelCase(string):
    """Convert a snake case string (PyTorch) to camel case (PopART)"""
    words = string.split("_")
    return words[0] + "".join(w.capitalize() for w in words[1:])
f1f21b0313c03b3d63944ee3fcbd5e16b435da6d
8,103
def pulse(x):
    """Return the pulse fn of the input."""
    return 2 * (x % 1 < .5) - 1
f54f73ab6656c0242508170c16ab6ee6a0cc5b92
8,104
def index_singleton_clusters(clusters):
    """Replace cluster labels of -1 with ascending integers larger than the
    maximum cluster index.
    """
    clusters = clusters.copy()
    filt = clusters == -1
    n = clusters.max()
    # Assign one fresh label per singleton. `filt.sum()` counts the -1 entries;
    # the original used len(filt), which counts every element and would make
    # the assignment shapes mismatch. Start at n + 1 so labels exceed the max.
    clusters[filt] = range(n + 1, n + 1 + filt.sum())
    return clusters
9cad0df27d2d99ef3a7478f3c3753cd7795beb54
8,105
def bboxes_filter():
    """ """
    def _augment(image, bboxes, classes=None):
        return image, bboxes, classes
    return _augment
deee3384f7567181eda6e6735aab01d6967787af
8,107
import os

def write_model_to_file(model: str, file_path: str = "model.py") -> str:
    """Write the Pydantic Model string to a Python file.

    Args:
        model: The Pydantic Model string.
        file_path: The path must include the .py file extension.
            * The file_path is relative to the Workspace Root.

    Returns:
        The path to the Model Python file.
    """
    root = os.path.join(os.path.dirname(__file__), "../")
    fp = os.path.join(root, file_path)
    with open(fp, "w") as f:
        f.write(model)
    return fp
01ab968d8130c03d748d39338b634e02d6332a0a
8,108
def scaled_up_roi(roi, scale: int, shape=None):
    """
    Compute ROI for a scaled up image.

    Given a crop region in the original image compute the equivalent crop
    in the upsampled image.

    :param roi: ROI in the original image
    :param scale: integer scale to get scaled up image
    :return: ROI in the scaled up image
    """
    roi = tuple(slice(s.start * scale, s.stop * scale) for s in roi)
    if shape is not None:
        roi = tuple(
            slice(min(dim, s.start), min(dim, s.stop)) for s, dim in zip(roi, shape)
        )
    return roi
24f160bde7f995861aee3f0c20001ce4093aa58a
8,109
import torch

def lowpass_filtering_in_frequency_domain(
    image_grad: torch.Tensor, lowpass: torch.Tensor
) -> torch.Tensor:
    """Applies lowpass filtering in the frequency domain as described in
    Walker et al. 2019.

    Args:
        image_grad (torch.Tensor): gradient
        lowpass (torch.Tensor): lowpass tensor (constant)

    Returns:
        torch.Tensor: filtered gradient
    """
    batch_size, channels, height, width = image_grad.shape
    image_grad = image_grad.squeeze()
    grad_fft = torch.fft.fft2(image_grad.data)
    # Convolution in the image domain is an elementwise product in the
    # frequency domain.
    filtered_tensor = grad_fft * lowpass
    out = torch.fft.ifft2(filtered_tensor)
    out = out.real
    out = out.reshape(batch_size, channels, height, width)
    return out
847d89f9f0078838b594b61df9dd4544cb2bd062
8,110
import torch

def ldot(u, v, keepdim=False):
    """Lorentzian scalar product"""
    uv = u * v
    uv.narrow(-1, 0, 1).mul_(-1)
    return torch.sum(uv, dim=-1, keepdim=keepdim)
59c26e622ef5ffe94d92a2e8c0fc40d316b58f5b
8,112
def test_depends(func):
    """Decorator to prevent a test being executed in individual mode"""
    def invalid(self, test):
        if self.test_individual:
            test.description = "Invalid"
            return test.DISABLED("This test cannot be performed individually")
        else:
            return func(self, test)
    invalid.__name__ = func.__name__
    invalid.__doc__ = func.__doc__
    return invalid
4b2db29fc8c0a30ec3a4ec6c3fb93ed958f0094e
8,113
import re

def compiler_call(executable):
    """ A predicate to decide the entry is a compiler call or not. """
    compilers = [
        re.compile(r'^([^/]*/)*([^-]*-)*c(c|\+\+)$'),
        re.compile(r'^([^/]*/)*([^-]*-)*g(cc|\+\+)(-\d+(\.\d+){0,2})?$'),
        re.compile(r'^([^/]*/)*([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'),
        re.compile(r'^([^/]*/)*llvm-g(cc|\+\+)$'),
    ]
    return any((pattern.match(executable) for pattern in compilers))
d8fa6fa22f13b13154579e19a4ab18e016a56caf
8,115
def fix_month(bib_str: str) -> str:
    """Fixes the month field formatting in a bibtex entry by replacing braced
    month names like {Jan} or {jan} with the bare three-letter abbreviation."""
    months = ("jan", "feb", "mar", "apr", "may", "jun",
              "jul", "aug", "sep", "oct", "nov", "dec")
    for month in months:
        bib_str = (bib_str.replace("{%s}" % month.capitalize(), month)
                          .replace("{%s}" % month, month))
    return bib_str
9bdcb06dc43a6d6748af20d5279ca38ec6aa1d0a
8,118
import os

def _is_attribute_file(filepath):
    """
    Check, if ``filepath`` points to a valid udev attribute filename.

    Implementation is stolen from udev source code, ``print_all_attributes``
    in ``udev/udevadm-info.c``.  It excludes hidden files (starting with a
    dot), the special files ``dev`` and ``uevent`` and links.

    Return ``True``, if ``filepath`` refers to an attribute, ``False``
    otherwise.
    """
    filename = os.path.basename(filepath)
    return not (filename.startswith('.') or
                filename in ('dev', 'uevent') or
                os.path.islink(filepath))
6fbfc3b7de0f192ad96118f412ff7ea4ab42e06a
8,119
def pad(text: str, width: int, align: str = '<', fill: str = ' '):
    """
    pad the string with `fill` to length of `width`

    :param text: text to pad
    :param width: expected length
    :param align: left: <, center: ^, right: >
    :param fill: char to fill the padding
    :return:
    """
    assert align in ('<', '^', '>')
    return f"{text:{fill}{align}{width}}"
74befd22927438961b85e370ed16239d7df52707
8,120
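A usage sketch for pad above (illustrative values): the f-string format spec handles fill and alignment in one step.

print(pad("abc", 7, align='^', fill='-'))  # -> '--abc--'
print(pad("abc", 7))                       # -> 'abc    ' (left-aligned, space fill)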
import torch

def slerp(val, low, high):
    """
    val, low, high: bs x frames x coordinates
    if val == 0 then low
    if val == 1 then high
    """
    assert low.dim() == 3, low.dim()
    assert val.dim() == 3, val.dim()
    assert high.dim() == 3, high.dim()
    low_norm = low / torch.norm(low, dim=2, keepdim=True)
    high_norm = high / torch.norm(high, dim=2, keepdim=True)
    omega = torch.acos((low_norm * high_norm).sum(dim=2)).unsqueeze(-1)  # bs x frames x 1
    so = torch.sin(omega)
    res = (torch.sin((1.0 - val) * omega) / so) * low + \
          (torch.sin(val * omega) / so) * high
    return res
96a74c366139f3f617676e5e011884f698569b97
8,121
import torch

def create_input(shape):
    """Create a random input tensor."""
    return torch.rand(shape).float()
88a907bae19882a4a7c1a0b819eb0deccb065752
8,123
def input_thing():
    """Read an item's name, price and weight from input."""
    name_str, price_str, weight_str = input('Item name, price, weight: ').split()
    return name_str, int(price_str), int(weight_str)
d48452a942263ae522e93474c6cc4a5cdd9e097c
8,124
import string
import random

def gene_text(number=6):
    """Generate a random string of `number` characters."""
    source = string.ascii_letters + string.digits
    return ''.join(random.sample(source, number))
40970c8b938797243d7626ebad48e78116df657a
8,125
def add_data_to_list(twitter_return):
    """
    Extract the data from the twitter_return dictionary and place in a list
    """
    twitter_dict = twitter_return['response_dict']
    # Grab the twitter data
    twitter_data_list = twitter_dict['data']
    return twitter_data_list
9e2a2a5e22926b604856c1ec1ae20ebf765b8610
8,128
def check_n_files(subject, collector, keep, n_file=1):
    """
    Organise download path per subject for the two sessions.
    If the total number of files is not as expected, drop the subject.

    Parameters
    ----------
    subject: str
        Subject ID.
    collector: dict
        Dictionary collecting files per subject
    keep: list
        List of zipped tuple of source and target path
    n_file: int
        Number of files

    Return
    ------
    dict
        Updated `collector` dictionary
    """
    download_these = collector.copy()
    if not download_these.get(subject):
        # first session
        download_these[subject] = keep.copy()
    elif len(download_these[subject]) < n_file:
        download_these[subject] += keep.copy()
    if len(download_these[subject]) != n_file:
        download_these.pop(subject)
    return download_these
56e9ec830d573b8fc8f5c50c1b6bc126a44653a7
8,129
from typing import Dict

def get_noise_range_pcts(db_range_exps: dict, length: float) -> Dict[int, float]:
    """Calculates percentages of aggregated exposures to different noise
    levels of total length.

    Note:
        Noise levels exceeding 70 dB are aggregated, as are noise levels
        lower than 50 dB.
    Returns:
        A dictionary containing noise level values with respective percentages.
        (e.g. { 50: 35.00, 60: 65.00 })
    """
    return {
        db_range: round(db_range_length * 100 / length, 3)
        for db_range, db_range_length in db_range_exps.items()
    }
723c7e45a24c149df6f5f19b3f8aabb1f8d5b184
8,130
def calc_n_max_vehicle(n2v_g_vmax, v_max):
    """Calc `n_max3` of Annex 2-2.g from `v_max` (Annex 2-2.i)."""
    return n2v_g_vmax * v_max
457562edce05aebf7d7b870a232b4e0a01df5055
8,133
import unittest

def test_suite():
    """Discover unittests"""
    test_loader = unittest.TestLoader()
    test_suite = test_loader.discover('str_analysis', pattern='*tests.py')
    return test_suite
351d2f09e9fcd0e709d04ed3af5283cd7d94768d
8,136
def distance(woolrgb, cpixel):
    """Find the colored wool block whose color is closest to the pixel's color."""
    r = cpixel[0]
    g = cpixel[1]
    b = cpixel[2]
    did = 0
    dmin = 255 * 255 * 3
    for i in woolrgb:
        dr = r - woolrgb[i][0]
        dg = g - woolrgb[i][1]
        db = b - woolrgb[i][2]
        d = dr * dr + dg * dg + db * db
        if d < dmin:
            dmin = d
            did = i
    return did
d6247e40ba12a271dc6784f71088ad0e838c600a
8,139
def get_filepath_wo_ext(file_spec):
    """
    Get file path without extension

    Parameters
    ----------
    file_spec : DataStruct
        The function uses the attributes output_dir and fname_wo_ext to
        construct the file path

    Returns
    -------
    out : str
        Constructed file path
    """
    return file_spec.output_dir + '/' + file_spec.fname_wo_ext
6ef9d329292769a3f163678915ff696ef3b8fe1a
8,141
def prob_double_roll(x, n):
    """ Expected probabilities for the sum of two dice."""
    # For two n-sided dice, the probability of two rolls summing to x is
    # (n - |x - (n+1)|) / n^2, for x = 2 to 2n.
    return (n - abs(x - (n + 1))) / n**2
30d891203a09807ce9dcd16c3f9c05cddef00b1c
8,142
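A small sanity check (illustrative, not from the source): for two six-sided dice the probabilities over x = 2..12 sum to 1, and 7 is the most likely sum.

assert abs(sum(prob_double_roll(x, 6) for x in range(2, 13)) - 1.0) < 1e-9
assert prob_double_roll(7, 6) == 6 / 36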
def amplitude(data):
    """ Calculates the amplitude of a data list. """
    n = len(data)
    if n == 0:
        amplitude = None
    else:
        min_value = min(data)
        max_value = max(data)
        # Half the peak-to-peak range; the original (max + min) / 2 computed
        # the midpoint, not the amplitude.
        amplitude = (max_value - min_value) / 2.0
    return amplitude
4dc053287f2de3748961943a8d9d064c9a3d1f87
8,143
def ranks_from_scores(scores):
    """Return the indices of the scores, ordered from highest to lowest score"""
    return sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
f49fe306f456d990be5ad8308eae07f186a89da6
8,145
import struct

def unpack_word(data, big_endian=False):
    """ Unpacks a 32-bit word from binary data. """
    # Parameter renamed from `str`, which shadowed the builtin.
    endian = ">" if big_endian else "<"
    return struct.unpack("%sL" % endian, data)[0]
8da8d168b1828062bd44ca3142c8b389bfd634c7
8,146
import ast

def parse_source(source, path=None):
    """Parse python source into an AST."""
    path = "<unknown>" if path is None else path
    return ast.parse(source, filename=path)
7d1188e96b3a72220eca084cf18fc5f7b0b35ef3
8,147
import argparse

def post_process_arguments(args: argparse.Namespace) -> argparse.Namespace:
    """
    post process sertop arguments

    :param args: args parsed
    :return: new name space
    """
    for dirpath in args.coq_args_I:
        args.sertop_args.extend(("-I", dirpath))
    for pair in args.coq_args_R:
        args.sertop_args.extend(("-R", ",".join(pair)))
    for pair in args.coq_args_Q:
        args.sertop_args.extend(("-Q", ",".join(pair)))
    return args
f31ef03bb1df0a13d3f3715e24588ed5373c66eb
8,148
def is_hit(x, y):
    """Return whether given coords hit a circular target of r=1."""
    return x*x + y*y <= 1
4f71afa458ad0a891010e1f5a2be3049b0818c71
8,149
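A classic use of this predicate (a sketch, not from the source): Monte Carlo estimation of pi by sampling the unit square, which covers a quarter of the unit circle.

import random
n = 100_000
hits = sum(is_hit(random.random(), random.random()) for _ in range(n))
print(4 * hits / n)  # approaches pi as n grows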
def set_fpn_weights(training_model, inference_model, verbose=False):
    """
    Set feature pyramid network (FPN) weights from training to inference graph

    Args:
        training_model: MaskRCNN training graph, tf.keras.Model
        inference_model: MaskRCNN inference graph, tf.keras.Model
        verbose: Print layers that get weights, bool

    Returns:
        inference_model
    """
    fpn_layers = ['fpn_c5p5', 'fpn_c4p4', 'fpn_c3p3', 'fpn_c2p2',
                  'fpn_p5', 'fpn_p4', 'fpn_p3', 'fpn_p2']
    for layer_name in fpn_layers:
        # Get weights from training graph
        layer_weights = training_model.get_layer(layer_name).get_weights()
        # Set weights in inference graph
        inference_model.get_layer(layer_name).set_weights(layer_weights)
        if verbose:
            print(f'Set weights: {layer_name}')
    return inference_model
29aadfcd0dcb50edb41938433ff644b1b62f209e
8,150
def _get_persistent_binding(app, device_addr):
    """
    :return: bool
    """
    x = app.__dict__.get('persistent_binding', False)
    if x and device_addr is None:
        msg = ('In case of `persistent_binding` set to `True`, '
               'the `device_addr` should be set and fixed.')
        raise ValueError(msg)
    return x
3f6f4f71297fd8a6a9615d2a76529e5376e4786e
8,151
import math

def pop_age_data(pop, code, age, percent_pop):
    """Select and return the proportion value of population
    for a given municipality, gender and age"""
    n_pop = pop[pop['code'] == str(code)][age].iloc[0] * percent_pop
    rounded = int(round(n_pop))
    # for small `percent_pop`, sometimes we get 0
    # when it's better to have at least 1 agent
    if rounded == 0 and math.ceil(n_pop) == 1:
        return 1
    return rounded
f3a6a0c972eeea5a351d0f734a41a498035e5ecc
8,152
def navamsa_from_long(longitude):
    """Calculates the navamsa-sign in which given longitude falls
    0 = Aries, 1 = Taurus, ..., 11 = Pisces
    """
    one_pada = (360 / (12 * 9))  # There are also 108 navamsas
    one_sign = 12 * one_pada     # = 40 degrees exactly
    signs_elapsed = longitude / one_sign
    fraction_left = signs_elapsed % 1
    return int(fraction_left * 12)
d151f66c0e69541ccdcc3cabc4d0de82e7aa84bd
8,153
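A worked example (illustrative): longitude 95.0 is 2.375 navamsa-signs of 40 degrees each, so the fractional part 0.375 maps to int(0.375 * 12) = 4, i.e. Leo.

assert navamsa_from_long(95.0) == 4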
def compare_connexion(conn1, conn2):
    """See if two connexions are the same.

    Because the :class:`connexion` could store the two components in
    different orders, or have different instances of the same component
    object, direct comparison may fail.  This function explicitly compares
    both possible combinations of serial numbers.

    Parameters
    ----------
    conn1 : :obj:`connexion`
        The first connexion object.
    conn2 : :obj:`connexion`
        The second connexion object.

    Returns
    -------
    :obj:`True` if the connexions are the same, :obj:`False` otherwise.
    """
    sn11 = conn1.comp1.sn
    sn12 = conn1.comp2.sn
    sn21 = conn2.comp1.sn
    sn22 = conn2.comp2.sn
    if (sn11 == sn21 and sn12 == sn22) or (sn11 == sn22 and sn12 == sn21):
        return True
    else:
        return False
25c3737cfcdb0ab6516237ea5423e2237cc94529
8,154
def without_end_slash(url):
    """Makes sure there is no end slash at the end of a url."""
    return url.rstrip("/")
19d6b49f7d2a788ea4bb81179e596eb6f019843e
8,155
def CloneNodeList(nodeList):
    """Return a clone of the given nodeList."""
    # This is tricky because we want to maintain client/server links.
    # Make a list of cloned nodes; node_map maps original nodes to new nodes
    # (renamed from `map`, which shadowed the builtin).
    cloneList = []
    node_map = {}
    for node in nodeList:
        newNode = node.Clone()
        newNode.RemoveAllServers()
        cloneList.append(newNode)
        node_map[node] = newNode
    # fixup client/server links
    for node in nodeList:
        for s in node.GetServers():
            if s in nodeList:
                # connect the corresponding new server to
                # corresponding new client
                assert s in node_map.keys()
                newServer = node_map[s]
                assert newServer in cloneList
                newNode = node_map[node]
                newNode.LastSPU().AddServer(newServer)
    return cloneList
cfaeab6edc6d40dd3373ecedc1641b3f542deda4
8,157
from stat import S_ISDIR

def list_remote_files(con, directory):
    """
    List the files and folders in a remote directory using an active
    SFTPClient from Paramiko

    :param con: SFTPClient, an active connection to an SFTP server
    :param directory: string, the directory to search
    :return: (generator, generator), the files and directories as separate generators
    """
    all_files = [file for file in con.listdir_attr(directory)]
    files = []
    dirs = []
    for file in all_files:
        # ad-hoc add the remote filepath since Paramiko ignores this
        file.path = directory + f'/{file.filename}'
        if S_ISDIR(file.st_mode):
            dirs.append(file)
        else:
            files.append(file)
    files = (file for file in files)
    dirs = (dir for dir in dirs)
    return (files, dirs)
f65e4a5d48f793ff3703ea5e4fc88a0e9b7ea39d
8,159
def error_rate(predictions, imgs):
    """Return half the mean squared error between predictions and images."""
    return 0.5 * ((predictions - imgs)**2).mean()
22fb7ccee4facff54e41e784cdb7c317e0e3f8dc
8,160
import json

def unjsonb(data):
    """bytes -> list"""
    # Parameter renamed from `bytes`, which shadowed the builtin.
    return [json.loads(s.decode("utf-8")) for s in data.splitlines()]
e831851ea0149d46d73610fc1979168aba9ef1cd
8,161
def doHammingByte(byte):
    """Secures one byte by hamming code"""
    # P1 = A1 xor A2 xor A4 xor A5 xor A7
    P1 = str(int(byte[0]) ^ int(byte[1]) ^ int(byte[3]) ^ int(byte[4]) ^ int(byte[6]))
    # P2 = A1 xor A3 xor A4 xor A6 xor A7
    P2 = str(int(byte[0]) ^ int(byte[2]) ^ int(byte[3]) ^ int(byte[5]) ^ int(byte[6]))
    # P3 = A2 xor A3 xor A4 xor A8
    P3 = str(int(byte[1]) ^ int(byte[2]) ^ int(byte[3]) ^ int(byte[7]))
    # P4 = A5 xor A6 xor A7 xor A8
    P4 = str(int(byte[4]) ^ int(byte[5]) ^ int(byte[6]) ^ int(byte[7]))
    # PX = C1 to C12
    PX = str(int(P1) ^ int(P2) ^ int(byte[0]) ^ int(P3) ^ int(byte[1]) ^
             int(byte[2]) ^ int(byte[3]) ^ int(P4) ^ int(byte[4]) ^
             int(byte[5]) ^ int(byte[6]) ^ int(byte[7]))
    return PX + P1 + P2 + byte[0] + P3 + byte[1] + byte[2] + \
        byte[3] + P4 + byte[4] + byte[5] + byte[6] + byte[7]
fca53ce3958820ac3ed122d00045edd46d7e36d8
8,162
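A usage sketch (hypothetical input): the function consumes a string of 8 data bits and emits a 13-character codeword, the overall parity PX followed by the interleaved Hamming(12,8) parity and data bits.

print(doHammingByte("10110010"))  # 13-bit string: PX + parity/data bits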
def get_sleep_time(time_start, time_now, poll_int):
    """Calculate time to sleep.

    Args:
        time_start(datetime): Time loop started
        time_now(datetime): Current time
        poll_int(int): Poll interval in seconds

    Returns:
        sleep_time(float)
    """
    time_sleep = poll_int - (time_now - time_start).total_seconds()
    if time_sleep < 0:
        time_sleep = 0
    return time_sleep
6d3a00d12c8ff5677c9625228a2150bca55762de
8,163
import os

def expand_session(name):
    """
    Helper function that creates the session names for the original mkdir
    and the actual code being run.
    """
    user_name = os.environ.get('SUDO_USER') or os.environ['USER']
    return "{}_cp_{}".format(user_name, name), "{}_run_{}".format(user_name, name)
9426d2f3d867b7c69a0b366d0cecef585ec9ecc7
8,164
def distinct_count(daskDf, columnName):
    """Counts the distinct number of values in a Dask dataframe

    Keyword arguments:
    daskDf -- Dask dataframe
    columnName -- Column name

    Return:
    return -- Distinct number of values
    """
    return daskDf[columnName].drop_duplicates().size
27a03a6eef1f9c949d22f02e5d880ef626b6acf6
8,165
def verse(num_of_bottles):
    """bottle verse"""
    b = "bottle" if num_of_bottles == 1 else "bottles"
    if num_of_bottles == 1:
        last = "No more bottles"
    elif num_of_bottles == 2:
        last = "1 bottle"
    else:
        last = f'{num_of_bottles - 1} bottles'
    return '\n'.join([
        f'{num_of_bottles} {b} of beer on the wall,',
        f'{num_of_bottles} {b} of beer,',
        'Take one down, pass it around,',
        f'{last} of beer on the wall!'
    ])
57cf46e430a7a75deb9e6b88c5defa35991fda4e
8,167
def fizz_buzz(number):
    """
    Transform the input number to a string that follows the Fizz Buzz rules

    Args:
        number: int
    Returns:
        string
    """
    # Parameter renamed from `tuple`, which shadowed the builtin and held an
    # int in practice.
    if number == 0:
        return None
    ret_val = ""
    if number % 3 == 0:
        ret_val += "Fizz"
    if number % 5 == 0:
        ret_val += "Buzz"
    if len(ret_val) == 0:
        ret_val += str(number)
    else:
        ret_val += "!"
    return ret_val
4053cad878c05fa0430835a9a9728a6c0b8e420d
8,168
from typing import OrderedDict

def save_chain(config, chain):
    """
    Encode a chain of operation classes as a JSON-serializable dict.

    :param config: dictionary with settings.
    :param chain: OrderedDict of operation class lists.
    :return: dict version of the above (pass it to json.dumps to serialize).
    """
    di = OrderedDict()
    di['__config__'] = config
    for key, ops in chain.items():
        di[key] = [op.to_json() for op in ops]
    return di
d6838dfe1f079233ba8e3d93c62ddc1ebbcec5e4
8,170
def db_connection_string(dbconf):
    # type: (dict) -> str
    """
    Constructs a database connection string from the passed configuration object.
    """
    user = dbconf["user"]
    password = dbconf["password"]
    db_name = "traffic_ops" if dbconf["type"] == "Pg" else dbconf["type"]
    hostname = dbconf["hostname"]
    port = dbconf["port"]
    return "postgresql://{user}:{password}@{hostname}:{port}/{db_name}".format(
        user=user, password=password, hostname=hostname, port=port, db_name=db_name)
3fbb52c398f5150f6101b9d0d286f1db1b8aa99f
8,171
def normalize_units(
    df,
    unitsmap,
    targetunit,
    paramcol="parameter",
    rescol="res",
    unitcol="units",
    napolicy="ignore",
):
    """
    Normalize units of measure in a dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe containing results and units of measure data.
    unitsmap : dictionary
        Dictionary where keys are the units present in the df and values
        are the conversion factors required to standardize results to a
        common unit.
    targetunit : string or dict
        The desired final units of measure. Must be present in
        ``unitsmap``. If a string, all rows in df will be assigned
        ``targetunit``. If a dictionary, the units will be mapped from
        the dictionary using the values of ``paramcol`` as keys.
    paramcol, rescol, unitcol : string, optional
        Labels for the parameter, results, and units columns in the df.
    napolicy : string, optional
        Determines how null/missing values are handled. By default, this
        is set to "ignore". Use "raise" to throw a ``ValueError`` when
        unit conversion or target units data cannot be found.

    Returns
    -------
    normalized : pandas.DataFrame
        Dataframe with normalized units of measure.
    """
    # determine the preferred units in the wqdata
    target = df[paramcol].map(targetunit)

    # factors to normalize to standard units
    normalization = df[unitcol].map(unitsmap)

    # factor to convert to preferred units
    conversion = target.map(unitsmap)

    if napolicy == "raise":
        msg = ""
        if target.isnull().any():
            nulls = df[target.isnull()][paramcol].unique()
            msg += "Some target units could not be mapped to the {} column ({})\n".format(
                paramcol, nulls
            )
        if normalization.isnull().any():
            nulls = df[normalization.isnull()][unitcol].unique()
            msg += "Some normalization factors could not be mapped to the {} column ({})\n".format(
                unitcol, nulls
            )
        if conversion.isnull().any():
            nulls = target[conversion.isnull()]
            msg += "Some conversion factors could not be mapped to the target units ({})".format(
                nulls
            )
        if len(msg) > 0:
            raise ValueError(msg)

    # convert results
    normalized = df.assign(
        **{rescol: df[rescol] * normalization / conversion, unitcol: target}
    )
    return normalized
89aa2692ae778eede36d02b8bea756793a55c172
8,172
def shorten(k):
    """
    k an attrname like foo_bar_baz.  We return fbb, fbbaz, which we'll
    match startswith style if exact match not unique.
    """
    parts = k.split('_')
    r = ''.join([s[0] for s in parts if s])
    return r, r + parts[-1][1:]
6d9c29849cc5a63ec466d2548ea2492d112946cf
8,173
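A quick illustration of the two abbreviations it produces (values are hypothetical):

assert shorten("foo_bar_baz") == ("fbb", "fbbaz")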
def standardizeText(line, forward=True):
    """
    Remove whitespace, lowercase, and end with termination character \r
    """
    text = line.strip().lower()[:63]
    return (text if forward else text[::-1]) + '\r'
5487f416abe78385f712c7b0ec652b4548accbf0
8,175
def selstr(a, start, stop):
    """
    Select characters of a string by index range.

    :param a: a single string.
    :param start: int, index of the first character to select.
    :param stop: int, index one past the last character to select (exclusive).
    :return: the selected substring.
    """
    if not isinstance(a, str):
        raise TypeError("a: must be a single string")
    out = []
    for i in range(start, stop):
        out.append(a[i])
    out = "".join(out)
    return out
91400815c1be10f1691be2799ce84229d121afec
8,176
from typing import Any, Dict, List

def format_sents_for_output(sents: List[str], doc_id: str) -> Dict[str, Dict[str, Any]]:
    """
    Transform a list of sentences into a dict of format:
        {
            "sent_id": {"text": "sentence text", "label": []}
        }
    """
    formatted_sents = {}
    for i, sent in enumerate(sents):
        formatted_sents.update({f"{doc_id}_sent_{i}": {"text": sent, "label": []}})
    return formatted_sents
d6178ac48da4d95e8d3727ca9220168e06ba223e
8,177
def get_guess(guesses):
    """
    This function will get the user's guess.
    """
    # Get the user's guess.
    guess = input("------\nGuess a letter: ")
    # Check if the user has already guessed the letter.
    if guess in guesses:
        print("You've already guessed that letter.")
        return get_guess(guesses)
    # Return the guess.
    return guess
04b559d3850421ef91fa1ce5d9850b2f4852f917
8,180
from typing import Counter

def generate_common_dict(d1, d2):
    """Generate a Counter by combining d1 and d2

    Args:
        d1 (list): a list of words
        d2 (list): a list of words

    Returns:
        Counter: combined dictionary
    """
    word1 = d1.copy()
    word1.extend(d2)
    c = Counter(word1)
    return c
581a41f60ae47caadced8a08ef1c4356a9b7d3b8
8,182
def calc_process_time(t1, t2):
    """Calculates difference between times

    Args:
        t1 (float): initial time
        t2 (float): end time

    Returns:
        str: difference in times
    """
    return str(t2 - t1)
72e59c1a053041aae53cdb4415b241499eefdd4c
8,183
from typing import Iterable
import math

def percentile(N: Iterable, percent: int):
    """
    Find the percentile of a list of values.

    Stolen from
    http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
    """
    if not N:
        raise ValueError('N must be non-empty iterable')
    if not (0 < percent < 100 and type(percent) == int):
        raise ValueError('percent parameter must be integer from 0 to 100')
    N = sorted(N)
    k = (len(N) - 1) * percent / 100
    prev_index = math.floor(k)
    next_index = math.ceil(k)
    if prev_index == next_index:
        return N[int(k)]
    d0 = N[prev_index] * (next_index - k)
    d1 = N[next_index] * (k - prev_index)
    return d0 + d1
9e6402b60ec077fe43ca807fa73aac27267cfd2b
8,184
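A worked example (illustrative): the 50th percentile of [1, 2, 3, 4] interpolates between the two middle values, k = 1.5, giving 2*0.5 + 3*0.5.

assert percentile([1, 2, 3, 4], 50) == 2.5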
from typing import Any

def _lower(obj: Any) -> str:
    """Helper for the sort filter"""
    try:
        return str(obj).lower()
    except AttributeError:
        return ""
45b8d5de8b74cb40b32f2369a8a3d8ff9ae8d6d1
8,186
import ast

def _visit_local(gen_sym, node, to_mangle, mangled):
    """
    Replacing known variables with literal values
    """
    is_name = type(node) == ast.Name

    node_id = node.id if is_name else node.arg
    if node_id in to_mangle:
        if node_id in mangled:
            mangled_id = mangled[node_id]
        else:
            mangled_id, gen_sym = gen_sym('mangled')
            mangled = mangled.set(node_id, mangled_id)

        if is_name:
            new_node = ast.Name(id=mangled_id, ctx=node.ctx)
        else:
            new_node = ast.arg(arg=mangled_id, annotation=node.annotation)
    else:
        new_node = node

    return gen_sym, new_node, mangled
26bd532d8f3c73cd25395a4982aff63e75fdc5ac
8,187
def get_longest_key(tuple_of_tuples):
    """
    Why is this needed? Because sometimes we want to know how long a
    CharField should be -- so let's have it as long as the longest choice
    available.  (For example, when we have a radio button and we want to
    store a single value.)

    INPUT=(
        ('short', 'blahblahblah'),
        ('longer', 'blahblahblah'),
        ('longest', 'blahblahblah')
    )

    OUTPUT=len(longest)

    USAGE:
        BLAH_CHOICES=(...)
        blah=CharField(max_length=get_longest_key(BLAH_CHOICES))
    """
    # Measure the keys, not the values: the stored value of a choice field is
    # the key, and the docstring's OUTPUT is len('longest'). The original
    # iterated over .values(), contradicting the name and docstring.
    return max(len(k) for k in dict(tuple_of_tuples))
a0a55ced79bb6e27edb82790ee7ea4d1c1efc3c7
8,188
def mass_within_region(gals, x_bound, y_bound):
    """
    Calculate the total mass and number of galaxies within a specified region.

    Parameters
    ----------
    gals: list of ``Galaxy`` class instances.
        Galaxies that we're calculating the mass for.

    x_bound, y_bound: [float, float]
        The minimum and maximum bounds that define the region we're
        averaging inside.

    Returns
    -------
    mass_in_region: float
        The total galaxy mass within the specified region.

    num_gals_in_region: int
        The number of galaxies within the specified region.
    """
    # Initialize our counters.
    mass_in_region = 0.0
    num_gals_in_region = 0
    region_bounds = [x_bound, y_bound]

    for gal in gals:
        gal_pos = [gal.x, gal.y]
        in_region = True

        # We're going to go through each dimension of the galaxy and ask if
        # the position is inside the region.
        for region_bound, dim_pos in zip(region_bounds, gal_pos):
            # Galaxy is outside the region. Flag it and move to the next galaxy.
            if dim_pos < region_bound[0] or dim_pos > region_bound[1]:
                in_region = False
                break

        # Galaxy was in the region, add it.
        if in_region:
            mass_in_region += gal.mass
            num_gals_in_region += 1

    return mass_in_region, num_gals_in_region
b9d1564a88239ab33402255c44646101f8116060
8,189
def splitCentersByStrips(centers, splitter_array):
    """
    Split list of note centers by strip.
    """
    # Sort by rows
    row_sorted_centers = sorted(centers, key=lambda x: x[1])

    # Split noteheads into strips
    strips = {}
    current_offset = 0
    counter = 0
    for i in range(1, len(splitter_array) + 1):
        strip = []
        for note_center in row_sorted_centers[current_offset:]:
            strip.append(note_center)
            counter += 1
            if counter == splitter_array[i - 1]:
                break
        strips[i] = strip
        current_offset = counter
        if current_offset == len(centers):
            return strips, i
    assert False, "Should not have reached this point"
9ae82e74f12e298f1a1924883db66e11cb86d0fc
8,190
import json

def read_status(path="status.json"):
    """
    Opens the previously written status file so that, after a restart, we do
    not start with an empty status. If there is no file, create an empty
    status dict.

    :path: str
    :returns: dict
    """
    try:
        with open(path, "r") as jsonfile:
            status = json.load(jsonfile)
    except (IOError, json.decoder.JSONDecodeError):
        status = {}
    return status
d44529230a6fa39d9655425508f968220d6fb67e
8,191
def get_json_for_r_log_entry(req, x_set):
    """Returns a dict used to match r_log entries returned through API."""
    return {
        "request": req.to_dct(),
        "x_set": x_set
    }
989650af29c25f3d11346945d905d48af6718d25
8,193
def _get_sub_types_of_compositional_types(compositional_type: str) -> tuple:
    """
    Extract the sub-types of compositional types.

    This method handles both specification types (e.g. pt:set[], pt:dict[])
    as well as python types (e.g. FrozenSet[], Union[]).

    :param compositional_type: the compositional type string whose sub-types
        are to be extracted.
    :return: tuple containing all extracted sub-types.
    """
    sub_types_list = list()
    if compositional_type.startswith("Optional") or compositional_type.startswith(
        "pt:optional"
    ):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_types_list.append(sub_type1)
    if (
        compositional_type.startswith("FrozenSet")
        or compositional_type.startswith("pt:set")
        or compositional_type.startswith("pt:list")
    ):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_types_list.append(sub_type1)
    if compositional_type.startswith("Tuple"):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_type1 = sub_type1[:-5]
        sub_types_list.append(sub_type1)
    if compositional_type.startswith("Dict") or compositional_type.startswith(
        "pt:dict"
    ):
        sub_type1 = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.index(",")
        ].strip()
        sub_type2 = compositional_type[
            compositional_type.index(",") + 1 : compositional_type.rindex("]")
        ].strip()
        sub_types_list.extend([sub_type1, sub_type2])
    if compositional_type.startswith("Union") or compositional_type.startswith(
        "pt:union"
    ):
        inside_union = compositional_type[
            compositional_type.index("[") + 1 : compositional_type.rindex("]")
        ].strip()
        while inside_union != "":
            if inside_union.startswith("Dict") or inside_union.startswith("pt:dict"):
                sub_type = inside_union[: inside_union.index("]") + 1].strip()
                rest_of_inside_union = inside_union[
                    inside_union.index("]") + 1 :
                ].strip()
                if rest_of_inside_union.find(",") == -1:
                    # it is the last sub-type
                    inside_union = rest_of_inside_union.strip()
                else:
                    # it is not the last sub-type
                    inside_union = rest_of_inside_union[
                        rest_of_inside_union.index(",") + 1 :
                    ].strip()
            elif inside_union.startswith("Tuple"):
                sub_type = inside_union[: inside_union.index("]") + 1].strip()
                rest_of_inside_union = inside_union[
                    inside_union.index("]") + 1 :
                ].strip()
                if rest_of_inside_union.find(",") == -1:
                    # it is the last sub-type
                    inside_union = rest_of_inside_union.strip()
                else:
                    # it is not the last sub-type
                    inside_union = rest_of_inside_union[
                        rest_of_inside_union.index(",") + 1 :
                    ].strip()
            else:
                if inside_union.find(",") == -1:
                    # it is the last sub-type
                    sub_type = inside_union.strip()
                    inside_union = ""
                else:
                    # it is not the last sub-type
                    sub_type = inside_union[: inside_union.index(",")].strip()
                    inside_union = inside_union[inside_union.index(",") + 1 :].strip()
            sub_types_list.append(sub_type)
    return tuple(sub_types_list)
4fb1e67f8b6db717ccdf8c33e0b2458baf98c661
8,194
import random

def unsort(items):
    """Return a copy of the list in random order"""
    # Work on a copy so the input is not emptied (the original mutated its
    # argument despite the docstring promising a copy); the parameter is also
    # renamed from `list`, which shadowed the builtin.
    items = items[:]
    new_list = []
    for chance in range(len(items)):
        char = random.choice(items)
        items.remove(char)
        new_list.append(char)
    return new_list
249a61de0de500305bd9f1fe54ea7ac8f2d07d84
8,195
def count_lines(file):
    """Given a file, returns the number of lines it contains.
    The current file position should be preserved as long as the file
    supports tell() and seek()."""
    old_position = file.tell()
    file.seek(0)
    count = 0
    while file.readline() != '':
        count += 1
    file.seek(old_position)
    return count
53c1578d96f7bf031c4a8a5131739e36d35be5e7
8,196
def cite():
    """Returns BibTeX citation for the dataset."""
    return """@misc{stackoverflow2019,
    title={TensorFlow Federated Stack Overflow dataset},
    author={The TensorFlow Federated Authors.},
    year={2019},
}"""
b156145455d1f9c69f612bcf9958f1d49fc94137
8,197
def join_apply(df, func, new_column_name):
    """
    Join the result of applying a function across dataframe rows.

    This method does not mutate the original DataFrame.

    This is a convenience function that allows us to apply arbitrary
    functions that take any combination of information from any of the
    columns. The only requirement is that the function signature takes in
    a row from the DataFrame.

    The example below shows us how to sum the result of two columns into a
    new column.

    .. code-block:: python

        df = (
            pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]})
            .join_apply(lambda x: 2 * x['a'] + x['b'], new_column_name="2a+b")
        )

    This following example shows us how to use conditionals in the same
    function.

    .. code-block:: python

        def take_a_if_even(x):
            if x['a'] % 2:
                return x['a']
            else:
                return x['b']

        df = (
            pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]})
            .join_apply(take_a_if_even, 'a_if_even')
        )

    :param df: A pandas DataFrame
    :param func: A function that is applied elementwise across all rows of
        the DataFrame.
    :param new_column_name: New column name.
    """
    df = df.copy().join(df.apply(func, axis=1).rename(new_column_name))
    return df
474fc3628d7f4066f33154df78ac945a72e8166f
8,198
import threading

def is_main_thread() -> bool:
    """
    Check if we are in the main thread.
    """
    return threading.main_thread() == threading.current_thread()
1f91ae9e2d5b395cd995efcc2f87002ade53e6a9
8,199
from typing import List, Tuple

def fast_text_prediction_to_language_code(res: List[Tuple[str, str]]) -> List[str]:
    """Convert fastText language predictions to language codes"""
    labels, _ = res
    return [tmp[tmp.rfind("_") + 1 :] for tmp in labels]
3a39c3d416b4f66d1519284496fbaab940b202fc
8,200
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL
    scores for BART and PEGASUS."""
    if "<n>" in x:
        return x.replace("<n>", "\n")  # replace pegasus newline char
    else:
        return x
54842070d316d07fd98a06d8c33d5171d5ae7a57
8,203
def _excluded_scenario(test_name, scenario):
    """Skip list generator for scenarios to skip in test_name.

    Arguments
    ---------
    test_name : str, name of test
    scenario : instance of TestScenario, to be used in test

    Returns
    -------
    bool, whether scenario should be skipped in test_name
    """
    # for forecasters tested in test_methods_do_not_change_state
    # if fh is not passed in fit, then this test would fail
    # since fh will be stored in predict through fh handling
    # as there are scenarios which pass it early and everything else is the same
    # we skip those scenarios
    if test_name == "test_methods_do_not_change_state":
        if not scenario.get_tag("fh_passed_in_fit", True, raise_error=False):
            return True

    # this line excludes all scenarios that are not 1:1 to the "pre-scenario" state
    # pre-refactor, all tests pass, so all post-refactor tests should with these lines
    # comment out to run the full test suite with new scenarios
    if not scenario.get_tag("pre-refactor", False, raise_error=False):
        return True

    return False
34adc91987c86c8f8af2243f0d414c7d00eaa748
8,204
def select(element, selector):
    """Syntactic sugar for element#cssselect that grabs the first match."""
    matches = element.cssselect(selector)
    return matches[0]
a99c684073fe898bc297dd747cff4abf3c0cb524
8,206
def is_in_bbox(x, y, bbox):
    """
    Answers True or False if the x, y is inside the BBOX.
    """
    xMin, yMin, xMax, yMax = bbox
    if xMin <= x <= xMax and yMin <= y <= yMax:
        return True
    return False
911089af818c5e15e6ba857b1dd46f0182b1ea31
8,207
def convert_tuple_to_8_int(tuple_date):
    """ Converts a date tuple (Y,M,D) to 8-digit integer date (e.g. 20161231). """
    return int('{0}{1:02}{2:02}'.format(*tuple_date))
8584bb9ade995e95d12c9d09c4a6d52f7df44f5d
8,208
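A usage sketch (illustrative dates): note the zero-padding of month and day.

assert convert_tuple_to_8_int((2016, 12, 31)) == 20161231
assert convert_tuple_to_8_int((2020, 3, 7)) == 20200307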
def coord_lister(geom):
    """
    Given a geometry from a pandas GeoSeries, return the list of exterior
    coordinates for the entry.

    Args:
        geom: a shapely geometry with an exterior ring.

    Returns:
        list of coordinate tuples.
    """
    coords = list(geom.exterior.coords)
    return coords
b9d28a32241bf8f2b13988ef149eb4cf05ffb315
8,209
def get_request_raw_header(request, name):
    """ Return raw header value of request by header name """
    name = name.lower()
    for header in request.raw_headers:
        if header[0].decode("utf-8").lower() == name:
            return header[1].decode("utf-8")
    return ""
37abaac86ae770354bacd6a96326d8b43f54999a
8,211
import os

def readTimeStamp(fname, path):
    """Reads an insight tstmp file and returns an array of the times at which
    photos were taken, relative to the beginning of acquisition."""
    fname = os.path.join(os.path.abspath(path), fname)
    # Use context managers so the file handles are closed (the original
    # opened the file twice and never closed it).
    with open(fname) as f:
        num_lines = sum(1 for line in f)
    with open(fname) as f:
        for i in range(3):
            f.readline()
        strt = [f.readline().split()[1] for i in range(num_lines - 4)]
    t = [float(i) / 1000000 for i in strt]
    return t
b1145882bedf011eed599f2e873635df86c9145a
8,212
def scale_values_based_on_eich_peak(lead_list, gamma=0.5):
    """
    scale values on the Y-axis

    :param lead_list: list of the value
    :param gamma: scaling factor
    :return: rescaled list
    """
    new_lead_list = []
    for xy_pair in lead_list:
        new_y_value = xy_pair[1] * gamma
        new_lead_list.append([xy_pair[0], new_y_value])
    return new_lead_list
34d2c79f07c23e4fe23ab0eee8642ce17e03dbb7
8,213
def import_class(class_path):
    """Imports a class using a type string.

    :param class_path: Type string of the class.
    :type class_path: str
    :rtype: type
    """
    components = class_path.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod
bcfeed25c2b5f6672df63e63a031cfa580c0e275
8,214
import subprocess

def getYARNApplicationID(app_name):
    """Returns the YARN application ID."""
    state = 'RUNNING,ACCEPTED,FINISHED,KILLED,FAILED'
    out = subprocess.check_output(["yarn", "application", "-list",
                                   "-appStates", state],
                                  stderr=subprocess.DEVNULL,
                                  universal_newlines=True)
    lines = [x for x in out.split("\n")]
    application_id = ''
    for line in lines:
        if app_name in line:
            application_id = line.split('\t')[0]
            break
    return application_id
f7b6f5a77c0e0fe6ed602455609fa1475108eeac
8,215
def create_system_id(os_string, architecture):
    """
    Create a system-ID by joining the OS-String and the architecture with a
    space.

    Args:
        os_string (str): The Operating system string.
        architecture (str): The Architecture string.

    Returns:
        The System-ID string.
    """
    # The docstring originally said "hyphen", but the format string joins
    # the two parts with a space.
    system_id_format = '{os_string} {architecture}'
    return system_id_format.format(os_string=os_string.replace('_', ' '),
                                   architecture=architecture)
7ae682e2d57784ca771c1e50b7e980b56f631947
8,217
def circular_array_rotation(a, k, queries):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/circular-array-rotation/problem

    John Watson knows of an operation called a right circular rotation on an
    array of integers. One rotation operation moves the last array element to
    the first position and shifts all remaining elements right one.

    To test Sherlock's abilities, Watson provides Sherlock with an array of
    integers. Sherlock is to perform the rotation operation a number of times
    then determine the value of the element at a given position.

    For each array, perform a number of right circular rotations and return
    the value of the element at a given index.

    For example, array a = [3, 4, 5], number of rotations, k = 2 and indices
    to check, m = [1, 2].
    First we perform the two rotations:
    [3, 4, 5] -> [5, 3, 4] -> [4, 5, 3]

    Now return the values from the zero-based indices as indicated in the array.
    a[1] = 5
    a[2] = 3

    Args:
        a (list): array of integers to rotate
        k (int): the number of times to shift the array right
        queries (list): list of indices to query on the newly shifted array

    Returns:
        list: a list of values returned from the queries
    """
    query_values = []
    for query in queries:
        query_values.append(a[(query - k) % len(a)])
    return query_values
940e193fec0ad1f78c499ee8604e418dd0261109
8,219
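Checking the index arithmetic against the example in the docstring:

assert circular_array_rotation([3, 4, 5], 2, [1, 2]) == [5, 3]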
from typing import Dict

def normalize_score_dict(input_dict: Dict[str, float], exponent=1) -> Dict[str, float]:
    """Takes a dictionary of scores and applies L1-normalization (dividing
    each value by the sum). This is the simplest way of turning a collection
    of scores into a probability distribution. The exponent can be used to
    make the normalization use L2 or some other norm.
    """
    total_weight = sum([pow(val, exponent) for val in input_dict.values()])
    norm = pow(total_weight, 1 / exponent)
    output_dict = {key: value / norm if total_weight > 0 else 1 / len(input_dict)
                   for key, value in input_dict.items()}
    return output_dict
6a8d65d42d7f356b23a0e814841e8005f7daff30
8,222
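A usage sketch (illustrative scores): with the default exponent the values are simply divided by their sum.

assert normalize_score_dict({"a": 2.0, "b": 3.0}) == {"a": 0.4, "b": 0.6}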
import tempfile
import os

def _tempfile_path(*args, **kwargs):
    """Generate a sure-to-be-free tempfile path.

    It's hacky but it works.
    """
    fd, tmpfile = tempfile.mkstemp()
    # close and delete; we only want the path
    os.close(fd)
    os.remove(tmpfile)
    return tmpfile
f1460bafa8aa250202510e1cad4c860a3bbd0889
8,223
def prepareDataForClassification(dataset, start_test):
    """
    Generates the categorical output column, attaches it to the dataframe,
    labels the categories, and splits the data into train and test sets.
    """
    features = dataset.columns[0:-1]
    X = dataset[features]
    y = dataset.UpDown
    X_train = X[X.index < start_test]
    y_train = y[y.index < start_test]
    X_test = X[X.index >= start_test]
    y_test = y[y.index >= start_test]
    return X_train, y_train, X_test, y_test
88b42a8145b1f8da469337a8af214cc57c24848e
8,225
def remove_ambiguous_solutions(fn_in, db_lines, strict=True, verbose=True):
    """ Removes features with identical solutions.

    During solving, some tags may be tightly coupled and solve to the same
    solution.  In these cases, those solutions must be dropped until
    disambiguating information can be found.
    """
    solutions = {}
    dropped_solutions = set()
    for l in db_lines:
        parts = l.split()
        feature = parts[0]
        bits = frozenset(parts[1:])
        if bits in solutions:
            if strict:
                assert False, "Found solution {} at least twice, in {} and {}".format(
                    bits, feature, solutions[bits])
            else:
                dropped_solutions.add(bits)
        else:
            solutions[bits] = feature

    if strict:
        return 0, db_lines

    drops = 0
    output_lines = []
    for l in db_lines:
        parts = l.split()
        feature = parts[0]
        bits = frozenset(parts[1:])
        if bits not in dropped_solutions:
            output_lines.append(l)
        else:
            # Count only lines that are actually dropped; the flattened
            # original incremented `drops` for kept lines.
            drops += 1
            if verbose:
                print("WARNING: dropping line due to duplicate solution: %s" % l)

    if drops > 0:
        print("WARNING: %s dropped %s duplicate solutions" % (fn_in, drops))

    return drops, output_lines
59dc2de17c1311b1dc7096661dc6b39d0d8fc373
8,226
def residuals(fit, obs):
    """Calculate residuals for fit compared to observed data

    :fit: list of discrete fit data points
    :obs: list of observed data points
    :returns: fit minus observed data points
    """
    return fit - obs
46c5eac3620ab8bce58502822aa7d8824bed0988
8,227
def deEmojify(inputString):
    """
    Drop emojis (and any other non-ASCII characters)

    :param inputString:
    :return:
    """
    return inputString.encode('ascii', 'ignore').decode('ascii')
f0e2ad0ce597e74a133b37244a6bc047b90b1ecd
8,230