content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os
import subprocess
import shlex  # retained: other code in this module may rely on it


def python_minifiy(file):
    """Output a minified version of the given Python file.

    Runs pyminifier on the given file. The minified filename has '.min'
    added before the '.py' extension, e.g. lambda.py => lambda.min.py.
    This function is used to help code fit under the 4k limit when
    uploading lambda functions directly, as opposed to pointing to a zip
    file in S3. The minification process strips out all comments and uses
    minimal whitespace.

    Args:
        file (string): File name of Python file to minify.

    Returns:
        (string): File name of minified file.

    Raises:
        (subprocess.CalledProcessError): on a non-zero return code from pyminifier.

    NOTE: the name typo ("minifiy") is kept for backward compatibility.
    """
    root, ext = os.path.splitext(file)
    min_filename = root + '.min' + ext
    # Pass an argument list instead of shlex-splitting a concatenated
    # string so file names containing spaces work correctly.
    cmd = ['pyminifier', '-o', min_filename, file]
    result = subprocess.run(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        print(result.stderr)
        # Package up exception with output and raise if there was a failure.
        result.check_returncode()
    return min_filename
457016ab4f4aa9ac8d9d04dae10075114d97bb0d
10,116
def multi_dict_unpacking(lst):
    """Merge a list of dictionaries into one big dictionary.

    Later dictionaries override earlier ones on duplicate keys.
    """
    merged = {}
    for mapping in lst:
        merged.update(mapping)
    return merged
530947224d682cffb809e83f308a97a108002080
10,118
import math


def dist(a: list, b: list) -> float:
    """Calculate the Euclidean distance between two points.

    Generalized to points of any (equal) dimension; for the original
    2-D inputs the result is unchanged.

    Args:
        a (list): point a
        b (list): point b

    Returns:
        float: Distance between a and b
    """
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
7ad6d2f36c0bccee106c7b54a4abebd246c4a3fa
10,119
def get_middle_opt(string: str) -> str:
    """Get middle value of a string (efficient).

    Even-length strings yield the two central characters, odd-length
    strings the single central character.

    Examples:
        >>> assert get_middle_opt('middle') == 'dd'
    """
    mid = len(string) // 2
    if len(string) % 2:
        return string[mid]
    return string[mid - 1 : mid + 1]
1636f39e22bacc4f571de876dd71add690e940e6
10,121
def duration(duration):
    """Filter that converts a duration in seconds to something like 01:54:01.

    Hours are omitted when zero; None input yields ''.
    """
    if duration is None:
        return ''
    minutes, seconds = divmod(int(duration), 60)
    hours, minutes = divmod(minutes, 60)
    parts = []
    if hours > 0:
        parts.append('%02d' % hours)
    parts.append('%02d' % minutes)
    parts.append('%02d' % seconds)
    return ':'.join(parts)
7cd89654a84c2e3e41d96cb1b13688833ee54387
10,122
import types


def name(function):
    """Retrieve a pretty name for the function.

    :param function: function to get name from
    :return: pretty name
    """
    if not isinstance(function, types.FunctionType):
        return str(function)
    return function.__name__
4002d7a945c3b3e3e4d957c0b10ff202f2cf0246
10,123
import requests


def get_response_json(url: str):
    """Fetch *url* and return the parsed JSON response body.

    :param url: url address to get response json
    :type url: str
    :return: dictionary data
    :rtype: json
    :raises requests.HTTPError: when the server answers with a 4xx/5xx
        status (via ``raise_for_status``).
    :usage: function calling

        .. code:: python

            get_response_json(url)
    """
    response = requests.get(url)
    response.raise_for_status()
    return response.json()
dd209b1dba7f4320cd91addb1e49fa98bab0ae2b
10,124
import pickle


def dumps_content(content):
    """Serialize a response object with pickle.

    (Docstring translated from Chinese.)

    :param content: response object
    :return: serialized bytes
    """
    return pickle.dumps(content)
f88159b9d016a6e39744e7af09a76705c9f4e76f
10,125
import argparse


def create_arg_parser():
    """Create an argument parser.

    return: argparse.ArgumentParser
    """
    # Build the parser once; the original constructed a bare parser and
    # immediately replaced it with a second instance, discarding the first.
    parser = argparse.ArgumentParser(
        description="Create metrics plots in a batch mode")
    # Read in the input file
    parser.add_argument(dest='main_inputfile', default=None,
                        help='main input file name')
    return parser
1f8c1e1723f9feaa26519e42662ab7f98d0726c4
10,126
def _gen_setup_file(project_name: str) -> str:
    """Generate setup.py module content.

    :param project_name: value inserted as the package ``name`` field.
    :return: the text of a minimal ``setup.py`` with commented-out
        optional sections (package_data, install_requires, entry_points).
    """
    # %-formatting is used because the template body contains literal
    # braces that would clash with str.format / f-strings.
    content = """\
from setuptools import setup, find_packages

setup(
    name='%s',
    description='App description.',
    version='1.0',
    packages=find_packages(),
    # package_data={'package': ['subfolder/*']},
    # install_requires=[
    #     'numpy',
    #     'Pillow'
    # ],
    # entry_points='''
    #     [console_scripts]
    #     cmd=package.module:cmd_fun
    # '''
)
""" % project_name
    return content
293003969f52cec6d5314d703848b4ed16853ba4
10,127
import subprocess


def generate_manifest_in_from_git():
    """Generate MANIFEST.in from 'git ls-files'.

    Pipes the tracked-file list through sed to prefix each path with
    ``include`` and redirects the result into MANIFEST.in in the
    current working directory.

    :return: exit status of the shell pipeline (0 on success).
    """
    # shell=True is required for the pipe and the redirection; the command
    # is a fixed literal, so no untrusted input reaches the shell.
    cmd = r'''git ls-files | sed 's/\(.*\)/include \1/g' > MANIFEST.in'''
    return subprocess.call(cmd, shell=True)
bf346f109a79d07dfd66c02d61bfa886e994d9a2
10,128
def strictwwid(wwid):
    """Validity-check a WWID using strict rules (must be Brocade format).

    Expected format: eight lowercase hex byte pairs separated by colons,
    e.g. ``50:00:09:73:00:18:95:19`` (23 characters total).

    Args:
        wwid: candidate WWID string.

    Returns:
        The unchanged ``wwid`` when valid, otherwise ``None`` (a message
        is printed for each failure mode).
    """
    if len(wwid) != 23:  # WWPN must be 23 characters long
        print("WWID has invalid length " + wwid)
        return None
    # Colon separators must be in the correct place. A single misplaced
    # colon is a failure, so the checks are joined with `or` (the original
    # used `and`, which only failed when EVERY separator was wrong).
    if wwid[2:3] != ":" or wwid[5:6] != ":" or wwid[8:9] != ":" or wwid[11:12] != ":" \
            or wwid[14:15] != ":" or wwid[17:18] != ":" or wwid[20:21] != ":":
        print(("WWID invalid format - colon not where expected " + wwid))
        return None
    # Remove colons from expected locations in wwid string.
    hexwid = wwid[0:2] + wwid[3:5] + wwid[6:8] + wwid[9:11] + wwid[12:14] \
        + wwid[15:17] + wwid[18:20] + wwid[21:23]
    # Only lowercase hex characters allowed after extracting colons.
    for nibble in hexwid:
        if (nibble < "0" or nibble > "f") or ("9" < nibble < "a"):
            print("WWID has invalid hex character " + wwid)
            return None
    return wwid
539d9e0ef9f1f9f6d24dbc4a1a514d9a07229388
10,129
import re


def re_repl_escape(s):
    """Escape backreferences in a string, for the second arg of re.sub().

    Parameters:
        s: the string to escape

    Dependencies:
        modules: re
    """
    backref = re.compile(r'\\([1-9][0-9]?)')
    return backref.sub(r'\\\\\1', s)
4ae7e4198dd65eb46d8dbbf71cf8f28230eb2c76
10,132
def split_data(data, ratio, shuffle=1):
    """Split dataset according to ratio, larger split is first return."""
    split_pt = int(len(data[0]) * ratio)
    first = tuple(col[:split_pt] for col in data)
    second = tuple(col[split_pt:] for col in data)
    return first, second
fc837dd9f721950dde18cee9ec33eab5e3e96f77
10,133
def jtag(tag):
    """Helper to translate tag to serializable format.

    :param tag: a pair whose first element, and each item of whose second
        element, exposes a ``devanagari(strict_io=...)`` method —
        presumably sanskrit-parser tag objects; TODO confirm with callers.
    :return: tuple of (devanagari string, list of devanagari strings).
    """
    return (tag[0].devanagari(strict_io=False),
            [t.devanagari(strict_io=False) for t in list(tag[1])])
cb8245a435d67d9678e71d449ae9401b9562cb4c
10,134
def filter_builtin():
    """filter: Filter items using a function. Prefer list comprehension."""
    kept = [info for info in "pentagone" if info not in 'open']
    return "{}ged top secret".format(''.join(kept))
ae1d96fb5bfa4a675538bf33a5adcd2d2f64c6e5
10,135
def is_named(name):
    """:samp:`Predicate for loose class-name detection.` ::

        f(cls) = cls.name == name or cls.qualified_name == name

    :param name: the class-name or qualified class-name in the detection
    :return: callable for loose class-name detection
    """
    def predicate(cls):
        qualified = cls.__module__ + "." + cls.__name__
        return name in (cls.__name__, qualified)
    return predicate
d26fbb6d853f195cfcc70a6f1791cbacfc7aebd7
10,136
def pid_from_context_or_data(value, context, **kwargs):
    """Get PID from marshmallow context."""
    pid = (context or {}).get('pid')
    return value if pid is None else pid.pid_value
dff32845ea0d2541503802bdd85ccb299091325b
10,138
def delBlank(strs):
    """Delete all blanks in the str.

    Args:
        strs: input string.

    Returns:
        str: copy of ``strs`` with every space (' ') removed.
    """
    # str.replace runs in C and avoids the original's quadratic
    # character-by-character concatenation.
    return strs.replace(" ", "")
a6155af200918819055533057f63b4b44c8dd508
10,139
import re


def splitn(s, n):
    """Split string s into chunks no more than n characters long.

    Args:
        s: the string to split.
        n: maximum chunk length.

    Returns:
        list of chunks; the last one may be shorter than ``n``.
    """
    parts = re.split("(.{%d,%d})" % (n, n), s)
    # Drop the empty separator strings re.split leaves between captured
    # chunks. The original used map(parts.remove, ...), which is lazy in
    # Python 3 and therefore never removed anything.
    return [part for part in parts if part]
3f0f697f54515e26f98bc08b53cb4d39952ed128
10,140
def skinHasImage(image):
    """Returns ``True`` if the image file exists in the skin.

    :param image: string - image filename

    .. note:: If the media resides in a subfolder include it.
        (eg. home-myfiles\home-myfiles2.png).

    You can use the above as keywords for arguments.

    example::

        exists = xbmc.skinHasImage('ButtonFocusedTexture.png')
    """
    # Stub implementation: always reports the image as present.
    return True
f64af64c4d18298fe56ebff8388d6fc3020ff5e1
10,141
import csv


def LoadMOS(mos_csv):
    """Load a csv file with MOS scores.

    Args:
        mos_csv: Path to a csv file that has degraded and MOS.

    Returns:
        Dictionary with filename keys and MOS values
    """
    mos_dict = {}
    with open(mos_csv, 'r') as csvfile:
        for row in csv.reader(csvfile):
            if row[1] == 'MOS':  # skip the header row
                continue
            mos_dict[row[0]] = row[1]
    return mos_dict
e98cbf93e3bc889a31a98ff32a9f9189a850a55f
10,142
import unicodedata


def url_is_invalid_unicode(url_string):
    """Check for unicode control characters in URL."""
    return any(unicodedata.category(ch).startswith("C")
               for ch in str(url_string))
d1a3974b24023c46a337c7e17718e00aa5916c7d
10,143
def pg_capital(s : str) -> str:
    """If string contains a capital letter, wrap it in double quotes.

    Strings already containing a double quote are returned unchanged.

    returns: string
    """
    if '"' in s:
        return s
    # Quote unless every character is a lowercase ASCII letter or a digit.
    all_plain = all('a' <= c <= 'z' or c.isdigit() for c in str(s))
    return s if all_plain else '"' + s + '"'
bfb928b88e837c78ddd46f7d231217442ef4e24c
10,145
import string


def escape_bytes(bytes_):
    """Convert a bytes object to an escaped string.

    Convert bytes to an ASCII string. Non-printable characters and a
    single quote (') are escaped. This allows to format bytes in messages
    as f"b'{utils.escape_bytes(bytes)}'".
    """
    printable = string.digits + string.ascii_letters + string.punctuation + ' '
    pieces = []
    for byte in bytes_:
        char = chr(byte)
        if char == '\\':
            pieces.append("\\\\")
        elif char == '\'':
            pieces.append("\\'")
        elif char in printable:
            pieces.append(char)
        else:
            pieces.append("\\x%0.2x" % byte)
    return "".join(pieces)
c1a6c2875824d8576e7d9c349f19548e2c3313e8
10,147
def shrink_dimensions(width, height, max_dimension):
    """Resize dimensions so max dimension is max_dimension.

    If dimensions are too small no resizing is done.

    Args:
        width (int): The width
        height (int): The height
        max_dimension (int): The maximum size of a dimension

    Returns:
        tuple of (small_width, small_height): A resized set of dimensions, as integers
    """
    largest = max(width, height)
    if largest < max_dimension:
        return width, height
    scale = largest / max_dimension
    return int(width / scale), int(height / scale)
60d4a6ec9b310f7c22f341f1309b4677c3617fd2
10,148
def addAge(data, ages):
    """Insert '# Age = ...' comment lines into isochrone file text.

    The new format in CMD v3.2 does not include the commented line with
    the 'Age' value, and the logAge column is rounded to 3 decimal places
    so this value can not be taken from there. Add this line back to each
    age for each metallicity file.

    :param data: whole file contents as a single string.
    :param ages: sequence of age values, one per "# Zini" block, in file
        order (must be at least as long as the number of blocks).
    :return: the modified contents as a single string.
    """
    data = data.split('\n')
    # Indexes of "# Zini" comments
    idx = []
    for i, line in enumerate(data):
        if line.startswith("# Zini"):
            idx.append(i)
    # Insert new comments in their proper positions; the `+ i` offset
    # compensates for the lines already inserted before this block.
    for i, j in enumerate(idx):
        data.insert(j + i, "# Age = {:.6E} yr".format(ages[i]))
    return "\n".join(data)
e9c6145dd424ed2149d950246172004bea24d2ca
10,150
def group_spans(article_id, span_intervals):
    """Groups together spans from the same article.

    NOTE(review): ``dict.fromkeys(article_id, span_intervals)`` maps every
    article id to the *same* ``span_intervals`` object, so each key ends
    up with the one-element list ``[span_intervals]`` — confirm with
    callers whether grouping distinct spans per id was intended.

    :param article_id: iterable of article identifiers (become dict keys).
    :param span_intervals: span data shared by all articles.
    :return: dict mapping each article id to a list containing the spans.
    """
    dictionary = dict.fromkeys(article_id, span_intervals)
    spans = {}
    for key, value in dictionary.items():
        spans.setdefault(key, [])
        spans[key].append(value)
    return spans
3be0f2cb8b07a9fef7f116171b81829a86301a04
10,151
import json


def get_id_set(id_set_path: str) -> dict:
    """Parse the content of id_set_path and return its content.

    Args:
        id_set_path: The path of the id_set file

    Returns:
        The parsed content of id_set
    """
    with open(id_set_path, 'r') as id_set_file:
        return json.load(id_set_file)
244495603bf423324ef4b3ffe4b1fdc3ca7fdf74
10,152
def transpose(transform, df, dicts):
    """Transposes the given dataframe.

    :param transform: transformation spec; unused here — presumably kept
        for a uniform transform-function signature (TODO confirm).
    :param df: pandas DataFrame to transpose.
    :param dicts: auxiliary dicts, passed through unchanged.
    :return: tuple of (transposed DataFrame whose first row has been
        promoted to the column header, dicts).
    """
    df = df.reset_index().set_index('index').T.reset_index()
    # After the transpose, row 0 holds the values used as column names.
    df.columns = df.iloc[0]
    return (df[1:], dicts)
fc80a8a471ddf4af4329dc069b45d442f5bab0a0
10,153
def format_label_value(*values):
    """Construct a label value.

    If multiple value components are provided, they are joined by underscores.
    """
    separator = '_'
    return separator.join(values)
46bb4e968577c95d5dd0df8b9687efbfdcc0568e
10,154
def get_failed_info(res):
    """Collect the ids of failed test cases.

    (Docstring translated from Chinese: "get failed case info".)

    :param res: tuple of (intf_summary_list, full_summary_list) summaries.
    :return: tuple (failed_intf_dic, failed_full_dic) mapping each
        interface id / product-line id to the list of its failed case ids;
        entries with a zero failCaseNum are skipped entirely.
    """
    intf_summary_list, full_summary_list = res
    failed_intf_dic = {}
    for system_dic in intf_summary_list:
        if system_dic['failCaseNum'] == 0:
            continue
        for intf_dic in system_dic['children']:
            if intf_dic['failCaseNum'] == 0:
                continue
            failed_intf_id = intf_dic['intfId']
            failed_intf_dic[failed_intf_id] = []
            for case_dic in intf_dic['tableData']:
                # '失败' means "failed" — runtime comparison string, kept as-is.
                if case_dic['testResult'] == '失败':
                    failed_intf_dic[failed_intf_id].append(case_dic['caseId'])
    failed_full_dic = {}
    for product_line_dic in full_summary_list:
        if product_line_dic['failCaseNum'] == 0:
            continue
        failed_product_line_id = product_line_dic['productLineId']
        failed_full_dic[failed_product_line_id] = []
        for case_dic in product_line_dic['tableData']:
            if case_dic['testResult'] == '失败':
                failed_full_dic[failed_product_line_id].append(case_dic['caseId'])
    return failed_intf_dic, failed_full_dic
cb038319617f82a36fcc9ce5b45abac20fac89d7
10,155
def create_batch_groups(test_groups, batch_size):
    """Return batch groups list of test_groups."""
    batches = []
    for group_name in test_groups:
        remaining = test_groups[group_name]
        while remaining:
            batches.append(remaining[:batch_size])
            remaining = remaining[batch_size:]
    return batches
70de6428ab94a104b8a2573e2dc920a55712351a
10,157
def _get_timeout(value):
    """Turn an input str or int into a float timeout value.

    :param value: the input str or int, in milliseconds
    :type value: str or int
    :raises ValueError: if the value is a float, not parseable as an
        integer, or exceeds the D-Bus maximum.
    :returns: float timeout in seconds
    """
    # Largest timeout D-Bus accepts, in milliseconds (2^30 - 1).
    maximum_dbus_timeout_ms = 1073741823
    # Ensure the input str is not a float: int(float) would silently
    # truncate, so reject floats explicitly.
    if isinstance(value, float):
        raise ValueError(
            "The timeout value provided is a float; it should be an integer."
        )
    try:
        timeout_int = int(value)
    except ValueError:
        raise ValueError("The timeout value provided is not an integer.")
    # Ensure the integer is not too large
    if timeout_int > maximum_dbus_timeout_ms:
        raise ValueError(
            "The timeout value provided exceeds the largest acceptable value, %s."
            % maximum_dbus_timeout_ms
        )
    # Convert from milliseconds to seconds
    return timeout_int / 1000
b8205cb34b90b856b4dda2a028f801b4cd70d7b4
10,158
def pad_input_ids_msmarco(input_ids, max_length, pad_on_left=False, pad_token=0):
    """An original padding function that is used only for MS MARCO data.

    Pads (or truncates) ``input_ids`` to exactly ``max_length`` tokens.
    """
    deficit = max_length - len(input_ids)
    if deficit <= 0:
        return input_ids[:max_length]
    padding = [pad_token] * deficit
    return padding + input_ids if pad_on_left else input_ids + padding
dc75d5d72e4e1633301ff083936e08420ef17883
10,159
def getAnswer(x,input_string):
    """Prompt until the user enters 1 or 2 and return that value.

    (Translated from Latvian: "Gets a value of 1 or 2 for x, showing the
    text input_string. If an invalid value is entered, an error message
    is shown.")

    :param x: placeholder argument; overwritten by the value read.
    :param input_string: prompt text shown to the user.
    :return: the validated integer, 1 or 2.
    """
    while True:
        try:
            x = int(input(input_string))
            # Values outside 1..2 are treated like a parse error.
            if x < 1:
                raise ValueError
            elif x > 2:
                raise ValueError
            break
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        except:
            # Message (Latvian): "Please enter 1 or 2." — runtime string, kept.
            print("Lūdzu ievadiet 1 vai 2.\n")
    return x
66f8b1cccef5fb710713f1c6498248dd0f3517ea
10,160
from typing import Optional
from typing import Union
import struct


def encode_key(
    key: str, value: Optional[Union[int, float, str]] = None, value_type: str = "str"
) -> bytes:
    """Encode given header key to a bytes string.

    Parameters
    ----------
    key : str
        header key
    value : Optional[Union[int, float, str]], optional
        value of the header key, by default None
    value_type : str, optional
        type of the header key, by default "str"

    Returns
    -------
    bytes
        bytes encoded string
    """
    encoded = struct.pack("I", len(key)) + key.encode()
    if value is None:
        return encoded
    if value_type == "str" and isinstance(value, str):
        return encoded + struct.pack("I", len(value)) + value.encode()
    return encoded + struct.pack(value_type, value)
b38e04fdbd761bd121f0e08a9eee9dfb692f616c
10,161
import json


def loads(*args, **kwargs):
    """Thin wrapper that calls ``json.loads`` with the same arguments."""
    return json.loads(*args, **kwargs)
2696e9a9482898d4dfe8fc9cf9f4bb47ec1da050
10,162
def hashIter(bytesiter, hasher, ashexstr=True):
    """Will hash the blockIter generator and return digest."""
    for chunk in bytesiter:
        hasher.update(chunk)
    if ashexstr:
        return hasher.hexdigest()
    return hasher.digest()
216435e32901661f3f7c90953dd86e5a00a5b703
10,163
import json


def get_numeric_project_id(gcloud, project_id):
    """Get the numeric project ID."""
    raw = gcloud.run('projects', 'describe', project_id, '--format=json')
    return json.loads(raw)['projectNumber']
f9e3086ed1436d4ce075960c191ff4f64a76582b
10,165
def count_local_minmax(A):
    """Count local maxima and minima in sequence ``A``.

    @author Akafael

    A local extremum is an interior point where the sign of the
    consecutive difference flips (rising -> falling counts a maximum,
    falling -> rising a minimum). Flat segments are ignored.

    The original implementation indexed its diff list with an offset that
    raised IndexError for len(A) >= 3, and used ``==`` where assignment
    was intended when resetting its direction flags; both are fixed here.

    Args:
        A: sequence of comparable numeric values.

    Returns:
        int: number of local extrema (1 for a trivial len <= 1 input,
        matching the original's trivial case).
    """
    n = len(A)
    # Trivial Solution
    if n <= 1:
        return 1
    count = 0
    direction = 0  # +1 while rising, -1 while falling, 0 before first move
    for i in range(n - 1):
        diff = A[i + 1] - A[i]
        if diff > 0:
            if direction == -1:
                count += 1  # finished falling: local minimum
            direction = 1
        elif diff < 0:
            if direction == 1:
                count += 1  # finished rising: local maximum
            direction = -1
    return count
03fc5439aad1bf93e8d08d8056beece2c2fe2aee
10,166
from functools import reduce
from operator import add


def fitness(individual,target):
    """
    individual : the individual to evaluate (a list)
    target : the target sum the individuals are aiming for
    """
    total = sum(individual)
    return abs(target - total)
bcfb4d857be926df249149e224babbc98d358780
10,167
def create_schema(name: str) -> str:
    """Function for generating create schema statements.

    :param str name: The name of the schema
    :return: Create statement
    :rtype: str
    """
    return "CREATE SCHEMA {}".format(name)
a82d392ec411e2c412ef3f1959ff7ab229130532
10,168
def courant(dx, dt, v_max, **kwargs):
    """Calculate the Courant's number describing stability of the numerical scheme.

    Parameters
    ----------
    dx : float
        Size of the spatial grid cell [m].
    dt : float
        Time step [s].
    v_max : float
        Max. velocity of the model.

    Returns
    -------
    C : float
        Courant's number used later in CFL criterion C < C_max where
        C_max is scheme-specific.

    Notes
    -----
    The smaller the more stable. If C > 1 the fastest wave in the model
    will be covering more than 1 grid cell per time step.
    """
    # C = v_max / (dx / dt): ratio of the fastest model velocity to the
    # grid velocity (one cell per time step) in physical units.
    grid_velocity = dx / float(dt)
    return v_max / float(grid_velocity)
58b09b545c4679b845c3e2702af8bffe4262c599
10,169
def weighted_count(instances_obj, m_weights):
    """Weight nt count by nt weights at each position."""
    weighted_counts = {
        letter: [0] * instances_obj.length for letter in instances_obj.alphabet
    }
    for idx, instance in enumerate(instances_obj):
        weight = m_weights[idx]
        for position, letter in enumerate(instance):
            weighted_counts[letter][position] += weight
    return weighted_counts
458a6c013fd72c1e418a27c3a6f5511c86be834d
10,170
def space_linear_tree(actions, sent=None, SHIFT=0, REDUCE=1):
    """Render a shift-reduce action sequence as a bracketed tree string.

    Use `( ` instead of `(`. Similarly for ` )`.
    """
    if sent is None:
        sent = [str(i) for i in range((len(actions) + 1) // 2)]
    stack = []
    pointer = 0
    for action in actions:
        if action == SHIFT:
            stack.append(sent[pointer])
            pointer += 1
        elif action == REDUCE:
            right = stack.pop()
            left = stack.pop()
            stack.append('( ' + left + ' ' + right + ' )')
    assert len(stack) == 1
    return stack[-1]
36a7c1d2ba669259f5a2810d1746766470966da4
10,171
import math


def standard_deviation(series):
    """Sample standard deviation of a series, implemented from scratch.

    Matches pandas ``Series.std()`` / numpy ``std(ddof=1)``: Bessel's
    correction, dividing by n - 1.

    https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.std.html
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html

    Args:
        series: sequence of numbers with len >= 2.

    Returns:
        float: the sample standard deviation.
    """
    # (The original left a dead `pass` statement at the top; removed.)
    mean = sum(series) / len(series)
    # Sum of squared deviations from the mean.
    sum_sq = sum((value - mean) ** 2 for value in series)
    return math.sqrt(sum_sq / (len(series) - 1))
01a488f4bc8168a92d4a0590daa88bad08032fed
10,172
def unify_projection(dic):
    """Unifies names of projections.

    Some projections are referred using different names like
    'Universal Transverse Mercator' and 'Universe Transverse Mercator'.
    This function replaces synonyms by a unified name.

    Example of common typo in UTM replaced by correct spelling::

        >>> unify_projection({'name': ['Universe Transverse Mercator']})
        {'name': ['Universal Transverse Mercator']}

    :param dic: The dictionary containing information about projection
    :return: The dictionary with the new values if needed or a copy of old one
    """
    # Each inner list holds every known alias for one projection system;
    # the first entry is the canonical spelling.
    lookup = [['Universal Transverse Mercator',
               'Universe Transverse Mercator']]
    result = dict(dic)
    for aliases in lookup:
        names = result['name']
        for pos, entry in enumerate(names):
            if entry in aliases:
                names[pos] = aliases[0]
    return result
a3cdf4bb2a26ee0391281e8f8b0d3e7ea5c8a962
10,173
import os


def list_all_csv_files(dir):
    """List all csv files under a directory.

    Recursively explores the path and returns the filepaths for all
    files ending in '.csv'.
    """
    found = []
    for root, _sub_dirs, files in os.walk(os.path.abspath(dir)):
        for filename in files:
            full_path = os.path.join(root, filename)
            if os.path.splitext(full_path)[1] == '.csv':
                found.append(full_path)
    return found
239b43a80123d16a21b7dd23bf0c8e50a6a7f8ad
10,175
def creer_entete(nomFichier) : """!Crée l'en-tête du fichier Prend le nom de base du fichier et sa taille, et l'écrit dans l'en-tête. @param nomFichier Nom du fichier à concaténer @return L'en-tête à écrire dans le fichier d'archive """ # Pour l'en-tête, on fait un truc simple avec le nom du fichier et la taille return "[F : {}]".format(nomFichier)
f35b8391da7a77beea4e1932cb280f8f54cf879b
10,176
import os


def validate_auth_keys(path, extension):
    """Validates and maintains ZMQ Auth Keys/Certificates.

    Expects exactly one 'server' and one 'client' key file with the given
    extension in *path*; a single leftover key is deleted.
    """
    # Path must exist and must not be an empty directory.
    if not os.path.exists(path):
        return False
    entries = os.listdir(path)
    if not entries:
        return False
    # Collect files named server.<ext> / client.<ext>.
    keys_buffer = [
        key_file
        for key_file in entries
        if os.path.splitext(key_file)[0] in ['server', 'client']
        and os.path.splitext(key_file)[1] == extension
    ]
    # A single orphaned key is invalid; remove it.
    if len(keys_buffer) == 1:
        os.remove(os.path.join(path, keys_buffer[0]))
    return len(keys_buffer) == 2
95742b4c8eb2080a4b74803a18aa5f6e5f9fda13
10,178
def abbreviate_id(a_string):
    """Abbreviate an id string.

    1. Hack off the first 6 and the last 4 characters.
    2. Then join them with an ellipsis.

    :param a_string:
    :return: An abbreviated id string
    """
    head = a_string[:6]
    tail = a_string[-4:]
    return "{}...{}".format(head, tail)
8127ae7e45638d77c7c0b3c414ea96f64dae7228
10,179
def reverse_integer(x):
    """Given a signed 32-bit integer x, return x with its digits reversed.

    If reversing x causes the value to go outside the signed 32-bit
    integer range [-2^31, (2^31) - 1], then return 0.

    :type x: int
    :rtype: int
    """
    sign = -1 if x < 0 else 1
    reversed_value = sign * int(str(abs(x))[::-1])
    # Upper bound is inclusive: (2^31) - 1 is itself a valid 32-bit value.
    # (The original used a strict '<' here, excluding the boundary.)
    if -2 ** 31 <= reversed_value <= 2 ** 31 - 1:
        return reversed_value
    return 0
0fead2fbc52e09d9442f9c1f4efe5d7eb4a7e0be
10,180
def optimal_compatible_subset(requests):
    """Get optimal weighted subset.

    Uses dynamic programming to get the subset of the requests with the
    largest weight.

    :param requests: iterable of objects with ``start``, ``finish`` and
        ``weight`` attributes (project type — interval-scheduling
        semantics assumed; TODO confirm with callers).
    :return: tuple of (best total weight, list of chosen requests).
    """
    dp_weights = dict()   # finish time -> best weight achievable from there on
    dp_subsets = dict()   # finish time -> the requests realizing that weight
    finishes = {r.finish for r in requests}
    # Solve subproblems from the latest finish time backwards so each one
    # only depends on later finish times that are already in the tables.
    while finishes:
        x = max(finishes)
        finishes.remove(x)
        subsets = dict()
        for req in [r for r in requests if r.start >= x]:
            w = req.weight + dp_weights[req.finish]
            subsets[w] = [req] + dp_subsets[req.finish]
        if not subsets:
            dp_weights[x] = 0
            dp_subsets[x] = []
        else:
            w = max(subsets)
            dp_weights[x] = w
            dp_subsets[x] = subsets[w]
    # Finally fold in requests that start before the earliest solved finish
    # time, accumulating the overall optimum under the sentinel key 0.
    # NOTE(review): min(dp_weights) is re-evaluated each iteration and drops
    # to 0 once the sentinel is inserted — verify this is intended.
    for req in [r for r in requests if r.start < min(dp_weights)]:
        w = req.weight + dp_weights[req.finish]
        if 0 not in dp_weights or w > dp_weights[0]:
            dp_weights[0] = w
            dp_subsets[0] = [req] + dp_subsets[req.finish]
    return (dp_weights[0], dp_subsets[0])
70562bf5289f1608b7ffe918678cadcc1b956e14
10,182
import codecs


def OpenDictionary(filenames):
    """Reads the dictionary into a list.

    :param filenames: comma-separated list of tab-delimited dictionary files.
    :return: list of entry dicts (id, simplified, traditional, pinyin,
        english, grammar, notes).
    """
    print("Opening the Chinese Notes dictionary")
    words = []
    for dictfile in filenames.split(","):
        with codecs.open(dictfile, 'r', "utf-8") as f:
            for line in f:
                stripped = line.strip()
                if not stripped:
                    continue
                fields = stripped.split('\t')
                # Rows need at least 15 columns; notes live in column 14.
                if fields and len(fields) > 14:
                    words.append({
                        "id": fields[0],
                        "simplified": fields[1],
                        "traditional": fields[2],
                        "pinyin": fields[3],
                        "english": fields[4],
                        "grammar": fields[5],
                        "notes": fields[14],
                    })
    print("OpenDictionary completed with {} entries".format(len(words)))
    return words
f9c473254984e36ac8ae49b24d20be484416cf23
10,183
import requests


def api_request(url, api_key=None):
    """GET *url* and return the parsed json object.

    If an api_key is supplied, it is sent as a token Authorization header.

    :param url: endpoint to fetch.
    :param api_key: optional API token string.
    :return: decoded JSON response body.
    """
    if api_key:
        return requests.get(url, headers={'Authorization': 'Token ' + api_key}).json()
    else:
        return requests.get(url).json()
ea8572d761dffb952b970ab14e73307a02f67ee8
10,184
import torch


def get_device(gpu_idx: int = 0) -> torch.device:
    """Get default gpu torch device (falls back to CPU when CUDA is absent).

    :param gpu_idx: index of the CUDA device to select.
    :return: the chosen ``torch.device``.
    """
    if torch.cuda.is_available():
        return torch.device(f"cuda:{gpu_idx}")
    return torch.device("cpu")
acc8e9b65b21045cb0dc9a6fb2942d6dab0ce6a2
10,185
def checkfile(file):
    """Checks file to make sure itsx and itsxpress exited with an exit code
    of 0 and that ITSx did not throw a fatal error message.

    Args:
        file: data file path

    Returns:
        Bool: True

    Raises:
        AssertError: if two lines with Exit Status 0 are not present
        ValueError: if ITSx returned a fatal error message (needed because
            ITSx does not return a nonzero exit status when doing so)
    """
    status_count = 0
    with open(file, "r") as handle:
        for line in handle:
            if "Exit status: 0" in line:
                status_count += 1
            if "FATAL ERROR" in line:
                raise ValueError("There was an issue with how itsx ran in the file {}".format(file))
    assert (status_count==2),"There were not two nonzero exit codes in the file {}".format(file)
    return True
62f5aa5be467be99c5be324c3104c623bfae2701
10,186
def validator_key(validator_account_and_key):
    """Private key of the validator running the confirmation sender."""
    _account, key = validator_account_and_key
    return key
365120ee93d30f191a51153505f8164cc588a74f
10,187
import pandas as pd


def todataframe(table, index=None, exclude=None, columns=None,
                coerce_float=False, nrows=None):
    """Load data from the given `table` into a
    `pandas <http://pandas.pydata.org/>`_ DataFrame.

    The first row of `table` supplies the column names unless `columns`
    is given explicitly. E.g.::

        >>> import petl as etl
        >>> table = [('foo', 'bar', 'baz'),
        ...          ('apples', 1, 2.5),
        ...          ('oranges', 3, 4.4),
        ...          ('pears', 7, .1)]
        >>> df = etl.todataframe(table)
    """
    rows = iter(table)
    header = next(rows)
    return pd.DataFrame.from_records(
        rows,
        index=index,
        exclude=exclude,
        columns=header if columns is None else columns,
        coerce_float=coerce_float,
        nrows=nrows,
    )
9588a547fec8e4f532b89a36969764bf3bd13a86
10,188
from typing import Dict
from typing import Callable
from typing import Any


def dispatch_value(default_func):
    """Decorator to select a function implementation according to the value
    of its first parameter."""
    registry: Dict[type, Callable[..., Any]] = {}
    default: Callable[..., Any] = default_func

    def register(value: Any):
        """Register the decorated function as the implementation used when
        the first argument equals ``value``."""
        def decorator(f: Callable[..., Any]):
            registry[value] = f
            return f
        return decorator

    def wrapper(value: Any, *args, **kwargs):
        """Call the function registered for ``value`` (or the default)."""
        func = registry.get(value, default)
        return func(value, *args, **kwargs)

    # wrapper.register = register generates a mypy error
    setattr(wrapper, "register", register)  # noqa B010
    return wrapper
de460bc64f17dc5bb24a6995c87f3c4da938b50c
10,189
def dp_longest_palindrome(dp, S, i, j):
    """Recursively find the longest palindromic subsequence of S[i..j].

    Memoized version of the classic DP; ties between the two shrunken
    subproblems resolve to the right-shrunk one.

    Args:
        dp: memo dict mapping (i, j) index pairs to the best subsequence.
        S: the input string.
        i, j: inclusive bounds of the substring considered.

    Returns:
        str: the longest palindromic subsequence of S[i..j].
    """
    if i == j:
        return S[i]
    if (i, j) in dp:
        return dp[(i, j)]
    if S[i] == S[j]:
        if i + 1 == j:
            # Adjacent equal characters form a length-2 palindrome.
            # (The original stored this under dp[([i, j])] — a list key,
            # which is unhashable and raised TypeError.)
            dp[(i, j)] = 2 * S[i]
        else:
            dp[(i, j)] = \
                S[i] + \
                dp_longest_palindrome(dp, S, i + 1, j - 1) + \
                S[i]
    else:
        s_i = dp_longest_palindrome(dp, S, i + 1, j)
        s_j = dp_longest_palindrome(dp, S, i, j - 1)
        dp[(i, j)] = s_i if len(s_i) > len(s_j) else s_j
    return dp[(i, j)]
92d20e8960bffad7254450e93951789fdb6ac306
10,190
import torch


def get_available_devices():
    """Return CPU and, if present, GPU device.

    Returns:
        [torch.device]: Available devices for `torch`.
    """
    cpu_only = [torch.device("cpu")]
    if not torch.cuda.is_available():
        return cpu_only
    return cpu_only + [torch.device("cuda")]
a97b5601792d8d1fa87e9c353f87cdcb6497dcf4
10,192
import os
import time


def file_exist(file_path):
    """Wait (up to ~3 s) for a file to exist.

    Polls every 10 ms, at most 300 times.

    :type file_path: file abs path
    :return: bool — True once the file exists, False after the polling
        budget is exhausted.
    """
    attempts = 0
    while not os.path.exists(file_path):
        if attempts >= 300:
            return False
        attempts += 1
        time.sleep(0.01)
    return True
4cfcc6f85258ce2fc9938b6fccbb786360bf8bef
10,193
def getbitlen(bint):
    """Returns the number of bits encoding an integer.

    Thin wrapper around ``int.bit_length`` (returns 0 for 0).
    """
    return bint.bit_length()
f6ae3253e767382959d372eb5e4612fb85c89ffc
10,195
import os
import re


def _resolve_any_to_text(name, ns, dom):
    """Shell out to dig instead of using RPC because of RPC rate-limiting.

    Runs ``dig ... <name> any @<ns>`` and reformats each answer line
    relative to the zone *dom*.

    :param name: record name to query (also used to grep answer lines).
    :param ns: nameserver to query.
    :param dom: zone name; rewritten to '@' / stripped for zone-relative output.
    :return: list of reformatted record strings.

    NOTE(review): ``name``/``ns`` are interpolated into a shell pipeline
    via os.popen — callers must not pass untrusted input.
    """
    ret = []
    cmdline = ("dig +noadditional +noquestion +nocmd "
               "+nostats +nocomment %s any @%s | grep ^%s" % (name, ns, name))
    for line in os.popen(cmdline, "r"):
        # Collapse runs of whitespace, then make names zone-relative.
        line = re.sub(r'\s+', ' ', line).strip()
        line = re.sub(r'\.%s. ' % (dom), ' ', line)
        line = re.sub(r'^%s. ' % (dom), '@ ', line)
        # Align into columns: name, TTL, class, type, rdata.
        line = "%-30s %6s %3s %6s %s" % tuple(re.split(r'\s+', line, 4))
        ret.append(line)
    return ret
a97be27b50f4e1bcb6a23d806be8d00d249ce375
10,196
def bi2bt(num):
    """Convert an unsigned integer to its big-endian byte sequence.

    (Docstring translated from Chinese.) Returns b'' for 0, matching the
    original loop-based implementation's behavior.

    :param num: non-negative integer.
    :return: bytes, most significant byte first.
    """
    # int.to_bytes replaces the former per-byte loop (quadratic bytes
    # concatenation) and removes the need for the six Py2/Py3 shim.
    return num.to_bytes((num.bit_length() + 7) // 8, 'big')
87d65b017d7c3ea4129fb7f9cfb14cc17055a437
10,197
def calc_max_cpu(records):
    """Returns the CPU usage at the max temperature, ever.

    :param records: a list of records of min, max temp and associated CPU avg load
    :return: a record with the highest recorded temperature, and the
        associated list of CPU loads (as percentages)
    """
    max_temp = 0
    cpu_loads = []
    for entry in records:
        temperature = entry['max']
        load_pct = 100 * entry['cpu']
        if temperature > max_temp:
            # New record temperature: restart the load list.
            max_temp = temperature
            cpu_loads = [load_pct]
        elif temperature == max_temp:
            cpu_loads.append(load_pct)
    return max_temp, cpu_loads
bee21fadb3fcf9cdbbce79f6281f48350b3ffceb
10,198
import os
import codecs


def get_version():
    """
    Extract the __version__ from a file without importing it.

    :return: The version that was extracted.
    :rtype: :py:obj:`str`
    :raises AssertionError: When a version cannot be determined.
    :raises OSError: When the sibling __init__.py is unreadable.
    """
    # Locate __init__.py in the same directory as this module.
    path = os.path.dirname(os.path.abspath(__file__))
    filepath = os.path.join(path, "__init__.py")
    if not os.access(filepath, os.R_OK):
        raise OSError("Cannot open __init__.py file for reading")
    with codecs.open(filepath, encoding="utf-8") as fp:
        for line in fp:
            if line.startswith("__version__"):
                try:
                    _, vers = line.split("=")
                except ValueError:
                    msg = "Cannot extract version from __version__"
                    raise AssertionError(msg)
                # Strip whitespace and both quote styles around the value.
                version = vers.strip().replace('"', "").replace("'", "")
                try:
                    # Require a strict numeric MAJOR.MINOR.PATCH triple.
                    major, minor, patch = version.split(".")
                    digits = (
                        major.isdigit(),
                        minor.isdigit(),
                        patch.isdigit(),
                    )
                    if not all(digits):
                        msg = f"{version} is not a valid version number"
                        raise AssertionError(msg)
                except ValueError:
                    msg = f"{version} is not a valid version number"
                    raise AssertionError(msg)
                return version
    raise AssertionError("No __version__ assignment found")
054b4d2c90a35cca058b00b132663b141f44ee0e
10,199
def datetime_to_str(dt):
    """
    Format a datetime as 'YYYY-MM-DD HH:MM:SS', dropping the fractional
    seconds entirely.
    """
    # Formatting without %f is equivalent to the old "append microseconds
    # then slice off the last 7 characters" approach.
    return dt.strftime('%Y-%m-%d %H:%M:%S')
5a08bb546f8afc8a6dbbbf827389f86d8004de28
10,201
def longest_substring_using_nested_for_loop(s: str) -> int:
    """
    Given a string s, find the length of the longest substring without
    repeating characters.
    https://leetcode.com/problems/longest-substring-without-repeating-characters/

    Rewritten from the original O(n^2) nested scan to an O(n) sliding
    window: ``window_start`` marks the left edge of the current
    duplicate-free window and ``last_seen`` maps each character to its
    most recent index.

    >>> longest_substring_using_nested_for_loop("abcabcbb")
    3
    >>> longest_substring_using_nested_for_loop("bbbbb")
    1
    >>> longest_substring_using_nested_for_loop("pwwkew")
    3
    """
    last_seen: dict = {}
    best = 0
    window_start = 0
    for idx, ch in enumerate(s):
        # A repeat inside the window forces the left edge past its
        # previous occurrence.
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        last_seen[ch] = idx
        best = max(best, idx - window_start + 1)
    return best
996ce5fcb956012cb75fef35c6bec8be6ecad461
10,202
import re


def isSane(filename):
    """Check whether a file name is sane, in the sense that it does not
    contain any "funny" characters (tab, slash, space, ';', ',', '$', '#')
    and does not start with a dash."""
    if not filename:
        return False
    if filename.startswith('-'):
        return False
    # Same character class as before; search once and require no match.
    return re.search('[\t/ ;,$#]', filename) is None
d9e8bac7ebad1fd2af024f8880e255fcb40db68c
10,203
def get_list_of_keys( d, *keys ):
    """
    Create a new dictionary containing only the given subset of keys.

    Parameters
    ==========
    d: dict
        Source mapping; every requested key must be present.
    keys: tuple
        Keys to copy into the new dictionary.

    Raises
    ======
    KeyError
        If any requested key is missing from ``d``.

    Examples
    ========
    >>> origin = { 'a': 'a', 'b': 'b', 'c': 'c' }
    >>> get_list_of_keys( origin, 'b', 'c' )
    {'b': 'b', 'c': 'c'}
    """
    return { key: d[ key ] for key in keys }
c20291f2b3fe720ccf25e9a08dde15927fce97fb
10,204
import csv
import itertools


def csv_read_row(filename, n):
    """
    Read and return nth row of a csv file, counting from 1.
    """
    with open(filename, 'r') as handle:
        rows = csv.reader(handle)
        # islice skips the first n-1 rows lazily; next() yields row n.
        wanted = itertools.islice(rows, n - 1, n)
        return next(wanted)
a95cf8ed35b61acff418a02bfa5f8285f589e6d6
10,205
def load_proto(fpath, proto_type):
    """Load a serialized protobuf message from disk.

    Args:
        fpath: Path to the binary protobuf file.
        proto_type: Message class whose ``FromString`` parses the bytes.

    Returns:
        protobuf: The parsed protobuf message of the model.
    """
    with open(fpath, "rb") as handle:
        raw = handle.read()
    return proto_type().FromString(raw)
7b6bcf6d2e56f2ca4087a9ec769b16a986511c55
10,206
from typing import Tuple
from pathlib import Path


def saved_model(od_detection_learner, tiny_od_data_path) -> Tuple[str, Path]:
    """ A saved model so that loading functions can reuse. """
    model_name = "test_fixture_model"
    od_detection_learner.save(model_name)
    models_dir = Path(tiny_od_data_path) / "models"
    # The save call must have materialized the model on disk.
    assert (models_dir / model_name).exists()
    return model_name, models_dir
383cd80905992775814be6f0024c836ec0c73c3d
10,207
def formatted_value(value, array=True):
    """Format a given input value to be compliant for USD

    Args:
        array (bool): If provided, will treat iterables as an array rather than a tuple
    """
    if isinstance(value, str):
        # Escape embedded double quotes, then wrap in quotes.
        return '"{}"'.format(value.replace('"', '\\"'))
    if isinstance(value, (list, tuple)):
        open_ch, close_ch = ('[', ']') if array else ('(', ')')
        parts = []
        for item in value:
            # Only strings are recursively formatted; other elements use
            # their plain str() form.
            if isinstance(item, str):
                item = formatted_value(item, array=False)
            parts.append(str(item))
        return '{}{}{}'.format(open_ch, ', '.join(parts), close_ch)
    return str(value)
915e3b2952d43f45a7a75ab266802e22702600c8
10,208
from typing import Union from pathlib import Path def _filename(path: Union[str, Path]) -> str: """Get filename and extension from the full path.""" if isinstance(path, str): return Path(path).name return path.name
552655ff66ec6cd42b57ada6080df9dc34db1bd0
10,211
def map_dimensions_to_integers(dimensions):
    """
    FragmentSelector requires percentages expressed as integers.
    https://www.w3.org/TR/media-frags/#naming-space

    Returns a new dict with every value rounded to the nearest integer.
    """
    return {key: round(val) for key, val in dimensions.items()}
e265e71912c6d582cf21d331767acca3511452f2
10,213
def unbox(boxed_pixels):
    """
    assumes the pixels came from box and unboxes them!

    Each row of pixel tuples is flattened into a single row of channel
    values.
    """
    return [
        [channel for pixel in boxed_row for channel in pixel]
        for boxed_row in boxed_pixels
    ]
d74741a206448273330b866d8ec045d59dea02fb
10,214
def fec_old_patch():
    """
    Further Education College UK RSF patch generated with the old
    `rsfcreate.py` script.
    """
    with open("tests/fixtures/fec_old_patch.rsf") as handle:
        contents = handle.read()
    return contents.splitlines()
41f382e1b31be2633a0d2c352e352f8926a8bae0
10,217
import time


def log_str(proc_str, start):
    """Create a preamble string for stdout: the process tag right-aligned
    in brackets, followed by the elapsed seconds since ``start``."""
    elapsed = round(time.time() - start, 3)
    return "[{:>4}] {:.2f}".format(proc_str, elapsed)
5f4f82e758cf87ff643a20928e5c0f7368ef537f
10,218
def linear_kernel(X, Y=None):
    """Compute linear Gram matrix between X and Y (or X)

    Parameters
    ----------
    X: torch.Tensor of shape (n_samples_1, n_features)
        First input on which Gram matrix is computed
    Y: torch.Tensor of shape (n_samples_2, n_features), default None
        Second input on which Gram matrix is computed. X is reused if None

    Returns
    -------
    K: torch.Tensor of shape (n_samples_1, n_samples_2)
        Gram matrix on X/Y
    """
    if Y is None:
        Y = X
    # Explicit matmul form of the original `X @ Y.T`.
    return X.matmul(Y.T)
b1389059e755fa19bb26c88cf8c9f6ab39d51aec
10,219
def split_date(dates):
    """
    Split datetime64 dates into year, month, day components.
    """
    # Years since the 1970 epoch, shifted back to calendar years.
    years = dates.astype("<M8[Y]").astype(int) + 1970
    # Truncate to month resolution once and reuse it for both fields.
    month_start = dates.astype("<M8[M]")
    months = month_start.astype(int) % 12 + 1
    days = (dates - month_start).astype("<m8[D]").astype(int) + 1
    return years, months, days
7152dbdd1f839ef3c401de06631fb0a249c36642
10,220
def hex_to_chars(num):
    """
    Convert the hex representation of the number to a correctly ordered
    string.

    Parameters
    ----------
    num: int
        The integer representing the string. This is in reverse order.
        Ie. XXYYZZ -> chr(ZZ)chr(YY)chr(XX)

    Returns
    -------
    str
        The actual string.
    """
    # Extract the three bytes lowest-first, which yields the correct
    # character order.
    return ''.join(chr((num >> shift) & 0xFF) for shift in (0, 8, 16))
2f87de04dc2442a125ed355b4d6ce2043138700d
10,221
import numpy as np


def calculate_g(lat):
    """calculate the gravitational acceleration with lat in degrees"""
    sin_sq = np.sin(np.deg2rad(lat)) ** 2.0
    numerator = 1.0 + 0.00193185 * sin_sq
    denominator = 1.0 - 0.00669435 * sin_sq
    # Somigliana-style normal gravity formula; 9.780325 is equatorial g.
    return 9.780325 * (numerator / denominator) ** 0.5
41c3e4bfd1678279c2e39529cae0ab7a5602d10a
10,223
def process_failed(returncode, stdout):
    """Return True if the process failed (non-zero exit status, or the
    output flags badly formatted input)."""
    return returncode != 0 or 'badly_formatted' in stdout
2d3c84872acf3bf6592316aa95c39e41a6da849e
10,224
def evaluate_quadratic(J, g, s, diag=None):
    """Compute values of a quadratic function arising in least squares.

    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.

    Parameters
    ----------
    J : torch.Tensor
        Jacobian-like matrix; only used through matrix products.
    g : torch.Tensor
        Gradient vector.
    s : torch.Tensor
        Step vector (1-D) or a batch of steps stacked as rows (2-D);
        the 2-D branch evaluates one quadratic per row.
    diag : torch.Tensor, optional
        Diagonal addition to J.T * J; assumed 1-D — TODO confirm.

    Returns
    -------
    torch.Tensor
        Scalar for a single step, or a 1-D tensor with one value per
        row of ``s``.
    """
    if s.dim() == 1:
        # Single step: q = ||J s||^2 (+ s^T diag s when diag is given).
        Js = J.mv(s)
        q = Js.dot(Js)
        if diag is not None:
            q += s.dot(s * diag)
    else:
        # Batched steps: columns of J s.T correspond to rows of s;
        # summing over dim 0 yields one quadratic term per step.
        Js = J.matmul(s.T)
        q = Js.square().sum(0)
        if diag is not None:
            q += (diag * s.square()).sum(1)
    # Linear term g.T s (per row in the batched case).
    l = s.matmul(g)
    return 0.5 * q + l
cde6067f22ef3a146b80fb5324c0c1a8ce2e65a4
10,225
from typing import List
from pathlib import Path
from typing import Union


def expand_files(
    files: List[Path],
    extension: Union[str, List[str]] = ".sql",
    base_path: Path = Path("."),
) -> List[Path]:
    """
    Expands the list of files or folders, with all SQL files in a folder
    as separate files.

    Plain files are kept as-is; directories are searched recursively for
    each requested extension. Anything else raises.
    """
    extensions = [extension] if isinstance(extension, str) else extension
    expanded: List[Path] = []
    for entry in files:
        rel_file = base_path / entry
        if rel_file.is_file():
            expanded.append(rel_file)
        elif rel_file.is_dir():
            for ext in extensions:
                expanded.extend(rel_file.glob(f"**/*{ext}"))
        else:
            raise Exception(f"unexpected path: {rel_file}")
    return expanded
8243b80a7afaae0426308a1b447f4813eeb5f0c7
10,232
def f1_score_missing(y_true, y_pred, individual=False, micro=False):
    """
    Compute the f1 score, accounting for missing labels in `y_true`.

    Missing labels are encoded as -1 in ``y_true`` and excluded from both
    the true-positive and error counts.

    Args:
        y_true: 2-D tensor of labels; -1 marks "missing". Positive class
            is assumed to be 1 — TODO confirm label encoding.
        y_pred: 2-D tensor of predictions, same shape as ``y_true``.
        individual: if True, return one F1 score per attribute (column).
        micro: if True, pool all attributes into a single sample set
            ("micro" averaging). Mutually exclusive with ``individual``.

    Returns:
        Scalar macro/micro F1, or a per-attribute tensor when
        ``individual`` is True.
    """
    assert not (individual and micro)
    assert y_true.shape == y_pred.shape
    assert len(y_true.shape) == 2
    if micro:
        # average over attributes using "micro" method (treat each attribute as a separate sample)
        y_true = y_true.view(-1)
        y_pred = y_pred.view(-1)

    missing_mask = y_true != -1
    tp = (y_true == y_pred) * (y_true == 1)
    # err counts both false positives and false negatives.
    err = (y_true != y_pred)

    # any missing values don't contribute to the sums
    tp = (tp * missing_mask).sum(0)
    err = (err * missing_mask).sum(0)

    # F1 = TP / (TP + 0.5 * (FP + FN)).
    f1 = (tp / (tp + .5 * err))

    if not individual:
        # if no individual scores desired, average over attributes ("macro" average)
        f1 = f1.mean()

    return f1
71145d714e594943d31fb3486e2b355d2adc9469
10,233
def vdowham(eta, vel_entrain, e_eff, r_eff):
    """
    Calculate the velocity parameter of the contact problem according to
    Dowson-Hamrock.

    Parameters
    ----------
    eta: ndarray, scalar
        The dynamic viscosity of the lubricant.
    vel_entrain: ndarray, scalar
        The entrainment velocity of the contact problem.
    e_eff: ndarray, scalar
        The effective modulus of the contact problem.
    r_eff: ndarray, scalar
        The effective radius of the contact problem.

    Returns
    -------
    param_velocity: ndarray, scalar
        The velocity parameter of the contact problem.
    """
    return eta * vel_entrain / (e_eff * r_eff)
e8abfcc3d312ffce0c192a3b890fe8f3a6785e76
10,234
def compute_convergence(output, output_prev):
    """
    Compute the convergence by comparing with the output from the previous
    iteration. The convergence is measured as the mean of a XOR operation
    on two vectors.

    Parameters
    ----------
    output, output_prev : np.ndarray
        Current and previous iteration binary outputs.

    Returns
    -------
    float
        The model convergence between 0 (fully converged) and 1 (fully
        chaotic), or None if either input is missing.
    """
    if output is None or output_prev is None:
        return None
    flipped = output ^ output_prev
    return flipped.mean()
7ea1e931598ed8dcf8466fb4df327391943187c1
10,235
import sqlite3


def viewData():
    """
    Display data from the database: return every row of the ``book``
    table in ``library.db``.

    Returns:
        list[tuple]: One tuple per row of the ``book`` table.
    """
    con = sqlite3.connect("library.db")
    try:
        cur = con.cursor()
        cur.execute("SELECT * FROM book")
        rows = cur.fetchall()
    finally:
        # Close even when the query raises (e.g. missing table) —
        # the original leaked the connection on error.
        con.close()
    return rows
2717b438fce5986078cfe6ca798d05499465cb57
10,236
import json


def get_profile_name(host, network):
    """
    Get the profile name from Docker

    A profile is created in Docker for each Network object. The profile
    name is a randomly generated string.

    :param host: DockerHost object
    :param network: Network object
    :return: String: profile name
    """
    raw = host.execute("docker network inspect %s" % network.name)
    parsed = json.loads(raw)
    # Network inspect returns one dict per inspected network; we inspect
    # exactly one, so take the first entry.
    return parsed[0]["Id"]
37128df21074c75e53e9567c51301c76578947f2
10,237
import functools


def checker(func, options=None):
    """Checks for producers methods.

    Wraps ``func`` so that after each call it reports:

    * Alteration in objects hash (any ``self.objects`` entry whose hash
      changed during the call).
    * Numpy errors (contents of ``self._np_error_stat``, reset after
      reporting).

    Args:
        func: Bound method to wrap; its first positional argument must
            expose ``.objects``, a mapping of hashable values.
        options: Unused placeholder kept for interface compatibility.

    Returns:
        The wrapped callable, forwarding ``func``'s return value.
    """
    if options is None:
        options = []

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        self = args[0]
        # Prefer the instance logger when present; fall back to print.
        if hasattr(self, 'logger'):
            pfunc = self.logger.info
        else:
            pfunc = print
        # Snapshot hashes before/after to detect in-place mutation.
        hash_before = {key: hash(val) for key, val in self.objects.items()}
        result = func(*args, **kwargs)
        hash_after = {key: hash(val) for key, val in self.objects.items()}
        hash_diff = {key: {'before': hash_before[key],
                           'after': hash_after[key]}
                     for key in hash_before
                     if hash_before[key] != hash_after[key]}
        if hash_diff:
            pfunc(f"Object(s) hash changed:\n"
                  f"    {hash_diff}")
        # NOTE(review): assumes _np_error_stat is a dict populated by a
        # numpy error handler elsewhere — confirm against the producer
        # class before relying on this.
        if hasattr(self, '_np_error_stat'):
            pfunc(f"Numpy error(s) occurs:\n"
                  f"    {self._np_error_stat}")
            self._np_error_stat = {}
        return result
    return wrapper
f65982d4450ddecd7ccdc16f44a8d8e5752432ea
10,239
def _absolute_path(repo_root, path): """Converts path relative to the repo root into an absolute file path.""" return repo_root + "/" + path
bae7013db933e58343f0d6b3f90dfa100de74b7e
10,240
import glob


def task_pot():
    """Re-create .pot ."""
    extract_cmd = ('pybabel extract -F locales/babel-mapping.ini '
                   '-o jackalify.pot jackalify')
    return {
        "actions": [extract_cmd],
        # Rebuild whenever any tracked Python source changes.
        "file_dep": glob.glob('**/*.py', recursive=True),
        "targets": ['jackalify.pot'],
        "clean": True,
    }
7b6996691535f0468887e61942deb1415cc09ca1
10,241
def aic(L, k):
    """
    Akaike information criterion.

    :param L: maximized value of the negative log likelihood function
    :param k: number of free parameters
    :return: AIC
    """
    return 2 * k + 2 * L
159a02cc19d2b4eab30dd13c1cd2b802777277ad
10,242
def prepare_update_sql(list_columns, list_values, data_code=100):
    """
    Build the SET-clause fragment of an UPDATE query.

    Each non-None value produces a `` `column_datacode`= 'value' `` pair;
    column names longer than 49 characters are truncated to 50 before the
    data code suffix is appended. The literal text "None" anywhere in the
    result is replaced by "Null".

    NOTE(review): values are interpolated without escaping — callers must
    supply trusted data (SQL-injection risk).

    :param list_columns: columns that are in need of an update
    :param list_values: values where the columns should be updated with
    :param data_code: data code to add to the columns
    :return: string with the columns and update values
    """
    parts = []
    for idx, column in enumerate(list_columns):
        value = list_values[idx]
        if value is None:
            continue
        if len(column) >= 50:
            label = column[:50] + '_' + str(data_code)
        else:
            label = column + '_' + str(data_code)
        parts.append('`' + label + '`' + '= ' + '\'' + str(value) + '\'' + ', ')
    clause = ''.join(parts).replace("None", "Null")
    # Drop the trailing ", " separator.
    return clause[:-2]
cc3f3c548623bdeb4d2e4e12cc3205baf0a9e9ba
10,245
import os


def read_rttm_lines(rttm_file_path):
    """
    Read rttm files and return the rttm information lines.

    Args:
        rttm_file_path (str): Path to the RTTM file.

    Returns:
        lines (list): List containing the strings from the RTTM file.

    Raises:
        FileNotFoundError: If the path is empty/None or does not exist.
    """
    if not (rttm_file_path and os.path.exists(rttm_file_path)):
        raise FileNotFoundError(
            "Requested to construct manifest from rttm with oracle VAD option or from NeMo VAD but received filename as {}".format(
                rttm_file_path
            )
        )
    # `with` guarantees the handle is closed; the original opened the
    # file and never closed it.
    with open(rttm_file_path, 'r') as f:
        lines = f.readlines()
    return lines
1d47b8470ee56bc752aa70fa7d436e0587603c4b
10,246