Dataset columns:
    content : string (lengths 35 to 416k)
    sha1    : string (length 40)
    id      : int64 (0 to 710k)
def delete_redshift_cluster(config, redshift):
    """
    Deletes the Redshift cluster specified in config

    Args:
        config: a ConfigParser object
        redshift: a boto3 client object for the AWS Redshift service
    """
    try:
        print("Deleting Redshift Cluster: ", config['CLUSTER']['IDENTIFIER'])
        return redshift.delete_cluster(
            ClusterIdentifier=config['CLUSTER']['IDENTIFIER'],
            SkipFinalClusterSnapshot=True
        )
    except Exception as e:
        print(e)
2267eb4f017354563c9a7cf047a3a84983cd0044
7,659
def add_trailing_load(axle_spacing, axle_wt, space_to_trailing_load,
                      distributed_load, span1_begin, span2_end,
                      pt_load_spacing=0.5):
    """Approximates the distributed trailing load as closely spaced point loads.

    The distributed trailing load is approximated as discretely spaced point
    loads. The point load spacing is assumed to be 0.5 unless the user
    specifically enters a different spacing. The number of loads to add is
    determined by dividing the total span length, span 1 plus span 2, by the
    point load spacing.

    Args:
        axle_spacing (list of floats): spacing of axles used for analysis
        axle_wt (list of floats): weight of axles used for analysis
        space_to_trailing_load (float): distance from last discrete axle to
                                        beginning of distributed load
        distributed_load (float): uniformly distributed trailing load magnitude
        span1_begin (float): coordinate location of beginning of span 1
        span2_end (float): coordinate location of end of span 2
        pt_load_spacing (float, optional): spacing of approximate discretely
                                           spaced point loads, defaults to 0.5

    Returns:
        axle_spacing (list of floats): user input axle spacing appended with
                                       axle spacing for discretely spaced
                                       loads to approximate the distributed
                                       load
        axle_wt (list of floats): user input axle weights appended with axle
                                  weights for discretely spaced loads to
                                  approximate the distributed load

    Notes:
        Based on testing it can be shown that a reasonable level of accuracy
        is found in the forces and reactions using a discrete point load
        spacing of 0.5. This spacing assumes the span lengths are entered in
        feet.

        If the user does not want to have a distributed load on the entire
        length of the bridge, it is suggested that the actual axle spacing
        and axle weights of the trailing load are entered and no distributed
        load is specified.
    """
    # approximate a distributed trailing load as closely spaced point loads
    # each point load is the distributed load times the point load spacing
    # the point load spacing is a function of the span length and number of
    # divisions required
    mod_axle_spacing = axle_spacing[:]
    mod_axle_wt = axle_wt[:]

    if space_to_trailing_load < 0.0:
        raise ValueError("Must enter a positive float for space to trailing "
                         "load.")
    elif distributed_load < 0.0:
        raise ValueError("Must enter a positive float for distributed load.")
    elif pt_load_spacing <= 0.0:
        raise ValueError("Must enter a positive float (or nothing for default "
                         "value of 0.5) for the point load spacing.")
    elif distributed_load != 0.0 and space_to_trailing_load != 0.0:
        total_span_length = span2_end - span1_begin
        num_loads = int(total_span_length/pt_load_spacing)
        equivalent_pt_load = distributed_load*pt_load_spacing

        mod_axle_spacing.append(space_to_trailing_load)
        mod_axle_wt.append(equivalent_pt_load)

        for x in range(num_loads):
            mod_axle_spacing.append(pt_load_spacing)
            mod_axle_wt.append(equivalent_pt_load)

    return mod_axle_spacing, mod_axle_wt
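A minimal usage sketch (the loads and spans below are hypothetical, in feet and kip): two axles 5 ft apart weighing 10 kip each, followed by a 1 kip/ft trailing load starting 8 ft behind the last axle, over a 40 ft total span.

spacing, weights = add_trailing_load([5.0], [10.0, 10.0], 8.0, 1.0, 0.0, 40.0)
# 40 ft / 0.5 ft spacing = 80 extra point loads of 1.0 * 0.5 = 0.5 kip each,
# plus one load placed at the start of the distributed region
assert spacing[1] == 8.0 and spacing[2:] == [0.5] * 80
assert weights[2:] == [0.5] * 81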
3eac900cff7d5e66c399e7f846d66aeff3e7389c
7,662
def str_append(string, add):
    """Append add to the end of string, followed by a newline.

    Example: str_append('hou', 'se') returns 'house' plus a trailing newline."""
    return string + str(add) + "\n"
efbc9a085d1e63f290af3e6c447cde13bce5f5d0
7,663
def create_function(treatment_system, parameter, values):
    """ Creates a function based on user input """
    func = str(treatment_system.loc[0, parameter])
    func = func.replace(' ', '')
    for key, value in values.items():
        func = func.replace(key, str(value))
    return func
d0afaf42e50aef943b7551c60f19d180bbbeba0b
7,664
def unformat(combination: str) -> str:
    """Unformats a formatted string to its original state"""
    return str(combination).replace("<", "").replace(">", "")
d017903ddaac78adf5085198d25eb508b62a78b4
7,665
import re


def _size_to_bytes(size):
    """
    Parse a string with a size into a number of bytes. I.e. parses "10m",
    "10MB", "10 M" and other variations into the number of bytes in ten
    megabytes. Floating-point numbers are rounded to the nearest byte.

    :type size: ``str``
    :param size: The size to parse, given as a string with byte unit. No byte
        unit is assumed to be in bytes. Scientific notation is not allowed;
        must be an integer or real number followed by a case-insensitive byte
        unit (e.g. as "k" or "KB" for kilobyte, "g" or "Gb" for gigabyte, or a
        similar convention). Positive/negative sign in front of number is
        allowed.
    :rtype: ``long``
    :return: The number of bytes represented by the given string.
    """
    units = 'KMGTPEZY'  # note that position of letter is same as power - 1

    match = re.search(r'^\s*([-+]?\s*[0-9]*\.?[0-9]*)\s*([' + units + r']?\s*B?\s*S?)\s*',
                      size, re.IGNORECASE)
    if match is None or match.group(1) == '':
        raise ValueError("size string not in proper format 'number [kmgtpezy]': " + size)
    mem_size = float(re.sub(r'\s*', '', match.group(1)))
    unit = re.sub(r'\s*', '', match.group(2)).upper()
    unit = re.sub(r'B?S?$', '', unit)  # remove trailing units symbol
    if unit == '':
        unit_pow = 0
    else:
        unit_pow = units.find(unit) + 1
    byte_size = int(round(mem_size * (1024 ** unit_pow)))
    return byte_size
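A quick check of the expected behavior (input strings are illustrative):

assert _size_to_bytes("10m") == 10 * 1024 ** 2
assert _size_to_bytes("1.5 KB") == 1536
assert _size_to_bytes("42") == 42  # no unit means plain bytes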
833657e51bb2c54b0e86684759e263d2f8b03ffe
7,666
def is_valid_group(group_name, nova_creds):
    """
    Checks to see if the configuration file contains a SUPERNOVA_GROUP
    configuration option.
    """
    valid_groups = [value['SUPERNOVA_GROUP'] for key, value in
                    nova_creds.items()
                    if 'SUPERNOVA_GROUP' in nova_creds[key].keys()]
    return group_name in valid_groups
6f94e88cfcea8775bab3c05a0720ba7df11f68cc
7,669
def insertDoubleQuote(string, index):
    """ Insert a double quote in the specified string at the specified index
    and return the string."""
    return string[:index] + '\"' + string[index:]
00d16f3bc619765895408f9fcdd3a7a6e428b153
7,672
def powerlaw(x, a, b, c):
    """Powerlaw function used by fitting software to characterise uncertainty."""
    return a * x**b + c
e67a0be2f5faaff7867b713b43caec48910bad87
7,673
def readPeakList(peak_file):
    """
    Read in list of peaks to delete from peak_file. Comment lines (#) and
    blank lines are ignored.
    """
    with open(peak_file, 'r') as f:
        peak_list = f.readlines()

    peak_list = [l for l in peak_list if l[0] != "#" and l.strip() != ""]
    peak_list = [l.strip() for l in peak_list]

    return peak_list
7c99f9fb18b36b658fe142a43adf18db7c42c7bd
7,675
import numpy


def get_pandas_field_metadata(pandas_col_metadata, field_name):
    """
    Fetch information for a given column. The column statistics returned will
    be a bit different depending on if the types in the column are a number
    or a string. 'NAN' values are stripped from statistics and don't even
    show up in output.
    """
    pmeta = pandas_col_metadata.get(field_name)

    # Pandas may return numpy.nan for statistics below, or nothing at all.
    # ALL possibly missing values are treated as NAN values and stripped at
    # the end.
    metadata = {
        'name': field_name,
        'type': 'string' if str(pmeta.dtype) == 'object' else str(pmeta.dtype),
        'count': int(pmeta['count']),
        'top': pmeta.get('top', numpy.nan),
        # string statistics
        'unique': pmeta.get('unique', numpy.nan),
        'frequency': pmeta.get('freq', numpy.nan),
        # numerical statistics
        '25': pmeta.get('25%', numpy.nan),
        '50': pmeta.get('50%', numpy.nan),
        '75': pmeta.get('75%', numpy.nan),
        'mean': pmeta.get('mean', numpy.nan),
        'std': pmeta.get('std', numpy.nan),
        'min': pmeta.get('min', numpy.nan),
        'max': pmeta.get('max', numpy.nan),
    }

    # Remove all NAN values
    cleaned_metadata = {k: v for k, v in metadata.items()
                        if isinstance(v, str) or not numpy.isnan(v)}

    # Pandas has special types for things. Coerce them to be regular
    # ints and floats
    for name in ['25', '50', '75', 'mean', 'std', 'min', 'max']:
        if name in cleaned_metadata:
            cleaned_metadata[name] = float(cleaned_metadata[name])
    for name in ['count', 'unique', 'frequency']:
        if name in cleaned_metadata:
            cleaned_metadata[name] = int(cleaned_metadata[name])

    return cleaned_metadata
cbf1a740a202c36fa7b008451d44582e195d71f8
7,678
def deep_replace(arr, x, y):
    """ Helper function for extended_euclid """
    for i in range(len(arr)):
        element = arr[i]
        if isinstance(element, list):
            arr[i] = deep_replace(element, x, y)
        elif element == x:
            arr[i] = y
    return arr
55ef1c7efe04d436f9ce96bda0f565a092131400
7,679
def find_pmp(df):
    """simple function to find Pmp on an IV curve"""
    return df.product(axis=1).max()
5ed7c14bc58a62f6168308ccd1dfa17e56e2db89
7,680
def readable_timedelta(days):
    """Return a string with the number of weeks and days in a number of days."""
    # to get the number of weeks we use integer division
    weeks = days // 7
    # to get the number of days that remain we use %, the modulus operator
    remainder = days % 7
    return "{} week(s) and {} day(s).".format(weeks, remainder)
120f517939842b4e0686a57a3117221e3db63004
7,681
def isMessageBody(line: str) -> bool:
    """
    Returns True if line contains more than just whitespace and is not a
    comment (a line whose first non-space character is #)
    """
    return not (line.isspace() or line.lstrip().startswith('#'))
990ae3ff01f794a6c8d4d45ecb766a763c51dff8
7,683
def get_combinations(limit, numbers_count, combination):
    """Get all combinations of numbers_count numbers summing to limit."""
    if sum(combination) >= limit:
        return None
    if numbers_count == 1:
        return [combination + [limit - sum(combination)]]
    combinations = []
    for number in range(1, limit - numbers_count + 2):
        next_combinations = get_combinations(limit, numbers_count - 1,
                                             combination + [number])
        if next_combinations:
            combinations.extend(next_combinations)
    return combinations
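For illustration, the recursion is seeded with an empty partial combination; here it enumerates the ordered ways to write 5 as a sum of two positive integers:

assert get_combinations(5, 2, []) == [[1, 4], [2, 3], [3, 2], [4, 1]]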
caecaeb8a5ef5f68bc47936c3b4db3d7514b6c52
7,685
def bit_list_to_int(bit_list):
    """
    Converts binary number represented as a list of 0's and 1's into its
    corresponding base 10 integer value.

    Args:
        bit_list: a binary number represented as a list of 0's and 1's

    Returns:
        The base 10 integer value of the input binary number
    """
    bit_string = ''.join([('0', '1')[b] for b in bit_list])
    base_ten_representation = int(bit_string, 2)
    return base_ten_representation
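A quick check, most significant bit first:

assert bit_list_to_int([1, 0, 1, 1]) == 11
assert bit_list_to_int([0]) == 0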
ade66899fe1d23a22c76cccf4ba57e9ad9bf0ba1
7,686
def max_labor_budget_rule(M):
    """
    Put upper bound on total labor cost.

    Using individual shift variables multiplied by their length and a tour
    type specific cost multiplier. Could easily generalize this to make costs
    be a complex function of time of day, day of week, shift length, tour
    type, or some combination thereof.

    :param M: Model
    :return: Constraint rule
    """
    return sum(M.Shift[i, j, w, k, t] * M.lengths[k] * M.tt_cost_multiplier[t]
               for (i, j, w, k, t) in M.okShifts) <= M.labor_budget
f2637e4b2dba8cc4eb6e5afcae57c45d1b9560d7
7,687
import re


def email(value: str):
    """
    Extract email from document

    Example Result: ['[email protected]', '[email protected]']
    """
    _email_pat = r'[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+'
    return re.findall(_email_pat, value)
c8f3dcb4163e99f0aefe7eb42e61b127ffbaa393
7,688
def TransposeTable(table):
    """Transpose a list of lists, using None to extend all input lists to the
    same length.

    For example:
    >>> TransposeTable(
            [[11, 12, 13],
             [21, 22],
             [31, 32, 33, 34]])
    [[11, 21, 31],
     [12, 22, 32],
     [13, None, 33],
     [None, None, 34]]
    """
    transposed = []
    rows = len(table)
    cols = max(len(row) for row in table)
    for x in range(cols):
        transposed.append([])
        for y in range(rows):
            if x < len(table[y]):
                transposed[x].append(table[y][x])
            else:
                transposed[x].append(None)
    return transposed
d53dc20a9eff391560269e818e99d41f8dc2ce94
7,689
import os
import logging


def checksum_from_label(path):
    """Extract checksum from a label rather than calculating it.

    :param path: Product path
    :type path: str
    :return: MD5 Sum for the file indicated by path
    :rtype: str
    """
    checksum = ""
    product_label = path.split(".")[0] + ".xml"
    if os.path.exists(product_label):
        with open(product_label, "r") as lbl:
            for line in lbl:
                if "<md5_checksum>" in line:
                    checksum = line.split("<md5_checksum>")[-1]
                    checksum = checksum.split("</md5_check")[0]
                    logging.warning(
                        f"-- Checksum obtained from existing label:"
                        f" {product_label.split(os.sep)[-1]}"
                    )
                    break
    return checksum
69da6b07d091b0c296d09ec0d9a3d0586c34b978
7,690
def vvisegment2dict(link):
    """Internal routine for converting visveginfo data into a manageable list"""
    start = round(float(link['FromMeasure']), 8)
    slutt = round(float(link['ToMeasure']), 8)

    mydict = {
        'vegref': str(link['County']).zfill(2) + str(link['Municipality']).zfill(2) + ' ' +
                  link['RoadCategory'].upper() + link['RoadStatus'].lower() +
                  str(link['RoadNumber']) +
                  'hp' + str(link['RoadNumberSegment']) + ' m' +
                  str(link['RoadNumberSegmentStartDistance']) + '-' +
                  str(link['RoadNumberSegmentEndDistance']),
        'veglenkesekvensid': link['ReflinkOID'],
        'startposisjon': start,
        'sluttposisjon': slutt,
        'kortform': str(start) + '-' + str(slutt) + '@' + str(link['ReflinkOID']),
        'wkt': link['WKTGeometry']
    }

    return mydict
ad8c5de2065ee2e935b63674c7f4d11ba11bcff8
7,691
def percentage(value):
    """Format a value as a percentage string with 1 point of precision and a
    percent sign."""
    return format(value, ".1%")
43567c120e4994b54a92570405c02934eb989a6f
7,692
def decode(packed_list):
    """
    Decode a run-length packed list of (character, count) tuples back into
    a string.
    """
    decode_str = ""
    list_len = len(packed_list)
    for i in range(list_len):
        # element of packed list
        str_tuple_count = packed_list[i]
        # find number of counting from tuple
        count = str_tuple_count[1]
        while count:
            # add the character in string for all count
            decode_str = decode_str + str_tuple_count[0]
            count = count - 1
    return decode_str
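A quick check with an illustrative run-length encoded input:

assert decode([("a", 3), ("b", 1), ("c", 2)]) == "aaabcc"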
d6fe8de66ead935a14e99b069d3b416089d5a93d
7,695
def travel_space_separated(curr):
    """Print space separated linked list elements."""
    if curr is None:
        return ""
    print(curr.data, end=' ')
    travel_space_separated(curr._next)
19213588f06a569560b236563975bcd6cb5254a0
7,696
def identity_matrix(dim):
    """Construct an identity matrix.

    Parameters
    ----------
    dim : int
        The number of rows and/or columns of the matrix.

    Returns
    -------
    list of list
        A list of `dim` lists, with each list containing `dim` elements.
        The items on the "diagonal" are one. All other items are zero.

    Examples
    --------
    >>> identity_matrix(4)
    [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
    """
    return [[1. if i == j else 0. for i in range(dim)] for j in range(dim)]
dd37f0c7df41478e23dd26df727341a37a201ec1
7,697
def kg2m3H2O_hot(kg):
    """kg -> m^3 (50 C hot water)"""
    return kg/988.1
060b039db404014ab6cbea6aa5e416efc70aa8a2
7,698
import yaml


def load_yaml(file_path):
    """Load a yaml file into a dictionary"""
    try:
        with open(file_path, 'r') as file:
            return yaml.safe_load(file)
    except EnvironmentError:
        # parent of IOError, OSError *and* WindowsError where available
        return None
3d4fa37794bc99c352959e49057d2e9cfb0d4c92
7,700
import time


def timed_call(f, args):
    """Call function f with arguments args and time its run time.

    Args:
        f: The function to call
        args: The arguments to pass to f

    Returns:
        Return the result of the function call and how much time it takes as
        a tuple, e.g. (result, time).
    """
    start_time = time.time()
    result = f(*args)
    elapsed_time = time.time() - start_time
    return result, round(elapsed_time, 3)
e592ecdf5ebb4aa3391b2500b2a3a20d2faa9b40
7,703
def count_words(texts):
    """
    Counts the words in the given texts, ignoring punctuation and the like.

    @param texts - Texts (as a single string or list of strings)
    @return Word count of texts
    """
    if type(texts) is list:
        return sum(len(t.split()) for t in texts)
    return len(texts.split())
f08cbb1dcac3cbd6b62829cf4467167ae9b7694e
7,704
def get_subnetwork(project_id, context):
    """ Gets a subnetwork name. """
    subnet_name = context.properties.get('subnetwork')
    is_self_link = '/' in subnet_name or '.' in subnet_name

    if is_self_link:
        subnet_url = subnet_name
    else:
        subnet_url = 'projects/{}/regions/{}/subnetworks/{}'
        subnet_url = subnet_url.format(
            project_id,
            context.properties['region'],
            subnet_name
        )

    return subnet_url
de0217b7a78d3278d6dbf70db10b4c270aff2b15
7,705
def quadraticEval(a, b, c, x):
    """given all params return the result of quadratic equation a*x^2 + b*x + c"""
    return a*(x**2) + b*x + c
cfb808435b50ec262ec14cd54cf9caf30f2bc4b8
7,706
def stringdb_escape_text(text):
    """Escape text for database_documents.tsv format."""
    return text.replace('\\', '\\\\').replace('\t', '\\t')
5d41b0b224cb314141b669ff721896d04a2fe2e8
7,707
import argparse


def parse_args():
    """ Parse command arguments. """
    parser = argparse.ArgumentParser(
        description='validate data from starburst algo I for test (ex. chessboard test)')
    parser.add_argument('path', help='path to starburst filtered file')
    return parser.parse_args()
544372e75b2dd56923883f13b6c7f3070ecc9e14
7,708
def friends(graph, user):
    """Returns a set of the friends of the given user, in the given graph"""
    return set(graph.neighbors(user))
125c3cc21be4cc29f9ff6f0ff0bb60b35a1074ba
7,710
def diagonal(a, offset=0, axis1=None, axis2=None, extract=True, axes=None):
    """
    diagonal(a, offset=0, axis1=None, axis2=None)

    Return specified diagonals.

    If `a` is 2-D, returns the diagonal of `a` with the given offset, i.e.,
    the collection of elements of the form ``a[i, i+offset]``. If `a` has
    more than two dimensions, then the axes specified by `axis1` and `axis2`
    are used to determine the 2-D sub-array whose diagonal is returned. The
    shape of the resulting array can be determined by removing `axis1` and
    `axis2` and appending an index to the right equal to the size of the
    resulting diagonals.

    Parameters
    ----------
    a : array_like
        Array from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal. Can be positive or
        negative. Defaults to main diagonal (0).
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays from which
        the diagonals should be taken. Defaults to first axis (0).
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays from
        which the diagonals should be taken. Defaults to second axis (1).

    Returns
    -------
    array_of_diagonals : ndarray
        If `a` is 2-D, then a 1-D array containing the diagonal and of the
        same type as `a` is returned unless `a` is a `matrix`, in which case
        a 1-D array rather than a (2-D) `matrix` is returned in order to
        maintain backward compatibility.

        If ``a.ndim > 2``, then the dimensions specified by `axis1` and
        `axis2` are removed, and a new axis inserted at the end corresponding
        to the diagonal.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    Notes
    -----
    Unlike NumPy's, the cuNumeric implementation always returns a copy

    See Also
    --------
    numpy.diagonal

    Availability
    --------
    Multiple GPUs, Multiple CPUs
    """
    return a.diagonal(
        offset=offset, axis1=axis1, axis2=axis2, extract=extract, axes=axes
    )
e18a9ca2dcab7beb5891f701cdc0f26c3943f749
7,711
def ctd_sbe16digi_preswat(p0, t0, C1, C2, C3, D1, D2, T1, T2, T3, T4, T5):
    """
    Description:

        OOI Level 1 Pressure (Depth) data product, which is calculated using
        data from the Sea-Bird Electronics conductivity, temperature and
        depth (CTD) family of instruments. This data product is derived from
        SBE 16Plus instruments outfitted with a digiquartz pressure sensor.
        This applies to the CTDBP-N,O instruments only.

    Implemented by:

        2013-05-10: Christopher Wingard. Initial Code.
        2013-05-10: Christopher Wingard. Minor edits to comments.
        2014-01-31: Russell Desiderio. Standardized comment format.
        2014-01-31: Russell Desiderio. Modified algorithm to use pressure
                    [Hz] (pf) to calculate pressure period instead of
                    pressure [counts] (p0). See SeaBird 16Plus V2 User Manual
                    (reference (2)), page 57, item 5.

    Usage:

        p = ctd_sbe16digi_preswat(p0,t0,C1,C2,C3,D1,D2,T1,T2,T3,T4,T5)

            where

        p = sea water pressure (PRESWAT_L1) [dbar]
        p0 = raw pressure (PRESWAT_L0) [counts]
        t0 = raw temperature from pressure sensor thermistor [counts]
        C1 = digiquartz pressure calibration coefficients
        C2 = digiquartz pressure calibration coefficients
        C3 = digiquartz pressure calibration coefficients
        D1 = digiquartz pressure calibration coefficients
        D2 = digiquartz pressure calibration coefficients
        T1 = digiquartz pressure calibration coefficients
        T2 = digiquartz pressure calibration coefficients
        T3 = digiquartz pressure calibration coefficients
        T4 = digiquartz pressure calibration coefficients
        T5 = digiquartz pressure calibration coefficients

    References:

        OOI (2012). Data Product Specification for Pressure (Depth). Document
            Control Number 1341-00020.
            https://alfresco.oceanobservatories.org/ (See: Company Home >>
            OOI >> Controlled >> 1000 System Level >>
            1341-00020_Data_Product_SPEC_PRESWAT_OOI.pdf)

        OOI (2011). SeaBird 16Plus V2 User Manual. 1341-00020_PRESWAT
            Artifact. https://alfresco.oceanobservatories.org/ (See: Company
            Home >> OOI >> >> REFERENCE >> Data Product Specification
            Artifacts >> 1341-00020_PRESWAT >>
            PRESWAT_SeaBird_16PlusV2_2009.pdf)
    """
    # Convert raw pressure input to frequency [Hz]
    pf = p0 / 256.0

    # Convert raw temperature input to voltage
    tv = t0 / 13107.0

    # Calculate U (thermistor temp):
    U = (23.7 * (tv + 9.7917)) - 273.15

    # Calculate calibration parameters
    C = C1 + C2 * U + C3 * U**2
    D = D1 + D2 * U
    T0 = T1 + T2 * U + T3 * U**2 + T4 * U**3 + T5 * U**4

    # Calculate T (pressure period, in microseconds):
    T = (1.0 / pf) * 1.0e6

    # compute pressure in psi, rescale and compute in dbar and return
    p_psi = C * (1.0 - T0**2 / T**2) * (1.0 - D * (1.0 - T0**2 / T**2))
    p_dbar = (p_psi * 0.689475729) - 10.1325
    return p_dbar
3756752c661773bd74436311a278efdaa3d3913f
7,714
import hashlib


def sha256(file: str):
    """
    Reads a file content and returns its sha256 hash.
    """
    sha = hashlib.sha256()
    with open(file, "rb") as content:
        for line in content:
            sha.update(line)
    return sha.hexdigest()
c6babc2939e25228df25827a5a0b383d6c68dd07
7,715
def lookup_capacity(lookup, environment, ant_type, frequency, bandwidth, generation):
    """
    Use lookup table to find the combination of spectrum bands
    which meets capacity by clutter environment geotype, frequency,
    bandwidth, technology generation and site density.
    """
    if (environment, ant_type, frequency, bandwidth, generation) not in lookup:
        raise KeyError("Combination %s not found in lookup table" %
                       str((environment, ant_type, frequency, bandwidth, generation)))

    density_capacities = lookup[
        (environment, ant_type, frequency, bandwidth, generation)
    ]
    return density_capacities
3bd132f97022acfe33c4bc6d706265e808679eae
7,718
import json


def to_json(response):
    """
    Return a response as JSON.
    """
    assert response.status_code == 200
    return json.loads(response.get_data(as_text=True))
4fb4d62eb8b793363394b6d0759a923f90315072
7,719
def dots2utf8(dots):
    """Convert braille dot numbers to the hex value of the corresponding
    Unicode code point"""
    code = 0
    for number in dots:
        code += 2**(int(number)-1)
    return hex(0x2800 + code)
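For illustration, dots 1, 3 and 5 set bits 0, 2 and 4 (1 + 4 + 16 = 21 = 0x15), which offsets into the braille block at U+2800:

assert dots2utf8([1, 3, 5]) == '0x2815'
print(chr(int(dots2utf8([1, 3, 5]), 16)))  # the braille glyph for dots 1, 3, 5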
0406c3cf18d5dbd66ea35b0862785371cdd68796
7,723
import ast


def doCompare(op, left, right):
    """Perform the given AST comparison on the values"""
    top = type(op)
    if top == ast.Eq:
        return left == right
    elif top == ast.NotEq:
        return left != right
    elif top == ast.Lt:
        return left < right
    elif top == ast.LtE:
        return left <= right
    elif top == ast.Gt:
        return left > right
    elif top == ast.GtE:
        return left >= right
    elif top == ast.Is:
        return left is right
    elif top == ast.IsNot:
        return left is not right
    elif top == ast.In:
        return left in right
    elif top == ast.NotIn:
        return left not in right
b82a1c4d101428cf9ded532d65539cfe3195d8a1
7,725
def get_interface_by_name(interfaces, name):
    """
    Return an interface by its devname

    :param name: interface devname
    :param interfaces: interfaces dictionary provided by interface_inspector
    :return: interface dictionary
    """
    for interface in interfaces:
        if interface['devname'] == name:
            return interface
9d63bf667a0677ba7d0c3fdde2b4b35affc3b72b
7,726
import unicodedata


def trata_texto(texto):
    """
    Normalize text by converting to uppercase and removing
    accents and unwanted whitespace.
    """
    texto = texto.strip().upper()
    texto = unicodedata.normalize("NFKD", texto)
    texto = texto.encode("ascii", "ignore")
    texto = texto.decode("utf-8").upper()
    # print(f"trata texto: \"{texto}\"")
    return texto
0cced9e55fd3fc15a9cdbaa3899519658668025c
7,727
def normalize_trans_probs(p):
    """
    Normalize a set of transition probabilities.

    Parameters
    ----------
    p : pandas.DataFrame, dtype float
        Unnormalized transition probabilities. Indexed by
        source_level_idx, destination_level_idx.

    Returns
    -------
    pandas.DataFrame, dtype float
        Normalized transition probabilities: all probabilities with
        the same source_level_idx sum to one.
        Indexed by source_level_idx, destination_level_idx.
    """
    p_summed = p.groupby(level=0).sum()
    index = p.index.get_level_values("source_level_idx")
    p_norm = p / p_summed.loc[index].values
    p_norm = p_norm.fillna(0.0)
    return p_norm
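A small sketch of the expected input shape, with hypothetical weights (each source row group is rescaled to sum to one):

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(0, 0), (0, 1), (1, 0), (1, 1)],
    names=["source_level_idx", "destination_level_idx"])
p = pd.DataFrame({"prob": [1.0, 3.0, 2.0, 2.0]}, index=idx)
print(normalize_trans_probs(p))  # source 0 -> 0.25/0.75, source 1 -> 0.5/0.5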
d484c4ac08ee785e5451b1aa92ff2b85fc945384
7,728
def get_number_of_ones(n):
    """
    Determine the number of 1s in the binary representation of an integer n.
    """
    return bin(n).count("1")
83fb14c29064008dd9f8e7ecea4c1d9dfae1dafa
7,729
from typing import Iterable
from typing import Dict
from typing import Sequence
import hashlib
import mmap


def verify_checksums(sources: Iterable[str], hashes: Dict[str, Sequence[str]]) -> bool:
    """Verify checksums for local files.

    Prints a message whenever there is a mismatch.

    Args:
        sources: An iterable of source strings.
        hashes: A dictionary of hash name -> hashes. For each entry, the list
            of hashes corresponds to `sources` in order but may be shorter.

    Returns:
        True if no checksums are mismatched, otherwise False.
    """
    valid = True
    hashlib_warnings = set()
    for i, source in enumerate(sources):
        try:
            source_file, _ = source.split("::")
        except ValueError:
            source_file = source
        for hashname, source_hashes in hashes.items():
            try:
                expected_digest = source_hashes[i]
            except IndexError:
                continue
            if expected_digest.lower() == "skip":
                continue
            try:
                h = hashlib.new(hashname)
            except ValueError:
                # Hopefully unlikely. As of the time of writing, all of
                # HASHES are in hashes.algorithms_guaranteed.
                if hashname not in hashlib_warnings:
                    print(
                        f"Warning: Your version of hashlib doesn't support {hashname}"
                    )
                    hashlib_warnings.add(hashname)
                continue
            try:
                with open(source_file, "rb") as f:
                    # Memory map in case the file is large
                    contents = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                    h.update(contents)  # type: ignore
            except FileNotFoundError:
                break  # No point trying other hashes
            actual_digest = h.hexdigest()
            if expected_digest != actual_digest:
                print(source_file)
                print(f"\tExpected ({hashname}): {expected_digest}")
                print(f"\tActual ({hashname}): {actual_digest}")
                valid = False
    return valid
95abd66c3e6a007b8df8b2caaecde8a85a1f0886
7,730
def add_column(colname, desc=None):
    """Adds column in the form of dict."""
    return {'name': colname, 'desc': desc}
e0b985f71e17bfef6096d1433c84b5c163d343ff
7,732
def _extreact_qml_file_info(file):
    """Returns file object in QML-ready format"""
    return {
        "name": file["name"],
        "path": file["path"],
        "isFile": file["is_file"],
        "isDir": file["is_dir"],
        "level": file["level"]
    }
4d28a0c1e440023ca887a693a2aea5dbd71d336b
7,734
def check_cn_match(sv_list, cn_increase, cn_decrease, final_cn):
    """
    Check that the CNV combination produces the right final copy number.
    """
    if sv_list == []:
        return False
    initial_cn = 2
    for sv in sv_list:
        if sv in cn_increase:
            initial_cn += 1
        if sv in cn_decrease:
            initial_cn -= 1
    return initial_cn == final_cn
a9d45b1421b5b6f5ce085df7e2f0c8040bffd163
7,735
def rotateMatrix(m):
    """
    rotate matrix

    :param m: matrix
    :type m: list of list of integers
    :return: rotated matrix
    :rtype: list of list
    """
    for i in range(int(len(m) / 2)):
        last_layer = len(m) - i - 1
        for j in range(i, last_layer):
            offset = j - i
            top = m[i][j]
            m[i][j] = m[last_layer - offset][i]
            m[last_layer - offset][i] = m[last_layer][last_layer - offset]
            m[last_layer][last_layer - offset] = m[j][last_layer]
            m[j][last_layer] = top
    return m
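A quick check showing the rotation is 90 degrees clockwise and performed in place:

assert rotateMatrix([[1, 2], [3, 4]]) == [[3, 1], [4, 2]]
assert rotateMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]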
198c8c28c29c49fee1d26d603e52ec2da43d82a5
7,736
import re


def GetUniqueName(context, name):
    """ Fixup any sbsar naming collision """
    foundName = False
    nameCount = 1
    lastChar = ''
    for sb in context.scene.loadedSbsars:
        if sb.name.startswith(name):
            foundName = True

            # find the highest value suffix on the name
            lastChar = re.findall('^.*_([0-9]+)$', sb.name)
            if len(lastChar) > 0:
                if lastChar[0].isnumeric():
                    currentCount = int(lastChar[0]) + 1
                    if currentCount > nameCount:
                        nameCount = currentCount

    # append the new suffix
    if foundName:
        name = name + '_' + str(nameCount)
    return name
fe8b868ec791af091eb0e78abea2549b7e724adf
7,737
def get_least_sig_bit(band):
    """Return the least significant bit in color value"""
    mask = 0x1
    last_bit = band & mask
    return str(last_bit)
9d56bc5fdbf613f31bf7b21ee08f4f753b3a92db
7,738
import os
import json
import subprocess
import re


def get_status(jobdir, jobid=None):
    """
    Given list of jobs, returns status of each.
    """
    cmd_template = "aws batch describe-jobs --jobs {}"

    if jobid is None:
        print(("Describing jobs in {}/ids/...".format(jobdir)))
        jobs = os.listdir(jobdir + "/ids/")
        for job in jobs:
            with open("{}/ids/{}".format(jobdir, job), "r") as inf:
                submission = json.load(inf)
            cmd = cmd_template.format(submission["jobId"])
            print(("... Checking job {}...".format(submission["jobName"])))
            out = subprocess.check_output(cmd, shell=True)
            status = re.findall(
                '"status": "([A-Za-z]+)",', out.decode("utf-8"))[0]
            print(("... ... Status: {}".format(status)))
        return 0
    else:
        print(("Describing job id {}...".format(jobid)))
        cmd = cmd_template.format(jobid)
        out = subprocess.check_output(cmd, shell=True)
        status = re.findall('"status": "([A-Za-z]+)",', out.decode("utf-8"))[0]
        print(("... Status: {}".format(status)))
        return status
665ca433ca90ed7d0851bfd4d0d1a1e800fcc4ad
7,742
def denormalize_image(image):
    """ Undo normalization of image. """
    image = (image / 2) + 0.5
    return image
c49a1465d89e317a1c8013969fbee913bf705f4a
7,744
def find_overlapping_selections(selections, selection_strings):
    """
    Given a list of atom selections
    (:py:class:`scitbx.array_family.flex.bool` arrays) and corresponding
    selection strings, inspect the selections to determine whether any two
    arrays overlap. Returns a tuple of the first pair of selection strings
    found to overlap, or None if all selections are unique.
    """
    assert (len(selections) == len(selection_strings))
    for i_sel, selection1 in enumerate(selections[:-1]):
        for j_sel in range(i_sel + 1, len(selections)):
            selection2 = selections[j_sel]
            if ((selection1 & selection2).count(True) > 0):
                return (selection_strings[i_sel], selection_strings[j_sel])
    return None
fffd25a98cbb2184d372c5904718f30cd7c97d1a
7,745
import imghdr


def check_bmp(input_file_name):
    """
    Check if filename is a BMP file

    :param input_file_name: input file name
    :type input_file_name: string
    :return whether the file is .bmp
    :rtype boolean
    """
    return 'bmp' == imghdr.what(input_file_name)
3a8749832418d3976825a79a0bd89c7a77649fe8
7,746
import socket


def allowed_gai_family():
    """
    https://github.com/shazow/urllib3/blob/master/urllib3/util/connection.py
    """
    return socket.AF_INET  # force IPv4 (works around a ~2s delay seen with IPv6)
97d983d7c573ba73a2833ca07513a37ce17b521d
7,748
def _get_list_pairs(pairs, idx_class_dict, num_feature):
    """Creates flattened list of (repeated) pairs. The indexing corresponds
    with the flattened list of T values and the flattened list of p-values
    obtained from _get_list_signif_scores().

    Arguments:
        pairs: [(int, int)]
            list of pairs of classes which were compared during interpretation
        idx_class_dict: {int: string}
            dictionary mapping class indices to classes
        num_feature: int
            number of features

    Returns:
        list_pairs: [(string, string)]
            list of pairs of compared classes with length
            num_feature * num_pair
    """
    list_pairs = [[p] * num_feature for p in pairs]
    list_pairs = [p for sublist in list_pairs for p in sublist]  # flatten
    list_pairs = [[idx_class_dict[p[0]], idx_class_dict[p[1]]]
                  for p in list_pairs]  # lookup class
    return list_pairs
fbdff91f18587894a15a9eeb77fc1427779bc6ae
7,749
def evaluate_predictions_per_char(predictions, original_sentences, answers):
    """Evaluates predictions per char, returning the accuracy and lists of
    correct and incorrect sentences."""
    predicted_chars = []
    sentences_with_preds = []
    errors = set()
    correct_sentences = []
    total = 0
    correct = 0
    for pred in predictions:
        predicted_chars.append(pred["predictions"]["predictions"])
        sentences_with_preds.append(pred["predictions"]["text_with_preds"])
    for pred_chars, sent_with_pred, ans, orig_sent in zip(
        predicted_chars, sentences_with_preds, answers, original_sentences
    ):
        sent_correct = True
        for p, a in zip(pred_chars, ans):
            total += 1
            if p != a:
                errors.add(f"{sent_with_pred}\t{orig_sent}\t{ans}")
                sent_correct = False
            else:
                correct += 1
        if sent_correct:
            correct_sentences.append(sent_with_pred)
    acc = correct / total
    return acc, errors, correct_sentences
fb27ce85ad0843e474930802b06ab89849d87aba
7,750
def n2es(x):
    """None/Null to Empty String"""
    if not x:
        return ""
    return x
cf73dd72230040cfc1c71b248b4cdd490004a213
7,751
def get_default_benchmark_simulated_datasets():
    """Default parameter sets to generate simulated data for benchmarking.
    The training periods and forecast horizon are chosen to complement
    default real datasets.

    Every tuple has the following structure:
    (data_name, frequency, training_periods, forecast_horizon)"""
    simulation_parameters = [
        # daily data
        ("daily_simulated", "D", 3*30, [30]),
        ("daily_simulated", "D", 2*365, [365]),
        # hourly data
        ("hourly_simulated", "H", 7*24, [24]),
        ("hourly_simulated", "H", 30*24, [7*24]),
        ("hourly_simulated", "H", 365*24, [6*30*24]),
        ("hourly_simulated", "H", 4*365*24, [365*24])
    ]
    return simulation_parameters
aa0d7017fc693e71c016d80f7e50cc1c9a6cdc24
7,752
import os
import json


def load_config(cfg_path):
    """Load the config from a json file"""
    if not os.path.exists(cfg_path):
        raise RuntimeError('file {} does not exist!'.format(cfg_path))
    with open(cfg_path, 'r') as f:
        cfg = json.load(f)
    return cfg
dcb1f309f7868191854203994b91cb28f759b5dd
7,753
import re


def __remove_punctuation(string):
    """
    Remove all the punctuation symbols and characters in a string.

    :param string: the string where the punctuation characters must be removed.
    :return: a string without punctuation.
    """
    return re.sub("[!@#£$.()/-]", "", string)
bb2015dc040fedb3656099b57b103f7fb9c416b9
7,755
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, make sure they all have the
    same columns by padding columns with None if they are missing."""
    columns = set()
    for record in chunk:
        columns.update(record.keys())
    for record in chunk:
        for column in columns:
            record.setdefault(column, None)
    return chunk
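For illustration, records missing a key picked up elsewhere in the chunk are padded in place:

chunk = [{"a": 1}, {"a": 2, "b": 3}]
print(pad_chunk_columns(chunk))  # [{'a': 1, 'b': None}, {'a': 2, 'b': 3}]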
2e5d91ad03ad613b55bcaea97fd8c0785eec977f
7,756
def get_next_match(map):
    """ This changes the inverse table by removing hits"""
    todelete = []
    retval = None
    for px, s in map.items():
        if len(s) > 1:
            retval = s.pop(), s.pop()
            if retval[0][0] == retval[1][0]:
                s.add(retval[1])
                # print(retval, s)
                retval = None
                continue
            else:
                break
        else:
            todelete.append(px)
    for k in todelete:
        del map[k]
    return retval
a1154773fb466f976c8145787200c6489f800f5f
7,757
import os
import pickle


def read_pickle(text_folder_path, file_name):
    """
    Read a pickled file from a directory.

    Parameters:
        text_folder_path (string): path to the directory where the file is located
        file_name (string): name of file to read.

    Returns:
        read_text (string): text read from the pickle file.
    """
    with open(os.path.join(text_folder_path, file_name), 'rb') as filehandle:
        # read the data as binary data stream
        read_text = pickle.load(filehandle)
    return read_text
046dba6305a7b786715d3af89fbda597851ce684
7,758
def is_negligible(in_text):
    """Checks if text or tail of XML element is either empty string or None"""
    if in_text is None:
        return True
    elif type(in_text) is str:
        if in_text.strip(chr(160) + ' \t\n\r') == '':
            return True
        else:
            return False
    else:
        raise TypeError
3e9e5276e0b58518d942fc3e2a16f64223eb4e0d
7,759
def make_a_tweet(Hash, Message, Reduced):
    """
    Generate a valid tweet using the info passed in.
    """
    tweet = Hash + ': ' + Message
    if Reduced:
        tweet += '…'
    return tweet
1d0c3246874f8a6c9b3cb1b1f7cf27040ff1bd1b
7,761
import os


def search_with_id(student_id):
    """
    Obtain the username for a user with a given Student ID number (if server
    is tied into WPI network).

    :param student_id: Student ID number to use in the search
    :return: The user's network username
    """
    try:
        username = os.popen('id +' + str(student_id) + ' -un').read().replace('\n', '')
        if username not in ['', None]:
            return username
    except Exception:
        pass
    return None
f7792ba2a2c891c22b07988846dd6bd8016676c7
7,763
def remainder(numbers):
    """Function for finding the remainder of 2 numbers divided.

    Parameters
    ----------
    numbers : list
        List of numbers that the user inputs.

    Returns
    -------
    result : int
        Integer that is the remainder of the numbers divided.
    """
    return numbers[0] % numbers[1]
4c17d717ef52a7958af235e06feff802ed9c3802
7,764
def ret_int(potential):
    """Utility function to check the input is an int, including negative."""
    try:
        return int(potential)
    except (ValueError, TypeError):
        return None
682ab4987e94d7d758be5957b610dc1ee72156a1
7,769
def read_file(file_name):
    """Read contents of file."""
    with open(file_name, encoding='utf8') as file:
        return file.read().strip()
cb8e85c076baa97d8f1a5361abe6ab4ee5b9f00c
7,770
def RGB2HEX(color):
    """In: RGB color array
    Out: HEX string"""
    return "#{:02x}{:02x}{:02x}".format(int(color[0]), int(color[1]), int(color[2]))
7283f0a8a72d83496c93084ab5c514a0184682c7
7,771
def signed_area(contour):
    """Return the signed area of a contour.

    Parameters
    ----------
    contour: sequence of x,y coordinates, required

    Returns
    -------
    area: :class:`float`
        Signed area of the given closed contour. Positive if the contour
        coordinates are in counter-clockwise order, negative otherwise.
    """
    result = 0
    for index, (x1, y1) in enumerate(contour):
        x2, y2 = contour[(index + 1) % len(contour)]
        result += (x2 - x1) * (y2 + y1)
    return result
79a60d064fad70afb8902d6d66b980d778215de3
7,772
def set_initxval(constr_func, constr_values):
    """
    Calculates the initial value of xval.

    Args:
        constr_func (:obj:`list`): constraint functions applied.
        constr_values (:obj:`list`): Values of constraint functions applied.

    Returns:
        initial_xval (:obj:`float`): First value of xval
    """
    idx = [i for i, e in enumerate(constr_func) if e == constr_func[0]]
    if len(idx) > 1:
        initial_xval = ((abs(constr_values[idx[0]]) + abs(constr_values[idx[1]]))/2)/100
    else:
        initial_xval = abs(constr_values[idx[0]])/100
    return initial_xval
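A quick sketch with hypothetical constraint names and values; when the first constraint function appears twice, the two values are averaged before scaling:

assert set_initxval(["stress", "stress"], [30.0, 10.0]) == 0.2  # ((30+10)/2)/100
assert set_initxval(["stress", "volume"], [30.0, 10.0]) == 0.3  # 30/100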
434608817ed261538e605b8a8d4e4b53c0749906
7,774
def deep_dictionary_check(dict1: dict, dict2: dict) -> bool:
    """Used to check if all keys and values between two dicts are equal,
    and recurses if it encounters a nested dict."""
    if dict1.keys() != dict2.keys():
        return False
    for key in dict1:
        if isinstance(dict1[key], dict) and not deep_dictionary_check(dict1[key], dict2[key]):
            return False
        elif dict1[key] != dict2[key]:
            return False
    return True
b5011c2c79c79ecc74953e5f44db5c4a62464c07
7,776
def unshared_copy(inList):
    """perform a proper deepcopy of a multi-dimensional list
    (function from http://stackoverflow.com/a/1601774)"""
    if isinstance(inList, list):
        return list(map(unshared_copy, inList))
    return inList
44cfd186e02a70a51cd29a3cdf01c698c1380d02
7,777
def SEARCH(find_text, within_text, start_num=1):
    """
    Returns the position at which a string is first found within text,
    ignoring case (unlike FIND, which is case-sensitive).

    The returned position is 1 if within_text starts with find_text.
    Start_num specifies the character at which to start the search,
    defaulting to 1 (the first character of within_text).

    If find_text is not found, or start_num is invalid, raises ValueError.

    >>> SEARCH("e", "Statements", 6)
    7
    >>> SEARCH("margin", "Profit Margin")
    8
    >>> SEARCH(" ", "Profit Margin")
    7
    >>> SEARCH('"', 'The "boss" is here.')
    5
    >>> SEARCH("gle", "Google")
    4
    >>> SEARCH("GLE", "Google")
    4
    """
    # .lower() isn't always correct for unicode.
    # See http://stackoverflow.com/a/29247821/328565
    return within_text.lower().index(find_text.lower(), start_num - 1) + 1
1afc843583695a801aca28b5013a6afa21221094
7,778
def GetDefaultAndCustomPreset(presets):
    """ Get the default and custom preset values from the saved property group"""
    defaultPreset = ''
    customPreset = ''
    if presets:
        for p in presets:
            if p.name == 'Default':
                defaultPreset = p.value
            if p.name == 'Custom':
                customPreset = p.value
    return defaultPreset, customPreset
ad5ee60ec995a1662f7c674a11ebf11bf16ab3be
7,779
def getvaluelist(doclist, fieldname):
    """
    Returns a list of values of a particular fieldname from all Document
    objects in a doclist
    """
    l = []
    for d in doclist:
        l.append(d.fields[fieldname])
    return l
b85d171b537636477b00021ce717788b5e4735da
7,780
import numpy


def to_cpu_async(array, stream=None):
    """Copies the given GPU array asynchronously to host CPU.

    Args:
        array: Array to be sent to CPU.
        stream (~pycuda.driver.Stream): CUDA stream.

    Returns:
        ~numpy.ndarray: Array on CPU.

        If given ``array`` is already on CPU, then this function just returns
        ``array`` without performing any copy.
    """
    if isinstance(array, numpy.ndarray):
        return array
    return array.get_async(stream=stream)
2149ddf3de42a7ea41e59810dea3151f5eb97d9b
7,781
import numpy


def preprocess_depth(depth_data):
    """
    preprocess depth data

    This function "reverses" the original recorded data and converts it into
    grayscale pixel values. The higher the value of a pixel, the closer it is
    to the camera.

    Parameters
    ----------
    depth_data : numpy.ndarray
        The data coming from the depth channel

    Returns
    -------
    numpy.ndarray :
        The preprocessed depth data, to be saved as an image
    """
    # get background / foreground (i.e. zero-valued pixels are considered as background)
    background = numpy.where(depth_data <= 0)
    foreground = numpy.where(depth_data > 0)

    # trick such that the highest value is the closest to the sensor
    depth_data = depth_data * (-1)
    max_significant = numpy.max(depth_data[foreground])
    min_significant = numpy.min(depth_data[foreground])

    # normalize to 0-255 and set background to zero
    new_depth_data = 255 * ((depth_data - min_significant) /
                            float(max_significant - min_significant))
    new_depth_data[background] = 0
    return new_depth_data
25aa5f13594752f262a27b9ea99ee72c38ab3db7
7,782
def changed_keys(a: dict, b: dict) -> list:
    """Compares two dictionaries and returns list of keys where values are
    different"""
    # Note! This function disregards keys that don't appear in both dictionaries
    return [k for k in (a.keys() & b.keys()) if a[k] != b[k]]
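A quick check; note that with multiple differing keys the order of the result is not guaranteed, since it iterates over a set intersection:

assert changed_keys({"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 9}) == ["b"]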
77ae93614a2c736091886024338c1b4ecb1f6ec1
7,783
def _negation(value):
    """Parse an optional negation after a verb (in a Gherkin feature spec)."""
    if value == "":
        return False
    elif value in [" not", "not"]:
        return True
    else:
        raise ValueError("Cannot parse '{}' as an optional negation".format(value))
c13f06b8a11ecbe948a4c2d710e165e1731f08fd
7,784
import re
import requests
import json


def get_all_stealth_cards():
    """Returns a list of all the Stealth Cards"""

    class StealthCard:
        def __init__(self, card_info):
            self.name = card_info["Name"]
            self.img_url = card_info["ImageUrl"]
            self.cost = card_info["Cost"]
            self.description = card_info["CardText"]
            self.intrigue = 0
            if "Intrigue" in card_info["CardText"]:
                intrigue_val = re.search(r"Intrigue (\d+):", card_info["CardText"])
                if intrigue_val:
                    self.intrigue = int(intrigue_val.group(1))
            if card_info["Influence"]:
                self.influence = {
                    "F": card_info["Influence"].count("F"),
                    "T": card_info["Influence"].count("T"),
                    "J": card_info["Influence"].count("J"),
                    "P": card_info["Influence"].count("P"),
                    "S": card_info["Influence"].count("S"),
                }
            else:
                self.influence = {"F": 0, "T": 0, "J": 0, "P": 0, "S": 0}

    all_stealth_cards = list()
    response = requests.get("https://eternalwarcry.com/content/cards/eternal-cards.json")
    data = json.loads(response.text.encode('utf8'))
    for row in data:
        try:
            if re.search("Stealth", row["CardText"]):
                all_stealth_cards.append(StealthCard(row))
        except KeyError:
            # Card does not contain Card Text.
            pass
    return all_stealth_cards
93abd19276a600f9b344ca5d72a87d7a7f0a9e1a
7,785
from typing import Any
from pathlib import Path


def config_to_ext(conf: Any) -> str:
    """Find the extension(flag) of the configuration"""
    if isinstance(conf, dict):
        return "dict"

    conf = Path(conf)
    out = conf.suffix.lstrip(".").lower()
    if not out and conf.name.lower().endswith("rc"):
        out = "rc"
    if out in ("ini", "rc", "cfg", "conf", "config"):
        return "ini"
    if out == "yml":
        return "yaml"
    return out
53a4c452c050266736d1fddc1bd18634702e2f5a
7,786
def get_status_messages(connection, uid, timeline='home:', page=1, count=30):
    """Fetch the given page of the most recent status messages, by default
    from the home timeline; a profile timeline can also be fetched."""
    # Fetch the IDs of the most recent status messages on the timeline
    statuses = connection.zrevrange('%s%s' % (timeline, uid),
                                    (page - 1) * count, page * count - 1)

    pipe = connection.pipeline(True)
    for sid in statuses:
        pipe.hgetall('status:%s' % sid)

    return filter(None, pipe.execute())
21af458155d7de793b420047178507d1f77296d2
7,787
import warnings


def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'):
    """
    Ensure that all of the columns from dataframe are in the list of
    used_cols. Will raise a helpful UserWarning if otherwise.

    Parameters
    ----------
    num_vars_accounted_for : int.
        Denotes the number of variables used in one's function.
    dataframe : pandas dataframe.
        Contains all of the data to be converted from one format to another.
    data_title : str, optional.
        Denotes the title by which `dataframe` should be referred in the
        UserWarning.

    Returns
    -------
    None.
    """
    dataframe_vars = set(dataframe.columns.tolist())
    num_dataframe_vars = len(dataframe_vars)

    if num_vars_accounted_for == num_dataframe_vars:
        pass

    elif num_vars_accounted_for < num_dataframe_vars:
        msg = "Note, there are {:,} variables in {} but the inputs"
        msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only"
        msg_3 = " account for {:,} variables."

        warnings.warn(msg.format(num_dataframe_vars, data_title) +
                      msg_2 + msg_3.format(num_vars_accounted_for))

    else:  # This means num_vars_accounted_for > num_dataframe_vars
        msg = "There are more variables specified in ind_vars, "
        msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there"
        msg_3 = " are variables in {} ({:,})"

        warnings.warn(msg + msg_2.format(num_vars_accounted_for) +
                      msg_3.format(data_title, num_dataframe_vars))

    return None
0470503c8adac107f85dd628409fc3ca8de641d3
7,788
def arrayizeDict(g):
    """Transforms a dict with unique sequential integer indices into an array"""
    mk = max(g.keys())
    ga = [None] * mk
    for k, v in g.items():
        ga[k - 1] = v
    return ga
d2da3848436be8d47b3f338797eefd87cfa4344c
7,790
def get_values(units, *args):
    """
    Return the values of Quantity objects after optionally converting to units.

    Parameters
    ----------
    units : str or `~astropy.units.Unit` or None
        Units to convert to. The input values are converted to ``units``
        before the values are returned.
    args : `~astropy.units.Quantity`
        Quantity inputs.
    """
    if units is not None:
        result = [a.to_value(unit) for a, unit in zip(args, units)]
    else:
        result = [a.value for a in args]
    return result
462e336fa2f4bcdfd77ba43658c37cf4c6782c75
7,792
def cli(ctx, library_id, filesystem_paths, folder_id="", file_type="auto",
        dbkey="?", link_data_only="", roles=""):
    """Upload a set of files already present on the filesystem of the Galaxy
    server to a library.

    Output:
    """
    return ctx.gi.libraries.upload_from_galaxy_filesystem(
        library_id,
        filesystem_paths,
        folder_id=folder_id,
        file_type=file_type,
        dbkey=dbkey,
        link_data_only=link_data_only,
        roles=roles)
c0b269344da39a2ae9f43280ec1d7bf69a6a345c
7,793
import numpy


def h2e(az, za, lat):
    """
    Horizon to equatorial.

    Convert az/za (radian) to HA/DEC (degrees, degrees) given an observatory
    latitude (degrees)
    """
    sa = numpy.sin(az)
    ca = numpy.cos(az)
    se = numpy.sin(numpy.pi / 2.0 - za)
    ce = numpy.cos(numpy.pi / 2.0 - za)
    sp = numpy.sin(lat * numpy.pi / 180.0)
    cp = numpy.cos(lat * numpy.pi / 180.0)

    # HA,Dec as x,y,z
    x = -ca * ce * sp + se * cp
    y = -sa * ce
    z = ca * ce * cp + se * sp

    # To spherical
    r = numpy.sqrt(x * x + y * y)
    ha = numpy.arctan2(y, x) * 180.0 / numpy.pi
    dec = numpy.arctan2(z, r) * 180.0 / numpy.pi

    return (ha, dec)
89f82c0035eaf9b73d3c2adf07b2ed1145c822f9
7,794
def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
    """Add metadata attributes to DataArray"""
    if dtype_out_vert == 'vert_int':
        if units != '':
            units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)
        else:
            units = '(vertical integral of quantity with unspecified units)'
    data.attrs['units'] = units
    data.attrs['description'] = description
    return data
3bdead2b0b341a065ef1e147660605c9c591c0df
7,795
def load_uvarint_b(buffer):
    """
    Variable int deserialization, synchronous from buffer.

    :param buffer:
    :return:
    """
    result = 0
    idx = 0
    byte = 0x80
    while byte & 0x80:
        byte = buffer[idx]
        result += (byte & 0x7F) << (7 * idx)
        idx += 1
    return result
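A quick check: each byte contributes 7 payload bits, little-endian, with the high bit acting as a continuation flag:

assert load_uvarint_b(bytes([0x2A])) == 42          # single byte, no continuation
assert load_uvarint_b(bytes([0xAC, 0x02])) == 300   # 0x2C + (0x02 << 7)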
f45534114fa310c027e9ff7627a41bfd51950b48
7,796
def parse_print_dur(print_dur):
    """
    Parse formatted string containing print duration to total seconds.

    >>> parse_print_dur(" 56m 47s")
    3407
    """
    h_index = print_dur.find("h")
    hours = int(print_dur[h_index - 2 : h_index]) if h_index != -1 else 0
    m_index = print_dur.find("m")
    minutes = int(print_dur[m_index - 2 : m_index]) if m_index != -1 else 0
    s_index = print_dur.find("s")
    seconds = int(print_dur[s_index - 2 : s_index]) if s_index != -1 else 0
    return hours * 60 * 60 + minutes * 60 + seconds
7b1a29f31ba38e7d25b4dca9600d4be96a1da3ac
7,797
def welcoming():
    """ Welcoming for user """
    return("""
    *************************************************
    **                                             **
    **    Welcome to speech emotion recognition!   **
    **                                             **
    *************************************************
    """)
b3bcd19adda9cc8aa9678e823d1e524ba36f80af
7,798
def _find_vlan(mac, domain_interfaces):
    """
    Given a mac address and a collection of domains and their network
    interfaces, find the domain that is assigned the interface with the
    desired mac address.

    Parameters
    ----------
    mac : str
        The MAC address.
    domain_interfaces : dict
        The list of domain interfaces.

    Returns
    -------
    The domain
    """
    for d, i in domain_interfaces.items():
        if i.get(mac):
            return d
    return None
c4f667dd80146de83157e8966cb34e5867457397
7,799
import os
import re


def canonicalize_path(path, prefix=None):
    """Canonicalize a given path.

    Remove the prefix from the path. Otherwise, if the path starts with
    /build/XXX/package-version then remove this prefix.

    Args:
        path

    Returns:
        Canonicalized path.
    """
    dummy_prefix = '/build/dummy_pkg/pkg-version/'
    if not path.startswith('/'):
        path = dummy_prefix + path
    path = os.path.abspath(path)

    # Remove /build prefix
    prefix_regex = re.match(r'(/build/[^/]*/[^/]*-[^/]*/)(.*)', path)
    if prefix_regex:
        path = prefix_regex.groups()[1]

    return path
47bd737502466f15679047ec77b9d4f5a3cbea33
7,801
def tests():
    """
    Make these Unittests for each function.
    """
    tests_list = [
        'assertNotEqual',
        'assertEqual'
    ]
    return tests_list
cdba4e6293df2231640d6896a5104791cdf073be
7,802