Columns: content (string, lengths 35–416k), sha1 (string, length 40), id (int64, 0–710k)
def create_virtual_cdrom_spec(client_factory, datastore, controller_key,
                              file_path, cdrom_unit_number):
    """Builds spec for the creation of a new Virtual CDROM to the VM."""
    config_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    config_spec.operation = "add"

    cdrom = client_factory.create('ns0:VirtualCdrom')
    cdrom_device_backing = client_factory.create(
        'ns0:VirtualCdromIsoBackingInfo')
    cdrom_device_backing.datastore = datastore
    cdrom_device_backing.fileName = file_path

    cdrom.backing = cdrom_device_backing
    cdrom.controllerKey = controller_key
    cdrom.unitNumber = cdrom_unit_number
    cdrom.key = -1

    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = False
    connectable_spec.connected = True

    cdrom.connectable = connectable_spec
    config_spec.device = cdrom
    return config_spec
8f62c70c432c368f0f94b3c1dc48f38d75093b07
7,270
def plot_mean_and_CI(axes, mean, lb, ub, label, freqs, linestyle='-'):
    """
    Plot mean and confidence boundaries.

    Args:
        axes: plt.axes
        mean: np.ndarray
        lb: np.ndarray
        ub: np.ndarray
        label: string
        freqs: list
        linestyle: string

    Returns:
        plt.axes
    """
    axes.fill_between(freqs, ub, lb, alpha=.25)
    axes.plot(freqs, mean, label=label, marker='o', linestyle=linestyle)
    return axes
77b7ecaa6dddae474495c0a65efafbf08717584c
7,273
def locked(acquire, release):
    """Decorator taking two methods to acquire/release a lock as argument,
    returning a decorator function which will call the inner method after
    having called acquire(self) and will call release(self) afterwards.
    """
    def decorator(f):
        def wrapper(self, *args, **kwargs):
            acquire(self)
            try:
                return f(self, *args, **kwargs)
            finally:
                release(self)
        return wrapper
    return decorator
92f8ae3b36375d14962997436bc1d210510b4cdb
7,274
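A minimal usage sketch for the decorator above — the Resource class and its lock methods are hypothetical, not part of the record:

import threading

class Resource:
    def __init__(self):
        self._lock = threading.Lock()
        self.value = None

    def _acquire(self):
        self._lock.acquire()

    def _release(self):
        self._lock.release()

    @locked(_acquire, _release)
    def update(self, value):
        # body runs between acquire(self) and release(self)
        self.value = value

r = Resource()
r.update(42)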
def check_next_in_request(request):
    """
    Checks for the presence of the word 'next' in the POST data of the
    request object
    :param request:
    :return:
    """
    return 'next' in request.POST
b70ba54fa32234798b842ffcb24834fdcd95c827
7,276
def overlap_slices(large_array_shape, small_array_shape, position):
    """
    Modified version of `~astropy.nddata.utils.overlap_slices`.

    Get slices for the overlapping part of a small and a large array.

    Given a certain position of the center of the small array, with
    respect to the large array, tuples of slices are returned which can be
    used to extract, add or subtract the small array at the given
    position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.

    Parameters
    ----------
    large_array_shape : tuple
        Shape of the large array.
    small_array_shape : tuple
        Shape of the small array.
    position : tuple
        Position of the small array's center, with respect to the large array.
        Coordinates should be in the same order as the array shape.

    Returns
    -------
    slices_large : tuple of slices
        Slices in all directions for the large array, such that
        ``large_array[slices_large]`` extracts the region of the large array
        that overlaps with the small array.
    slices_small : tuple of slices
        Slices in all directions for the small array, such that
        ``small_array[slices_small]`` extracts the region that is inside the
        large array.
    """
    # Get edge coordinates
    edges_min = [int(pos - small_shape // 2) for (pos, small_shape) in
                 zip(position, small_array_shape)]
    edges_max = [int(pos + (small_shape - small_shape // 2)) for
                 (pos, small_shape) in zip(position, small_array_shape)]

    # Set up slices
    slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))
                         for (edge_min, edge_max, large_shape) in
                         zip(edges_min, edges_max, large_array_shape))
    slices_small = tuple(slice(max(0, -edge_min),
                               min(large_shape - edge_min,
                                   edge_max - edge_min))
                         for (edge_min, edge_max, large_shape) in
                         zip(edges_min, edges_max, large_array_shape))

    return slices_large, slices_small
ef86928b3ef619f209247bb72e2e391d14d541c4
7,277
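Usage sketch for overlap_slices (assuming numpy is available): paste a 3x3 stamp centered at the corner of a 5x5 image; the stamp is clipped where it falls outside.

import numpy as np

image = np.zeros((5, 5))
stamp = np.ones((3, 3))
slices_large, slices_small = overlap_slices(image.shape, stamp.shape, (0, 0))
image[slices_large] += stamp[slices_small]  # only the in-bounds 2x2 corner is added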
import numpy


def rotmat(x, origin=(0, 0, 0), upvector=(0, 0, 1)):
    """
    Given a position vector x, find the rotation matrix to r,h,v coordinates.
    """
    x = numpy.asarray(x) - numpy.asarray(origin)
    nr = x / numpy.sqrt((x * x).sum())
    nh = numpy.cross(upvector, nr)
    if all(nh == 0.0):
        nh = numpy.cross((1, 0, 0), nr)
    if all(nh == 0.0):
        nh = numpy.cross((0, 1, 0), nr)
    nh = nh / numpy.sqrt((nh * nh).sum())
    nv = numpy.cross(nr, nh)
    nv = nv / numpy.sqrt((nv * nv).sum())
    return numpy.array([nr, nh, nv])
519d7aaa15dd48bb31aed0059c01f365f4e8118b
7,278
def product(data):
    """
    Generate the product for the entries specified by the data.
    """
    return "tRNA-{aa} ({anticodon})".format(
        aa=data["metadata"]["isotype"],
        anticodon=data["metadata"]["anticodon"],
    )
5f347cfa1fd7d7030fb1b1a5d2e88eb664c831ae
7,280
def get_zygosity(call):
    """Check if a variant position qualifies as a variant

    0,1,2,3 == HOM_REF, HET, UNKNOWN, HOM_ALT"""
    zygdict = dict([(0, "nocall"), (1, "het"), (2, "nocall"), (3, "hom")])
    return zygdict[call]
aff88ed481beeb6406261822eca482430494b6f6
7,281
import torch


def create_batch(sentences, params, dico):
    """
    Convert a list of tokenized sentences into a Pytorch batch

    args:
        sentences: list of sentences
        params: attribute params of the loaded model
        dico: dictionary
    returns:
        word_ids: indices of the tokens
        lengths: lengths of each sentence in the batch
    """
    bs = len(sentences)
    slen = max([len(sent) for sent in sentences])
    word_ids = torch.LongTensor(slen, bs).fill_(params.pad_index)
    for i in range(len(sentences)):
        sent = torch.LongTensor([dico.index(w) for w in sentences[i]])
        word_ids[:len(sent), i] = sent
    lengths = torch.LongTensor([len(sent) for sent in sentences])
    return word_ids, lengths
c08430651ea20f633169187f62f7b22e09bbd17e
7,283
from typing import List


def make_pos_array_and_diff(trials: List[dict]) -> List[dict]:
    """
    Parameters
    ----------
    trials : Non-filtered refinement trials

    Returns
    -------
    A list of dictionaries with updated position and difference of neighbours
    """
    _trials = trials[:]
    for i, c in enumerate(trials):
        x_array = sorted([d['pos'] for d in trials[i]['comb']])
        _trials[i]['pos_array'] = x_array
        # j avoids shadowing the outer loop index
        _trials[i]['diff_array'] = [x_array[j] - x_array[j + 1]
                                    for j in range(len(x_array) - 1)]
    return _trials
ebdb0a63d9399985d11b06ec28d4032a54b22f89
7,284
def coords_json_to_api_pan(ang_clockwise):
    """converts from robot coordinates to API coordinates."""
    return ang_clockwise
6511b7cf196a171095b184d48281f25f97fc6792
7,285
def dict_other_json(imagePath, imageData, shapes, fillColor=None,
                    lineColor=None):
    """
    :param lineColor: list
    :param fillColor: list
    :param imageData: str
    :param imagePath: str
    :return: dict
    """
    return {"imagePath": imagePath,
            "imageData": imageData,
            "shapes": shapes,
            "fillColor": fillColor,
            "lineColor": lineColor}
3742664276d70ce5f037ba99994c8c2e61114107
7,286
def get_ast_field_name(ast):
    """Return the normalized field name for the given AST node."""
    replacements = {
        # We always rewrite the following field names into their proper
        # underlying counterparts.
        '__typename': '@class'
    }
    base_field_name = ast.name.value
    normalized_name = replacements.get(base_field_name, base_field_name)
    return normalized_name
c5cf0acbca963e7dc0d853064a2599b732d6b0d1
7,287
def olivine(piezometer=None):
    """ Data base for olivine piezometers. It returns the material parameter,
    the exponent parameter and a warning with the "average" grain size
    measure to be used.

    Parameters
    ----------
    piezometer : string or None
        the piezometric relation

    References
    ----------
    | Jung and Karato (2001) https://doi.org/10.1016/S0191-8141(01)00005-0
    | Van der Wal et al. (1993) https://doi.org/10.1029/93GL01382

    Assumptions
    -----------
    - The piezometer of Van der Wal (1993) requires entering the linear mean
    apparent grain size in microns calculated from equivalent circular
    diameters (ECD) with no stereological correction. The function will
    convert automatically this value to linear intercept (LI) grain size
    using the De Hoff and Rhines (1968) correction. It is assumed that LI
    was multiplied by 1.5 (correction factor), the final relation is:
    LI = (1.5 / sqrt(4/pi)) * ECD

    - The piezometer of Jung and Karato (2001) requires entering the linear
    mean apparent grain size in microns calculated from equivalent circular
    diameters (ECD) with no stereological correction. The function will
    convert automatically this value to linear intercept (LI) grain size
    using the De Hoff and Rhines (1968) empirical equation. Since LI was
    originally multiplied by 1.5 (correction factor), the final relation is:
    LI = (1.5 / sqrt(4/pi)) * ECD
    """

    if piezometer is None:
        print('Available piezometers:')
        print("'Jung_Karato'")
        print("'VanderWal_wet'")
        print("'Tasaka_wet'")
        return None

    elif piezometer == 'Jung_Karato':
        B, m = 5461.03, 0.85
        warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
        linear_intercepts = True
        correction_factor = 1.5

    elif piezometer == 'VanderWal_wet':
        B, m = 1355.4, 0.75
        warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
        linear_intercepts = True
        correction_factor = 1.5

    elif piezometer == 'Tasaka_wet':
        B, m = 719.7, 0.75
        warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
        linear_intercepts = False
        correction_factor = 1.2

    else:
        olivine()
        raise ValueError('Piezometer name misspelled. Please choose between valid piezometers')

    return B, m, warn, linear_intercepts, correction_factor
387ea9413acdf551abe108ba5ba7dda51e162c51
7,288
from typing import Tuple
import re
import typing


def hex_to_rgb(hex: str, hsl: bool = False) -> Tuple[int, int, int]:
    """Converts a HEX code into RGB or HSL.

    Taken from https://stackoverflow.com/a/62083599/7853533

    Args:
        hex (str): Takes both short as well as long HEX codes.
        hsl (bool): Converts the given HEX code into HSL value if True.

    Returns:
        Tuple[int, int, int]: Tuple of RGB values.

    Raises:
        ValueError: If given value is not a valid HEX code.
    """
    if re.compile(r"#[a-fA-F0-9]{3}(?:[a-fA-F0-9]{3})?$").match(hex):
        div = 255 if hsl else 0
        if len(hex) <= 4:
            rgb = tuple(
                int(int(hex[i] * 2, 16) / div) if div else int(hex[i] * 2, 16)
                for i in (1, 2, 3)
            )
        else:
            rgb = tuple(
                int(int(hex[i : i + 2], 16) / div)
                if div
                else int(hex[i : i + 2], 16)
                for i in (1, 3, 5)
            )
        rgb = typing.cast(Tuple[int, int, int], rgb)
        return rgb
    raise ValueError(f"{hex} is not a valid HEX code.")
2c912dacfcf6c52c21c94c5d7bb9b9763279245d
7,290
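Example calls for hex_to_rgb — both short and long HEX forms parse. Note that, as written, the hsl=True branch truncates the normalized channel values with int(), so the RGB form is the dependable one:

assert hex_to_rgb("#fff") == (255, 255, 255)
assert hex_to_rgb("#1e90ff") == (30, 144, 255)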
def get_mode_C_array(mode_C):
    """Get the cooling operation mode

    Args:
        mode_C(str): cooling system type

    Returns:
        tuple: cooling operation mode
    """
    # Operation mode (cooling)
    if mode_C == '住戸全体を連続的に冷房する方式':
        return tuple(["全館連続"] * 12)
    else:
        return ('居室間歇', '居室間歇', '居室間歇', '居室間歇', '居室間歇',
                None, None, None, None, None, None, None)
6b9bce2eccab7698ec78ef3b843b17f3c3c9200d
7,291
from datetime import datetime


def get_date():
    """ gets current date """
    return datetime.now()
dccf420bc6eb216bf76ee153504696ec1c390b5d
7,292
def singular(plural):
    """ Take a plural English word and turn it into singular

    Obviously, this doesn't work in general. It knows just enough words to
    generate XML tag names for list items. For example, if we have an
    element called 'tracks' in the response, it will be serialized as a list
    without named items in JSON, but we need names for items in XML, so
    those will be called 'track'.
    """
    if plural.endswith('ies'):
        return plural[:-3] + 'y'
    if plural.endswith('s'):
        return plural[:-1]
    raise ValueError('unknown plural form %r' % (plural,))
92ab7e074387d943d5593d759a10b3fafa67deca
7,293
def crop_and_revenue_to_df(financial_annual_overview, waste_adjusted_yields,
                           total_sales, vadded_sales, education_rev,
                           tourism_rev, hospitality_rev, grants_rev):
    """Adding yields and sales information to financial overview

    Notes:
        Adds waste-adjusted yields for crops 1, 2, 3 and 4 with total sales
        to dataframe

    Args:
        financial_annual_overview (dataframe): An annual overview of
            financial data
        waste_adjusted_yields (list of lists): Timeseries of expected yield
            for each crop
        total_sales (list): Timeseries of total sales

    Returns:
        financial_annual_overview (dataframe): Financial overview of
            important data
    """
    for i, w in enumerate(waste_adjusted_yields):
        financial_annual_overview.loc[f"Yield Crop {i+1}"] = w
    financial_annual_overview.loc['Revenue - Crop Sales'] = total_sales
    financial_annual_overview.loc['Revenue - Value-Added Products'] = vadded_sales
    financial_annual_overview.loc['Revenue - Education'] = education_rev
    financial_annual_overview.loc['Revenue - Tourism'] = tourism_rev
    financial_annual_overview.loc['Revenue - Hospitality'] = hospitality_rev
    financial_annual_overview.loc['Revenue - Grants'] = grants_rev
    financial_annual_overview.loc['Total Revenue'] = \
        financial_annual_overview.loc['Revenue - Hospitality'] \
        + financial_annual_overview.loc['Revenue - Crop Sales'] \
        + financial_annual_overview.loc['Revenue - Value-Added Products'] \
        + financial_annual_overview.loc['Revenue - Tourism'] \
        + financial_annual_overview.loc['Revenue - Education']
    # REMOVAL OF + financial_annual_overview.loc['Revenue - Grants']
    return financial_annual_overview
6be32e6f4ff3ae0ed6434313b8bcc2f44192231b
7,294
def filter_rows_via_column_matching(data, column, index=0):
    """
    Filter data, by keeping rows whose particular field index matches the
    column criteria

    It takes parameters:
        data (data in the form of a list of lists)
        column (used as the match criteria for a particular field of the data)
    and optionally:
        index (by default 0, it is the data field used to match against column)

    Note that column can be many iterables, but probably ought to be a set

    It returns a filtered list of lists
    """
    return [row for row in data if row[index] in column]
fcd5548677290a34d94c2eb8d5fefcb2bb50f0b4
7,295
import re


def _normalize_name(name: str) -> str:
    """ Normalizes the given name. """
    return re.sub(r"[^a-zA-Z0-9.\-_]", "_", name)
b38a90c05b0a6ec5a26db6d0da85bed2ae802cea
7,296
def filter_claims_by_date(claims_data, from_date, to_date):
    """Return claims falling in the specified date range."""
    return [
        claim for claim in claims_data
        if (from_date <= claim.clm_from_dt <= to_date)
    ]
d1568d0fd52382bdb3f1f02414f591d5f4da3596
7,297
def unicode2str(content):
    """Convert the unicode element of the content to str recursively."""
    if isinstance(content, dict):
        result = {}
        for key in content.keys():
            result[unicode2str(key)] = unicode2str(content[key])
        return result
    elif isinstance(content, list):
        return [unicode2str(element) for element in content]
    elif isinstance(content, (int, float)):
        return content
    else:
        return content.encode("utf-8")
30618f0305d28646af36bcff488af971643fd142
7,298
import sys


def validate_min_python_version(major, minor, error_msg=None, exit_on_fail=True):
    """If python version does not match AT LEAST the requested values, will
    exit with a non-zero code."""
    version = sys.version_info
    result = False
    if version.major > major:
        return True
    if major == version.major:
        result = version.minor >= minor
    if not result:
        if exit_on_fail:
            msg = (
                error_msg
                if error_msg
                else "Python version {}.{} or higher required for this "
                     "functionality.".format(major, minor)
            )
            sys.exit(msg)
    return result
49f078af83d956b3e099d792b5a364c359991df0
7,299
import os
import argparse


def get_arg_parser():
    """Allows arguments to be passed into this program through the terminal.

    Returns:
        argparse.Namespace: Object containing selected options
    """
    def dir_path(string):
        if os.path.isfile(string):
            return string
        else:
            raise NotADirectoryError(string)

    parser = argparse.ArgumentParser(description="Input document file paths")
    parser.add_argument(
        "csv_path", help="Full path to CSV labeled file", type=dir_path)
    parser.add_argument("output_name", help="Name of output file", type=str)
    return parser.parse_args()
035bb8df1743aca4ba78c17d60836b9f485e769d
7,300
import os


def current_umask() -> int:
    """Get the current umask which involves having to set it temporarily."""
    mask = os.umask(0)
    os.umask(mask)
    return mask
8d24ace1eba3746cb4f38f91c127075c0bce6aaf
7,301
import json


def encode_json(struct):
    """Encode a structure as JSON bytes."""
    return bytes(json.dumps(struct), "utf-8")
6724c0a687a98230a32fef81b3a4447c12d164fc
7,302
def categories_to_errors_dict():
    """Maps common error substrings to categories.

    The categories are keys in the dictionary and the values are arrays of
    strings or arrays of lists. Some error categories, such as User Errors,
    have many subcategories. Each subcategory has its own list, but each
    list is placed in the larger User Error array that maps to the key
    User Error. This function returns a dictionary used to organize error
    logs."""
    # App Errors
    app_errors = ["call_features_rRNA_SEED",
                  "Model likely contains numerical instability",
                  "Object doesnt have required fields",
                  "has invalid provenance reference",
                  "Mandatory arguments missing",
                  "Authentication required for AbstractHandle",
                  "Can't locate object method quality via package",
                  "Can't use an undefined value as an ARRAY reference",
                  "KBaseReport parameter validation status"]

    # User Errors
    general_user_errors = ["Illegal character in object name",
                           "Not one protein family member",
                           "is used for forward and reverse",
                           "duplicate genome display names",
                           "Proteome comparison does not involve genome",
                           "incompatible read library types",
                           "MISSING DOMAIN ANNOTATION FOR",
                           "ALL genomes have no matching Domain Annotation",
                           "There is no taxonomy classification assignment against",
                           "There are no protein translations in genome",
                           "not found in pangenome",
                           "You must include the following additional Genomes in the Pangenome Calculation",
                           "Undefined compound used as reactant",
                           "Duplicate gene ID",
                           "The input directory does not have any files with one of the following extensions",
                           "is not a valid KBase taxon ID.",
                           "Duplicate objects detected in input",
                           "unable to fetch assembly:",
                           "may not read workspace",
                           "No bins produced - skipping the creation",
                           "Must configure at least one of 5 or 3 adapter",
                           "There is no taxonomy classification assignment against",
                           "cannot be accessed",
                           "Object #",
                           "does not exist in the supplied genome"]
    assembly_user_errors = ["There are no contigs to save",
                            "assembly method was not specified",
                            "takes 2 positional arguments but 3 were given"]
    annotation_user_errors = ["Too many contigs",
                              "You must run the RAST SEED Annotation App",
                              "You must supply at least one",
                              "Fasta file is empty.",
                              "Illegal number of separators",
                              "Unable to parse version portion of object reference"]
    blast_user_errors = ["not a valid EXCEL nor TSV file",
                         "Duplicate model names are not permitted",
                         "Must select at least two models to compare",
                         "input_one_sequence", "input_one_ref",
                         "output_one_name", "Query is Empty",
                         "No sequence found in fasta_str"]
    modeling_user_errors = ['not a valid EXCEL nor TSV file',
                            'Duplicate model names are not permitted',
                            'Must select at least two models to compare']
    import_user_errors = ["Both SRA and FASTQ/FASTA file given. Please provide one file type",
                          "FASTQ/FASTA input file type selected. But missing FASTQ/FASTA file",
                          "reads files do not have an equal number of records",
                          "File is not a zip file",
                          "error running command: pigz",
                          "is not a FASTQ file",
                          "Cannot connect to URL",
                          "Invalid FTP Link",
                          "Plasmid assembly requires that one",
                          "Premature EOF",
                          "Reading FASTQ record failed",
                          "Invalid FASTQ",
                          "missing FASTQ/FASTA file",
                          "line count is not divisible by",
                          "But missing SRA file",
                          "Features must be completely contained",
                          "was not found in feature ID list",
                          "unable to parse",
                          "Every feature sequence id must match a fasta sequence id",
                          "Did not recognise the LOCUS line layout",
                          "Could not determine alphabet for",
                          "This FASTA file has non nucleic acid characters",
                          "Not a valid FASTA file",
                          "This FASTA file has non nucleic acid characters",
                          "This FASTA file may have amino acids",
                          "appears more than once in the file",
                          "Duplicate gene ID"]
    other_apps_user_errors = ["missing or empty krona input file",
                              "FeatureSet has multiple reference Genomes",
                              "You must enter either an input genome or input reads"]

    # No such file/dir & other miscellaneous errors
    file_directory_missing = ["No such file or directory"]
    server = ["500 Server closed connection"]
    no_space = ["exit code is 123", "No space left on device"]
    exit_137 = ["exit code is 137"]
    badstatus = ["BadStatusLine"]
    badserver = ["Bad Gateway", "NJSW failed"]
    compression = ["compression tyep 9"]
    job_service = ['Kafka', 'Job service side status']
    lost_connection = ["ProtocolError(Connection aborated)",
                       "Connection has been shutdown"]
    nosuchcontainer = ["No such container"]
    readtimeout = ["ReadTimeError", "504 Gateway Time-out"]
    nooutput = ["Output file is not found"]
    canceled = ["Job was cancelled"]
    noassemblyref = ["does not have reference to the assembly object"]

    user_errors = [general_user_errors, blast_user_errors,
                   annotation_user_errors, modeling_user_errors,
                   assembly_user_errors, other_apps_user_errors,
                   import_user_errors]

    # Construct error dictionary
    error_dictionary = {'User_Error': user_errors,
                        'App Error': app_errors,
                        'NoFileDir': file_directory_missing,
                        '500': server,
                        "NoSpace": no_space,
                        "Exit 137": exit_137,
                        "BadStatusLine": badstatus,
                        'BadServer': badserver,
                        "Compression": compression,
                        "JobService": job_service,
                        "LostConnection": lost_connection,
                        'NoSuchContainer': nosuchcontainer,
                        "NoOutput": nooutput,
                        "Canceled": canceled,
                        "NoAssemblyRef": noassemblyref,
                        "ReadTimeout": readtimeout}
    return error_dictionary
4b59fc9775d3d5d808ecedee7756dad5d302848a
7,303
import re


def rhyme_analyser(poem, rhyme_db):
    """
    Rhyme Analyzer - Print out the rhyme scheme of a poem.

    Given: A string - like
        If you want to smell a rose
        You must put it to your nose.
        If you want to eat some bread
        You must not eat it in bed
        Or I will eat your toes.

    Output: Rhyme scheme of the poem. - AABBA

    Use a rhyme list such as this one.
    """
    lines = poem.lower().split('\n')
    rhyme_letter = {}
    rhyme_scheme = []
    letter = 'A'
    for line in lines:
        last_word = re.sub('[^a-z]', '', line.split(' ')[-1])
        for rhyme in rhyme_db:
            if last_word in rhyme_db[rhyme]:
                if rhyme not in rhyme_letter:
                    rhyme_letter[rhyme] = letter
                    letter = chr(ord(letter) + 1)
                rhyme_scheme.append(rhyme_letter[rhyme])
    return ''.join(rhyme_scheme)
0fb166c92b2e60d1739638b23a3de5dae18ce26b
7,305
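A sketch of rhyme_analyser with a toy rhyme_db (the real rhyme list is external; last words missing from the list are silently skipped):

rhyme_db = {
    "ose": ["rose", "nose", "toes"],
    "ead": ["bread", "bed"],
}
poem = ("If you want to smell a rose\n"
        "You must put it to your nose.\n"
        "If you want to eat some bread\n"
        "You must not eat it in bed\n"
        "Or I will eat your toes.")
print(rhyme_analyser(poem, rhyme_db))  # -> AABBA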
def is_chosen(bbox, area_threshold=None) -> bool:
    """
    Calculate area of a bounding box and return True if area >= threshold

    Args:
        bbox: (x1, y1, width, height)
        area_threshold:

    Returns:
        True/False
    """
    area = bbox[2] * bbox[3]
    if area_threshold is not None:
        if area < area_threshold:
            return False
    return True
091fda7a389a74e92703c6eeca05aec413bc65a5
7,306
def _float(value):
    """Return env var cast as float."""
    return float(value)
254b1e3a542c5a74153cd58d3f43e86dab964028
7,308
def isjsonclass(class_: type) -> bool:
    """Check if a class is jsonclass.

    Args:
        class_ (type): The class to check.

    Returns:
        bool: True if it's a jsonclass, otherwise False.
    """
    return hasattr(class_, '__is_jsonclass__')
3234cf62beb03aa968888dd8ec3b65f4c5f4cab3
7,312
import math


def mu_from_pdiv(pdiv, nobj=3):
    """ Get population count based on divisions per objective for NSGA-III """
    h = int(math.factorial(nobj + pdiv - 1) /
            (math.factorial(pdiv) * math.factorial(nobj - 1)))
    mu = int(h + (4 - h % 4))
    return mu
b4087bcab34a0fe03c6a1d5c9e2d9fb1f47d79f0
7,313
def locals_in_putty():
    """Hard-coded information regarding local variables"""
    locals_d = {
        # k: index
        # v: dict of local properties
        0x14007DA84: {
            6: {'name': 'v6', 'size': 8, 'type_name': '__int64'},
            7: {'name': 'v7', 'size': 8, 'type_name': '__int64'},
            8: {'name': 'v8', 'size': 8, 'type_name': '__int64'},
            9: {'name': 'v9', 'size': 8, 'type_name': '__int64'},
            11: {'name': 'v11', 'size': 8, 'type_name': '__int64'},
            12: {'name': 'v12', 'size': 16, 'type_name': '__int128'},
            13: {'name': 'v13', 'size': 8, 'type_name': '__int64'},
            14: {'name': 'v14', 'size': 8, 'type_name': '__int64'},
            15: {'name': 'v15', 'size': 1, 'type_name': 'char'},
            16: {'name': 'v16', 'size': 1, 'type_name': 'char'},
            17: {'name': 'v17', 'size': 8, 'type_name': '__int64'},
            18: {'name': 'v18', 'size': 16, 'type_name': '__int128'},
            19: {'name': 'v19', 'size': 8, 'type_name': '__int64'},
            20: {'name': 'v20', 'size': 8, 'type_name': '__int64'},
            21: {'name': 'v21', 'size': 8, 'type_name': '__int64'},
            22: {'name': 'v22', 'size': 4, 'type_name': 'int'},
            23: {'name': 'v23', 'size': 4, 'type_name': 'int'},
            24: {'name': 'v24', 'size': 2, 'type_name': '__int16'},
            25: {'name': 'v25', 'size': 1, 'type_name': 'char'},
            26: {'name': 'v26', 'size': 8, 'type_name': '__int64'},
            27: {'name': 'v27', 'size': 4, 'type_name': 'int'},
            28: {'name': 'v28', 'size': 1, 'type_name': 'char'},
            29: {'name': 'v29', 'size': 4, 'type_name': 'int'},
            30: {'name': 'v30', 'size': 4, 'type_name': 'int'},
            31: {'name': 'v31', 'size': 8, 'type_name': 'char *'},
            32: {'name': 'v32', 'size': 8, 'type_name': '__int64'},
            33: {'name': 'v33', 'size': 8, 'type_name': '__int64'},
            34: {'name': 'v34', 'size': 8, 'type_name': '__int64'},
            35: {'name': 'v35', 'size': 8, 'type_name': '__int64'}
        },
        # NOTE:
        # Insert additional functions here
    }
    return locals_d
52398e84aa324f8f751e2ca50bee9ad33be6eeb5
7,315
def dvi(redchan, nirchan):
    """
    DVI: Difference Vegetation Index

    dvi( redchan, nirchan )
    """
    redchan = 1.0 * redchan
    nirchan = 1.0 * nirchan
    result = (nirchan - redchan)
    return result
afa5ffffccc3053598cd3597de1efd3dcfc4cd8f
7,316
def needs_min_max_values(mode, buckets):
    """
    Returns True, if an encoding mode needs minimum and maximum column
    values, otherwise False
    """
    return not buckets and mode in ['one-hot', 'one-hot-gaussian',
                                    'one-hot-gaussian-fluent', 'unary',
                                    'unary-gaussian', 'unary-gaussian-fluent']
50ae2b899e5957347061dd59905290506c460093
7,317
def listed_list(list_list):
    """Return presentable string from given list"""
    return '{} and {}'.format(', '.join(list_list[:-1]), list_list[-1]) if (
        len(list_list) > 1) else list_list[0]
da35296196fff56816fe7b0427985ee278238dab
7,318
import re


def remove_plus_signs(_s: str) -> str:
    """Removes plus signs from string"""
    return re.sub(pattern=r'\+', repl=r'', string=_s)
53cf3117221ce82578a20d75e7eb807c2d41b8fc
7,319
def Intersection(S1x, S1y, D1x, D1y, S2x, S2y, D2x, D2y):
    """
    Find intersection of 2 line segments

    :param S1x: x coordinate of segment 1's start point
    :param S1y: y coordinate of segment 1's start point
    :param D1x: x coordinate of segment 1's end point
    :param D1y: y coordinate of segment 1's end point
    :param S2x: x coordinate of segment 2's start point
    :param S2y: y coordinate of segment 2's start point
    :param D2x: x coordinate of segment 2's end point
    :param D2y: y coordinate of segment 2's end point
    :return: Intersection point [x, y]
    """
    # Shared denominator; zero means the segments are parallel
    denom = (D1y - S1y) * (S2x - D2x) - (D2y - S2y) * (S1x - D1x)
    if denom == 0:
        return [None, None]
    a = (D1y - S1y) * S1x + (S1x - D1x) * S1y
    b = (D2y - S2y) * S2x + (S2x - D2x) * S2y
    x = ((S2x - D2x) * a - (S1x - D1x) * b) / denom
    y = ((D1y - S1y) * b - (D2y - S2y) * a) / denom
    return [x, y]
2dba8839ebf24b55fe3c1b7797e53c7e0c3ed72a
7,321
import os


def write_jobfile(cmd, jobname, sbatchpath='./sbatch/',
                  nodes=1, ppn=1, gpus=0, mem=16, nhours=3):
    """
    Create a job file.

    Args:
        cmd : str, Command to execute.
        jobname : str, Name of the job.
        sbatchpath : str, Directory to store SBATCH file in.
        nodes : int, optional, Number of compute nodes.
        ppn : int, optional, Number of cores per node.
        gpus : int, optional, Number of GPU cores.
        mem : int, optional, Amount, in GB, of memory.
        nhours : int, optional, Running time, in hours.

    Returns:
        jobfile : str, Path to the job file.
    """
    os.makedirs(sbatchpath, exist_ok=True)
    jobfile = os.path.join(sbatchpath, jobname + '.s')
    # logname = os.path.join('log', jobname)

    with open(jobfile, 'w') as f:
        f.write(
            '#! /bin/sh\n'
            + '\n'
            # + '#SBATCH --nodes={}\n'.format(nodes)
            # + '#SBATCH --ntasks-per-node=1\n'
            # + '#SBATCH --cpus-per-task={}\n'.format(ppn)
            + '#SBATCH --mem-per-cpu={}gb\n'.format(mem)
            # + '#SBATCH --partition=xwang_gpu\n'
            + '#SBATCH --gres=gpu:1\n'
            + '#SBATCH --time={}:00:00\n'.format(nhours)
            # + '#SBATCH --mem=128gb\n'
            # + '#SBATCH --job-name={}\n'.format(jobname[0:16])
            # + '#SBATCH --output={}log/{}.o\n'.format(scratchpath, jobname[0:16])
            + '\n'
            # + 'cd {}\n'.format(scratchpath)
            # + 'pwd > {}.log\n'.format(logname)
            # + 'date >> {}.log\n'.format(logname)
            # + 'which python >> {}.log\n'.format(logname)
            # + '{} >> {}.log 2>&1\n'.format(cmd, logname)
            + cmd + '\n'
            + '\n'
            + 'exit 0;\n'
        )
    print(jobfile)
    return jobfile
0493f480fa42eb5dc8d108c59999d8a9430e4669
7,323
def collect_nodes(points, highlighted_nodes=[], color='#79FF06',
                  highlighted_color='blue', width=200, highlighted_width=400):
    """
    Collects the required nodes into the format we need

    Parameters
    ----------
    points : [str, str, ...]
        Nodes of the graph.
    highlighted_nodes : [str, str, ...], optional
        Highlighted nodes of the graph. Defaults to [].
    color : str, optional
        Color of regular nodes. Defaults to '#79FF06'.
    highlighted_color : str, optional
        Color of highlighted nodes. Defaults to 'blue'.
    width : int, optional
        Width of regular nodes. Defaults to 200.
    highlighted_width : int, optional
        Width of highlighted nodes. Defaults to 400.

    Returns
    -------
    result : [(str, {'color': str, 'width': int}), ...]
        List of nodes with their parameters.
    """
    result = []
    for p in points:
        if p in highlighted_nodes:
            result.append((p, {"color": highlighted_color,
                               "width": highlighted_width}))
        else:
            result.append((p, {"color": color, "width": width}))
    return result
a3a218b5f8c8c25a0f13a4154f12779a84726f9d
7,324
def feature_names_from_extractor_list(feature_extractors):
    """
    get a list of feature names from a list of feature extractors

    :param feature_extractors: a list of feature extractors
    :return: a list of the feature names for each extractor (think first row
        of .csv file)
    """
    feature_names = [feature_name
                     for feature_extractor in feature_extractors
                     for feature_name in feature_extractor.get_feature_names()]
    return feature_names
2f23aa860137e70270e9c7d564df5dacfa1d22a2
7,325
import io


def read_txt(filename, encoding='utf-8'):
    """Text file reader."""
    with io.open(filename, 'r', encoding=encoding) as f:
        return f.read()
2ca0d80bddc49b793e8cbc63c513410154b4d460
7,326
def get_job_type(name):
    """Returns job type based on its name."""
    if 'phase1' in name:
        return 'phase1'
    elif 'phase2' in name:
        return 'phase2'
    elif 'dfg' in name:
        return 'dfg'
    else:
        return 'other'
50db4a7833028b0a0944a4b915d82a8cabf91595
7,327
def is_resnet(name):
    """
    Simply checks if name represents a resnet, by convention, all resnet
    names start with 'resnet'
    :param name:
    :return:
    """
    name = name.lower()
    return name.startswith('resnet')
6310d849b76a1006c7c2e97405aa9f0ebc53a78b
7,328
import os
import re
import logging


def convert_output_record(data):
    """Convert data record into a list for output csv"""
    output_record = []

    # Participant ID
    output_record.append(
        os.path.basename(data['filename']).replace('.eaf', ''))

    # Speaker
    output_record.append(data['speaker'])

    # Timestamp
    output_record.append(data['timestamp'])

    # Responsivity
    responsivity = data['Responsivity']
    if re.search(r'passive', responsivity, re.IGNORECASE):
        responsivity = 0
    elif re.search(r'aborative', responsivity, re.IGNORECASE):
        responsivity = 1
    elif re.search(r'disconnected', responsivity, re.IGNORECASE):
        responsivity = 2
    else:
        responsivity = ''
    output_record.append(responsivity)

    # Emotion Words (count)
    match = re.search(r'\d+', data['Emotion Words'])
    if match is not None:
        output_record.append(match.group(0))
    else:
        output_record.append(0)

    # Type of Speech
    type_of_speech = data['Type of Speech']
    if re.search(r'recited', type_of_speech, re.IGNORECASE):
        type_of_speech = 0
    elif re.search(r'spontan', type_of_speech, re.IGNORECASE):
        type_of_speech = 1
    output_record.append(type_of_speech)

    # Directed Speech
    directed_speech = data['Directed Speech']
    if re.search(r'assistant', directed_speech, re.IGNORECASE):
        directed_speech = 0
    elif re.search(r'toddler', directed_speech, re.IGNORECASE):
        directed_speech = 1
    elif data['speaker'] == 0:
        logging.warning("Unexpected annotation value found for "
                        "'Directed Speech': %s", data['Directed Speech'])
    output_record.append(directed_speech)

    # Time Period
    time_period = data['Time Period']
    if re.search(r'story', time_period, re.IGNORECASE):
        time_period = 0
    elif re.search(r'conversation', time_period, re.IGNORECASE):
        time_period = 1
    else:
        logging.warning("Unexpected annotation value found for "
                        "'Time Period': %s", data['Time Period'])
    output_record.append(time_period)

    # Trash (noise)
    output_record.append(data['Trash'])

    return output_record
046fe65ae63e5559e4bccde5817a4c4f238b0467
7,329
def handle_pmid_25502872(filename):
    """Bergseng, ..., Sollid. Immunogenetics 2015 [PMID 25502872]"""
    return None
25a11d19293c59e4ae636b0b26389dbfd6b74955
7,330
from typing import Dict


def invert_dictionary(mapping: Dict) -> Dict:
    """Invert the keys and values of a dictionary."""
    remap = {v: k for k, v in mapping.items()}
    if len(remap) != len(mapping):
        raise ValueError("Dictionary cannot be inverted; some values were "
                         "not unique")
    return remap
911aee48eff3bf0d980e5ad054c77c8a3081e232
7,331
def store_file(file):
    """Stores the file to specified destination"""
    destination = "/".join(["api/test_docs", file.name])
    file.save(destination)
    return destination
1118415a1c1b7c2a33ecc539df7bf27dfd783d16
7,332
def mpl_dict(params):
    """Convert _ to . for easier mpl rcparams grid definition"""
    return {k.replace('_', '.'): v for k, v in params.items()}
a8c4ae683205a1412a73f00e6965cb345ec63a3e
7,333
def test_df_keys():
    """List of keys to be used for populating a bucket with DataFrames"""
    return {
        'avro': ['df.avro'],
        'csv': ['df.csv'],
        'csv.gz': ['df.csv.gz'],
        'csv.zip': ['df.csv.zip'],
        'csv.bz2': ['df.csv.bz2'],
        'csv.xz': ['df.csv.xz'],
        'psv': ['df.psv'],
        'psv.gz': ['df.psv.gz'],
        'psv.zip': ['df.psv.zip'],
        'psv.bz2': ['df.psv.bz2'],
        'psv.xz': ['df.psv.xz'],
        'feather': ['df.feather'],
        'json': ['df.json'],
        'pkl': ['df.pkl', 'df.pickle'],
        'pq': ['df.pq', 'df.parquet']
    }
3b008664744fb6abf8960caebe658ecc2f6525af
7,335
def first_existing(d, keys):
    """Returns the value of the first key in keys which exists in d."""
    for key in keys:
        if key in d:
            return d[key]
    return None
eb9f34f1f5adb0a8e44127fe777e35ca8d36dc04
7,336
import os


def resolve_usr_filename(filename):
    """Resolve the filename to an absolute path.

    :param filename: The input file name.
    """
    full_filename = filename
    if not os.path.isabs(full_filename):
        full_filename = os.path.join(os.path.expanduser("~"), full_filename)
    return os.path.normpath(full_filename)
e239c80c9e11f79c786557d4de3e1fc51a4f791d
7,337
def get_requirements():
    """Return a list of package requirements from the requirements.txt
    file."""
    with open('requirements.txt') as f:
        return f.read().split()
85efbe71d02ced7c5987f08a56a1a966bff2e1ef
7,338
def integer_to_real_mapper(integers):
    """Define Integer Mapping Function Here (If Needed.)"""
    real_numbers = []
    range_of_integers = 32767  # each value goes from 0-32767 (2 ^ length_of_chromosome)
    # will produce a number between 0-1 when multiplied by a value
    # in the range of 0 to 32767 (1/32767 * 32767 = 1) AND (1/32767 * 0 = 0)
    integer_to_real_number_mapper = 1 / range_of_integers
    for integer in integers:
        real_numbers.append(integer * integer_to_real_number_mapper)
    return real_numbers
18cc9dc1efe2dd8122d4953a4256cedd5fe234c9
7,339
import os


def get_feature_normalizer_filename(fold, path, extension='cpickle'):
    """Get normalizer filename

    Parameters
    ----------
    fold : int >= 0
        evaluation fold number
    path : str
        normalizer path
    extension : str
        file extension (Default value='cpickle')

    Returns
    -------
    normalizer_filename : str
        full normalizer filename
    """
    return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
c2956766f891b6285dfb8b1d5a0a7f36c160351f
7,340
def filter_files(file_path, selected=('bacteria', 'virus')):
    """filter files based on filepath"""
    base = file_path.stem
    for s in selected:
        if s in base:
            return True
    return False
e666d091eb41a128d39238181b9fef0396432cf8
7,341
def VerboseCompleter(unused_self, event_object):
    """Completer function that suggests simple verbose settings."""
    if '-v' in event_object.line:
        return []
    else:
        return ['-v']
e536e221f8f3465f72071d969b11b6623359cf58
7,342
def load_input_into_list():
    """
    Takes our input and returns it into a comprehensive list with split terms

    :return: The list of lists for our input
    :rtype: list
    """
    with open("inputs/day2_01.txt", "r") as f:
        return [line.replace('-', ' ').replace(':', '').split(' ')
                for line in f.read().splitlines()]
9d8f1a313712f11249f4612d38ed9831d666287b
7,343
def computeTF(doc_info, freqDict_list):
    """
    tf = (frequency of the term in the doc / total number of terms in the doc)

    :param doc_info:
    :param freqDict_list:
    :return:
    """
    TF_scores = []
    for tempDict in freqDict_list:
        id = tempDict['doc_id']
        for k in tempDict['freq_dict']:
            temp = {'doc_id': id,
                    'TF_score': tempDict['freq_dict'][k] / doc_info[id - 1]['doc_length'],
                    'key': k}
            TF_scores.append(temp)
    return TF_scores
8a8567a98227226bc54de6c41010c9103d10be47
7,344
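A small sketch of the input shapes computeTF expects (hypothetical values, derived from how the function indexes its arguments):

doc_info = [{'doc_id': 1, 'doc_length': 4}]
freqDict_list = [{'doc_id': 1, 'freq_dict': {'cat': 2, 'dog': 1, 'sat': 1}}]
print(computeTF(doc_info, freqDict_list))
# -> [{'doc_id': 1, 'TF_score': 0.5, 'key': 'cat'}, ...]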
import pathlib


def get_file(file_path):
    """
    Get the absolute paths of all files under the given directory
    :param file_path: directory of files
    :param pattern: by default all files are returned; a file-type pattern
        such as pattern="*.py" can also be given
    :return: list of file paths
    """
    all_file = []
    files = pathlib.Path(file_path)
    f = files.rglob('*.md')
    for file in f:
        pure_path = pathlib.PurePath(str(file))
        dirname = pure_path.parent
        name = pure_path.name
        if '-' in name and not name.startswith('pep'):
            new_name = f"{dirname}/pep-{name.split('-')[0].zfill(4)}.md"
            print(new_name)
            target = pathlib.Path(new_name)
            p = pathlib.Path(file)
            p.rename(target)
        all_file.append(file)
    return all_file
b3819e7f8d307d5ad8dcc62442817b99b3d134fc
7,346
def gauss_to_tesla(gauss):
    """Converts gauss to tesla"""
    return gauss * 1e-4
4f0239432a3436fd5c6cad4ae9747c8849289f34
7,347
def createKey(problemData):
    """ Creates the key for a given 'problemData' list of number of item
    types. """
    key = ''
    for itData in problemData:
        key += str(itData) + ','
    # Remove the last comma
    return key[:-1]
420a4e96dc6442ba2ae18f4bca3d8a10f8a19284
7,348
def lfsr_next_one_seed(seed_iter, min_value_shift):
    """High-quality seeding for LFSR generators.

    The LFSR generator components discard a certain number of their lower
    bits when generating each output. The significant bits of their state
    must not all be zero. We must ensure that when seeding the generator.

    In case generators are seeded from an incrementing input (such as a
    system timer), and between increments only the lower bits may change, we
    would also like the lower bits of the input to change the initial state,
    and not just be discarded. So we do basic manipulation of the seed input
    value to ensure that all bits of the seed input affect the initial
    state.
    """
    try:
        seed = next(seed_iter)
    except StopIteration:
        return 0xFFFFFFFF
    else:
        if seed is None:
            return 0xFFFFFFFF
        else:
            seed = int(seed) & 0xFFFFFFFF
            working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF
            min_value = 1 << min_value_shift
            if working_seed < min_value:
                working_seed = (seed << 24) & 0xFFFFFFFF
                if working_seed < min_value:
                    working_seed ^= 0xFFFFFFFF
            return working_seed
9cc82109ff3f491e6f880bdba2f598344556f92c
7,351
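Example of lfsr_next_one_seed — a low-entropy seed is spread across the state word, and an exhausted iterator falls back to all-ones (the min_value_shift value here is an arbitrary illustration):

state = lfsr_next_one_seed(iter([12345]), 1)
assert state >= (1 << 1)
assert lfsr_next_one_seed(iter([]), 1) == 0xFFFFFFFF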
def function_to_dump(hallo, welt, with_default=1):
    """ non class function to be dumped and restored through
    create_pickled_dataset and load_pickled_data """
    return hallo, welt, with_default
09bd551300d6672c685f3eeedd2e37fe528dfe14
7,352
def get_hashtag_spans(tokens):
    """
    Finds the spans (start, end) of subtokens in a list of tokens

    Args:
        tokens: list[str]

    Returns:
        spans: list[tuple[int]]
    """
    is_part = ["##" in t for t in tokens]
    spans = []
    pos_end = -1
    for pos_start, t in enumerate(is_part):
        if pos_start <= pos_end:
            continue
        if t:
            last_pos = len(is_part[pos_start:]) - 1
            for j, t_end in enumerate(is_part[pos_start:]):
                if not t_end:
                    pos_end = pos_start + j
                    break
                if j == last_pos:
                    pos_end = pos_start + j + 1
            spans.append((pos_start, pos_end))
    return spans
2e70466370e1171a2d29636d13c908c2e8b8e30e
7,353
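Example of get_hashtag_spans with WordPiece-style tokens, where "##" marks subword continuations:

tokens = ["un", "##believ", "##able", "news"]
print(get_hashtag_spans(tokens))  # -> [(1, 3)]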
def roundToNearest(number, nearest):
    """
    Rounds a decimal number to the closest value, nearest, given

    Arguments:
        number: [float] the number to be rounded
        nearest: [float] the number to be rounded to

    Returns:
        rounded: [float] the rounded number
    """
    A = 1 / nearest
    rounded = round(number * A) / A
    return rounded
5f3974611b529e93ae8157182ff8b7dbc100a234
7,354
import os


def is_directory(filename):
    """Tells if the file is a directory"""
    return os.path.isdir(filename)
0e6d6c8aa9b666ec7d25d1a353e692bcb5220b06
7,355
def compare_dict_keys(d1, d2):
    """ Returns [things in d1 not in d2, things in d2 not in d1] """
    return [k for k in d1 if k not in d2], [k for k in d2 if k not in d1]
4b68c06d1598e325c5baa5ad8eefaa7af1e82d27
7,356
import os


def icon_path(name):
    """ Load an icon from the res/icons folder using the name without the
    .png """
    path = os.path.dirname(os.path.dirname(__file__))
    return os.path.join(path, 'res', 'icons', '%s.png' % name)
dd8254d4aae3b930623ec46dc6e262b37f8170ca
7,358
def project_name(settings_dict):
    """Transform the base module name into a nicer project name

    >>> project_name({'DF_MODULE_NAME': 'my_project'})
    'My Project'

    :param settings_dict:
    :return:
    """
    return " ".join(
        [
            x.capitalize()
            for x in settings_dict["DF_MODULE_NAME"].replace("_", " ").split()
        ]
    )
07411942978ad769d25234f8c7286aaddc365470
7,359
def parse_config_vars(config_vars):
    """Convert string descriptions of config variable assignment into
    something that CrossEnvBuilder understands.

    :param config_vars: An iterable of strings in the form 'FOO=BAR'
    :returns: A dictionary of name:value pairs.
    """
    result = {}
    for val in config_vars:
        try:
            name, value = val.split('=', 1)
        except ValueError:
            raise ValueError("--config-var must be of the form FOO=BAR")
        result[name] = value
    return result
1bc1b96b6a2b0bf8e42fca0bb4ee9601c883b124
7,360
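Example of parse_config_vars — because of split('=', 1), values may themselves contain '=':

opts = parse_config_vars(["CC=gcc", "CFLAGS=-O2 -g", "EXTRA=a=b"])
assert opts == {"CC": "gcc", "CFLAGS": "-O2 -g", "EXTRA": "a=b"}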
def is_power_of_two(x):
    # type: (int) -> bool
    """Check if `x` is a power of two:

    >>> is_power_of_two(0)
    False
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(2)
    True
    >>> is_power_of_two(3)
    False
    """
    return x > 0 and x & (x - 1) == 0
c657fc5c74dacd2acd7855d99bd277933423b1eb
7,361
def add_malicious_key(entity, verdict):
    """Return the entity with the additional 'Malicious' key if determined
    as such by ANYRUN

    Parameters
    ----------
    entity : dict
        File or URL object.
    verdict : dict
        Task analysis verdict for a detonated file or url.

    Returns
    -------
    dict
        The modified entity if it was malicious, otherwise the original
        entity.
    """
    threat_level_text = verdict.get('threatLevelText', '')
    if threat_level_text.casefold() == 'malicious activity':
        entity['Malicious'] = {
            'Vendor': 'ANYRUN',
            'Description': threat_level_text
        }
    return entity
a20ba12ae04d09047f228a26ef6f39e334225cb3
7,362
def remove(property_name):
    """ Removes the given property.

    :param property: The property (or property identifier) to remove
    :type property: Host Specific

    :return: True if the property was removed
    """
    return None
6f0fd282164d91cf2772ac4155a842a4cb3ccfd2
7,363
from typing import Counter


def count_terms(terms: list) -> dict:
    """
    Count the number of terms
    :param terms: term list
    :return dict_term: The dictionary containing terms and their numbers
    """
    entity_dict = dict(Counter(terms))
    print('There are %s entities in total.\n' % len(entity_dict))
    # print({key: value for key, value in entity_dict.items()})
    return entity_dict
77e362894fbbae3d0cec99daea845734d30e8a2d
7,364
def pretty_duration(seconds):
    """Return a pretty duration string

    Parameters
    ----------
    seconds : float
        Duration in seconds

    Examples
    --------
    >>> pretty_duration(2.1e-6)
    '0.00ms'
    >>> pretty_duration(2.1e-5)
    '0.02ms'
    >>> pretty_duration(2.1e-4)
    '0.21ms'
    >>> pretty_duration(2.1e-3)
    '2.1ms'
    >>> pretty_duration(2.1e-2)
    '21ms'
    >>> pretty_duration(2.1e-1)
    '0.21s'
    >>> pretty_duration(2.1)
    '2.10s'
    >>> pretty_duration(12.1)
    '12.1s'
    >>> pretty_duration(22.1)
    '22s'
    >>> pretty_duration(62.1)
    '1:02'
    >>> pretty_duration(621.1)
    '10:21'
    >>> pretty_duration(6217.1)
    '1:43:37'
    """
    milliseconds = seconds * 1000
    if milliseconds < 1:
        return "{:.2f}ms".format(milliseconds)
    elif milliseconds < 10:
        return "{:.1f}ms".format(milliseconds)
    elif milliseconds < 100:
        return "{:.0f}ms".format(milliseconds)
    elif seconds < 10:
        return "{:.2f}s".format(seconds)
    elif seconds < 20:
        return "{:.1f}s".format(seconds)
    elif seconds < 60:
        return "{:.0f}s".format(seconds)
    else:
        minutes = seconds // 60
        seconds = int(seconds - minutes * 60)
        if minutes < 60:
            return "{minutes:.0f}:{seconds:02}".format(**locals())
        else:
            hours = minutes // 60
            minutes = int(minutes - hours * 60)
            return "{hours:.0f}:{minutes:02}:{seconds:02}".format(**locals())
ceec602cb07ab5c27831c4ed9e1cd552c5b9dde8
7,365
import numpy


def get_buffered_points(points, centroid=None, factor=2.0):
    """ Add buffer to points in a plane. For example, to expand a convex hull

    param points: Points we want to buffer
    param centroid: Centroid of the points
    param factor: Defines scalar product for point vectors

    return numpy array of new point coordinates
    """
    if centroid is None:
        centroid = [0, 0]
    buffered_points = []
    if len(points) == 0:
        return numpy.array(buffered_points)
    centroid = numpy.array(centroid)
    for p in numpy.array(points):
        buffered_points.append(((p - centroid) * factor) + centroid)
    buffered_points = [[numpy.rint(p[0]), numpy.rint(p[1])]
                       for p in buffered_points]
    return numpy.array(buffered_points)
08fdc70a867fddda82bc9b1207587db66852d7fb
7,366
def roman(num):
    """
    Examples
    --------
    >>> roman(4)
    'IV'
    >>> roman(17)
    'XVII'
    """
    tokens = 'M CM D CD C XC L XL X IX V IV I'.split()
    values = 1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1
    result = ''
    for t, v in zip(tokens, values):
        cnt = num // v
        result += t * cnt
        num -= v * cnt
    return result
e0f51cefd16a098336cd28fb3e35249063c9761c
7,367
def create_url(url, data):
    """
    Method which creates new url from base url
    :param url: base url
    :param data: data to append to base url
    :return: new url
    """
    return url + "/" + str(data)
b08fdc2c9e7ecef589ac5208905de8934f667f2b
7,368
import ast


def is_side_effecting(node):
    """ This determines whether node is a statement with possibly arbitrary
    side-effects """
    node = node.value
    return isinstance(node, ast.Call) and isinstance(node.func, ast.Name)
137eabb0cbb1b92b48ef05e2800c10e8e598ed1b
7,369
def read_wordlist_file(file_name):
    """
    Reads a one-word-per-line list of words to accumulate (w,c) counts for.
    Duplicates are collapsed via a dictionary.
    """
    word_list = {}
    with open(file_name, 'r') as f:
        lines = f.readlines()
    for l in lines:
        word = l.strip()
        if len(word) > 0:
            word_list[word] = None
    return list(word_list.keys())
aa304c23431c6c4a462fac21fbf96377dcdcafcf
7,370
import re


def add_locations(string, args):
    """ Adds location links to a snippet string """
    string = string.replace('\xa0', ' ')  # replace non-breaking spaces
    locs = args[0]
    text = args[1]
    for loc in locs:
        pattern = re.compile(r'(?<!=)\b{0}[a-zA-Z-]*\b'.format(loc['location']),
                             flags=re.I)
        for match in re.findall(pattern, string):
            string = re.sub(r'{0}'.format(match),
                            '<a href="/search/?text={2}">{2}</a>'.format(
                                loc['locid'], text, match),
                            string)
            break
    return string
0dca0118e1bc3fae7c6ad5a7c7317f11b2bedb68
7,371
def extract_results(results, mode, limit=50):
    """extract result from json style list returned by download_results:

    parameters:
        results: json style - list with results
        mode: str - "ppa" for questions, "link" for links of organic answers
        limit: int - max number of items per keyword

    Returns a dict mapping each keyword to a list of results
    """
    clean_results = {}
    for r in results:
        if mode == "ppa":
            for item in r['items']:
                if item["type"] == "people_also_ask":
                    ppas = [i['title'] for i in item['items']]
                    clean_results[r['keyword']] = ppas
        if mode == "link":
            links = [item['url'] for item in r['items']
                     if item['type'] == 'organic']
            clean_results[r['keyword']] = links[:limit]
    return clean_results
64a9a159f5499295c3cb52239b0cdc49c0cd6ecd
7,372
import os


def get_last_version():
    """Get the last version number in guids file if exists.

    Returns:
        version number.
    """
    version_cmake_path = os.path.join(os.getcwd(), "cmake",
                                      "firebase_unity_version.cmake")
    with open(version_cmake_path, "r") as f:
        datafile = f.readlines()
        for line in datafile:
            if "FIREBASE_UNITY_SDK_VERSION" in line:
                result = line.split()
                return result[-1].strip("\"")
    return None
18afd90dcc9e5f473e88ff3e391d0d32e85a1415
7,374
def getsize(datadescriptor):
    """Get the size of a data descriptor tuple."""
    if datadescriptor[0] == 'reg':
        size = datadescriptor[1][2]
    elif datadescriptor[0] == 'mem':
        size = datadescriptor[1][1]
    elif datadescriptor[0] == 'heap':
        size = datadescriptor[1][2]
    elif datadescriptor[0] == 'perp':
        size = datadescriptor[1][2]
    elif datadescriptor[0] == 'pmem':
        size = datadescriptor[1][2]
    else:
        return (15, "Not a supported destination type.")
    return (0, size)
feaaa9d0698b58649a55c53ba399a46ba81520b6
7,375
import random


def crossover(parent1, parent2, d):
    """One point crossover

    Args:
        parent1 ((int, (int, int)[])): chromosome of parent1
        parent2 ((int, (int, int)[])): chromosome of parent2
        d (int): Total duration, in durks, of song

    Returns:
        ((int, int)[], (int, int)[]): returns two child genotypes in form
        (genotype1, genotype2)
    """
    genotype1 = parent1[1]
    genotype2 = parent2[1]

    new_genotype1 = []
    new_genotype2_first = []
    new_genotype2_last = []

    # pick random number between 0 and d
    split = random.randint(0, d)

    total_dur = 0
    add_to_genotype1 = True
    for i, (ed, dur) in enumerate(genotype1):
        total_dur += dur
        if not add_to_genotype1:
            new_genotype2_last.append((ed, dur))
        elif total_dur == split:
            new_genotype1.append((ed, dur))
            add_to_genotype1 = False
        elif total_dur > split:
            new_genotype1.append((ed, (split - (total_dur - dur))))
            new_genotype2_last.append((ed, (dur - (split - (total_dur - dur)))))
            add_to_genotype1 = False
        else:
            new_genotype1.append((ed, dur))

    total_dur = 0
    add_to_genotype2 = True
    for i, (ed, dur) in enumerate(genotype2):
        total_dur += dur
        if not add_to_genotype2:
            new_genotype1.append((ed, dur))
        elif total_dur == split:
            new_genotype2_first.append((ed, dur))
            add_to_genotype2 = False
        elif total_dur > split:
            new_genotype2_first.append((ed, (split - (total_dur - dur))))
            new_genotype1.append((ed, (dur - (split - (total_dur - dur)))))
            add_to_genotype2 = False
        else:
            new_genotype2_first.append((ed, dur))

    new_genotype2 = new_genotype2_first + new_genotype2_last
    return (new_genotype1, new_genotype2)
bea05705b758e8e37dc637bec09566fda8978c6b
7,376
def get_j2k_parameters(codestream):
    """Return some of the JPEG 2000 component sample's parameters in `stream`.

    .. deprecated:: 1.2
        Use :func:`~pydicom.pixel_data_handlers.utils.get_j2k_parameters`
        instead

    Parameters
    ----------
    codestream : bytes
        The JPEG 2000 (ISO/IEC 15444-1) codestream data to be parsed.

    Returns
    -------
    dict
        A dict containing the JPEG 2000 parameters for the first component
        sample, will be empty if `codestream` doesn't contain JPEG 2000 data
        or if unable to parse the data.
    """
    try:
        # First 2 bytes must be the SOC marker - if not then wrong format
        if codestream[0:2] != b'\xff\x4f':
            return {}

        # SIZ is required to be the second marker - Figure A-3 in 15444-1
        if codestream[2:4] != b'\xff\x51':
            return {}

        # See 15444-1 A.5.1 for format of the SIZ box and contents
        ssiz = ord(codestream[42:43])
        parameters = {}
        if ssiz & 0x80:
            parameters['precision'] = (ssiz & 0x7F) + 1
            parameters['is_signed'] = True
        else:
            parameters['precision'] = ssiz + 1
            parameters['is_signed'] = False

        return parameters
    except (IndexError, TypeError):
        return {}
722a84eadb6f381a531d09d3b6279b7775bca1d3
7,378
def to_lowercase(word_list):
    """Convert all characters to lowercase from list of tokenized word_list

    Keyword arguments:
        word_list: list of words
    """
    lowercase_word_list = [word.lower() for word in word_list]
    return lowercase_word_list
025e3edaa79723f8656d10a8d52fe16a402644ae
7,379
def convert_to_score(label_name, label_dict):
    """Converts the classification into a [0-1] score.

    A value of 0 meaning non-toxic and 1 meaning toxic.
    """
    if label_name == 'non-toxic':
        return 1 - label_dict[label_name]
    else:
        return label_dict[label_name]
608caf76f62a70d09e1592367daeb2ad3ebae248
7,380
import pandas as pd
import six


def csv2df(csv_string):
    """http://stackoverflow.com/a/22605281"""
    return pd.read_csv(six.StringIO(csv_string), index_col=False)
2aeda7db7f36dac8fcec20e1f09a9299d362dfa6
7,382
def _generate_csv_header_line(*, header_names, header_prefix='', header=True,
                              sep=',', newline='\n'):
    """
    Helper function to generate a CSV header line depending on the
    combination of arguments provided.
    """
    if isinstance(header, str):  # user-provided header line
        header_line = header + newline
    elif not (header is None or isinstance(header, bool)):
        raise ValueError(f"Invalid value for argument `header`: {header}")
    elif header:
        header_line = header_prefix + sep.join(header_names) + newline
    else:
        header_line = ""
    return header_line
b9a7f32404a432d2662c43f4fe6444241698bf37
7,383
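Example calls covering the three accepted forms of the `header` argument:

names = ["id", "name"]
assert _generate_csv_header_line(header_names=names) == "id,name\n"
assert _generate_csv_header_line(header_names=names, header="# custom") == "# custom\n"
assert _generate_csv_header_line(header_names=names, header=False) == ""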
def filter_labeled_genes(genes):
    """Filter genes which already have a label and return number of labels.

    Args:
        genes (dict): dictionary of genes {g_name: gene object}

    Returns:
        dict: dictionary of genes without label {g_name: gene object}
        int: number of distinct labels in the set of labeled genes
    """
    unlabeled_genes = {}
    labels = set()
    for g_name, gene in genes.items():
        if not gene.plot_label:
            unlabeled_genes[g_name] = gene
        else:
            labels.add(gene.plot_labelID)
    num_labels = len(labels)
    return unlabeled_genes, num_labels
4d50580a07ad6825b4c28e7c91780f1964568056
7,384
def get_floss_params(str_floss_options, filename):
    """Helper routine to build the list of commandline parameters to pass
    to Floss."""
    # First parameter is the name of the Floss "main" routine.
    list_floss_params = ['main']

    # Add the options from app.config
    list_options = str_floss_options.split(",")
    for option in list_options:
        list_floss_params.append(option)

    # Add the filename of the binary file.
    list_floss_params.append(filename)
    return list_floss_params
e637c25d299c8217fef31b85a2610ec46e53d1f3
7,385
def is_leap_year(year: int) -> bool:
    """Whether or not a given year is a leap year.

    If year is divisible by:

    +------+-----------------+------+
    |  4   | 100 but not 400 | 400  |
    +======+=================+======+
    | True |      False      | True |
    +------+-----------------+------+

    Args:
        year (int): The year in question

    Returns:
        bool: True if year is a leap year, false otherwise
    """
    def is_div(x: int) -> bool:
        return year % x == 0

    return is_div(4) and ((not is_div(100)) or is_div(400))
e4cca9a2b9f0475aadc763fed679eee8b5dddc4a
7,387
def borders(district, unit):
    """Check if a unit borders a district."""
    if district == []:
        return True
    neighbour_coords = [(unit.x + i, unit.y + j)
                        for i in [1, 0, -1] for j in [1, 0, -1]
                        if bool(i) ^ bool(j)]
    district_coords = [(d_unit.x, d_unit.y) for d_unit in district]
    return bool([i for i in neighbour_coords if i in district_coords])
d95bf55b54f0df63980236def80610dcdc6cbfeb
7,389
def get_step_g(step_f, norm_L2, N=1, M=1):
    """Get step_g compatible with step_f (and L) for ADMM, SDMM, GLMM.
    """
    # Nominally: minimum step size is step_f * norm_L2
    # see Parikh 2013, sect. 4.4.2
    #
    # BUT: For multiple constraints, need to multiply by M.
    # AND: For multiple variables, need to multiply by N.
    # Worst case of constraints being totally correlated, otherwise Z-updates
    # overwhelm X-updates entirely -> blow-up
    return step_f * norm_L2 * N * M
4adec493ff82450ff0546088af86bac2372b862f
7,390
def MakeCircleRange(circle_size, slice_side):
    """factory function that pre computes all circular slices

    slices are assumed to be 2*slice_side+1 in length
    """
    assert (circle_size - 1) % 2 == 0
    slice_collection = {}
    full_indices = list(range(0, circle_size))
    for centre_index in range(circle_size):
        left = centre_index - slice_side
        right = centre_index + slice_side + 1
        if left < 0:
            indices = full_indices[left:] + full_indices[:right]
        elif right > circle_size:
            indices = full_indices[left:] + full_indices[: right - circle_size]
        else:
            indices = full_indices[left:right]
        slice_collection[centre_index] = indices

    def call(centre_index):
        return slice_collection[centre_index]

    return call
f952b7b110233780164916afcef102364c3b7399
7,391
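Example of MakeCircleRange — the precomputed windows wrap around the ring:

circle = MakeCircleRange(5, 1)  # windows of length 2*1+1 = 3
assert circle(0) == [4, 0, 1]
assert circle(4) == [3, 4, 0]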
def ramp_geometric(phi, A):
    """
    Weighted geometric mean according to phi.
    """
    return A[0]**(0.5 * (1. + phi)) * A[1]**(0.5 * (1. - phi))
0f79e345336f4038e03947b10446031bd9f3e404
7,392
def rot13(encoded: str) -> str:
    """
    >>> rot13("har crefbaar abeznyr crafr dh’ha xvyb-bpgrg rfg étny à 1000 bpgrgf, ha vasbezngvpvra rfg pbainvaph dh’haxvybzèger rfg étny à 1024 zègerf.")
    'une personne normale pense qu’un kilo-octet est égal à 1000 octets, un informaticien est convaincu qu’unkilomètre est égal à 1024 mètres.'
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    decoded_alphabet = "nopqrstuvwxyzabcdefghijklm"
    decoded = ""
    for character in encoded:
        if character not in alphabet:
            decoded_character = character
        else:
            decoded_character = decoded_alphabet[alphabet.find(character)]
        decoded += decoded_character
    return decoded
790d260a1cc517c49d3ea6e4c481fba32dca0831
7,395