Dataset columns: content (string, length 35 – 416k), sha1 (string, length 40), id (int64, 0 – 710k)
from datetime import datetime

def get_default_law_key():
    """ Default key needs to be unique timestamp until changed """
    x = str(datetime.now())
    key = x[5:25]
    return key
569d1ff7cc9c6d0e95dded80c1e068ff885e95c7
6,716
import os

def file_parts(file_path):
    """
    Lists a file's parts such as base_path, file name and extension

    Example
        base, name, ext = file_parts('path/to/file/dog.jpg')
        print(base, name, ext)
        --> ('path/to/file/', 'dog', '.jpg')
    """
    base_path, tail = os.path.split(file_path)
    name, ext = os.path.splitext(tail)
    return base_path, name, ext
3f366dcd54bcc6e218655e1df038541a20de66d2
6,719
import numpy

def lazy_matrix_mul(m_a, m_b):
    """
    Multiply the two given matrices.
    Args:
        m_a: input first matrix
        m_b: input second matrix
    Returns:
        return m_a * m_b
    """
    return numpy.matmul(m_a, m_b)
3e58214d944d1962260b747af53dc8c82cc79b40
6,721
import gzip

def read_consanguineous_samples(path, cutoff=0.05):
    """
    Read inbreeding coefficients from a TSV file at the specified path.
    Second column is sample id, 6th column is F coefficient. From PLINK:
    FID, IID, O(HOM), E(HOM), N(NM), F
    Additional columns may be present but will be ignored.
    """
    consanguineous_samples = {}
    # Open gzipped files in text mode so lines are str, not bytes.
    myopen = (lambda p: gzip.open(p, 'rt')) if path.endswith('.gz') else open
    with myopen(path) as inf:
        _ = inf.readline()  # skip header line
        for line in inf:
            cols = line.strip().split()
            if float(cols[5]) > cutoff:
                consanguineous_samples[cols[1]] = True
    return consanguineous_samples
be3515e6704966ae927bfaff2594be9191063889
6,722
def get_xy_coords(xda):
    """Return the dimension name for x and y coordinates
    e.g. XC or XG

    Parameters
    ----------
    xda : xarray DataArray
        with all grid information

    Returns
    -------
    x, y : str
        with e.g. 'XC' or 'YC'
    """
    x = 'XC' if 'XC' in xda.coords else 'XG'
    y = 'YC' if 'YC' in xda.coords else 'YG'
    return x, y
6aca5de1eda17df617027c742a06f97cf77af1d5
6,723
def obv(df, price, volume, obv):
    """
    The On Balance Volume (OBV) is a cumulative total of the up and down
    volume. When the close is higher than the previous close, the volume is
    added to the running total, and when the close is lower than the previous
    close, the volume is subtracted from the running total.

    Parameters:
        df (pd.DataFrame): DataFrame which contains the asset price.
        price (string): the column name of the price of the asset.
        volume (string): the column name of the volume of the asset.
        obv (string): the column name for the on balance volume values.

    Returns:
        df (pd.DataFrame): DataFrame with obv of the asset calculated.
    """
    df["diff"] = df[price].diff()
    df = df.fillna(1)
    df.loc[df["diff"] > 0, obv + "_sign"] = 1
    df.loc[df["diff"] < 0, obv + "_sign"] = -1
    df.loc[df["diff"] == 0, obv + "_sign"] = 0
    volume_sign = df[volume] * df[obv + "_sign"]
    df[obv] = volume_sign.cumsum()
    df.drop(["diff", obv + "_sign"], axis=1, inplace=True)
    return df
19f4c456ed501523d2b349e2766d482bd1fef13b
6,727
def is_data(line):
    """
    Function utilized by itertool's groupby method in determining
    the delimiter between our blocks of data.
    """
    return True if line.strip() else False
da3db970c5c5a3169446513cb4148ffedf598095
6,728
import subprocess

def _cmd_7zip_list(src) -> bytes:
    """List the archive contents via 7z."""
    p = subprocess.run(["7z", "l", "-ba", src, "-p"],
                       capture_output=True, timeout=1)
    return p.stdout or p.stderr
3f4be38dba9cf862576799866270dd7292952509
6,729
def c_string_arguments(encoding='UTF-8', *strings):
    """
    Convenience function intended to be passed to in_format which allows
    easy arguments which are lists of null-terminated strings.
    """
    payload = b""
    # Add each string, followed by a null character.
    for string in strings:
        payload += string.encode(encoding)
        payload += b"\0"
    return payload
5c93afae01d199f31a27f658e133024c8fb9f92f
6,730
def anneal(c_max, step, iteration_threshold):
    """Anneal function for anneal_vae (https://arxiv.org/abs/1804.03599).

    Args:
        c_max: Maximum capacity.
        step: Current step.
        iteration_threshold: How many iterations to reach c_max.

    Returns:
        Capacity annealed linearly until c_max.
    """
    return min(c_max * 1., c_max * 1. * (step) / iteration_threshold)
ca6cbb5fe109e5d6b36870b398604ee79042827f
6,731
import json

def update_timer_interval(acq_state, chart_data_json_str, chart_info_json_str,
                          active_channels, samples_to_display):
    """
    A callback function to update the timer interval. The timer is temporarily
    disabled while processing data by setting the interval to 1 day and then
    re-enabled when the data read has been plotted. The interval value when
    enabled is calculated based on the data throughput necessary with a
    minimum of 500 ms and maximum of 4 seconds.

    Args:
        acq_state (str): The application state of "idle", "configured",
            "running" or "error" - triggers the callback.
        chart_data_json_str (str): A string representation of a JSON object
            containing the current chart data - triggers the callback.
        chart_info_json_str (str): A string representation of a JSON object
            containing the current chart status - triggers the callback.
        active_channels ([int]): A list of integers corresponding to the user
            selected active channel checkboxes.
        samples_to_display (float): The number of samples to be displayed.

    Returns:
        int: The timer interval in milliseconds.
    """
    chart_data = json.loads(chart_data_json_str)
    chart_info = json.loads(chart_info_json_str)
    num_channels = int(len(active_channels))
    refresh_rate = 1000*60*60*24  # 1 day

    if acq_state == 'running':
        # Activate the timer when the sample count displayed to the chart
        # matches the sample count of data read from the HAT device.
        if 0 < chart_info['sample_count'] == chart_data['sample_count']:
            # Determine the refresh rate based on the amount of data being
            # displayed.
            refresh_rate = int(num_channels * samples_to_display / 2)
            if refresh_rate < 500:
                refresh_rate = 500  # Minimum of 500 ms

    return refresh_rate
1bc695ab2e5d63d4734d27417efc3d17a5e3a471
6,732
def rectify(x):
    """Rectify activation function :math:`\\varphi(x) = \\max(0, x)`

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the rectify function applied to the activation.
    """
    # The following is faster than T.maximum(0, x),
    # and it works with nonsymbolic inputs as well.
    # Thanks to @SnipyHollow for pointing this out. Also see:
    # https://github.com/Lasagne/Lasagne/pull/163#issuecomment-81765117
    return 0.5 * (x + abs(x))
f781c4a382d211fbcadfe599c470523d3a59c2f1
6,733
def input_reading_mod(input_dir, input):
    """This helper reads the given input file and returns its lines
    with trailing newlines stripped."""
    with open('%s/%s' % (input_dir, input), 'r') as input_fid:
        pred = input_fid.readlines()
    det = [x.strip('\n') for x in pred]
    return det
34fd8e5fe53d809ee1cc870c031bca8691756a63
6,734
def _group_auto_update_helper(auto_update):
    """Helper that prepares the given group auto update for JSON
    serialization.

    :param GroupAutoUpdate auto_update: the auto update to serialize
    :return: dictionary suitable for JSON serialization
    :rtype: dict
    """
    fields = {
        'to': auto_update.recipient
    }
    if auto_update.add_word_pair[0]:
        fields.setdefault('add', {})['first_word'] = \
            auto_update.add_word_pair[0]
    if auto_update.add_word_pair[1]:
        fields.setdefault('add', {})['second_word'] = \
            auto_update.add_word_pair[1]
    if auto_update.remove_word_pair[0]:
        fields.setdefault('remove', {})['first_word'] = \
            auto_update.remove_word_pair[0]
    if auto_update.remove_word_pair[1]:
        fields.setdefault('remove', {})['second_word'] = \
            auto_update.remove_word_pair[1]
    return fields
fd95526cbea8d4888b7e581ab5eec5260f557517
6,736
def equal_angle_stereographic_projection_conv_YZ_plane(x, y, z):
    """Take 3D grid coords for a cartesian coord system and convert them to a
    2D equal-angle stereographic projection onto the YZ plane."""
    Y = y / (1 + x)
    Z = z / (1 + x)
    return Y, Z
121b24f20ef0ff7f0655a4b39c7ede70c632ef2a
6,737
def getCategorySentiments(termSentiData, trainLexicon, finalDF):
    """
    Module to extract category-wise sentiment scores and generate final
    dataframe with predicted and true sentiment

    Args:
        termSentiData: dictionary of aspect terms and its sentiment
        trainLexicon: Lexicon of defining terms under each category
        finalDF: data frame with predicted and true category labels
    Returns:
        finalDF: data frame with predicted and true category sentiment labels
    """
    categorySentiScore = {}
    for key, values in termSentiData.items():
        if len(values) > 0:
            for k, v in values.items():
                for entKey, entVal in trainLexicon.items():
                    if k in entVal:
                        if entKey in categorySentiScore:
                            categorySentiScore[entKey] += v
                        else:
                            categorySentiScore[entKey] = v

    predictedCategory = finalDF['predictedCategory']
    predictedCategorySentiment = []
    for category in predictedCategory:
        if category in categorySentiScore.keys():
            if categorySentiScore[category] > 0:
                predictedCategorySentiment.append('pos')
            elif categorySentiScore[category] == 0:
                predictedCategorySentiment.append('neu')
            elif categorySentiScore[category] < 0:
                predictedCategorySentiment.append('neg')
        else:
            predictedCategorySentiment.append('neu')
    finalDF['predictedCategorySentiment'] = predictedCategorySentiment
    return finalDF
d8efc2ea0d1ffb18d3949f104c21ce6eae923a2e
6,738
from typing import Union

def _value_to_int(value: Union[int, str]) -> int:
    """String value to int."""
    try:
        return int(value)
    except ValueError as error:
        raise Exception("The value is not an integer") from error
635afb6d75edec8df64b12dad8db7dd408502250
6,740
def DT2str(dt):
    """ convert a datetime to a string in the GNOME format. """
    dt_string = "%3i, %3i, %5i, %3i, %3i" % (dt.day, dt.month, dt.year,
                                             dt.hour, dt.minute)
    return dt_string
7d0b26c9d4517738be448e2ab897b1ffb419179f
6,741
def get_active_lines(lines, comment_char="#"): """ Returns lines, or parts of lines, from content that are not commented out or completely empty. The resulting lines are all individually stripped. This is useful for parsing many config files such as ifcfg. Parameters: lines (list): List of strings to parse. comment_char (str): String indicating that all chars following are part of a comment and will be removed from the output. Returns: list: List of valid lines remaining in the input. Examples: >>> lines = [ ... 'First line', ... ' ', ... '# Comment line', ... 'Inline comment # comment', ... ' Whitespace ', ... 'Last line'] >>> get_active_lines(lines) ['First line', 'Inline comment', 'Whitespace', 'Last line'] """ return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
b49c8cd034c8fa8e7dcf0b5153c8d5bcce52a1f3
6,742
def is_shutout(goalie_dict, goalies_in_game): """ Checks whether current goalie game can be considered a shutout. """ # only goalies that played and didn't concede any goals can have a shutout if (goalie_dict['games_played'] and not goalie_dict['goals_against']): # if more than two goalies (for both teams) were in the game, we # have to check whether goalies shared a game with no goals against if len(goalies_in_game) > 2: # counting goalies per team goalies_per_team_cnt = 0 for team, _ in goalies_in_game: if team == goalie_dict['team']: goalies_per_team_cnt += 1 # if current team had more than one goalie in the game, this can't # be a personal shutout if goalies_per_team_cnt > 1: return False # games lost in overtime or the shootout are no shutouts regardless if goalie_dict['sl'] or goalie_dict['ol']: return False # only games solely played with no goals against throughout regulation, # overtime, and shootout are shutouts return True return False
c9aa7adead449366e8845b562fd04b98396321cc
6,743
def renderInlineStyle(d):
    """If d is a dict of styles, return a proper style string """
    if isinstance(d, (str, int, float)):
        result = str(d)
    else:
        style = []
        for k, v in d.items():
            style.append("{}:{};".format(k, v))
        separator = ' '
        result = separator.join(style)
    return result
f08ea415e4fa29404b7c879f2346832dc84d2e67
6,745
def test_dup_args_in_call(x):
    """The naive gradient update rule fails when a function's arguments
    contain the same variable more than once."""
    return x * x
978f4f6e901b4b4aba01bbad098c107eacab59f3
6,746
def snake_to_camel_case(snake_text):
    """
    Converts snake case text into camel case
    test_path --> testPath

    :param snake_text: str
    :return: str
    """
    components = snake_text.split('_')
    # We capitalize the first letter of each component except the first one
    # with the 'title' method and join them together.
    return components[0] + ''.join(x.title() for x in components[1:])
b42e1393cf99b88e2ebbcf4b38643c770e218ceb
6,747
def limit(resource, value):
    """
    Check if this is a valid limit for the number of matching resources
    to be specified.

    :param resource:
    :type resource:
    :param value: specified limit
    :type value: int
    :return: True if valid limit, False otherwise
    :rtype: bool
    """
    return value > 0
3ee75e7e41752e2bddebb94915bbf9161e02caec
6,748
import gc def sequence(values, rasts): """ Iterates through a sequence of linearly interpolated rasters. Args: values: The unknown values for which new rasters will be interpolated and returned. rasts: A dictionary of the known values and rasters between which the interpolated rasters are calculated. Dictionary entries consist of value-raster pairs, where raster can be either a preloaded raster, or a function that loads and returns a raster (useful to avoid memory errors). """ def _lerp(value, fromval, fromrast, toval, torast): if value == fromval: return fromrast elif value == toval: return torast elif not fromval < value < toval: raise Exception("Value to interpolate must be between fromval and toval") # figure out relative position between rasters, and multiply this to the difference prog = (value - fromval) / float(toval - fromval) #print "prog",prog diffband = torast.bands[0] - fromrast.bands[0] #print diffband, diffband.summarystats() offsetband = diffband * prog #print offsetband, offsetband.summarystats() newband = fromrast.bands[0] + offsetband # finally assign to raster outrast = fromrast.copy(shallow=True) outrast.add_band(newband) del diffband,offsetband gc.collect() return outrast # allow preloaded rasters or callables that load upon request def _make_callable(rast): if not hasattr(rast, '__call__'): return lambda: rast else: return rast rasts = ((val,_make_callable(rast)) for val,rast in rasts.items()) # loop pairs of fromrast torast rasts = sorted(rasts, key=lambda valrast: valrast[0]) # NEW rasts = iter(rasts) fromval,fromrast = next(rasts) fromrast = fromrast() toval,torast = next(rasts) torast = torast() for val in values: if val < fromval: raise NotImplementedError('Extrapolation not currently supported') # increment to next pair if val > toval: if val > values[-1]: raise NotImplementedError('Extrapolation not currently supported') del fromrast gc.collect() fromval,fromrast = toval,torast toval,torast = next(rasts) torast = torast() # interp rast = _lerp(val, fromval, fromrast, toval, torast) yield val,rast # OLD
6ff4d0f192f1bc92030693aba4e903378d06b636
6,749
def page_list_return(total, current=1):
    """Pagination helper: return the range of page numbers, from the minimum
    to the maximum page, for the current view."""
    min_page = current - 4 if current - 6 > 0 else 1
    max_page = min_page + 6 if min_page + 6 < total else total
    return range(min_page, max_page + 1)
99b099a7e90e1e150881d93129b1558eb8bc9a20
6,750
def is_occ_conflict_exception(e):
    """
    Is the exception an OccConflictException?

    :type e: :py:class:`botocore.exceptions.ClientError`
    :param e: The ClientError caught.

    :rtype: bool
    :return: True if the exception is an OccConflictException. False otherwise.
    """
    is_occ = e.response['Error']['Code'] == 'OccConflictException'
    return is_occ
3df46480341b617570e1e980ade194c9bd3fb26e
6,753
def replace_list_element(l, before, after):
    """Helper function for get_cluster_idx """
    for i, e in enumerate(l):
        if e == before:
            l[i] = after
    return l
b15f43332efdcec878fbd16df64d46b1e23d2630
6,754
def average(v):
    """
    :param v: a list of numerical values
    :return: average for a list of values expressed as a float
    """
    return sum(v) * 1.0 / len(v)
cbc9e450ee854289c62b613c257655fcd0c3e62c
6,755
def get_comments(user):
    """Returns all of the user's comments"""
    comments = ''
    for comment in user.get_comments(limit=None):
        comments = comments + ' ' + comment.body
    return comments
cb095b78a2ac304c849e75a7b988c581a826aef1
6,757
def function_with_exception(val):
    """Return a `val` if it is non-negative"""
    if val < 0:
        raise ValueError("val cannot be negative.")
    return val
f9a4a50879477a5e45fcb9a9d54a10695fc526df
6,760
def get_days_word_ending(days: int) -> str:
    """Pick the correct Russian word form for "day" ('день', 'дня', 'дней')
    depending on the given number."""
    last_numeral = days % 10
    prelast_numeral = days % 100
    prelast_numeral = prelast_numeral // 10
    if prelast_numeral == 1:
        return 'дней'
    if last_numeral == 0 or last_numeral >= 5:
        return 'дней'
    elif last_numeral == 1:
        return 'день'
    else:
        return 'дня'
4f2887b438ab8909b29a0fa572c5735477da2262
6,761
def compute_throughputs(batch_size, gpu_times):
    """
    Given a batch size and an array of time running on GPU,
    returns an array of throughputs
    """
    return [batch_size / gpu_times[i] * 1000 for i in range(len(gpu_times))]
14b20806ad8e21126c460613a99f9b68bce31ef0
6,762
import os

def get_resource(chmod_permission, *paths):
    """Take a relative filepath and return the actual path.
    chmod_permission is needed because our packaging might destroy
    the permission."""
    full_path = os.path.join(os.path.dirname(__file__), *paths)
    os.chmod(full_path, chmod_permission)
    return full_path
e6d48cb4068ecfedbfa456a8f614543546eb22c5
6,763
def get_color(r, g, b, a):
    """ converts rgba values of 0 - 255 to the equivalent in 0 - 1 """
    return (r / 255.0, g / 255.0, b / 255.0, a / 255.0)
78b4d71e04c7f3271462461641ec71e9fb849347
6,764
import re

def escape_version(version):
    """
    Escaped version in wheel filename. Doesn't exactly follow
    the escaping specification in :pep:`427#escaping-and-unicode`
    because this conflicts with :pep:`440#local-version-identifiers`.
    """
    return re.sub(r"[^\w\d.+]+", "_", version, flags=re.UNICODE)
ad382dc611a87b66db49f0332698618bda4cf86b
6,766
def to_bs():
    """Example Registry Pipeline that loads existing pipelines"""
    return [("pbsmrtpipe.pipelines.dev_04:pbsmrtpipe.tasks.dev_hello_world:0",
             "pbsmrtpipe.tasks.dev_txt_to_fasta:0")]
36c08e38ccdefda793708b88a41f61d5f05e4197
6,767
import uuid

def get_unique_id():
    """Generate and set unique identifier of length 10 integers"""
    identifier = uuid.uuid4()
    return str(identifier.int)[:10]
52ac119a062f454faad77df2fecb1f902bdd8530
6,770
def bond_quatinty(price, investment, minimum_fraction=0.1):
    """
    Computes the quantity of bonds purchased given the investment,
    bond price per unit, and the minimum fraction of a bond that
    can be purchased

    :param investment: Amount of money that will be invested
    :param minimum_fraction: Minimum fraction that can be purchased
    :param price: Price of bond per unit
    :return: [quantity of bonds purchased, Total Value Invested, Error %]
    """
    Qf = int(investment / (minimum_fraction * price))
    Q = Qf * minimum_fraction
    value = Q * price
    error = (investment - value) / value * 100
    return [Q, value, error]
7b42ae44d2e2db2229251088cf3645e965887e0d
6,771
import re def check_for_repeating_characters(tokens, character): """ References: :func:`re.findall` Args: tokens (): character (): Returns: """ replacements = [] pattern = "([" + character + "{2,}]{2,4})" for token in tokens: if len(token) > 12: if not re.findall(r'{}'.format(pattern), token): pass else: m_strings = re.findall(r'{}'.format(pattern), token) if len(m_strings) > 2: replacements.append((token, ' ')) else: pass else: pass return replacements
9a421e634ad1cd330c2933fda84eb2430e7ef2ed
6,772
from datetime import datetime

def time_stamp():
    """Current time stamp"""
    ts = datetime.now()
    return ts.strftime('%d-%b-%Y %H:%M:%S')
b17e6841b4c79cc1098123e154c9dd6e59098953
6,774
def reset_color_picker(modal_open, font_color): """ Reset the color-picker to white font color after closing the modal component. Parameters ---------- modal_open : bool A boolean that describes if the modal component is open or not font_color : dict of { 'hex': str, 'rgb': dict of { 'rgb' : 'r': int, 'g': int, 'b': int, 'a': int } } The hex and rgb value of the selected font color Returns ------- dict of { 'rgb': dict of { 'rgb' : 'r': int, 'g': int, 'b': int } } The rgb value of the selected font color """ # The modal window is closed so reset the font color. if not modal_open: return dict(rgb=dict(r=255, g=255, b=255)) # The modal window is open so return the current font color. return font_color
03aaf2207f351eee70fbc8dda406ec0d8bc04530
6,776
def obtainTagVersionList(adshList, fileLocation):
    """
    Scans the Pre file to find the Tags and the Versions used in the filing
    in question so as to populate the Tags table with only that subset
    """
    with open(fileLocation + 'pre.txt') as fileHandle:
        tagVersionList = list()
        # read schema and advance one line
        schema = tuple(fileHandle.readline().replace('\n', '').split('\t'))
        adsh_position = schema.index('adsh')
        tag_position = schema.index('tag')
        version_position = schema.index('version')
        # scan for the appropriate adsh's and create a tuple
        for line in fileHandle:
            parsed_line = line.split('\t')
            if parsed_line[adsh_position] in adshList:
                tagVersionList.append((parsed_line[tag_position],
                                       parsed_line[version_position]))
    return tagVersionList
217aaf4a9458404cb2082ec0f8bb6bba0a5d991a
6,777
def traverse(tree, it):
    """
    Traverse tree until a leaf node is reached, and return its symbol.
    This function consumes an iterator on which next() is called during
    each step of traversing.
    """
    nd = tree
    while 1:
        nd = nd.child[next(it)]
        if not nd:
            raise ValueError("prefix code does not match data in bitarray")
        if nd.symbol is not None:
            return nd.symbol
    if nd != tree:
        raise ValueError("decoding not terminated")
5f4be6c4fedfe4220855c0ec12547a9f9eb2ac87
6,780
import re

def normalize_date(date):
    """Normalize a date match to the dash-separated form.

    Returns:
        [string] -- [normalized date]
    """
    char = re.compile('[年月./]')
    date = date.group(0)
    date = char.sub('-', date)
    if '日' in date:
        date = date.replace('日', '')
    return date
11197775c975fd09aa54a12de47b5e113d99f954
6,782
def unsupported_sequence_terminals(request):
    """Terminals that emit warnings for unsupported sequence-awareness."""
    return request.param
6a85fcd35d3d4813ec14c530b867171518cb584e
6,784
import math

def compute_frequency(occurrence_count):
    """
    Raw occurrence counts are too coarse; taking the log smooths them somewhat.
    :param occurrence_count:
    :return:
    """
    return round(math.log(occurrence_count), 2)
e797bf26feee3379a781b3eefe8a988700728c8f
6,786
def convert_params_to_string(params: dict) -> str:
    """ Create a string representation of parameters in PBC format """
    return '\n'.join(['%s %s' % (key, value) for (key, value) in params.items()])
d121ea62f14333ad7f02727a7a6777b8880fef45
6,787
def generate_numbers(limit):
    """
    @param: limit - length of the sequence of natural numbers to be generated
    @return: list_of_numbers - list of the generated sequence
    """
    if limit <= 0:
        raise ValueError('Invalid limit specified')
    list_of_numbers = list()
    for i in range(1, limit + 1):
        list_of_numbers.append(False)
    return list_of_numbers
f0cd027f6978be01b80c4a86914ecc8d3444d251
6,788
import requests import json def questions (category, token): """Contacts API and retrieves questions + answers based on category""" # Retrieve questions and answers from API try: response = requests.get(f"https://opentdb.com/api.php?amount=1&category={category}&type=multiple&token={token}") response.raise_for_status() except requests.RequestException: return None # Turn JSON format into a python dictionary response = json.loads(response.text) # Check for Response Code if response['response_code'] == 0: # A dictionary keyed by an integer with values being dicts as values question + (incorrect)answers try: question_list = [] for i, item in enumerate(response['results']): question_list.append({}) question_list[i]['question'] = item['question'] question_list[i]['correct'] = item['correct_answer'] question_list[i]['incorrects'] = item['incorrect_answers'] except (KeyError, TypeError, ValueError): return None # Something went wrong with the API else: return None return question_list
7ff86a66e82dd442fc57b68ac02a7b793336d290
6,789
def table_append(row, category='default'):
    """
    Join the given strings together as an HTML table row.
    """
    html_row = '</td><td>'.join(row)
    html_row = f'<tr><td>{html_row}</td></tr>'
    if category == 'header':
        html_row = html_row.replace('td>', 'th>')
    return html_row
cd0aa400c80ebc0ac8aad199439d5fc20d7a99a3
6,790
import argparse import time def basic_argument_parser( distributed=True, requires_config_file=True, requires_output_dir=True, ): """ Basic cli tool parser for Detectron2Go binaries """ parser = argparse.ArgumentParser(description="PyTorch Object Detection Training") parser.add_argument( "--runner", type=str, default="d2go.runner.GeneralizedRCNNRunner", help="Full class name, i.e. (package.)module.class", ) parser.add_argument( "--config-file", help="path to config file", default="", required=requires_config_file, metavar="FILE", ) parser.add_argument( "--output-dir", help="When given, this will override the OUTPUT_DIR in the config-file", required=requires_output_dir, default=None, type=str, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) if distributed: parser.add_argument( "--num-processes", type=int, default=1, help="number of gpus per machine" ) parser.add_argument("--num-machines", type=int, default=1) parser.add_argument( "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)", ) parser.add_argument( "--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time()) ) parser.add_argument("--dist-backend", type=str, default="NCCL") if not requires_config_file: # NOTE if not passing yaml file, user should explicitly set the # following args, and use `opts` for non-common usecase. parser.add_argument( "--datasets", type=str, nargs="+", required=True, help="cfg.DATASETS.TEST", ) parser.add_argument( "--min_size", type=int, required=True, help="cfg.INPUT.MIN_SIZE_TEST", ) parser.add_argument( "--max_size", type=int, required=True, help="cfg.INPUT.MAX_SIZE_TEST", ) return parser return parser
842c2f6a17aba77580df49af8c179cf4e5739666
6,791
def contains_end_of_message(message, end_sequence):
    """Check whether end_sequence is a substring of message.

    Parameters:
        message (str): Message in which to look for the sequence end_sequence.
        end_sequence (str): Sequence to look for in the string message.

    Returns:
        bool: True if end_sequence is a substring of message, False otherwise.
    """
    return False if message.find(end_sequence) == -1 else True
3966e3e2ddf62843c2eb12cf6ae144e53408c360
6,792
def utility_num2columnletters(num):
    """
    Takes a column number and converts it to the equivalent excel column letters

    :param int num: column number
    :return str: excel column letters
    """
    def pre_num2alpha(num):
        if num % 26 != 0:
            num = [num // 26, num % 26]
        else:
            num = [(num - 1) // 26, 26]
        if num[0] > 26:
            num = pre_num2alpha(num[0]) + [num[1]]
        else:
            num = list(filter(lambda x: False if x == 0 else True, num))
        return num

    return "".join(list(map(lambda x: chr(x + 64), pre_num2alpha(num))))
295b8c5391d5250f91781c2f6df1626a0acb8022
6,794
import os
import json

def get(id=None):
    """
    Returns all offline STILT stations geographical info.

    Parameters
    ----------
    id : STR, default None -> which returns ALL stations
        Stilt station ID

    Returns
    -------
    DICT
        Geographical information locally stored for the Stilt station.
        If lat/lon is within a country border, the result from
        icoscp.country([lat, lon]) is returned.
    """
    d = os.path.abspath(__file__)
    stn = os.path.join(os.path.split(d)[0], 'stations.json')
    with open(stn, 'r') as f:
        data = json.loads(f.read())
    if id in data.keys():
        return data[id]
    return data
20cdcfb0e79217c94e9ac6cfb5046b342aa09424
6,795
def binarize(query, mlb):
    """Transform a single query into binary representation

    Parameters
    ----------
    query : ndarray, shape = [n_samples, n_classes]
        The tags.
    n_samples : int
        The number of samples in the training set.

    Returns
    -------
    bin_query_vector : ndarray, shape = [n_samples]
        Binary query vector.
    """
    return mlb.transform([query]).ravel()
aafd2072569c0c740b034caa0f8591da231b5b6d
6,796
from typing import Optional
from typing import Dict

def no_response_from_crawl(stats: Optional[Dict]) -> bool:
    """
    Check that the stats dict has received an HTTP 200 response

    :param stats: Crawl stats dictionary
    :return: True if the dict exists and has no 200 response
    """
    if not stats or not isinstance(stats, Dict):
        return False
    status_codes = stats.get("status_codes", {})
    if not status_codes or not isinstance(status_codes, Dict):
        return False
    return 200 not in status_codes
461d3deb9ec6a162dfd7f3f3c0ac73262090c35b
6,797
import time
import math

def get_local_time_zone():
    """Return the current local UTC offset in hours and minutes."""
    utc_offset_seconds = -time.timezone
    if time.localtime().tm_isdst == 1 and time.daylight:
        utc_offset_seconds = -time.altzone
    utc_offset_minutes = (utc_offset_seconds // 60) % 60
    utc_offset_hours = math.floor(utc_offset_seconds / float(3600)) if \
        utc_offset_seconds > 0 else math.ceil(utc_offset_seconds / float(3600))
    return int(utc_offset_hours), utc_offset_minutes
4e90d0a0bf2e831aaf6629542451e75c0a1942a4
6,798
def binary_search(array, target):
    """
    Does a binary search to find the index of an element in an array.
    WARNING - ARRAY HAS TO BE SORTED

    Keyword arguments:
    array - the array that contains the target
    target - the target element for which its index will be returned

    returns the index of target in array
    """
    lower = 0
    upper = len(array)
    while lower < upper:
        x = lower + (upper - lower) // 2
        val = array[x]
        if target == val:
            return x
        elif target > val:
            if lower == x:
                break
            lower = x
        elif target < val:
            upper = x
6dfe09367e2dd7ae2e30f45009fa7059d0da0f18
6,799
def paralog_cn_str(paralog_cn, paralog_qual, min_qual_value=5):
    """
    Returns
        - paralog CN: string,
        - paralog qual: tuple of integers,
        - any_known: bool (any of the values over the threshold).
    If paralog quality is less than min_qual_value, corresponding CN is
    replaced with '?' and quality is replaced with 0. Additionally, quality
    is rounded down to integers.
    """
    paralog_cn_str = []
    new_paralog_qual = []
    any_known = False
    for cn, qual in zip(paralog_cn, paralog_qual):
        if qual < min_qual_value:
            paralog_cn_str.append('?')
            new_paralog_qual.append(0)
        else:
            paralog_cn_str.append(str(cn))
            new_paralog_qual.append(int(qual))
            any_known = True
    return ','.join(paralog_cn_str), tuple(new_paralog_qual), any_known
07012dd9b065622365798ba65024c316cdb8c0c7
6,800
def offbyK(s1, s2, k):
    """Input: two strings s1, s2 and an integer k
    Process: if both strings are of the same length, the function checks
    whether the number of positions at which they differ is exactly k
    Output: returns True when the conditions are met, otherwise False"""
    if len(s1) == len(s2):
        flag = 0
        for i in range(len(s1)):
            if s1[i] != s2[i]:
                flag = flag + 1
        if flag == k:
            return True
        else:
            return False
    else:
        return False
a64c02b85acca64427852fc988ed2f769f750aa7
6,801
def SmiNetDate(python_date):
    """
    Date as a string in the format `YYYY-MM-DD`

    Original xsd documentation: SmiNetLabExporter's date format (YYYY-MM-DD).
    """
    return python_date.strftime("%Y-%m-%d")
8d43d99516fed915b344f42da8171689f8f9ef0b
6,802
def get_node_with_children(node, model): """ Return a short list of this top node and all its children. Note, maximum depth of 10. """ if node is None: return model new_model = [node] i = 0 # not really needed, but keep for ensuring an exit from while loop new_model_changed = True while model != [] and new_model_changed and i < 10: new_model_changed = False append_list = [] for n in new_model: for m in model: if m['supercatid'] == n['catid']: append_list.append(m) for n in append_list: new_model.append(n) model.remove(n) new_model_changed = True i += 1 return new_model
6ea214063f7937eacec5bbe5abd5c78c5b7badbd
6,803
def correct_capitalization(s): """Capitalizes a string with various words, except for prepositions and articles. :param s: The string to capitalize. :return: A new, capitalized, string. """ toret = "" if s: always_upper = {"tic", "i", "ii", "iii", "iv", "v", "vs", "vs.", "2d", "3d", "swi", "gnu", "c++", "c/c++", "c#"} articles = {"el", "la", "las", "lo", "los", "un", "unos", "una", "unas", "a", "an", "the", "these", "those", "that"} preps = {"en", "de", "del", "para", "con", "y", "e", "o", "in", "of", "for", "with", "and", "or"} words = s.strip().lower().split() capitalized_words = [] for word in words: if word in always_upper: word = word.upper() elif (not word in articles and not word in preps): word = word.capitalize() capitalized_words.append(word) toret = ' '.join(capitalized_words) return toret
f49b3203bf7ea19b84ac707bcc506c2571082e39
6,804
def dsigmoid(x):
    """ return differential for sigmoid """
    ##
    #  D  (  e^x   )   (  e^x   )   (  e^x   )^2
    #  -- (---------) = (---------) - (---------)
    #  Dx (1 + e^x )   (1 + e^x )   (1 + e^x )
    ##
    return x * (1 - x)
685362146d7bfcaa3df47db8a10a04e6accb805d
6,805
def then(value):
    """
    Creates an action that ignores the passed state and returns the value.

    >>> then(1)("whatever")
    1
    >>> then(1)("anything")
    1
    """
    return lambda _state: value
5cf3f7b64b222a8329e961fa6c8c70f6c2c4cab0
6,808
import numpy def setup_hull(domain,isDomainFinite,abcissae,hx,hpx,hxparams): """setup_hull: set up the upper and lower hull and everything that comes with that Input: domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded hx - function that evaluates h(x) hpx - function that evaluates hp(x) hxparams - tuple of parameters for h(x) and h'(x) Output: list with: [0]= c_u [1]= xs [2]= h(xs) [3]= hp(xs) [4]= zs [5]= s_cum [6]= hu(zi) History: 2009-05-21 - Written - Bovy (NYU) """ nx= len(abcissae) #Create the output arrays xs= numpy.zeros(nx) hxs= numpy.zeros(nx) hpxs= numpy.zeros(nx) zs= numpy.zeros(nx-1) scum= numpy.zeros(nx-1) hus= numpy.zeros(nx-1) #Function evaluations xs= numpy.sort(abcissae) for ii in range(nx): hxs[ii]= hx(xs[ii],hxparams) hpxs[ii]= hpx(xs[ii],hxparams) #THERE IS NO CHECKING HERE TO SEE WHETHER IN THE INFINITE DOMAIN CASE #WE HAVE ABCISSAE ON BOTH SIDES OF THE PEAK #zi for jj in range(nx-1): zs[jj]= (hxs[jj+1]-hxs[jj]-xs[jj+1]*hpxs[jj+1]+xs[jj]*hpxs[jj])/( hpxs[jj]-hpxs[jj+1]) #hu for jj in range(nx-1): hus[jj]= hpxs[jj]*(zs[jj]-xs[jj])+hxs[jj] #Calculate cu and scum if isDomainFinite[0]: scum[0]= 1./hpxs[0]*(numpy.exp(hus[0])-numpy.exp( hpxs[0]*(domain[0]-xs[0])+hxs[0])) else: scum[0]= 1./hpxs[0]*numpy.exp(hus[0]) if nx > 2: for jj in range(nx-2): if hpxs[jj+1] == 0.: scum[jj+1]= (zs[jj+1]-zs[jj])*numpy.exp(hxs[jj+1]) else: scum[jj+1]=1./hpxs[jj+1]*(numpy.exp(hus[jj+1])-numpy.exp(hus[jj])) if isDomainFinite[1]: cu=1./hpxs[nx-1]*(numpy.exp(hpxs[nx-1]*( domain[1]-xs[nx-1])+hxs[nx-1]) - numpy.exp(hus[nx-2])) else: cu=- 1./hpxs[nx-1]*numpy.exp(hus[nx-2]) cu= cu+numpy.sum(scum) scum= numpy.cumsum(scum)/cu out=[] out.append(cu) out.append(xs) out.append(hxs) out.append(hpxs) out.append(zs) out.append(scum) out.append(hus) return out
9848b9cd08724b4c830d7590cd48a2d3047422e7
6,809
def clustering(vertice):
    """Compute the clustering coefficient of a vertex.

    Note: returns -1 if the coefficient cannot be computed.
    Pre: the vertex exists and is an instance of Vertice.
    """
    # Count the connections (double-counted)
    aristas_entre_vecinos = 0
    for vecino in vertice.iter_de_adyacentes():
        for segundo_vecino in vecino.iter_de_adyacentes():
            if vertice.esta_conectado_a(segundo_vecino):
                aristas_entre_vecinos += 1
    # Compute the coefficient
    try:
        ady = vertice.cantidad_adyacentes()
        max_aristas_posibles = ady * (ady - 1)
        return aristas_entre_vecinos / float(max_aristas_posibles)
    except ZeroDivisionError:
        return -1
0654ab7207174c47d9d0b626ca92894e6330612f
6,810
def get_coverage_value(coverage_report):
    """
    extract coverage from last line:
    TOTAL    116    22    81%
    """
    coverage_value = coverage_report.split()[-1].rstrip('%')
    coverage_value = int(coverage_value)
    return coverage_value
ffd47b6c4ecec4851aab65dcb208ef776d12e36f
6,811
import random def generate_data(train_test_split): """ Generates the training and testing data from the splited data :param train_test_split: train_test_split - array[list[list]]: Contains k arrays of training and test data splices of dataset Example: [[[0.23, 0.34, 0.33, 0.12, 0.45, 0.68], [0.13, 0.35, 0.01, 0.72, 0.25, 0.08], ....] , .... , [[0.12, 0.45, 0.23, 0.64, 0.67, 0.98], [0.20, 0.50, 0.23, 0.12, 0.32, 0.88], ....]] :param test_index: test_index - int : Index of the test data to be split from the train_test_split Example: 5 Yields: train_data: train_data - array[list]: Contains k arrays of train data of dataset Example: [[0.23, 0.34, 0.33, 0.12, 0.45, 0.68], [0.13, 0.35, 0.01, 0.72, 0.25, 0.08], ... , .... , [0.12, 0.45, 0.23, 0.64, 0.67, 0.98], [0.20, 0.50, 0.23, 0.12, 0.32, 0.88], ....] train_data: test_data - array[list]: Contains k arrays of test data of dataset Example: [[0.23, 0.34, 0.33, 0.12, 0.45, 0.68], [0.13, 0.35, 0.01, 0.72, 0.25, 0.08], ... , .... , [0.12, 0.45, 0.23, 0.64, 0.67, 0.98], [0.20, 0.50, 0.23, 0.12, 0.32, 0.88], ....] """ test_index = eval( input("\nPlease enter the partition number to be used as test data (Press 0 for random partition): ")) while type(test_index) != int: test_index = eval(input("\nPlease enter an integer values for the partition to be used as test data: ")) split = train_test_split[:] if test_index != 0: real_index = test_index - 1 else: real_index = random.randrange(0, len(split)) test_data = split[real_index] split.remove(test_data) train_data = [] for x in split: for y in x: train_data.append(y) return train_data, test_data
8de3cbab8b58be3ff7ff289d9aa8d3cd9e2581f8
6,812
from pathlib import Path
import re

def get_detectron2_current_version():
    """Version is not available for import through Python since it is
    above the top level of the package. Instead, we parse it from the
    file with a regex."""
    # Get version info from detectron2 __init__.py
    version_source = (Path(__file__).parents[2] / "detectron2" / "__init__.py").read_text()
    version_number = re.findall(r'__version__ = "([0-9\.]+)"', version_source)[0]
    return version_number
52b7717fdee1fc64b7e8c3d4d3aa074373fcffb6
6,813
from typing import Iterable
from typing import Mapping

def _iter_but_not_str_or_map(maybe_iter):
    """Helper function to distinguish iterables from iterables that are
    strings or mappings. This is used for pynads.concrete.List to determine
    if an iterable should be consumed or placed into a single value tuple.
    """
    return (isinstance(maybe_iter, Iterable) and
            not isinstance(maybe_iter, (str, Mapping)))
3dab46cfd2d2d19bd0fa744370b9059d6a0683bc
6,815
def offbyKExtra(s1,s2,k): """Input: two strings s1,s2 and integer k Process: to check if number of extra characters in s2 as compared to s1 (or vice versa) is equal to k Output: return True when above condition is met otherwise return False""" flag=0 extra1='' if len(s1)>len(s2): for x in s1: if x not in s2: extra1=extra1+x elif s2.count(x)<s1.count(x) and x not in extra1: extra1=extra1+x*(s1.count(x)-s2.count(x)) elif s2.count(x)>s1.count(x): flag=-2 break if len(s1)<=len(s2): for y in s2: if y not in s1: extra1=extra1+y elif s1.count(y)<s2.count(y) and y not in extra1: extra1=extra1+y*(s2.count(y)-s1.count(y)) elif s1.count(y)>s2.count(y): flag=-2 break if flag==-2: return False elif len(extra1)==k: return True else: return False
10cb2480c95a729aceb219e14999dcbcf0cad1eb
6,816
def get_number_of_polymorphic_sites(pileup):
    """
    # ========================================================================
    GET NUMBER OF POLYMORPHIC SITES

    PURPOSE
    -------
    Returns the number of polymorphic sites.

    INPUT
    -----
    [PILEUP] [pileup]
        A Pileup object, which represents the pileup of aligned reads.

    RETURN
    ------
    [INT] (pileup.count_polymorphic_sites())
        The number of polymorphic sites in the pileup.
    # ========================================================================
    """
    return pileup.count_polymorphic_sites()
e388b20f500b141da0eedc54616703c6e444de8a
6,817
def user_save(form, is_patient=False, is_office=False):
    """Function saving the user to the database."""
    user = form.save(commit=False)
    # The account is not active until the user activates it.
    user.is_active = False
    user.is_patient = is_patient
    user.is_office = is_office
    user.save()
    return user
8ce0a7af24bc72da98c015d0e9f7545069bbce19
6,818
def BuildTelemax(x: int, c: int) -> str:
    """ utility fct to build Telemax Pointage """
    msg = '123456MAC:4c24e9870203PROT005170817100*Q:'
    msg += str(x + c + 1).zfill(6)
    msg += '9103'
    msg += '0071'
    msg += '0093'
    msg += '2 '
    msg += '0'
    # msg += 'xx'
    msg += '10020100^1*'
    msg += '\r\n'
    return msg
ba5d51cc6e7463d693f74eb618471fb23a62b6a9
6,820
import os

def renameFileName(fileName, toAdd):
    """ rename a fileName.

    Modify the basename of a path string with a given string.
    Example: modify 'data.pk' to 'data_sample.pk'.

    Parameter :
        fileName : string relative path to fileName
        toAdd : string to add to the original fileName

    Returns :
        a new file Name (string)
    """
    baseName, ext = os.path.splitext(fileName)
    newName = '%s_%s%s' % (baseName, toAdd, ext)
    return newName
38ffad918035917982a143a0fad2f6b031809b51
6,821
def parse_sync_agent_forwarder_id(json):
    """
    Extract the sync agent forwarder id from the get response of LearningLocker.

    :param json: JSON statement from the get response.
    :type json: dict(str, list(dict(str, str))
    :return: The statement forwarder id from the sync agent.
    :rtype: str
    """
    temp_forwarder_id = 0
    if len(json['edges']) > 0:
        temp_forwarder_id = json['edges'][0]['node']['_id']
    return temp_forwarder_id
4b07dc13ca978cfc3fad46e432c8c21d46ee53fa
6,822
import numpy

def generate_lineal_parameter(parameter_values):
    """Generate parameters list for lineal parameter type."""
    initial_value = parameter_values['initial_value']
    final_value = parameter_values["final_value"]
    interval = parameter_values["interval"]
    param_options = numpy.arange(initial_value, final_value, interval)
    return param_options.tolist()
6359a0c93c07aa3dfeba096501b73f69fb3b02f9
6,823
import re def to_snake_case(s): """Convert a string to snake-case format Parameters ---------- s: String String to convert to snake-case Returns ------- String Snake-case formatted string Notes ----- Adapted from https://gist.github.com/jaytaylor/3660565 Examples -------- >>> to_snake_case("snakesOnAPlane") == "snakes_on_a_plane" True >>> to_snake_case("SnakesOnAPlane") == "snakes_on_a_plane" True >>> to_snake_case("snakes_on_a_plane") == "snakes_on_a_plane" True >>> to_snake_case("IPhoneHysteria") == "i_phone_hysteria" True >>> to_snake_case("iPhoneHysteria") == "i_phone_hysteria" True """ s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
cf3ca065c471ed526ab15de5d6c07e9be74ddb59
6,824
def read_file(file_path="data/short.list"):
    """
    Reads file, short.list by default.
    """
    data = ""
    with open(file_path, "r", encoding="utf8", errors="ignore") as file:
        data = file.read().split("\n")[14:-1]
    return tuple(set(data))
f2ced72bfa6328c6794b629d043b837144304716
6,825
def is_help_command(command):
    """
    Checks that the user inputted command is a help command, which will not go
    over the wire. This is a command with -h or --help.

    The help functionality is triggered no matter where the -h appears in the
    command (arg ordering).

    :param command: a list of strings representing the command, for example,
        ['node', 'list', '-h']
    :return: True if it is a help command. False otherwise.
    """
    for segment in command:
        if segment in ('-h', '--help'):
            return True
    return False
e68142c38d734e492f9f65dfdf04ac87f79bd666
6,827
import ipaddress

def is_global(host: str) -> bool:
    """
    >>> assert not is_global("127.0.0.1")
    >>> assert not is_global("192.168.20.168")
    >>> assert is_global("211.13.20.168")
    >>> assert is_global("google.com")
    """
    if host == "localhost":
        return False
    try:
        address = ipaddress.ip_address(host)
    except ValueError:
        return True
    return address.is_global
1e68b762a279eb7b54f32339c783a631bedfa2c9
6,828
def make_initial_state():
    """ Create an initial state dictionary. """
    return {
        'geogrid': 'waiting',
        'ingest': 'waiting',
        'ungrib': 'waiting',
        'metgrid': 'waiting',
        'real': 'waiting',
        'wrf': 'waiting',
        'output': 'waiting'
    }
7a9e2bccb52c1a75ce2ef1d313177fd573433461
6,829
import argparse def parse_arguments(args): """ Parse the arguments from the user """ parser = argparse.ArgumentParser( description= "HAllA's Clustering using hierarchical clustering and Silhouette score.\n", formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( "-v","--verbose", help="additional output is printed\n", action="store_true", default=False) parser.add_argument( "-i","--input", help="the input file D*N, Rows: D features and columns: N samples \n", required=False) parser.add_argument( "-d","--distance_matrix", help="the distance matrix file D*D (if input file is not provided), Rows: D features and columns: N samples \n", required=False) parser.add_argument( "-o","--output", help="the output directory\n", required=True) parser.add_argument( "-m", "--similarity_method", default= 'spearman', help="similarity measurement {default spearman, options: spearman, nmi, ami, dmic, mic, pearson, dcor}") parser.add_argument( "-n", "--estimated_number_of_clusters", type=int, help="estimated number of clusters") parser.add_argument( "-c","--linkage_method", default= 'single', help="linkage clustering method method {default = single, options average, complete\n") parser.add_argument( "--plot", help="dendrogram plus heatmap\n", action="store_true", default=False) parser.add_argument( "--resolution", default= 'high', help="high resolution enforce clusters to be smaller than n/log2(n) where n is the number of total features. Low resolution is good when w have well separated clusters.") return parser.parse_args()
e5478e250b8ef3a818f1fa794b0cc686304e24f6
6,831
import math

def num_k_of_n(n: int, k: int) -> int:
    """Return number of combinations of k elements out of n."""
    if k > n:
        return 0
    if k == n:
        return 1
    return math.factorial(n) // (math.factorial(k) * math.factorial((n - k)))
de99dd88fc6e747421e36c698a525b7e58b1e4de
6,832
def min_number_in_rotated_array(r_nums):
    """
    :param r_nums: rotated array
    :return: min number
    """
    if not r_nums:
        return None
    left = 0
    right = len(r_nums) - 1
    while left < right:
        mid = (left + right) // 2
        if r_nums[mid] == r_nums[right] == r_nums[left]:
            right -= 1
        elif r_nums[mid] <= r_nums[right]:
            right = mid
        else:
            left = mid + 1
    return r_nums[left]
97cd37fb040a38b6c52cf816d29b97aa36c3c338
6,834
def digitos(valor):
    """Return a string containing only the digits of the original string."""
    return ''.join([d for d in valor if d.isdigit()])
dc742d871efefa8067f33c95cc277963e3cfa201
6,835
def from_Point(ros_pt):
    """From ROS Point to Klamp't point"""
    return [ros_pt.x, ros_pt.y, ros_pt.z]
34d83ea0266883679c7e2f51c4eb555e189940d4
6,836
import re def process_document(document, context_size, dictionary, fixed_dictionary=False): """ Given a dictionary, extract the tuples of words of length equal to context_size. Each word is represented by a unique integer number. If fixed_dictionary is True, only take consecutive tuples of words being (all of them) in the dictionary. Example: document = "This is a new document" context_size = 4 dictionary = { 0: "this", 1: "is", 2: "a", 3: "new", 4: "document" } return [(0, 1, 2, 3), (1, 2, 3, 4)] """ text = document.lower() p = re.compile("[a-z]+") tokens = p.findall(text) list_of_points = [] for i in range(len(tokens) - context_size + 1): data_point = [0 for l in range(context_size)] add_new_data_point = True for j in range(context_size): k = i+j if tokens[k] not in dictionary.index: if fixed_dictionary: # only takes series of words in the dictionary add_new_data_point = False break else: new_Ix = dictionary.size dictionary[new_Ix] = tokens[k] data_point[j] = dictionary.index[tokens[k]] if add_new_data_point: list_of_points.append(tuple(data_point)) return list_of_points
3f3531faa8c9aad63ac798e9c3e3a06230d5ecf7
6,838
def realm_from_principal(principal):
    """
    Attempt to retrieve a realm name from a principal, if the principal is
    fully qualified.

    :param principal: A principal name: [email protected]
    :type: principal: str
    :return: realm if present, else None
    :rtype: str
    """
    if '@' not in principal:
        return
    else:
        parts = principal.split('@')
        if len(parts) < 2:
            return
        return parts[-1]
1880fef7b4383edc6f2ccd94958200686d500e0c
6,841
def filtertime(timestamp, interval):
    """Check if timestamp is between timestamp_range - (time1, time2)

    Args:
        timestamp --> UNIX timestamp value.
        interval --> `Tuple` of 2 UNIX timestamp values.

    Returns:
        `bool` --> True/False
    """
    T0, T1 = interval
    if (timestamp <= T1) and (timestamp >= T0):
        return True
    else:
        return False
72fe1aa9ed01e59ad7bbe5299b4c21272fab7354
6,842
import uuid def integrate_whole(payload, org, out_uuid, group): """integrates payload into whole of profile, returns dict""" if group: in_uuid = str(uuid.uuid4()) nested = {"PayloadContent": payload, "PayloadEnabled": True, "PayloadIdentifier": 'SparkleDisabler', "PayloadType": "com.apple.ManagedClient.preferences", "PayloadUUID": in_uuid, "PayloadVersion": 1, } payload = [nested] else: payload = payload finished_profile = {"PayloadContent": payload, "PayloadOrganization": org, "PayloadRemovalDisallowed": True, "PayloadScope": "System", "PayloadType": "Configuration", "PayloadUUID": out_uuid, "PayloadVersion": 1, } return finished_profile
b08cab03f0a1e3a2b74110a7829f6fc6d736d0f4
6,844
import random

def summon_blocks(board):
    """Place 1-8 circles in random places on the speed board"""
    for _ in range(random.randint(1, 8)):
        x = random.randint(0, 4)
        y = random.randint(0, 4)
        while board[x][y] != 'g':
            x = random.randint(0, 4)
            y = random.randint(0, 4)
        board[x][y] = 'b'
    return board
0cfa703b6451e44ea8688561bc857ac70f560c90
6,845
def scope_to_list(scope):
    """Convert a space separated string to a list of scopes."""
    if isinstance(scope, list) or scope is None:
        return scope
    else:
        return scope.split(" ")
c806f91192f86dbc42719787d9ddfe0d79690f0c
6,846
from typing import Callable
import inspect

def numargs(func: Callable) -> int:
    """Get number of arguments."""
    return len(inspect.signature(func).parameters)
2b4e068798add68323db6bd43253fbca34ea71ba
6,847
import math

def frequencyToMidi(frequency):
    """
    Convert a given frequency in Hertz to its corresponding MIDI pitch number
    (60 = Middle C)
    """
    return int(round(69 + 12 * math.log(frequency / 440.0, 2)))
29d4b92b9deacb81f768b554200c4b63b632bf23
6,849
def create_RevPAR(dataframe):
    """Calculate revpar from answer_num_rooms converted to supply of room
    nights divided by answer_ann_revenue"""
    dataframe['CREATED_revpar'] = dataframe['ANSWER_ann_revenue'].astype(float) / (
        dataframe['ANSWER_num_rooms'].astype(float) * 365)
    return dataframe
0d3c91ff3909ea693fdde4b0ea8baae43a31a9a0
6,851
import numpy as np

def available_kurucz_models():
    """These hard-code the available Kurucz models, as present on Nov 22, 2019
    on 'http://kurucz.harvard.edu/grids/grid'"""
    # 250 K steps up to 13,000 K, then coarser 1,000 K steps up to 51,000 K.
    T_a = np.concatenate((np.arange(3500, 13250, 250),
                          np.arange(13000, 51000, 1000)))
    logg_a = np.arange(0, 5.5, 0.5)
    # I need to do a rounding here because np.concatenate() makes a numerical
    # error on the middle array... Weird!
    Z_a = np.round(np.concatenate((np.arange(-5, 0, 0.5),
                                   np.arange(-0.3, 0.4, 0.1),
                                   np.array([0.5, 1.0]))), decimals=1)
    return(T_a, logg_a, Z_a)
2779e39e55127783986a71b65e0b7f09373551f7
6,853
import numpy as np def draw_skill_prices( T, J, pi_fun='pi_fixed', low=-0.2, high=0.2, const=[0.0, 0.05] ): """ Draws initial skill prices and simulates random prices changes. With the normalization of wages in task 1 to zero, some parts of this function are redundent. However, the way this function is currently set up allows for a simulation without this normalization, too. Arguments: T (int) Number of periods J (int) Number of tasks seed (int) Seed for random draw of prices pi_fun (str) defines the process of wage changes. Currently implemented options: - pi_normal: Draws from standard normal distribution. - pi_uniform: Draws uniform distribution. Borders are defined in "low" and "high" arguments. - pi_fixed: Non-random, constant price changes. Changes can be provided in "const" argument. low (int) Lower border of uniform distributed price changes. high (int) Upper border of uniform distributed price changes. const (list) Changes for pi_fixed option. Returns: pi1, pi2 JxT array of prices for tasks 1 and 2. Assumptions: (1) Initial relative skill price for task 2 is +5% (2) No price changes in a base period (t=0 to t=1) """ # import packages # # set seed # np.random.seed(seed) # define functions that return price changes for different specifications # (1) Draw stadard normal distributed changes in log prices. def pi_normal(J=J, T=T, **kwargs): pi_normal = np.around(np.random.normal(size=(J, T-1)), 4) return pi_normal # (2) Draw changes in log prices that are uniformly distributed over # some interval. def pi_uniform(J=2, T=T, **kwargs): low, high = kwargs['low'], kwargs['high'] pi_uniform = np.around( np.random.uniform(low, high, size=(J, T-1)), 4 ) return pi_uniform # (3) Fix changes in log prices. def pi_fixed(J=J, T=T, **kwargs): const = kwargs['const'] pi_fixed = np.array([const, ]*T).transpose() return pi_fixed # Set initial task prices # Assume task 1 has a lower price than task 2 pi1_0 = 0 pi2_0 = 0.1 # Define price array pi = np.empty([J, T]) # Set intial prices pi[:, 0] = pi1_0, pi2_0 pi[:, 1] = pi1_0, pi2_0 # Get price changes. # Find price changes function of choice: price_changes = eval(pi_fun) d_pi = price_changes(T=T, J=J, low=low, high=high, const=const) # Calculate prices in each period, while there is no price change in a base # period (from t=0 to t=1) for t in range(2, T): pi[:, t] = pi[:, t-1] + d_pi[:, t-1] return pi
a14af64fae651d3728fe4be2eae3f3e51eeecfe5
6,854