Dataset columns: content (string, length 35 to 416k), sha1 (string, length 40), id (int64, range 0 to 710k)
def assemble_result_str(ref_snp, alt_snp, flanking_5, flanking_3):
    """
    (str, str, str, str) -> str

    ref_snp : str
        DESCRIPTION: 1 character (A, T, G or C), the reference SNP.
    alt_snp : str
        DESCRIPTION: 1 character (A, T, G or C), the variant SNP.
    flanking_5 : str
        DESCRIPTION: 50 characters (A, T, G or C), the 50 bp upstream from ref_snp.
    flanking_3 : str
        DESCRIPTION: 50 characters (A, T, G or C), the 50 bp downstream from ref_snp.

    Returns a new str, the reference SNP concatenated with its variant
    and its flanking sequences, like this:

    ref_snp = 'T'
    alt_snp = 'C'
    50 bp = 'XXXXXXXXXXXXXXX'

    'XXXXXXXXXXXXXXX[T/C]XXXXXXXXXXXXXXX'
    """
    return flanking_5 + '[' + ref_snp + '/' + alt_snp + ']' + flanking_3
bc0b43464124d0d19f4bfc5646730a1f951a5ced
8,517
import requests


def get_api_data(session, url, date_time):
    """Get the JSON-formatted response from the AMM API for the desired
    date-time.
    """
    session = session or requests.Session()
    return session.get(url, params={"dt": date_time.format("DD/MM/YYYY")}).json()
ff44be4a958c4f05cc5bd562854059c552f693e1
8,518
def trunc(s, length):
    """Truncate a string to a given length.

    The string is truncated by keeping the first (length - 4) characters
    and replacing the rest with ' ...'.
    """
    if s and len(s) > length:
        return s[:length - 4] + ' ...'
    return s or ''
ea6d1702d709ac7d94cc2bcb2e945da71009c0fe
8,520
import operator


def sort_values(values: list[tuple]) -> list[tuple]:
    """Returns a list of tuples sorted by x value (the value at index 0 of
    the inner tuple)."""
    return sorted(values, key=operator.itemgetter(0))
f172f68418988d4e01dcc406de8ec467abfe7aa8
8,521
def calc_rank(age, is_veteran=False, has_disability=False):
    """
    Calculate a participant's rank. A higher number is higher ranked (top of
    the list). (i.e., Reverse this when sorting.)

    Change this function to change the ranking algorithm.

    The code in the comments below is a ranking system based on mortality
    vulnerability, as described in several research papers, namely:
    http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0073979

    This code cannot currently be used due to a JOHS agreement regarding
    this ranking system, but should be argued for in the future as it has a
    scientific basis.

    # This is the base vulnerability score
    factor = 1.0

    age_multiplier = {
        range(0, 30): 1,
        range(30, 40): 1.04,
        range(40, 50): 1.63,
        range(50, 60): 2.7,
        range(60, 70): 4.28,
        range(70, 120): 11.67
    }

    # find and apply the age rr multiplier
    factor *= [age_multiplier[key] for key in age_multiplier if age in key][0]

    # is the participant a vet? Apply the vet rr multiplier
    if is_veteran:
        factor *= 1.33

    # is the participant disabled? Apply the disabled rr multiplier
    if has_disability:
        factor *= 1.5

    # result is a number between 1 and about 22
    return factor
    """
    age_points = 0
    # over 55? add more points
    if age >= 55:
        age_points += 1
    vet_points = 1
    dis_points = 1

    points = age_points
    if is_veteran:
        points += vet_points
    if has_disability:
        points += dis_points

    # result is a number between 0 and 3
    return points
2c3ab7446ac12e634ffe47b0e57e2232908b12b9
8,522
from pathlib import Path
import os
import zipfile
import re


def list_geo_file(folder=""):
    """Lists all the files in the folder named "folder" contained in
    GeoWatch Labs Agricultural Maps.

    Args:
        folder (str): folder name in which to search in GeoWatch Labs
            Agricultural Maps folder.

    Returns:
        list_data_file (list): list of files in GeoWatch Labs Agricultural Maps
    """
    home = str(Path.home())
    zip_file = home + "/GeoWatch Labs Agricultural Maps.zip"
    root_folder = home + "/GeoWatch Labs Agricultural Maps/" + folder

    # unzip file
    if not os.path.exists(root_folder):
        with zipfile.ZipFile(zip_file, "r") as zip_ref:
            zip_ref.extractall(home)

    # list all files in folder
    list_all_files = []
    for path, subdirs, files in os.walk(root_folder):
        for name in files:
            list_all_files.append(os.path.join(path, name))

    # list data files (dot escaped so only real .tif extensions match)
    list_data_file = [f for f in list_all_files if re.search(r"\.tif$", f)]
    return list_data_file
6d233447bcba53401900b3073843e8c435c967dd
8,523
def expose(class_method):
    """
    Decorator which exposes given method into interface

    :param class_method: method to expose
    :return: given method with modifications
    """
    class_method.is_exposed = True
    return class_method
ee234bd7535f29c39fc80643997b89aeb3c0f533
8,524
def merge_length_list(lists):
    """Merge lists of per-length word counts (index = word length,
    value = count)."""
    res_list = []
    for l in lists:
        if len(res_list) < len(l):
            res_list[len(res_list):] = [0] * (len(l) - len(res_list))
        for length, num in enumerate(l):
            res_list[length] += num
    return res_list
564742fd7e0a7f3a0a8535a1f02c343d193a69ac
8,525
def func(pseudo_state, a1, a2, b1, b2, c1, c2, d1, d2):
    """
    quadratic fit function for the Bellman value at given pseudo-state

    :param pseudo_state: list(float) - list of the four state variables for
        a given state
    :param a1, a2, ... d2: float - parameters of the quadratic fit function
    """
    total = a1 * pseudo_state[0]**2 + a2 * pseudo_state[0]
    total += b1 * pseudo_state[1]**2 + b2 * pseudo_state[1]
    total += c1 * pseudo_state[2]**2 + c2 * pseudo_state[2]
    total += d1 * pseudo_state[3]**2 + d2 * pseudo_state[3]
    return total
6478219704999dc4cfcbc915126d919e15fe3043
8,526
def intersection(box1, box2):
    """
    Args:
        box1: bounding box
        box2: bounding box

    Returns:
        float: the area that intersects the two boxes
    """
    y_min1, x_min1, y_max1, x_max1 = box1
    y_min2, x_min2, y_max2, x_max2 = box2
    min_ymax = min(y_max1, y_max2)
    max_ymin = max(y_min1, y_min2)
    intersect_heights = max(0, min_ymax - max_ymin)
    min_xmax = min(x_max1, x_max2)
    max_xmin = max(x_min1, x_min2)
    intersect_widths = max(0, min_xmax - max_xmin)
    return intersect_heights * intersect_widths
71746d93ead54aa5b36e7e6a5eb40e757711bef5
8,527
import sys


def deep_getsizeof(data, ids=None):
    """
    Returns the memory footprint of (essentially) any object; based on
    sys.getsizeof, but uses a recursive method to handle collections of
    objects.
    """
    if ids is None:
        ids = set()
    if id(data) in ids:
        return 0
    size = sys.getsizeof(data)
    ids.add(id(data))
    if isinstance(data, (str, bytes, int, float)):
        return size
    elif isinstance(data, (list, tuple, set)):
        return size + sum(deep_getsizeof(element, ids) for element in list(data))
    elif isinstance(data, dict):
        return size + sum(deep_getsizeof(key, ids) + deep_getsizeof(value, ids)
                          for key, value in data.items())
    else:
        try:
            # pass the seen-id set along so shared objects are not double-counted
            return deep_getsizeof(list(data), ids)
        except TypeError:
            return size
5e92a2b2e917f6d3a3bee06d305b580e669573c1
8,530
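A quick sanity check of the recursion in deep_getsizeof above. The exact byte counts are platform-dependent, so this only asserts relative ordering; the example values are mine, not from the dataset:

shared = list(range(100))
nested = {"a": shared, "b": shared, "c": (1, 2.5, "xyz")}

flat_size = deep_getsizeof(shared)
nested_size = deep_getsizeof(nested)

# the dict plus its keys/values outweighs the list alone, and `shared`
# is counted only once even though it appears under two keys
assert nested_size > flat_size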
def calculateHeaders(tokens: list, rawHeaders: tuple) -> tuple:
    """
    Takes sanitised, tokenised URCL code and the rawHeaders.
    Calculates the new optimised header values, then returns them.
    """
    BITS = rawHeaders[0]
    bitsOperator = rawHeaders[1]
    MINREG = 0
    MINHEAP = rawHeaders[3]
    MINSTACK = rawHeaders[4]
    RUN = rawHeaders[5]

    for line in tokens:
        for token in line:
            if token.startswith("R"):
                if token[1:].isnumeric():
                    number = int(token[1:])
                    if number > MINREG:
                        MINREG = number

    headers = (BITS, bitsOperator, MINREG, MINHEAP, MINSTACK, RUN)
    return headers
472eeb4397d68e232b66517064f91e5688c33e3c
8,531
def weg(m) -> str:
    """capture multiple "weg"s"""
    return m
2be9b16a4969d04f1322c22b07ffd98318fa05fb
8,532
def _monom(n):
    """
    monomial in `eta` variables from the number `n` encoding it
    """
    v = []
    i = 0
    while n:
        if n % 2:
            v.append(i)
        n = n >> 1
        i += 1
    return v
42621ddbca95b8fc3ca3d7eea51cc1dc97524758
8,534
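An illustration of the bit encoding used by _monom above: n is read in binary and the positions of the set bits become the variable indices. Example values are mine, not from the dataset:

# n = 5 = 0b101 -> eta variables 0 and 2
assert _monom(5) == [0, 2]
# n = 6 = 0b110 -> eta variables 1 and 2
assert _monom(6) == [1, 2]
# n = 0 encodes the empty monomial
assert _monom(0) == []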
from datetime import datetime


def month_counter(fm, LAST_DAY_OF_TRAIN_PRD=(2015, 10, 31)):
    """Calculate number of months (i.e. month boundaries) between the first
    month of train period and the end month of validation period.

    Parameters:
    -----------
    fm : datetime
        First day of first month of train period
    LAST_DAY_OF_TRAIN_PRD : tuple of int
        (year, month, day) of the last day of the train period

    Returns:
    --------
    Number of months between first month of train period and end month of
    validation period
    """
    return (
        (datetime(*LAST_DAY_OF_TRAIN_PRD).year - fm.year) * 12
        + datetime(*LAST_DAY_OF_TRAIN_PRD).month
        - fm.month
    )
e10e6be0eb8a7762b182d073ca85ed1b97f831d3
8,535
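A worked example for month_counter above, using the default end date of (2015, 10, 31); the input date is mine, chosen for illustration:

from datetime import datetime

# June 2013 to October 2015: (2015 - 2013) * 12 + (10 - 6) = 28 boundaries
fm = datetime(2013, 6, 1)
assert month_counter(fm) == 28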
def to_unicode(obj):
    """Convert object to unicode"""
    if isinstance(obj, bytes):
        return obj.decode('utf-8', 'ignore')
    return str(obj)
e54c02e04109b8a99a7eb4e357e95ead89166137
8,536
def get_shape(x, unknown_dim_size=1):
    """
    Extract shape from onnxruntime input.
    Replace unknown dimension by default with 1.

    Parameters
    ----------
    x: onnxruntime.capi.onnxruntime_pybind11_state.NodeArg
    unknown_dim_size: int
        Default: 1
    """
    shape = x.shape
    # replace unknown dimensions by default with 1
    shape = [i if isinstance(i, int) else unknown_dim_size for i in shape]
    return shape
1c719191922a46b948fb567273e3a5152769e190
8,539
def word_tally(word_list):
    """
    Compiles a dictionary of words. Keys are the word, values are the number
    of occurrences of this word in the page.

    :param word_list: list
        List of words
    :return: dictionary
        Dict of words: total
    """
    word_dict = {}
    for word in word_list:
        if not word_dict.get(word):
            word_dict[word] = 1
        else:
            word_dict[word] += 1
    return word_dict
5ab1f7ac4c8a72cd5ceda2a391cef8a62a1ec34f
8,540
def step_lr(lr_max, epoch, num_epochs):
    """Step Scheduler"""
    ratio = epoch / float(num_epochs)
    if ratio < 0.3:
        return lr_max
    elif ratio < 0.6:
        return lr_max * 0.2
    elif ratio < 0.8:
        return lr_max * 0.2 * 0.2
    else:
        return lr_max * 0.2 * 0.2 * 0.2
515bb508a207f6aa5175756b49f26d53de9d7f6f
8,543
def dir_basename_from_pid(pid, j):
    """
    Mapping article id from metadata to its location in the arxiv S3
    tarballs. Returns dir/basename without extension and without fully
    qualified path. It also ignores version because there is no version in
    the tarballs; I understand they have the updated version in the tarballs
    all the time. Add .txt, .pdf or .jpg for the actual file you need and
    prepend with the path to your files dirs.
    """
    schema = "unhandled"
    if j['_rawid'][:4].isdigit() and '.' in j['_rawid']:
        # this is the current scheme from 0704
        schema = 'current'  # YYMM/YYMM.xxxxx.pdf (number of xxx is variable)
        dir_basename_str = '/'.join([j['_rawid'][:4], j['_rawid']])
    elif '/' in j['_rawid']:
        # cond-mat/0210533 some rawids had the category and the id
        schema = 'slash'  # YYMM/catYYMMxxxxx.pdf
        dir_basename_str = '/'.join([j['_rawid'].split("/")[1][:4],
                                     "".join(j['_rawid'].split("/"))])
    else:
        # this is for rawid with no category, but we split category from
        # metadata on the dot (if it has one)
        schema = 'else'  # YYMM/catYYMMxxxxx.pdf
        dir_basename_str = '/'.join(
            [j['_rawid'][:4].split("-")[0],
             j['arxiv_primary_category']['term'].split(".")[0] + j['_rawid']])
    if schema == 'unhandled':
        print('unhandled mapping in pid to tarball', j['_rawid'])
    return dir_basename_str
38a955b2caecfa65f8aad44c8cf0fbc21d0b3709
8,545
import re


def fix_extension(filename_end):
    """Processes ending section of filename to get extension

    Args:
        filename_end (str): ending section of filename

    Returns:
        str: file extension
    """
    return_value = filename_end
    pattern_string = r".*\.(\w{3})$"
    pattern = re.compile(pattern_string, flags=re.IGNORECASE)
    match = pattern.search(return_value)
    if match is None:
        raise ValueError
    return_value = match.group(1)
    return return_value
5317c3c52920d669374ac72cc6cccc70a2740174
8,548
import numpy as np


def acf(x, l):
    """
    Auto correlation function of a given vector x with a maximum lag of
    length l.
    """
    return np.array([1] + [np.corrcoef(x[:-i], x[i:])[0, 1]
                           for i in range(1, l)])
f168cf69b7055508a95c22b869b36e5c808f5953
8,550
def parse_map_align_stdout(stdout):
    """Parse the stdout of map_align and extract the alignment of residues.

    Parameters
    ----------
    stdout : str
        Standard output created with map_align

    Returns
    -------
    dict
        A dictionary where aligned residue numbers in map_b are the keys and
        residue numbers in map_a values. Only misaligned regions are included.
    """
    alignment_dict = {}

    for line in stdout.split('\n'):
        if line and line.split()[0] == "MAX":
            line = line.rstrip().lstrip().split()
            for residue_pair in line[8:]:
                residue_pair = residue_pair.split(":")
                if residue_pair[0] != residue_pair[1]:
                    alignment_dict[int(residue_pair[1])] = int(residue_pair[0])

    return alignment_dict
4cb699ffbb817e80402af22b08240323012919f8
8,551
import math


def ads(x, adsParameter):
    """ ADS function """
    p = adsParameter
    exp1 = 1 + math.exp(-1 * (x - p.C + p.D / 2) / p.E)
    exp2 = 1 + math.exp(-1 * (x - p.C - p.D / 2) / p.F)
    dx = p.A + p.B / exp1 * (1 - 1 / exp2)
    return dx / p.DMAX
f09f35aea0bc43c3dcefb922a306c6745e472aaa
8,553
def last_common_ancestor(taxonomies):
    """Compute last common ancestor"""
    lca = list()
    if len(taxonomies) > 1:
        zipped = zip(*taxonomies)
        for level in zipped:
            if len(set(level)) > 1:
                level = "*"
            else:
                level = level[0]
            lca.append(level)
    else:
        # only one top hit
        lca = taxonomies[0]
    return lca
0f49b2faa2e480afd41b93d5442d2ee1b86d5b24
8,554
import struct
import socket


def ip_to_ascii(ip_address):
    """ Converts the quad IP format to an integer representation. """
    return struct.unpack('!L', socket.inet_aton(ip_address))[0]
2cb3ccbe70eed2dd2e8ac21d10e180805dec95ea
8,555
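A worked example for ip_to_ascii above (example addresses are mine): '!L' unpacks the four network-order bytes of the address as one unsigned 32-bit integer.

# 192 * 2**24 + 168 * 2**16 + 0 * 2**8 + 1 = 0xC0A80001
assert ip_to_ascii('192.168.0.1') == 3232235521
assert ip_to_ascii('0.0.0.1') == 1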
def _method_from_mod(block_dets, modname, methodname):
    """
    E.g.

    from os import getcwd
    from os.path import join
    from os.path import join as joinpath

    <ImportFrom lineno="4" col_offset="0" type="int" module="os" level="0">
      <names>
        <alias type="str" name="getcwd"/>
    """
    importfrom_els = block_dets.element.xpath('descendant-or-self::ImportFrom')
    mod_els = [el for el in importfrom_els if el.get('module') == modname]
    if not mod_els:
        return False
    has_mod_method = False
    for mod_el in mod_els:
        names_els = mod_el.xpath('names/alias')
        methodname_els = [
            el for el in names_els if el.get('name') == methodname]
        if methodname_els:
            has_mod_method = True
            break
    return has_mod_method
d50c91099feee6ee40e8d7ae682112bd4ce6d0f6
8,556
from typing import Any
from typing import Sequence


def complete_ports(_: Any, __: Any, incomplete: str) -> Sequence[str]:
    """Returns common ports for completion."""
    return [k for k in ('80', '443', '8080') if k.startswith(incomplete)]
694306ae57bcfd21d6fa73a595768dde0ffba86a
8,557
def _get_max_mem():
    """Return the current cgroup's memory high water mark."""
    try:
        with open("/sys/fs/cgroup/memory/memory.max_usage_in_bytes") as f:
            return float(f.read().strip())
    except Exception:
        return 0
6031fdc75c6ca2fd8f74e52951316e708b0eb36e
8,558
import os


def add_image(qc_html, image, title=None):
    """
    Adds an image to the report.
    """
    if title:
        qc_html.write('<center> {} </center>'.format(title))

    relpath = os.path.relpath(image, os.path.dirname(qc_html.name))
    qc_html.write('<a href="' + relpath + '" >')
    qc_html.write('<img src="' + relpath + '" >')
    qc_html.write('</a><br>\n')

    return qc_html
01c4117c3ae9e18b6cfc49bcad11af97b80bc130
8,559
def coord_arg_to_coord(carg):
    """
    Parameters
    ----------
    carg : str
        Argument from parser for coordinates
        Eligible formats are like:
            J081240.7+320809
            122.223,-23.2322
            07:45:00.47,34:17:31.1

    Returns
    -------
    icoord : str or tuple
        Allowed format for coord input to linetools.utils.radec_to_coord
    """
    if ',' in carg:
        radec = carg.split(',')
        if ':' in radec[0]:  # 07:45:00.47,34:17:31.1
            icoord = (radec[0], radec[1])
        else:  # 122.223,-23.2322
            icoord = (float(radec[0]), float(radec[1]))
    else:  # J081240.7+320809
        icoord = carg
    # Return
    return icoord
16a6cb8090dc040b7b5f6a1a4ba873ea65e0dfdf
8,561
def amount_in_location(obj, user):
    """
    Returns how many instances of this product are at the current user's
    location
    """
    return obj.get_amount_stocked(user)
4b722cb9e5721cbc2c7d87ff30046087853cd5c0
8,563
def format_string():
    """Returns the format string of the binary file"""
    return "IIccccI?cccc"
f833be1ff5ffcbc538ba0b785e9fe43c0513b688
8,564
def getCountryData(df):
    """
    Implement a function to get counts of data based on show type
    """
    country_df = df[["country", "type", "index"]]
    country_df = country_df.groupby(['country', 'type']).count().unstack()
    country_df.columns = ['Movie', 'TV Show']
    country_df = country_df.reset_index().fillna(0)
    country_df = country_df.drop(0, axis=0)
    country_df['Movie'] = country_df['Movie'].astype('int')
    country_df['TV Show'] = country_df['TV Show'].astype('int')
    country_df['Total_titles'] = country_df['Movie'] + country_df['TV Show']
    country_df = country_df.sort_values(by='Total_titles', ascending=False)
    return country_df
bde3247bc95c4914d3b665225aee9dd2a0bfe2b6
8,565
def all_success(mgmt_commands):
    """Determines if all child processes were successful.

    Args:
        mgmt_commands : A list of all Command objects

    Returns:
        True if all child processes succeeded
    """
    for mgmt_command in mgmt_commands:
        if mgmt_command.retcode != 0:
            return False
    return True
1bc0d32491711e0d20106f1f294093b30e77bd55
8,566
import re


def get_nameservice(hdfs_site):
    """
    Multiple nameservices can be configured, for example to support seamless
    distcp between two HA clusters. The nameservices are defined as a comma
    separated list in hdfs_site['dfs.nameservices'].

    The parameter hdfs_site['dfs.internal.nameservices'] was introduced in
    Hadoop 2.6 to denote the nameservice for the current cluster (HDFS-6376).

    This method uses hdfs_site['dfs.internal.nameservices'] to get the
    current nameservice; if that parameter is not available it tries to
    split the value in hdfs_site['dfs.nameservices'], returning the first
    string or what is contained in
    hdfs_site['dfs.namenode.shared.edits.dir']. By default
    hdfs_site['dfs.nameservices'] is returned.

    :param hdfs_site:
    :return: string or empty
    """
    name_service = hdfs_site.get('dfs.internal.nameservices', None)
    if not name_service:
        name_service = hdfs_site.get('dfs.nameservices', None)
        if name_service:
            for ns in name_service.split(","):
                if 'dfs.namenode.shared.edits.dir' in hdfs_site and \
                        re.match(r'.*%s$' % ns, hdfs_site['dfs.namenode.shared.edits.dir']):
                    # better would be core_site['fs.defaultFS'] but it's not available
                    return ns
            return name_service.split(",")[0]  # default to return the first nameservice
    return name_service
65a86316112a94b6f361daea88cd5658d8019668
8,567
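Two illustrative configs (my example values, not from a real cluster) showing both lookup paths of get_nameservice above:

# dfs.internal.nameservices wins when present
hdfs_site = {
    'dfs.nameservices': 'ns1,ns2',
    'dfs.internal.nameservices': 'ns1',
}
assert get_nameservice(hdfs_site) == 'ns1'

# without it, the shared edits dir is matched to pick the local nameservice
hdfs_site = {
    'dfs.nameservices': 'ns1,ns2',
    'dfs.namenode.shared.edits.dir': 'qjournal://jn1:8485;jn2:8485/ns2',
}
assert get_nameservice(hdfs_site) == 'ns2'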
def checkChanList(chanprof, profile, chanList):
    """
    Return non-zero value if any element of chanList is not in the channel
    list of profile
    """
    for c in chanList:
        if c not in chanprof[profile - 1]:
            return 1
    return 0
face301b61634bcff8721fcafb1cbc09e2bd0e5f
8,568
import json


def image_get_class_cn_dict(cn_imagenet_class_path):
    """
    Load the Chinese translation dictionary for the ImageNet classes.

    :param cn_imagenet_class_path: path to the JSON file of Chinese class names
    :return: dict of class labels
    """
    fn = open(cn_imagenet_class_path, "r", encoding='UTF-8')
    str_json = fn.read()
    dic = json.loads(str_json)
    fn.close()
    return dic
190cb80d929bdef3fe33b082fa12149c1c290446
8,569
def jsonp(func):
    """
    Decorator for the following JSON API functions for the connection test.
    """
    def dec(request, *args, **kw):
        resp = func(request, *args, **kw)
        cb = request.GET.get('callback')
        if not cb:
            cb = ''
        resp['Content-Type'] = 'application/javascript'
        resp.content = "{}({})".format(cb, resp.content)
        return resp
    return dec
ccf8bfbffdd1ac865eb2d001f212925c332f1f82
8,570
def get_base_required_field_types():
    """ Get field types for UI asset required fields.
    2016-08-24: removed 'coordinates': 'floatlist'
    2016-08-26: removed 'augmented': 'bool', 'Ref Des': 'string',
                'hasDeploymentEvent': 'bool', 'remoteDocuments': 'list';
                added 'editPhase' (can have values: EDIT, STAGED, OPERATIONAL);
                removed 'lastModifiedTimestamp': 'long'
    """
    base_required_field_types = {
        'assetInfo': 'dict',
        'assetType': 'string',
        'dataSource': 'string',
        'deployment_number': 'string',
        'deployment_numbers': 'intlist',
        'depth': 'float',
        'editPhase': 'string',
        'id': 'int',
        'latitude': 'float',
        'longitude': 'float',
        'manufactureInfo': 'dict',
        'mobile': 'bool',
        'notes': 'string',
        'orbitRadius': 'float',
        'partData': 'dict',
        'physicalInfo': 'dict',
        'purchaseAndDeliveryInfo': 'dict',
        'ref_des': 'string',
        'remoteResources': 'dictlist',
        'uid': 'string'
    }
    return base_required_field_types
0cb8b6041aeaf25efe8ad29e3986237efe5e382f
8,571
import numpy


def encode_data(labeldata, hot_vector=1):
    """
    Takes array of label data, and transforms it into array of one-hot
    encoded vectors.

    Args:
        labeldata (iterable): Array or list of data labels
        hot_vector: value assigned for hot, default 1

    Returns:
        numpy array of vectored labels
    """
    vec_data = []
    for label in labeldata:
        vector = [0] * 10
        for i in range(10):
            if label == i:
                vector[i] = hot_vector
        vec_data.append(vector)
    data = numpy.array(vec_data)
    return data
92f4bb37428fa2023047abf9d368876bb6d529e5
8,573
def get_copysetup(copytools, copytool_name):
    """
    Return the copysetup for the given copytool.

    :param copytools: copytools list from infosys.
    :param copytool_name: name of copytool (string).
    :return: copysetup (string).
    """
    copysetup = ""

    for ct in copytools.keys():
        if copytool_name == ct:
            copysetup = copytools[ct].get('setup')
            break

    return copysetup
1181352a178f954d17cf7d7b8fc6c798b441d4a6
8,574
def last_occurence_of_tag_chain(sequence, tag_type):
    """
    Takes a sequence of tags. Assuming the first N tags of the sequence are
    all of the type tag_type, it will return the index of the last such tag
    in that chain, i.e. N-1

    If the first element of the sequence is not of type tag_type, it will
    return -1
    """
    for i in range(0, len(sequence)):
        if sequence[i][1] != tag_type:
            return i - 1
    # whole sequence is one chain: index of its last tag
    return len(sequence) - 1
0edcf25e18ff4b3701f92c13bab2b634b738c158
8,575
def supports_parenting():
    """Does this Engine support parenting of objects, aka nesting of
    transforms or attaching objects. This is important when sending
    transformation values.
    """
    return False
0dd8fc9e5c1917f10cf9d09a1fb75b470abc6eec
8,576
def split_residue_id(atom):
    """Takes an atom and splits its het ID into components.

    :param Atom atom: the atom to read.
    :rtype: ``tuple``"""
    if atom.het:
        id = atom.het.id.split(".")[-1]
        num = "".join([c for c in id if c.isdigit()])
        insert = "".join([c for c in id if c.isalpha()]) or "?"
        return num, insert
    return ".."
7340c24dc1ef982fd0a3772764a02d7c8fd30126
8,577
import numpy as np


def pvs(t):
    """ Saturation vapor pressure as a function of temperature t [°C] """
    T = t + 273.15  # [K] Temperature

    # pws(T) [Pa] saturation pressure over liquid water
    # for temp range [0 200] °C, eq. (6)
    C8 = -5.800_220_6e3
    C9 = 1.391_499_3e0
    C10 = -4.864_023_9e-2
    C11 = 4.176_476_8e-5
    C12 = -1.445_209_3e-8
    C13 = 6.545_967_3e0
    pws = np.exp(
        C8 / T + C9 + C10 * T + C11 * T**2 + C12 * T**3 + C13 * np.log(T))
    return pws
8ba1aced4669753cca0ff2332bc2bd7ad5ad7814
8,578
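As a sanity check of the correlation in pvs above against a well-known reference point: the saturation pressure of water vapour at 20 °C is roughly 2.34 kPa, and the formula reproduces it.

p = pvs(20.0)
assert 2300 < p < 2400  # Pa, ~2339 Pa at 20 °C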
def save_output_node(out):
    """
    This calcfunction saves the out dict in the db
    """
    out_wc = out.clone()
    return out_wc
7f89752332c023558dfb2ea7774231e1c1eeab99
8,579
def cal_fdp_power(selected, non_zero_index, r_index=False):
    """ Calculate power and False Discovery Proportion

    Parameters
    ----------
    selected: list
        index (in R format) of selected non-null variables
    non_zero_index: true index of non-null variables
    r_index : True if the index is taken from rpy2 inference

    Returns
    -------
    fdp: False Discoveries Proportion
    power: percentage of correctly selected variables over total number of
        non-null variables
    """
    # selected is the index list in R and will be different from index of
    # python by 1 unit
    if selected.size == 0:
        return 0.0, 0.0

    if r_index:
        selected = selected - 1

    true_positive = [i for i in selected if i in non_zero_index]
    false_positive = [i for i in selected if i not in non_zero_index]
    fdp = len(false_positive) / max(1, len(selected))
    power = len(true_positive) / len(non_zero_index)

    return fdp, power
5e14b19c95ec0bc465c6ea6c98606768f00ee49e
8,580
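A small usage example for cal_fdp_power above (my example values; note the function expects an array with a .size attribute, hence numpy):

import numpy as np

# 3 selections, of which 2 are true non-nulls (indices 1 and 2)
selected = np.array([1, 2, 5])
non_zero_index = [1, 2, 3]
fdp, power = cal_fdp_power(selected, non_zero_index)
assert abs(fdp - 1 / 3) < 1e-12    # one false discovery out of three
assert abs(power - 2 / 3) < 1e-12  # two of three non-nulls recovered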
def _create_types_map(sqltypes):
    """Take a types module and utilising the `dir` function create a
    mapping from the string value of that attribute to the SQLAlchemy type
    instance.
    """
    sql_types_map = {
        item.lower(): getattr(sqltypes, item)
        for item in dir(sqltypes)
        if item[0].isupper()
    }
    return sql_types_map
69a3902663eadc70050acc0c1869fcb86a2a6384
8,581
def discuss(topic):
    """Discuss a topic with the user and return their response.

    Ask if the user likes the topic and why.

    Parameters:
        topic (str): The topic under discussion.

    Return:
        str: Response to the question of why they like the topic.
    """
    like = input("Do you like " + topic + "? ")
    response = input("Why do you think that? ")
    return response
854dcb744440b98fbbeb763e9ae0a866afb5d7d0
8,582
def numba_update_portability_matrix(
        portability_matrix, r_ct_g, c_ct_g, c_f_g, c_cf_g, c_dt_g):
    """ updating the probability matrix """
    for a, b, c, d, e in zip(r_ct_g, c_ct_g, c_f_g, c_cf_g, c_dt_g):
        portability_matrix[a, b, c, d, e] = 1
    return portability_matrix
138a55af650ab98d1ff4d991b5f81e6f709148c8
8,583
def find_author(name, authors):
    """ Find the author with the provided name, in the list of authors"""
    for author in authors:
        if author.name == name:
            return author
    raise ValueError('Author', name, 'not found.')
dd47f0ecf8574d68a0ce9b5e94dff19b58f5887a
8,584
def removeblanklines(astr):
    """remove the blank lines in astr"""
    lines = astr.splitlines()
    lines = [line for line in lines if line.strip() != ""]
    return "\n".join(lines)
fa4e04ca1b9cc643782af9363b5e6e6405a2b56d
8,585
def printChapterE(self, section):
    """
    """
    output = []
    #if self.FAxial == 'COMPRESSION':
    #    output.append(" "+"\n")
    output.append("_______________________________________________________________________________________"+"\n")
    output.append(" "+"\n")
    output.append(" FLEXURE AND AXIAL COMPRESSION FORCE CHECK RESULTS"+"\n")
    output.append(" "+"\n")
    output.append("Member ID URComb Pr [N] Mrx [N/mm] Mry [N/mm] Cb Q Lx [mm] Kx"+"\n")
    output.append("Design to Equ UR Pn [N] Mnx [N/mm] Mny [N/mm] Fex[N/mm2] Ly [mm] Ky"+"\n")
    #
    if self.design_method == 'ASD' or self.design_method == 'USER_DEFINED':
        _Axial_factor = self.Omega_c
        _BM_factor = self.Omega_b
        #
        output.append("Result Equ Pn OmegaC OmegaBx OmegaBy Fey[N/mm2] Lz [mm] Kz"+"\n")
        output.append(" Equ Mn Om*Pr/Pn Om*Mrx/Mnx Om*Mry/Mny Fez[N/mm2] Lb [mm] KL/r"+"\n")
    else:
        _Axial_factor = self.Phi_c
        _BM_factor = self.Phi_b
        #
        output.append("Result Equ Pn PhiC PhiCx PhiCy Fey[N/mm2] Lz [mm] Kz"+"\n")
        output.append(" Equ Mn Pr/Pn*Phi Mrx/Mn*Phi Mry/Mn*Phi Fez[N/mm2] Lb [mm] KL/r"+"\n")
    #
    output.append("......................................................................................."+"\n")
    output.append(" "+"\n")
    #
    self.Q = 1  # @hami2230 - added as a fudge. To be corrected during reporting.
    output.append("{:12s} {:3.4f} {:1.4E} {:1.4E} {:1.4E} {:1.2f} {:1.2f} {:1.3E} {:1.2f}\n"
                  .format(self.beam_name, self.ChapterH_results.UR,
                          abs(self.actions.Fx.convert('newton').value),
                          abs(self.actions.My.convert('newton*metre').value),
                          abs(self.actions.Mz.convert('newton*metre').value),
                          self.Cb, self.Q,
                          self.L.convert('metre').value, self.Kx))
    #
    self.Fex_E4_7 = 1
    self.Fey_E4_8 = 1
    self.Fez_E4_9 = 1  # @hami2230 - added as a fudge. To be corrected during reporting.
    output.append("{:12s} {:6s} {:1.4E} {:1.4E} {:1.2f} {:1.2f} {:1.3E} {:1.2f}\n"
                  .format(self.design_method, self.ChapterH_results.UR_flag,
                          self.Pn_E.convert('kilonewton').value,
                          self.Mnx.convert('newton*metre').value,
                          self.Mny.convert('newton*metre').value,
                          self.Fex_E4_7,
                          self.L.convert('metre').value, self.Ky))
    #
    output.append("{:12s} {:6s} {:1.4E} {:1.4E} {:1.4E} {:1.4E} {:1.3E} {:1.2f}\n"
                  .format(self.ChapterH_results.status, self.Pn_E_flag,
                          _Axial_factor, _BM_factor, _BM_factor,
                          self.Fey_E4_8,
                          self.L.convert('metre').value, self.Kz))
    #
    self.Pc = 1
    output.append("{:6s} {:3.6f} {:3.6f} {:3.6f} {:1.4E} {:1.3E} {:3.1f}\n"
                  .format(self.Mnx_flag,
                          abs(self.actions.Fx.value / self.Pc),
                          abs(self.actions.My.value / self.Mcx.value),
                          abs(self.actions.Mz.value / self.Mcy.value),
                          self.Fez_E4_9,
                          self.Lb.convert('metre').value, self.KLr.value))
    #
    output.append(" "+"\n")
    #output.append("_______________________________________________________________________________________"+"\n")
    #output.append(" "+"\n")
    return output
9cd8ab3004864dd571f9853b9dc5554ebbb1b538
8,586
import re


def CheckEmailFormat(email: str) -> bool:
    """
    Check the format of an email address.
    """
    regex = r'^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$'
    if re.search(regex, email):
        # print("Valid Email")
        return True
    else:
        # print("Invalid Email")
        return False
042afa1e96934e11211bfff2b6e492768c7c5f50
8,587
def to_type(cls):
    """ Cast the data to a specific type

    Parameters
    ----------
    cls : class object
        The class to cast the object to
    """
    return lambda _, value: cls(value)
3ae4daac59db30ce988adf55bc4f7f9e15822361
8,588
import hashlib


def md5_key(chrom, start, end, ref, alt, assembly):
    """Generate a md5 key representing uniquely the variant

    Accepts:
        chrom(str): chromosome
        start(int): variant start
        end(int): variant end
        ref(str): references bases
        alt(str): alternative bases
        assembly(str): genome assembly (GRCh37 or GRCh38)

    Returns:
        md5_key(str): md5 unique key
    """
    hash = hashlib.md5()
    hash.update(
        (" ".join([chrom, str(start), str(end), str(ref), str(alt), assembly])).encode("utf-8")
    )
    md5_key = hash.hexdigest()
    return md5_key
64db55c0075d063aeec500f97700ec769840cc4f
8,589
def _missing_ids(vba, pcode_ids, verbose=False):
    """
    See if there are any function names or variables that appear in the
    p-code that do not appear in the decompressed VBA source code.

    vba - (str) The decompressed VBA source code.
    pcode_ids - (set) The IDs defined in the p-code.
    verbose - (bool) Print each missing ID as it is found.

    return - (float) % missing items.
    """
    # Check each ID.
    num_missing = 0.0
    for curr_id in pcode_ids:
        if curr_id not in vba:
            if verbose:
                print("P-code ID '{0}' is missing.".format(curr_id))
            num_missing += 1
    if len(pcode_ids) == 0:
        return 0.0
    return num_missing / len(pcode_ids)
86dfb5abf8bd9c24078adbed60f6428872ee1ae4
8,590
def faceBlobSalientBasedOnAverageValue(value, salientLabelValue):
    """Deprecated"""
    # the face is considered salient if 50% of pixels or more are marked salient
    if value > (0.5 * salientLabelValue):
        return True  # bright green
    # if the face is not salient:
    else:
        return False
7a1f7d46e1cd251a80ebf8f7405dde58d108fdd8
8,595
import base64


def base64_encode_nifti(image):
    """Returns base64 encoded string of the specified image.

    Parameters
    ----------
    image : nibabel.Nifti2Image
        image to be encoded.

    Returns
    -------
    str
        base64 encoded string of the image.
    """
    encoded_image = base64.encodebytes(image.to_bytes())
    enc = encoded_image.decode("utf-8")
    return enc
5e3758089240d8840c1cb1828ab908dead3b4b7c
8,596
import numpy


def calc_array_manifold_f(fbinX, fftlen, samplerate, delays, half_band_shift):
    """
    Calculate one (conjugate) array manifold vector for each frequency bin
    or subband.
    """
    chan_num = len(delays)
    Delta_f = samplerate / float(fftlen)
    J = (0 + 1j)
    fftlen2 = fftlen / 2

    if half_band_shift:
        if fbinX < fftlen2:
            phases = -J * 2.0 * numpy.pi * (0.5 + fbinX) * Delta_f * delays
            vs = numpy.exp(phases)
        else:
            phases = -J * 2.0 * numpy.pi * (0.5 - fftlen + fbinX) * Delta_f * delays
            vs = numpy.exp(phases)
    else:
        if fbinX <= fftlen2:
            vs = numpy.exp(-J * 2.0 * numpy.pi * fbinX * Delta_f * delays)
        else:
            vs = numpy.conjugate(
                numpy.exp(-J * 2.0 * numpy.pi * fbinX * Delta_f * delays))

    return vs / chan_num
26005e6c831e4a04052f6beed86bc861a97eb522
8,597
import numpy


def rotate(p, angle, center=(0, 0)):
    """
    Rotates given point around specified center.

    Args:
        p: (float, float)
            Point to rotate.
        angle: float
            Angle in radians.
        center: (float, float)
            Center of rotation.
    """
    dx = p[0] - center[0]
    dy = p[1] - center[1]

    sin = numpy.sin(angle)
    cos = numpy.cos(angle)

    x = center[0] + dx * cos - dy * sin
    y = center[1] + dx * sin + dy * cos

    return x, y
41b95c1b4162ad9ebacc2250c293e33daaa8a526
8,598
def pure_water_density_tanaka(t, a5=0.999974950):
    """Equation according to Tanaka, M., et al.; Recommended table for the
    density of water between 0 °C and 40 °C based on recent experimental
    reports, Metrologia, 2001, 38, 301-309

    :param t: water temperature (°C)
    :param a5: density of SMOW water under one atmosphere. a5 must be
        changed if other water is used (e.g., tap water).
    :return: water density at temperature t in mg/l
    """
    a1 = -3.983035   # deg C
    a2 = 301.797     # deg C
    a3 = 522528.9    # (deg C)**2
    a4 = 69.34881    # deg C
    return 1000.0 * (a5 * (1.0 - ((t + a2) * (t + a1) * (t + a1)) / (a3 * (t + a4))))
27bcc12f8f9089cf8307d72ba784675bbcd5036a
8,599
from pathlib import Path
import pickle


def load_trained_model(fname: str):
    """
    Loads a saved ModelData object from file.

    Args:
        fname (str): Complete path to the file.

    Returns:
        A ModelData object containing the model, list of titles, list of
        authors, list of genres, list of summaries, training corpus, and
        test corpus.
    """
    fp = Path(fname)
    with fp.open("rb") as f:
        model_data = pickle.load(f)
    return model_data
3bdfa3f090fa5efcd54b17bd47a0cd4ea57e1c4a
8,600
def generate_hostfile(map_, file):
    """ generate hostfile based on map_ """
    for node in map_:
        print('node-{}.simgrid.org'.format(node), file=file)
    file.flush()
    return file.name
76de14bfec5e94ddbce4e29fbcaee844f566b086
8,601
import typing
import asyncio


def sync(
    awaitable: typing.Awaitable,
    event_loop: typing.Optional[asyncio.events.AbstractEventLoop] = None,
):
    """
    Run an awaitable synchronously. Good for calling asyncio code from sync
    Python.

    The usage is as follows:

    .. highlight:: python
    .. code-block:: python

        from ori.asyncio import sync

        async def your_function(arg1, arg2):
            # Additional asyncio code can go here
            return arg1 + arg2

        # If you call your_function() directly, you will get a coroutine
        # object. You need to either use the await keyword...
        # or the sync() wrapper as below.

        value = sync(your_function(1, 2))
        assert value == 3

    Args:
        awaitable: This is a function declared with `async def` or another
            type of awaitable object in Python.
        event_loop: An :mod:`asyncio` event loop. This defaults to `None`,
            in which case this function will try to pull the running event
            loop. If no event loop exists, this function will try to create
            one.

    Returns:
        This returns the same value that `awaitable` would if you used the
        `await` keyword instead of :func:`sync`.

    Raises:
        TypeError: If `awaitable` is not actually an awaitable object, then
            we raise a TypeError.
    """
    if not event_loop:
        event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(awaitable)
4fda68c3132564d9c6620fe8e4c1cfac401528a6
8,602
import math


def check_float(a, b, precision=1e-4):
    """
    check float data

    Args:
        a (float): input_data1
        b (float): input_data2
        precision (float): precision for checking diff for a and b

    Returns:
        bool
    """
    def __adjust_data(num):
        if num == 0.0:
            return 0.0
        return num / 10**(math.floor(math.log10(abs(num))) + 1)

    a = __adjust_data(a)
    b = __adjust_data(b)
    return math.fabs(a - b) < precision
240c4dac3228669592317e44b69f19d436a1c17d
8,603
def icosahedron_nodes_calculator(order):
    """Calculate the number of nodes corresponding to the order of an
    icosahedron graph

    Args:
        order (int): order of an icosahedron graph

    Returns:
        int: number of nodes in icosahedron sampling for that order
    """
    nodes = 10 * (4 ** order) + 2
    return nodes
eccea98ec5da3fae748c3505af4689dfe6f47b73
8,604
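The formula in icosahedron_nodes_calculator above can be spot-checked against the first few orders; each subdivision quadruples the 4**order term:

assert icosahedron_nodes_calculator(0) == 12   # the plain icosahedron
assert icosahedron_nodes_calculator(1) == 42
assert icosahedron_nodes_calculator(2) == 162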
def load_board_state(file_name):
    """
    o = food / point thing
    X = wall
    P = pac man
    S = ghost spawn
    ' ' = empty space
    """
    board = []
    with open(file_name, 'r') as file:
        for line in file:
            # strip only the newline: leading/trailing spaces are board cells
            line = line.rstrip('\n')
            board.append(list(line))
    return board
4d3a1822649724407f62abfa7909014562fd6978
8,606
import click


def validate_profile(context, param, value):
    """
    Validates existence of profile.

    Returns the profile name if it exists; otherwise throws BadParameter
    """
    if value in context.obj.configuration.profiles():
        return value
    else:
        raise click.BadParameter("\"%s\" was not found" % value)
ac1fd3caa99a510173aa96f4c781abfacb6eed97
8,607
def formatPath(a):
    """Format SVG path data from an array"""
    return "".join([cmd + " ".join([str(p) for p in params])
                    for cmd, params in a])
f56f62b001bf37696fa3636310fda1c8e26e8ae9
8,609
def merge_configs(*configs):
    """
    Merges dictionaries of dictionaries, by combining top-level dictionaries
    with last value taking precedence.

    For example:

    >>> merge_configs({'a': {'b': 1, 'c': 2}}, {'a': {'b': 2, 'd': 3}})
    {'a': {'b': 2, 'c': 2, 'd': 3}}
    """
    merged_config = {}
    for config in configs:
        for k, v in config.items():
            if k in merged_config:
                merged_config[k].update(v)
            else:
                merged_config[k] = v.copy()
    return merged_config
dbbdff74695233b522cd4381a78ca82e6f8057fd
8,611
import torch


def expected_unique(probabilities, sample_size):
    """Get expected number of unique samples.

    This computes the expected number of unique samples for a multinomial
    distribution from which we sample a fixed number of times.
    """
    vals = 1 - (1 - probabilities) ** sample_size
    expectation = torch.sum(torch.as_tensor(vals), dim=-1)
    return torch.ceil(expectation)
fd9b7ebd35db0d29c32495f50cd7f220e76267ba
8,612
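A worked example for expected_unique above. For outcome i with probability p_i and n draws, the chance it appears at least once is 1 - (1 - p_i)**n, and the expectation sums these. Example values are mine:

import torch

# two equally likely outcomes, sampled twice:
# E[unique] = 2 * (1 - 0.5**2) = 1.5, then ceil -> 2
probs = torch.tensor([0.5, 0.5])
assert expected_unique(probs, sample_size=2).item() == 2.0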
def label_tsne(tsne_results, sample_names, tool_label):
    """
    Label tSNE results.

    Parameters
    ----------
    tsne_results : np.array
        Output from run_tsne.
    sample_names : list
        List of sample names.
    tool_label : str
        The tool name to use for adding labels.

    Returns
    -------
    dict
        Dictionary of the form: {<sample_name>: <coordinate>}.
    """
    tsne_labeled = {
        sample_names[i]: {
            f'{tool_label}_x': float(tsne_results[i][0]),
            f'{tool_label}_y': float(tsne_results[i][1])
        }
        for i in range(len(sample_names))
    }
    return tsne_labeled
50422e192e57fb6a019618d46e9a95b9b3c3c768
8,613
def diff_list(l1, l2):
    """Returns side by side equality test"""
    return [i1 != i2 for (i1, i2) in zip(l1, l2)]
50a81f3c517168d6b369e11ac874520cf5110656
8,615
from typing import List


def get_routes(vehicles: List[int], stops: List[int]):
    """
    Create dict of vehicles (key) and their routes (value).

    :vehicles: list of vehicle identities (same order as demand)
    :stops: list of stop numbers (same order as vehicles)

    return dict
    """
    counts = {}
    for vehicle in vehicles:
        if vehicle in counts:
            counts[vehicle] += 1
        else:
            counts[vehicle] = 1

    routes = {}
    for i, vehicle in enumerate(vehicles):
        if vehicle not in routes:
            routes[vehicle] = [None for j in range(counts[vehicle])]
        routes[vehicle][int(stops[i]) - 1] = i

    return routes
966baf998c0ec22ad381175a5680b4cefd045a6f
8,616
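A small usage example for get_routes above (my example values): each demand index is slotted into its vehicle's route at the 1-based stop position.

# vehicle 0 serves demand points 0 then 1; vehicle 1 serves point 2
vehicles = [0, 0, 1]
stops = [1, 2, 1]  # 1-based stop order per vehicle
assert get_routes(vehicles, stops) == {0: [0, 1], 1: [2]}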
def progress_bar(progress, size=20):
    """
    Returns an ASCII progress bar.

    :param progress: A floating point number between 0 and 1 representing
        the progress that has been completed already.
    :param size: The width of the bar.

    .. code-block:: python

        >>> ui.progress_bar(0.5, 10)
        '[####    ]'
    """
    size -= 2
    if size <= 0:
        raise ValueError("size not big enough.")
    if progress < 0:
        return "[" + "?" * size + "]"
    else:
        progress = min(progress, 1)
        done = int(round(size * progress))
        return "[" + "#" * done + " " * (size - done) + "]"
ab3bffd9e2c9c0060001a3058217690e8d30a67d
8,618
def hasblocks(hashlist):
    """Determines which blocks are on this server"""
    print("HasBlocks()")
    return hashlist
9bcd481b6c6ec1ecbbcb1978edae0bff86fd5cb7
8,619
def instantiate_domains(domains, encoding_cnt):
    """create domain for fields we want to encode."""
    instantiated = {}
    for d in domains:
        if "/encoding/*" in d:
            for i in range(encoding_cnt):
                instantiated[d.replace("/encoding/*", "/encoding/{}".format(i))] = domains[d]
        else:
            instantiated[d] = domains[d]
    return instantiated
69857f67070eeabeaf9b1b4b8eb4d62f48563528
8,623
def density(temp):
    """
    Calculating density of water due to given temperature (Eq. 3.11)

    :param temp: temperature prediction Y[d, t] at depth d and time t
    :return: corresponding density prediction
    """
    return 1000 * (1 - ((temp + 288.9414) * (temp - 3.9863) ** 2) /
                   (508929.2 * (temp + 68.12963)))
92d6d7c5639e03790715f62a1027a15357cdf1cf
8,624
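A quick check of the density formula above: the (temp - 3.9863)**2 term vanishes at 3.9863 °C, so the function peaks at exactly 1000 there, matching water's maximum density near 4 °C.

assert abs(density(3.9863) - 1000.0) < 1e-9
# density decreases as we move away from that point
assert density(25.0) < density(10.0) < density(3.9863)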
import torch


def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 3), [t, x, y].
        distance (Tensor): Distance from the given point to 4 boundaries
            (left, top, right, bottom, frDis, 4point, bkDis, 4point).
        max_shape (list): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    mid_t = points[:, 0]
    mid_x1 = points[:, 1] - distance[:, 0]
    mid_y1 = points[:, 2] - distance[:, 1]
    mid_x2 = points[:, 1] + distance[:, 2]
    mid_y2 = points[:, 2] + distance[:, 3]

    fr_t = points[:, 0] + distance[:, 4]
    fr_x1 = mid_x1 + distance[:, 5]
    fr_y1 = mid_y1 + distance[:, 6]
    fr_x2 = mid_x2 + distance[:, 7]
    fr_y2 = mid_y2 + distance[:, 8]

    bk_t = points[:, 0] - distance[:, 9]
    bk_x1 = mid_x1 + distance[:, 10]
    bk_y1 = mid_y1 + distance[:, 11]
    bk_x2 = mid_x2 + distance[:, 12]
    bk_y2 = mid_y2 + distance[:, 13]

    if max_shape is not None:
        mid_x1 = mid_x1.clamp(min=0, max=max_shape[2])
        mid_y1 = mid_y1.clamp(min=0, max=max_shape[1])
        mid_x2 = mid_x2.clamp(min=0, max=max_shape[2])
        mid_y2 = mid_y2.clamp(min=0, max=max_shape[1])
        fr_t = fr_t.clamp(min=0, max=max_shape[0])
        fr_x1 = fr_x1.clamp(min=0, max=max_shape[2])
        fr_y1 = fr_y1.clamp(min=0, max=max_shape[1])
        fr_x2 = fr_x2.clamp(min=0, max=max_shape[2])
        fr_y2 = fr_y2.clamp(min=0, max=max_shape[1])
        bk_t = bk_t.clamp(min=0, max=max_shape[0])
        bk_x1 = bk_x1.clamp(min=0, max=max_shape[2])
        bk_y1 = bk_y1.clamp(min=0, max=max_shape[1])
        bk_x2 = bk_x2.clamp(min=0, max=max_shape[2])
        bk_y2 = bk_y2.clamp(min=0, max=max_shape[1])

    return torch.stack([mid_t, mid_x1, mid_y1, mid_x2, mid_y2,
                        fr_t, fr_x1, fr_y1, fr_x2, fr_y2,
                        bk_t, bk_x1, bk_y1, bk_x2, bk_y2], -1)
ea773f3bd0d53a2aaccb85c7b7042c51c3dd0653
8,625
from typing import Tuple


def _partition(lst: list, pivot: object) -> Tuple[list, list]:
    """Return a partition of <lst> with the chosen pivot.

    Return two lists, where the first contains the items in <lst> that are
    <= pivot, and the second is the items in <lst> that are > pivot.
    """
    smaller = []
    bigger = []
    for item in lst:
        if item <= pivot:
            smaller.append(item)
        else:
            bigger.append(item)
    return smaller, bigger
07950d665eca6b5591d3c8b65a980c2597b9e45a
8,627
def split_arguments(args, splitter_name=None, splitter_index=None):
    """Split list of args into (before, split_args, after) between
    splitter_name/index and `--`

    :param args: list of all arguments
    :type args: list of str
    :param splitter_name: optional argument used to split out specific args
    :type splitter_name: str
    :param splitter_index: specific index at which to split
    :type splitter_index: int

    :returns: tuple (before, split_args, after)
    """
    if splitter_index is None:
        if splitter_name not in args:
            # keep the arity consistent with the other return paths
            return args, [], []
        splitter_index = args.index(splitter_name)
    start_index = splitter_index + 1
    end_index = args.index('--', start_index) if '--' in args[start_index:] else None

    if end_index:
        return (
            args[0:splitter_index],
            args[start_index:end_index],
            args[(end_index + 1):]
        )
    else:
        return (
            args[0:splitter_index],
            args[start_index:],
            []
        )
c6e800ff6d109699d346c76052a70e4e5ab670d8
8,628
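A usage example for split_arguments above (my example values): everything between the splitter and '--' is split out, with the rest returned on either side.

args = ['build', '--foo', 'a', 'b', '--', 'c']
assert split_arguments(args, splitter_name='--foo') == (['build'], ['a', 'b'], ['c'])

# without a trailing '--' the remainder list is empty
assert split_arguments(['build', '--foo', 'a'], splitter_name='--foo') == (['build'], ['a'], [])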
import torch


def cumulative_laplace_norm(input):
    """
    Args:
        input: [B, C, F, T]

    Returns:
        Normalized input with the same shape.
    """
    batch_size, num_channels, num_freqs, num_frames = input.size()
    input = input.reshape(batch_size * num_channels, num_freqs, num_frames)

    step_sum = torch.sum(input, dim=1)  # [B * C, F, T] => [B * C, T]
    cumulative_sum = torch.cumsum(step_sum, dim=-1)  # [B * C, T]

    entry_count = torch.arange(
        num_freqs,
        num_freqs * num_frames + 1,
        num_freqs,
        dtype=input.dtype,
        device=input.device
    )
    entry_count = entry_count.reshape(1, num_frames)  # [1, T]
    entry_count = entry_count.expand_as(cumulative_sum)  # [1, T] => [B * C, T]

    cumulative_mean = cumulative_sum / entry_count  # [B * C, T]
    cumulative_mean = cumulative_mean.reshape(batch_size * num_channels, 1, num_frames)

    normed = input / (cumulative_mean + 1e-7)

    return normed.reshape(batch_size, num_channels, num_freqs, num_frames)
1c61399c3b36a6552e59f3ed62da651207370670
8,629
import re


def is_valid_record2(parsed_record):
    """Check if parsed_record is properly formatted"""
    if not (
        "byr" in parsed_record
        and parsed_record["byr"].isdigit()
        and len(parsed_record["byr"]) == 4
        and (1920 <= int(parsed_record["byr"]) <= 2002)
    ):
        return False
    if not (
        "iyr" in parsed_record
        and parsed_record["iyr"].isdigit()
        and len(parsed_record["iyr"]) == 4
        and (2010 <= int(parsed_record["iyr"]) <= 2020)
    ):
        return False
    if not (
        "eyr" in parsed_record
        and parsed_record["eyr"].isdigit()
        and len(parsed_record["eyr"]) == 4
        and (2020 <= int(parsed_record["eyr"]) <= 2030)
    ):
        return False
    if "hgt" in parsed_record:
        match = re.match(r"(?P<value>\d+)(?P<unit>in|cm)$", parsed_record["hgt"])
        if not match:
            return False
        value = int(match.group("value"))
        unit = match.group("unit")
        if not (
            (unit == "cm" and 150 <= value <= 193)
            or (unit == "in" and 59 <= value <= 76)
        ):
            return False
    else:
        return False
    if not (
        "hcl" in parsed_record
        and re.match(r"#[0-9a-f]{6}$", parsed_record["hcl"])
    ):
        return False
    if not (
        "ecl" in parsed_record
        and re.match(r"amb|blu|brn|gry|grn|hzl|oth$", parsed_record["ecl"])
    ):
        return False
    if not ("pid" in parsed_record and re.match(r"\d{9}$", parsed_record["pid"])):
        return False
    return True
d3fdb17f6c6726e74e02f41813c665e8be223406
8,630
def LargestPrimeFactor(num):
    """Calculate greatest prime factor using length of list

    It is best practice to use greatest prime factor for hash table's
    capacity. This method calculates the greatest prime factor using the int
    passed in and returns the prime integer. Time complexity of O(sqrt(n)).

    :param num: length of list
    :return: prime number for hashtable capacity
    """
    p_factor = 1
    i = 2
    while i <= num / i:
        if num % i == 0:
            p_factor = i
            num //= i  # integer division keeps num an int
        else:
            i += 1
    if p_factor < num:
        p_factor = num
    return p_factor
ccb09b7bd385a8b4795dbdeb3a4e886cd0922f20
8,631
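Worked examples for LargestPrimeFactor above (my example values): trial division strips small factors, so the largest prime survives.

assert LargestPrimeFactor(20) == 5    # 20 = 2 * 2 * 5
assert LargestPrimeFactor(97) == 97   # already prime
assert LargestPrimeFactor(600851475143) == 6857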
def lua_property(name):
    """ Decorator for marking methods that make attributes available to Lua """
    def decorator(meth):
        def setter(method):
            meth._setter_method = method.__name__
            return method
        meth._is_lua_property = True
        meth._name = name
        meth.lua_setter = setter
        return meth
    return decorator
97cd57cf21c4afdb43b6504af56139228df751cd
8,632
def toalphanum(s):
    """ gets rid of the unwanted characters """
    _s = ''
    for c in s:
        if c in '\\ /(.)-':
            _s += '_'
        else:
            _s += c
    return _s
14ce33504a467406f42c095526cc232364ac6d1a
8,633
def ip2int(ip_str):
    """
    Convert XXX.XXX.XXX.XXX to integer representation

    Args:
        ip_str (str): IP in a XXX.XXX.XXX.XXX string format

    Returns:
        ip_int (int): Integer IP representation
    """
    ip_int = None
    if isinstance(ip_str, str):
        # clean IP
        if ip_str.find(':') > 0:
            ip_str = ip_str[:ip_str.index(':')]
        octet = [int(x.strip()) for x in ip_str.split('.')]
        ip_int = octet[0] * pow(256, 3) + octet[1] * pow(256, 2) + octet[2] * 256 + octet[3]
    return ip_int
bb48f28519593222c005df1b009d7e669dde7669
8,634
import itertools


def _find_chordless_cycles(bond_graph, max_cycle_size):
    """Find all chordless cycles (i.e. rings) in the bond graph

    Traverses the bond graph to determine all cycles (i.e. rings) each atom
    is contained within. Algorithm has been adapted from:
    https://stackoverflow.com/questions/4022662/find-all-chordless-cycles-in-an-undirected-graph/4028855#4028855
    """
    cycles = [[] for _ in bond_graph.nodes]

    '''
    For all nodes we need to find the cycles that they are included within.
    '''
    for i, node in enumerate(bond_graph.nodes):
        neighbors = list(bond_graph.neighbors(node))
        pairs = list(itertools.combinations(neighbors, 2))

        '''
        Loop over all pairs of neighbors of the node. We will see if a ring
        exists that includes these branches.
        '''
        for pair in pairs:
            '''
            We need to store all node sequences that could be rings. We will
            update this as we traverse the graph.
            '''
            connected = False
            possible_rings = []

            last_node = pair[0]
            ring = [last_node, node, pair[1]]
            possible_rings.append(ring)

            if bond_graph.has_edge(last_node, pair[1]):
                cycles[i].append(ring)
                connected = True

            while not connected:
                '''
                Branch and create a new list of possible rings
                '''
                new_possible_rings = []
                for possible_ring in possible_rings:
                    next_neighbors = list(bond_graph.neighbors(possible_ring[-1]))
                    for next_neighbor in next_neighbors:
                        if next_neighbor != possible_ring[-2]:
                            new_possible_rings.append(possible_ring +
                                                      [next_neighbor])
                possible_rings = new_possible_rings

                for possible_ring in possible_rings:
                    if bond_graph.has_edge(possible_ring[-1], last_node):
                        if any([bond_graph.has_edge(possible_ring[-1], internal_node)
                                for internal_node in possible_ring[1:-2]]):
                            pass
                        else:
                            cycles[i].append(possible_ring)
                            connected = True

                if not possible_rings or len(possible_rings[0]) == max_cycle_size:
                    break

    return cycles
ad7228c31477f457e13e45c789022aca35f00183
8,635
def access_token_generator(token_generator, access_token_template):
    """A function that generates a signed access token"""
    def func(**extra_claims):
        claims = {**access_token_template, **extra_claims}
        return token_generator(**claims)
    return func
4a1052b46dc85e3f15375fbbc5062a950fe40874
8,637
import os


def create_ABC_estimate_config(path_run_ABC, param_num):
    """
    Create ABCtoolbox config file for estimation.

    :param path_run_ABC: full or relative path to directory to run ABC in.
    :param param_num: number of parameters
    :return:
    """
    file_name = '{}/test_ABC_estimate.txt'.format(path_run_ABC)
    simName = '{}/results_combined_transformed.txt'.format(path_run_ABC)
    obsName = '{}/results_observed_transformed.txt'.format(path_run_ABC)
    params = '1-{}'.format(param_num)
    outputPrefix = '{}/ABC_update_estimate_10pls_100ret_'.format(path_run_ABC)
    logFile = '{}/ABC_update_estimate_10pls_100ret.log'.format(path_run_ABC)
    num_sims = sum(1 for line in open('{}/results_combined.txt'.format(path_run_ABC))) - 1

    try:
        os.remove(file_name)
    except OSError:
        pass

    config_file = open(file_name, 'a')
    config_file.write('task estimate\n')
    config_file.write('simName {}\n'.format(simName))
    config_file.write('obsName {}\n'.format(obsName))
    config_file.write('params {}\n'.format(params))
    config_file.write('numRetained 100\n')
    config_file.write('maxReadSims {}\n'.format(num_sims))
    config_file.write('diracPeakWidth 0.01\n')
    config_file.write('posteriorDensityPoints 100\n')
    config_file.write('jointPosteriors A,B\n')
    config_file.write('jointPosteriorDensityPoints 100\n')
    config_file.write('writeRetained 0\n')
    config_file.write('outputPrefix {}\n'.format(outputPrefix))
    config_file.write('logFile {}\n'.format(logFile))
    config_file.write('verbose\n')
    config_file.close()

    return [file_name, outputPrefix]
31287b0e03d0ae730a503370ebd05447190aa2b2
8,638
def calculate_mixing_param_constants(asm_obj):
    """Calculate the constants Cs and Cm required for the determination
    of the Cheng-Todreas mixing parameters

    Parameters
    ----------
    asm_obj : DASSH Assembly object
        Contains the geometry and flow parameters

    Returns
    -------
    tuple
        tuple of two dicts, each containing the laminar and turbulent
        constants for the calculation of eddy diffusivity and the swirl
        velocity for the assembly

    Notes
    -----
    Implemented as a separate method so that it can be tested against the
    results in Tables 1 and 2 of the Cheng-Todreas 1986 paper.
    """
    try:
        c_over_d = (asm_obj.d['pin-pin'] / asm_obj.pin_diameter)**-0.5
    except ZeroDivisionError:  # single pin, d['pin-pin'] = 0
        c_over_d = 0.0
    h_over_d = (asm_obj.wire_pitch / asm_obj.pin_diameter)**0.3
    cm = {}
    cs = {}
    if asm_obj.n_pin >= 19:
        # Laminar
        cm['laminar'] = 0.077 * c_over_d
        cs['laminar'] = 0.413 * h_over_d
        # Turbulent
        cm['turbulent'] = 0.14 * c_over_d
        cs['turbulent'] = 0.75 * h_over_d
    else:
        # Laminar
        cm['laminar'] = 0.055 * c_over_d
        cs['laminar'] = 0.33 * h_over_d
        # Turbulent
        cm['turbulent'] = 0.1 * c_over_d
        cs['turbulent'] = 0.6 * h_over_d
    return cm, cs
336aa6de073fa9eece218deef0d236f47c7fea79
8,639
def cast_str_to_bool(input_string: str) -> bool:
    """Convert string to boolean with special handling for case, "True", 1.

    Special string parsing for booleans.

    Args:
        input_string (str): Evaluate this string as bool.

    Returns:
        case-insensitive 'True', '1' or '1.0' is True. It will be False for
        all other strings.
    """
    return input_string.lower() in ['true', '1', '1.0']
38898f9aaa14ce9d6872941252213431c07878d1
8,640
def phi31_v_from_i_skow(phi31_i):
    """
    Calculates the V band phi31 for the given I band phi31 using the
    relationship found in Skowron et al. (2016).

    (Skowron et al., 2016) (6)

    Parameters
    ----------
    phi31_i : float64
        The I band phi31 of the star.

    Returns
    -------
    phi31_v : float64
        The calculated V band phi31 of the star.
    """
    return 0.122 * phi31_i ** 2 - 0.750 * phi31_i + 5.331
9594e7676bf844908e6555c4064aa795272e529d
8,641
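A worked numeric example for the quadratic relation above (input value is mine, not from the paper):

# phi31_i = 2.0 -> 0.122 * 4 - 0.750 * 2 + 5.331 = 4.319
assert abs(phi31_v_from_i_skow(2.0) - 4.319) < 1e-9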
def get_object_type(objects: list, types: list) -> list:
    """Get the object specified.

    Args:
        objects: a list of objects.
        types: a list of the types.

    Returns:
        A list of a certain type.
    """
    return [item for item in objects if item.get('type') in types]
bfccc9c838f0a2d3294068acc0d2091c44de4798
8,642
import time


def unix() -> int:
    """
    Return the current time in seconds since the Epoch. Fractions of a
    second are truncated by the int() cast.

    Returns:
        int
    """
    return int(time.time())
3e5a0933d9a9eaee7c9f4136f651af6f2982dacc
8,643
def _get_relative_anchors(cluster_medoids, bounds):
    """Get medoid coords relative to fetched RGB."""
    relative_medoids = cluster_medoids.copy()
    relative_medoids.loc[:, "xmin"] -= bounds["XMIN"]
    relative_medoids.loc[:, "ymin"] -= bounds["YMIN"]
    relative_medoids.loc[:, "xmax"] -= bounds["XMIN"]
    relative_medoids.loc[:, "ymax"] -= bounds["YMIN"]
    relative_medoids = relative_medoids * bounds['sf']
    return relative_medoids.astype('int')
5b1bf76aa2e5bf21831df573dd85941dc738a5cc
8,645
def capacity_cost_rule(mod, g, p):
    """ """
    return mod.DRNew_Cost[g, p]
cb6c7160cbdafa4b884f38083a20f59c155a63dc
8,646
import csv


def create_fann_train_from_csv(csv_fname, train_fname, sections, all_species):
    """
    takes the csv file saved by the leaf collection and creates FANN
    training data from it.
    """
    # format: 'binomial nomenclature': [leaf measurements]
    imported_data = {}
    total_leaves = 0

    # read and parse the csv file
    with open(csv_fname, 'r') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in csv_reader:
            if (len(row) == sections + 6):
                if not (row[0] in imported_data.keys()):
                    imported_data[row[0]] = []
                imported_data[row[0]].append(row[1:])

    # get the total number of leaves. this is needed for the
    # fann training data
    for key in imported_data.keys():
        total_leaves += len(imported_data[key])

    # make the "header" for the training data
    # format: (total data trainings), inputs, outputs
    # CHANGE THE INT ADDED TO SECTIONS WHEN MORE DATA IS ADDED #############
    data_list = ['{0} {1} {2}'.format(total_leaves, sections + 5, len(all_species))]

    for key in imported_data.keys():
        for leaf_data in imported_data[key]:
            data_line = ' '.join(leaf_data)
            data_list.append(data_line)

            output_list = []
            for lname in all_species:
                if lname == key:
                    output_list.append(1)
                else:
                    output_list.append(-1)
            output_line = ' '.join([str(i) for i in output_list])
            data_list.append(output_line)

    # write the data to a file
    with open(train_fname, 'w') as train_file:
        for line in data_list:
            train_file.write((line + '\n'))

    return imported_data
d0783c6a78c125250d47bffac753cda1c9df6f2e
8,647