content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def surface_carre(arete):
    """Return the surface area of a square.

    :param arete: length of one side of the square
    :type arete: int or float
    :return: the square's area, in the squared unit of ``arete``
    :rtype: int or float
    """
    return arete ** 2
fecc5ceae98a3549ccc79237cf94423e166a4429
8,648
import os
import requests


def notify_ifttt(message, title=None, link=None, key=None):
    """Send notification via IFTTT

    Parameters
    ----------
    message : str
    title : str, optional
    link : str, optional
    key : str, optional
        API key for IFTTT. If not set, imports the environmental
        variable IFTTT_WEBHOOK_KEY.

    Returns
    -------
    requests.Response
    """
    if key is None:
        key = os.environ["IFTTT_WEBHOOK_KEY"]
    # message can be False, so we can't just test "if message"
    if message is None or len(message) == 0:
        raise ValueError("IFTTT message cannot be empty string or None")
    url_fmt = "https://maker.ifttt.com/trigger/predictit_trade/with/key/{}"
    payload = {"value1": title, "value2": message, "value3": link}
    return requests.post(url_fmt.format(key), data=payload)
b453fc2986bfa1f94e1b612663d1b71408c7d696
8,649
import requests


def _confirm_webpage(url: str) -> bool:
    """Confirm the existence of a webpage before the object is created.

    Returns True only for an HTTP 200 response (redirects are followed).
    """
    response = requests.get(url=url, allow_redirects=True)
    return response.status_code == 200
9d16c8eb26457df238d553656c9cc82e12ff9482
8,651
import ipaddress


def netaddr(host_ip, prefix):
    """Return network address and subnet mask details for host_ip/prefix.

    On a malformed address the string 'Invalid IP/prefix format' is
    returned instead of a dict.
    """
    candidate = '{}/{}'.format(host_ip, prefix)
    try:
        net = ipaddress.ip_network(candidate, False)  # non-strict: host bits allowed
    except ValueError:
        return 'Invalid IP/prefix format'
    return {
        'network_address': str(net.network_address),
        'network_mask': str(net.netmask),
        'first_host': str(net[1]),
        'last_host': str(net[-2]),
        'broadcast': str(net[-1]),
    }
2c51d802eb92b7a0542d85d043ecebe420cecc60
8,653
import asyncio


def timeout(duration):
    """Rewrite a coroutine to operate on a strict timeout.

    The decorated coroutine returns None if it does not finish within
    *duration* seconds.
    """
    def rewrite(fn):
        async def swallow_cancel(*args, **kwargs):
            # Suppress the CancelledError raised when wait_for cancels us,
            # so cancellation never propagates out of the wrapped call.
            try:
                return await fn(*args, **kwargs)
            except asyncio.CancelledError:
                pass

        async def bounded(*args, **kwargs):
            task = asyncio.ensure_future(swallow_cancel(*args, **kwargs))
            try:
                return await asyncio.wait_for(task, timeout=duration)
            except asyncio.TimeoutError:
                return None

        return bounded
    return rewrite
425c00baa597fd3b41d0e2117562c6d8a28dedd1
8,654
import base64


def print_anycli(**kwargs):
    """Display and return the decoded output of an AnyCLI result.

    :param kwargs: keyword value: value to display (dict carrying the key
        'result_base64_encoded')
    :return: the decoded result string
    :Example:

    result = cli(url=base_url, auth=s, command="show vlan")
    print_anycli(result)
    """
    value = kwargs.get('value', None)
    # Decode once and reuse — the original decoded the payload twice.
    decoded = base64.b64decode(value['result_base64_encoded']).decode('utf-8')
    print(decoded)
    return decoded
3d0d34da4e1f6ca347d4503fb242b8485268641a
8,656
def is_service(hass, entry):
    """Return True when *entry* ("domain.service...") names a registered service."""
    domain, _, service = entry.partition(".")
    return hass.services.has_service(domain, service)
d244dc15be20a7e56a17695dbf7df4c1811650a5
8,657
def any_in(collection, values):
    """Check if any of *values* is contained in *collection*.

    Returns boolean.
    """
    return any(candidate in collection for candidate in values)
a8d4471940e96d2b6a307c8ccf48caaaecb10f98
8,658
import os
import zipfile
import fnmatch


def get_zip_names(path, dir, infold=True):
    """Load filenames matching *path* from the zip archive at *dir*.

    *dir* does not have to end with '.zip'. When *infold* is true, the
    pattern is rooted at the archive's top-level folder name.
    Returns (list of matching names, open ZipFile).
    """
    folder = os.path.split(dir)[1] if infold else ''
    if dir.endswith('.zip'):
        # Strip the '.zip' suffix from the folder prefix as well.
        folder = folder[:-4]
    else:
        dir += '.zip'
    archive = zipfile.ZipFile(dir, 'r')
    pattern = os.path.join(folder, path)
    names = [entry for entry in archive.namelist()
             if fnmatch.fnmatch(entry, pattern)]
    return names, archive
cb23d3c630feb6c49b4aefbd6c06a7847dc96768
8,659
def choose_barrier(x, reverse=False):
    """Choose the scenario where the AV hits the barrier.

    If there is no such scenario (both or neither passenger flag is set),
    abstain by returning -1.

    :param reverse: If false, choose to hit the barrier. Else choose not to.
    """
    noint = x["Passenger_noint"]
    intv = x["Passenger_int"]
    if noint == 0 and intv == 0:
        return -1
    if noint == 1 and intv == 1:
        return -1
    if noint == 1:
        return 0 if reverse else 1
    if intv == 1:
        return 1 if reverse else 0
    return -1
5ca785d2e69b3f1dba4e38a0d95d3cf3b4e90cd8
8,660
def _find_user_traceback_depth(tb):
    """Returns the depth of user-specific code in a traceback.

    This is the depth from which we find a frame whose module __name__
    is '__p1__' (presumably the sandbox/user module name — TODO confirm
    against the caller).

    Args:
        tb: A traceback object.

    Returns:
        int: 1-based depth of the first tb entry whose outermost caller
        is not the '__p1__' module, or 0 if every entry originates there.
    """
    depth = 0
    while tb:
        # Find the topmost frame (walk callers to the stack root).
        frame = tb.tb_frame
        while frame.f_back:
            frame = frame.f_back
        if frame.f_globals.get('__name__', None) != '__p1__':
            return depth + 1
        # If it does not contain '__p1__' go down the stack.
        depth += 1
        tb = tb.tb_next
    # We could not find it, assume everything was user-specified
    return 0
fcfd3227430f51b3120f2d84d6d2d3c9e45c968c
8,661
def dict2kvtable(obj, env):
    """Generate an HTML table from a dictionary.

    Renders one <tr> per key/value pair, in the dictionary's iteration
    order. Replaces the unimplemented "XXX table" placeholder.

    :param obj: mapping to render
    :param env: rendering environment (currently unused; kept for
        interface compatibility — TODO confirm whether callers need it)
    :return: HTML string for the table
    """
    rows = "".join(
        "<tr><td>{}</td><td>{}</td></tr>".format(k, v) for k, v in obj.items()
    )
    return "<table>{}</table>".format(rows)
4cb88b5259e7bab7ad3f106251371c5f80bcccd9
8,663
def prob3(num=600851475143):
    """Return the largest prime factor of *num*.

    The prime factors of 13195 are 5, 7, 13 and 29.
    What is the largest prime factor of the number 600851475143?

    The original accumulated every divisor (including composites such as
    4 for num=12) and could loop forever when the divisor product never
    equalled num; this is standard trial division instead.
    """
    largest = 1
    factor = 2
    while factor * factor <= num:
        # Divide out each prime completely before moving on, so only
        # primes are ever recorded.
        while num % factor == 0:
            largest = factor
            num //= factor
        factor += 1
    if num > 1:
        # Remaining cofactor is prime and larger than anything found so far.
        largest = num
    return largest
f0689232bf53152d87a6e28ffe4cce98908bca4f
8,667
def is_turkish_id(x):
    """Check whether *x* is a valid Turkish national identity (TC) number.

    Rules: 11 digits, first digit non-zero, 11th digit equals the sum of
    the first ten mod 10, plus the weighted odd/even position checksums.
    """
    text = str(x)
    if len(text) != 11:
        return False
    digits = [int(ch) for ch in text if ch.isdigit()]
    if digits[0] == 0:
        return False
    # 11th digit must equal the sum of the first ten digits mod 10.
    if sum(digits[:10]) % 10 != digits[-1]:
        return False
    odd_positions = sum(digits[0:9:2])    # positions 1,3,5,7,9
    even_positions = sum(digits[1:8:2])   # positions 2,4,6,8
    tenth_ok = (odd_positions * 7 + even_positions * 9) % 10 == digits[9]
    eleventh_ok = (odd_positions * 8) % 10 == digits[-1]
    return tenth_ok and eleventh_ok
c24b233fd74b5c45f6aff752bb57ae12a296d6b5
8,668
def _make_unique(l): """Check that all values in list are unique and return a pruned and sorted list.""" return sorted(set(l))
18ae627f2a8f5dc8c61a332b73d8bd99c41d5ced
8,669
def post_info(request, post, put_content=False):
    """Build a dict of post fields.

    Content is included only when *put_content* is true (omitted by
    default for performance).
    """
    api_ref = request.build_absolute_uri('/api/posts/{}'.format(post.path))
    web_link = request.build_absolute_uri('/post/{}'.format(post.path))
    data = {
        'title': post.title,
        'subject': post.subject,
        'publication_date': post.pub_date,
        'author': post.author.username,
        'path': post.path,
        'ref': api_ref,
        'link': web_link,
    }
    if put_content:
        data['content'] = post.content
    return data
4092b20a76c8a1b140a9b6faa534ffe9e5c2aac4
8,670
import yaml


def parse_config_file(config_file) -> dict:
    """Read a YAML config file with params.

    Returns
    -------
    dict
        Dict of config
    """
    with open(config_file, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # Malformed YAML is fatal: report the parse error and abort.
            print(exc)
            exit(1)
    return config
8f1fb9bcda94ef5c21edbf5e5bf95b327efd8c96
8,673
def Property(func):  # pylint: disable = C0103
    """ Property with improved docs handling

    :Parameters:
      `func` : ``callable``
        The function providing the property parameters. It takes no
        arguments and returns a dict containing the keyword arguments to
        be defined for ``property``. The documentation is taken out of
        the function by default, but can be overridden in the returned
        dict.

    :Return: The requested property
    :Rtype: ``property``
    """
    params = func()
    # Fall back to the provider function's docstring unless 'doc' is given.
    params.setdefault('doc', func.__doc__)
    lookup = params.get
    return property(
        fget=lookup('fget'),
        fset=lookup('fset'),
        fdel=lookup('fdel'),
        doc=lookup('doc'),
    )
061b5f6b4ec151888a64a590e2b5c974d2666301
8,674
def collision(object1, object2):
    """Detect collision between two objects using bounding circles.

    The circles are centered on the objects and sized by 'fire_radius'
    when both objects have one, falling back to 'radius' otherwise.

    Args:
        object1: First object for collision detection
        object2: Second object for collision detection

    Returns:
        bool: True if the objects' circles overlap
    """
    dx = object1.x - object2.x
    dy = object1.y - object2.y
    gap_squared = dx * dx + dy * dy
    try:
        reach = object1.fire_radius + object2.fire_radius
    except AttributeError:
        # Not everything burns — fall back to the plain radius.
        reach = object1.radius + object2.radius
    return gap_squared <= reach ** 2
e42bcce7a111fa7f8de2c10b16a91c0d49992ddb
8,676
def find_key_symptom(tariffs, cause_reduction, cause, endorsements, rules=None):
    """Find the key endorsed symptom for a cause.

    Args:
        tariffs (dict): processed tariff matrix
        cause_reduction (dict): mapping from cause46 to cause34
        cause (int): cause number at the cause34 level
        endorsements (iterable): names of endorsed symptoms
        rules (dict): mapping of rule-based cause prediction to key symptom

    Returns:
        symptom (str): name of the key symptom, or None if none found
    """
    rules = rules or {}
    forced = rules.get(cause)
    if forced:
        return forced
    relevant46 = [c46 for c46, c34 in cause_reduction.items() if c34 == cause]
    chosen = {}
    for c46 in relevant46:
        for symptom, tariff in tariffs[c46]:
            if symptom not in endorsements or tariff <= 0:
                continue
            # Keep the smallest positive tariff seen for each symptom
            # (an already-stored smaller value is never overwritten).
            if symptom not in chosen or chosen[symptom] >= tariff:
                chosen[symptom] = tariff
    if chosen:
        # Sort ascending by tariff and take the last → highest value,
        # ties resolved toward the later insertion (stable sort).
        return sorted(chosen.items(), key=lambda item: item[1])[-1][0]
e8805fd29bf09cd3e0269ae4203f4fd7912f5c72
8,678
def transform(tokens):
    """Accumulate tokens into lines.

    Tokens (plus one space each) are added to the current line until it
    would overflow 80 characters, at which point a new line is started.
    """
    lines = []
    current = []
    width = 0  # running sum of len(tok) + 1 for tokens already in current
    for tok in tokens:
        if width + len(tok) > 80:
            lines.append(current)
            current = []
            width = 0
        current.append(tok)
        width += len(tok) + 1
    if current:
        lines.append(current)
    return lines
c30af21db61b2b00848b0263552461f9682a6d08
8,680
import json


def ocr_loader(json_path):
    """Helper to load OCR data from a JSON file.

    Args:
        json_path (string): Path to the json file with OCR output data

    Returns:
        string: OCR text output (tokens joined by single spaces)
    """
    # Normalize Windows-style separators before opening.
    normalized = json_path.replace('\\', '/')
    with open(normalized, "r") as handle:
        payload = json.load(handle)
    words = payload if isinstance(payload, list) else payload['text']
    return " ".join(words)
7e182b184b305bffc97dadf59b139a1aa53250b1
8,681
def to_ps(obj, parlen=False):
    """Converts object into postscript literal

    >>> to_ps(None)
    'null'
    >>> to_ps(123)
    '123'
    >>> to_ps(456.78)
    '456.78'
    >>> to_ps(True), to_ps(False)
    ('true', 'false')
    >>> to_ps('foo bar baz')
    'foo bar baz'
    >>> to_ps('foo bar baz', parlen=True)
    '(foo bar baz)'
    """
    # Order matters: str first, then bool (a bool is also an int).
    if isinstance(obj, str):
        return '(%s)' % obj if parlen else '%s' % obj
    if isinstance(obj, bool):
        return 'true' if obj else 'false'
    if obj is None:
        return 'null'
    return str(obj)
11fa2888678970f9ab37e4827e87bbf67856898c
8,682
def enum_member_name(state):
    """Return only the member part of an enum's string form.

    Enum members render as <EnumClassName>.<EnumMemberName>; for display
    we want just the member name after the first dot.
    """
    _, member = str(state).split('.', 1)
    return member
d6fa9320c1f96209fd6d547f1a7715ade391c672
8,683
from typing import Counter
import re


def seq2polyA(seq):
    """Report the polyA status of a sequence.

    Output list: (1) "pass"/"fail" (2) "L" or "R" end (3) most common
    end character (4) length of the polyA run (5) length of the sequence.
    All numeric fields are returned as strings.
    """
    head, tail = seq[0:10], seq[-10:]
    dominant = Counter(head + tail).most_common(1)[0][0]
    # Pick whichever end is richer in the dominant character.
    if Counter(head)[dominant] > Counter(tail)[dominant]:
        side = "L"
        match = re.search('^(' + dominant + '+)', seq)
    else:
        side = "R"
        match = re.search('(' + dominant + '+)$', seq)
    run = match.group(1) if match else "-"
    verdict = "pass" if dominant in ('A', 'T') else "fail"
    return [verdict, side, dominant, str(len(run)), str(len(seq))]
61517f86dc776863d3586c2013115c2e9c0afd96
8,684
def setbit(target, bit):
    """Return *target* with bit number *bit* set to 1."""
    mask = 1 << bit
    return target | mask
55f073fb82c5a29a50b8505be6a8a3971ebaf872
8,685
import time, random


def generate_trade_id():
    """Generate an 11-digit trade id by a fixed rule.

    Layout: first 3 digits of the epoch-seconds string, 4 random digits,
    digits 8-10 of the epoch string, then the second fractional digit.

    :return: 11-character numeric string
    """
    stamp = str(time.time())
    # randint's upper bound is inclusive; the original used 10000, which
    # occasionally produced a 5-digit block and a 12-character id.
    return stamp[:3] + str(random.randint(1000, 9999)) + stamp[7:10] + stamp[11]
79a25817b5eaa0e18c46f419e0373e620d5d9b6f
8,688
def request_path(request):
    """Return the path of the request as a plain string."""
    path = request.path
    return str(path)
506672b635b6196aa032c7bed5b740c5a8d70c79
8,689
import base64 import hashlib def _sub_hash_password(password): """ Hash long password to allow bcrypt to handle password longer than 72 characters. :param password: password to hash. :return: (String) The hashed password. """ # bcrypt only handles passwords up to 72 characters. # We use this hashing method as a work around. # Suggested in bcrypt PyPI page (2018/02/08 12:36 EST): # https://pypi.python.org/pypi/bcrypt/3.1.0 return base64.b64encode(hashlib.sha256(password.encode("utf-8")).digest())
73498949c1e29712192d9379d14cf816d095a01a
8,691
import os


def get_default_version_hostname():
    """Get the standard hostname of the default version of the app.

    For example if your application_id is my-app then the result might
    be my-app.appspot.com.

    Returns:
        The standard hostname of the default version of the application
        (None when the environment variable is unset).
    """
    return os.environ.get('DEFAULT_VERSION_HOSTNAME')
b23bf9786fa4209a66d6581d86b1d4e4de6c7a41
8,692
from colorsys import rgb_to_hsv


def from_rgb_to_paletton_hue(rgb, paletton):
    """Map an RGB triple to its paletton hue.

    >>> p = Paletton()
    >>> print(from_rgb_to_paletton_hue((120, 0, 106), p))
    318
    """
    hue = round(rgb_to_hsv(*rgb)[0] * 360)
    offsets = paletton.HUE_OFFSETS
    if hue not in offsets:
        # Snap down to the largest registered hue below the computed one.
        known = sorted(offsets.keys())
        position = sorted(known + [hue]).index(hue)
        hue = known[position - 1]
    return offsets[hue]
c76f257f46fc9c84d830c2f006f1af8155cbb38f
8,693
def iobes2iob(iobes):
    """Converts a list of IOBES tags to IOB scheme."""
    # S (single) becomes B, E (end) becomes I; I, O, B map to themselves.
    prefix_map = {"I": "I", "O": "O", "B": "B", "S": "B", "E": "I"}
    converted = []
    for tag in iobes:
        converted.append("O" if tag == "O" else prefix_map[tag[0]] + tag[1:])
    return converted
3baed417dfccf25ddf5f0bdae686a01fa8bfda95
8,695
def getFromLongestMatchingKey(object, listOfKeys, caseInsensitive=True):
    """
    Function to take an object and a list of keys and return the value
    of the longest matching key or None if no key matches.

    :param object: The object with the keys.
    :type object: dict

    :param listOfKeys: A list of keys to try to match
    :type listOfKeys: list of string keys

    :param caseInsensitive: Case insensitive key matching?
    :type caseInsensitive: boolean

    :returns: value of longest matching key in object
    """
    # Work on a copy so the caller's list is never mutated.
    listOfKeys = listOfKeys.copy()
    if caseInsensitive:
        object = {k.lower():v for k,v in object.items()}
        listOfKeys = [k.lower() for k in listOfKeys]
    # Longest key by string length; str(k) assumes keys stringify to
    # themselves (non-string keys would never match below — TODO confirm).
    key = max(
        [str(k) for k in listOfKeys],
        key=len
    ) if len(listOfKeys) else None
    if key and key in listOfKeys:
        listOfKeys.remove(key)
    # NOTE(review): the recursive fallback is evaluated eagerly as
    # dict.get's default, even when `key` is present — correct but it
    # walks the remaining keys on every call.
    return(
        object.get(
            key,
            getFromLongestMatchingKey(object, listOfKeys)
        ) if key else None
    )
25271697197c5c16c2ad5ae7320fc452bc3c8205
8,696
def key_type(key):
    """String identifying if the key is a 'name' or an 'ID', or '' for None.

    This is most useful when paired with key_id_or_name_as_string.

    Args:
        key: A datastore Key

    Returns:
        The type of the leaf identifier of the Key, 'ID', 'name', or ''.
    """
    if key.id():
        return 'ID'
    if key.name():
        return 'name'
    return ''
8d055fc97313b7f613e5927d0f8f38d060a2cb2b
8,698
def _select_encoding(consumes, form=False): """ Given an OpenAPI 'consumes' list, return a single 'encoding' for CoreAPI. """ if form: preference = [ 'multipart/form-data', 'application/x-www-form-urlencoded', 'application/json' ] else: preference = [ 'application/json', 'multipart/form-data', 'application/x-www-form-urlencoded', 'application/octet-stream' ] if not consumes: return preference[0] for media_type in preference: if media_type in consumes: return media_type return consumes[0]
81cac06c34f3df0d3c570ebcae90545a3a988fbc
8,699
def other_classes(nb_classes, class_ind):
    """
    Helper function that returns a list of class indices without one class.

    :param nb_classes: number of classes in total
    :param class_ind: the class index to be omitted
    :return: list of class indices without one class
    """
    remaining = list(range(nb_classes))
    # remove() keeps the original's ValueError when class_ind is out of range.
    remaining.remove(class_ind)
    return remaining
05b88e49827523508b14400aa83aa83dd48f2b2e
8,700
from typing import Dict
from io import StringIO


def interpolate(s: str, registers: Dict) -> str:
    """
    Interpolates variables in a string with values from a supplied
    dictionary of registers.

    The parser is very lax and will not interpolate variables that
    don't exist, as users may not be intending to interpolate a
    variable when they type the hash character.

    The hash symbols can be escaped with a caret (^), but if a caret is
    placed before a character that doesn't need escaping (another caret
    or a hash character), then the escape character acts as a normal
    character (nothing is removed or replaced).

    Parameters
    ----------
    s : str
        The string to interpolate
    registers : Dict[str, Message]
        A mapping of variable names to messages

    Returns
    -------
    str
        A new string with interpolated values
    """
    # Single-pass character state machine with three states:
    # plain text, escaping (just saw '^'), and variable_mode (inside #...#).
    escaping = False
    variable_mode = False
    buffer = StringIO()
    variable_buffer = StringIO()
    for i in range(0, len(s)):
        if escaping:
            # '^' only escapes '#'; before anything else it is literal.
            if s[i] != "#":
                buffer.write("^")
            buffer.write(s[i])
            escaping = False
        elif variable_mode:
            if s[i] == "#":
                # Closing '#': substitute if the name is registered,
                # otherwise emit the span verbatim (lax behavior).
                name = variable_buffer.getvalue()
                if name in registers:
                    buffer.write(registers[name].content)
                else:
                    buffer.write("#")
                    buffer.write(name)
                    buffer.write("#")
                variable_buffer = StringIO()
                variable_mode = False
            elif s[i] != " ":
                variable_buffer.write(s[i])
            else:
                # invalid variable name (space) — abandon variable mode
                # and flush what was collected, unchanged.
                buffer.write("#")
                buffer.write(variable_buffer.getvalue())
                buffer.write(s[i])
                variable_buffer = StringIO()
                variable_mode = False
        elif s[i] == "^":
            escaping = True
        elif s[i] == "#":
            variable_mode = True
        else:
            buffer.write(s[i])
    # Flush dangling state: a trailing '^' or an unterminated '#...'.
    if escaping:
        buffer.write("^")
    if len(variable_buffer.getvalue()):
        buffer.write("#")
        buffer.write(variable_buffer.getvalue())
    return buffer.getvalue()
a877e455771e09bcca85455ffefe87c4622255f2
8,702
import array
import math


def fpart(x):
    """Return the fractional part of a real number.

    Accepts a plain number, or a one-element list / array.array whose
    sole element is unwrapped first. The sign of the result follows x.

    :param x: number, or one-element list / array.array of numbers
    :return: fractional part as a float
    """
    # The original tested ``type(x) in [array, list]`` — comparing against
    # the array *module*, which can never match, so arrays/lists always
    # reached math.modf and raised. Compare against array.array instead.
    if isinstance(x, (array.array, list)):
        if len(x) == 1:
            x = x[0]
    return math.modf(x)[0]
03659c7b0ae133d226019141af59f4ec039c7dde
8,704
def count_smileys(arr):
    """Count valid smileys in *arr*.

    A smiley is eyes (':' or ';'), an optional nose ('-' or '~'), and a
    smile (')' or 'D').
    """
    eyes, noses, smiles = ":;", "-~", ")D"
    total = 0
    for face in arr:
        if len(face) > 2:
            valid = face[0] in eyes and face[1] in noses and face[2] in smiles
        else:
            valid = face[0] in eyes and face[1] in smiles
        if valid:
            total += 1
    return total
ebcd9147614b47a9911dfcb408bfd74c71de4203
8,705
def stringify_edge_set(s: set):
    """Convert an agent-piece graph into a canonical string, for display and testing."""
    pairs = [(agent.name(), piece) for (agent, piece) in s]
    pairs.sort()
    return str(pairs)
8d95fa4174a37bac1094a13449f92143993bdd23
8,706
def get_fully_qualified_class_name(cls):
    """Returns fully dot-qualified path of a class, e.g.
    `ludwig.models.trainer.TrainerConfig` given `TrainerConfig`."""
    return f"{cls.__module__}.{cls.__name__}"
dc3cbbb8be4503b562a381aa45842399a623971e
8,707
def parsevROps(payload, alert):
    """ Parse vROps JSON from alert webhook. Returns a dict.

    Mutates and returns *alert*: copies/defaults the vROps fields, picks a
    display color from status+criticality, builds the 'fields' list and a
    'moreinfo' summary string.
    """
    # Not a vROps payload at all -> hand the alert back untouched.
    if (not 'alertId' in payload):
        return alert
    # Copy fields with per-field defaults; display-facing ones ("<None>")
    # differ from plumbing ones ("").
    alert.update({
        "hookName": "vRealize Operations Manager",
        "AlertName": payload['alertName'] if ('alertName' in payload and payload['alertName'] != "") else "<None>",
        "alertId": payload['alertId'] if ('alertId' in payload) else "",
        "info": payload['info'] if ('info' in payload and payload['info'] is not None) else "",
        "criticality": payload['criticality'] if ('criticality' in payload) else "",
        "status": payload['status'] if ('status' in payload) else "",
        "type": payload['type'] if ('type' in payload) else "",
        "subType": payload['subType'] if ('subType' in payload) else "",
        "Risk": payload['Risk'] if ('Risk' in payload) else "<None>",
        "Efficiency": payload['Efficiency'] if ('Efficiency' in payload) else "<None>",
        "Health": payload['Health'] if ('Health' in payload) else "<None>",
        "resourceName": payload['resourceName'] if ('resourceName' in payload) else "",
        "resourceId": payload['resourceId'] if ('resourceId' in payload) else "",
        "adapterKind": payload['adapterKind'] if ('adapterKind' in payload) else "",
        "startDate": payload['startDate'] if ('startDate' in payload) else "",
        "updateDate": payload['updateDate'] if ('updateDate' in payload) else "",
        "icon": "http://blogs.vmware.com/management/files/2016/09/vrops-256.png",
        "Messages":"",
        "url":"",
        "editurl":"",
    })
    # Color: ACTIVE alerts -> red/yellow/gray by severity; resolved
    # (non-empty, non-ACTIVE) -> green/gray; empty status -> red.
    if (alert['status'] == "ACTIVE"):
        if (alert['criticality'] == "ALERT_CRITICALITY_LEVEL_CRITICAL" or alert['criticality'] == "ALERT_CRITICALITY_LEVEL_IMMEDIATE"):
            color = "red"
        elif (alert['criticality'] == "ALERT_CRITICALITY_LEVEL_WARNING"):
            color = "yellow"
        else:
            color = "gray"
    elif (alert['status'] != "ACTIVE" and alert['status'] != ""):
        if (alert['criticality'] == "ALERT_CRITICALITY_LEVEL_CRITICAL" or alert['criticality'] == "ALERT_CRITICALITY_LEVEL_IMMEDIATE" or alert['criticality'] == "ALERT_CRITICALITY_LEVEL_WARNING"):
            color = "green"
        else:
            color = "gray"
    else:
        color = "red"
    alert.update({
        "color": color,
        "fields": [
            {
                "name": 'Health',
                "content": str(alert['Health']),
            },
            {
                "name": 'Risk',
                "content": str(alert['Risk']),
            },
            {
                "name": 'Efficiency',
                "content": str(alert['Efficiency']),
            },
            {
                # NOTE(review): 'Resouce' typo is part of the emitted field
                # name — confirm downstream consumers before correcting.
                "name": 'Resouce Name',
                "content": alert['resourceName'],
            },
            {
                "name": 'Adapter Kind',
                "content": alert['adapterKind'],
            },
            {
                "name": 'Type',
                "content": alert['type'],
            },
            {
                "name": 'Sub Type',
                "content": alert['subType'],
            },
        ]
    })
    if (alert['adapterKind'] == 'sample-adapter-type'):
        # If a test alert
        alert.update({
            "moreinfo": "Hello from the webhook shim! This is a test webhook alert.\n\n" + \
                ("Alert Name: ") + alert['AlertName'] + \
                ("\nAlert Info: ") + alert['info'] + \
                ("\nAlert Details: ") + str(alert['fields']),
        })
    else:
        alert.update({
            "moreinfo": ("Alert Name: ") + alert['AlertName'] + \
                ("\nAlert Info: ") + alert['info'] + \
                ("\nAlert Details: ") + str(alert['fields']),
        })
    return alert
8149dd87f26c0767692dcd79ab88a36e1f2e328b
8,708
def as_chunks(l, num):
    """
    :param list l:
    :param int num: Size of split
    :return: List of consecutive slices of *l*, each at most *num* long
    :rtype: list
    """
    return [l[start:start + num] for start in range(0, len(l), num)]
6bf6a2efed8e4830447319dd1624e70463faaf41
8,709
def _name_to_agent_class(name: str): """ Convert agent name to class. This adds "Agent" to the end of the name and uppercases the first letter and the first letter appearing after each underscore (underscores are removed). :param name: name of agent, e.g. local_human :return: class of agent, e.g. LocalHumanAgent. """ words = name.split('_') class_name = '' for w in words: # capitalize the first letter class_name += w[0].upper() + w[1:] # add Agent to the end of the name class_name += 'Agent' return class_name
6ac0dbf4fb8ab90e592b85216be6d9c109a1310c
8,711
def bin_search_recursive(array, what_to_find, left=0, right=None):
    """
    Finds element in a sorted array using recursion.

    :param list array: A sorted list of values.
    :param what_to_find: An item to find.
    :returns: Index of the searchable item or -1 if not found.
    """
    if right is None:
        right = len(array) - 1
    if left > right:
        # Window exhausted — item is absent.
        return -1
    mid = (left + right) // 2
    candidate = array[mid]
    if candidate == what_to_find:
        return mid
    if what_to_find < candidate:
        return bin_search_recursive(array, what_to_find, left, mid - 1)
    return bin_search_recursive(array, what_to_find, mid + 1, right)
83ff4dbcd9cab179c5e83f73d5fdc7c5a6bca4d4
8,712
from typing import List


def extensions_to_glob_patterns(extensions: List) -> List[str]:
    """Generate a list of glob patterns from a list of extensions.

    Each '.' is prefixed with '*', so ".py" becomes "*.py".
    """
    return [ext.replace(".", "*.") for ext in extensions]
a04ed356bfa5db7c0210b86dff832d32bfef6dbf
8,713
import re


def get_operation_id_groups(expression):
    """Parse an operator expression from an .mmcif transformation dict
    into the transformation ID groups it refers to.

    For example, (1,2,3) becomes [[1, 2, 3]], (1-3)(8-11,17) becomes
    [[1, 2, 3], [8, 9, 10, 11, 17]], and so on.

    :param str expression: The expression to parse.
    :rtype: ``list``"""
    if expression[0] != "(":
        expression = "({})".format(expression)
    group_ids = []
    for chunk in re.findall(r"\((.+?)\)", expression):
        ids = []
        for element in chunk.split(","):
            if "-" in element:
                # Expand a numeric range like "8-11" inclusively.
                bounds = [int(part) for part in element.split("-")]
                ids.extend(str(n) for n in range(bounds[0], bounds[1] + 1))
            else:
                ids.append(element)
        group_ids.append(ids)
    return group_ids
8ec6fdca5209de1d658a2ae938fc840e9d1b0c23
8,714
def mrt_alert_msg(mrt_line, direction, stations, public_bus, mrt_shuttle,
                  mrt_shuttle_dir):
    """
    Message that will be sent if there is an MRT alert/breakdown/delay
    :param mrt_line: "DTL/NSL/EWL..."
    :param direction: "Both"/specific MRT station name("Jurong East")
    :param stations: "NS17, NS16, NS15, NS14, NS13, NS12..."
    :param public_bus: "Free bus service island-wide..."
    :param mrt_shuttle: "EW21|CC22, EW23, EW24|NS1, EW27..."
    :param mrt_shuttle_dir: "Both"
    """
    return (
        f'<b>Line:</b> {mrt_line}\n'
        f'<b>Direction:</b> {direction}\n'
        f'<b>Stations:</b> {stations}\n'
        f'<b>Free Public Bus:</b> {public_bus}\n'
        f'<b>Free MRT Shuttle:</b> {mrt_shuttle}\n'
        f'<b>MRT Shuttle Direction:</b> {mrt_shuttle_dir}\n\n'
    )
df03473dab23748f42bfe9fc8bc0fe9a80fc7c74
8,715
def is_envvar(buff, pos):
    """:return: start, end, pos or None, None, None tuple."""
    failure = (None, None, None)
    try:
        # Skip leading blanks.
        while buff[pos] in ' \t':
            pos += 1
        begin = pos
        while True:
            ch = buff[pos]
            if ch in '\0"\'()- \t\n':
                # Characters that cannot appear in an assignment name.
                return failure
            if ch == '=':
                # '=' with an empty name is not an assignment.
                if pos == begin:
                    return failure
                return begin, pos, pos + 1
            pos += 1
    except IndexError:
        # Ran off the end without seeing '='.
        return failure
ee424577dd91a7d7011996c5f185f15855e1d2f5
8,716
def load_model(filename):
    """
    Return a model stored within a file.

    This routine is for specialized model descriptions not defined by
    script. If the filename does not contain a model of the appropriate
    type (e.g., because the extension is incorrect), then return None.

    Returning None tells the caller to fall back to pickle or script
    loading — those are attempted only after this returns None.
    """
    # No specialized formats are supported here.
    return None
2be8d79119538c31606dfccffd8560aa82dc4e7a
8,717
def climb_stairs(stairs):  #
    """Count distinct ways to climb a staircase of *stairs* steps,
    taking 1 or 2 steps at a time.

    Example:
        Input: 2    Output: 2   (1+1, 2)
        Input: 3    Output: 3   (1+1+1, 1+2, 2+1)

    The naive double recursion is O(2^n); this iterative Fibonacci-style
    DP is O(n). For stairs <= 0 (where the original recursed forever)
    the input value itself is returned.
    """
    if stairs <= 2:
        return stairs
    two_back, one_back = 1, 2
    for _ in range(stairs - 2):
        two_back, one_back = one_back, two_back + one_back
    return one_back
c32a05ab1013b769c2d040a00c622605d7893398
8,718
import textwrap


def text_word_wrap(text, width):
    """
    Word-wrap *text* to *width* columns.

    Returns the list of wrapped lines (no trailing newlines).
    """
    wrapped_lines = textwrap.wrap(text, width)
    return wrapped_lines
7dbaae3a61be37a3208dd9c9b6a541aadb325e3e
8,719
import json


def load_tour(fname):
    """
    Reads a tour from a JSON file.

    Input:
        - fname : filename

    Output:
        - tour : loaded tour
    """
    with open(fname, 'r') as handle:
        tour = json.load(handle)
    return tour
7cd4db05867d2ab5dd26620c8ff2497eb5aa4a68
8,720
def show_user(username):
    """Return the welcome greeting for *username*."""
    return f'Welcome: {username}'
29980cfe7dba8048aa0ecaa351d9baf4d47dd8ec
8,721
def get_cost(ss, a, dist_mat, C, n):
    """Determines the cost of an airline route network from a sampleset.

    Args:
        - ss: Sampleset dictionary. One solution returned from the hybrid solver.
        - a: Float in [0.0, 1.0]. Discount allowed for hub-hub legs.
        - dist_mat: Numpy matrix providing distance between cities i and j.
        - C: Numpy matrix. Represents airline leg cost.
        - n: Int. Number of cities in play.

    Returns:
        - cost: Cost of provided route network.
    """
    total = 0
    for i in range(n):
        hub_i = ss[i]
        for j in range(i + 1, n):
            hub_j = ss[j]
            # Leg cost: each city to its hub, plus the discounted hub-hub hop.
            leg = C[i][hub_i] + C[j][hub_j] + a * C[hub_i][hub_j]
            total += dist_mat[i][j] * leg
    return total
d8e810133a08213d0815a551c1fd7eaaa650532f
8,722
def _convert_to_float(score): """ Convert a string ('score') to float. If the string is empty, return None. If the string is float-like, return its float. """ if len(score) == 0: return None else: return float(score)
48cbc42310595d5a6ae8d8296feb7d81e61d52dc
8,724
import click
import functools


def ensure_host_configured(f):
    """Ensure that this configuration has been set up.

    Decorator for click commands: injects the config object (via
    click.pass_obj), aborts when no SSH host is configured, and warns
    (but continues) when the config file is missing on disk.
    """
    @click.pass_obj
    @functools.wraps(f)
    def _wrapper(cfg, *args, **kwargs):
        # '*' doubles as the unset sentinel for remote.ssh.host -> hard failure.
        if cfg.get('remote.ssh.host', '*') == '*':
            click.echo(f"{click.style('ERROR', fg='red')}: No configuration found at {cfg.filename!s}")
            raise click.BadParameter(message='Host not specified')
        # A missing file is survivable (host may come from elsewhere): warn only.
        if not cfg.filename.exists():
            click.echo(f"{click.style('WARNING', fg='yellow')}: No configuration found at {cfg.filename!s}")
        return f(cfg, *args, **kwargs)
    return _wrapper
f44ec26db18a3a228620b9e3319529ff728c9cfa
8,725
def determine_suitable_lda(displace_elev, margin):
    """Given a displacement elevation and safety margin, determine the
    minimum acceptable LDA settings."""
    minimum_lda = margin * displace_elev
    return minimum_lda
6db43c125ee98bfefe91f9d44c601dcacdf7aff3
8,727
def constructfullKMap(tmap, kmap):
    """Construct a complete k-map from the complete t-map and the mapping
    between t-vals and k-vals.

    Every cell equal to t in *tmap* becomes kmap[t] in the result; the
    input array is left untouched.
    """
    kfull = tmap.copy()
    for tval in range(len(kmap)):
        kfull[tmap == tval] = kmap[tval]
    return kfull
d034522b00facc29cd9218f50a5f22ed1963c24c
8,728
def get_format_from_name(name: str) -> str:
    """
    Infer the input format of a country identifier.
    Used when the input format is auto.
    """
    try:
        int(name)
    except ValueError:
        # Non-numeric: classify by length, anything else is a regex.
        length = len(name)
        if length == 2:
            return "alpha-2"
        if length == 3:
            return "alpha-3"
        return "regex"
    return "numeric"
ca5baa8790837002261bab68f3989f57fa2943af
8,729
import string


def shorten(message):
    """Convert a delay message into a short signed form.

    'On time' becomes '0'; otherwise the digits are kept and prefixed
    with '+' when the message contains 'late', '-' otherwise.
    """
    if message == 'On time':
        return '0'
    # str.translate(None, chars) was the Python 2 API and raises on
    # Python 3; build a deletion table with str.maketrans instead.
    number = message.translate(str.maketrans('', '', string.ascii_letters)).strip()
    sign = '+' if 'late' in message else '-'
    return '{}{}'.format(sign, number)
3fc8392e9ec61fa46fd9db6ed69ee4ac62a5f4b0
8,731
def get_create_table_field_data(field_data):
    """
    Generate the field-wise query segments needed to create a table.

    :param field_data: List of dicts with each dict having keys 'name',
        'type' and, optionally, 'modifiers'
    :return: comma-separated "<name> <type> [<modifiers>]" string
    """
    segments = []
    for field in field_data:
        segment = field["name"] + " " + field["type"]
        # 'modifiers' is documented as optional; .get avoids the KeyError
        # the original raised when the key was absent.
        modifiers = field.get("modifiers")
        if modifiers:
            segment += " " + modifiers
        segments.append(segment)
    return ", ".join(segments)
92ef0a4c60d54212e90cc3ac110964565bbfd1be
8,732
def determine_timestamp(item):
    """Determine the timestamp of the given item

    Args:
        item: [dict] Specifies an resource instance created by an API

    Raises:
        ValueError: when no known timestamp key is present.
    """
    # There is no standard for this.
    # The following are common to some APIs.
    for candidate in ('creationTimestamp', 'timeCreated'):
        if candidate in item:
            return item[candidate]
    raise ValueError('Could not determine timestamp key for {0}'
                     .format(item.get('kind', item)))
e590b8e2efa9a791b96ba64d1bd73601ba34d317
8,733
def checkbox_to_boolean(list_checkbox_params, dict_all_params):
    """
    Normalize checkbox post parameters to booleans, in place.

    Checkboxes supply some arbitrary value in a post if they are checked
    and no value at all if unchecked — so mere presence of the key means
    True. Returns the (mutated) dict with booleans for every listed param.
    """
    for param in list_checkbox_params:
        dict_all_params[param] = param in dict_all_params
    return dict_all_params
fe57b02ae6593a0426364cba0c2b41f1362d2968
8,734
def distribution_data(history,mutant_id,i,all_types=False):
    """ generates neighbour data for mutants (or all cells if all_types is True)
            cells are labelled by their ancestor. all cells with ancestor=mutant_id are type 1, all other cells type 0.
        returns list of dicts with keys: tissueid, time, n, k, j [, type]
            n = # type 1 cells
            k = # neighbours
            j = # type 1 neighbours

    :param history: iterable of tissue snapshots; each must expose
        `.time`, `.properties['ancestor']` and `.mesh.neighbours`
    :param mutant_id: ancestor label identifying the mutant clone
    :param i: identifier stored under 'tissueid' in every output row
    :param all_types: when True, emit a row for every cell (with a 'type'
        flag); otherwise only for cells belonging to the mutant clone
    """
    # Only tissues where the clone has 1..99 cells are included (clone
    # neither extinct nor near fixation).
    # NOTE(review): the boolean-mask indexing below assumes
    # tissue.properties['ancestor'] is a numpy array — confirm upstream.
    if all_types:
        return [{'tissueid':i,'time':int(tissue.time),'n':sum(tissue.properties['ancestor']==mutant_id),'k':len(cell_neighbours),
                    'j':sum((tissue.properties['ancestor']==mutant_id)[cell_neighbours]),'type': 1 if tissue.properties['ancestor'][idx]==mutant_id else 0}
                for tissue in history if 1<=sum(tissue.properties['ancestor']==mutant_id)<100
                for idx,cell_neighbours in enumerate(tissue.mesh.neighbours)]
    else:
        # Same as above but restricted to cells whose ancestor is the mutant,
        # so the per-row 'type' flag is redundant and omitted.
        return [{'tissueid':i,'time':int(tissue.time),'n':sum(tissue.properties['ancestor']==mutant_id),'k':len(cell_neighbours),
                    'j':sum((tissue.properties['ancestor']==mutant_id)[cell_neighbours])}
                for tissue in history if 1<=sum(tissue.properties['ancestor']==mutant_id)<100
                for idx,cell_neighbours in enumerate(tissue.mesh.neighbours) if tissue.properties['ancestor'][idx]==mutant_id]
31f8b07108a6283713277f8ecf90df14c2f0e003
8,735
def likes(names):
    """Take string of names and let you know who likes 'it'."""
    count = len(names)
    if count == 0:
        return "no one likes this"
    if count == 1:
        return "{} likes this".format(names[0])
    if count == 2:
        return "{} and {} like this".format(names[0], names[1])
    if count == 3:
        return "{}, {} and {} like this".format(names[0], names[1], names[2])
    # Four or more: show the first two and count the rest.
    return "{}, {} and {} others like this".format(names[0], names[1], count - 2)
0d4f3d4275b2d92c503228fc6002ce0ae01acb6a
8,736
import torch def camera_rays(camK, W=None, H=None, c2w=None, graphics_coordinate=True, center=False): """shoot viewing rays from camera parameters. Args: camK: Tensor of shape `[3,3]`, the intrinsic matrix. W: Integer, if set None, then `W` is calculated as `2*cx`. H: Integer, if set None, then `H` is calculated as `2*cy`. c2w: Tensor of shape `[4,4]` or `[3,4]` camera view matrix. If `None`, c2w is set as `[I,0]` graphics_coordinate: bool. Where or not use graphics coordinate (pointing negative z into screen). Default: `True`. center: bool. Where or set 0.5 offset for pixels Default: `False`. Returns: Tensor of shape `[H,W,6]`: per-pixel ray origins (first 3 channels) concatenated with ray directions (last 3 channels). """ # Identity pose [I|0] when no camera-to-world transform is given. if c2w is None: c2w = torch.hstack((torch.eye(3), torch.zeros((3, 1)))) # Derive image size from the principal point when not supplied. if W is None: W = camK[0, 2]*2 if H is None: H = camK[1, 2]*2 W = int(W) H = int(H) invK = torch.inverse(camK) # meshgrid yields (W,H) grids here; the .t() calls below transpose to (H,W). u, v = torch.meshgrid(torch.linspace(0, W-1, W), torch.linspace(0, H-1, H)) u = u.t() v = v.t() # Optional half-pixel offset so rays pass through pixel centers. if center: u = u + 0.5 v = v + 0.5 # Back-project pixel coordinates to camera-space directions: invK @ [u,v,1]. dirs = torch.stack([u, v, torch.ones_like(u)], dim=-1) dirs = torch.matmul(dirs, invK.T) # use graphics coordinate. negative z pointing into screen. if graphics_coordinate: dirs[..., 1] *= -1 dirs[..., 2] *= -1 # Rotate directions into world space; origins are the camera position. rays_d = torch.matmul(dirs, c2w[:3, :3].T) rays_o = c2w[:3, -1].expand(rays_d.shape) return torch.cat([rays_o, rays_d], dim=-1)
c572283305dccc243de1bd956d11b7fd2ff42726
8,737
import subprocess


def run_command(cmd,input_dir):
    """Run *cmd* inside *input_dir*, wait for completion and return stdout.

    A small convenience wrapper (originally for git commands) around
    subprocess; stderr is not captured.
    """
    process = subprocess.Popen(cmd, cwd=input_dir, stdout=subprocess.PIPE)
    stdout_data, _stderr_data = process.communicate()
    return stdout_data
bb5cba884fc38b7c2d1dfc8407a57ca0c21ed62d
8,738
def get_rpm_properties(input_file: str):
    """
    Summary:
        processes the structured name of the rpm file
        ("<name>-<version>-<release>.<arch>.rpm") to get the
        arch, release, version, and name

    Parameters:
        input_file (str): the file

    Returns:
        dictionary containing arch, release, version, and name
    """
    # Peel fields off from the right-hand side of the filename.
    stem, arch, _extension = input_file.rsplit('.', 2)
    stem, release = stem.rsplit('-', 1)
    name, version = stem.rsplit('-', 1)
    return {
        'arch': arch,
        'release': release,
        'version': version,
        'name': name,
    }
291171913e80ede385a464c525fc44e87aeaf41b
8,739
def withRequest(f):
    """
    Decorator to cause the request to be passed as the first argument
    to the method.

    If an I{xmlrpc_} method is wrapped with C{withRequest}, the
    request object is passed as the first argument to that method.
    For example::

        @withRequest
        def xmlrpc_echo(self, request, s):
            return s

    @since: 10.2
    """
    # Mark the callable; the dispatcher inspects this attribute.
    setattr(f, 'withRequest', True)
    return f
f7fd8da601300aef722eb6706d111a54383648c0
8,740
def analysis_line_linearity(target_list, measure_list_mm, boundary_index_list, linearity_list): """ analysis line boundary/center linearity :param target_list: per-line target segments; target_list[i] holds (x0, y0, x1, y1) for line i :param measure_list_mm: per-line measured points in mm; measure_list_mm[i][j] holds (x, y) :param boundary_index_list: boundary_index_list[i] lists the indices j that are boundary samples of line i :param linearity_list: linearity_list[i][j] is the linearity value of sample j on line i :return: (boundary_max_linearity, center_max_linearity), each as [max_value, [line_index, sample_index]] """ # Track the worst (largest) linearity separately for boundary and center samples. boundary_max_linearity = [0, [0, 0]] center_max_linearity = [0, [0, 0]] for i in range(len(linearity_list)): for j in range(len(linearity_list[i])): if j in boundary_index_list[i]: if linearity_list[i][j] > boundary_max_linearity[0]: boundary_max_linearity[0] = linearity_list[i][j] boundary_max_linearity[1] = [i, j] else: if linearity_list[i][j] > center_max_linearity[0]: center_max_linearity[0] = linearity_list[i][j] center_max_linearity[1] = [i, j] print("\tLinearity:") # Report both maxima; trailing space in "center " aligns the printed columns. print_linearity_list = [["boundary", boundary_max_linearity], ["center ", center_max_linearity]] for print_linearity in print_linearity_list: name = print_linearity[0] max_L = print_linearity[1][0] max_i = print_linearity[1][1][0] max_j = print_linearity[1][1][1] print("\t\tMax %s linearity %f -------------- target: (%f %f)->(%f, %f), measure: (%f, %f), line %d" \ % ( name, max_L, target_list[max_i][0], target_list[max_i][1], target_list[max_i][2], target_list[max_i][3], \ measure_list_mm[max_i][max_j][0], measure_list_mm[max_i][max_j][1], max_i + 1 )) return boundary_max_linearity, center_max_linearity
ecaf54467529b8d36f2af25c65c557520bf59711
8,741
import torch def calc_init_centroid(images, num_spixels_width, num_spixels_height): """ calculate initial superpixels Args: images: torch.Tensor A Tensor of shape (B, C, H, W) num_spixels_width: int A number of superpixels in each column num_spixels_height: int A number of superpixels int each row Return: centroids: torch.Tensor A Tensor of shape (B, C, H * W) init_label_map: torch.Tensor A Tensor of shape (B, H * W) """ batchsize, channels, height, width = images.shape device = images.device # Adaptive average pooling: one pooled cell per initial superpixel centroid. centroids = torch.nn.functional.adaptive_avg_pool2d(images, (num_spixels_height, num_spixels_width)) with torch.no_grad(): num_spixels = num_spixels_width * num_spixels_height # total number of superpixels labels = torch.arange(num_spixels, device=device).reshape(1, 1, *centroids.shape[-2:]).type_as(centroids) # e.g. with 20 superpixels, number them sequentially across the grid init_label_map = torch.nn.functional.interpolate(labels, size=(height, width), mode="nearest") # nearest upsample: every pixel gets the label of the rectangular superpixel it falls in init_label_map = init_label_map.repeat(batchsize, 1, 1, 1) # replicate across the batch dimension init_label_map = init_label_map.reshape(batchsize, -1) # flatten each image (pixel-to-superpixel map, so the channel dim is 1) centroids = centroids.reshape(batchsize, channels, -1) # flatten the spatial dims of every channel of every image return centroids, init_label_map
cfa87a57d4cb6b194da4ae6316433adefb8cfbe1
8,742
from datetime import date


def get_current_year():
    """
    return: the current calendar year as an int.
    """
    # isocalendar()[0] is the ISO week-numbering year, which differs from
    # the calendar year around New Year (e.g. 2021-01-01 is in ISO year
    # 2020); use the plain calendar year instead.
    return date.today().year
0ba2fd73f596fd40c290dcda5b18bdcc1afa5c62
8,743
def deepmerge(a, b):
    """
    Merge dict structures and return the result.

    >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
    >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
    >>> import pprint; pprint.pprint(deepmerge(a, b))
    {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
    """
    # Only recurse while both sides are dicts; otherwise b wins unless
    # it is None (meaning: keep a).
    if not (isinstance(a, dict) and isinstance(b, dict)):
        return a if b is None else b
    merged = {}
    for key in set(a) | set(b):
        merged[key] = deepmerge(a.get(key), b.get(key))
    return merged
5d6f27d6bff8643e37398b4c3c31a0340585b88d
8,745
def clean_data(text): """Fixes broken symbols in poems. :param text: Broken text that has to be fixed :return: Text with correct UTF8 symbols""" # Mapping from mojibake sequences (UTF-8 bytes mis-decoded with a # single-byte codec) to the intended Polish characters. The literal # keys/values below are intentionally left byte-for-byte as-is. corrections = { 'ó': 'ó', 'ż': 'ż', 'Ä™': 'ę', 'Å‚': 'ł', 'Å›': 'ś', 'ć': 'ć', 'Ä…': 'ą', 'Å„': 'ń', 'ź': 'ź' } # Apply each substitution over the whole text, one entry at a time. for k, v in corrections.items(): text = text.replace(k, v) return text
ec847f50bc074f8f9ff081c55afccf3311037cd0
8,746
def check_session():
    """
    Checks validity of session using only required() decorator
    """
    # The decorator performs the actual validation; the handler body just
    # returns an empty JSON object as the response payload.
    empty_json_body = '{}'
    return empty_json_body
fd02e0f8ffd76d8eb6f69148142108320864bade
8,749
def _ggm_prob_wait_whitt_z(ca2, cs2): """ Equation 3.8 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue. See Whitt, Ward. "Approximations for the GI/G/m queue" Production and Operations Management 2, 2 (Spring 1993): 114-161. Parameters ---------- ca2 : float squared coefficient of variation for inter-arrival time distribution cs2 : float squared coefficient of variation for service time distribution Returns ------- float approximation for intermediate term z (see Eq 3.6) """ z = (ca2 + cs2) / (1.0 + cs2) return z
91cbf519541411dec095b710e7449f3a183c20d3
8,751
def parse_method(name):
    """Parse hyperparameters from string name to make legend label.

    Parameters
    ----------
    name : str
        Name of method

    Returns
    -------
    string : str
        Formatted string
    """
    # Collect the enabled flags, then join with ", " so separators only
    # appear between present parts.
    parts = []
    if name.split('es_')[1][0] == '1':
        parts.append(r'ES')
    if name.split('vm_')[1][0] == '1':
        parts.append(r'VM')
    alpha = name.split('alpha_')[1].split('_')[0]
    parts.append(r'$\alpha=%s$' % alpha)
    return r', '.join(parts)
eb6824a6ab7ca126c924fa7acca2725f8b06379e
8,752
import queue


def queue_with(items):
    """Return a thread-safe callable that yields the given items in order.

    +items+ can be given as an array or an iterator.  If an iterator is
    given, it is fully consumed to fill the queue before queue_with()
    returns.  The returned callable ignores its arguments and raises
    queue.Empty once exhausted.
    """
    backing = queue.Queue()
    for item in items:
        backing.put(item)

    def _pop(*_args, **_kwargs):
        return backing.get(block=False)

    return _pop
a7dae1a57ee09686ae432993678a96638810154a
8,753
def _get_file_contents(path): """ Gets the contents of a specified file, ensuring that the file is properly closed when the function exits """ with open(path, "r") as f: return f.read()
cfe84e52e2ac48d3f7d9d20fd1c85c71a222ef95
8,754
import textwrap


def msgfmt(msg, prefix=''):
    """Word-wrap *msg* to 80 columns and prepend *prefix* to every line."""
    width = 80 - len(prefix)
    wrapped = []
    for source_line in msg.splitlines():
        # textwrap.wrap drops empty lines, matching the original behavior.
        wrapped.extend(textwrap.wrap(source_line, width))
    return '\n'.join(prefix + line for line in wrapped)
66bd8ecb9aa50ade1d8c588deaf70b3ff7d4f0d7
8,756
def convertKeyValueToString(dict,key) -> str:
    """Concatenate a single key/value pair into one string.

    Returns "" when either the mapping or the key is falsy; raises
    KeyError if *key* is missing from the mapping.
    """
    # Guard clause: nothing to build without a mapping and a key.
    if not (dict and key):
        return ""
    # Force-cast both parts to strings before concatenating.
    return str(key) + str(dict[key])
a5c0c4f0c5e2d1b072873ed4c491583ef9784e27
8,757
def _flatten(seq): """Internal function.""" res = () for item in seq: if isinstance(item, (tuple, list)): res = res + _flatten(item) elif item is not None: res = res + (item,) return res
eeb6c3df5dc4ca5e8a57e2e32f7333bafb107328
8,758
import uuid


def get_uuid(value):
    """
    Build a uuid object from the given string.

    :param str value: value to get uuid instance from it.
    :raises ValueError: if value is not a valid uuid string.
    :rtype: uuid.UUID
    """
    return uuid.UUID(value)
8b22f34b9d44366c3903ab1e59d57a3fa1f6b37a
8,759
def preprocess_int(i, **options):
    """Normalize *i* to its string representation.

    NOTE(review): the previous docstring claimed this converts a string to
    an integer, but the implementation has always returned ``str(i)``;
    the docstring is corrected to match the actual behavior.  *options*
    is accepted for interface compatibility and ignored.
    """
    return str(i)
7d40ef9e0547aaeb635068c11d91e84531e0ae4a
8,760
import torch
import random
import math


def select_action(state, valueNetwork, episodeNumber, numTakenActions, args):
    """
    Take action a with ε-greedy policy based on Q(s, a; θ)
    return: int
    """
    assert isinstance(state, torch.Tensor)

    sample = random.random()
    if args.mode == 'train':
        # Exponentially decay exploration from eps_start towards eps_end
        # as more actions are taken.
        eps_threshold = args.eps_end + (args.eps_start - args.eps_end) * \
            math.exp(-1. * numTakenActions / args.eps_decay)
    else:
        # Always act greedily in eval mode.
        eps_threshold = 0

    if sample < eps_threshold:
        # Explore: uniformly pick one of the 4 actions.
        return random.randrange(4)
    # Exploit: argmax over the Q-values of the 4 actions.
    return torch.max(valueNetwork(state), 1)[-1].item()
f7c024ad50360725fcaeda1584bc34e1fe569fbe
8,761
def convert_string_to_list(df, col, new_col):
    """Convert column from string to list format.

    Non-null values of *col* (space-separated integers) are parsed into
    lists of ints and stored in *new_col*; null rows keep their original
    value.  The DataFrame is mutated and returned.
    """
    def _parse_ints(arr_string):
        return [int(item) for item in str(arr_string).split(" ")]

    mask = df[col].notnull()
    # Seed the new column with the originals so null rows pass through.
    df[new_col] = df[col]
    df.loc[mask, new_col] = df.loc[mask, col].map(_parse_ints)
    return df
cc0e04fbe6b5523647ceb954fc4c17e78f2a8554
8,762
from pathlib import Path


def raw_directory(request):
    """Gets 'raw' directory with test datafiles"""
    # Resolve relative to the pytest rootdir carried on the request fixture.
    root = Path(request.config.rootdir)
    return root / "tests" / "testdata" / "2020" / "6" / "raw"
efbdf7c5966578e2180ea4f9db0580420706ec23
8,763
def solution(dataset: list) -> int:
    """Count the positions at which the two lines differ.

    The first line may keep a trailing newline, but zip() only walks to the
    shorter length; if both lines end in a newline those characters compare
    equal and are not counted either way.
    """
    first, second = dataset
    differences = 0
    for left, right in zip(first, second):
        if left != right:
            differences += 1
    return differences
f17ef66d94af49a3eafdb20e9fa3aa2e251089e6
8,764
import collections def _get_tables_and_columns_in_schema(db, schema): """Returns a dict describing all tables and columns in `schema`. :param db: database handle exposing `query(sql[, params, return_dicts])` :param schema: name of the PostgreSQL schema to inspect :return: list of dicts with keys 'name', 'num_rows' and 'columns'; each column dict carries 'column_name', 'data_type' and 'foreign_keys' (a list of (target_table, target_column) tuples). NOTE(review): `schema` and the table names are interpolated directly into the SQL strings below — safe only if they come from a trusted source; verify the callers. """ # Obtain names of tables in `schema`: q = """ SELECT table_name FROM information_schema.tables WHERE table_schema = '""" + schema + """' ORDER BY table_name; """ rows_tables = db.query(q) # Obtain row counts (warning: could be inaccurate under heavy load): q = """ SELECT relname, n_live_tup FROM pg_stat_user_tables WHERE schemaname = '""" + schema + """'; """ num_rows = {row['relname']: row['n_live_tup'] for row in db.query(q)} # The above method occasionally returns zero counts for all or most # tables. In that case, this alternative row counting method can # provide more accurate information: q = """ SELECT relname, reltuples FROM pg_class LEFT JOIN pg_namespace ON (pg_namespace.oid = pg_class.relnamespace) WHERE relkind='r' AND pg_namespace.nspname='""" + schema + """'; """ for row in db.query(q): relname = row['relname'] if (relname not in num_rows) or (num_rows[relname] == 0): num_rows[relname] = row['reltuples'] # Obtain foreign keys: q = """ SELECT source_table.relname AS source_t, source_column.attname AS source_c, target_table.relname AS target_t, target_column.attname AS target_c FROM pg_constraint INNER JOIN pg_class source_table ON source_table.oid=pg_constraint.conrelid INNER JOIN pg_namespace ON pg_namespace.oid=pg_constraint.connamespace INNER JOIN pg_attribute source_column ON source_column.attrelid=pg_constraint.conrelid INNER JOIN pg_class target_table ON target_table.oid=pg_constraint.confrelid INNER JOIN pg_attribute target_column ON target_column.attrelid=pg_constraint.conrelid WHERE pg_constraint.contype='f' AND source_column.attnum = ANY(pg_constraint.conkey) AND target_column.attnum = ANY(pg_constraint.confkey) AND pg_namespace.nspname=%s ; """ q_data = [schema] # Nested mapping: source_table -> source_column -> [(target_table, target_column), ...] foreign_keys = collections.defaultdict( lambda: collections.defaultdict(list)) foreign_keys_rows = db.query(q, q_data, return_dicts=False) for source_t, source_c, target_t, target_c in foreign_keys_rows: foreign_keys[source_t][source_c].append((target_t, target_c)) # Obtain column names of each obtained table tables = [] for row_table in rows_tables: table_name = row_table['table_name'] q = """ SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = '""" + schema + """' AND table_name = '""" + table_name + """'; """ columns = db.query(q) # Save foreign key references for each column for column in columns: column_name = column['column_name'] column['foreign_keys'] = foreign_keys[table_name][column_name] tables.append({ 'name': table_name, 'num_rows': num_rows[table_name], 'columns': columns, }) return tables
31b7c96e649e97819e92adffa1a05710a802ef37
8,765
def raw_formatter(subtitles):
    """
    Serialize a list of (time_range, text) subtitle pairs as a single
    space-separated string, discarding the time ranges.

    NOTE(review): the previous docstring said "newline-delimited", but the
    join below has always used a space; the docstring is corrected to
    match the actual behavior.
    """
    return ' '.join(text for (_rng, text) in subtitles)
51109a9b29c30257e9e8fa50abf1e718374a521f
8,766
def dict_diff(first, second):
    """
    Return the keys whose values differ between *first* and *second*,
    mapped to {'original': ..., 'changed': ...}.  Keys present in only
    one dict report None for the missing side.

    >>> dict_diff({'a':'b', 'c':1}, {'a':'c', 'c':1})
    {'a': {'original':'b', 'changed': 'c'}

    :type first: dict
    :type second: dict
    :rtype dict
    """
    all_keys = set(first) | set(second)
    return {
        key: {'original': first.get(key), 'changed': second.get(key)}
        for key in all_keys
        if first.get(key) != second.get(key)
    }
721556706b25888693dfb63c852fca14951890ea
8,767
import re


def add_space_after_commas(origString):
    """ Directives with arguments need a space inserted after each comma. """
    # A fixed-pattern re.sub is just a plain string replace.
    return origString.replace(',', ', ')
4ea62d9c792c9fd2f69d784e7f0c4bb972cd0a7c
8,768
def get_all_matching(cls, column_name, values):
    """Get all the instances of ``cls`` where the column called
    ``column_name`` matches one of the ``values`` provided.

    Builds a SQLAlchemy-style ``IN`` criterion from the named column and
    runs it through ``cls.query``.
    """
    criterion = getattr(cls, column_name).in_(values)
    return cls.query.filter(criterion).all()
d74ddf983e33f63dfcaf1f335c91b35faa00a651
8,769
def get_day_of_week(year: int, month: int, day: int) -> int:
    """Return the day of the week: 0 for Sunday, 1-6 for Monday..Saturday."""
    # Zeller-style trick: treat January/February as months 13/14 of the
    # previous year so the leap-day correction lands in the right year.
    if month in (1, 2):
        month += 12
        year -= 1
    leap_term = year // 4 - year // 100 + year // 400
    return (day + 2 * month + 3 * (month + 1) // 5 + year + leap_term + 1) % 7
77d0d74aeadf3e5148573673ffa29ca2abf99467
8,771
import importlib
import unittest


def skipUnlessImported(module, obj):
    """Skip the decorated test unless *obj* can be imported from *module*.

    Examples::

        @skipUnlessImported('airflow.operators.mysql_operator', 'MySqlOperator')
        @skipUnlessImported('xTool.utils.tests', 'skipUnlessImported')
    """
    try:
        imported = importlib.import_module(module)
    except ImportError:
        # Module missing: dir(None) below contains no user names, so the
        # test will be skipped.
        imported = None
    reason = "Skipping test because {} could not be imported from {}".format(
        obj, module)
    return unittest.skipUnless(obj in dir(imported), reason)
84b1fbf970f3fb538a4c7efbf1ae43db0bf67cd7
8,772
def inc(n):
    """Increment an integer."""
    # Two's-complement identity: -(~n) == n + 1 for any int.
    flipped = ~n
    return -flipped
694ba6320b842985f87a36e452eb0f30e39442b4
8,775
def Vowel_or_Consonant(char = ''):
    """ A boolean function, which return either True or False

    Returns True when the (last) character of *char* is one of 'aeiouy',
    False otherwise.  An empty string returns False.
    """
    # Previously an empty input raised UnboundLocalError because the loop
    # body never ran; treat it as "not a vowel" instead.
    if not char:
        return False
    # Historically the loop overwrote the flag on every iteration, so only
    # the final character decided the result.  That behavior is preserved
    # for backward compatibility (callers normally pass a single letter).
    return char[-1] in 'aeiouy'
7b4d07a90b858983bd6f92d61b97592fcb748608
8,776
def windowedAverage(window, step, sample): """ Input is a sample, with a list of CHROM, POS, mel, sim, sec Output is a list of CHROM, POS, mel, sim, sec, but a window average around that point :param window: width of the averaging window centred on each step position :param step: spacing between window centres (assumed > 0) :param sample: iterable of [CHROM, POS, mel, sim, sec] rows; POS must be int-parseable. NOTE(review): duplicate (CHROM, POS) rows silently overwrite earlier ones in entryDict — confirm inputs are unique. """ entryDict = {} ## best to get this in a dict, since things are going to get looked up several times positionDict = {} ## and a list of all the positions by chromosome windowDict = {} ## dictionary for the windows, chr:pos:[[mel,sim,sec], [mel,sim,sec]] for entry in sample: if entry[0] in entryDict: entryDict[entry[0]][int(entry[1])] = [entry[2], entry[3], entry[4]] else: entryDict[entry[0]] = {int(entry[1]) : [entry[2], entry[3], entry[4]]} if entry[0] in positionDict: positionDict[entry[0]].append(int(entry[1])) else: positionDict[entry[0]] = [int(entry[1])] ## now, for each chromosome, scan through the list of positions. Build clusters of positions, then average those clusters together for chrom in entryDict: windowDict[chrom] = {} ## figure out how many windows there should be totalSteps = int(max(positionDict[chrom]) / step) ## make a new dictionary entry for each of those steps i = 0 for idx in range(0, totalSteps): windowDict[chrom][i] = [] i += step ## add each position that is within the window range of a step to the step's dictionary ## (positions can land in several overlapping windows when window > step) for pos in positionDict[chrom]: for j in windowDict[chrom]: if (j-(window/2)) < pos < (j+(window/2)): windowDict[chrom][j].append(entryDict[chrom][pos]) ## for each window, average all the values; empty windows become [0, 0, 0] for pos in windowDict[chrom]: mel_av = [] sim_av = [] sec_av = [] if len(windowDict[chrom][pos]) > 0: for ent in windowDict[chrom][pos]: mel_av.append(ent[0]) sim_av.append(ent[1]) sec_av.append(ent[2]) mel_av = sum(mel_av) / len(mel_av) sim_av = sum(sim_av) / len(sim_av) sec_av = sum(sec_av) / len(sec_av) else: mel_av = 0 sim_av = 0 sec_av = 0 windowDict[chrom][pos] = [mel_av, sim_av, sec_av] ## turn it into a list for output, skipping windows where all three averages are <= 0 retList = [] for chrom in windowDict: for pos in windowDict[chrom]: if (windowDict[chrom][pos][0] > 0) or (windowDict[chrom][pos][1] > 0) or (windowDict[chrom][pos][2] > 0): retList.append([chrom, pos, windowDict[chrom][pos][0], windowDict[chrom][pos][1], windowDict[chrom][pos][2]]) return retList
fc93abdf9392f2183767de105ef1ff00e5f039a2
8,778
def epw_header(epw):
    """ Given an EPW file path, parse its first (header) line into a dict
    with city/state/country/source/WMO strings plus numeric lat/lon/tz/hgt. """
    with open(epw, "r") as handle:
        fields = handle.readline().strip().split(",")
    header = {
        "city": fields[1],
        "state": fields[2],
        "country": fields[3],
        "source": fields[4],
        "WMO": fields[5],
    }
    # Fields 6..9 are numeric: latitude, longitude, timezone, elevation.
    for position, key in enumerate(("lat", "lon", "tz", "hgt"), start=6):
        header[key] = float(fields[position])
    return header
257640179a3659c7dc142b79c6fa8a9a5f55ea39
8,780