Columns — content: string (35 to 416k chars) · sha1: string (40 chars) · id: int64 (0 to 710k)
from typing import List
from functools import reduce


def product(li: List[int]) -> int:
    """Calculates the product of all the numbers in a list.

    doctests:
    >>> product([2, 1, 4])
    8
    >>> product([3, 1, 4, 2, 5, 8])
    960
    """
    x = reduce(lambda a, b: a * b, li)
    return x
8afe00bb6056accc694ab955a48b6be85d8a30bf
9,050
def get_etl_pipeline_ids(client):
    """Return a list of pipeline ids, filtering on ETL pipelines."""
    paginator = client.get_paginator("list_pipelines")
    response_iterator = paginator.paginate()
    filtered_iterator = response_iterator.search(
        "pipelineIdList[?contains(@.name, 'ETL') == `true`].id"
    )
    return list(filtered_iterator)
10dcd1d933ed8adabd75740a55d567cf786fffbb
9,051
import os
import contextlib
import wave


def skip_long_utterance(wav_file, cut_off_len=15):
    """
    From "Text-Free Image-to-Speech Synthesis Using Learned Segmental Units",
    Appendix A: When computing duration statistics, we exclude utterances
    longer than 15s for SpokenCOCO...
    """
    if os.path.islink(wav_file):
        wav_file = os.readlink(wav_file)
    with contextlib.closing(wave.open(wav_file, 'r')) as f:
        frames = f.getnframes()
        rate = f.getframerate()
        duration = frames / float(rate)
        return duration >= cut_off_len
9f2d81f30aaa0e5325b6ec88a116810f4c6c7ae9
9,052
import os


def GetUniqueName(path, name):
    """Make a file name that will be unique in case a file of the same name
    already exists at that path.
    @param path: Root path to folder of files destination
    @param name: desired file name base
    @return: string
    """
    tmpname = os.path.join(path, name)
    if os.path.exists(tmpname):
        if '.' not in name:
            ext = ''
            fbase = name
        else:
            ext = '.' + name.split('.')[-1]
            fbase = name[:-1 * len(ext)]
        inc = len([x for x in os.listdir(path) if x.startswith(fbase)])
        tmpname = os.path.join(path, "%s-%d%s" % (fbase, inc, ext))
        while os.path.exists(tmpname):
            inc = inc + 1
            tmpname = os.path.join(path, "%s-%d%s" % (fbase, inc, ext))
    return tmpname
c22b5d897fcec291f4e1e69fae8d85a9601ba15e
9,053
def get_clusters_from_file(path, ref_cluster_names=[]):
    """Get cluster names, labels, and cells from cluster file or metadata file"""
    clusters = {}
    with open(path) as f:
        lines = f.readlines()
    headers = [line.strip().split('\t') for line in lines[:2]]
    names = headers[0]
    types = headers[1]
    got_all_cells = False
    all_cells = []
    for cluster_index, type in enumerate(types):
        if type != 'group':
            continue
        name = names[cluster_index]
        clusters[name] = {}
        for line in lines[3:]:
            columns = line.strip().split('\t')
            cluster_label = columns[cluster_index].strip()
            if cluster_label in ref_cluster_names:
                continue
            cell = columns[0]
            if got_all_cells is False:
                all_cells.append(cell)
            if cluster_label in clusters[name]:
                clusters[name][cluster_label].append(cell)
            else:
                clusters[name][cluster_label] = [cell]
        got_all_cells = True
    return [clusters, all_cells]
cabf14c72b0467b0f2b15e3c0b8c8bd1846e92b5
9,055
def _tracking(fcn):
    """Decorator to indicate the list has changed"""
    def new_fcn(self, *args):
        self.changed = True
        return fcn(self, *args)
    return new_fcn
3f6454190056f112134f01507b2bb353a8043790
9,056
import sys


def caller_module(level=2, sys=sys):
    """This function is taken from Pyramid Web Framework -
    ``pyramid.path.caller_module``."""
    module_globals = sys._getframe(level).f_globals
    module_name = module_globals.get('__name__') or '__main__'
    module = sys.modules[module_name]
    return module
4bc6d73f656c98be185f7b5aaa869d7fb6ca841c
9,057
import re


def javacode_to_tokens(code: str):
    """Starting on method level, without javadocs.
    Returns a tuple of ([code-tokens], code-string).
    """
    code_tokens = re.findall(r"\w+(?:'\w+)*|[^\w\s]", code)
    # print("Javacode to tokens to be done!", code, code_tokens)
    return (code_tokens, code)
85c0dccb06936326929493edc5192ccf01bf59ea
9,058
from typing import List, Optional


def _convert_names(
    names, max_levels: Optional[int] = None, err_msg: Optional[str] = None
) -> List[str]:
    """Helper function that converts arguments of index, columns, values to
    list. Also performs check on number of levels. If it exceeds `max_levels`,
    raise ValueError with `err_msg`.
    """
    if isinstance(names, str):
        result = [names]
    elif isinstance(names, list):
        result = names
    else:
        result = list(names)
    if max_levels and len(result) > max_levels:
        raise ValueError(err_msg)
    return result
d67fb93b039306e7dac973abffe1e08089993c0d
9,059
def is_MC(parcels):
    """Dummy for Maricopa County."""
    return (parcels.county == 'MC').astype(int)
6e8af2675f1ba40d642ada0d07e133aeb9dd0d70
9,060
def objScale(obj, factor):
    """Object scaling function: gets obj and scale factor, returns an array
    of the scaled size.
    """
    oldSize = obj.get_size()
    newSize = []
    for i in oldSize:
        newSize.append(int(i / float(factor)))
    return newSize
3104fc4e126299400a5a119fff0d8bc9d3ea32f7
9,061
def preprocess_encoder_input(arr):
    """Simple method to handle the complex MFCC coefs that are produced during
    preprocessing. This means:
    1. (For now), discarding one of the channels of the MFCC coefs
    2. Collapsing any empty dimensions

    :param arr: the array of MFCC coefficients.
    """
    return arr.squeeze()[0]
ccd754783377e9fe257e423f9099d6dbef21d11b
9,062
def determineWinCardIndex(pile):
    """
    @param list pile: pile of cards in center
    @return int: index of winning card - highest index of leading suit,
        or if there are spades, highest spades index
    """
    bestCard = (pile[0], 0)
    for i in range(1, len(pile)):
        bestCardSuit = bestCard[0].index // 13
        cardSuit = pile[i].index // 13
        if (bestCardSuit == cardSuit and pile[i].index > bestCard[0].index) or \
           (bestCardSuit != 0 and cardSuit == 0):
            bestCard = (pile[i], i)
    # print("Out of " + str(pile) + " Best card: " + str(bestCard[0]))
    return bestCard[1]
c105ade59b79482de0f2d3e6b4d25c4353bb1dcf
9,063
def greyList(n):
    """Generate a Gray-code sequence.
    Reference: https://www.jb51.net/article/133575.htm
    :param n: bit length
    :return: Gray-code sequence covering the range 2 ** n
    """
    def get_grace(list_grace, n):
        if n == 1:
            return list_grace
        list_before, list_after = [], []
        for i in range(len(list_grace)):
            list_before.append('0' + list_grace[i])
            list_after.append('1' + list_grace[-(i + 1)])
        return get_grace(list_before + list_after, n - 1)

    # get_grace produces binary strings; convert them to base-10 integers
    return [int(i, 2) for i in get_grace(['0', '1'], n)]
eab1f00ec2cdd62fbbffbb78a2c69c6fe9177e66
9,065
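A quick sanity check of greyList above; the expected sequence below is hand-computed from the recursion (3-bit Gray code in decimal, adjacent values differing by one bit):

>>> greyList(3)
[0, 1, 3, 2, 6, 7, 5, 4]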
def hbb_to_kaa(hessian):
    """Unit conversion on an input Hessian matrix from Hartrees/Bohr/Bohr
    to kcal/mol/Angstrom/Angstrom.
    """
    hessian = (hessian * 627.509474) / (0.529177**2)
    return hessian
e5daec25cba9104f8ecf1bcf30a0b4020969c704
9,066
def coloring(color, text):
    """Return text wrapped in the ANSI escape codes for the specified color."""
    color_sequences = {
        'default': '\033[0m',
        'black': '\033[30m',
        'red': '\033[31m',
        'green': '\033[32m',
        'yellow': '\033[33m',
        'blue': '\033[34m',
        'purple': '\033[35m',
        'lightblue': '\033[36m',
        'white': '\033[37m',
    }
    return color_sequences[color] + text + color_sequences['default']
3953d72329a01453f52fd099bb20624c7661aa87
9,067
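A minimal usage sketch for coloring above; the message strings are illustrative only:

print(coloring('red', 'error: build failed'))   # renders red on ANSI-capable terminals
print(coloring('green', 'ok'))                  # renders green, then resets to default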
import requests

# NOTE: `token` must be an OK API access-token string (a bare `import token`
# would shadow it with the unrelated stdlib module); the value below is a
# placeholder.
token = "REPLACE_WITH_ACCESS_TOKEN"


def get_webhook():
    """Getting installed webhooks.

    :return: installed webhooks
    """
    header = {'Content-Type': 'application/json;charset=utf-8'}
    request_address = 'https://api.ok.ru/graph/me/subscriptions?access_token={token}'.format(token=token)
    response = requests.get(request_address, headers=header)
    return response.text
345a331514d276b336803d2fed182d7591eb8cc6
9,069
import math


def haversine(rad):
    """Returns the haversine function of an angle in radians."""
    return (1 - math.cos(rad)) / 2
294c901795aa499c42f3d67e6d6a3d5efecd46a8
9,070
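Since hav(theta) = (1 - cos theta) / 2 = sin^2(theta / 2), two quick checks of haversine above:

>>> import math
>>> haversine(0.0)
0.0
>>> haversine(math.pi)
1.0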
def find_break_edges(ptree):
    """Find edges which to remove from the graph for the original tree behind
    this ptree. ==> edges between adjac
    """
    ret = set()
    if len(ptree.insert_descendants) > 0:
        lca = ptree.insert_descendants[0]
        for lca_child in lca:
            ret.add((lca.nodeid, lca_child.nodeid))
            ret.add((lca_child.nodeid, lca.nodeid))
    ancestors_of_children = []
    for pchild in ptree:
        ancestors_of_children.append(
            [pchild.insert_ancestors[-1]] if len(pchild.insert_ancestors) > 0 else []
        )
        ret_ = find_break_edges(pchild)
        ret |= ret_
    if len(ptree) > 1:
        s = [ptree.insert_siblings[0]]
        for i in range(len(ancestors_of_children)):
            s.append(ancestors_of_children[i])
            s.append(ptree.insert_siblings[i + 1])
        s = [se for se in s if len(se) > 0]
        for i in range(len(s) - 1):
            ret.add((s[i][-1].nodeid, s[i + 1][0].nodeid))
            ret.add((s[i + 1][0].nodeid, s[i][-1].nodeid))
    return ret
db9ff3ae36dba799d6f479f83e775b41d7bce3df
9,071
def get_celsius(temperature_in_fahrenheit):
    """
    Returns the temperature in Celsius of the given Fahrenheit temperature.

    For example, this function returns 100.0 when given 212.

    Type hints:
      :type temperature_in_fahrenheit: float
    """
    return (temperature_in_fahrenheit - 32) * (5 / 9)
501b5c3c6c7fe9792fd12cabbae71eddfbc34f58
9,072
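Checking get_celsius above at the two familiar fixed points of water:

>>> get_celsius(212)
100.0
>>> get_celsius(32)
0.0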
import bz2
import json
import codecs


def json_exporter(data, filepath, compress=True):
    """Export a file to JSON, compressed with ``bz2`` if ``compress``.
    Returns the filepath of the JSON file. The returned filepath is not
    necessarily ``filepath``, if ``compress`` is ``True``.
    """
    if compress:
        filepath += ".bz2"
        with bz2.BZ2File(filepath, "w") as f:
            f.write(json.dumps(data, ensure_ascii=False).encode('utf-8'))
    else:
        with codecs.open(filepath, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False)
    return filepath
dcdb9026b302c3bec6b6a7215cee0498a8655a61
9,073
import math


def _realroots_quadratic(a1, a0):
    """gives the real roots of x**2 + a1 * x + a0 = 0"""
    D = a1*a1 - 4*a0
    if D < 0:
        return []
    SD = math.sqrt(D)
    return [0.5 * (-a1 + SD), 0.5 * (-a1 - SD)]
ad61307a09b9f5cbf444f0bd75448b39b09b2e96
9,074
def gunning_fog_index(n_words, n_polysyllable_words, n_sents):
    """https://en.wikipedia.org/wiki/Gunning_fog_index"""
    return 0.4 * ((n_words / n_sents) + 100 * (n_polysyllable_words / n_words))
aeb295edfa563027952f6a934636487e04b2b266
9,075
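A worked example for gunning_fog_index above, assuming a hypothetical passage of 100 words in 5 sentences containing 5 polysyllabic words: 0.4 * ((100 / 5) + 100 * (5 / 100)) = 0.4 * 25 = 10.0.

>>> gunning_fog_index(100, 5, 5)
10.0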
def _generate_windows_body(hooks):
    """Generate Windows specific functions.

    At the moment it implements load_impls_from_library, the class destructor,
    and a utility function to convert from utf8 to wide-strings so we can use
    the wide family of windows functions that accept unicode.
    """
    # generate destructor to free the library handles opened by load_from_library()
    result = [
        "public:",
        f"    ~HookCaller() {{",
        f"        for (auto handle : this->handles) {{",
        f"            FreeLibrary(handle);",
        f"        }}",
        f"    }}",
    ]
    # generate load_impls_from_library()
    result += [
        f"    void load_impls_from_library(const std::string& utf8_filename) {{",
        f"        std::wstring w_filename = utf8_to_wstring(utf8_filename);",
        f"        auto handle = this->load_dll(w_filename);",
        f"        if (handle == NULL) {{",
        f'            throw std::runtime_error("Error loading library " + utf8_filename + ": " + std::to_string(GetLastError()));',
        f"        }}",
        f"        this->handles.push_back(handle);",
        "",
    ]
    for index, hook in enumerate(hooks):
        result += [
            f'        auto p{index} = GetProcAddress(handle, "{hook.function_name}");',
            f"        if (p{index} != nullptr) {{",
            f"            this->append_{hook.name}_impl((uintptr_t)(p{index}));",
            f"        }}",
            "",
        ]
    result.append("    }")
    result += [
        "",
        "",
        "private:",
        f"    std::wstring utf8_to_wstring(const std::string& s) {{",
        f"        int flags = 0;",
        f"        int required_size = MultiByteToWideChar(CP_UTF8, flags, s.c_str(), -1, nullptr, 0);",
        f"        std::wstring result;",
        f"        if (required_size == 0) {{",
        f"            return result;",
        f"        }}",
        f"        result.resize(required_size);",
        f"        int err = MultiByteToWideChar(CP_UTF8, flags, s.c_str(), -1, &result[0], required_size);",
        f"        if (err == 0) {{",
        f"            // error handling: https://docs.microsoft.com/en-us/windows/desktop/api/stringapiset/nf-stringapiset-multibytetowidechar#return-value",
        f"            switch (GetLastError()) {{",
        f'                case ERROR_INSUFFICIENT_BUFFER: throw std::runtime_error("utf8_to_wstring: ERROR_INSUFFICIENT_BUFFER");',
        f'                case ERROR_INVALID_FLAGS: throw std::runtime_error("utf8_to_wstring: ERROR_INVALID_FLAGS");',
        f'                case ERROR_INVALID_PARAMETER: throw std::runtime_error("utf8_to_wstring: ERROR_INVALID_PARAMETER");',
        f'                case ERROR_NO_UNICODE_TRANSLATION: throw std::runtime_error("utf8_to_wstring: ERROR_NO_UNICODE_TRANSLATION");',
        f'                default: throw std::runtime_error("Undefined error: " + std::to_string(GetLastError()));',
        f"            }}",
        f"        }}",
        f"        return result;",
        f"    }}",
        f"",
        f"",
        f"    class PathGuard {{",
        f"    public:",
        f"        explicit PathGuard(std::wstring filename)",
        f"            : path_env{{ get_path() }}",
        f"        {{",
        fr'            std::wstring::size_type dir_name_size = filename.find_last_of(L"/\\");',
        f'            std::wstring new_path_env = path_env + L";" + filename.substr(0, dir_name_size);',
        f'            _wputenv_s(L"PATH", new_path_env.c_str());',
        f"        }}",
        f"",
        f"        ~PathGuard() {{",
        f'            _wputenv_s(L"PATH", path_env.c_str());',
        f"        }}",
        f"",
        f"    private:",
        f"        static std::wstring get_path() {{",
        f"            rsize_t _len = 0;",
        f"            wchar_t *buf;",
        f'            _wdupenv_s(&buf, &_len, L"PATH");',
        f"            std::wstring path_env{{ buf }};",
        f"            free(buf);",
        f"            return path_env;",
        f"        }}",
        f"",
        f"        std::wstring path_env;",
        f"    }};",
        f"",
        f"    HMODULE load_dll(const std::wstring& filename) {{",
        f"        // Path Modifier",
        f"        PathGuard path_guard{{ filename }};",
        f"        // Load library (DLL)",
        f"        return LoadLibraryW(filename.c_str());",
        f"    }}",
        f"",
        f"",
        f"private:",
        f"    std::vector<HMODULE> handles;",
    ]
    return result
27597f8556cdb4383179245a423a45a72324e2ae
9,076
import requests
import json
import logging


def last_failed(url, job_type):
    """Return last failed job for a specified job type."""
    # query
    query = {
        "query": {
            "bool": {
                "must": [
                    {"terms": {"status": ["job-failed"]}},
                    {"terms": {"resource": ["job"]}},
                    {"terms": {"type": [job_type]}},
                ]
            }
        },
        "sort": [{"job.job_info.time_end": {"order": "desc"}}],
        "_source": [
            "job_id", "payload_id", "payload_hash", "uuid",
            "job.job_info.time_queued", "job.job_info.time_start",
            "job.job_info.time_end", "error", "traceback",
        ],
        "size": 1,
    }
    r = requests.post('%s/job_status-current/job/_search' % url,
                      data=json.dumps(query))
    r.raise_for_status()
    result = r.json()
    count = result['hits']['total']
    if count == 0:
        return None
    latest_job = result['hits']['hits'][0]['_source']
    logging.info("latest job: %s" % json.dumps(latest_job, indent=2))
    return latest_job
567e5ef7afaa460e7fb91e09fa74d2d011cb2cdb
9,078
def fix_spaces_inside_quotes(text, quote='``'):
    """
    >>> test = '''\
    :meth:`update` accepte soit un autre objet dictionnaire ou un iterable de\
    paires clé / valeur (comme tuples ou d'autres iterables de longueur deux).\
    Si les arguments de mots clés sont spécifiés, le dictionnaire est alors mis\
    à jour avec ces paires clé / valeur: ``d.update(red=1, blue=2)``.\
    '''
    >>> fixed = fix_spaces_inside_quotes(test, '``')
    >>> ':meth:`update`' in fixed
    True
    >>> fixed = fix_spaces_inside_quotes(test, '*')
    >>> ':meth:`update`' in fixed
    True
    """
    chunks = text.split(quote)
    is_inside_quote = False
    for i, chunk in enumerate(chunks):
        if is_inside_quote:
            chunks[i] = chunk.strip()
        is_inside_quote = not is_inside_quote
    return quote.join(chunks)
cafb4dd7d15c4ab1a2cd252352d33b9aa20e4bca
9,079
def toGoatLatin(S):
    """
    :type S: str
    :rtype: str
    """
    count = 1
    sentences = S.split()
    for i in range(len(sentences)):
        if sentences[i][0].lower() in "aeiou":
            sentences[i] += "ma" + count * "a"
        else:
            sentences[i] = sentences[i][1:] + sentences[i][0] + 'ma' + count * "a"
        count += 1
    return " ".join(sentences)
ebc1e567dfa60436aea14412d7b347d8481f8b0a
9,080
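Hand-tracing toGoatLatin above on a short sentence (a vowel-initial word keeps its letters, a consonant-initial word is rotated; each word gains "ma" plus one more "a" than the previous word):

>>> toGoatLatin("I speak Goat Latin")
'Imaa peaksmaaa oatGmaaaa atinLmaaaaa'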
def __prepare_line(string, dir_source, replace_string):
    """Prepare the line before it is written into the content file."""
    if replace_string is not None:
        string = string.replace(dir_source, replace_string)
    return string
cbec6deab5c66960c5e8d57b52392e4ed3cf2b3d
9,081
def find_closest_raster(return_period, aoi_col='AoI_RP{}y_unique',
                        depth_col='RP{}_max_flood_depth'):
    """Find the closest AoI and Flood raster column name for given return period.

    Arguments:
        *return_period* (float): Return period of the flood for which to find
            the nearest inundation raster
        *aoi_col* (str): the format of the column name to find, default can be
            changed to anything in G.es.attributes
        *depth_col* (str): the format of the column name to find, default can
            be changed in G.es.attributes

    Returns:
        *aoi_col* (str): e.g. 'AoI_RP10y_majority'
        *depth_col* (str): e.g. 'RP500_max_flood_depth'
    """
    assert return_period > 0
    available_rps = [10, 20, 50, 100, 200, 500]
    nearest = min(available_rps, key=lambda x: abs(x - return_period))

    # note the difference in notation: AoI: 'RP10...', flood: 'RP010...'
    aoi_col = aoi_col.format(nearest)
    if len(str(nearest)) == 2:  # e.g. RP10 -> RP010
        depth_col = depth_col.format('0' + str(nearest))
    elif len(str(nearest)) == 3:  # e.g. RP200 -> RP200
        depth_col = depth_col.format(nearest)
    else:
        raise ValueError(
            'Does not know how to handle value nearest = {}, valid are e.g. 10, 500'.format(nearest))
    return aoi_col, depth_col
177041afc9a52d4942ab4095b7383cfc8e17652b
9,083
def _is_bn_diff_doctypes(dyad):
    """Check if a dyad is between two different doctypes.

    Args:
        dyad (tuple): two-item tuple where each item is a dict which
            represents a document

    Returns:
        ind (bool): True if the dyad is between two different doctypes
    """
    ind = dyad[0]["doctype"] != dyad[1]["doctype"]
    return ind
2480cbca808164b2fec14fd13808cf5ebfb0dcc3
9,084
import requests


def upload2ipfs(file_path: str) -> str:
    """Upload to ipfs using local port.
    IPFS node must be running locally. Run: $ ipfs daemon

    Args:
        file_path (str): path to metadata file
    """
    with open(file_path, "rb") as f:
        nft_binary = f.read()
    ipfs_endpoint = "http://127.0.0.1:5001/"
    api = "api/v0/add"
    nft_name = file_path.split("/")[-1:][0]
    ipfs_hash = requests.post(
        ipfs_endpoint + api, files={"from": nft_binary}
    ).json()["Hash"]
    nft_uri = f"https://ipfs.io/ipfs/{ipfs_hash}?filename={nft_name}"
    print("IPFS address: ", nft_uri)
    return nft_uri
4fdb53d4d6d61b1784673be6d84cb7ba41fc84a5
9,085
def GetCloudBasePath():
    """Returns the folder within the Makani bucket where all databases live."""
    return 'gs://gcp-public-data-makani-deps/deps/turbsim_databases'
40091d491fdc3960cc5aa08e0ca58ae0cf2009aa
9,086
def line(char='-', length=48):
    """Generates a string of characters with a certain length"""
    return ''.join([char for _ in range(length)])
32de8abb95ab7e73912e2b37f0996361ed181c5b
9,087
import logging


def group_by_size(input_tensors, bytes_per_pack):
    """Groups `input_tensors` into chunks of `bytes_per_pack`.

    The method preserves the original order of `input_tensors`. The grouping
    is best effort, each pack could have more or less bytes than
    `bytes_per_pack`. It only groups values with known shape.

    Args:
        input_tensors: a list of Tensor.
        bytes_per_pack: an integer.

    Returns:
        A list of packs of Tensor. All values are grouped into one pack if
        `bytes_per_pack` is zero or any of the value has unknown shape.
    """
    if bytes_per_pack == 0:
        return [input_tensors]
    packs = []
    last_pack_size = 0
    for value in input_tensors:
        num_elements = value.shape.num_elements()
        if num_elements is None:
            # Can't pack values with unknown shape.
            logging.warning(
                'not packing values due to the unknown or inconsistent shape of %s',
                value)
            return [input_tensors]
        size = num_elements * value.dtype.size
        # Try to keep each pack as close to bytes_per_pack as possible, while
        # each pack is at least bytes_per_pack large. I.E. we err on the side
        # of having few but large packs.
        if not packs or last_pack_size > bytes_per_pack:
            packs.append([])
            last_pack_size = 0
        packs[-1].append(value)
        last_pack_size += size
    return packs
9ab5805898678b1541f116e5ef5ae1b9a1c42791
9,088
def adapters(text):
    """Parse lines of text into a list of adapters (represented by their
    joltage), supplemented by the outlet (0) and your device (maximum + 3).
    """
    adapters = list(sorted(map(int, text.splitlines())))
    adapters = [0] + adapters + [max(adapters) + 3]
    return adapters
cb5aa44963506e8d0ea6aa0aeb89d094bfbb0bc8
9,089
def assign_bonds_to_groups(tors, group):
    """
    **Description:** Make a group for each torsion bond and keep track of how
    many members it has. Finally it returns the biggest group.

    **Input:**
    - tors: atoms with torsions
    - group: atoms grouped by proximity

    **Output:**
    - output: list of group numbers
    - big_group: biggest group
    - nbig_group: number of members in the biggest group
    """
    output = []
    big_group = -1
    nbig_group = 0
    ngroup = max(group)
    ngroup_members = []
    # Hauria d'anar un mes?? ("Should it be one more??")
    for i in range(ngroup + 1):
        ngroup_members.append(0)
    for t in tors:
        group_number = max(group[t[0]], group[t[1]])
        output.append(group_number)
        if group_number >= 0:
            ngroup_members[group_number] = ngroup_members[group_number] + 1
    for i in range(ngroup + 1):
        if ngroup_members[i] > nbig_group:
            nbig_group = ngroup_members[i]
            big_group = i
    return output, big_group, nbig_group
8147c016efe435f46b587bc86e6fab713375bb70
9,090
def is_right_censored(lc, frange):
    """Returns true if the light curve is cut off on the right."""
    return len(lc['t0']) - 1 in frange
de2e81605db2dc2a5f073d8400e2e8ee1b46f199
9,093
def check_data(func):
    """Decorator function for checking possible exceptions during extraction.

    Args:
        func (obj): function used in try-except block

    Except:
        (str): in case of exception assigns '-' for the missing data.
    """
    def inner(line):
        try:
            return func(line)
        except Exception:
            return '-'
    return inner
b9dad9ff8adbee9f8307c4c61fc2d5e1918092e2
9,094
def is_empty_line(line: str) -> bool:
    """Checks whether a line is empty."""
    return line.strip("\n").strip("\t").strip() == ""
ad58cc78e5f25353419682343c34c21e2679304d
9,096
def to_host_list(value):
    """Space separated list of FQDNs."""
    return value.split()
85740e6e90096d5711022a7ae18b919673899b36
9,097
def _is_tarfile(filename):
    """Returns true if 'filename' is TAR file."""
    return (filename.endswith(".tar") or filename.endswith(".tar.gz") or
            filename.endswith(".tgz"))
761b776e0e8078ddd4bee694e0a9d853dd2e31fd
9,098
from typing import Optional, cast
import asyncio
import contextlib


def get_running_loop() -> Optional[asyncio.AbstractEventLoop]:
    """Check if an event loop is already running."""
    with contextlib.suppress(RuntimeError):
        if hasattr(asyncio, "get_running_loop"):
            return cast(
                asyncio.AbstractEventLoop,
                asyncio.get_running_loop(),  # type: ignore # pylint: disable=no-member # noqa
            )
        return asyncio._get_running_loop()  # pylint: disable=no-member,protected-access
    return None
48750bc03be5d8cd17da20a3ca01d02149d471f5
9,099
def distance_diff_catl(ra, dist, gap):
    """Computes the necessary distance between catalogues.

    Parameters
    ----------
    ra: float
        1st distance
    dist: float
        2nd distance
    gap: float
        gap between catalogues

    Returns
    ----------
    dist_diff: float
        amount of distance necessary between mocks
    """
    ra = float(ra)
    dist = float(dist)
    gap = float(gap)
    # Calculation of distance between catalogues
    dist_diff = (((ra + gap)**2 - (dist / 2.)**2.)**(0.5)) - ra
    return dist_diff
2a523d1c9c132dc8fcb65bd8d633bf24fcf46f42
9,100
def parse_token(filehandle, token):
    """Iterates through filehandle until token found. If value found after
    token, returns it.
    """
    for line in filehandle:
        line = line.strip()
        if line.startswith(token):
            if len(line) > len(token):
                return line.rsplit('\t', 1)[1]
            return None
    msg = f'Expected {token} in {filehandle}, but found EOF'
    raise ValueError(msg)
9f65ec378b33903250173aaa3f97cd058de13d2b
9,101
def short_information(title, index=0):
    """Takes in track information and returns everything as a short formatted
    string.

    Args:
        title (str): track title string
        index (str): optional track number string

    Returns:
        A short formatted string of all track information.
    """
    if " - " in title:
        split_title = str(title).split(" - ", 1)
        if index:
            return "{} {}".format(index, split_title[1])
        return "{}".format(split_title[1])
    if index:
        return "{} {}".format(index, title)
    return title
6754af1f2327eb5d9f37f4d25aa4f808d4793553
9,104
def add_css_file_extension(name):
    """Appends the CSS file extension to a string.

    :return: name with '.css' appended at the end
    :rType: string
    """
    return '%s.css' % name
fbe4569e4660cc4145bac36a5ea88ae87ec4c319
9,105
def _get_position(a, n):
    """returns position of substring :n: as "start", "end" or "middle" """
    position = a.index(n)
    if position == 0:
        return ("start", position)
    elif position + len(n) == len(a):
        return ("end", position)
    else:
        return ("middle", position)
f7a18c540542f117df822c18396e3e554d1eba45
9,106
def previous(values, elements, scope=None, strict=True):
    """Return closest previous (index, elem) of values within scope.

    Assumption: values and elements are sorted
    """
    # Init iterator on elements
    elem_indexes = enumerate(elements)
    index, elem = next(elem_indexes)
    try:
        nindex, nelem = next(elem_indexes)
    except StopIteration:
        nindex, nelem = None, None

    # Build results
    res = []
    for val in values:
        # No previous val
        if val < elem:
            res.append((None, None))
            continue
        # Get closest previous elem
        try:
            while nelem is not None and val >= nelem:
                index, elem = nindex, nelem
                nindex, nelem = next(elem_indexes)
        except StopIteration:
            pass
        # Check thld
        if scope is None:
            res.append((index, elem))
            continue
        thld = elem + scope
        if val < thld if strict else val <= thld:
            res.append((index, elem))
        else:
            res.append((None, None))
    return res
36b83dc2665539a3a9cb8b50419cd15410a8969c
9,107
def slice(from_index, to_index, list_or_string):
    """Returns the elements of the given list or string (or object with a
    slice method) from from_index (inclusive) to to_index (exclusive).
    Dispatches to the slice method of the third argument, if present.
    """
    return list_or_string[from_index:to_index]
130692bad6f7de87a07786afe0ea3d6d30902ba7
9,108
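Two quick checks for the slice helper above:

>>> slice(1, 3, 'abcd')
'bc'
>>> slice(1, 3, [10, 20, 30, 40])
[20, 30]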
def coding_strand_to_rna(strand):
    """converts the coding strand to the RNA strand (T --> U)"""
    strand = strand.upper().replace("T", "U")
    return strand
5c3420e921c10376b33b17dfc34e7301414bc6ef
9,110
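A quick check for coding_strand_to_rna above; lowercase input is upcased before the T -> U substitution:

>>> coding_strand_to_rna('atgcttag')
'AUGCUUAG'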
def graph_to_entities_json(g):
    """Converts the given graph to entities JSON.

    :param g: a graph
    :return: an array of JSON
    """
    entities = []
    for u, v in g.edges():
        entity = {
            "Device": "",
            "IP": "",
            "Identity": "",
            "Location": "",
            "Cookie": ""
        }
        source = g.nodes()[u]
        target = g.nodes()[v]
        entity[source["label"]] = source["id"]
        entity[target["label"]] = target["id"]
        entities.append(entity)
    return entities
ef790764c9e6ff4f652c41a5af1d5da3e4d98733
9,111
def recode_mark(mark, mark_shouldbe, no_mark="XX"):
    """A little helper function to remap clips to standard values that can
    then be parsed. Replaces BP with LPRP so ADBP becomes ADLPRP.

    Arguments:
    - `mark`: A mark string returned by the glfc database.
    - `mark_shouldbe`: a dictionary mapping values that are known to be
      ambiguous to their unambiguous counterpart
    - `no_mark`: the string to be used to represent records without any
      marking information.
    """
    mark = no_mark if (mark is None or mark == "") else mark
    for key, val in mark_shouldbe.items():
        mark = mark.replace(key, val)
    tmp = mark_shouldbe.get(mark)
    if tmp:
        return tmp
    return mark
ea31126d8b3d6e519a1f376f4ef58bfdbc24914a
9,112
import os


def remove_extension_from_filename(filename: str) -> str:
    """Return a filename without its extension."""
    return os.path.splitext(filename)[0]
3aecca5e188c2a029f3e419070b55e5654fdb49c
9,113
import argparse
import os


def parse_command_line():
    """Parse the command-line options."""
    formatter_class = argparse.ArgumentDefaultsHelpFormatter
    description = 'Clang-format: Allow CHKERRQ to be on same line.'
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=formatter_class)
    parser.add_argument('--version', '-V',
                        action='version',
                        version='%(prog)s (version 0.1)')
    parser.add_argument('--file', '-f', dest='filepath', type=str,
                        required=True,
                        help='Path of the file.')
    parser.add_argument('--output', '-o', dest='outpath', type=str,
                        default=None,
                        help='Path of the output file. '
                             'Default adds "-new" to the input file name.')
    parser.add_argument('--inplace', dest='inplace',
                        action='store_true', default=False,
                        help='Use flag to modify inplace.')
    args = parser.parse_args()
    if args.outpath is None and not args.inplace:
        name, ext = os.path.splitext(args.filepath)
        args.outpath = name + '-new' + ext
    if args.inplace:
        args.outpath = args.filepath
    return args
9f49b12e49bfe21caa7e179fadfb35cee7be35c5
9,114
from typing import Any, Iterable


def getWriteOutColour(
    colour: Iterable[Any], convertType: type = int, multiplier: int = 255
) -> list[Any]:
    """getWriteOutColour"""
    return [convertType(col * multiplier) for col in colour]
b81784b8e6fcce6a1479b710bc59b0db9c94241c
9,115
def add_frame_div_parent(cell_info):
    """Adds the frame a cell's parent divides on to cell info.

    Args:
        cell_info (dict): dict that maps cells to cell info

    Returns:
        dict: cell info with added frame_div_parent
    """
    new_info = cell_info.copy()
    for info in new_info.values():
        if info['parent']:
            parent = info['parent']
            info['frame_div_parent'] = new_info[parent]['frame_div']
        else:
            info['frame_div_parent'] = None
    return new_info
585feeaaf2a353ea2481cda41d547a004ecb8adc
9,117
import inspect


def get_instances_of(cls, context):
    """Get all instances of type ``cls`` from ``context``."""
    if type(context) is not dict:
        names = dir(context)
        context = {k: getattr(context, k) for k in names}
    objects = []
    for name, value in context.items():
        value_type = type(value)
        if inspect.isclass(value_type) and issubclass(value_type, cls):
            objects.append((name, value))
    return objects
f95eae2039f2b8b2bcfb09adbd09e24abb6dba48
9,120
def purpleair_us_corr(df, param):
    """US-Wide Correction equation of Barkjohn et al. 2021 for PurpleAir PA-II
    sensors.

    Publication Link:
        https://amt.copernicus.org/articles/14/4617/2021/

    Args:
        df (pandas dataframe): Dataframe with PurpleAir PA-II concentration
            values for PM2.5

    Returns:
        df (pandas dataframe): Modified dataframe with US-Correction applied
            to param values (under column header param + '_corrected')

    Raises:
        KeyError: If passed param name not in dataframe
        KeyError: If 'RH' not in passed dataframe
    """
    # US Correction for PA data
    df[param + '_corrected'] = 0.524*df[param + '_Value'] - 0.0852*df['RH_Value'] + 5.72
    return df
9a61af20cc6178de099a31f38215044da0eb0bc2
9,122
def validate_metadata(metadata, parameters):
    """validate metadata.

    Ensure metadata references parameter workload_context,
    and that it is a string.
    Return error message string or None if no errors.
    """
    for value in metadata.values():
        if isinstance(value, dict):
            if "get_param" in value:
                if value["get_param"] == "workload_context":
                    wc = parameters.get("workload_context", {})
                    if wc.get("type") == "string":
                        break
                    else:
                        return (
                            'must have parameter "workload_context"'
                            ' of type "string"'
                        )
                    break  # unreachable
    else:
        return None
177a1133bacd9e7560be9604cd03542eaf5944ff
9,123
import torch


def quaternions_to_so3_matrix(q):
    """Normalises q and maps to group matrix."""
    q = q / q.norm(p=2, dim=-1, keepdim=True)
    r, i, j, k = q[..., 0], q[..., 1], q[..., 2], q[..., 3]
    return torch.stack(
        [
            r * r - i * i - j * j + k * k,
            2 * (r * i + j * k),
            2 * (r * j - i * k),
            2 * (r * i - j * k),
            -r * r + i * i - j * j + k * k,
            2 * (i * j + r * k),
            2 * (r * j + i * k),
            2 * (i * j - r * k),
            -r * r - i * i + j * j + k * k,
        ],
        -1,
    ).view(*q.shape[:-1], 3, 3)
7b48bc7176a462497e64671fe8a204a9942c301c
9,125
import os


def get_cache_path(split):
    """Gets cache file name."""
    cache_path = os.path.join(
        os.path.dirname(__file__),
        "../../../data/mini-imagenet/mini-imagenet-cache-" + split + ".pkl")
    return cache_path
ee822a1a1940e61189513dd100693c60ec6f2e4b
9,126
def flux(component):
    """Determine flux in every channel.

    Parameters
    ----------
    component: `scarlet.Component` or array
        Component to analyze or its hyperspectral model
    """
    if hasattr(component, "get_model"):
        model = component.get_model()
    else:
        model = component
    return model.sum(axis=(1, 2))
b95b0aa926ee2cc2c78e90c425b47f04bc0a4d4c
9,127
import _ast


def BinOpMap(operator):
    """Maps operator strings for binary operations to their _ast node."""
    op_dict = {
        '+': _ast.Add,
        '-': _ast.Sub,
        '*': _ast.Mult,
        '**': _ast.Pow,
        '/': _ast.Div,
        '//': _ast.FloorDiv,
        '%': _ast.Mod,
        '<<': _ast.LShift,
        '>>': _ast.RShift,
        '|': _ast.BitOr,
        '&': _ast.BitAnd,
        '^': _ast.BitXor,
    }
    return op_dict[operator]()
0b332b1043b31b123daf8812e6f2ecb4e3974f19
9,128
def is_current_game_state(handler_input, state):
    """Check the current game state"""
    return handler_input.attributes_manager.session_attributes['game_state'] == state
a06e661408ca560d53ed15679af07dbb535744f0
9,129
def extract_title_from_text(text: str) -> str:
    """Extract and return the title line from a text written in Markdown.

    Returns the first line of the original text, minus any header markup
    ('#') at the start of the line.
    """
    firstline = text.split('\n', 1)[0]
    return firstline.lstrip('# ')
c51c7dd517b7d50a50df472d055618a092bb3518
9,131
def _all_pairs(i, contextsize, arrlen):
    """
    i: index in the array
    contextsize: size of the context around i
    arrlen: length of the array

    Returns iterator for index-tuples near i in a list of size s with context
    size @contextsize. Context of k around i-th index means all
    substrings/subarrays of a[i-N:i+N+1] containing a[i].
    """
    return [
        (l, r)
        # start anywhere between i - contextsize to i
        for l in range(max(i - contextsize, 0), i + 1)
        # end anywhere between i to i + contextsize
        for r in range(i + 1, min(i + 1 + contextsize, arrlen))
    ]
7234e7b092e60c74d4f1c0af44a469c25cc34dc9
9,132
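Enumerating _all_pairs above by hand for a small case (index 2 of a length-5 array with context size 1, so l ranges over {1, 2} and r over {3}):

>>> _all_pairs(2, 1, 5)
[(1, 3), (2, 3)]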
def percentformat(x, pos):
    """Generic percent formatter, just adds a percent sign"""
    if x == 0:
        return "0%"
    if x < 0.1:
        return ('%4.3f' % (x)) + "%"
    if x < 1:
        return ('%3.2f' % (x)) + "%"
    if x < 5:
        return ('%2.1f' % (x)) + "%"
    return ('%1.0f' % x) + "%"
382f9760e26a31c6ddcdf8a58600fa5010dcba1e
9,133
def bufferParser(readbuffer, burst=16):
    """Parse concatenated frames from a burst"""
    out = b''
    offset = 1
    while len(readbuffer) > 0:
        length = readbuffer[2]
        if readbuffer[4] == offset:
            out += readbuffer[5:3 + length]
            offset += 1
        readbuffer = readbuffer[4 + length:]
    return out
a3a7eb312f9e9c0e9a2183960074ebd1e9925025
9,136
def iterative_topological_sort(graph, start):
    """doesn't return some nodes"""
    seen = set()
    stack = []    # path variable is gone, stack and order are new
    order = []    # order will be in reverse order at first
    queue = [start]
    while queue:
        node = queue.pop()
        if node not in seen:
            seen.add(node)    # no need to append to path any more
            queue.extend(graph[node])
            while stack and node not in graph[stack[-1]]:    # new stuff here!
                order.append(stack.pop())
            stack.append(node)
    return stack + order[::-1]
a0654b1b1ce93818f01a5e6347243fabd1c0d23c
9,137
def _helper(length_diff, linked_list):
    """Helper function for longer linked list."""
    length_diff = abs(length_diff)
    return linked_list[length_diff:]
67c208d2eab4f1be4e3e7a77008b1c3169fcee73
9,138
def account(account):
    """Changing scope of the account fixture to be called after
    mailhog_delete_all fixture"""
    return account
7e5b4386b5aae8b5f35b6593e328401959c84ee5
9,139
def create_grouped_word_list(words, group_span_indices, join_string):
    """Group together words with join_string string and return updated token list."""
    adjusted_words = []
    curr_group = []
    group_idx = 0
    curr_group_start_idx, curr_group_end_idx = group_span_indices[group_idx]
    for i, token in enumerate(words):
        if curr_group_start_idx <= i <= curr_group_end_idx:
            curr_group.append(token)
            if i == curr_group_end_idx:
                adjusted_words.append(join_string.join(curr_group))
                # Update various indices
                curr_group = []
                group_idx += 1
                if group_idx < len(group_span_indices):
                    # Track next group
                    curr_group_start_idx, curr_group_end_idx = group_span_indices[group_idx]
                else:
                    # Don't enter this block anymore
                    curr_group_start_idx, curr_group_end_idx = len(words), len(words)
        else:
            adjusted_words.append(token)
    return adjusted_words
efd8c4a2e02b9704e6a3f8647ccb0d31be18551c
9,141
def sec_title(default_str: str) -> str:
    """Reads in a section title"""
    name = input('What would you like to title this section? ' +
                 '(default is ' + default_str + ')\n')
    if name:
        return name
    return default_str
3dfc0ddcdc9cb9beb22b02892959334516b2a90b
9,144
def e_timeToString(dateString):
    """
    input: string
    output: string
    description: format dateString to yyyymmddHHMM
    example: Wed Aug 29 07:23:03 CST 2018 ->> 201808290723
    """
    # month list used to map month names to digits
    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    year_string = dateString[-4:]
    month_string = str(month.index(dateString[4:7]) + 1)
    if len(month_string) == 1:
        month_string = "0" + month_string
    day_string = dateString[8:10]
    # time format HHMM
    time_string = dateString[11:16].replace(":", "")
    return year_string + month_string + day_string + time_string
1a0c3f014bbd95a9da0eb767e1ce219cb0c70195
9,145
def tachycardic_detector(patient_age, patient_heart_rate):
    """Determines if patient is tachycardic based on their age and heart rate

    Args:
        patient_age: integer extracted from patient database entry
        patient_heart_rate: integer posted to patient database entry

    Returns:
        tachycardic_status: string containing either "tachycardic" or
        "not tachycardic"
    """
    if patient_age < 3 and patient_heart_rate > 151:
        tachycardic_status = "tachycardic"
    elif patient_age < 5 and patient_heart_rate > 137:
        tachycardic_status = "tachycardic"
    elif patient_age < 8 and patient_heart_rate > 133:
        tachycardic_status = "tachycardic"
    elif patient_age < 12 and patient_heart_rate > 130:
        tachycardic_status = "tachycardic"
    elif patient_age <= 15 and patient_heart_rate > 119:
        tachycardic_status = "tachycardic"
    elif patient_heart_rate > 100:
        tachycardic_status = "tachycardic"
    else:
        tachycardic_status = "not tachycardic"
    # logging.info("Tachycardic status calculated: " + tachycardic_status)
    return tachycardic_status
595bf87d913cd94b9f4aa089a3f1cf32f342ccbf
9,146
import argparse


def parse_args():
    """parsing and configuration
    :return: parse_args
    """
    desc = "TensorFlow implementation of fast-style-GAN"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--module', type=str, default='test',
                        help='Module to select: train, test, test_dataset, '
                             'create_dataset, train_without_affine')
    parser.add_argument('--training', type=bool, default=False,
                        help='Set to True when training the model, else False')
    parser.add_argument('--GPU', type=str, default='0',
                        help='GPU used to train the model')
    parser.add_argument('--config', type=str, default='config/config.yml',
                        help='Path of config file for testing')
    return parser.parse_args()
fa256927a5b1c0e4cb34b341b7960617f8d238d1
9,147
import pickle


def save_to_pkl(pkl, obj):
    """Save experiment resource to file."""
    with open(pkl, 'wb') as f:
        pickle.dump(obj, f)
    return obj
cf8c71617faa88a192e8214bd71a43e78c6acb67
9,149
import torch


def gelu_fast(x):
    """Faster approximate form of GELU activation function"""
    return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
39f2e888b8e01edf0aaca4987c8a070850f58484
9,150
def projects(request):
    """Display projects."""
    return {}
749b2a1d5de2427d7059b04c72d28c49f7792187
9,152
import argparse
from pathlib import Path


def get_args():
    """Function to parse command line arguments.

    Returns:
        argparse.Namespace: parsed arguments
    """
    parser = argparse.ArgumentParser()
    model_group = parser.add_mutually_exclusive_group(required=True)
    model_group.add_argument("--ckpt-path", type=str, default="",
                             help="Checkpoint path.")
    model_group.add_argument("--model-path", type=str, default="",
                             help="Saved model path.")
    parser.add_argument("--device", type=str, default="cpu",
                        help="Device to load model on.")
    parser.add_argument("--img-path", type=Path, help="Image path.",
                        required=True)
    parser.add_argument("--dataset-root", type=Path,
                        help="Path to JHU Crowd++ dataset.")
    parser.add_argument("--subset", type=str, default="test", help="Subset.")
    return parser.parse_args()
15c8b0926ba43c6caa509b725778fb63533e001b
9,153
def diff_field(field1, field2):
    """returns true if field1 == field2"""
    return field1 == field2
6439d8c06c1d5b460141831acf83275795d19ccc
9,154
import random


def example_classifier(
    task_info,
    mode="demo",
    default_split_prob={
        "train": 0.9,
        "dev": 0.01,
        "test": 0.09,
    },
):
    """This will return the split this data belongs to."""
    if mode == "demo" or mode == "all":
        if random.random() < default_split_prob["train"]:
            return "train"
        else:
            if random.random() < 0.9:
                return "test"
            else:
                return "dev"
    else:
        # We need to add logic here to determine
        # compositional splits!
        pass
51aa25630158a4c295df85afc8684be59aca9d25
9,155
def islist(item):
    """Check if an item is a list - not just a sequence.

    Args:
        item (mixed): The item to check as a list.

    Returns:
        result (bool): True if the item is a list, False if not.
    """
    return isinstance(item, list)
02c4157e1867e7b113e9695f2fa8fd4aaccc043d
9,156
def group_parameters(model_params_dict_expanded):
    """Groups the parameters to be estimated in a flat dictionary structure."""
    model_params_dict_flat = dict()
    model_params_dict_flat["gamma_0s"] = list(
        model_params_dict_expanded["const_wage_eq"].values()
    )
    model_params_dict_flat["gamma_1s"] = list(
        model_params_dict_expanded["exp_returns"].values()
    )
    model_params_dict_flat["g_s"] = list(
        model_params_dict_expanded["exp_accm"].values()
    )
    model_params_dict_flat["g_bar_s"] = list(
        model_params_dict_expanded["exp_accm_expected"].values()
    )
    model_params_dict_flat["delta_s"] = list(
        model_params_dict_expanded["exp_deprec"].values()
    )
    for key_ in list(model_params_dict_expanded["disutil_work"].keys()):
        if "child" in key_:
            model_params_dict_flat[key_] = model_params_dict_expanded["disutil_work"][key_]
    for i in ["no", "yes"]:
        for j in ["f", "p"]:
            model_params_dict_flat[i + "_kids_" + j] = [
                model_params_dict_expanded["disutil_work"][i + "_kids_" + j + "_educ_low"],
                model_params_dict_expanded["disutil_work"][i + "_kids_" + j + "_educ_middle"],
                model_params_dict_expanded["disutil_work"][i + "_kids_" + j + "_educ_high"],
            ]
    model_params_dict_flat["shocks_cov"] = model_params_dict_expanded["derived_attr"]["shocks_cov"]
    model_params_dict_flat["type_shares"] = model_params_dict_expanded["derived_attr"]["type_shares"]
    if model_params_dict_expanded["derived_attr"]["num_types"] > 1:
        for i in ["p", "f"]:
            model_params_dict_flat["theta_" + i] = [
                v
                for k, v in model_params_dict_expanded["hetrg_unobs"].items()
                if "{}".format("theta_" + i) in k
            ]
    else:
        pass
    return model_params_dict_flat
deb566114d1b40610bf6e1e814e85b1d8d3e3351
9,157
import pandas


def read_static_info(static_tracks_file):
    """This method reads the static info file from highD data.

    :param static_tracks_file: the input path for the static csv file.
    :return: a list of record dictionaries, one per track, each holding the
        corresponding static data for that track
    """
    return pandas.read_csv(static_tracks_file).to_dict(orient="records")
295757466640f90b0d3f95dd1d68aab0c90b329b
9,158
import os


def get_file_extension(fname):
    """Returns the extension from a filepath string, ignoring the '.' character."""
    return os.path.splitext(fname)[-1][1:]
44c751df76fe34d2df81cc98a2c140556ddfbcf3
9,159
import os


def check_extension(fname, extension=".csv"):
    """Checks whether the fname includes an extension. Adds an extension if
    none exists.

    fname - the name of the file to check.
    extension - the extension to append if necessary.
        >> Default: ".csv".
    """
    root, ending = os.path.splitext(fname)
    if not ending:
        ending = extension
    return root + ending
05bb018453101d0017be4dade0bc9199e67e7dfb
9,160
def tshirt_code(tshirt_string):
    """convert tshirt size strings into a code for us"""
    if not tshirt_string:
        return ""
    tshirt_code = ""
    if tshirt_string[0] == "f":
        tshirt_code += "0"
        tshirt_string = tshirt_string[1:]
    else:
        tshirt_code += "1"
    size_code = {"s": "1", "m": "2", "l": "3", "xl": "4",
                 "xxl": "5", "3xl": "6", "4xl": "7"}
    tshirt_code += size_code.get(tshirt_string, "")
    return tshirt_code
f66d908528c6caa47ca878e4115eec00c52e3046
9,161
import time
import calendar


def dates_to_epoch(d):
    """Recursively converts all dict values that are struct_times to Unix
    timestamps."""
    for key, value in d.iteritems():
        if hasattr(value, 'iteritems'):
            d[key] = dates_to_epoch(value)
        elif type(value) is time.struct_time:
            d[key] = int(calendar.timegm(value))
    return d
6a0a9a8f1a1636376973e65c4d3b4ff8a3603d3d
9,162
from typing import Any, Dict
import logging


def build_log_config(level: str) -> Dict[str, Any]:
    """Build a log config from a level."""
    return {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
        },
        "handlers": {
            "stream_handler": {
                "class": "logging.StreamHandler",
                "formatter": "basic",
                "level": getattr(logging, level),
            },
        },
        "loggers": {
            "": {
                "handlers": ["stream_handler"],
                "level": getattr(logging, level),
            },
        },
    }
e20a419ee6c69f6fa0eefbd51e5542349b1a1e8b
9,163
def i2n(i):
    """ip to number"""
    ip = [int(x) for x in i.split('.')]
    return ip[0] << 24 | ip[1] << 16 | ip[2] << 8 | ip[3]
14496c2e7c83794a8364732c512f2d3cfdaba1d9
9,165
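A quick check of i2n above: 192 * 2**24 + 168 * 2**16 + 0 * 2**8 + 1:

>>> i2n('192.168.0.1')
3232235521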
def count_increases(report):
    """Count the number of adjacent increases in the report.

    >>> count_increases([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
    7
    """
    return sum((1 if report[n] < report[n + 1] else 0
                for n in range(len(report) - 1)))
bb38ae2de0f5e7a8f7f2904cdca62a7de80543ab
9,166
from typing import Iterable, Optional


def effective_sample_size(
    weights: Iterable[float],
    total_weight: Optional[float] = None,
) -> float:
    """Computes the "effective sample size" of the given weights.

    This value represents how "healthy" the underlying samples are. The lower
    this value, the fewer "real samples" are represented. As in, the closer
    this comes to zero, the more degenerated the samples are.

    See `https://en.wikipedia.org/wiki/Effective_sample_size`

    :param weights: the weights of the samples
    :param total_weight: total weight of all samples, requires extra
        computation if not given
    :return: the effective sample size of ``weights``
    """
    if total_weight is None:
        total_weight = sum(weights)
    assert total_weight and total_weight > 0  # for mypy
    return pow(total_weight, 2) / sum(pow(w, 2) for w in weights)
6915abd0484dc4b08b47c1c88b6e19e2af5dd1c4
9,168
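Two boundary cases for effective_sample_size above: N equal weights yield an effective size of N, while one dominant weight collapses it toward 1 (the second result is rounded to suppress floating-point noise):

>>> effective_sample_size([1.0, 1.0, 1.0, 1.0])
4.0
>>> round(effective_sample_size([10.0, 1e-9]), 6)
1.0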
def split_pair_occurrence(input_str):
    """
    Q9HD36.A79T (x11) → (Q9HD36.A79T, 11)
    """
    if '(x' not in input_str:
        return input_str, 1
    pair, occurrence = [item.strip() for item in input_str.split()]
    occurrence = int(occurrence[2:-1])
    return pair, occurrence
0812e907a97894ff6f2d94722874b3917ce30ad8
9,169
def find_joins(df, ids, downstream_col="downstream", upstream_col="upstream",
               expand=0):
    """Find the joins for a given segment id in a joins table.

    Parameters
    ----------
    df : DataFrame
        data frame containing the joins
    ids : list-like
        ids to lookup in upstream or downstream columns
    downstream_col : str, optional (default "downstream")
        name of downstream column
    upstream_col : str, optional (default "upstream")
        name of upstream column
    expand : positive int, optional (default 0)
        If > 0, will expand the search to "expand" degrees from the original
        ids. E.g., if expand is 2, this will return all nonzero joins that
        are within 2 joins of the original set of ids.

    Returns
    -------
    Joins that have the id as an upstream or downstream.
    """
    out = df.loc[(df[upstream_col].isin(ids)) | (df[downstream_col].isin(ids))]
    # find all upstream / downstream joins of ids returned at each iteration
    for i in range(expand):
        next_ids = (set(out[upstream_col]) | set(out[downstream_col])) - {0}
        out = df.loc[
            (df[upstream_col].isin(next_ids)) | (df[downstream_col].isin(next_ids))
        ]
    return out
39f16985ddd8e79338e520e56ba6ee793558d03f
9,170
import math


def max_crossing_subarray(given_array, start_index, mid_index, end_index):
    """Function to calculate the mid-crossing sub array sum."""
    max_left_sum = -math.inf   # Used for sentinel value
    max_right_sum = -math.inf  # Used for sentinel value
    cross_start = None  # Just used for variable pre-assignment.
    cross_end = None    # Just used for variable pre-assignment.

    temp_sum = 0
    # Find max sub array in left part
    for i in reversed(range(start_index, mid_index + 1)):
        temp_sum = temp_sum + given_array[i]
        # Keep track of the max sum
        if temp_sum > max_left_sum:
            max_left_sum = temp_sum
            cross_start = i

    temp_sum = 0
    # Find max sub array in right part
    for j in range(mid_index + 1, end_index + 1):
        temp_sum = temp_sum + given_array[j]
        # Keep track of the max sum
        if temp_sum > max_right_sum:
            max_right_sum = temp_sum
            cross_end = j

    max_sum_ans = max_left_sum + max_right_sum  # Max from this sub array.
    return cross_start, cross_end, max_sum_ans
1106b063b652e0d0d475f5b0979a138f4c48113b
9,172
import re


def camel_2_snake_case(word):
    """
    >>> camel_2_snake_case("HTTPResponseCodeXYZ")
    'http_response_code_xyz'

    From https://stackoverflow.com/a/1176023/548792
    """
    return re.sub(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))", r"_\1", word).lower()
dc20c832a212f89d51bb05302c9e1677e8f2cb83
9,173
from datetime import datetime


def now_str(time=False):
    """Return string to be used as time-stamp."""
    now = datetime.now()
    return now.strftime(f"%Y-%m-%d{('_%H:%M:%S' if time else '')}")
02b73bda5f27e7c25120d50d50244bd103661c90
9,174
def from_file(path: str) -> set:
    """
    Read conditions from a file. Each line contains a separate condition.

    :param path: Path to file.
    :return: Read conditions.
    """
    conditions = set()
    with open(path) as f:
        for line in f:
            # strip the trailing newline so identical conditions compare equal
            conditions.add(line.strip())
    return conditions
3780d540d6f300fe0a97d354ed33fa0aab803d56
9,175
def getLeastReplaggedCommons():
    """Returns the name of the least replagged Commons replica among s1, s2
    and s3."""
    return "commonswiki-p.rrdb.toolserver.org"
    # broken:
    # return urllib.urlopen("http://toolserver.org/~eusebius/leastreplag").readline()
5726416f7a1cbb09f51de81d967009005a06af1b
9,178