content: string (lengths 35 – 416k)
sha1: string (lengths 40 – 40)
id: int64 (0 – 710k)
import re

def self_closing(xml_str, isSelfClosing):
    """
    Collapse empty tags into self-closing form, e.g. <a></a> -> <a/>.

    :param xml_str: the XML string to transform
    :param isSelfClosing: "true" to enable the transformation
    :return: the transformed XML string
    """
    if isSelfClosing == "true":
        xml_str = re.sub(r"<(.*)>(</.*>)", r"<\1/>", xml_str)
    return xml_str
b8b68626549da9a27335c5340db3ba65b753af90
5,609
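A minimal usage sketch for self_closing above (illustrative, not part of the dataset record):

print(self_closing("<br></br>", "true"))   # -> "<br/>"
print(self_closing("<br></br>", "false"))  # -> "<br></br>"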
def request_pet_name():
    """Requests the user's pet name as input.

    Args:
        NONE
    Returns:
        User's name.
    Raises:
        ValueError: If input is not a character.
    """
    while True:
        try:
            if (pet_name := input("Enter your pet's name: \n")).isalpha():
                break
            else:
                print("Must be characters, please enter your pet's name again.")
        except ValueError:
            print("Provide name with only characters.")
            continue
    return pet_name
efef2cfb0792b89f158f5a0bb42d10cf9bd1655d
5,610
def _get_variable_names(expression):
    """Return the list of variable names in the Numexpr `expression`."""
    names = []
    stack = [expression]
    while stack:
        node = stack.pop()
        if node.astType == 'variable':
            names.append(node.value)
        elif hasattr(node, 'children'):
            stack.extend(node.children)
    return list(set(names))
db75b0066b89bc7a6a022a56b28981910836524c
5,611
def t(string):
    """Prefix `string` with one tab ("\t") for each "." it contains."""
    return string.count(".") * "\t" + string
a394ac3983369836666d0610c345c6ef3c095994
5,612
def get_boundary_levels(eris):
    """Get boundary levels for eris."""
    return [func(eris.keys()) for func in (min, max)]
20d98447e600fecc3b9495e9fb5e5d09ff3b3c1e
5,613
from typing import Dict

def merge_hooks(hooks1: Dict[str, list], hooks2: Dict[str, list]) -> Dict[str, list]:
    """
    Overview:
        Merge two hooks which have the same keys; each value is sorted by hook priority with a stable method.
    Arguments:
        - hooks1 (:obj:`dict`): hooks1 to be merged
        - hooks2 (:obj:`dict`): hooks2 to be merged
    Returns:
        - new_hooks (:obj:`dict`): merged new hooks

    .. note::
        This merge function uses a stable sort method without disturbing hooks of the same priority.
    """
    assert set(hooks1.keys()) == set(hooks2.keys())
    new_hooks = {}
    for k in hooks1.keys():
        new_hooks[k] = sorted(hooks1[k] + hooks2[k], key=lambda x: x.priority)
    return new_hooks
add5ae72917ca9aff109e8ac86a4d6902c14b298
5,614
def get_max_assocs_in_sample_csr(assoc_mat):
    """
    Returns the maximum number of co-associations a sample has and the index
    of that sample.
    """
    first_col = assoc_mat.indptr
    n_cols = first_col[1:] - first_col[:-1]
    max_row_size = n_cols.max()
    max_row_idx = n_cols.argmax()
    return max_row_size, max_row_idx
a341153afa0398cb2a43b97614cd39129e6b2ac5
5,615
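A minimal usage sketch for get_max_assocs_in_sample_csr above, assuming SciPy is available (illustrative, not part of the dataset record):

import numpy as np
from scipy.sparse import csr_matrix

mat = csr_matrix(np.array([[1, 0, 1], [1, 1, 1], [0, 0, 1]]))
print(get_max_assocs_in_sample_csr(mat))  # -> (3, 1): row 1 has 3 nonzeros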
def _decicelsius_to_kelvins(temperatures_decicelsius):
    """Converts temperatures from decidegrees Celsius to Kelvins.

    :param temperatures_decicelsius: numpy array of temperatures in decidegrees Celsius.
    :return: temperatures_kelvins: numpy array of temperatures in Kelvins, with same shape as input.
    """
    return temperatures_decicelsius * 0.1 + 273.15
880d42637970c680cd241b5418890468443c6a5b
5,616
import json

def cancelCardTransactionPayload(cancel_time):
    """
    Function for constructing the payload for the cancelCardTransaction API call.

    Note: All parameters are of type String unless otherwise stated below.

    :param cancel_time: Date and time of the request. Format - YYYY-MM-DD HH:mm:ss
    :return: JSON payload for the API call
    """
    payload_py = {"cancel_time": cancel_time}
    payload_json = json.dumps(payload_py)
    return payload_json
e96ee75bbc4c20a094283fa664bca6ddd6b9556c
5,619
import os

def device_exists(device):
    """Check if ethernet device exists."""
    return os.path.exists('/sys/class/net/%s' % device)
94c42317eb42007b9c96896a58e1b179b47e297e
5,620
def to_bin(s):
    """
    :param s: string to represent as binary
    """
    r = []
    for c in s:
        if not c:
            continue
        t = "{:08b}".format(ord(c))
        r.append(t)
    return '\n'.join(r)
b4c819ae25983a66e6562b3677decd8389f5fbe2
5,622
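A minimal usage sketch for to_bin above (illustrative, not part of the dataset record):

binary = to_bin("Hi")  # returns "01001000\n01101001", one 8-bit line per character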
def undupe_column_names(df, template="{} ({})"):
    """
    rename df column names so there are no duplicates (in place)

    e.g. if there are two columns named "dog", the second column will be
    reformatted to "dog (2)"

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe whose column names should be de-duplicated
    template : template taking two arguments (old_name, int) to use to rename columns

    Returns
    -------
    df : pandas.DataFrame
        dataframe that was renamed in place, for convenience in chaining
    """
    new_names = []
    seen = set()
    for name in df.columns:
        n = 1
        new_name = name
        while new_name in seen:
            n += 1
            new_name = template.format(name, n)
        new_names.append(new_name)
        seen.add(new_name)
    df.columns = new_names
    return df
51d13bad25571bc60edd78026bb145ff99281e2d
5,624
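A minimal usage sketch for undupe_column_names above, assuming pandas is available (illustrative, not part of the dataset record):

import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=["dog", "dog", "cat"])
undupe_column_names(df)
print(list(df.columns))  # -> ['dog', 'dog (2)', 'cat']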
def js(data):
    """Minify JS."""
    # Does nothing for now.
    return data
2ee82b81dcb3cfb9d133ed218ba1c67b5d16f691
5,625
def _has_endpoint_name_flag(flags):
    """
    Detect if the given flags contain any that use ``{endpoint_name}``.
    """
    return '{endpoint_name}' in ''.join(flags)
e8827da778c97d3be05ec82ef3367686616d3a88
5,626
def convert_example(example, tokenizer, max_seq_len=512, max_response_len=128, max_knowledge_len=256, mode='train'):
    """Convert all examples into necessary features."""
    goal = example['goal']
    knowledge = example['knowledge']
    goal_knowledge = ' '.join([' '.join(lst) for lst in goal + knowledge])
    if mode != 'test':
        tokenized_example = tokenizer.dialogue_encode(
            example['history'],
            response=example['response'],
            knowledge=goal_knowledge,
            task_type='knowledge',
            max_seq_len=max_seq_len,
            max_response_len=max_response_len,
            max_knowledge_len=max_knowledge_len,
            return_length=True)
        response_start = tokenized_example['input_ids'].index(tokenizer.cls_token_id, 1)
        response_end = tokenized_example['seq_len']
        # Used to gather the logits corresponding to the labels during training
        tokenized_example['masked_positions'] = list(range(response_start, response_end - 1))
        tokenized_example['labels'] = tokenized_example['input_ids'][response_start + 1:response_end]
        return tokenized_example
    else:
        tokenized_example = tokenizer.dialogue_encode(
            example['history'],
            knowledge=goal_knowledge,
            task_type='knowledge',
            max_seq_len=max_seq_len,
            max_knowledge_len=max_knowledge_len,
            add_start_token_as_response=True)
        if 'response' in example:
            tokenized_example['response'] = example['response']
        return tokenized_example
5ebce39468cda942f2d4e73cd18f8fa4dd837f0a
5,627
def format_date(date: str):
    """
    This function formats dates that are in MM/DD/YYYY format and converts
    them to YYYY-MM-DD, which is required by sqlite.

    :param date: The date to modify.
    :return: The modified string.
    """
    tmp = date.split("/")
    return "{}-{}-{}".format(tmp[2], tmp[0], tmp[1])
f1a0149bfd96db557c49becdedb84789daa1168c
5,630
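A minimal usage sketch for format_date above (illustrative, not part of the dataset record):

print(format_date("04/17/2023"))  # -> "2023-04-17"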
def f_function(chromosome):
    """Define Fitness Function Here."""
    x = chromosome.convert_to_integer()
    return (15 * x[0]) - (x[0] * x[0])
    # return (((15 * x[0]) - (x[0] * x[0])) * -1) + 1000  # To find the minimum solution
aee3744c63ada24302857ef4ddb4e6aff35fc69e
5,631
def xcrun_field_value_from_output(field: str, output: str) -> str:
    """
    Get the value of a given field from xcrun output.
    If the field is not found, an empty string is returned.
    """
    field_prefix = field + ': '
    for line in output.splitlines():
        line = line.strip()
        if line.startswith(field_prefix):
            return line[len(field_prefix):]
    return ''
a99efe76e21239f6ba15b8e7fb12d04d57bfb4de
5,633
import re

def parse_sl(comments: str):
    """Parses comments for SL on an order"""
    parsed = None
    sl_at = r"(SL\s{0,1}@\s{0,1})"
    sl_price = r"([0-9]{0,3}\.[0-9]{1,2}((?!\S)|(?=[)])))"
    pattern = sl_at + sl_price
    match = re.search(pattern, comments)
    if match:
        parsed = match.group(2)
    return parsed
d993fc1686fa2623423269812c834aedb0d504e2
5,634
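A minimal usage sketch for parse_sl above (illustrative, not part of the dataset record):

print(parse_sl("long entry (SL @ 98.50)"))  # -> "98.50"
print(parse_sl("no stop loss here"))        # -> None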
def trip(u, v):
    """
    Returns the scalar triple product of vectors u and v and the z axis.
    The convention is z dot (u cross v). Dotting with the z axis simplifies
    it to the z component of u cross v.

    The product is:
        positive if v is to the left of u, that is,
            the shortest right-hand rotation from u to v is ccw
        negative if v is to the right of u, that is,
            the shortest right-hand rotation from u to v is cw
        zero if v is collinear with u

    Essentially trip is the z component of the cross product u x v.
    """
    return (u[0] * v[1] - u[1] * v[0])
5f687ee4b16dc6c1b350ed574cb632a7c9ca996b
5,636
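A minimal usage sketch for trip above (illustrative, not part of the dataset record):

print(trip((1, 0), (0, 1)))   # -> 1, v is to the left of u (ccw)
print(trip((1, 0), (0, -1)))  # -> -1, v is to the right of u (cw)
print(trip((1, 0), (2, 0)))   # -> 0, collinear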
import os

def is_ci() -> bool:
    """Return whether running in a CI environment."""
    return os.environ.get("CI", "") != ""
642b714d55fe52c93849b2775c4e0b4fede9f197
5,638
def get_instance(module, name, config):
    """
    Get the module indicated in config[name]['type'];
    if there are args to specify the module, specify them in config[name]['args'].
    """
    func_args = config[name]['args'] if 'args' in config[name] else None
    # if any arguments are specified in config[name]['args']
    if func_args:
        return getattr(module, config[name]['type'])(**func_args)
    # if not, then just return the module
    return getattr(module, config[name]['type'])()
ea57e7097665343199956509bb302e3806fb383a
5,639
def get_n_largest(n, lst, to_compare=lambda x: x):
    """
    This returns the largest n elements from the list, in descending order.
    """
    # Seed with the first n elements sorted descending (the original seeded
    # with lst[0] repeated n times, which over-counts the first element),
    # then sift the remaining elements in.
    largests = sorted(lst[:n], key=to_compare, reverse=True)
    for x in lst[n:]:
        if to_compare(x) <= to_compare(largests[-1]):
            continue
        for i, y in enumerate(largests):
            if to_compare(x) >= to_compare(y):
                largests = largests[:i] + [x] + largests[i:-1]
                break
    return largests
4ef85d8656ae152ecab65d3a01bce7f885c47577
5,640
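A minimal usage sketch for get_n_largest above, with the seeding fix applied (illustrative, not part of the dataset record):

print(get_n_largest(3, [4, 1, 7, 3, 9]))  # -> [9, 7, 4]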
def dp_palindrome_length(dp, S, i, j):
    """
    Recursive function for finding the length of the longest palindromic
    subsequence in a string. This is the algorithm covered in the lecture.
    It uses memoization to improve performance; dp ("dynamic programming")
    is a Python dict containing previously computed values.
    """
    if i == j:
        return 1
    if (i, j) in dp:
        return dp[(i, j)]
    if S[i] == S[j]:
        if i + 1 == j:
            dp[(i, j)] = 2
        else:
            dp[(i, j)] = 2 + dp_palindrome_length(dp, S, i + 1, j - 1)
    else:
        dp[(i, j)] = max(
            dp_palindrome_length(dp, S, i + 1, j),
            dp_palindrome_length(dp, S, i, j - 1))
    return dp[(i, j)]
10a8ac671674ba1ef57cd473413211a339f94e62
5,641
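A minimal usage sketch for dp_palindrome_length above (illustrative, not part of the dataset record):

s = "character"
print(dp_palindrome_length({}, s, 0, len(s) - 1))  # -> 5, e.g. "carac"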
def create_own_child_column(X):
    """
    Replaces the column 'relationship' with a binary one called own-child
    """
    new_column = X['relationship'] == 'own-child'
    X_transformed = X.assign(own_child=new_column)
    X_transformed = X_transformed.drop('relationship', axis=1)
    return X_transformed
303ec8f073920f0bba6704740b200c7f3306b7bd
5,642
def load_spans(file):
    """
    Loads the predicted spans
    """
    article_id, span_interval = ([], [])
    with open(file, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            art_id, span_begin, span_end = [int(x) for x in line.rstrip().split('\t')]
            span_interval.append((span_begin, span_end))
            article_id.append(art_id)
    return article_id, span_interval
8f8de31e1d1df7f0d2a44d8f8db7f846750bd89f
5,643
def is_stupid_header_row(row):
    """Returns true if we believe row is what the EPN-TAP people used as
    section separators in the columns table. That is: the text is red :-)
    """
    try:
        perhaps_p = row.contents[0].contents[0]
        perhaps_span = perhaps_p.contents[0]
        if perhaps_span.get("style") == 'color: rgb(255,0,0);':
            return True
    except (AttributeError, KeyError):
        pass  # Fall through to False
    return False
124108520486c020d2da64a8eb6f5d266990ae02
5,644
import re

def _get_http_and_https_proxy_ip(creds):
    """
    Get the http and https proxy ip.

    Args:
        creds (dict): Credential information according to the dut inventory
    """
    return (re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('http_proxy', ''))[0],
            re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('https_proxy', ''))[0])
b18d89718456830bdb186b3b1e120f4ae7c673c7
5,645
import os

def multiple_files_multiple_tracks():
    """Returns a path to a CUE file with multiple tracks per source file."""
    cue_file = "Non-ISO_extended-ASCII_text_with_CRLF.cue"
    return os.path.join("tests", "files", cue_file)
36659616d2e065a8f8d9b1d7956e2c8326cdc805
5,646
def geometric_expval(p):
    """
    Expected value of geometric distribution.
    """
    return 1. / p
3afb3adb7e9dafa03026f22074dfcc1f81c58ac8
5,647
import os

def getfile(basedir, manifest_value, user_argument):
    """Get the name for a file that is referenced in a workflow manifest.

    If the user argument is given it overrides the respective value in the
    manifest. For user arguments we first assume that the path references a
    file on disk, either as an absolute path or as a path relative to the
    current working directory. If no file exists at the specified location,
    an attempt is made to read the file relative to the base directory.
    Manifest values are always assumed to be relative to the base directory.

    Parameters
    ----------
    basedir: string
    manifest_value: string
        Relative path to the file in the base directory.
    user_argument: string
        User provided value that overrides the manifest value. This value
        can be None.

    Returns
    -------
    string
    """
    if user_argument is not None:
        if os.path.isfile(user_argument):
            # If the user argument points to an existing file that file is
            # returned.
            return user_argument
        # Assume that the user argument points to a file relative to the base
        # directory.
        return os.path.join(basedir, user_argument)
    return os.path.join(basedir, manifest_value)
6ec02bf01ce280843d74bedc731964c6e5f74de4
5,651
from pathlib import Path

def _ignore_on_copy(directory, contents):  # pylint: disable=unused-argument
    """Provides a list of items to be ignored.

    Args:
        directory (Path): The path to the current directory.
        contents (list): A list of files in the current directory.

    Returns:
        list: A list of files to be ignored.
    """
    # shutil passes strings, so ensure a Path
    directory = Path(directory)
    if directory.name == "material":
        return ["mkdocs_theme.yml", "main.html", "404.html"]
    if directory.name == "partials":
        return ["integrations"]
    if directory.name == "images":
        return ["favicon.png"]
    return []
3a551f6a252406b88fb19c0dc8180631cd5996ce
5,652
import torch

def generate_fake_data_loader():
    """Generate a fake DataLoader, i.e. a list with sub-lists of samples and
    labels. It has four batches with three samples each.
    """
    samples1 = torch.tensor([[2., 2., 2., 2.], [2., 2., 0., 0.], [0., 0., 2., 2.]])
    samples2 = torch.tensor([[1., 2., 3., 4.], [1., 1., 2., 2.], [2., 2., 2., 2.]])
    labels1 = torch.tensor([0, 0, 1])
    labels2 = torch.tensor([1, 1, 0])
    return [[samples1, labels1], [samples1, labels2], [samples2, labels1], [samples2, labels2]]
4d86ab464653f5766a44f03e41fd2c26714cabf1
5,653
def is_viable(individual):
    """
    evaluate.evaluate() will set an individual's fitness to NaN and the
    attribute `is_viable` to False, and will assign any exception triggered
    during the individual's evaluation to `exception`.

    This just checks the individual's `is_viable`; if it doesn't have one,
    this assumes it is viable.

    :param individual: to be checked if viable
    :return: True if individual is viable
    """
    if hasattr(individual, 'is_viable'):
        return individual.is_viable
    else:
        return True
c1e5c839f362e99800dcd1a996be9345cabb4261
5,654
import string
import random

def getCookie():
    """
    This function will return a randomly generated cookie.

    :return: A cookie
    """
    lettersAndDigits = string.ascii_lowercase + string.digits
    cookie = 'JSESSIONID='
    cookie += ''.join(random.choice(lettersAndDigits) for ch in range(31))
    return cookie
6fff76d37921174030fdaf9d4cb8a39222c8906c
5,655
def getKey(event):
    """Returns the Key Identifier of the given event.

    Available Codes:
    https://www.w3.org/TR/2006/WD-DOM-Level-3-Events-20060413/keyset.html#KeySet-Set
    """
    if hasattr(event, "key"):
        return event.key
    elif hasattr(event, "keyIdentifier"):
        if event.keyIdentifier in ["Esc", "U+001B"]:
            return "Escape"
        else:
            return event.keyIdentifier
    return None
0935ad4cb1ba7040565647b2e26f265df5674e1d
5,657
def get_long_season_name(short_name):
    """Convert a short season name of format 1718 to a long name like 2017-18.

    Past generations: sorry this doesn't work for 1999 and earlier!
    Future generations: sorry this doesn't work for the 2100s onwards!
    """
    return '20' + short_name[:2] + '-' + short_name[2:]
314ef85571af349e2e31ab4d08497a04e19d4118
5,658
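A minimal usage sketch for get_long_season_name above (illustrative, not part of the dataset record):

print(get_long_season_name("1718"))  # -> "2017-18"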
import json

def import_data():
    """Import datasets to internal memory"""
    with open('data/names.json') as f:
        data_names = json.load(f)
    with open('data/issues.json') as f:
        data_issues = json.load(f)
    with open('data/disasters.json') as f:
        data_disasters = json.load(f)
    with open('data/options.json') as f:
        data_options = json.load(f)
    return data_names, data_issues, data_disasters, data_options
11db10c2c56b6b714ecffa57510c9a79abfa1d86
5,659
import os

def get_current_ingest_id():
    """Get the uuid of the active ingest

    :return: the id of the active ingest
    :rtype: uuid
    """
    return os.getenv('JETA_CURRENT_INGEST_ID')
31299e8422e07fe38bc7a850033cf128a9a27749
5,660
import hashlib

def hashlib_mapper(algo):
    """
    :param algo: string
    :return: hashlib object for the specified algorithm

    Algorithms available in python3 but not in python2: sha3_224, sha3_256,
    sha3_384, sha3_512, blake2b, blake2s, shake_128, shake_256
    """
    algo = algo.lower()
    supported = {
        "md5", "sha1", "sha224", "sha256", "sha384", "sha512",
        "sha3_224", "sha3_256", "sha3_384", "sha3_512",
        "blake2b", "blake2s", "shake_128", "shake_256",
    }
    if algo not in supported:
        raise Exception("Unsupported hashing algorithm: %s" % algo)
    return getattr(hashlib, algo)()
56830caccd0b3f88982bfe09a8789002af99c1e7
5,661
import torch

def compute_ctrness_targets(reg_targets):
    """
    :param reg_targets:
    :return:
    """
    if len(reg_targets) == 0:
        return reg_targets.new_zeros(len(reg_targets))
    left_right = reg_targets[:, [0, 2]]
    top_bottom = reg_targets[:, [1, 3]]
    ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
        (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
    return torch.sqrt(ctrness)
538a63b6adcd73fbd601d6e61eea5f27642746fa
5,662
import torch

def bw_transform(x):
    """Transform rgb separated balls to a single color channel."""
    x = x.sum(2)
    x = torch.clamp(x, 0, 1)
    x = torch.unsqueeze(x, 2)
    return x
3ecec3ada4b75486ff96c30890e8a3e173ca7d31
5,663
import random

def generate_utt_pairs(librispeech_md_file, utt_pairs, n_src):
    """Generate pairs of utterances for the mixtures."""
    # Create a dict mapping speaker ID to all utterance indices in the metadata file
    utt_dict = {}
    speakers = list(librispeech_md_file["speaker_ID"].unique())
    for speaker in speakers:
        utt_indices = librispeech_md_file.index[librispeech_md_file["speaker_ID"] == speaker]
        utt_dict[speaker] = list(utt_indices)
    while len(speakers) >= n_src:
        # Select random speakers
        selected = random.sample(speakers, n_src)
        # Select a random utterance from each speaker
        utt_list = []
        for speaker in selected:
            utt = random.choice(utt_dict[speaker])
            utt_list.append(utt)
            utt_dict[speaker].remove(utt)
            if not utt_dict[speaker]:  # no more utts for this speaker
                speakers.remove(speaker)
        utt_pairs.append(utt_list)
    return utt_pairs
9079fa35b961de053c86b08527085e8eb84609b8
5,664
def generate_discord_markdown_string(lines):
    """
    Wraps a list of messages into a discord markdown block

    :param [str] lines:
    :return: The wrapped string
    :rtype: str
    """
    output = ["```markdown"] + lines + ["```"]
    return "\n".join(output)
1c0db2f36f4d08e75e28a1c024e6d4c35638d8f5
5,665
def wizard_active(step, current):
    """
    Return the proper classname for the step div in the badge wizard.

    The current step needs a 'selected' class while the following step needs
    a 'next-selected' class to color the tip of the arrow properly.
    """
    if current == step:
        return 'selected'
    elif (current + 1) == step:
        return 'next-selected'
2daad3f7651df7609f3473af698e116ce419c9df
5,666
def ez_admin(admin_client, admin_admin, skip_auth):
    """A Django test client that has been logged in as admin.

    When EZID endpoints are called via the client, a cookie for an active
    authenticated session is included automatically. This also sets the admin
    password to "admin".

    Note: Because EZID does not use a standard authentication procedure, it's
    also necessary to pull in skip_auth here.
    """
    admin_client.login(username='admin', password='admin')
    # log.info('cookies={}'.format(admin_client.cookies))
    return admin_client
0b2ac749a690ad5ac0dc83ca9c8f3905da5a016b
5,667
import os

def request_csv_rows(settings, courseware_objects):
    """Fake coupon request spreadsheet data rows (loaded from CSV)"""
    fake_request_csv_filepath = os.path.join(
        settings.BASE_DIR, "sheets/resources/coupon_requests.csv"
    )
    with open(fake_request_csv_filepath) as f:
        # Return all rows except for the header
        return [line.split(",") for i, line in enumerate(f.readlines()) if i > 0]
b27a27f5c328e80efcba66c09c6d8ef278525858
5,669
def _maybe_encode_unicode_string(record):
    """Encodes unicode strings if needed."""
    if isinstance(record, str):
        record = bytes(record, "utf-8").strip()
    return record
2621056ba77fd314b966e3e0db08887da53e3803
5,671
def merge_extras(extras1, extras2):
    """Merge two iterables of extras into a single sorted tuple. Case-sensitive."""
    if not extras1:
        return extras2
    if not extras2:
        return extras1
    return tuple(sorted(set(extras1) | set(extras2)))
0383e0e99c53844f952d919eaf3cb478b4dcd6d1
5,673
import torch

def _relu_3_ramp(x):
    """Relu(x) ** 3 ramp function

    returns
        f(x) = relu(x) ** 3
        df/dx(x) = 3 * relu(x) ** 2
    """
    rx = torch.relu(x)
    ramp = rx.pow(3)
    grad = rx.pow(2) * 3.0
    return ramp, grad
56dfc37ef81209590e020f0c67f8204a6d8d338a
5,674
def get_shared_prefix(w1, w2):
    """Get the longest string that both w1 and w2 start with."""
    shared = ""
    # Iterate up to and including the shorter length (the original stopped one
    # short, dropping the last character when one word is a prefix of the other).
    for i in range(1, min(len(w1), len(w2)) + 1):
        if w1[:i] != w2[:i]:
            return shared
        shared = w1[:i]
    return shared
d52850f038bc6bfe65878e3a58d7009e563af0a0
5,675
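A minimal usage sketch for get_shared_prefix above, with the off-by-one fix applied (illustrative, not part of the dataset record):

print(get_shared_prefix("apple", "apply"))  # -> "appl"
print(get_shared_prefix("ab", "abc"))       # -> "ab"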
def cls_token(idx):
    """
    Function helps in renaming cls_token weights
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
7f07ca4fe04326b4895e3fd41a3830dddc147f8a
5,676
def collapse(intlist):
    """Collapse a list of int values of chars into the int they represent."""
    f = ''
    for i in intlist:
        f += chr(i)
    return int(f)
7b92a456e78c8b6d8bbdc5af805b22728865ec63
5,678
def sync_from(src, dest):
    """Synchronize a directory from Dropbox."""
    return False
fe1339c59c25044bdf48e50e12cab80aa9a7ec63
5,682
def filter_timeline_actions(tim, **filters):
    """tim (dict) contains info for one TIM"""
    actions = tim['timeline']
    for field, required_value in filters.items():
        if field == 'time':
            # Times are given as closed intervals: either [0,134] or [135,150]
            acceptable_times = range(required_value[0], required_value[1] + 1)
            actions = filter(lambda action: action['time'] in acceptable_times, actions)
        else:
            # Removes actions for which action[field] != required_value
            actions = filter(lambda action: action[field] == required_value, actions)
    # filter returns an iterable object
    actions = list(actions)
    return actions
9f354e7d9d40b3ad31fd9d7cb256598b1fde11ba
5,683
def is_cross_not_claimed(event_list, team):
    """Returns whether the event list has a cross-not-claimed Goalkeeper
    event; a cross not successfully caught."""
    cnc = False
    for e in event_list[:1]:
        if e.type_id == 53:
            cnc = True
    return cnc
c63080119d057f9eb8dfc950724f67f8e4d6be86
5,684
import numpy
import math

def _do_monte_carlo_run(pools, lulc_counts):
    """Do a single Monte Carlo run for carbon storage.

    Returns a dict with the results, keyed by scenario,
    including results for sequestration.
    """
    # Sample carbon-per-grid-cell from the given normal distribution.
    # We sample this independently for each LULC type.
    lulc_carbon_samples = {}
    for lulc_id, distribution in pools.items():
        if not distribution['variance']:
            lulc_carbon_samples[lulc_id] = distribution['total']
        else:
            lulc_carbon_samples[lulc_id] = numpy.random.normal(
                distribution['total'], math.sqrt(distribution['variance']))
    # Compute the amount of carbon in each scenario.
    results = {}
    for scenario, counts in lulc_counts.items():
        # Amount of carbon is the sum across all lulc types of:
        #   (number of grid cells) x (carbon per grid cell)
        results[scenario] = sum(
            count * lulc_carbon_samples[lulc_id]
            for lulc_id, count in counts.items())
    # Compute sequestration.
    for scenario in ['fut', 'redd']:
        if scenario not in results:
            continue
        results['sequest_%s' % scenario] = results[scenario] - results['cur']
    return results
03ddcf90c135ce02f7692557f31bb530390d2a7a
5,685
from typing import Dict

def update_args(args: Dict, inv_file: str, conf_file: str) -> Dict:
    """Add the inventory file and config file in the correct spots inside the arguments.

    Args:
        args (Dict): controller args
        inv_file (str): inventory file
        conf_file (str): config file

    Returns:
        Dict: updated args
    """
    args['inventory'] = inv_file
    args['config'] = conf_file
    return args
c1cd377785f0af26740d5cecd73186caaa6c79b6
5,688
def _add_vessel_class(df):
    """Creates 'Class' column based on vessel LOA ft."""
    df.loc[:, "Class"] = "Panamax"
    post_row = (df.loc[:, "LOA ft"] > 965)
    post_loc = df.loc[post_row, :].index
    post_pan = df.index.isin(post_loc)
    df.loc[post_pan, "Class"] = "Post-Panamax"
    return df
5abec9f0bee8d7d6c734100c64a7624fdb5fb672
5,689
def findOverlapOrNearest(gs, ts, tree, start, end):
    """
    First check direct overlap with a TSS; if there are no (or multiple)
    items, then get the closest one.

    @param gs: {tss: cLoops2.ds.Gene}, tss (an int) is the key
    @param ts: [tss]
    @param tree: KDTree built from TSSs
    @param start: query start
    @param end: query end
    return gene and distance
    """
    # step 1, find overlaps
    rs = set()
    for i in range(start, end + 1):
        if i in gs:
            rs.add(gs[i])
    if len(rs) > 0:
        rs = list(rs)
        return rs, [0] * len(rs)
    # find the nearest one
    else:
        d, i = tree.query([(start + end) / 2], k=1)
        g = gs[ts[i][0]]
        # d = ts[i][0] - (start + end) / 2
        d = int(d)
        return [g], [d]
8c3c8c85a22063a1f8f7ffcfdb832dd4b357a485
5,692
import os
import logging
import re

def get_user_check_cls_def(user_def_file):
    """Get the 'class UserCheck(object):' statement from userCheck.py.

    Args:
        user_def_file: The path of userCheck.py.

    Returns:
        cls_str: The 'class UserCheck' statement of userCheck.py.
    """
    if not os.path.isfile(user_def_file):
        logging.error("%s does not exist or is not a file", user_def_file)
        return
    logging.info("merge user check definitions from script %s", user_def_file)
    cls_str = "\n"
    is_cls_code = False
    idx = 0
    try:
        with open(user_def_file, encoding="utf-8") as file:
            lines = file.readlines()
            while idx < len(lines):
                line = lines[idx]
                # get code for class UserCheck
                if re.match(r"^class UserCheck\(object\):\s+$", line):
                    is_cls_code = True
                    cls_str += line
                    idx += 1
                    continue
                if is_cls_code:
                    if not re.match(r"\s+", line):
                        break
                    cls_str += line
                idx += 1
    except IOError as error:
        logging.error("can't process user define file %s, because %s", user_def_file, error)
    return cls_str
258379602cb8188cd1191ad59473f764287ad9e9
5,693
def makeTrans(tup: tuple):
    """Build a word definition in dict format."""
    def toStr(s):
        return s if type(s) is str else str(s, encoding="utf-8")

    res = None
    if len(tup) >= 4:
        res = {
            "word": toStr(tup[0]),
            "phonetic": toStr(tup[1]),
            "translation": toStr(tup[2]),
            "exchange": toStr(tup[3]),
        }
    # log("makeTrans, res: %s" % (res))
    return res
f5c578c83f0256cc8fa64abff2335de135ae9bfc
5,694
def mean(values):
    """Function that returns the mean of a list"""
    total = 0
    for num in values:
        total += num
    return total / len(values)
972544f64f87860a078405a4938226f7fab307c2
5,695
import torch

def cpu():
    """Defined in :numref:`sec_use_gpu`"""
    return torch.device('cpu')
899a95ed4b806280eda315c17a2d3e6d3f94e039
5,696
import argparse

def parse_args():
    """
    args for fc testing.
    """
    parser = argparse.ArgumentParser(description='PyTorch SiamFC Tracking Test')
    parser.add_argument('--arch', default='Ocean', type=str, help='backbone architecture')
    parser.add_argument('--resume', default='snapshot/OceanV19on.pth', type=str, help='pretrained model')
    parser.add_argument('--video', default='./dataset/soccer1.mp4', type=str, help='video file path')
    parser.add_argument('--online', default=True, type=bool, help='use online or offline model')
    parser.add_argument('--save', default=True, type=bool, help='save pictures')
    parser.add_argument('--init_bbox', default=None, help='bbox in the first frame None or [lx, ly, w, h]')
    args = parser.parse_args()
    return args
b8020b258fbd4080d9047c55d949c063453c7c64
5,699
import os

def restore_uuid_file_name(filepath):
    """Remove the uuid in the filename and return the filepath with its file extension."""
    orig = os.path.splitext(filepath)
    return f'{orig[0][:-33]}{orig[1]}'
ba49ab8941794e7e0ede50019ff10af748ea6bcd
5,700
import os

def get_file_size(file_name):
    """Returns the size of the file."""
    with open(file_name, "r") as fh:
        fh.seek(0, os.SEEK_END)
        return fh.tell()
5da21777b3859c144a22ba53ff44486bda828e8e
5,702
def is_auto(item):
    """
    Checks if a parameter should be automatically determined
    """
    if isinstance(item, float):
        if item == 9999.9:
            return True
    elif isinstance(item, str):
        if 'auto' in item.lower():
            return True
    return False
fe6320adef43c51cdffd5b5d4a0bf34ac43d9c5a
5,703
def predict_by_lr_model(test_feature, lr_model):
    """predict by lr_model (using the sklearn instance method)"""
    result_list = []  # stores the probability of label 1 for each sample
    prob_list = lr_model.predict_proba(test_feature)
    for index in range(len(prob_list)):
        # index 0 holds the probability of label 0, index 1 the probability of label 1
        result_list.append(prob_list[index][1])
    return result_list
03ea185aa4398e8ccb7449d9e32006dd391e9c13
5,706
def read_accelerometer(serial, calibration):
    """
    Reads the raw values from the Arduino, parses them into separate
    variables and uses the calibration data to normalize the data

    Args:
        serial: a reference to the serial connection with the Arduino
        calibration: a reference to the calibration object that holds the
            values from the accelerometer calibration process

    Returns:
        (x_cal, y_cal, z_cal): a tuple of the normalized data
    """
    components = serial.read_str()
    # parses the string from the Arduino into three separate variables
    x_raw, y_raw, z_raw = tuple(map(float, components.split(',')))
    # normalizes the data using the calibration information
    x_cal = (x_raw - calibration.offset[0]) / (calibration.gain[0])
    y_cal = (y_raw - calibration.offset[1]) / (calibration.gain[1])
    z_cal = (z_raw - calibration.offset[2]) / (calibration.gain[2])
    return (x_cal, y_cal, z_cal)
3c5537e2a017f57dca8dccd24c2ba083a9c47345
5,708
def get_access(name):
    """Get access based on name.

    In Python, __var__ refers to private access, _var refers to protected
    access, and var refers to public access.
    """
    assert isinstance(name, str), "Expecting name to be a string"
    if len(name) > 4 and "__" == name[:2] and "__" == name[-2:]:
        return "PRIVATE"
    elif len(name) > 1 and name[0] == "_":
        return "PROTECTED"
    else:
        return "PUBLIC"
ffe072ed1820ce0536533a5882af1e1270780744
5,709
def run_query_series(queries, conn):
    """
    Iterates through a list of queries and runs them through the connection

    Args:
    -----
        queries: list of strings or tuples containing (query_string, kwargs)
        conn: the triplestore connection to use
    """
    results = []
    for item in queries:
        qry = item
        kwargs = {}
        if isinstance(item, tuple):
            qry = item[0]
            kwargs = item[1]
        result = conn.update_query(qry, **kwargs)
        # pdb.set_trace()
        results.append(result)
    return results
7a3e920663222b57233e9a01d1b3cacb039a02eb
5,710
def get_domain_id_field(domain_table):
    """
    A helper function to create the id field

    :param domain_table: the cdm domain table
    :return: the id field
    """
    return domain_table + '_id'
5805da82b4e57d14d4105d92a62cf4b5cc4bc3f2
5,711
def insertion_sort(arr):
    """
    Returns the list 'arr' sorted in nondecreasing order in O(n^2) time.
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j = j - 1
        arr[j + 1] = key
    return arr
cafd83cd31cbadcbc0a5c3aaff7d21f3ae907083
5,713
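A minimal usage sketch for insertion_sort above (illustrative, not part of the dataset record; note the list is sorted in place):

print(insertion_sort([5, 2, 4, 6, 1, 3]))  # -> [1, 2, 3, 4, 5, 6]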
def index():
    """Root route test"""
    return "Weights route"
c2a609f067a8155f16bd2a638a7a5c9f399a1575
5,714
def string():
    """String representation."""
    return "{:s}".format('something')
d13ae4fe229f767c515b0f0d6439ac61c6bfdbe8
5,717
def vertical_move(t, v_speed=2/320):
    """Probe moves vertically at v_speed [cm/s]"""
    return 0.*t, 0*t, v_speed*t
eb6a066bf6b6659728647c78dd7673a3d45b250d
5,718
import numpy

def dummy_image():
    """Create a dummy image"""
    x = numpy.linspace(-1.5, 1.5, 1024)
    xv, yv = numpy.meshgrid(x, x)
    signal = numpy.exp(-(xv ** 2 / 0.15 ** 2 + yv ** 2 / 0.25 ** 2))
    # add noise
    signal += 0.3 * numpy.random.random(size=signal.shape)
    return signal
8cbf5f31cde69b8ac775114277cee8f88d6dd932
5,719
import os

def GetPicList(basedir):
    """
    base_dir -> batch1 -> we -> want -> these -> images
             -> batch2
    """
    filename = ''
    for name in os.listdir(basedir):
        if not name.startswith('.'):
            filename = name
            break
    if not filename:
        raise ValueError("Couldn't find any non-hidden directories in basedir")
    pic_list = os.listdir(os.path.join(basedir, filename))
    # Strip the '.tif' extension from each name. (The original reassigned the
    # loop variable inside a for loop, which had no effect on the list.)
    pic_list = [pic.replace('.tif', '') for pic in pic_list]
    return pic_list
de671e7f336e59999f89dd8f3ead2d4bfb059907
5,720
import time

def format_time(record):
    """Format time to ISO 8601.

    https://en.wikipedia.org/wiki/ISO_8601
    """
    utc_time = time.gmtime(record.created)
    time_string = time.strftime('%Y-%m-%d %H:%M:%S', utc_time)
    return '%s.%03dZ' % (time_string, record.msecs)
ea07736965711a214a738f5443f68cf02e20fcb2
5,722
def range_to_number(interval_str):
    """Converts "X-Y" -> "X"."""
    if '-' not in interval_str:
        return int(interval_str)
    # If the first character is '-', X is a negative number
    if interval_str.startswith('-'):
        number = '-' + interval_str.split('-')[1]
    else:
        number = interval_str.split('-')[0]
    if number[-1] == 'M':
        return int(round(float(number[:-1]) * 1000000))
    elif number[-1] == 'B':
        return int(round(float(number[:-1]) * 1000000000))
    elif '.' in number:
        return float(number)
    else:
        return int(number)
562031503241cc37b1b6df5dd657f2f2d90b79a3
5,723
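A minimal usage sketch for range_to_number above (illustrative, not part of the dataset record):

print(range_to_number("10-20"))    # -> 10
print(range_to_number("1.5M-2M"))  # -> 1500000
print(range_to_number("-5-10"))    # -> -5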
import os

def remove_and_create_dir(path):
    """System call to rm -rf and then re-create a dir"""
    dir = os.path.dirname(path)
    print('attempting to delete ', dir, ' path ', path)
    if os.path.exists(path):
        os.system("rm -rf " + path)
    os.system("mkdir -p " + path)
    return path
5921e55e799580fdb0a3bfea91b0589f60bdbafc
5,724
import time
import os
import shutil

def setup_testrun_dir():
    """Sets up a testrun_* directory in the cwd and returns the path to it"""
    test_run = "testrun_{}".format(int(time.time()))
    os.mkdir(test_run)
    this_files_dir = os.path.dirname(os.path.realpath(__file__))
    config_templates = os.path.join(this_files_dir, "integration", "config")
    os.mkdir(os.path.join(test_run, "runfolders"))
    shutil.copy2(os.path.join(config_templates, "app.config"), test_run)
    shutil.copy2(os.path.join(config_templates, "logger.config"), test_run)
    return os.path.realpath(test_run)
f71ca502677ececfa1cec9f97b4a13d9426dce23
5,725
def literal_label(lit):
    """Invent a nice label name for the given literal"""
    return '{}_{}'.format(lit.function.name, lit.name)
14a22d989ee9f07e00e66d1340b946d385d677fd
5,726
def move(board):
    """Queries the user to move. Returns false if the user puts in an invalid
    input or move, returns true if the move was successful"""
    start_input = input("MOVE WHICH PIECE? ")
    if not start_input.isdigit():
        return False
    start = int(start_input)
    if start not in board or board[start] != "!":
        return False
    end_input = input("TO WHERE? ")
    if not end_input.isdigit():
        return False
    end = int(end_input)
    if end not in board or board[end] != "O":
        return False
    difference = abs(start - end)
    center = (end + start) // 2  # integer midpoint, valid because start and end share parity here
    if (
        (difference == 2 or difference == 18)
        and board[end] == "O"
        and board[center] == "!"
    ):
        board[start] = "O"
        board[center] = "O"
        board[end] = "!"
        return True
    else:
        return False
3377b4f349c9519eff4ede707d10e08038e9d7fc
5,728
def _total_probe_count_without_interp(params, probe_counts):
    """Calculate a total probe count without interpolation.

    This assumes that params are keys in the datasets of probe_counts.

    The result of ic._make_total_probe_count_across_datasets_fn should give
    the same count as this function (if params are keys in the datasets of
    probe_counts). But this uses probe_counts directly and can be used as a
    sanity check -- i.e., it does not do any interpolation.

    Args:
        params: parameter values to use when determining probe counts;
            params[i] is the (i % N)'th parameter of the (i/N)'th dataset,
            where N is the number of datasets
        probe_counts: dict giving number of probes for each dataset and
            choice of parameters

    Returns:
        total number of probes across all datasets, according to the given
        values of params
    """
    num_datasets = len(probe_counts)
    # The total number of parameters must be a multiple of the number
    # of datasets
    assert len(params) % num_datasets == 0
    num_params = int(len(params) / num_datasets)
    s = 0
    for i, dataset in enumerate(sorted(probe_counts.keys())):
        p = tuple(params[num_params * i + j] for j in range(num_params))
        s += probe_counts[dataset][p]
    return s
0973e667dbf1fc3bdf476791cbf709549230f94b
5,729
import os
import argparse

def existing_file(path):
    """Checks if a file exists.

    Returns:
        str: The path to the file.

    Raises:
        argparse.ArgumentTypeError: If a path argument does not exist.
    """
    if not os.path.isfile(path):
        raise argparse.ArgumentTypeError(
            'No such file or directory: "%s"' % path)
    return path
64ae432231d71ec98132b7a32be149f9f5a192dd
5,730
from typing import Any
import math

def make_divisible(x: Any, divisor: int):
    """Returns x rounded up to be evenly divisible by divisor."""
    return math.ceil(x / divisor) * divisor
bfbcfb334777a6c7214f16aa0fadd56906e2b7bc
5,731
def select_data(all_tetrode_data, index):
    """
    Select tetrode data by trial indices.

    :param all_tetrode_data: (list of 4d numpy arrays) each of format
        [trial, 1, neuron + tetrode, time]
    :param index: (1d numpy array) trial indices
    :return: (list of 4d numpy arrays) selected subset of tetrode data
    """
    current_data = []
    for x in all_tetrode_data:
        current_data.append(x[index, :, :, :])
    return current_data
5a883771ef499e0b82e0d3ac5b86550180760e13
5,733
from functools import reduce

def rec_hasattr(obj, attr):
    """
    Recursive hasattr.

    :param obj: The top-level object to check for attributes on
    :param attr: Dot delimited attribute name

    Example::

        rec_hasattr(obj, 'a.b.c')
    """
    try:
        reduce(getattr, attr.split('.'), obj)
    except AttributeError:
        return False
    else:
        return True
b1a9b12f54abb93202a5b41c950f761986307170
5,735
def not_shiptoast_check(self, message):
    """Checks whether the message object is not in a shiptoast chat."""
    if (message.channel.id in self.settings["shiptoast"]) or (message.channel.name in self.settings["shiptoast"]):
        return False
    else:
        return True
b951ee6be9d9173065f340eda08e997b83964fe4
5,736
from typing import Dict
from typing import Any

def __create_notification(title: str, content: str) -> Dict[str, Any]:
    """
    Creates a notification "object" from the given title and content.

    :param title: The title of the notification.
    :param content: The content of the notification.
    :returns: A dictionary representing a notification "object".
    """
    return {"title": title, "content": content}
484abcc2afcb8f726811e36516572bc5c302a415
5,737
import os
import csv

def csv_find(filein, data):
    """Finds and returns the row number of the element given, in a CSV file."""
    if not os.path.isfile(filein):
        return -1
    with open(filein, 'rt') as fi:
        reader = csv.reader(fi, delimiter=',')
        for row in reader:
            hashout = row[1]  # location of hash
            if hashout == data:
                return row[0]
    return -1
524e006720ebe3043fbfe539c45e42d19b77250b
5,738
import json

def updateResourceJsons(swagger, examplesDict, dirName):
    """Update the Resource JSON files to include examples from the other folder."""
    try:
        # Iterate through all resources in the output folder
        for id in range(len(swagger['tags'])):
            resourceName = swagger['tags'][id]['name']
            if resourceName == 'CapabilityStatement':
                continue
            # open the swagger subset which was initially created in 'AnnotateFiles.py'
            with open('./output/' + resourceName + '.json', encoding='utf8') as f:
                swaggerSubset = json.load(f)
            resourceExamples = {}
            # Iterate through all examples for the resource
            for example in examplesDict[resourceName]:
                with open(dirName + "/" + example, encoding='utf8') as f:
                    exampleContents = json.load(f)
                # Add the example keyed by the file name
                resourceExamples[example] = {"value": exampleContents}
            swaggerSubset['paths']['/' + resourceName]['post']['requestBody']['content']['application/fhir+json']['examples'] = resourceExamples
            swagger['paths']['/' + resourceName]['post']['requestBody']['content']['application/fhir+json']['examples'] = resourceExamples
            # Save the file with 'w' to overwrite the current outputted file
            with open('./output/' + resourceName + '.json', 'w', encoding='utf8') as f:
                json.dump(swaggerSubset, f)
        # Return status
        with open('./output/openapi3.json', 'w', encoding='utf8') as f:
            json.dump(swagger, f)
        return "SUCCESS"
    except Exception as e:
        print("Error during saving")
        print(e)
        return "ERROR"
3d9a7a31e3875bb7c56d8dfbd26ca5b73039101b
5,739
import torch

def reparametisation_trick(mu, log_var, device):
    """
    :param mu: The mean of the latent variable to be formed (nbatch, n_z)
    :param log_var: The log variance of the latent variable to be formed (nbatch, n_z)
    :param device: CPU or GPU
    :return: latent variable (nbatch, n_z)
    """
    noise = torch.normal(mean=0, std=1.0, size=log_var.shape).to(torch.device(device))
    z = mu + torch.mul(torch.exp(log_var / 2.0), noise)
    return z
9cb646132f49fa79b6a8690d10fd188968931978
5,741
def mro_hasattr(cls: type, attr: str) -> bool:
    """Check if an attribute exists in a type's class hierarchy

    Args:
        cls (type): The type
        attr (str): The attribute

    Returns:
        bool: True if has the attribute.

    Raises:
        TypeError: Not called on a type
    """
    if not isinstance(cls, type):
        raise TypeError(f"mro_hasattr can only be used on types, got {type(cls)}")
    for klass in cls.mro()[1:]:
        if hasattr(klass, attr):
            return True
    return False
cfc41693e3d3321bcb63dae079abf2e768f97905
5,742
def method2():
    """Provide an example of doc strings that are too long.

    Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua.
    """  # noqa W505: doc line too long (127 > 100 characters) (auto-generated noqa)
    return 7
689c50c2cfb62d39cd35eec125813830c6068fdb
5,743
def epochJulian2JD(Jepoch):
    """
    ----------------------------------------------------------------------
    Purpose:   Convert a Julian epoch to a Julian date
    Input:     Julian epoch (nnnn.nn)
    Returns:   Julian date
    Reference: See JD2epochJulian
    Notes:     e.g. 1983.99863107 converts into 2445700.5
               Inverse of function JD2epochJulian
    ----------------------------------------------------------------------
    """
    return (Jepoch - 2000.0) * 365.25 + 2451545.0
2738940ad390f979317177984c9120b34fa7d2af
5,744
import os

def _isfile(path):
    """Variant of os.path.isfile that is somewhat type-resilient."""
    if not path:
        return False
    return os.path.isfile(path)
1e5c6e993008b7256c22fe38af174fe87fd01d20
5,745
import inspect

def get_classes(mod):
    """Return a list of all classes in module 'mod'"""
    return [
        key
        for key, _ in inspect.getmembers(mod, inspect.isclass)
        if key[0].isupper()
    ]
be04546650a6243a3abfe4053a4dcaa9d71f85d7
5,746