content: string (35 to 416k chars)
sha1: string (40 chars)
id: int64 (0 to 710k)
import json


def parse_line(header, line):
    """Parse one line of data from the message file.

    Each line is expected to contain chunk key - comma - tile key
    (CSV style).

    Args:
        header (dict): Data to join with contents of line to construct a full message.
        line (string): Contents of the line.

    Returns:
        (string): JSON encoded data ready for enqueuing.

    Raises:
        (RuntimeError): if less than 2 columns found on a line.
    """
    msg = {}
    msg['job_id'] = header['job_id']
    msg['upload_queue_arn'] = header['upload_queue_url']
    msg['ingest_queue_arn'] = header['ingest_queue_url']

    tokens = line.split(',')
    if len(tokens) < 2:
        raise RuntimeError('Bad message line encountered.')

    msg['chunk_key'] = tokens[0].strip()
    msg['tile_key'] = tokens[1].strip()

    return json.dumps(msg)
452dd80f84a35f6e3532330155bade7f424c102a
9,179
def filequote(text):
    """Transform text to file name."""
    trans = str.maketrans(' /()', '____')
    return text.translate(trans)
dd6237fe6c66f60c00c8a569636adf17d45d66cc
9,180
def characteristic(text, ontology=None):
    """Make an ENA Biosamples characteristic."""
    if ontology:
        return [{"text": text, "ontologyTerms": [ontology]}]
    else:
        return [{"text": text}]
e7f175a1ef8137b4c0e19a28a5d74055d9363c66
9,181
def _strip_trailing_ffs(binary_table):
    """Strip all FFs down to the last 32 bytes (terminating entry)."""
    while binary_table.endswith("\xFF" * 64):
        binary_table = binary_table[0:len(binary_table) - 32]
    return binary_table
43c14297da709f78316e460180c6c4515650f34d
9,182
import struct


def byte_to_float(b1, b2, b3, b4):
    """
    A function to get a 32 bit float from 4 bytes read in order [b1, b2, b3, b4]

    :param b1: first byte
    :param b2: second byte
    :param b3: third byte
    :param b4: fourth byte
    :return: the bytes b1, b2, b3, b4 unpacked as a little-endian float
        using the 'struct' module
    """
    arr = bytearray([b1, b2, b3, b4])
    # struct.unpack returns a 1-tuple; take element 0 to return a plain float
    return struct.unpack('<f', arr)[0]
962480d1b9d2c50e3196b5480e9c62bf696a8f0d
9,184
import re


def get_used_by_from_comments(lines: "list[str]") -> "tuple[int, list[str]]":
    """Read the module-used-by block comment from a module file.

    Args:
        lines (list[str]): The content of the module file as a list of strings.

    Returns:
        tuple[int, list[str]]: The integer indicates the last line number of
        the module-used-by block comment. The list indicates the assembly
        file names from the block comment.
    """
    line_count = len(lines)
    line_no = 0
    matches = []

    while line_no < line_count:
        line = lines[line_no].strip()
        match = re.match(r"^//\s*\*?(.+\.adoc)", line)
        if match:
            matches.append(match.group(1).strip())
        if not re.match(r"^//", line):
            break
        line_no += 1

    return line_no, matches
6ac30266524373d0de7cf7bb9ad9fd8dcd1933a2
9,185
from pathlib import Path


def construct_target_path(participant_name, model_name, roi):
    """Construct path to save results to."""
    project_root = Path(__file__).parents[1]
    return (project_root / "results" / participant_name
            / f"model_{model_name}" / f"roi_{roi}")
072681647a3362563829c25d4890aa13425cff2c
9,186
import codecs


def txidFromBroadcast(hexStr):
    """Extracts the hex txid from a broadcast in hex."""
    # The prevout txid is the first part of the broadcast data
    # in serialised form.  But we need to reverse the bytes.
    hexRev = hexStr[:64]
    bytesRev = codecs.decode(hexRev, "hex")
    return bytesRev[::-1].hex()
96690f4fdef5f0cff857188045696e427914b887
9,187
def filter_properties(person, PERSON_PROPERTIES):
    """
    Extract specific properties of the given person into a new dictionary.

    Parameters:
        person (dict): the dictionary containing properties of a person.
        PERSON_PROPERTIES (tuple): a tuple containing the characteristics of a person.

    Returns:
        record (dict): a dictionary containing filtered key-value pairs
            of characteristics of the person.
    """
    record = {}
    for key, val in person.items():
        if key in PERSON_PROPERTIES:
            record[key] = val
    return record
2a3ec4ab32c5d99d475ebffaefe0d8c40ce137af
9,190
def get_chain_hash(contract, s, u_i, s_i, a, b, bytes_30, dyn_bytes, bar_uint, arr) -> bytes:
    """Uses the contract to create and hash a Foo struct with the given parameters."""
    result = contract.functions.hashFooStructFromParams(
        s, u_i, s_i, a, b, bytes_30, dyn_bytes, bar_uint, arr
    ).call()
    return result
2faeb03eff5ee1a4e564a50f8bff78fb99cdd169
9,191
import math


def f1(myList):
    """Solves x = sqrt((120 - y) / 8)"""
    return math.sqrt((120 - myList[1]) / 8)
    # return 120 - 8 * myList[0]**2
5d84417d52ec3b667862a3750f6d46f74964a586
9,193
def _filter_by_variance(frame, threshold=0.005):
    """Removes from frame any columns with a relative variance beneath the
    given threshold.
    """
    # first, for each column X, compute relative variance as
    # var((X - min(X)) / (max(X) - min(X)))
    numerators = frame.subtract(frame.min(axis='index'), axis='columns')
    denominators = frame.max(axis='index') - frame.min(axis='index')
    quotients = numerators.div(denominators, axis='columns')
    variances = quotients.var(axis='index', ddof=0)

    # now apply the filter
    return frame.loc[:, variances.map(lambda x: x > threshold)]
f4225fe41adab6f5f1a0189f9eaeae071109a5ee
9,194
def is_not_csv_file(filename):
    """Return an indication of whether the file entered is the
    clusterserviceversion (csv) file.
    """
    return not filename.endswith('clusterserviceversion.yaml')
3afcecfee95f300b9a5e6128f33a58dcdfc2c443
9,196
import torch


def load_snapshot(model_path):
    """
    Load snapshot

    :param model_path: path to snapshot
    :type model_path: str
    :return: built state
    :rtype: dict
    """
    state = torch.load(model_path)
    return state
bdeba078302b8c8c6ac39f156877ef58e91341ec
9,198
def create_input_list(pdb_list_fname):
    """Create a list of tuples (pdb_id, chain) from a text file."""
    pdb_list = []
    with open(pdb_list_fname, 'r') as f:
        for record in f.read().splitlines():
            pdb_id, chain = record[:-1], record[-1]
            # check PDB ID and chain are valid
            if not pdb_id.isalnum() or len(pdb_id) != 4 or not chain.isalpha() or len(chain) != 1:
                continue
            pdb_list.append((pdb_id, chain))
    return pdb_list
d02588ec1d2ff55454782b337ac15cf9e6f67a80
9,200
def _query_worrying_level(time_elapsed, state):
    """
    Gives a "worrying" level to a query.

    For instance, long times waiting for something to happen is bad.
    Very long times sending is bad too.
    Return a value between 0 and 1 rating the "worrying level".
    See http://dev.mysql.com/doc/refman/5.7/en/general-thread-states.html
    """
    # match state names case-insensitively; the original mixed raw and
    # lowered comparisons, which left some branches unreachable
    state_lower = state.lower()
    if state_lower in ('creating sort index', 'sorting result'):
        max_time = 60
    elif state_lower in ('creating table', 'creating tmp table', 'removing tmp table'):
        max_time = 180
    elif state_lower == 'copying to tmp table on disk':
        max_time = 60
    elif state_lower in ('executing', 'preparing'):
        max_time = 300
    elif state_lower == 'logging slow query':
        return 0.5
    elif state_lower == 'sending data':
        max_time = 600
    elif state_lower in ('sorting for group', 'sorting for order'):
        max_time = 60
    elif state_lower.startswith('waiting'):
        max_time = 600
    else:
        return 0

    if time_elapsed > max_time:
        return 1
    return float(time_elapsed) / max_time
383e36c75d68a9e975d48efc6c68deeee446c987
9,201
def dict_to_casl_rules(rules: dict):
    """
    Given a dict where the keys are the subject and the values are the
    actions, return a list of dicts ready to be serialized as JSON.

    :return: a list of {'subject': ..., 'actions': ...} dicts
    """
    perms = []
    for key, actions in rules.items():
        perms.append({
            'subject': key,
            'actions': actions
        })
    return perms
5d0f3dfd610a1cd7deb7f09a668e291997419b2a
9,202
def bbox_hflip(bboxes, img_width):
    """Horizontally flip the bboxes.

    Args:
        bboxes (ndarray): bbox ndarray [box_nums, 4]
        img_width (int): width of the image the boxes belong to
    """
    flipped = bboxes.copy()
    # mirror the x coordinates, then swap x_min/x_max so boxes stay well-formed
    flipped[..., 0::2] = img_width - bboxes[..., 0::2]
    flipped = flipped[..., [2, 1, 0, 3]]
    return flipped
00e45f69a517ccb15623afb813fc05ad1c7c7eee
9,203
import argparse


def create_arg_parser():
    """Creates and returns the ArgumentParser object."""
    parser = argparse.ArgumentParser(
        description='Parses git log output object for change-log generation.')
    parser.add_argument('-b', '--branch', default='dev',
                        help='current git branch checked out')
    return parser
167bcf54b079583b10e4ef72ff2c1a6e82ded6bc
9,204
def _pathjoin(a, b):
    """
    POSIX-like path join for Globus Transfer paths

    As with _normpath above, this is meant to behave correctly even on
    Windows systems
    """
    if not b:  # given "" as a file path
        return a
    elif b.startswith("/"):  # a path starting with / is absolute
        return b

    if a.endswith("/"):
        return a + b
    else:
        return a + "/" + b
20079d97be4e07499a9b0dfa80458a7e151826c3
9,205
def probability(vector, x, t):
    """
    Finds the probability of vector[x] in t occurrences.
    If x is not in vector then the probability is .001/t

    @param {Vector} vector
           {int} x
           {float} t
    @return {float}
    """
    t = t * 1.0
    # a zero count falls back to the .001/t smoothing value
    return vector[x] / t or 0.001 / t
bb6c731a157104a653669730be0569f555402167
9,206
def getNodeDictVlans(nodesInfo, hostname, switchName):
    """Get Node dictionary."""
    if not nodesInfo:
        return None, {}
    for _, nodeDict in list(nodesInfo['nodes'].items()):
        if nodeDict['hostname'] == hostname:
            for intf, intfDict in list(nodeDict['NetInfo'].items()):
                print(intfDict)
                if not isinstance(intfDict, dict):
                    print('Something is failing on agent. It did not send a dict!')
                    return None, {}
                if 'switch' in list(intfDict.keys()) and intfDict['switch'] == switchName:
                    return intf, intfDict
    return None, {}
1113f0eb1829c9e84791ed151ce05c7165168b10
9,207
def rob(nums):
    """
    You are a professional robber planning to rob houses along a street. Each
    house has a certain amount of money stashed; the only constraint stopping
    you from robbing each of them is that adjacent houses have security
    systems connected, which will automatically contact the police if two
    adjacent houses are broken into on the same night.

    Given a list of non-negative integers representing the amount of money in
    each house, determine the maximum amount of money you can rob tonight
    without alerting the police.

    Args:
        nums: list[int]

    Returns:
        int
    """
    # DP: r = best total if we rob the current house, nr = best if we don't
    r = nr = 0
    for x in nums:
        r_prev = r
        r = nr + x
        nr = max(r_prev, nr)
    return max(r, nr)

    # f(0): r = nums[0]; nr = 0
    # f(1): r = nums[1]; nr = f(0)
    # f(k) = max( f(k-2) + nums[k], f(k-1) )
9bfb631b2781bbf95fa299a6474e0b1fe36ac19b
9,209
def tamiz1(m):
    """Classic algorithm for the sieve of Eratosthenes."""
    l, n = [i for i in range(2, m + 1)], 2
    while n:
        for i in l[l.index(n) + 1:]:
            if i % n == 0:
                l.remove(i)
        if l.index(n) + 1 < len(l):
            n = l[l.index(n) + 1]
        else:
            return l
3063e5007360cfbbda53e10b84e7ca141473a552
9,210
import numpy
import warnings


def getMetrics(sector, symbols):
    """Returns a 2xN numpy.Array of metrics for the given symbols from the
    given sector.
    """
    metrics = [  # hard-coded for now, could easily be parameterized
        "Price Performance (52 Weeks)",
        "Standard Deviation (1 Yr Annualized)"
    ]
    table = numpy.zeros((2, len(symbols)))
    toDelete = []  # columns (symbols) to remove once populated
    for i, metric in enumerate(metrics):
        for j, symbol in enumerate(symbols):
            try:
                security = sector.getSecurity(symbol)
                table[i, j] = security[metric]
            except Exception as e:
                warnings.warn("Could not extract metric %s for symbol %s" % (metric, symbol))
                print(e)
                toDelete.append(j)
    for j in toDelete[::-1]:
        table = numpy.delete(table, j, 1)
    return table
df00d21031056bb2faf3ad9d420c308cdf075b5a
9,213
def single_text_phrase(context, slug=None, language=None):
    """
    To use this template tag you must enable one of the text_phrase
    context_processors. This template tag returns the first text phrase
    object if there is more than one. If you want a single text phrase in a
    specific language, set the language arg.

    example:
    {% load phrases_tags %}
    {% single_text_phrase "language" "en" as lang %}
    <p>{{ lang.text }}</p>
    """
    phrases = context.get('text_phrases', None)
    if not phrases:
        return None
    if not language:
        phrase = phrases.filter(slug=slug)
        return phrase.first()
    phrase = phrases.filter(slug=slug, language=language)
    return phrase.first()
7e9b5a28cbf1ae0215e201e3af0f22631aad9ac2
9,215
import os


def _find_files(root_dir, should_include):
    """
    Return a list of paths to all modules below the given directory.

    Arguments:
      should_include: a function that accepts a file path and returns True or False.
    """
    paths = []  # Return value.

    is_module = lambda path: path.endswith(".py")

    # os.walk() is new in Python 2.3
    # http://docs.python.org/library/os.html#os.walk
    for dir_path, dir_names, file_names in os.walk(root_dir):
        new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]
        new_paths = list(filter(is_module, new_paths))
        new_paths = list(filter(should_include, new_paths))
        paths.extend(new_paths)

    return paths
0f572880279a28914ad99f7635c0f573fa01044a
9,216
def linear_scale(input, in_low, in_high, out_low, out_high):
    """
    (number, number, number, number, number) -> float

    Linear scaling. Scales input in the range (in_high - in_low) to a value
    in the range (out_high - out_low). Returns the result.

    >>> linear_scale(0.5, 0.0, 1.0, 0, 127)
    63.5
    """
    in_range = (in_high - in_low)
    out_range = (out_high - out_low)
    result = (((input - in_low) * out_range) / in_range) + out_low
    return result
caeab8e992caca2dba96f48b0eb617fd361bb9eb
9,218
import os


def check_folder(folder):
    """Test if folder exists and is absolute."""
    if os.path.isdir(folder):
        if os.path.isabs(folder):
            return True
        else:
            raise ValueError("The path to the folder must be absolute")
    else:
        raise OSError("Can't find the path.")
e2d606ab5bb68e104c8896da753d2e76d6ac7697
9,219
def _error_matches_criteria(error, criteria):
    """
    Check if an error matches a set of criteria.

    Args:
        error: The error to check.
        criteria: A list of key value pairs to check for in the error.

    Returns:
        A boolean indicating if the provided error matches the given criteria.
    """
    for key, value in criteria:
        if error.get(key) != value:
            return False
    return True
8f52f7288fdefa496084b4faf689ed269360050a
9,220
import torch


def eval(device, model, datas, criterion):
    """Eval the model"""
    losses = 0
    model.eval()
    with torch.no_grad():
        for data, target in datas:
            output = model(data.to(device)).flatten()
            losses += criterion(output.flatten(), target.to(device)).item()
    return losses / len(datas.dataset)
bf9d71640922e3c3a9d9bcd0fc83bc37f6c2da7d
9,221
import re


def pad_punctuation_w_space(text: str) -> str:
    """Pad punctuation marks with space for separate tokenization."""
    result = re.sub(r'([:;"*.,!?()/\=-])', r" \1 ", text)
    result = re.sub(r"[^a-zA-Z]", " ", result)
    result = re.sub(r"\s{2,}", " ", result)
    # code for removing single characters
    result = re.sub(r"\b[a-zA-Z]\b", "", result)
    return result
8bdb82865d5e127e32d483f83246f4ad1b96b0be
9,222
def tensors2classlist(tensor, seq_lens):
    """
    Converts a 3d tensor (max(seq_len), batch_size, output_dim=1) to a 2d
    class list (list[batch_size * list[seq_len]])

    Arguments:
        tensor (torch.tensor): 3d padded tensor of different sequence lengths
            of shape (max(seq_lens), batch_size, output_dim=1)
        seq_lens (list[int]): length of each of the sequences without padding
            (i.e. onehottensor.shape[1] without padding)

    Returns:
        batch_list (list[list[int]]): list of class lists with each internal
            list corresponding to a sequence and each class in the internal
            list corresponding to a class for each step of the sequence (or
            in the sigmoid case, the probability of the positive class (1))
    """
    batch_list = []

    for idx in range(0, tensor.shape[1]):  # for every tensor sample in batch
        value_list = []
        tensor2d = tensor[:seq_lens[idx], idx, :]  # shape (seq_len, dim=1)
        value_list = tensor2d.squeeze().tolist()
        batch_list.append(value_list)

    return batch_list
52de31050a32ce54b2733f4c4dd348044e3da259
9,223
def skippable_exons(exons):
    """Determine which exon(s) can be skipped.

    For each exon (except the first and second, which cannot be skipped), we
    want to find the minimum number of exons which together have a size that
    can be divided by 3.

    >>> list(skippable_exons([30]))
    []
    >>> list(skippable_exons([30,30]))
    []
    >>> list(skippable_exons([30,30,30]))
    [[1]]
    >>> list(skippable_exons([30,30,30,30]))
    [[1], [2]]
    >>> list(skippable_exons([30,31,32,30]))
    [[1, 2]]
    >>> list(skippable_exons([30,32,32,30]))
    []
    """
    # If there are less than 3 exons, there is nothing to skip
    if len(exons) < 3:
        return []

    # We check every exon that isn't the first or the last
    for i in range(1, len(exons)):
        # Test every sub-sequence of exons, starting from the current exon
        for j in range(i + 1, len(exons)):
            # Determine the total length of the exons we are considering
            total_length = sum(exons[i:j])
            if total_length % 3 == 0:
                yield list(range(i, j))
                # Once we found the minimum number of exons to skip to stay
                # in frame (can be 1), we are not interested in skipping more
                break
f96ec0da6d72191d252cfe0ba5cdbeb21bc4388c
9,224
from typing import Callable
from typing import Any


def not_pf(predicate: Callable[[Any], bool]):
    """
    Negates the predicate

    * **predicate**: predicate to be tested
    * **return**: a predicate that is the negation of the passed predicate

    >>> p = not_pf(true_p)
    >>> p(1)
    False

    >>> p = not_pf(false_p)
    >>> p(1)
    True
    """
    def internal(elm):
        return not predicate(elm)

    return internal
50d3993c4a83e5794a63134b65c732d1aa0ca1fa
9,225
def filter_genes(centroids):
    """Returns genes that have std > 0."""
    return centroids.index[(centroids.std(axis=1) != 0).tolist()]
fcfbd18b6d657d6758feb324642c4118b80aecfd
9,226
from operator import mul


def dot(A, B):
    """
    Dot product between two arrays.

    A -> n_dim = 1
    B -> n_dim = 2
    """
    arr = []
    for i in range(len(B)):
        if isinstance(A, dict):
            val = sum([v * B[i][k] for k, v in A.items()])
        else:
            val = sum(map(mul, A, B[i]))
        arr.append(val)
    return arr
9ea609f78e27eb3046507db3e366531090b26d6d
9,227
def remove_last_range(some_list):
    """
    Returns a given list with its last range removed.

    list -> list
    """
    return some_list[:-1]
ea2063c901d3aaf67caad97f1760f6fb6afb31c1
9,228
def filter_by_indices(good_indices, vals):
    """
    Get the trajectory points corresponding to the index set produced by the
    segmentation algorithm.

    :param good_indices: set of indices
    :param vals: original (unsegmented) point data
    :return: the segmented point set
    """
    vals_iter = iter(vals)
    good_indices_iter = iter(good_indices)
    out_vals = []
    num_vals = 0
    for i in good_indices_iter:
        if i != 0:
            # the first point must have index 0
            raise ValueError("the first index should be 0, but it was " + str(i))
        else:
            for item in vals_iter:
                out_vals.append(item)
                break
            num_vals = 1
        break
    max_good_index = 0
    vals_cur_index = 1
    for i in good_indices_iter:
        max_good_index = i
        for item in vals_iter:
            num_vals += 1
            if vals_cur_index == i:
                vals_cur_index += 1
                out_vals.append(item)
                break
            else:
                vals_cur_index += 1
    for i in vals_iter:
        num_vals += 1
    if num_vals < 2:
        raise ValueError("list passed in is too short")
    # the largest index in the segmentation index set must be the last point
    # of the point set
    if max_good_index != num_vals - 1:
        raise ValueError("last index is " + str(max_good_index) +
                         " but there were " + str(num_vals) + " vals")
    # print(max_good_index, num_vals)
    return out_vals
c38dd76a90452cdbe96c92c8850752f56cc9882f
9,233
import numpy as np
import os


def gen_index_noddi(in_bval, b0_index):
    """
    Generate the index file for FSL eddy.

    :param in_bval: path to the bval file
    :param b0_index: list of b0 volume indices
    :return: path to the generated index file
    """
    out_file = os.path.abspath('index.txt')
    bvals = np.loadtxt(in_bval)
    vols = len(bvals)

    index_list = []
    for i in range(0, len(b0_index)):
        if i == (len(b0_index) - 1):
            index_list.extend([i + 1] * (vols - b0_index[i]))
        else:
            index_list.extend([i + 1] * (b0_index[i + 1] - b0_index[i]))
    index_array = np.asarray(index_list)
    # the original try/except only evaluated the comparison and discarded the
    # result; actually raise when the index list does not cover every volume
    if len(index_list) != vols:
        raise ValueError("It seems that you do not define the index file for FSL eddy correctly!")
    np.savetxt(out_file, index_array.T)

    return out_file
84ac37def63d1714030d797930e3de958b8ff6a4
9,234
def speed_control(target, current, Kp=1.0):
    """
    Proportional control for the speed.

    :param target: target speed (m/s)
    :param current: current speed (m/s)
    :param Kp: speed proportional gain
    :return: controller output (m/ss)
    """
    return Kp * (target - current)
ce01369dc9445f65249a82cfb7882223ded38f36
9,235
import os


def get_outpath(filename, outdir):
    """Get output filepath.

    :filename: name of music file
    :outdir: path of output directory
    :returns: path of converted music file
    """
    outname = '{}.mp3'.format(os.path.splitext(filename)[0])
    outpath = os.path.join(outdir, outname)
    return outpath
048c1ce65c21a0a561f928eb42882eaa60ee8b1a
9,237
def is_parsed_result_successful(parsed_result):
    """Returns True if a parsed result is successful."""
    return parsed_result['ResponseMetadata']['HTTPStatusCode'] < 300
717f8aa88b814405a5a008e9706338fd0f91a7ff
9,239
def PopularTagsPerLang(df, lang, top_k=10):
    """
    Function: Get top k tags with the largest number of fanworks by media and
        in the selected languages.
    Input:
        - df: pandas.DataFrame.
        - lang: list[str], languages to include.
        - top_k: int, number of top tags to include.
    Output:
        - df_top: pandas.DataFrame.
    """
    df_top = (df.groupby(["MediaType", "org_lang"])[['fantom', 'cnt', 'MediaType', 'org_lang']]
                .apply(lambda x: x.nlargest(top_k, ['cnt']))
                .reset_index(drop=True))
    df_top['rank'] = (df_top.sort_values(by=['cnt'], ascending=False)
                            .groupby(["MediaType", "org_lang"]).cumcount() + 1)
    df_top = (df_top.loc[df_top['org_lang'].isin(lang)]
                    .set_index(['rank', 'MediaType'])['fantom']
                    .unstack(fill_value=0))
    return df_top
c5e0d4e459924292880b3eaee770412083ea59e7
9,241
def r_min_KimKim(T_sat, sigma, h_fg, rho, deltaT_sub):
    """Minimum droplet radius."""
    r_min = 2 * T_sat * sigma / (h_fg * rho * deltaT_sub)
    return r_min
c2e9a7e0741f6d663a73ff04eb32939732c34f36
9,242
import re
import zipfile


def northwind(table_name):
    """
    Yield a stream of "records" as dictionaries, with certain adjustments.

    So it turns out my source of NorthWind data has a bizarre nonstandard
    format: embedded commas are those followed by whitespace! The usual csv
    module doesn't handle that by default and neither does MS Excel.
    Fortunately it's not hard to deal with. Anyway, this is just a concept
    demo. It doesn't have to be amazing. It has to get a point across, and
    the weird CSV format is not that point.
    """
    def split(s: str):
        return delimiter.split(s.rstrip('\n'))

    delimiter = re.compile(r',(?!\s)')
    with zipfile.ZipFile('northwind.zip', 'r') as archive:
        text = iter(archive.read(table_name + '.csv').decode('utf-8').splitlines())
        heads = [h.lower() for h in split(next(text))]
        for tails in text:
            row = dict(zip(heads, split(tails)))
            yield row
303fa89d19e12c17ab14cc0591a4182ce28f489c
9,244
def sub_field(k, v):
    """Return a nested dictionary with field keys k and value v."""
    res = {}
    field_d = res
    fields = k.split('.')
    for f in fields[:-1]:
        field_d[f] = {}
        field_d = field_d[f]
    field_d[fields[-1]] = v
    return res
193869fdfaca84172c71ca935f5fdb312682b19e
9,247
import torch


def get_prediction(model, batch, device):
    """Get predicted labels for given input batch and model."""
    images = torch.tensor(batch, dtype=torch.float).to(device)
    outputs = model(images)
    _, predicted = torch.max(outputs.data, 1)
    return predicted
e8bb4257dc19f26fa206e26fa844ec9717974e52
9,250
def list_split(l, indices):
    """Split list at given indices.

    Closed lists have the same first and last elements. If the list is
    closed, splitting wraps around if the first or last index is not in the
    indices to split.

    Parameters
    ----------
    l : list
        A list.
    indices : list
        A list of indices to split.

    Returns
    -------
    split_lists : list
        Nested lists from splitting the list at the given indices.
    """
    n = len(l)

    if l[0] == l[-1]:
        closed = True
        if n - 1 in indices:
            indices.remove(n - 1)
            if 0 not in indices:
                indices.append(0)
    else:
        closed = False

    indices = list(sorted(set(indices)))

    split_lists = []
    current_list = []
    for index, item in enumerate(l):
        current_list.append(item)
        if (index in indices and index != 0) or index == n - 1:
            split_lists.append(current_list)
            current_list = [item]

    if closed:
        if 0 not in indices:
            start = split_lists.pop(0)[1:]
            split_lists[-1] += start

    return split_lists
a882842f6d51eeda010017dbdd2bfa722ebb363d
9,252
def no_op(ctx, node, name, args):
    """Skip node."""
    return None
1fede015a843657f3959bb8da4c2216a8674e60c
9,253
import gzip
import os


def hook_compressed_text(filename, mode, encoding='utf8'):
    """
    Open a possibly gzip-compressed file in text mode.

    Lines would be byte strings rather than text strings if we used
    gzip.open with its defaults.
    """
    ext = os.path.splitext(filename)[1]
    if ext == '.gz':
        return gzip.open(filename, mode + 't', encoding=encoding)
    #elif ext == '.bz2':
    #    import bz2
    #    return bz2.open(filename, mode + 't', encoding=encoding)
    else:
        return open(filename, mode, encoding=encoding)
7c3b76e4d33cb400020e677554f0d9584fa799b1
9,254
import warnings
import functools


def deprecated(func):
    """
    Decorator to be used to mark functions as deprecated. It will result in
    a warning being emitted when the function is used.

    Usage::

        @other_decorators_must_be_upper
        @deprecated
        def some_old_function(x, y):
            return x + y

        class SomeClass:
            @deprecated
            def some_old_method(self, x, y):
                return x + y
    """
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.warn_explicit(
            "Call to deprecated function %(funcname)s." % {
                'funcname': func.__name__,
            },
            category=DeprecationWarning,
            # func.func_code is Python 2 only; __code__ works on Python 3
            filename=func.__code__.co_filename,
            lineno=func.__code__.co_firstlineno + 1
        )
        return func(*args, **kwargs)
    return new_func
ef4ca24b5da4a4df2b3c2a11f2e6b71791233a85
9,255
import argparse


def setup_cli(args, cfg):
    """Configure command-line arguments."""
    description = """\
Benign_domains outputs a list of perceived benign domains. This is
intended to help gather data for ML training sets and generate white
lists. The core set of domains are provided by majestic million.

Options:
- Validate domains against VirusTotal's datasets (in progress)
- Submit domains to a CRITs instance
- Output to a file"""

    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-s', '--start', action='store',
                        default=cfg['benign'].get('startDomain', fallback='0'),
                        dest='start', type=int,
                        help='Define starting domain rank number. Overrides config file')
    parser.add_argument('-e', '--end', action='store',
                        default=cfg['benign'].get('endDomain', fallback='200'),
                        dest='end', type=int,
                        help='Define ending domain rank number. Overrides config file')

    return parser.parse_args(args)
3bdd81fa9526ce06bf56bf847e04def67b9ce72e
9,256
import subprocess


def get_course_ids():
    """
    Get a list of course ids that is necessary for the rest of the functions
    to work.
    """
    global course_ids
    dump_course_ids = subprocess.Popen(
        ['/edx/bin/python.edxapp', '/edx/app/edxapp/edx-platform/manage.py',
         'lms', '--settings', 'production', 'dump_course_ids'],
        stdout=subprocess.PIPE)
    course_ids = dump_course_ids.communicate()[0].split()
    return course_ids
9017db92c197a756646e916ceab7ebacd481f453
9,258
import math


def divisors(n: int) -> list[int]:
    """Get the proper divisors of a number n."""
    limit = int(math.sqrt(n)) + 1
    proper_divisors = {1}
    for i in range(2, limit):
        if n % i == 0:
            proper_divisors.add(n // i)
            proper_divisors.add(i)
    return list(proper_divisors)
0a71ecccbda802d3a3575f024073fac575355ffa
9,260
def get_lsb_num(num):
    """Return the 1-based position of the least significant set bit."""
    cnt = 0
    while not (num >> cnt) & 1:
        cnt += 1
    # the return statement was commented out in the original, which made the
    # function always return None
    return cnt + 1
6abf34d4831b80310dbf57bf08b7fce0b6c0a73d
9,261
def foreign_key(
    table_name: str, schema: str, parent_name: str, parent_schema: str
) -> str:
    """Return column names (child and parent) of the foreign key."""
    return f"""
        SELECT att2.attname as child_column, att.attname as parent_column
        FROM (
            SELECT unnest(con1.conkey) AS parent,
                   unnest(con1.confkey) AS child,
                   con1.confrelid,
                   con1.conrelid,
                   con1.conname,
                   ns2.nspname
            FROM pg_class cl
            JOIN pg_namespace ns ON cl.relnamespace = ns.oid
            JOIN pg_constraint con1 ON con1.conrelid = cl.oid
            JOIN pg_class cl2 ON cl2.oid = con1.confrelid
            JOIN pg_namespace ns2 ON ns2.oid = cl2.relnamespace
            WHERE cl.relname = '{table_name}'
              AND ns.nspname = '{schema}'
              AND cl2.relname = '{parent_name}'
              AND ns2.nspname = '{parent_schema}'
              AND con1.contype = 'f'
        ) con
        JOIN pg_attribute att
          ON att.attrelid = con.confrelid AND att.attnum = con.child
        JOIN pg_class cl ON cl.oid = con.confrelid
        JOIN pg_attribute att2
          ON att2.attrelid = con.conrelid AND att2.attnum = con.parent
    """
e1c7221fd308ee44f7b09718e66028351262334a
9,262
from pathlib import Path


def available_models():
    """Check for available neural network models.

    This function returns a list of all neural network models saved. If none
    is available, it returns a message informing that there are no models
    previously saved.

    Returns
    -------
    dirs : list
        List of all neural network models saved.

    Examples
    --------
    >>> import rossml as rsml
    >>> rsml.available_models()
    ['test_model']
    """
    try:
        path = Path(__file__).parent / "models"
        dirs = [folder.name for folder in path.iterdir() if folder.is_dir()]
        if len(dirs) == 0:
            dirs = "No neural network models available."
    except FileNotFoundError:
        dirs = "No neural network models available."

    return dirs
f3bbfa56ea0eaa2e06467e479cdefd18d02e8021
9,263
import math


def compute_dimension(bounds, pixel_resolution: tuple):
    """
    :param bounds: bounding box of the image (min x, min y, max x, max y)
    :param pixel_resolution: width and height of pixels in the units of its
        coordinate reference system, extracted from the transformation of
        the image
    :return: output width and height in pixels
    """
    output_width = int(math.ceil((bounds[2] - bounds[0]) / pixel_resolution[0]))
    output_height = int(math.ceil((bounds[3] - bounds[1]) / pixel_resolution[1]))
    return output_width, output_height
83d3c133a8471d41d69cad4fb00d529e36634731
9,265
def get_num_adain_params(model):
    """
    input:
        - model: nn.module

    output:
        - num_adain_params: int
    """
    # return the number of AdaIN parameters needed by the model
    num_adain_params = 0
    for m in model.modules():
        if m.__class__.__name__ == "AdaptiveInstanceNorm1d":
            num_adain_params += 2 * m.num_features
    return num_adain_params
1ba52ef9284415dfad1cb0d6808447a71614e318
9,267
def GetSweepParamList(core):
    """Generate the list of sweep parameter values.

    [in] core: PDI core data
    Returns: list of sweep parameter values, or None
    """
    if not core or not core.pd:
        return None
    spl = []
    for p in core.pd.plist:
        if not p.disable and p.calcCaseNum() > 1:
            spl.append(p)
            continue
    # end of for(p)
    return spl
9f50c4475ae5414681e15f1f43e7900575a8d500
9,270
import os
import re


def format_file_path(filepath):
    """Formats a path as absolute and with the correct platform separator."""
    try:
        filepath = os.path.realpath(os.path.abspath(filepath))
        filepath = re.sub(r'[/\\]', os.path.sep, filepath)
    except:  # pragma: nocover
        pass
    return filepath
858a5f7b3126233165e25cc5d54e227287cf68b6
9,272
import numpy


def rand_index(l):
    """
    Return an index of the list with the probability given in the list.

    Example: rand_index([0.5, 0.25, 0.25]) should return 0 50% of the time,
    1 25% of the time and 2 25% of the time.
    """
    r = numpy.random.uniform(0., sum(l))
    # accumulate probability mass until it exceeds the random draw; the
    # original seeded the running sum with l[0] and added each weight after
    # the comparison, which skewed every index past the first
    s = 0.
    for i, p in enumerate(l):
        s += p
        if r < s:
            return i
    # Should only reach this point due to floating-point errors.
    return len(l) - 1
2dc37eb292034284053b4d91c42bcfcada8376cd
9,273
def get_smaller_channel(channel, channel_range):
    """
    Get channels which are smaller than the input.

    :param channel: input channel
    :param channel_range: list, channel range
    :return: list, channels which are smaller than the input
    """
    return list(filter(lambda x: x < channel, channel_range))
4b9f862756663f12d8f9a8208239ca16fc88042b
9,275
def flatten(game_data):
    """Flatten the game data into a vector.

    Parameters
    ----------
    game_data : ndarray
        An ndarray of shape (# games, 2, 11, 52). 2 teams, 10 players + 1
        team aggregate statistic, and 52 features.

    Returns
    -------
    flattened_data : ndarray
        An ndarray of shape (# games, 1144). The flattened game data.
    """
    return game_data.reshape(game_data.shape[0], -1)
f71316e87d54fdefde52074e1ef58c87cbabb212
9,276
import socket


def get_host_ip():
    # https://www.cnblogs.com/z-x-y/p/9529930.html
    """Look up the IP address of this machine.

    Used to quickly obtain the host's IP address.

    Returns:
        A string holding the IP address, e.g. 127.0.0.1
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        if s is not None:
            s.close()
    return ip
4f1d4a1d709c467a70a3a91ad985ff0954e6c492
9,277
import json


def load_json(filepath):
    """Return parsed json file as dictionary."""
    with open(filepath) as json_file:
        try:
            json_data = json.load(json_file)
        except json.JSONDecodeError:
            return None
    return json_data
daa1d93aaf0602c0e4771e78b5f36ac3d04e4891
9,278
def lighten(color, scale=1.0):
    """
    Lighten a color.

    - color is a tuple (r, g, b, a)
    - scale can be any number; if < 1, the color will be darkened
    """
    return tuple(map(
        lambda x: int(min(max(x * scale, 0), 255)),
        color[:3]
    )) + color[3:]
4c520c00ca3509b3e09090b7d72790db2a80f63c
9,279
def AB2Jy(ABmag):
    """Convert AB magnitudes to Jansky."""
    return 10.**(-0.4 * (ABmag + 48.60)) / 1e-23
a55b70df44f56461d935c8e5aa8aff50df26a982
9,280
def all_valid(formsets):
    """Validate every formset and return True if all are valid."""
    # List comprehension ensures is_valid() is called for all formsets.
    return all([formset.is_valid() for formset in formsets])
3cffd9879143e4879794e86bbb65e49f4f2fd975
9,281
def horizontal_link_count(shape):
    """Number of horizontal links."""
    assert len(shape) == 2
    return shape[0] * (shape[1] - 1)
e4d997cd668a75410e3fb208e7a200cbba3fb6bf
9,282
def build_err_payload(aggregator, import_):
    """
    Builds a JSON error response to return as a WS client notification.
    """
    # flatten errors & warnings into a single list to send to the UI. Each
    # ImportErrorSummary may optionally contain multiple related errors
    # grouped by subcategory
    errs = []
    for err_type_summary in aggregator.errors.values():
        errs.extend(err_type_summary.to_json())

    warns = []
    for warn_type_summary in aggregator.warnings.values():
        warns.extend(warn_type_summary.to_json())

    return {
        "pk": import_.pk,
        "uuid": import_.uuid,
        "status": import_.status,
        "errors": errs,
        "warnings": warns,
    }
1fae5be0308ea5086ac7a3a62be96779162bb2cd
9,283
def m(x0: float, x1: float, y0: float, y1: float) -> float:
    """
    Simple gradient function.

    Parameters
    ----------
    x0 : float
        x co-ordinate at time 0.
    x1 : float
        x co-ordinate at time 1.
    y0 : float
        y co-ordinate at time 0.
    y1 : float
        y co-ordinate at time 1.

    Returns
    -------
    grad : float
        Gradient value.
    """
    grad = (y1 - y0) / (x1 - x0)
    return grad
d138dfedd1e381a575ff6f5108b8841470febbd7
9,284
def _refresh(fn):
    """Decorator to refresh the attributes of this object from the cluster."""
    def wrapper(self, *args, **kwargs):
        self.Refresh()
        fn(self, *args, **kwargs)
        self.Refresh()
    return wrapper
5db0f25fc3042aec25f1af8f5a65edf8436cacdc
9,287
import time


def getDateRange(dt):
    """Get a timestamp range.

    Parameters
    --------------
    dt: str
        The date parameter passed in the url request.

    Returns
    --------------
    tuple
        A pair (start timestamp, end timestamp), or None.
    """
    if dt == 'weekend':
        startDT = "2017-03-25 00:00:00"
        endDT = "2017-03-27 00:00:00"
    elif dt == 'weekday':
        startDT = "2017-03-20 00:00:00"
        endDT = "2017-03-25 00:00:00"
    else:
        return None
    startTimeArray = time.strptime(startDT, "%Y-%m-%d %H:%M:%S")
    endTimeArray = time.strptime(endDT, "%Y-%m-%d %H:%M:%S")
    return (time.mktime(startTimeArray) * 1000, time.mktime(endTimeArray) * 1000)
1265df8cb736a8ac13515220b68296c7517771ad
9,288
import random


def generate_random_color():
    """Generate a random color."""
    r = random.random()
    g = random.random()
    b = random.random()
    return (r, g, b)
11416e6714a08bfbea8c6939774d1f0e54664ac4
9,289
import copy


def tiwary_mmvt_model(tmpdir_factory, tiwary_mmvt_model_persistent):
    """
    Create a copy of the model that is not persistent. But this at least
    doesn't require us to generate an entirely new model.
    """
    tiwary_mmvt_model = copy.deepcopy(tiwary_mmvt_model_persistent)
    return tiwary_mmvt_model
381dd449c8cbcfbdf43ed9a3e8b71fff09f9a2c9
9,290
def _get_disposable_app_filename(clientInfo):
    """Get name of file used to store creds."""
    return clientInfo.get('file', clientInfo['name'] + '.client_data.json')
7d6a67443cd8815ddfde3f69aae450d59f59a437
9,291
def _timestamp_from_record_tuple(record):
    """Extract timestamp from HBase tuple record."""
    return record[0]['timestamp']
de0ff6f12e14093a236cab651d4baae2299d2124
9,292
import subprocess


def make_lint_report(nb_fpath):
    """Run the tutorial linter on a notebook and capture the output."""
    cmdline = ["python", "ci/lint_tutorial.py", nb_fpath]
    res = subprocess.run(cmdline, capture_output=True)
    return res.stdout.decode()
efa21f6ed52affee862300ae18ed4b639ac9e29a
9,293
def get_valid_ip_address(ls):
    """
    Question 7.10: Compute all valid IP addresses from decimal string,
    given that
    """
    ips = []
    # loop over first ip packet
    first_idx = 1
    while first_idx < 4 and first_idx < len(ls):
        second_idx = 1
        while second_idx < 4 and first_idx + second_idx < len(ls):
            third_idx = 1
            while third_idx < 4 and first_idx + second_idx + third_idx < len(ls):
                first_octet = ls[:first_idx]
                second_octet = ls[first_idx:first_idx + second_idx]
                third_octet = ls[first_idx + second_idx:first_idx + second_idx + third_idx]
                fourth_octet = ls[first_idx + second_idx + third_idx:]
                octets = [first_octet, second_octet, third_octet, fourth_octet]
                if all([int(octet) <= 255 for octet in octets]):
                    ips.append('.'.join(octets))
                third_idx += 1
            second_idx += 1
        first_idx += 1
    return ips
eb8b8fc574f489ab260c1a854c4fb1302ca170f8
9,294
import csv


def process_csv(csv_path, image='image_id_', stage='image_stage_',
                fake='image_fake_', answer='image_answer_'):
    """
    Process a csv file to a list of Result objects.

    :param csv_path:
    :param image:
    :param stage:
    :param fake:
    :param answer:
    :return:
    """
    results = []
    with open(csv_path, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=';')
        for row in csv_reader:
            image_ids = {key for key in row.keys() if key.startswith(image)}
            answered_images = {key: value for key, value in row.items()
                               if key in image_ids and value != ''}
            answered_ids = {key.replace(image, '') for key in answered_images.keys()}

            row_answers = []
            for answered_id in answered_ids:
                image_answer = {
                    'image': row[image + answered_id],
                    'stage': row[stage + answered_id],
                    'fake': True if row[fake + answered_id] == '1' else False,
                    'guess': True if row[answer + answered_id] == '1' else False,
                    'guessed_correctly': row[fake + answered_id] == row[answer + answered_id]
                }
                row_answers.append(image_answer)

            row_result = {
                'uuid': row['uuid'],
                'name': row['name'],
                'age': row['age'],
                'feedback': row['feedback'],
                'answers': row_answers,
            }
            results.append(row_result)
    return results
f363521cac13e80d92b9398e284ed6c9b7cf3e54
9,295
def move_odict_item(odict, key, newpos):
    """
    References:
        http://stackoverflow.com/questions/22663966/changing-order-of-ordered-dictionary-in-python

    CommandLine:
        python -m utool.util_dict --exec-move_odict_item

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> odict = OrderedDict()
        >>> odict['a'] = 1
        >>> odict['b'] = 2
        >>> odict['c'] = 3
        >>> odict['e'] = 5
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'c', 1)
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'a', 3)
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'a', 0)
        >>> print(ut.repr4(odict, nl=False))
        >>> move_odict_item(odict, 'b', 2)
        >>> result = ut.repr4(odict, nl=False)
        >>> print(result)
        {'a': 1, 'c': 3, 'b': 2, 'e': 5}
    """
    odict[key] = odict.pop(key)
    for i, otherkey in enumerate(list(odict.keys())):
        if otherkey != key and i >= newpos:
            odict[otherkey] = odict.pop(otherkey)
    return odict
2bf86d8a5da8b474b8d487a0247424cb360c0d35
9,297
def document_search_keys():
    """A list of Regulations.gov document search keys."""
    return ['documents', 'totalNumRecords']
c0969bde192a249c087590332d5deeff19ee06eb
9,298
import pandas as pd


def describe(col, data):
    """Return basic statistical descriptions."""
    d = {}
    # number of observations
    d['Nobs'] = [data[col].count()]
    # mean
    d['Mean'] = [data[col].mean()]
    # std
    d['Std.'] = [data[col].std()]
    # mad
    d['Mad'] = [data[col].mad()]
    # min
    d['Min'] = [data[col].min()]
    # max
    d['Max'] = [data[col].max()]
    # skew
    d['Skew'] = [data[col].skew()]
    # excess kurt
    d['Excess Kurt'] = data[col].kurt()
    # acf lag=1
    d['acf lag=1'] = data[col].autocorr(1)
    # acf lag=5
    d['acf lag=5'] = data[col].autocorr(5)
    # acf lag=10
    d['acf lag=10'] = data[col].autocorr(10)
    # acf lag=20
    d['acf lag=20'] = data[col].autocorr(20)
    # turn to DataFrame
    output = pd.DataFrame(d)
    # rename index
    output.index = [col]
    return output
300310365fe67a5b76747474c77d0a674299fe49
9,299
def computeIoUs(preds, truths):
    """
    Compute intersection over union for the predicted masks vs ground-truth masks.

    @preds and @truths must have the same length and both are iterables of
    numpy matrices of same dimensions.
    """
    # List to collect IoU for each pair
    IoUs = []
    # Iterate over the collections and compute IoUs
    for predicted, truth in zip(preds, truths):
        intersection = predicted * truth
        union = predicted + truth
        # Re-adjust union back to [0, 1] scale and return the result.
        union[union == 2] = 1
        IoUs.append(float(sum(intersection.flat)) / (sum(union.flat) or 1))
    return IoUs
208606710c07878bccf8cae0f3b95ce65cb4180a
9,300
import re


def remove_special_char(in_seq):
    """
    Normalize a string to a defined format (lowercase with '_' replacing
    spaces and special characters).

    :param in_seq: string to normalize
    :return: normalized string
    """
    _sub = re.sub(" {1,5}", "_", in_seq.strip()).lower()
    _chars = ['*', '\\', '&', '/', '+']
    for x in _chars:
        _sub = _sub.replace(x, '_')
    return _sub
425f8a7fcd6a2df7db667063564f419536ae68d9
9,301
import numpy


def position_from_msg(tf_msg, fmt='xyz'):
    """Extract position from a geometry_msgs/TransformStamped message."""
    return numpy.array([getattr(tf_msg.transform.translation, d) for d in fmt])
f74b5aa4fe9e9e462e6eedc4dafa22c7ba2be1e8
9,302
import json


def format_navigation_links(additional_languages, default_lang, messages, strip_indexes=False):
    """Return the string to configure NAVIGATION_LINKS."""
    f = u"""\
    {0}: (
        ("{1}/archive.html", "{2[Archive]}"),
        ("{1}/categories/{3}", "{2[Tags]}"),
        ("{1}/rss.xml", "{2[RSS feed]}"),
    ),"""

    pairs = []

    def get_msg(lang):
        """Generate a smaller messages dict with fallback."""
        fmsg = {}
        for i in (u'Archive', u'Tags', u'RSS feed'):
            if messages[lang][i]:
                fmsg[i] = messages[lang][i]
            else:
                fmsg[i] = i
        return fmsg

    if strip_indexes:
        index_html = ''
    else:
        index_html = 'index.html'

    # handle the default language
    pairs.append(f.format('DEFAULT_LANG', '', get_msg(default_lang), index_html))

    for l in additional_languages:
        pairs.append(f.format(json.dumps(l, ensure_ascii=False), '/' + l,
                              get_msg(l), index_html))

    return u'{{\n{0}\n}}'.format('\n\n'.join(pairs))
81882137af3e80ba24a3de797ebb9f30e6d5a877
9,303
def fixture_spring_metadata(
    first_read, second_read, spring_tmp_path, checksum_first_read, checksum_second_read
):
    """Return metadata information."""
    metadata = [
        {
            "path": str(first_read.absolute()),
            "file": "first_read",
            "checksum": checksum_first_read,
            "algorithm": "sha256",
        },
        {
            "path": str(second_read.absolute()),
            "file": "second_read",
            "checksum": checksum_second_read,
            "algorithm": "sha256",
        },
        {"path": str(spring_tmp_path.absolute()), "file": "spring"},
    ]
    return metadata
f6e9964b811fd1ce4e873f7a5f57d392ebb3fe98
9,304
import re
import os


def reformat_comment(comment: str, add_tab=False):
    """
    :param comment: comment text to re-wrap
    :param add_tab: indent continuation lines with a tab when True
    :return: the reformatted comment
    """
    comment = re.sub(r'\s+', ' ', comment)
    sentence_len = 0
    tmp = []
    for word in comment.split(' '):
        if sentence_len >= 70:
            sentence_len = len(word)
            tmp.append(os.linesep)
        else:
            sentence_len += len(word)
        tmp.append(word)
    comment = " ".join(tmp)
    res = []
    more_than_one_line = False
    for line in comment.split(os.linesep):
        if add_tab and more_than_one_line:
            res.append("\t {}".format(line.lstrip()))
        else:
            res.append(line.lstrip())
        more_than_one_line = True
    comment = "{}".format(os.linesep).join(res)
    return comment
2e14cb1866c5f098ee40e7301551d5e145214389
9,306
def fizz_buzz(num):
    """
    Return 'Fizz', 'Buzz', 'FizzBuzz', or the argument it receives, depending
    on whether the number is divisible by 3, by 5, or by both 3 and 5,
    respectively.
    """
    if not isinstance(num, int):
        raise TypeError("Expected integer as input")
    if num % 3 == 0 and num % 5 == 0:
        return "FizzBuzz"
    elif num % 3 == 0:
        return "Fizz"
    elif num % 5 == 0:
        return "Buzz"
    return num
8b741800f80ebe631f6821a865c9080c33eb4e27
9,309
def dict_to_cvode_stats_file(file_dict: dict, log_path: str) -> bool:
    """
    Turns a dictionary into a Delphin cvode stats file.

    :param file_dict: Dictionary holding the information for the cvode stats file
    :param log_path: Path to where the cvode stats file should be written
    :return: True
    """
    file_obj = open(log_path + '/integrator_cvode_stats.tsv', 'w')
    file_obj.write(' Time [s]\t Steps\t RhsEvals\t LinSetups\t NIters\t NConvFails\t NErrFails\t'
                   ' Order\t StepSize [s]\n')

    for line_index in range(0, len(file_dict['time'])):
        time_string = ' ' * (25 - len(str("{:.10f}".format(file_dict['time'][line_index])))) + \
                      str("{:.10f}".format(file_dict['time'][line_index]))
        steps_string = ' ' * (10 - len(str(file_dict['steps'][line_index]))) + \
                       str(file_dict['steps'][line_index])
        rhs_string = ' ' * (10 - len(str(file_dict['rhs_evaluations'][line_index]))) + \
                     str(file_dict['rhs_evaluations'][line_index])
        lin_string = ' ' * (10 - len(str(file_dict['lin_setups'][line_index]))) + \
                     str(file_dict['lin_setups'][line_index])
        iterations_string = ' ' * (8 - len(str(file_dict['number_iterations'][line_index]))) + \
                            str(file_dict['number_iterations'][line_index])
        conversion_fails_string = ' ' * (11 - len(str(file_dict['number_conversion_fails'][line_index]))) + \
                                  str(file_dict['number_conversion_fails'][line_index])
        error_fails_string = ' ' * (11 - len(str(file_dict['number_error_fails'][line_index]))) + \
                             str(file_dict['number_error_fails'][line_index])
        order_string = ' ' * (6 - len(str(file_dict['order'][line_index]))) + \
                       str(file_dict['order'][line_index])
        step_size_string = ' ' * (14 - len(str("{:.6f}".format(file_dict['step_size'][line_index])))) + \
                           str("{:.6f}".format(file_dict['step_size'][line_index]))

        file_obj.write(time_string + '\t' + steps_string + '\t' + rhs_string + '\t' + lin_string
                       + '\t' + iterations_string + '\t' + conversion_fails_string + '\t'
                       + error_fails_string + '\t' + order_string + '\t' + step_size_string + '\n')

    file_obj.close()

    return True
4b6d92ad610c47eed5b2e593980a74f617ed44f4
9,310
import re


def isNumber(test):
    """
    Test if the string is a valid number.
    Return the converted number, or None if the string is not a number.
    """
    try:
        test = str(test)
        if re.search(r'\.', test):
            try:
                return float(test)
            except:
                return None
        else:
            try:
                return int(test)
            except:
                return None
    except:
        return None
93f3afd1c3e8cefc64b1ff738e3f8336a1b8ffd6
9,311
def unskew_S1(S1, M, N):
    """
    Unskew the sensitivity indices.

    (Jean-Yves Tissot, Clémentine Prieur (2012) "Bias correction for the
    estimation of sensitivity indices based on random balance designs.",
    Reliability Engineering and System Safety, Elsevier, 107, 205-213.
    doi:10.1016/j.ress.2012.06.010)
    """
    lamb = (2 * M) / N
    return S1 - lamb / (1 - lamb) * (1 - S1)
c82dfb842ff61781a45d132acd66f88ab018690c
9,312
def split_list(l, break_pts):
    """Returns list l split up into sublists at break point indices."""
    l_0 = len(l)
    sl = []
    # Return a list containing the input list if no breakpoint indices selected
    if len(break_pts) == 0:
        return [l]
    # Else split the list and return a list of sublists.
    # ADJUST SO IT'S NOT BP INDICES BUT RATHER LOCATION VALUES?
    else:
        for brk in break_pts:
            delta_l = l_0 - len(l)
            sl.append(l[:brk - delta_l])
            l = l[brk - delta_l:]
        sl.append(l)
    return sl
940fe3425e9708e1852fd4930cb5af3e96076b1f
9,313
import sys


def HaveGoodGUI():
    """Returns true if we currently have a good gui available."""
    return "pywin.framework.startup" in sys.modules
e57c7063959024baaa84bd545fa6650516ba11e8
9,315
def has_func(obj, fun):
    """Check if a class has the specified function.

    https://stackoverflow.com/a/5268474

    Args:
        obj: the class to check
        fun: specified function to check

    Returns:
        A bool to indicate if obj has function "fun"
    """
    check_fun = getattr(obj, fun, None)
    return callable(check_fun)
3284c1a30c3b74c93c1c34c102632beb99bf5576
9,318
import torch


def detr_load():
    """
    Loads the DETR model using ResNet-50.

    Returns: the DETR model pretrained on the COCO dataset
    """
    model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=True)
    model.eval()
    return model
71e20ac9f29ff7211ecb36514758145d929636fc
9,319
import math


def distance(p0, p1):
    """Calculate distance between two joints (3D tuples) in the XZ plane (2D)."""
    return math.sqrt((p0[0] - p1[0])**2 + (p0[2] - p1[2])**2)
02e1a1488c32f465f2a1817adb8dfbdb4ea26431
9,322