content: string (35-416k chars) | sha1: string (40 chars) | id: int64 (0-710k)
def find_overlapping_annotations(m, annotations):
    """Takes a markup object and a df of annotations from the same report."""
    def overlaps(m_span, a_span):
        m_span = [int(n) for n in m_span]
        a_span = [int(n) for n in a_span]
        overlap = ((a_span[0] <= m_span[0] <= a_span[1]) |
                   (a_span[0] <= m_span[1] <= a_span[1]) |
                   (m_span[0] <= a_span[0] <= m_span[1]) |
                   (m_span[0] <= a_span[1] <= m_span[1]))
        return overlap

    overlapping_idx = []
    annotations.reset_index(drop=True, inplace=True)
    for i, row in annotations.iterrows():
        if overlaps(m.docSpan, row.Span):
            overlapping_idx.append(i)
    overlapping_annotations = annotations.iloc[overlapping_idx]
    return overlapping_annotations
7cc95cd8ecf7403c84a800f502364384ed208199
9,722
import torch def match_grasp_view_and_label(end_points): """ Slice grasp labels according to predicted views. """ top_view_inds = end_points['grasp_top_view_inds'] # (B, Ns) template_views_rot = end_points['batch_grasp_view_rot'] # (B, Ns, V, 3, 3) grasp_labels = end_points['batch_grasp_label'] # (B, Ns, V, A, D) grasp_offsets = end_points['batch_grasp_offset'] # (B, Ns, V, A, D, 3) grasp_tolerance = end_points['batch_grasp_tolerance'] # (B, Ns, V, A, D) B, Ns, V, A, D = grasp_labels.size() top_view_inds_ = top_view_inds.view(B, Ns, 1, 1, 1).expand(-1, -1, -1, 3, 3) top_template_views_rot = torch.gather(template_views_rot, 2, top_view_inds_).squeeze(2) top_view_inds_ = top_view_inds.view(B, Ns, 1, 1, 1).expand(-1, -1, -1, A, D) top_view_grasp_labels = torch.gather(grasp_labels, 2, top_view_inds_).squeeze(2) top_view_grasp_tolerance = torch.gather(grasp_tolerance, 2, top_view_inds_).squeeze(2) top_view_inds_ = top_view_inds.view(B, Ns, 1, 1, 1, 1).expand(-1, -1, -1, A, D, 3) top_view_grasp_offsets = torch.gather(grasp_offsets, 2, top_view_inds_).squeeze(2) end_points['batch_grasp_view_rot'] = top_template_views_rot end_points['batch_grasp_label'] = top_view_grasp_labels end_points['batch_grasp_offset'] = top_view_grasp_offsets end_points['batch_grasp_tolerance'] = top_view_grasp_tolerance return top_template_views_rot, top_view_grasp_labels, top_view_grasp_offsets, top_view_grasp_tolerance, end_points
ca26ba32bca1c196b43a6197ad13cd5c5ffb2a71
9,723
import numbers def torch_item(x): """ Like ``x.item()`` for a :class:`~torch.Tensor`, but also works with numbers. """ return x if isinstance(x, numbers.Number) else x.item()
dae9881c21a305b42e5c488723a88beb117bf90f
9,724
def count(grid, c): """ Count the occurrences of an object "c" in the 2D list "grid". """ acc = 0 for row in grid: for elem in row: acc += c == elem return acc
6a497b5d052ce8e1d2619f2278010ecd41126a42
9,725
def config_retry_strategy(retry): """Generate retry strategy.""" if not isinstance(retry, int): raise ValueError("Parameter retry should be a number") return {"limit": retry, "retryPolicy": "Always"}
cb436261391e57e845ac5019c7906a56edc2db64
9,726
def interval(lower_, upper_): """Build an interval.""" if lower_ <= upper_: return lower_, upper_ return None
3dd1b0c04c9cad8e5f8a69e5c348f07a7542fe7b
9,728
def fetch_links(html): """ Fetch all links on a given page and return their hrefs in a list """ elements = html.cssselect('a') return [e.get('href') for e in elements if e.get('href') is not None]
f97f610e5baeb3e3b304c3093a5e27fc0b8de551
9,729
import requests


def nlp_answer(question: str = "人工智能") -> str:
    """
    Intelligent Q&A
    https://ownthink.com/robot.html
    :param question: word or sentence in Chinese
    :type question: str
    :return: the answer text
    :rtype: str
    """
    url = 'https://api.ownthink.com/bot'
    params = {
        'spoken': question
    }
    r = requests.get(url, params=params)
    json_data = r.json()
    answer = json_data['data']['info']['text']
    return answer
4947917c54862dd0bf38706c8091573e177c8b7c
9,730
def echo_handler(ex, *args, **kwargs): """ Example error handler which echoes the exception and the arguments. """ argstring = ','.join(['%s' % arg for arg in args]) return '%s/%s/%s' % (ex.message, argstring, kwargs)
29ca4f7663e9c3bf1893bf3f4ab84c8744827ec3
9,731
import json def load_config(config_name): """ Loads a json config file and returns a config dictionary. :param config_name: the path to the config json """ with open(config_name) as config_file: config = json.load(config_file) return config
5920d21c67133d2d106863910fdd8db95efc94e6
9,733
def args_to_int(args: list) -> tuple: """ Convert args to int or return empty """ try: return tuple([int(i) for i in args]) except ValueError: return ()
1585e4c80f9637d5b1773e04c92b2f477092dd7a
9,734
def get_by_name(opname, operators):
    """Return operator class instance by name.

    Parameters
    ----------
    opname: str
        Name of the sklearn class that belongs to a TPOT operator
    operators: list
        List of operator classes from operator library

    Returns
    -------
    ret_op_class: class
        An operator class
    """
    ret_op_classes = [op for op in operators if op.__name__ == opname]
    if len(ret_op_classes) == 0:
        raise TypeError('Cannot find operator {} in operator dictionary'.format(opname))
    elif len(ret_op_classes) > 1:
        raise ValueError(
            'Found duplicate operators {} in operator dictionary. Please check '
            'your dictionary file.'.format(opname)
        )
    ret_op_class = ret_op_classes[0]
    return ret_op_class
29692c00ae034c391582ab7dd40a1d728406e73f
9,735
def import_pkg_operation(): """This function imports the primary package and returns ``True`` when successful.""" return True
e78efc086d7060fd95f1d1da76585c2dbbfcee76
9,736
def get_sub_text_from_xml_node( xmlnode, _text=None ): """ Concatenates the content at and under the given ElementTree node, such as text = get_sub_text_from_xml_node( xmlnode ) """ if _text == None: _text = [] if xmlnode.text: _text.append( xmlnode.text ) for nd in xmlnode: get_sub_text_from_xml_node( nd, _text ) if nd.tail: _text.append( nd.tail ) return ''.join( _text )
620200d5ba782f5696c720ba4da722204783bd17
9,737
def group_dates(dates):
    """
    Groups a list of datetimes day by day.
    :param dates: list of datetimes to group.
    :type dates: list
    :return: list of (day, [times]) tuples
    """
    days = []
    times = []
    for elem in dates:
        if elem.date() not in days:
            days.append(elem.date())
            times.append([elem.time()])
        else:
            # append to the list for that day (correct even if the input
            # is not sorted by date)
            times[days.index(elem.date())].append(elem.time())
    events = [(day, time) for day, time in zip(days, times)]
    return events
35ff8c2da3e4a27eb065afe630814e29eb06df7d
9,738
def is_asciidoc(path): """Returns true if the given path is an Asciidoc file.""" # NOTE: Only checking the extension for now. if path.endswith(".txt"): return True return False
be7fd1994e21b3e540f7bdb6cc7da74753491644
9,739
def total() -> int: """ :return: int: Return sum of number of grains in each of the 64 squares, i.e. 2^64 - 1 """ # The sum of the geometric series 1 + 2 + 4 + 8... 64 terms is 2^64 - 1 return 2**64 - 1
4322456aaff1417354d4b5224594dab9b3c05508
9,741
def get_douban_url(detail_soup): """-""" detail_soup = detail_soup.find('a', title='豆瓣链接') if detail_soup: return detail_soup['href'] return ''
3f4cb56876da722883e3386d7af7ce41726456ac
9,742
def truncate_out_cert_raw(origin_raw_str: str):
    """Truncate the original ssl certificate raw str like:\n
    -----BEGIN CERTIFICATE-----
    raw_str_data...
    -----END CERTIFICATE-----\n
    and split out the raw_str_data."""
    if not isinstance(origin_raw_str, str) or origin_raw_str == "":
        raise Exception("Invalid certificate origin_raw_str")
    if origin_raw_str.startswith("-----BEGIN CERTIFICATE-----"):
        origin_raw_str = origin_raw_str.replace("-----BEGIN CERTIFICATE-----", "")
    if origin_raw_str.endswith("-----END CERTIFICATE-----"):
        origin_raw_str = origin_raw_str.replace("-----END CERTIFICATE-----", "")
    origin_raw_str = origin_raw_str.strip()  # str.strip() returns a new string, so assign it back
    return origin_raw_str
0513a927ab74ac8d6df4e379319eb3516c2f0c14
9,743
def cmp_char(a, b): """Returns '<', '=', '>' depending on whether a < b, a = b, or a > b Examples -------- >>> from misc_utils import cmp_char >>> cmp_char(1, 2) '<' >>> print('%d %s %d' % (1, cmp_char(1,2), 2)) 1 < 2 Parameters ---------- a Value to be compared b Value to be compared Returns ------- {'<', '=', '>'} Character denoting the result of comparing `a` and `b`. """ if a < b: return '<' elif a == b: return '=' elif a > b: return '>' else: return '?'
7e8183564f888df3cce65f2bbbeb659aec43928c
9,744
def get_command_name(cmd, default=''): """Extracts command name.""" # Check if command object exists. # Return the expected name property or replace with default. if cmd: return cmd.name return default
f77a73d1ff24ec74b1c7cf10f89c45fab41fed20
9,746
def get_ocid(prefix, tenderID): """creates a unique contracting identifier""" return "{}-{}".format(prefix, tenderID)
309e8a07dcdf787fd2dd6a41abb4f4d26f1baa63
9,748
import hashlib


def get_str_md5(content):
    """
    Calculate the MD5 of the given string.
    :param content: str or bytes
    :return: hex digest string
    """
    if isinstance(content, str):
        content = content.encode("utf-8")  # hashlib requires bytes
    m = hashlib.md5(content)  # create the md5 object
    return m.hexdigest()
c0e864288d8d6af2fe31b5cb5afe54bfe83e2fb3
9,750
def make_test(row): """ Generate a test method """ def row_test(self): actual = row.get("_actual") if actual in ("P", "F"): if actual == "P": self.assertMeansTest("eligible", row) else: self.assertMeansTest("ineligible", row) row_test.__doc__ = str(row.get("line_number")) + ": " + row.get("_description") return row_test
083117f44687c56a7a33cfa74776baea6b40048c
9,751
import math def binary_sigmoid(n: float, lmbda: float = 1.0) -> float: """ Binary Sigmoidal (Unipolar Continuous) Activation Function """ return 1 / (1 + (math.exp(-lmbda * n)))
dec3380311c5bb1130e8455254d55b44ab416d21
9,752
def client(app): """ Fixture to emulate client""" return app.test_client()
555e43501347e257ea9b0eeb95ba80e99e1a6ce9
9,753
def first_discoverable(targets, cat, kwargs): """A target chooser: the first target for which discover() succeeds This may be useful where some drivers are not importable, or some sources can be available only sometimes. """ for t in targets: try: if cat: s = cat[t] if kwargs and t in kwargs: s = s.configure_new(**kwargs[t]) else: s = t s.discover() return s except Exception: pass raise RuntimeError("No targets succeeded at discover()")
592d738d943578929d69d4d98ca98cdda16f96b9
9,754
def _centralize(shape): """Create a shape which is just like the input, but aligned to origin. The origin is (0; 0) in 2D case. The input shape stays unchanged. It's expected to be a numpy.array. """ centroid = shape.mean(0) return shape - centroid
0c65ac55d3dbad9d540d73e2480c0d71ad703302
9,755
def is_html_needed(user_agent): """ Basing on `user_agent`, return whether it needs HTML or ANSI """ plaintext_clients = ['curl', 'wget', 'fetch', 'httpie', 'lwp-request', 'python-requests'] if any([x in user_agent for x in plaintext_clients]): return False return True
67a75c34dca4672534058729875dc5ee98696590
9,756
import unicodedata def _normalize(string_to_convert, normalize=False): """ a utility method for normalizing string """ try: return unicodedata.normalize('NFC', string_to_convert) if normalize else string_to_convert except TypeError: return string_to_convert
2c4edc31741d8b87165996339c8b9231f5ed6aa5
9,757
def __xor_bytes(bytes1, bytes2): """xor of a list of bytes""" assert len(bytes1) == len(bytes2) return [bytes1[i] ^ bytes2[i] for i in range(len(bytes1))]
0b576cd877839cd2191fee57f6b5f270a37726de
9,758
def report_issue() -> str: # pragma: no cover """Used when errors are really f*cked up""" return ('Report an issue please? ' '( https://github.com/agamm/comeback/issues )')
9b6015a12f252341f63bb988d2bb5b46c3c66318
9,759
def _as_list(arr): """Make sure input is a list of mxnet NDArray""" if not isinstance(arr, (list, tuple)): return [arr] return arr
be489c8d1be314c8b34df25546228f855c223b57
9,760
import random def random_number(): """Generate a random integer between 0 and 9999 """ return random.randint(0, 9999)
f3d448b3118d82fd88946ddddadaa1941ffd7d41
9,761
def eh_posicao(pos): """ Checks whether a given value is a valid position. Parameters: pos (any type): candidate position. Returns: (bool): True if it is a valid position number, False otherwise. """ return False if type(pos) != int or pos < 1 or pos > 9 else True
af0f73f8e4513a679b34795d7be43c26bbc6b586
9,762
import os


def parse(filepath):
    """
    Simple method for a fully specified path which splits the string into three
    parts: folders, filename without suffix and suffix
    e.g. for ./myfolder/myfile.ext the method returns ./myfolder/, myfile, ext
    :param filepath: str any form of os.path (relative or absolute)
    :return: str, str, str folders, filename without suffix, suffix without dot
    """
    base = os.path.basename(filepath)
    suffix = os.path.splitext(filepath)[1][1:]
    path = filepath[:-len(base)]
    # only strip the dot and suffix when there actually is a suffix
    name = base[:-len(suffix) - 1] if suffix else base
    return path, name, suffix
66fc5f1228361962687116d8c921e859ef03401f
9,763
def calc_hole(first_map, second_map, min_size=419430400):
    """
    Calculate the hole between 2 mappings.
    Format of a Mapping Tuple, formatted as (physical_address, mapping_size):
        - physical_address: hex string
        - mapping_size: integer
    Input:
        - first_map: a Mapping Tuple
        - second_map: a Mapping Tuple
        - min_size: an integer giving the minimum wanted size, 400 MB by default
    Output:
        - A Mapping Tuple or None (if there is no hole)
    """
    hole_start = int(first_map[0], 16) + first_map[1]  # the end of the first mapping
    hole_size = int(second_map[0], 16) - hole_start
    if hole_size > min_size:
        return (hex(hole_start), hole_size)
    else:
        return None
c035d23eff6e73295d0421e1cfb63f992caf9673
9,764
def vareq(x1, x2): """Determine if two vars are equal. This does not check if they're values are the same, but if they are exactly the same pointer. Also note that it makes no difference at all if they have the same `name` value. @param x1: A Logic Variable @param x2: Another Logic Variable @return: True if they have the same id. """ return x1.id == x2.id
ba123f9be3cad453399f8e6002ee98d9cb1cf336
9,765
def _find_last_larger_than(target, val_array): """ Takes an array and finds the last value larger than the target value. Returns the index of that value, returns -1 if none exists in array. """ ind = -1 for j in range(len(val_array), 0, -1): if val_array[j - 1] > target: ind = j - 1 break return ind
dbdba59ba35b502669082c8416159770843b7312
9,766
import struct def readStruct(fb, structFormat, seek=False, cleanStrings=True): """ Return a structured value in an ABF file as a Python object. If cleanStrings is enabled, ascii-safe strings are returned. """ if seek: fb.seek(seek) varSize = struct.calcsize(structFormat) byteString = fb.read(varSize) vals = struct.unpack(structFormat, byteString) vals = list(vals) if cleanStrings: for i in range(len(vals)): if type(vals[i]) == type(b''): vals[i] = vals[i].decode("ascii", errors='ignore').strip() if len(vals) == 1: vals = vals[0] return vals
1920c69f1881698a3898774be95e8f10a462d936
9,767
def abspath(newpath, curpath): """Return the absolute path to the given 'newpath'. The current directory string must be given by 'curpath' as an absolute path. """ assert newpath assert curpath assert curpath.startswith('/') subdirs = newpath.split('/') if not subdirs[0] or curpath == '/': # Absolute path (curpath is ignored) # or we're in the root directory dirs = [""] else: # Relative path; extract directory components from curpath dirs = curpath.split('/') for s in subdirs: if not s or s == ".": # Empty or 'current directory' pass elif s == "..": dirs.pop() if not dirs: raise ValueError("Too many '..' in path '{}'".format(newpath)) else: dirs.append(s) if len(dirs) == 1: # Special case for root: joining [] or [""] return "", but you can't # set the first component to "/" since joining ["/","foo"] would # return "//foo" return '/' return '/'.join(dirs)
0b1416492891121f433ce3bfbf934601bfc96f06
9,770
def get_by_string(source_dict, search_string, default_if_not_found=None):
    """
    Search a dictionary using keys provided by the search string.
    The search string is made up of keywords separated by a '.'
    Example: 'fee.fie.foe.fum'
    :param source_dict: the dictionary to search
    :param search_string: search string with keywords separated by '.'
    :param default_if_not_found: return value if the search is unsuccessful
    :return: value, dictionary or default_if_not_found
    """
    if not source_dict or not search_string:
        return default_if_not_found
    dict_obj = source_dict
    for search_key in search_string.split("."):
        try:
            # items() works on both Python 2 and 3 (iteritems() does not)
            dict_obj = next(val for key, val in dict_obj.items()
                            if key == search_key)
        except StopIteration:
            return default_if_not_found
    return dict_obj
59386f5777805f2e7c5a7c7204c56d3d5792c190
9,772
def get_na_row_values(columns): """ returns na values for all columns starting from gender """ row_values = [ -1, -1, -1, -1, -1, -1, -1, False, -1 ] actions_idx = columns.index('actions') row_values += [0] * (len(columns) - actions_idx) return row_values
11c45f3a5a7bb9a42458c6f4c4b3d73a5e75fc3c
9,773
def nonspecific(rna_id, sequence, min_length, max_length): """ Compute all the fragment sequences in case of nonspecific cleavage, based on the info selected by the user on minimum and maximum length for the sequences generated from nonspecific cleavage """ output_sequences, seq_list = [], list(sequence) for i in range(min_length, max_length + 1): if i <= len(sequence): for position in range(0, len(sequence) - i + 1): seq_to_add = ''.join(seq_list[position:position + i]) output_sequences.append( "{} {} {} {} {}\n".format(rna_id, seq_to_add, position + 1, position + i, 0)) return output_sequences
c507d5ffdf5dad5ad6c30e7aab095c9db59cc16c
9,774
import socket import struct def int_to_ip(addr): """ Converts the numeric representation of an IP address to an IP address string Example: >>> int_to_ip(2130706433) '127.0.0.1' """ return socket.inet_ntoa(struct.pack("!I", addr))
6db99b4cb7e7274eb1ac2b7783a4b606d845c4a5
9,775
import argparse def parse(): """Parse system arguments.""" parser = argparse.ArgumentParser(description="Main entry point into \ the MoonTracker application") parser.add_argument('-p', dest='port', action='store', default=5000, type=int, help='have Flask listen on the specified \ port number [default: 5000]') parser.add_argument('--prod', action='store_true', help='use production \ environment, as opposed to development') return parser.parse_args()
4feafae8a598eee96e049c4830e17987d5476dad
9,776
def aa_seq_doc(aa_sequence): """This function takes in an amino acid sequence (aa sequence) and adds spaces between each amino acid.""" return ' '.join([aa_sequence[i:i+1] for i in range(0, len(aa_sequence))])
cb497c340d5ecc29184dc079ea9530ebddc43fbd
9,777
def farey_alg(n, largest_divisor=100):
    """
    Approximate n by a fraction using the Farey (mediant) method.

    This started simple, but the approximation can get really close and then
    drift farther away again, so it was changed to track the best candidate
    seen so far. It still gives a closing range, but it returns the best
    approximation encountered, not the last one. This means that if n is
    close to, but slightly over, 1/2 it keeps returning 1/2 until the upper
    bound gets closer than 1/2 is.

    Parameters
    ----------
    n: float
    largest_divisor: int

    Returns
    -------
    (best_dividend, best_divisor, range_)
    """
    if n > 1:
        n = n - int(n)
    small_dividend = 0
    small_divisor = 1
    large_dividend = 1
    large_divisor = 1
    next_divisor = small_divisor + large_divisor
    best_dividend = 0
    best_divisor = 0
    best_error = 1
    while next_divisor <= largest_divisor:
        new_dividend = small_dividend + large_dividend
        new_divisor = next_divisor
        new_approx = new_dividend / new_divisor
        if new_approx == n:
            range_ = (small_dividend / small_divisor, large_dividend / large_divisor)
            return new_dividend, new_divisor, range_
        elif new_approx < n:
            small_dividend = new_dividend
            small_divisor = new_divisor
        else:
            large_dividend = new_dividend
            large_divisor = new_divisor
        new_error = abs(n - new_approx)
        if new_error < best_error:
            best_dividend = new_dividend
            best_divisor = new_divisor
            best_error = new_error
        next_divisor = small_divisor + large_divisor
    range_ = (small_dividend / small_divisor, large_dividend / large_divisor)
    return best_dividend, best_divisor, range_
30dd6815908aa9d05cb4387909e5aac37aa7bc4a
9,779
def getLigandCodeFromSdf(sdfFileName):
    """
    Extracts ligand codes from an .sdf file.
    Input:  sdfFileName - name of the sdf file
    Output: ligandCodes - list of ligand codes found
    """
    sdfFile = open(sdfFileName, 'r')
    line = sdfFile.readline()
    ligandCodes = []
    while line:
        if "field_0" in line:
            lineWithData = sdfFile.readline()
            ligandCodes.append(lineWithData.strip())
        line = sdfFile.readline()
    sdfFile.close()
    return ligandCodes
9b25f91b754448f6fab4ce11e0f816cbf5406dea
9,781
def _transformer( emb_dim=512, num_heads=8, num_layers=6, qkv_dim=512, mlp_dim=2048, dropout_rate=None, attention_dropout_rate=None, nonlinearity='gelu', ): """Transformer config.""" configs = { 'models.build_transformer_config.emb_dim': emb_dim, 'models.build_transformer_config.num_heads': num_heads, 'models.build_transformer_config.num_decoder_layers': num_layers, 'models.build_transformer_config.num_encoder_layers': num_layers, 'models.build_transformer_config.qkv_dim': qkv_dim, 'models.build_transformer_config.mlp_dim': mlp_dim, 'models.build_transformer_config.mlp_activations': (nonlinearity,), } if dropout_rate is not None: configs['models.build_transformer_config.dropout_rate'] = dropout_rate if attention_dropout_rate is not None: configs[ 'models.build_transformer_config.attention_dropout_rate'] = attention_dropout_rate return configs
547f585d68a798324ef47e0c5093ff89956e9e9a
9,782
import random


def make_bank_act_tp_data(ctif_tp):
    """
    Generate a bank account type.
    :param ctif_tp: entity type
    :return: account type
    """
    if ctif_tp == "1":
        act_tp = random.choice(["02", "03"])
    elif ctif_tp == "2":
        act_tp = random.choice(["01", "03"])
    else:
        raise TypeError("ctif_tp={} is not a valid type!".format(ctif_tp))
    return act_tp
90e394b44dd6802b5c9a7773edb3b9baa32b0eef
9,783
def v70_from_params_asdict(v70_from_params): """Converts a sparse `v70_from_params` array to a dict.""" dict_v70_from_params = {} for i in range(v70_from_params.shape[0]): for j in range(v70_from_params.shape[1]): v = v70_from_params[i, j] if v: dict_v70_from_params[(i, j)] = str(v) return dict_v70_from_params
355ba80c131d08b6aedf20680545b4b31e07832e
9,784
def open_r(filename): """Open a file for reading with encoding utf-8 in text mode.""" return open(filename, 'r', encoding='utf-8')
08086625a9c05738a3536001a158eff3b0718ddf
9,785
import torch def compute_mae(vec1, vec2): """ vec1, vec2 is torch.Tensor """ vec1 = vec1.reshape(-1, vec1.shape[-1]) vec2 = vec2.reshape(-1, vec2.shape[-1]) if vec2.shape[-1] == 2 and vec1.shape[-1] == 3: vec1 = vec1[..., :2] / torch.norm(vec1[..., :2], dim=-1, keepdim=True) if vec2.shape[-1] == 3 and vec1.shape[-1] == 2: vec2 = vec2[..., :2] / torch.norm(vec2[..., :2], dim=-1, keepdim=True) cos = torch.sum(vec1 * vec2, dim=1) cos[cos > 1] = 1 cos[cos < -1] = -1 rad = torch.acos(cos) mae = torch.rad2deg(torch.mean(rad).cpu().detach()) return mae
14b023666d726004b05f57efa874d2cbe6b81db7
9,787
def prepend_protocol(url: str) -> str: """Prefix a URL with a protocol schema if not present Args: url (str) Returns: str """ if '://' not in url: url = 'https://' + url return url
856961526207510c630fe503dce77bdcfc58d0cc
9,788
def readBinaryWatch(self, num): # ! 44ms: a double loop that counts the number of 1 bits; nothing especially tricky """ :type num: int :rtype: List[str] """ return ['%d:%02d' % (h, m) for h in range(12) for m in range(60) if (bin(h) + bin(m)).count('1') == num]
1e14be488a54f4746b39771c81e4dd50bce1a9b3
9,789
def chunk_by_image(boxes): """ turn a flat list of boxes into a hierarchy of: image category [boxes] :param boxes: list of box detections :return: dictionary of boxes chunked by image/category """ chunks = {} for b in boxes: if b['image_id'] not in chunks: chunks[b['image_id']] = {b['category_id']: [b]} elif b['category_id'] not in chunks[b['image_id']]: chunks[b['image_id']][b['category_id']] = [b] else: chunks[b['image_id']][b['category_id']].append(b) return chunks
d6eaf46214a97853407112a9d0a3c47a132fb3c4
9,791
def find_duplicate_uuids(images): """ Create error records for UUID duplicates. There is no real way to figure out which is the correct image to keep, so we keep the first one and mark all of the others as an error. We also remove the images with duplicate UUIDs from the image dataframe. """ dupe_mask = images.sample_id.duplicated(keep='first') dupes = images.loc[dupe_mask, ['image_file', 'sample_id']] images = images[~dupe_mask] dupes['msg'] = '' # Handle the case where there are no duplicates if dupes.shape[0]: dupes['msg'] = dupes.apply( lambda dupe: 'DUPLICATES: Files {} and {} have the same QR code'.format( dupe.image_file, images.loc[images.sample_id == dupe.sample_id, 'image_file']), axis=1) dupes = dupes.drop(['sample_id'], axis=1) return images, dupes
ac8b396692df921de56c0a18aa2d9f1953f4c5ca
9,792
from typing import Tuple from typing import List def split_by_commas(maybe_s: str) -> Tuple[str, ...]: """Split a string by commas, but allow escaped commas. - If maybe_s is falsey, returns an empty tuple - Ignore backslashed commas """ if not maybe_s: return () parts: List[str] = [] split_by_backslash = maybe_s.split(r'\,') for split_by_backslash_part in split_by_backslash: splitby_comma = split_by_backslash_part.split(',') if parts: parts[-1] += ',' + splitby_comma[0] else: parts.append(splitby_comma[0]) parts.extend(splitby_comma[1:]) return tuple(parts)
ca21e5103f864e65e5ae47b49c161e8527036810
9,794
import re def replace_flooded_chars(text): """replace 3 or more repetitions of any character patterns w/ 2 occurrences of the shortest pattern""" return re.sub(r'(.+?)\1\1+', r'\1\1', text)
1e7a4403cc55b155a4088185b701dd4f9ac95019
9,796
def kalman_predict(m, P, A, Q): """Kalman filter prediction step""" m_p = A @ m P_p = A @ P @ A.T + Q return m_p, P_p
a43e644693d02e317f2ac67a36f0dc684e93f3d5
9,797
import os,imp def listmodules(package_name=''): """List modules in a package or directory""" package_name_os = package_name.replace('.','/') file, pathname, description = imp.find_module(package_name_os) if file: # Not a package return [] ret = [] for module in os.listdir(pathname): if module.endswith('.py') and module != '__init__.py': tmp = os.path.splitext(module)[0] ret.append(package_name+'.'+tmp) return ret
d869d19ad3d36593d9cb6095b52de4fbf906fda0
9,799
def is_probably_gzip(response): """ Determine if a urllib response is likely gzip'd. :param response: the urllib response """ return (response.url.endswith('.gz') or response.getheader('Content-Encoding') == 'gzip' or response.getheader('Content-Type') == 'application/x-gzip')
30ca3774f16debbac4b782ba5b1c4be8638fe344
9,800
import random import math def buffon(needlesNbr, groovesLen, needlesLen): """Simulates Buffon's needle experiments.""" intersects = 0 for i in range(needlesNbr): y = random.random() * needlesLen / 2 angle = random.random() * math.pi z = groovesLen / 2 * math.sin(angle) if y <= z: intersects += 1 expFreq = intersects / needlesNbr thFreq = 2 * needlesLen / (math.pi * groovesLen) return (intersects, expFreq, thFreq)
34bbb29346690b5d0ef519282699f0b3b82d93cb
9,801
def stringify(plaintext): """ Used to convert hex integers into a string when decrypting. :param plaintext: a hex integer number. :return: a ascii string. """ if len(plaintext) % 2 == 1: plaintext = '0' + plaintext lst = [] end = len(plaintext) // 2 for i in range(end): lst.append(chr(eval('0x' + plaintext[2 * i: 2 * (i + 1)]))) return ''.join(lst)
ddab6a748b9194ce763fd82c38ee428a41a50c72
9,802
def get_new_snp(vcf_file): """ Gets the positions of the new snp in a vcf file :param vcf_file: py_vcf file :return: list of new snp """ new_snp = [] for loci in vcf_file: if "gff3_notarget" in loci.FILTER: new_snp.append(loci) return(new_snp)
1385a5552f9ad508f5373b2783d14938ab04b9c5
9,803
def my_function() -> dict: """Return a set of data into a dictionary""" hills_of_rome = { 'Aventine Hill': {'Latin': 'Aventinus', 'Height': 46.6, 'Italian': 'Aventino'}, 'Caelian Hill': {'Latin': r'Cælius', 'Height': 50.0, 'Italian': 'Celio'}, 'Capitoline Hill': {'Latin': 'Capitolinus', 'Height': 44.7, 'Italian': 'Campidoglio'}, 'Esquiline Hill': {'Latin': 'Esquilinus', 'Height': 58.3, 'Italian': 'Esquilino'}, 'Palatine Hill': {'Latin': 'Palatinus', 'Height': 51.0, 'Italian': 'Palatino'}, 'Quirinal Hill': {'Latin': 'Quirinalis', 'Height': 50.9, 'Italian': 'Quirinale'}, 'Viminal Hill': {'Latin': 'Viminalis', 'Height': 57.0, 'Italian': 'Viminale'}} return hills_of_rome
2482a65efc45c9c7f30a12f20bef2ba069c37d0a
9,804
def most_freq(neighbors): """ Returns the dominant value with the greatest frequency Example: num_dominating = [paper, paper, paper, spock, spock, spock, spock, spock] Returns: spock """ return max(set(neighbors), key=neighbors.count)
09c041b27dbf55f6e862d73bde421a86ac265f42
9,805
import os def testing_guard(decorator_func): """ Decorator that only applies another decorator if the TESTING environment variable is not set. Args: decorator_func: The decorator function. Returns: Function that calls a function after applying the decorator if TESTING environment variable is not set and calls the plain function if it is set. """ def decorator_wrapper(*decorator_args): def replacement(original_func): """Function that is called instead of original function.""" def apply_guard(*args, **kwargs): """Decides whether to use decorator on function call.""" if os.getenv("TESTING") is not None: return original_func(*args, **kwargs) return decorator_func(*decorator_args)(original_func)(*args, **kwargs) return apply_guard return replacement return decorator_wrapper
3b8a6ba26fd537f1edd521391158c459c340b6d7
9,806
from datetime import datetime def greater_than_days_cutoff(timestamp, cutoff): """ Helper function to calculate if PR is past cutoff """ # Convert string to datetime object last_update = datetime.strptime(timestamp[0:22], '%Y-%m-%dT%H:%M:%S.%f') # Get the number of days since this PR has been last updated last_update_days = (datetime.now() - last_update).days return last_update_days > cutoff
2dd1a9c01112d30a77ca2f5826db32d29f26d830
9,807
import requests def discover_oidc_provider_config(oidc_provider_config_endpoint, client_id, mccmnc): """ Make an HTTP request to the ZenKey discovery issuer endpoint to access the carrier’s OIDC configuration """ oidc_provider_config_url = '%s?client_id=%s&mccmnc=%s' % ( oidc_provider_config_endpoint, client_id, mccmnc ) config_response = requests.get(oidc_provider_config_url) config_json = config_response.json() if (config_json == {} or config_json.get('issuer') is None): raise Exception('unable to fetch provider metadata') return config_json
b5fe999cbd8a2515d689b564d1de6df30c28bde3
9,808
def reportnulls(df): """ Takes a data frame, counts the nulls per column and sorts the results from highest to lowest """ null_counts = df.isnull().sum().sort_values(ascending=False) # return count of null values return null_counts
a3dc20feeaaf0f3467de76812531f1d0b791dc01
9,810
def getConstrainedTargets(driver, constraint_type='parentConstraint'): """ Gets all the transforms the given driver is driving through the giving constraint type. Args: driver (PyNode): The transform that is driving the other transform(s) through a constraint. constraint_type (string): The type of constraint to look for. Normally "parentConstraint", "pointConstraint", "orientConstraint", or "scaleConstraint". Returns: (set): Transform(s) being driven by given driver through given constraint type. """ # Using sets to remove duplicates from list connections because Maya constraints = set(driver.listConnections(type=constraint_type)) targets = set() for constraint in constraints: targets.update(set(constraint.connections(source=False, destination=True, et=True, type='transform'))) return targets
4e94a4cf1e72012e2413f1889ec76ccd0a76800e
9,812
from typing import List from pathlib import Path def get_isbr2_nii_file_paths(dir_paths: List[Path], file_selector: str) -> List[Path]: """Returns all the .nii.gz file paths for a given file_selector type. Arguments: dir_paths: a list of sample dir paths, each directory holds a full scan file_selector: a string representing which file type to chose, e.g. 'ana' for IBSR_02_ana.nii.gz """ nii_file_paths = [] for dir_path in dir_paths: sample_name = dir_path.name file_name = f'{sample_name}_{file_selector}.nii.gz' file_path = dir_path / file_name nii_file_paths.append(file_path) return nii_file_paths
fc04b9fa48e2d44c532344c36bdbdefe71f24f67
9,813
def get_num_gophers(blades, remainders, M): """Find no. of gophers given no. of blades and remainders.""" for i in range(1, M + 1): congruences = all([i % b == r for b, r in zip(blades, remainders)]) if congruences: return i return None
0467bd7a9ab56181b03c26f0048adf52b1cc8228
9,814
import math def __calc_entropy_passphrase(word_count, word_bank_size, pad_length, pad_bank_size): """ Approximates the minimum entropy of the passphrase with its possible deviation :param word_count: Number of words in passphrase :param word_bank_size: Total number of words in the word bank :param pad_length: Number of characters used in padding :param pad_bank_size: The size of the character pool used to generate padding :return: A tuple containing the minimum entropy and deviation """ # Multiply word bank size by 2 since there are uppercase or lower case words inner = math.pow(word_bank_size*2, word_count) entropy = math.log(inner, 2) inner = math.pow(pad_bank_size, pad_length) deviation = math.log(inner, 2) return entropy, deviation
c3597b8d8fc35387638e1e0e4923316c2b99aaa8
9,815
def join_string(list_string, join_string): """ Join string based on join_string Parameters ---------- list_string : string list list of string to be join join_string : string characters used for the joining Returns ------- string joined : string a string where all elements of the list string have been join with join_string between them Examples -------- >>> test = ["a", "b", "c", "d"] >>> join_string(test, "/") 'a/b/c/d' """ return join_string.join(list_string)
bcb55fb72b9579dd5ab548b737a0ffd85cbc7f43
9,816
def value_from_example(example, feature_name): """Returns the feature as a Python list.""" feature = example.features.feature[feature_name] feature_type = feature.WhichOneof('kind') return getattr(feature, feature_type).value[:]
8c73b2ecec80255de219911b0628fc89359a220a
9,817
def top_level_nodes(ig_service): """test fixture gets the top level navigation nodes""" response = ig_service.fetch_top_level_navigation_nodes() return response["nodes"]
323a88402a2790d672273001826d0ae3d25c017d
9,818
def parse_segments(segments_str): """ Parse segments stored as a string. :param segments_str: "v1,v2,v3,..." :return: [(v1,v2), (v3, v4), (v5, v6), ... ] """ s = [int(t) for t in segments_str.split(',')] return zip(s[::2], s[1::2])
4adfaff824ceb12772e33480a73f52f0054f6f5d
9,820
def GetFirstWord(buff, sep=None):#{{{ """ Get the first word string delimited by the supplied separator """ try: return buff.split(sep, 1)[0] except IndexError: return ""
ce01a470ff5ba08f21e37e75ac46d5a8f498a76a
9,821
def contar_caracteres(s): """ Counts the characters of a string. :param s: string to be counted """ num_of_caracteres = {} for caracter in s: num_of_caracteres[caracter] = (num_of_caracteres.get(caracter, 0) + 1) return num_of_caracteres
afad48efc5f2f22f5c8e4622836516d3229266c8
9,823
def get_domain_ip(domain): """ Get the IP for the domain. Any IP that responded is good enough. """ if domain.canonical.ip is not None: return domain.canonical.ip if domain.https.ip is not None: return domain.https.ip if domain.httpswww.ip is not None: return domain.httpswww.ip if domain.httpwww.ip is not None: return domain.httpwww.ip if domain.http.ip is not None: return domain.http.ip return None
5f16fe7716561059d00ae33c631ae1df711ecc0e
9,824
import torch def BPR_Loss(positive : torch.Tensor, negative : torch.Tensor) -> torch.Tensor: """ Given postive and negative examples, compute Bayesian Personalized ranking loss """ distances = positive - negative loss = - torch.sum(torch.log(torch.sigmoid(distances)), 0, keepdim=True) return loss
868df180dc0166b47256d64c60928e9759b80e5f
9,825
import math def get_distance(pos_1, pos_2): """Get the distance between two point Args: pos_1, pos_2: Coordinate tuples for both points. """ x1, y1 = pos_1 x2, y2 = pos_2 dx = x1 - x2 dy = y1 - y2 return math.hypot(dx, dy)
457827af4625c493537c8501c66ebba73d9ce1a1
9,826
def _sub_matches_cl(subs, state_expr, state): """ Checks whether any of the substitutions in subs will be applied to state_expr Arguments: subs: substitutions in tuple format state_expr: target symbolic expressions in which substitutions will be applied state: target symbolic state to which substitutions will be applied Returns: boolean indicating positive match Raises: """ sub_symbols = set( sub[0] for sub in subs if str(state) not in [ str(symbol) for symbol in sub[1].free_symbols ] ) return len(sub_symbols.intersection(state_expr.free_symbols)) > 0
7ceba9a6c0a83251289dec8f27ccbf1558e47dac
9,827
def find_fxn(tu, fxn, call_graph): """ Looks up the dictionary associated with the function. :param tu: The translation unit in which to look for locals functions :param fxn: The function name :param call_graph: a object used to store information about each function :return: the dictionary for the given function or None """ if fxn in call_graph['globals']: return call_graph['globals'][fxn] else: try: return call_graph['locals'][fxn][tu] except KeyError: return None
e73783b2eddadcbbc9e9eff39073805fc158c34e
9,828
import os def save_format(file): """Return 'mat' or 'py' based on file name extension.""" ext = os.path.splitext(file)[1] return ext[1:] # drop the leading dot
a52b05a368034b54ce6d362b714369d7952a3786
9,829
import torch def alexnet_metapoison(widths=[16, 32, 32, 64, 64], in_channels=3, num_classes=10, batchnorm=False): """AlexNet variant as used in MetaPoison.""" def convblock(width_in, width_out): if batchnorm: bn = torch.nn.BatchNorm2d(width_out) else: bn = torch.nn.Identity() return torch.nn.Sequential(torch.nn.Conv2d(width_in, width_out, kernel_size=3, padding=1), torch.nn.ReLU(), bn, torch.nn.MaxPool2d(2, 2)) blocks = [] width_in = in_channels for width in widths: blocks.append(convblock(width_in, width)) width_in = width model = torch.nn.Sequential(*blocks, torch.nn.Flatten(), torch.nn.Linear(widths[-1], num_classes)) return model
40f84a7d434cd68b9f824ee847ea045e4479f5be
9,831
def checksum(message): """ Calculate the GDB server protocol checksum of the message. The GDB server protocol uses a simple modulo 256 sum. """ check = 0 for c in message: check += ord(c) return check % 256
bfba144414f26d3b65dc0c102cb7eaa903de780a
9,832
import torch def sds_bmm_torch(s_t1, d_t2): """ bmm (Batch Matrix Matrix) for sparse x dense -> sparse. This function doesn't support gradient. And sparse tensors cannot accept gradient due to the limitation of torch implementation. with s_t1.shape = (b, x, s), d_t2.shape = (b, s, y), the output shape is (b, x, y) This is a work around utilizing torch.smm for sparse x dense -> sparse :param s_t1: sparse tensor 1 (in list, representing batches) :param d_t2: dense tensor 2 :return: bmm result in sparse (in list, representing batches) """ device = d_t2.device assert type(s_t1) == list batch_num = len(s_t1) assert batch_num == d_t2.shape[0], 'Batch size mismatch.' outp = [] for b in range(batch_num): # force cpu _s_t1 = s_t1[b].cpu() _d_t2 = d_t2[b].cpu() assert _s_t1.shape[1] == _d_t2.shape[0], 'Matrix shape mismatch.' _outp = torch.smm(_s_t1, _d_t2) # CUDA version of smm is not implemented outp.append(_outp) return outp
a17daf9b000d808ab3a3cf18590373480de2d543
9,833
def parse_report_filter_values(request, reports): """Given a dictionary of GET query parameters, return a dictionary mapping report names to a dictionary of filter values. Report filter parameters contain a | in the name. For example, request.GET might be { "crash_report|operating_system": "Linux", "crash_report|graphics_card": "nVidia", "apprentice_report|version": "13.0", "start_date": "2015-01-01", "end_date": "2015-01-31", } We want to return { "crash_report": { "operating_system": "Linux", "graphics_card": "nVidia", }, "apprentice_report": { "version": "13.0", }, } """ report_name_to_filter_values = {} # Note that if there are multiple values in the request.GET dictionary, # as is the case for checkboxes with corresponding hidden fields, that # items() will simply return the last value. for report_and_parm_name, value in request.GET.items(): if "|" in report_and_parm_name: report_name, parm_name = report_and_parm_name.split("|", 1) report_name_to_filter_values.setdefault( report_name, {})[parm_name] = value # Make sure that all reports are in the result, and that each of the # report's filters has a value. for report in reports: filter_values = report_name_to_filter_values.setdefault( report.name(), {}) for filt in report.get_filters(): if filt.name not in filter_values: filter_values[filt.name] = filt.default_value() # Give the filter a chance to convert from the GET value into # something that makes more sense to the report. filter_values[filt.name] = filt.process_GET_value( filter_values[filt.name]) return report_name_to_filter_values
217a7bfdeb65952637774ebefb6ae0ea7a0d991c
9,834
import itertools def iter_extend(iterable, length, obj=None): """Ensure that iterable is the specified length by extending with obj""" return itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length)
1e6a2bdd36b8bcb3202c4472c9e7621eef9edcf1
9,835
import numpy def check_type_force_float(x, name): """ If an int is passed, convert it to a float. If some other type is passed, raise an exception. """ if type(x) is int: return float(x) elif type(x) is not float and type(x) is not numpy.float64: raise TypeError("%r should be a float" % (name,)) else: return x
23469a7f5aedd2c0ec30969413a31330a7028dfc
9,836
def platform2str(platform: str) -> str: """ get full platform name """ if platform == "amd": return "AMD Tahiti 7970" elif platform == "nvidia": return "NVIDIA GTX 970" else: raise LookupError
083e38f45db482c9fe6761c719df4bf0f5719256
9,837
def keyfunc(line): """Return the key from a TAB-delimited key-value pair.""" return line.partition("\t")[0]
39737389cd7a9e8046ff00700004b8864a242914
9,839
def html_wrap(html_string): """Add an html-head-body wrapper around an html string.""" html_prefix="""<html> <head> <title>HTML CSS TESTS</title> <link rel="stylesheet" type="text/css" href="tests/manual html-css tests/html-css.css"> </head> <body>""" html_postfix=""" </body></html> """ return html_prefix + html_string + html_postfix
8510549f4de1de25ac98361f757210eafdb02631
9,840
def split(ary, n): """Given an array and an integer n, split the array into chunks of at most n elements""" result = [] for i in range(0, len(ary), n): result.append(ary[i: i + n]) return result
27ae4f06603de17c993656fae9df07b61f333474
9,841
def manager(manager_maker): """ return an uninitialized AccountManager instance. """ return manager_maker(addid=False)
7557829687f7368a20fd52b6cba84a1c236b6ed8
9,842
def tcp_to_udp_data(d): """ Trim TCP packet to send it over UDP. :param d: (bytes) TCP DNS response :return: (bytes) UDP ready DNS response """ d = d[2:] return d
efdddfe8aaa9443b2fd2c194401708a12a6b2389
9,843