Columns: content — string (35 to 416k chars); sha1 — string (40 chars); id — int64 (0 to 710k)
def get_total_obs_num_samples(obs_length=None,
                              num_blocks=None,
                              length_mode='obs_length',
                              num_antennas=1,
                              sample_rate=3e9,
                              block_size=134217728,
                              num_bits=8,
                              num_pols=2,
                              num_branches=1024,
                              num_chans=64):
    """
    Calculate the number of required real voltage time samples for a given
    `obs_length` or `num_blocks`, without directly using a
    `RawVoltageBackend` object.

    Parameters
    ----------
    obs_length : float, optional
        Length of observation in seconds, if in `obs_length` mode
    num_blocks : int, optional
        Number of data blocks to record, if in `num_blocks` mode
    length_mode : str, optional
        Mode for specifying length of observation, either `obs_length` in
        seconds or `num_blocks` in data blocks
    num_antennas : int
        Number of antennas
    sample_rate : float
        Sample rate in Hz
    block_size : int
        Block size used in recording GUPPI RAW files
    num_bits : int
        Number of bits in requantized data (for saving into file). Can be 8 or 4.
    num_pols : int
        Number of polarizations recorded
    num_branches : int
        Number of branches in polyphase filterbank
    num_chans : int
        Number of coarse channels written to file

    Returns
    -------
    num_samples : int
        Number of samples
    """
    tbin = num_branches / sample_rate
    chan_bw = 1 / tbin
    bytes_per_sample = 2 * num_pols * num_bits / 8
    if length_mode == 'obs_length':
        if obs_length is None:
            raise ValueError("Value not given for 'obs_length'.")
        num_blocks = int(obs_length * chan_bw * num_antennas * num_chans
                         * bytes_per_sample / block_size)
    elif length_mode == 'num_blocks':
        if num_blocks is None:
            raise ValueError("Value not given for 'num_blocks'.")
    else:
        raise ValueError("Invalid option given for 'length_mode'.")
    return num_blocks * int(block_size / (num_antennas * num_chans * bytes_per_sample)) * num_branches
0d5c3de03723c79d31c7f77ece29226daaf4f442
6,437
def event_date_row(event_names_and_dates):
    """
    Returns the third row of the attendance csv. This is just a list of event dates.

    :param list[(str, datetime)] event_names_and_dates: A list of names and dates
        for each event that should appear on the csv
    :returns: the row to be printed
    :rtype: [str]
    """
    # ="..." has to be added around the dates to make sure Excel doesn't auto-format them
    event_dates = ['="' + str(dates) + '"' for _, dates in event_names_and_dates]
    return ['', '', '', '', '', '', ''] + event_dates
5b51eaef8cde99040a1aff9a0c6abaaef5e52896
6,439
def get_page_index(obj, amongst_live_pages=True):
    """
    Get a page's index (a number) within its siblings.

    :param obj: Wagtail page object
    :param amongst_live_pages:
        Get index amongst live pages if True or all pages if False.
    :return:
        Index of a page if found or None if page doesn't have an index.
    """
    qs = obj.__class__.objects.filter(depth=obj.depth).values_list('pk', flat=True)
    if amongst_live_pages:
        qs = qs.live()
    if obj.depth > 1:
        # making sure the non-root nodes share a parent
        parentpath = obj._get_basepath(obj.path, obj.depth - 1)
        qs = qs.filter(
            path__range=obj._get_children_path_interval(parentpath))
    try:
        index = list(qs).index(obj.pk)
        return index
    except ValueError:
        return None
fd1950533a398019ab0d3e208a1587f86b134a13
6,441
def depth(data):
    """
    For each event, it finds the deepest layer in which the shower has
    deposited some E.
    """
    maxdepth = 2 * (data[2].sum(axis=(1, 2)) != 0)
    maxdepth[maxdepth == 0] = 1 * (
        data[1][maxdepth == 0].sum(axis=(1, 2)) != 0
    )
    return maxdepth
aa48c88c516382aebe2a8b761b21f650afea82b1
6,442
import argparse


def parse_args(args):
    """Parse the command line arguments."""
    parser = argparse.ArgumentParser(
        description='STOMP client for Network Rail\'s public data.'
    )
    subparsers = parser.add_subparsers()

    # Creation of new configuration files
    create_parser = subparsers.add_parser(
        'create', description='Create a new, empty configuration file.'
    )
    create_parser.add_argument(
        'location', help='The location for the new configuration file.'
    )

    # Running the client
    run_parser = subparsers.add_parser(
        'run', description='Run the client'
    )
    run_parser.add_argument(
        'config', help='The configuration file to use for setup.'
    )

    return parser.parse_args(args)
632b3702f6a3b3837b58ca39b7290081e7a3fb94
6,443
def env_start():
    """Start an episode and return the initial state (a position index)."""
    global maze, current_position
    current_position = 500
    return current_position
ed377adedc48159607a4bb08ea6e3624575ec723
6,444
import math


def plagdet_score(rec, prec, gran):
    """Combines recall, precision, and granularity to allow for ranking."""
    if (rec == 0 and prec == 0) or prec < 0 or rec < 0 or gran < 1:
        return 0
    return ((2 * rec * prec) / (rec + prec)) / math.log(1 + gran, 2)
f8debf876d55296c3945d0d41c7701588a1869b6
6,446
def get_matrix_header(filename):
    """
    Returns the entries, rows, and cols of a matrix market file.
    """
    with open(filename) as f:
        entries = 0
        rows = 0
        cols = 0
        for line in f.readlines():
            if line.startswith('%'):
                continue
            line = line.split()
            entries = int(line[0])
            rows = int(line[1])
            cols = int(line[2])
            break  # the first non-comment line is the size line
    return entries, rows, cols
66200661715cb9a67522ced7b13d4140a3905c28
6,447
def minOperations(n):
    """Finds the minimum number of operations needed to reach n characters."""
    if type(n) != int or n <= 1:
        return 0
    res = 0
    i = 2
    while i <= n:
        if n % i == 0:
            res += i
            n //= i  # integer division keeps n an int while factoring it out
        else:
            i += 1
    return res
c26cbd71c6e675adea79938b6e7248a4c093e63f
6,449
import numpy


def gfalternate_createdataandstatsdict(ldt_tower, data_tower, attr_tower, alternate_info):
    """
    Purpose:
     Creates the data_dict and stat_dict to hold data and statistics during
     gap filling from alternate data sources.
    Usage:
    Side effects:
    Called by:
    Calls:
    Author: PRI
    Date: May 2015
    """
    data_dict = {}
    stat_dict = {}
    label_tower = alternate_info["label_tower"]
    label_composite = alternate_info["label_composite"]
    data_dict["DateTime"] = {"data": ldt_tower}
    data_dict[label_tower] = {"attr": attr_tower,
                              "output_list": [label_tower, label_composite],
                              "data": data_tower}
    data_dict[label_composite] = {"data": numpy.ma.masked_all_like(data_tower),
                                  "fitcorr": numpy.ma.masked_all_like(data_tower),
                                  "attr": attr_tower}
    stat_dict[label_tower] = {"startdate": alternate_info["startdate"],
                              "enddate": alternate_info["enddate"]}
    stat_dict[label_composite] = {"startdate": alternate_info["startdate"],
                                  "enddate": alternate_info["enddate"]}
    return data_dict, stat_dict
a1690fb9e53abcd6b23e33046d82c10a2ca7abc0
6,450
def get_satellite_params(platform=None):
    """
    Helper function to generate Landsat or Sentinel query information
    for quick use during NRT cube creation or sync only.

    Parameters
    ----------
    platform : str
        Name of a satellite platform, Landsat or Sentinel only.

    Returns
    -------
    params : dict
        Collections, bands and resolution for the given platform.
    """
    # check platform name
    if platform is None:
        raise ValueError('Must provide a platform name.')
    elif platform.lower() not in ['landsat', 'sentinel']:
        raise ValueError('Platform must be Landsat or Sentinel.')

    # set up dict
    params = {}

    # get params depending on platform
    if platform.lower() == 'landsat':
        collections = [
            'ga_ls5t_ard_3',
            'ga_ls7e_ard_3',
            'ga_ls8c_ard_3']
        bands = [
            'nbart_red',
            'nbart_green',
            'nbart_blue',
            'nbart_nir',
            'nbart_swir_1',
            'nbart_swir_2',
            'oa_fmask']
        resolution = 30
        params = {
            'collections': collections,
            'bands': bands,
            'resolution': resolution}
    else:
        collections = [
            's2a_ard_granule',
            's2b_ard_granule']
        bands = [
            'nbart_red',
            'nbart_green',
            'nbart_blue',
            'nbart_nir_1',
            'nbart_swir_2',
            'nbart_swir_3',
            'fmask']
        resolution = 10
        params = {
            'collections': collections,
            'bands': bands,
            'resolution': resolution}

    return params
2298c100eed431a48a9531bc3038c5ab8565025d
6,451
import os
import http


async def process_file(path, request_headers):
    """Serves a file when doing a GET request with a valid path."""
    server_root = "/opt/vosk-server/websocket/web"
    MIME_TYPES = {
        "html": "text/html",
        "js": "text/javascript",
        "css": "text/css"
    }

    if "Upgrade" in request_headers:
        return  # Probably a WebSocket connection

    if path == '/':
        path = '/index.html'

    response_headers = [
        ('Server', 'asyncio websocket server'),
        ('Connection', 'close'),
    ]

    # Derive full system path
    full_path = os.path.realpath(os.path.join(server_root, path[1:]))

    # Validate the path
    if os.path.commonpath((server_root, full_path)) != server_root or \
            not os.path.exists(full_path) or not os.path.isfile(full_path):
        print("HTTP GET {} 404 NOT FOUND".format(full_path))
        return http.HTTPStatus.NOT_FOUND, [], b'404 NOT FOUND'

    # Guess file content type
    extension = full_path.split(".")[-1]
    mime_type = MIME_TYPES.get(extension, "application/octet-stream")
    response_headers.append(('Content-Type', mime_type))

    # Read the whole file into memory and send it out
    with open(full_path, 'rb') as file_handle:
        body = file_handle.read()
    response_headers.append(('Content-Length', str(len(body))))
    print("HTTP GET {} 200 OK".format(path))
    return http.HTTPStatus.OK, response_headers, body
d51d2ff1ec27185c31fc4eff3dfed8243e6d1764
6,453
import requests


def get_token(corp_id: str, corp_secret: str):
    """Fetch the access_token.

    https://open.work.weixin.qq.com/api/doc/90000/90135/91039
    """
    req = requests.get(
        f'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corp_id}&corpsecret={corp_secret}'
    )
    return req.json().get('access_token')
9a9c3fcdb74312b5d2d7c62588aea3cf78796ec9
6,456
def list_agg(object_list, func):
    """Aggregation function for a list of objects."""
    ret = []
    for elm in object_list:
        ret.append(func(elm))
    return ret
b2d8eef9c795e4700d111a3949922df940435809
6,459
def parse_word(word: str) -> str:
    """Compile a word of uppercase letters as numeric digits.
    Non-uppercase-letter words are returned unchanged."""
    if not word.isupper():
        return word
    compiled_word = " + ".join([letter + "*" + str(10 ** index)
                                for index, letter in enumerate(word[::-1])])
    return "(" + compiled_word + ")"
aa246c7d5e92035f14476327f5b2b694b383f7e1
6,460
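A quick usage sketch for the parse_word helper above, as used when compiling cryptarithm words into weighted sums (the example calls are illustrative, not part of the dataset row):

# Each letter becomes a digit variable weighted by its position.
print(parse_word("SEND"))  # -> (D*1 + N*10 + E*100 + S*1000)
print(parse_word("+"))     # -> + (non-uppercase words pass through unchanged)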
import os


def check_directory(directory, verbose):
    """
    Inputs:
        directory- the directory for the graphs to be placed
        verbose- the verbose flag

    Checks to see if the graph directory exists. If it doesn't exist,
    the folder is created.
    """
    cwd = os.getcwd() + '/'
    if not os.path.isdir(cwd + directory):
        if verbose:
            print('Making output directory\n')
        os.mkdir(cwd + directory)
    else:
        directory_exists = True
        i = 0
        while directory_exists:
            i += 1
            if not os.path.isdir(cwd + directory + f'_{i}'):
                directory = directory + f'_{i}'
                os.mkdir(cwd + directory)
                directory_exists = False
    return directory
2d76f00f4438a97e4fc91d3b23b74c94015385c5
6,461
def relu_backward(dout, cache):
    """
    Backward pass for the ReLU function layer.

    Arguments:
        dout: numpy array of gradient of output passed from next layer, any shape
        cache: tuple (x)

    Output:
        dx: numpy array of gradient for input, same shape as dout
    """
    x = cache
    dx = dout * (x > 0)  # ReLU passes gradient only where the input was positive
    return dx
3384ddf789ed2a31e25a4343456340a60e5a6e11
6,462
import os


def via_sudo():
    """
    Return `True` if Blueprint was invoked via `sudo`(8), which indicates
    that privileges must be dropped when writing to the filesystem.
    """
    return 'SUDO_UID' in os.environ \
        and 'SUDO_GID' in os.environ \
        and 'blueprint' in os.environ.get('SUDO_COMMAND', '')
c30c3e21f5bd780e42c37a0248a1406edf44bd44
6,463
import collections


def find_identities(l):
    """
    Takes in a list and returns a dictionary with seqs as keys and positions
    of identical elements in the list as values.

    argvs:
        l = list, e.g. mat[:, x]
    """
    # the number of items in the list will be the number of unique types
    uniq = [item for item, count in collections.Counter(l).items()]
    # Initialise a dictionary that will hold the results
    identDict = {}
    for item in uniq:
        identDict[item] = [x for x in range(len(l)) if l[x] == item]
    return identDict
db7b64cc430ab149de7d14e4f4a88abafbadbe34
6,465
def all_children(wid):
    """Return all children of a widget."""
    _list = wid.winfo_children()
    for item in _list:
        if item.winfo_children():
            _list.extend(item.winfo_children())
    return _list
ca52791b06db6f2dd1aeedc3656ecf08cb7de6d8
6,466
def getGeneCount(person, geneSetDictionary):
    """
    Determines how many genes a person is assumed to have based upon
    the query information provided.
    """
    if person in geneSetDictionary["no_genes"]:
        gene_count = 0
    elif person in geneSetDictionary["one_gene"]:
        gene_count = 1
    else:
        gene_count = 2
    return gene_count
0fef236dd805ae77f04a22670752031af15ca5b2
6,468
def _mgSeqIdToTaxonId(seqId):
    """
    Extracts a taxonId from a sequence id used in the Amphora or Silva mg
    databases (ends with '|ncbid:taxonId').

    @param seqId: sequence id used in mg databases
    @return: taxonId
    @rtype: int
    """
    return int(seqId.rsplit('|', 1)[1].rsplit(':', 1)[1])
2ce74f453e3496c043a69b4205f258f06bfd0452
6,471
def url_to_filename(url):
    """Converts a URL to a valid filename."""
    return url.replace('/', '_')
db3023c582590a47a6adc32501a2e3f5fd72f24f
6,472
def get_definitions_query_filter(request_args):
    """Get query_filter for alert_alarm_definition list route."""
    query_filters = None
    display_retired = False
    valid_args = ['array_name', 'platform_name', 'instrument_name',
                  'reference_designator']
    # Process request arguments
    if 'retired' in request_args:
        if (request_args.get('retired')).lower() == 'true':
            display_retired = True
    key = None
    key_value = None
    for key in valid_args:
        if key in request_args:
            tmp = request_args.get(key)
            if tmp:
                key_value = str(tmp)
                break
    # If query_filters is to be created, create it
    if key_value is not None or display_retired:
        query_filters = {}
        if key_value is not None:
            query_filters[key] = key_value
        if display_retired:
            query_filters['retired'] = True
    return query_filters
a087cbd9ca6ffe9b38afc2d8802c12e4dfd47e50
6,475
import re


def targetInCol(df, target):
    """
    Return meta information (Line or Area) from information in a column of df.

    Arguments:
        df -- csv Promax geometry file (as a DataFrame)
        target -- meta information to get (Line or Area)
    """
    c = list(df.columns)
    ptarget = r'' + re.escape(target)
    i = [i for i, x in enumerate(c) if re.search(ptarget, x, re.I)]
    return df.iloc[0][i[0]] if i else None
5d40cf251bd2a7593a46a5b63b5de3a56f8cec29
6,476
def add_transformer_enc_hyperparams_args(parser):
    """Only applicable when args.model_name is 'transformer_enc'"""
    parser.add_argument('--hid_dim', type=int, default=128)
    parser.add_argument('--num_enc_layers', type=int, default=3)
    parser.add_argument('--num_enc_heads', type=int, default=8)
    parser.add_argument('--enc_pf_dim', type=int, default=256)
    parser.add_argument('--enc_dropout', type=float, default=0.1)
    parser.add_argument('--fc_dim', type=int, default=64,
                        help='hidden size of the linear layer added on top')
    return parser
bc38c3cc1d9fc7e87cebfbf7bdc74f8e9d0a124e
6,478
import os


def get_data_path():
    """
    Return the path to the project's data folder.

    :return: The path to the data folder
    """
    project_folder = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    return os.path.join(project_folder, "data")
351d11a22b56567f59858e0e0f092661beedaed6
6,479
import math


def calc_mupen_res(N, region_w, region_h):
    """Find res to fit N mupen instances in region."""
    results = []
    for row_length in range(1, N + 1):
        col_length = math.ceil(N / float(row_length))
        instance_width = int(math.floor(min(640, region_w / float(row_length))))
        instance_height = int(math.floor(instance_width * (480.0 / 640.0)))
        if (instance_height * col_length <= region_h
                and instance_width * row_length <= region_w):
            results.append((instance_width, instance_height))
    return max(results)
35b5e739102097d856b7c2e154516d4e866a1567
6,480
def package_dir_path(path):
    """Return package path to package install directory."""
    return path + '/.pkg'
edd4b97256ccf02a3f1165b99cae746826e8aee0
6,481
def get_params_out_of_range(
    params: list, lower_params: list, upper_params: list
) -> list:
    """
    Check if any parameter specified by the user is out of the range that was defined.

    :param params: List of parameters read from the .inp file
    :param lower_params: List of lower bounds provided by the user in the .inp file
    :param upper_params: List of upper bounds provided by the user in the .inp file
    :return: List of indices of parameters out of the defined range
    """
    params_out = [
        i
        for i in range(len(lower_params))
        if params[i] < lower_params[i] or params[i] > upper_params[i]
    ]
    return params_out
67a8ca57a29da8b431ae26f863ff8ede58f41a34
6,483
from pathlib import Path


def read_file(in_file: str):
    """Read input file."""
    file_path = Path(in_file)
    data = []
    count = 0
    with open(file_path) as fp:
        for line in fp:
            data.append(line)
            count = count + 1
    return ''.join(data), count
4fbae8f1af7800cb5f89784a0230680a1d6b139a
6,484
def limit(value, limits):
    """
    :param <float> value: value to limit
    :param <list>/<tuple> limits: (min, max) limits to which to restrict the value
    :return <float>: value from within limits; if the input value readily fits
        into the limits it is left unchanged, if it exceeds a limit on either
        boundary it is set to that boundary
    """
    if value < limits[0]:
        value = limits[0]
    elif value > limits[1]:
        value = limits[1]
    return value
55fb603edb478a26b238d7c90084e9c17c3113b8
6,485
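A minimal usage sketch for the limit clamp above (hypothetical values):

print(limit(2.5, (0.0, 3.0)))   # -> 2.5 (already inside the range)
print(limit(5.0, (0.0, 3.0)))   # -> 3.0 (clamped to the upper bound)
print(limit(-1.0, (0.0, 3.0)))  # -> 0.0 (clamped to the lower bound)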
import random


def DiceRoll():
    """A function to simulate rolling of one or more dice."""
    def Roll():
        return random.randint(1, 6)

    print("\nRoll Dice: Simulates rolling of one or more dice.")
    num = 1
    try:
        num = int(input("\nEnter the number of dice you wish to roll: "))
    except ValueError:
        print("Input should be a number.")
    if num > 0:
        out = []  # list to store roll output
        i = 1
        while i <= num:
            out.append(str(Roll()))
            i += 1
        print("\nRoll Result(s)")
        print("============")
        print(", ".join(out))
90e9587473fb06541ec9daa2ec223759940a5ecb
6,486
import sys


def do_verify(options, _fuse):
    """
    @param options: Commandline options
    @type options: object

    @param _fuse: FUSE wrapper
    @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    """
    tableOption = _fuse.operations.getTable("option")
    curHashFunc = tableOption.get("hash_function")

    tableHash = _fuse.operations.getTable("hash")
    tableHashCT = _fuse.operations.getTable("hash_compression_type")
    tableBlock = _fuse.operations.getTable("block")

    _fuse.operations.hash_function = curHashFunc

    hashCount = tableHash.get_count()
    if _fuse.getOption("verbosity") > 0:
        print("Ready to verify %s blocks." % hashCount)

    cur = tableHash.getCursor(True)
    cur.execute("SELECT * FROM `%s`" % tableHash.getName())

    cnt = equal = 0
    lastPrc = ""

    for hashItem in iter(cur.fetchone, None):
        cnt += 1

        blockItem = tableBlock.get(hashItem["id"])
        hashCT = tableHashCT.get(hashItem["id"])
        blockData = _fuse.decompressData(
            _fuse.operations.getCompressionTypeName(hashCT["type_id"]),
            blockItem["data"])

        newHash = _fuse.operations.do_hash(blockData)
        if newHash == hashItem["value"]:
            equal += 1

        prc = "%6.2f%%" % (cnt * 100.0 / hashCount)
        if prc != lastPrc:
            lastPrc = prc
            if _fuse.getOption("verbosity") > 0:
                sys.stdout.write("\r%s " % prc)
                sys.stdout.flush()

    if _fuse.getOption("verbosity") > 0:
        sys.stdout.write("\n")
        sys.stdout.flush()

    if _fuse.getOption("verbosity") > 0:
        print("Processed %s hashes, equal %s blocks." % (cnt, equal,))

    if hashCount != cnt:
        print("Something went wrong?")
        return 1

    if cnt != equal:
        print("Data corrupted?! %s block hashes not equals!" % (cnt - equal))
        return 1
    else:
        print("All data in good hash ;)")
    return 0
cba45a78cf57422bcca2b01909666fe0fdcb72a1
6,488
def is_byte_array(value, count):
    """Returns whether the given value is the Python equivalent of a byte array."""
    return (isinstance(value, tuple) and len(value) == count
            and all(map(lambda x: x >= 0 and x <= 255, value)))
16793415885ea637aecbeeefe24162d6efe9eb39
6,489
def _FilterSubstructureMatchByAtomMapNumbers(Mol, PatternMol, AtomIndices, AtomMapIndices):
    """Filter substructure match atom indices by atom map indices
    corresponding to atom map numbers."""
    if AtomMapIndices is None:
        return list(AtomIndices)
    return [AtomIndices[Index] for Index in AtomMapIndices]
3594a11452848c9ae11f770fa560fe29d68aa418
6,490
def worker_mode(self):
    """
    bool: Whether or not all MPI ranks are in worker mode, in which all
    worker ranks are listening for calls from the controller rank. If
    *True*, all workers are continuously listening for calls made with
    :meth:`~_make_call` until set to *False*. By default, this is *False*.

    Setting this value to *True* allows for easier use of *PRISM* in
    combination with serial/OpenMP codes (like MCMC methods).
    """
    return bool(self._worker_mode)
45d256e47bfeffe9e3878297c6009061801e5d8d
6,491
from typing import Any
from typing import Set
import inspect
from unittest.mock import Mock


def _get_default_arguments(obj: Any) -> Set[str]:
    """Get the names of the default arguments on an object

    The default arguments are determined by comparing the object to one
    constructed from the object's class's initializer with Mocks for all
    positional arguments

    Arguments:
        obj: the object to find default arguments on

    Returns:
        the set of default arguments
    """
    cls = type(obj)
    obj_sig = inspect.signature(cls.__init__)
    try:
        mocked_obj = cls(
            **{
                param.name: Mock()
                for param in obj_sig.parameters.values()
                if param.default == param.empty
                and param.kind != param.VAR_KEYWORD
                and param.name != "self"
            }
        )
    except Exception:
        return set()
    return {
        key
        for key, value in obj.__dict__.items()
        if key in mocked_obj.__dict__ and mocked_obj.__dict__[key] == value
    }
483fe82dd79aadfe1da387fb0c602beb503f344b
6,493
def collatz(number):
    """If number is even return (number // 2), else (3 * number + 1).

    Args:
        number (int): number to collatz

    Returns:
        int: collatz number
    """
    if (number % 2) == 0:
        print(number // 2)
        return number // 2
    print(3 * number + 1)
    return 3 * number + 1
221fd238bd6d0c40c9cb80be2c58746bb206c17b
6,494
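A short sketch of iterating the collatz step above down to 1 (the driver loop is an assumption for illustration, not part of the row):

n = 7
while n != 1:
    n = collatz(n)  # prints each step: 22, 11, 34, 17, 52, 26, 13, ...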
def get_disk_at(board, position):
    """
    Return the disk at the given position on the given board.
    - None is returned if there is no disk at the given position.
    - The function also returns None if no disk can be obtained from the
      given board at the given position. This is for instance the case if
      a string, a number, ... is passed instead of a board or a position,
      or if the given position is outside the boundaries of the given board.

    ASSUMPTIONS
    - None (same remark as for the function dimension)
    """
    if not isinstance(position, (tuple, list)) or not isinstance(board, dict):
        return None
    # dict.get returns None both for empty squares and for positions
    # outside the board, matching the documented behaviour
    return board.get(tuple(position))
4b793ce1947b2f71d666b1d1676ca894f43c3b58
6,495
def firstUniqChar(s):
    """
    :type s: str
    :rtype: int
    """
    if len(s) == 0:
        return -1
    if len(s) == 1:
        return 0
    hash_table = {}
    for i in s:
        if i not in hash_table:
            hash_table[i] = 1
        else:
            hash_table[i] += 1
    for i in s:
        if hash_table[i] <= 1:
            return s.find(i)
    return -1
25148de95099094991339bb0fe6815644e5b94cb
6,496
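A usage sketch for firstUniqChar above (hypothetical inputs):

print(firstUniqChar("leetcode"))      # -> 0 ('l' never repeats)
print(firstUniqChar("loveleetcode"))  # -> 2 ('v' is the first unique char)
print(firstUniqChar("aabb"))          # -> -1 (no unique character)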
from typing import Optional
from typing import Tuple
import re


def parse_test_stats_from_output(output: str, fail_type: Optional[str]) -> Tuple[int, int]:
    """Parse tasks output and determine test counts.

    Return tuple (number of tests, number of test failures). Default
    to the entire task representing a single test as a fallback.
    """
    # pytest
    m = re.search(r'^=+ (.*) in [0-9.]+ seconds =+\n\Z', output, re.MULTILINE)
    if m:
        counts = {}
        for part in m.group(1).split(', '):
            # e.g., '3 failed, 32 passed, 345 deselected'
            count, key = part.split()
            counts[key] = int(count)
        return (sum(c for k, c in counts.items() if k != 'deselected'),
                counts.get('failed', 0))

    # myunit
    m = re.search('^([0-9]+)/([0-9]+) test cases failed(, ([0-9]+) skipped)?.$',
                  output, re.MULTILINE)
    if m:
        return int(m.group(2)), int(m.group(1))
    m = re.search('^([0-9]+) test cases run(, ([0-9]+) skipped)?, all passed.$',
                  output, re.MULTILINE)
    if m:
        return int(m.group(1)), 0

    # Couldn't find test counts, so fall back to a single test per task.
    if fail_type is not None:
        return 1, 1
    else:
        return 1, 0
8ec67d226c2280eb08de3589cba7b6aa0a09024c
6,497
from typing import List
from typing import Tuple
from typing import Dict


def _constraint_items_missing_from_collection(
    constraints: List[Tuple], collection: Dict[str, int]
) -> List[str]:
    """
    Determine the constrained items that are not specified in the collection.
    """
    constrained_items = set()
    for constraint in constraints:
        if len(constraint) > 1:
            constrained_items.add(constraint[1])
    return sorted(constrained_items - collection.keys())
918667f1e8b001637c9adf00ef5323b2e8587775
6,498
import csv


def main(file_in, file_out):
    """
    Read in lines, flatten list of lines to list of words,
    sort and tally words, write out tallies.
    """
    with open(file_in, 'r') as f_in:
        word_lists = [line.split() for line in f_in]
    # Flatten the list
    # http://stackoverflow.com/questions/952914/\
    # making-a-flat-list-out-of-list-of-lists-in-python
    words = [word for word_list in word_lists for word in word_list]
    counts = map(words.count, words)
    tallies = sorted(set(zip(words, counts)))
    with open(file_out, 'w') as f_out:
        writer = csv.writer(f_out, quoting=csv.QUOTE_NONNUMERIC)
        for (word, count) in tallies:
            writer.writerow([word, count])
    return None
225dd5d5b4c2bcb158ee61aa859f78e1e608a5fe
6,500
def load_atomic(val):
    """
    Load a std::atomic<T>'s value.
    """
    valty = val.type.template_argument(0)
    # XXX This assumes std::atomic<T> has the same layout as a raw T.
    return val.address.reinterpret_cast(valty.pointer()).dereference()
307bcc9d3eae2eede6a8e2275104280a1e7b4b94
6,501
def dataId_to_dict(dataId):
    """Parse an LSST dataId to a dictionary.

    Args:
        dataId (dataId): The LSST dataId object.

    Returns:
        dict: The dictionary version of the dataId.
    """
    return dataId.to_simple().dict()["dataId"]
77b7566492b80a8c6e2becacafff36737c8a7256
6,502
def getHoliday(holidayName):
    """Returns a specific holiday.

    Args:
        holidayName (str): The name of the holiday to return. Case-sensitive.

    Returns:
        HolidayModel: The holiday, as a HolidayModel object, or None if not found.
    """
    print(holidayName)
    return None
15b67fd6ac607d1ff12a216cc3c4baab61305be6
6,503
import six
import hashlib


def hash_mod(text, divisor):
    """
    Returns the modulo of dividing the text's md5 hash by the given divisor.
    """
    if isinstance(text, six.text_type):
        text = text.encode('utf8')
    md5 = hashlib.md5()
    md5.update(text)
    digest = md5.hexdigest()
    return int(digest, 16) % divisor
3f127837bb072df5ee609b3afa80dd04e4f7b794
6,504
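hash_mod gives a stable bucket assignment for a string; a small sketch (the divisor 16 makes the result checkable by hand, since it equals the last hex digit of the md5 digest):

print(hash_mod('hello', 16))  # -> 2 (md5('hello') ends in ...c592)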
def _get_exploration_memcache_key(exploration_id):
    """Returns a memcache key for an exploration."""
    return 'exploration:%s' % exploration_id
1400607cc86f84c242201c9c9fe36a7a06cd2357
6,505
def pairs_from_array(a):
    """
    Given an array of strings, create a list of pairs of elements from the array.

    Creates all possible combinations without symmetry (given pair [a, b],
    it does not create [b, a]) nor repetition (e.g. [a, a]).

    :param a: Array of strings
    :return: list of pairs of strings
    """
    pairs = list()
    for i in range(len(a)):
        for j in range(len(a[i + 1:])):
            pairs.append([a[i], a[i + 1 + j]])
    return pairs
50c489d660a7e82c18baf4800e599b8a3cd083f0
6,507
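A quick check of pairs_from_array above (hypothetical input):

print(pairs_from_array(["a", "b", "c"]))
# -> [['a', 'b'], ['a', 'c'], ['b', 'c']]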
def shared_options(option_list):
    """Define decorator for common options."""
    def _shared_options(func):
        for option in reversed(option_list):
            func = option(func)
        return func
    return _shared_options
7ef551ea9879b708e6b449ce1155d47b662efd3d
6,509
from typing import Any


def complex_key(c: complex) -> Any:
    """Defines a sorting order for complex numbers."""
    return c.real != int(c.real), c.real, c.imag
55c17b0d4adf8bcfb39b50c66d5bd8133f5bb814
6,510
def get_campaign_goal(campaign, goal_identifier):
    """Returns goal from given campaign and goal_identifier.

    Args:
        campaign (dict): The running campaign
        goal_identifier (string): Goal identifier

    Returns:
        dict: Goal corresponding to goal_identifier in respective campaign
    """
    if not campaign or not goal_identifier:
        return None
    for goal in campaign.get("goals"):
        if goal.get("identifier") == goal_identifier:
            return goal
    return None
1a8738416ee8187ad2a6a977b36b68f66052bfe8
6,511
def getChanprofIndex(chanprof, profile, chanList):
    """
    List of indices into the RTTOV chanprof(:) array corresponding to the
    chanList. NB: this assumes you've checked the chanList against chanprof
    already.
    """
    ilo = sum(map(len, chanprof[:profile - 1]))
    ichanprof = []
    for c in chanList:
        ichanprof.append(ilo + chanprof[profile - 1].index(c))
    return ichanprof
e61e210e8b05fdfbf3a769f4b5b388d765d436b9
6,512
import argparse


def parse_args():
    """Accepts path arguments for eml or directory."""
    outpath = "C:\\Utilities\\Logs"
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", help="Path to EML file", required=False)
    parser.add_argument("-p", "--path", help="Directory holding eml files",
                        required=False)
    parser.add_argument("-o", "--out", help="Directory for results",
                        const=outpath, nargs='?', required=False,
                        default=outpath)
    args = parser.parse_args()
    return args
61c4fcc5bd4278a2dd3c32aa3fd9a779f0f2ff3e
6,513
import bisect


def find_dataset_ind(windows_ds, win_ind):
    """Taken from torch.utils.data.dataset.ConcatDataset."""
    return bisect.bisect_right(windows_ds.cumulative_sizes, win_ind)
76abcbdf9718cc59f1d2b7ca8daacc062970b253
6,514
def _unsigned16(data, littleEndian=False):
    """Return a 16-bit unsigned integer with selectable endianness."""
    assert len(data) >= 2
    if littleEndian:
        b0 = data[1]
        b1 = data[0]
    else:
        b0 = data[0]
        b1 = data[1]
    val = (b0 << 8) + b1
    return val
22feb074aca7f4ab7d489eacb573c3653cad9272
6,515
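A byte-order sketch for _unsigned16 above (hypothetical two-byte input):

data = bytes([0x12, 0x34])
print(_unsigned16(data))                     # -> 4660 (0x1234, big-endian default)
print(_unsigned16(data, littleEndian=True))  # -> 13330 (0x3412)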
def calculate_term_frequencies(tokens):
    """Given a series of `tokens`, produces a sorted list of tuples in the
    format of (term frequency, token).
    """
    frequency_dict = {}
    for token in tokens:
        frequency_dict.setdefault(token, 0)
        frequency_dict[token] += 1
    tf = []
    for token, count in frequency_dict.items():
        tf.append((count, token))
    return sorted(tf, reverse=True)
b764175cd59fe25c4a87576faee2a76273097c5e
6,516
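A usage sketch for calculate_term_frequencies above; note that ties on count are broken by reverse-alphabetical token order, since the tuples sort lexicographically:

tokens = "the cat sat on the mat".split()
print(calculate_term_frequencies(tokens))
# -> [(2, 'the'), (1, 'sat'), (1, 'on'), (1, 'mat'), (1, 'cat')]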
def get_dlons_from_case(case: dict):
    """Pull list of destination longitudes from a test case."""
    dlons = [geo[1] for geo in case["destinations"]]
    return dlons
666ab789761e99749b4852a51f5d38c35c66bd2a
6,517
def horiLine(lineLength, lineWidth=None, lineCharacter=None, printOut=None):
    """Generate a horizontal line.

    Args:
        lineLength (int): The length of the line, or how many characters
            the line will have.
        lineWidth (int, optional): The width of the line, or how many lines
            of text the line will take up. Defaults to 1.
        lineCharacter (str, optional): The string, character, or number the
            line will use as a single character. Defaults to '-'.
        printOut (bool, optional): Print out the generated dummy text.
            Defaults to False.

    Returns:
        The horizontal line created.
    """
    meaningless_text = ""
    lineGenerated = ""
    # fall back to a width of 1 if none was given
    width = 1 if lineWidth is None else lineWidth
    # fall back to '-' if no character was given
    character = "-" if lineCharacter is None else lineCharacter
    for i in range(width):
        # generate a line
        for char in range(lineLength):
            lineGenerated += character
        if width > 1:
            # if line width is greater than 1, append a new line character
            lineGenerated += "\n"
    meaningless_text += lineGenerated
    # print out the generated text if requested, then return it either way
    if printOut:
        print(meaningless_text)
    return meaningless_text
64a4e9e22b480cbe3e038464fe6e0061e023d2c2
6,520
import math


def _rescale_read_counts_if_necessary(n_ref_reads, n_total_reads, max_allowed_reads):
    """Ensures that n_total_reads <= max_allowed_reads, rescaling if necessary.

    This function ensures that n_total_reads <= max_allowed_reads. If
    n_total_reads is <= max_allowed_reads, n_ref_reads and n_total_reads are
    just returned. However, if n_total_reads > max_allowed_reads, then
    n_ref_reads and n_total_reads are rescaled to new values n_ref_reads' and
    n_total_reads' so that n_total_reads' == max_allowed_reads and
    n_ref_reads' / n_total_reads' ~ n_ref_reads / n_total_reads.

    Args:
        n_ref_reads: int. Number of reference supporting reads.
        n_total_reads: int. Total number of reads.
        max_allowed_reads: int. The maximum value allowed for n_total after
            rescaling, if necessary.

    Returns:
        New values for n_ref_reads and n_total_reads.
    """
    if n_total_reads > max_allowed_reads:
        ratio = n_ref_reads / (1.0 * n_total_reads)
        n_ref_reads = int(math.ceil(ratio * max_allowed_reads))
        n_total_reads = max_allowed_reads
    return n_ref_reads, n_total_reads
d09b343cee12f77fa06ab467335a194cf69cccb4
6,521
import os
from pathlib import Path


def getFilesFromPath(path):
    """Returns all files corresponding to the path parameter."""
    raw = os.listdir(Path(path))
    files_to_return = list()
    for i in raw:
        if i != ".DS_Store":
            files_to_return.append(i)
    return files_to_return
a2248b0a57a722fd9412bb80f49d3d8e8b5b6b69
6,522
import logging
import os


def get_pages(root_path):
    """
    Reads the content folder structure and returns a list of dicts, one per
    page. Each page dict has these keys:

        path: list of logical uri path elements
        uri: URI of the final rendered page as string
        file_path: physical path of the file, as valid from within this script

    Won't return anything for the home page and other index pages.
    """
    logging.info("Getting pages from %s" % root_path)
    num_root_elements = len(root_path.split(os.sep))
    pages = []
    for root, dirs, files in os.walk(root_path):
        if ".git" in dirs:
            dirs.remove(".git")
        if "img" in dirs:
            dirs.remove("img")
        for filename in files:
            if not filename.endswith(".md"):
                continue
            path = root.split(os.sep)[num_root_elements:]
            file_path = root + os.sep + filename
            if filename not in ("index.md", "_index.md"):
                # append name of file (without suffix) as last uri segment
                segment = filename[:-3]
                path.append(segment)
            uri = "/" + "/".join(path) + "/"
            record = {
                "path": path,
                "uri": uri,
                "file_path": file_path
            }
            pages.append(record)
    return pages
acc6887638e2d6f6950d4e38556b155e22a6999f
6,524
def load_dict(path):
    """
    Load a dictionary and a corresponding reverse dictionary from the given
    file where line number (0-indexed) is key and line string is value.
    """
    retdict = list()
    rev_retdict = dict()
    with open(path) as fin:
        for idx, line in enumerate(fin):
            text = line.strip()
            retdict.append(text)
            rev_retdict[text] = idx
    return retdict, rev_retdict
31a67c2a28518a3632a47ced2889150c2ce98a78
6,525
def strip(string, p=" \t\n\r"):
    """strip(string, p=" \t\n\r")"""
    return string.strip(p)
1be2a256394455ea235b675d51d2023e8142415d
6,528
def get_square(array, size, y, x, position=False, force=False, verbose=True):
    """
    Return a square subframe from a 2d array or image.

    Parameters
    ----------
    array : 2d array_like
        Input frame.
    size : int
        Size of the subframe.
    y : int
        Y coordinate of the center of the subframe (obtained with the
        function ``frame_center``).
    x : int
        X coordinate of the center of the subframe (obtained with the
        function ``frame_center``).
    position : bool, optional
        If set to True return also the coordinates of the bottom-left vertex.
    force : bool, optional
        Size and the size of the 2d array must be both even or odd. With
        ``force`` set to True this condition can be avoided.
    verbose : bool, optional
        If True, warning messages might be shown.

    Returns
    -------
    array_out : array_like
        Sub array.
    y0, x0 : int
        [position=True] Coordinates of the bottom-left vertex.
    """
    size_init = array.shape[0]  # assuming square frames

    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array.')
    if not isinstance(size, int):
        raise TypeError('`Size` must be integer')
    if size >= size_init:  # assuming square frames
        msg = "`Size` is equal to or bigger than the initial frame size"
        raise ValueError(msg)

    if not force:
        # Even input size
        if size_init % 2 == 0:
            # Odd size
            if size % 2 != 0:
                size += 1
                if verbose:
                    print("`Size` is odd (while input frame size is even). "
                          "Setting `size` to {} pixels".format(size))
        # Odd input size
        else:
            # Even size
            if size % 2 == 0:
                size += 1
                if verbose:
                    print("`Size` is even (while input frame size is odd). "
                          "Setting `size` to {} pixels".format(size))
    else:
        # Even input size
        if size_init % 2 == 0:
            # Odd size
            if size % 2 != 0 and verbose:
                print("WARNING: `size` is odd while input frame size is even. "
                      "Make sure the center coordinates are set properly")
        # Odd input size
        else:
            # Even size
            if size % 2 == 0 and verbose:
                print("WARNING: `size` is even while input frame size is odd. "
                      "Make sure the center coordinates are set properly")

    # wing is added to the sides of the subframe center
    wing = (size - 1) / 2

    y0 = int(y - wing)
    y1 = int(y + wing + 1)  # +1 cause endpoint is excluded when slicing
    x0 = int(x - wing)
    x1 = int(x + wing + 1)

    if y0 < 0 or x0 < 0 or y1 > size_init or x1 > size_init:  # assuming square frames
        raise RuntimeError('square cannot be obtained with size={}, y={}, x={}'
                           ''.format(size, y, x))

    array_out = array[y0: y1, x0: x1].copy()

    if position:
        return array_out, y0, x0
    else:
        return array_out
8d83d4d16241e118bbb65593c14f9f9d5ae0834c
6,530
import json


def read_json(json_file):
    """
    Read input JSON file and return the dict.
    """
    json_data = None
    with open(json_file, 'rt') as json_fh:
        json_data = json.load(json_fh)
    return json_data
4e1ea153d040ec0c3478c2d1d3136eb3c48bfe1c
6,531
def alt_or_ref(record, samples: list):
    """
    Takes in a single record in a vcf file and returns the sample names
    divided into two lists: ones that have the reference snp state and
    ones that have the alternative snp state.

    Parameters
    ----------
    record
        the record supplied by the vcf reader
    samples : list
        list of sample names

    Returns
    -------
    ref_group, alt_group : list
        lists of samples divided by ref or alt snp state
    """
    tracker = 0
    ref_group = []
    alt_group = []
    for call in record.calls:
        state = int(call.data.get('GT'))
        sample_name = samples[tracker]
        if state == 0:
            ref_group.append(sample_name)
        elif state == 1:
            alt_group.append(sample_name)
        else:
            print("there is a problem reading the state information")
            raise SystemExit(0)
        tracker += 1
    return ref_group, alt_group
abaccfeef02ee625d103da88b23fce82a40bc04c
6,534
def is_const_component(record_component):
    """Determines whether a group or dataset in the HDF5 file is constant.

    Parameters
    ----------
    record_component : h5py.Group or h5py.Dataset

    Returns
    -------
    bool
        True if constant, False otherwise

    References
    ----------
    .. https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md,
       section 'Constant Record Components'
    """
    return "value" in record_component.attrs.keys()
4adb2ff7f6fb04086b70186a32a4589ae9161bb5
6,535
import pytz


def isodate(dt):
    """Formats a datetime to ISO format."""
    tz = pytz.timezone('Europe/Zagreb')
    return dt.astimezone(tz).isoformat()
d07118e188772ec6a87d554c6883530164eeb550
6,536
def _ncells_after_subdiv(ms_inf, divisor):
    """Calculates the total number of vtu cells in a partition after subdivision.

    :param ms_inf: Mesh/soln information. ('ele_type', [npts, nele, ndims])
    :type ms_inf: tuple: (str, list)
    :rtype: integer
    """
    # Catch all for cases where cell subdivision is not performed
    if divisor is None:
        divisor = 1

    # Calculate the number of low-order cells in each high order element
    n_sub_ele = divisor ** ms_inf[1][2]

    # Pyramids require the further addition of an arithmetic series
    if ms_inf[0] == 'pyr':
        n_sub_ele += (divisor - 1) * divisor / 2

    # Multiply by number of elements
    return n_sub_ele * ms_inf[1][1]
981db31a7729c0cac88575b1cb12505a30cf0abb
6,537
def isStringLike(s):
    """
    Returns True if s acts "like" a string, i.e. is str or unicode.

    Args:
        s (string): instance to inspect

    Returns:
        True if s acts like a string
    """
    try:
        s + ''
    except TypeError:
        return False
    else:
        return True
73fc002843735536c159eed91cf54886f52e78e7
6,538
def compat(data):
    """
    Check data type, transform to string if needed.

    Args:
        data: The data.

    Returns:
        The data as a string, trimmed.
    """
    if not isinstance(data, str):
        data = data.decode()
    return data.rstrip()
51d2d37b427e77b038d8f18bebd22efa4b4fdcce
6,541
def rsa_decrypt(cipher: int, d: int, n: int) -> int:
    """
    Decrypt ciphers with the RSA cryptosystem.

    :param cipher: the ciphertext
    :param d: your private key
    :param n: your public key (n)
    :return: the plaintext
    """
    return pow(cipher, d, n)
33822a0a683eca2f86b0e2b9b319a42806ae56cc
6,542
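A worked example for rsa_decrypt above, using the classic small textbook parameters p=61, q=53 (so n=3233, e=17, d=2753); the encryption line is an assumption for illustration:

n, e, d = 3233, 17, 2753
cipher = pow(65, e, n)            # encrypt plaintext 65 -> 2790
print(rsa_decrypt(cipher, d, n))  # -> 65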
import yaml


def config_get_timesteps(filepath):
    """Get list of time steps from YAML configuration file.

    Parameters
    ----------
    filepath : pathlib.Path or str
        Path of the YAML configuration file.

    Returns
    -------
    list
        List of time-step indices (as a list of integers).
    """
    with open(filepath, 'r') as infile:
        config = yaml.safe_load(infile)['parameters']
    config.setdefault('startStep', 0)
    nstart, nt, nsave = config['startStep'], config['nt'], config['nsave']
    return list(range(nstart, nt + 1, nsave))
8a51e1437edbf2d73884cb633dbf05e9cfe5a98d
6,543
def merge_cv_results(cv_results):
    """Means across CV."""
    dtypes = ["train", "dev", "test"]
    props_l1 = ["mean_loss", "mean_accuracy", "mean_positive_f1", "UL-A", "Joint-A"]
    props_l2 = ["accuracy", "positive_f1"]
    merged_results = {}
    for dtype in dtypes:
        merged_results[dtype] = {}
        for prop in props_l1:
            summ = 0.0
            for item in cv_results:
                summ += item[dtype][prop]
            merged_results[dtype][prop] = summ / len(cv_results)
        num_labels = len(cv_results[0][dtype]["label_wise"])
        merged_results[dtype]["label_wise"] = [{} for _ in range(num_labels)]
        for i in range(num_labels):
            for prop in props_l2:
                summ = 0.0
                for item in cv_results:
                    summ += item[dtype]["label_wise"][i][prop]
                merged_results[dtype]["label_wise"][i][prop] = summ / len(cv_results)
    return merged_results
854b0672ec31103136ad3c7285311f865a098159
6,544
from typing import List
from typing import Tuple


def _broads_cores(sigs_in: List[Tuple[str]],
                  shapes: Tuple[Tuple[int, ...]],
                  msg: str
                  ) -> Tuple[List[Tuple[int, ...]], List[Tuple[int, ...]]]:
    """Extract broadcast and core shapes of arrays

    Parameters
    ----------
    sigs_in : Tuple[str, ...]
        Core signatures of input arrays
    shapes : Tuple[int, ...]
        Shapes of input arrays
    msg : str
        Potential error message

    Returns
    -------
    broads : List[Tuple[int, ...]]
        Broadcast shape of input arrays
    cores : List[Tuple[int, ...]]
        Core shape of input arrays

    Raises
    ------
    ValueError
        If arrays do not have enough dimensions.
    """
    dims = [len(sig) for sig in sigs_in]
    broads, cores = [], []
    if any(len(shape) < dim for shape, dim in zip(shapes, dims)):
        raise ValueError('Core array does not have enough ' + msg)
    for shape, dim in zip(shapes, dims):
        if dim:
            broads.append(shape[:-dim])
            cores.append(shape[-dim:])
        else:
            broads.append(shape)
            cores.append(())
    return broads, cores
228cbe06e4bc1e092cf92034255bfd60f01664c1
6,545
import json
import logging


def _ParseStepLogIfAppropriate(data, log_name):
    """Conditionally parses the contents of data, based on the log type."""
    if not data:
        return None

    if log_name.lower() == 'json.output[ninja_info]':
        # Check if data is malformatted.
        try:
            json.loads(data)
        except ValueError:
            logging.error('json.output[ninja_info] is malformatted')
            return None

    if log_name.lower() not in ['stdout', 'json.output[ninja_info]']:
        try:
            return json.loads(data) if data else None
        except ValueError:
            logging.error(
                'Failed to json load data for %s. Data is: %s.' % (log_name, data))
            return None

    return data
4f2ef1f451c271adf0285ae90f88cf10b6e8d9be
6,546
import os


def check_bash_path(fname):
    """Check if a file is in your bash path and executable (i.e. executable
    from the command line), and prepend the path to it if so.

    Arguments:
    ----------
    fname : str
        Filename to check.

    Returns:
    --------
    fname : str
        Potentially updated filename with absolute path prepended."""
    PATH = os.environ['PATH'].split(':')
    for path in PATH:
        if os.path.exists('{0}/{1}'.format(path, fname)):
            if not os.access('{0}/{1}'.format(path, fname), os.X_OK):
                raise IOError('"{0}" found in "{1}" but file is not executable.'.format(fname, path))
            else:
                fname = '{0}/{1}'.format(path, fname)
                break
    return fname
09269ac74daa21c9e47ca50c85f89db3212e40bc
6,548
def cal_rank_from_proc_loc(pnx: int, pi: int, pj: int):
    """Given (pj, pi), calculate the rank.

    Arguments
    ---------
    pnx : int
        Number of MPI ranks in x directions.
    pi, pj : int
        The location indices of this rank in x and y direction in the 2D
        Cartesian topology.

    Returns
    -------
    rank : int
    """
    # pylint: disable=invalid-name
    return pj * pnx + pi
97146de9f69dd2f62173c19dfdb98d8281036697
6,549
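A row-major rank sketch for cal_rank_from_proc_loc above: with pnx=4 ranks per row, the rank at (pi=1, pj=2) sits in the third row, second column:

print(cal_rank_from_proc_loc(pnx=4, pi=1, pj=2))  # -> 9 (2*4 + 1)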
def _chg_float(something):
    """Convert the value to a float if it can be converted; otherwise return it unchanged."""
    try:
        f = float(something)
        return f
    except ValueError:
        pass
    return something
d0119c255b0842b2de4e60293c8037ff6f75b181
6,551
def filter_matches(matches, threshold=0.75):
    """Returns filtered copy of matches greater than given threshold.

    Arguments:
        matches {list(tuple(cv2.DMatch))} -- List of tuples of cv2.DMatch objects

    Keyword Arguments:
        threshold {float} -- Filter Threshold (default: {0.75})

    Returns:
        list(cv2.DMatch) -- List of cv2.DMatch objects that satisfy the ratio test
    """
    filtered = []
    for m, n in matches:
        if m.distance < threshold * n.distance:
            filtered.append(m)
    return filtered
c2cbec1da42d96575eb422bfdda6a1351e24508b
6,553
import string
import random


def createRandomStrings(l, n, chars=None, upper=False):
    """Create a list of l random strings, each of length n."""
    names = []
    if chars is None:
        chars = string.ascii_lowercase
    # for x in random.sample(alphabet, random.randint(min, max)):
    if upper == True:
        chars = [i.upper() for i in chars]
    for i in range(l):
        val = ''.join(random.choice(chars) for x in range(n))
        names.append(val)
    return names
678b5aeb3cc98ae2b47822500fcbaff05081058a
6,554
def get_previous_quarter(today):
    """There are four quarters, 01-03, 04-06, 07-09, 10-12.

    If today is in the last month of a quarter, assume it's the current
    quarter that is requested.
    """
    end_year = today.year
    end_month = today.month - (today.month % 3) + 1
    if end_month <= 0:
        end_year -= 1
        end_month += 12
    if end_month > 12:
        end_year += 1
        end_month -= 12
    end = '%d-%02d-01' % (end_year, end_month)

    begin_year = end_year
    begin_month = end_month - 3
    if begin_month <= 0:
        begin_year -= 1
        begin_month += 12
    begin = '%d-%02d-01' % (begin_year, begin_month)

    return begin, end
4175c80d2aa75c0e3e02cdffe8a766c4a63686d0
6,555
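A usage sketch for get_previous_quarter above (hypothetical date):

from datetime import date
print(get_previous_quarter(date(2015, 8, 10)))
# -> ('2015-04-01', '2015-07-01'), i.e. the Apr-Jun quarter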
def get_payload(address):
    """
    According to an Address object, return a valid payload required by
    create/update address api routes.
    """
    return {
        "address": address.address,
        "city": address.city,
        "country": str(address.country),
        "first_name": address.first_name,
        "last_name": address.last_name,
        "title": address.title,
        "postcode": address.postcode,
    }
34fde5090aae774a24a254ea7dd7f03cc0f784be
6,556
def Material(colorVector):
    """Material color."""
    if colorVector is None:
        return None
    else:
        assert isinstance(colorVector, (list, tuple))
        assert len(colorVector) == 3
        for cid in range(0, 3):
            assert colorVector[cid] >= 0
            assert colorVector[cid] <= 1
        return colorVector
89cee73c485669786f1a7cc4855ad8460c9db023
6,558
import math


def auto_border_start(min_corner_point, border_size):
    """Determine upper-right corner coords for auto border crop.

    :param min_corner_point: extreme corner component, either 'min_x' or 'min_y'
    :param border_size: min border_size determined by extreme_frame_corners
        in vidstab process
    :return: adjusted extreme corner for cropping
    """
    return math.floor(border_size - abs(min_corner_point))
e09d48a8c8c59053516357cbfd320cf92a080cc4
6,559
import configparser


def bootPropsConfig(artifact, resources, targetDir, scalaVersion="2.13.1"):
    """Create the configuration to install an artifact and its dependencies."""
    scala = {}
    scala["version"] = scalaVersion

    app = {}
    app["org"] = artifact.org
    app["name"] = artifact.name
    app["version"] = artifact.version
    app["class"] = "com.scleradb.pathgen.Main"
    app["cross-versioned"] = "binary"
    if resources:
        app["resources"] = ", ".join(resources)

    repositories = {}
    repositories["local"] = None
    repositories["typesafe-ivy-releases"] = (
        "http://repo.typesafe.com/typesafe/ivy-releases/, "
        "[organization]/[module]/[revision]/[type]s/[artifact](-[classifier]).[ext]"
    )
    repositories["maven-central"] = None
    repositories["Sonatype OSS Snapshots"] = "https://oss.sonatype.org/content/repositories/snapshots"

    boot = {}
    boot["directory"] = targetDir

    log = {}
    log["level"] = "error"

    config = configparser.ConfigParser(allow_no_value=True, delimiters=":")
    config["scala"] = scala
    config["app"] = app
    config["repositories"] = repositories
    config["boot"] = boot
    config["log"] = log
    return config
66b8a4d641b3b1728e1d99c3f7bd7104806cdc50
6,560
import tempfile
import subprocess


def convert_BigWig2Wig(fname, path_to_binary=None):
    """
    Uses the UCSC bigWigToWig tool to do the conversion, ready for reading.
    """
    # fall back to the bare binary name if no explicit path is given
    binary = path_to_binary or "BigWig2Wig"
    tmp_path = tempfile.mkdtemp()
    success = subprocess.check_call([binary, fname, "%s/out.wig" % tmp_path])
    if success == 0:
        return "%s/out.wig" % tmp_path
97286ac5b36257f5522e94ae15ffd5772d54d726
6,561
import torch


def string_to_tensor(string, char_list):
    """A helper function to create a target-tensor from a target-string.

    params:
        string - the target-string
        char_list - the ordered list of characters

    output:
        a torch.tensor of shape (len(string)). The entries are the 1-shifted
        indices of the characters in the char_list (+1, as 0 represents the
        blank-symbol)
    """
    target = []
    for char in string:
        pos = char_list.index(char) + 1
        target.append(pos)
    result = torch.tensor(target, dtype=torch.int32)
    return result
eb6c7fcccc9802462aedb80f3da49abec9edc465
6,562
def _add_sld_boilerplate(symbolizer):
    """
    Wrap an XML snippet representing a single symbolizer in the appropriate
    elements to make it a valid SLD which applies that symbolizer to all
    features, including format strings to allow interpolating a "name"
    variable in.
    """
    return """
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld"
  xmlns:ogc="http://www.opengis.net/ogc" xmlns:xlink="http://www.w3.org/1999/xlink"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
  <NamedLayer>
    <Name>%(name)s</Name>
    <UserStyle>
      <Name>%(name)s</Name>
      <Title>%(name)s</Title>
      <FeatureTypeStyle>
        <Rule>
""" + symbolizer + """
        </Rule>
      </FeatureTypeStyle>
    </UserStyle>
  </NamedLayer>
</StyledLayerDescriptor>
"""
971199333570d5bc7baefec88f35b92922ef6176
6,564
def sequence_names_match(r1, r2):
    """
    Check whether the sequences r1 and r2 have identical names, ignoring a
    suffix of '1' or '2'. Some old paired-end reads have names that end in
    '/1' and '/2'. Also, the fastq-dump tool (used for converting SRA files
    to FASTQ) appends a .1 and .2 to paired-end reads if option -I is used.
    """
    name1 = r1.name.split(None, 1)[0]
    name2 = r2.name.split(None, 1)[0]
    if name1[-1:] in '12' and name2[-1:] in '12':
        name1 = name1[:-1]
        name2 = name2[:-1]
    return name1 == name2
645ac09011cc4b94c5b6d60bf691b6b1734d5b6b
6,565
def get_wccp_service_group_settings(
    self,
    ne_id: str,
    cached: bool,
) -> dict:
    """Get per-group WCCP configuration settings from appliance

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - wccp
          - GET
          - /wccp/config/group/{neId}?cached={cached}

    :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
    :type ne_id: str
    :param cached: ``True`` retrieves last known value to Orchestrator,
        ``False`` retrieves values directly from Appliance
    :type cached: bool
    :return: Returns nested dictionary of wccp settings per group-id \n
        * keyword **<wccp_group_id>** (`dict`): WCCP group detail object \n
            * keyword **password** (`str, optional`): WCCP service group password
            * keyword **mask_src_port** (`int, optional`): WCCP service group mask source port
            * keyword **force_l2_return** (`bool, optional`): WCCP service group force l2 return
            * keyword **hash_dst_ip** (`bool, optional`): WCCP service group hash destination ip
            * keyword **self** (`int, optional`): Integer value of service group Id
            * keyword **weight** (`int, optional`): WCCP service group weight
            * keyword **hash_src_port** (`bool, optional`): WCCP service group hash source port
            * keyword **assign_method** (`str, optional`): Assignment Method
            * keyword **hash_dst_port** (`bool, optional`): WCCP service group hash destination port
            * keyword **hash_src_ip** (`bool, optional`): WCCP service group hash source ip
            * keyword **encap** (`str, optional`): WCCP service group forwarding method
            * keyword **protocol** (`str, optional`): WCCP service group protocol
            * keyword **assign_detail** (`str, optional`): WCCP service group assignment detail
            * keyword **compatibility** (`str, optional`): WCCP service group compatibility mode. Valid values: ``ios``, ``nexus``
            * keyword **interface** (`str, optional`): WCCP service group interface
            * keyword **mask_dst_ip** (`int, optional`): WCCP service group mask destination ip
            * keyword **mask_dst_port** (`int, optional`): WCCP service group mask destination port
            * keyword **mask_src_ip** (`int, optional`): WCCP service group mask source ip
            * keyword **priority** (`int, optional`): WCCP service group priority. Valid range: ``[0, 255]``
            * keyword **router** (`dict, optional`): WCCP service group router information
    :rtype: dict
    """
    return self._get("/wccp/config/group/{}?cached={}".format(ne_id, cached))
75e07296893eabafbbf0285134cf33cb3eb46480
6,566
import os
import re


def commonpath(paths):
    """py2 compatible version of py3's os.path.commonpath

    >>> commonpath([""])
    ''
    >>> commonpath(["/"])
    '/'
    >>> commonpath(["/a"])
    '/a'
    >>> commonpath(["/a//"])
    '/a'
    >>> commonpath(["/a", "/a"])
    '/a'
    >>> commonpath(["/a/b", "/a"])
    '/a'
    >>> commonpath(["/a/b", "/a/b"])
    '/a/b'
    >>> commonpath(["/a/b/c", "/a/b/d"])
    '/a/b'
    >>> commonpath(["/a/b/c", "/a/b/d", "//a//b//e//"])
    '/a/b'
    >>> commonpath([])
    Traceback (most recent call last):
    ...
    ValueError: commonpath() arg is an empty sequence
    >>> commonpath(["/absolute/path", "relative/path"])
    Traceback (most recent call last):
    ...
    ValueError: Can't mix absolute and relative paths
    """
    assert os.sep == "/", "tested only on slash-delimited paths"
    split_re = re.compile(os.sep + "+")
    if len(paths) == 0:
        raise ValueError("commonpath() arg is an empty sequence")
    spaths = [p.rstrip(os.sep) for p in paths]
    splitpaths = [split_re.split(p) for p in spaths]
    if all(p.startswith(os.sep) for p in paths):
        abs_paths = True
        splitpaths = [p[1:] for p in splitpaths]
    elif all(not p.startswith(os.sep) for p in paths):
        abs_paths = False
    else:
        raise ValueError("Can't mix absolute and relative paths")
    splitpaths0 = splitpaths[0]
    splitpaths1n = splitpaths[1:]
    min_length = min(len(p) for p in splitpaths)
    equal = [i for i in range(min_length)
             if all(splitpaths0[i] == sp[i] for sp in splitpaths1n)]
    max_equal = max(equal or [-1])
    commonelems = splitpaths0[:max_equal + 1]
    commonpath = os.sep.join(commonelems)
    return (os.sep if abs_paths else '') + commonpath
94b92bd6802cc7483db2e3e05b8fb6a8a9630372
6,568
def map_currency(currency, currency_map):
    """
    Returns the currency symbol as specified by the exchange API docs.
    NOTE: Some exchanges (kraken) use different naming conventions.
    (e.g. BTC->XBT)
    """
    if currency not in currency_map.keys():
        return currency
    return currency_map[currency]
98b235952b042109a4e2083ce6c8fd85690c22e3
6,569
def _check_type(value, expected_type):
    """Perform type checking on the provided value

    This is a helper that will raise ``TypeError`` if the provided value is
    not an instance of the provided type. This method should be used
    sparingly but can be good for preventing problems earlier when you want
    to restrict duck typing to make the types of fields more obvious.

    If the value passed the type check it will be returned from the call.
    """
    if not isinstance(value, expected_type):
        raise TypeError(
            "Value {value!r} has unexpected type {actual_type!r}, "
            "expected {expected_type!r}".format(
                value=value,
                expected_type=expected_type,
                actual_type=type(value),
            ))
    return value
a18ecfe9d63e6a88c56fc083da227a5c12ee18db
6,570
def get_patch_info(shape, p_size):
    """
    shape: origin image size, (x, y)
    p_size: patch size (square)
    return: n_x, n_y, step_x, step_y
    """
    x = shape[0]
    y = shape[1]
    n = m = 1
    while x > n * p_size:
        n += 1
    while p_size - 1.0 * (x - p_size) / (n - 1) < 50:
        n += 1
    while y > m * p_size:
        m += 1
    while p_size - 1.0 * (y - p_size) / (m - 1) < 50:
        m += 1
    return n, m, (x - p_size) * 1.0 / (n - 1), (y - p_size) * 1.0 / (m - 1)
b681f355ffbf3c7f5653996cd021950e2c9689d4
6,571
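A usage sketch for get_patch_info above (hypothetical image size):

print(get_patch_info((1024, 1024), 512))
# -> (3, 3, 256.0, 256.0): a 3x3 grid of 512px patches with 256px steps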
def addCol(adjMatrix):
    """Adds a column to the end of adjMatrix and returns the index of the
    column that was added."""
    for j in range(len(adjMatrix)):
        adjMatrix[j].append(0)
    return len(adjMatrix[0]) - 1
29ee11953cbdb757e8ea80e897751059e42f1d90
6,572
import subprocess


def get_shell_python_version(command='python -V'):
    """
    Get version of Python running in shell | None --> tuple

    Use the command keyword to test python3 instead of python if the python
    version is Python 2. Minor version is not used at present but is included
    in case it is needed in future versions.
    """
    result = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
                            check=True, stderr=subprocess.STDOUT,
                            universal_newlines=True)
    version_string = result.stdout
    # e.g. 'Python 3.10.2' -> (3, 10, 2); splitting on '.' handles
    # multi-digit minor/micro components, unlike single-character indexing
    version = version_string.split()[1]
    major_version, minor_version, micro_version = (
        int(part) for part in version.strip().split('.')[:3])
    return (major_version, minor_version, micro_version)
27e441adb062c236881e4fc85d9e55db0962e4bc
6,573
def calculate_average(result):
    """Calculates the average package size."""
    vals = result.values()
    if len(vals) == 0:
        raise ValueError("Cannot calculate average on empty dictionary.")
    return sum(vals) / float(len(vals))
ea4b66b41533b0e8984b5137c39c744eec9d3e1f
6,575