content: string (lengths 35 to 762k) · sha1: string (length 40) · id: int64 (0 to 3.66M)
import warnings def load_tree(tree,fmt=None): """ Load a tree into an ete3 tree data structure. tree: some sort of tree. can be an ete3.Tree (returns self), a dendropy Tree (converts to newick and drops root), a newick file or a newick string. fmt: format for reading tree from newick. 0-9 or 100. See ete3 documentation for how these are read (http://etetoolkit.org/docs/latest/tutorial/tutorial_trees.html#reading-and-writing-newick-trees). As of ETE3.1.1, these numbers mean: | ====== ============================================== | FORMAT DESCRIPTION | ====== ============================================== | 0 flexible with support values | 1 flexible with internal node names | 2 all branches + leaf names + internal supports | 3 all branches + all names | 4 leaf branches + leaf names | 5 internal and leaf branches + leaf names | 6 internal branches + leaf names | 7 leaf branches + all names | 8 all names | 9 leaf names | 100 topology only | ====== ============================================== if fmt is None, try to parse without a format descriptor, then these formats in numerical order. Returns an ete3 tree object. """ # Already an ete3 tree. if type(tree) is ete3.TreeNode: return tree # Convert dendropy tree into newick (drop root) if type(tree) is dp.Tree: tree = tree.as_string(schema="newick",suppress_rooting=True) # If we get here, we need to convert. If fmt is not specified, try to parse # without a format string. if fmt is None: try: t = Tree(tree) except ete3.parser.newick.NewickError: # Try all possible formats now, in succession w = "\n\nCould not parse tree without format string. Going to try different\n" w += "formats. Please check output carefully.\n\n" warnings.warn(w) formats = list(range(10)) formats.append(100) t = None for f in formats: try: t = Tree(tree,format=f) w = f"\n\nSuccessfully parsed tree with format style {f}.\n" w += "Please see ete3 documentation for details:\n\n" w += "http://etetoolkit.org/docs/latest/tutorial/tutorial_trees.html#reading-and-writing-newick-trees\n\n" warnings.warn(w) break except ete3.parser.newick.NewickError: continue if t is None: err = "\n\nCould not parse tree!\n\n" raise ValueError(err) else: # Try a conversion with the specified format t = Tree(tree,format=fmt) return t
efc727fee6f12b4a8bc0e8c2b2319be2a820df13
11,100
from typing import Dict def load_extract(context, extract: Dict) -> str: """ Upload extract to Google Cloud Storage. Return GCS file path of uploaded file. """ return context.resources.data_lake.upload_df( folder_name="nwea_map", file_name=extract["filename"], df=extract["value"] )
c9d5fedf6f2adcb871abf4d9cead057b0627267a
11,101
def _make_default_colormap(): """Return the default colormap, with custom first colors.""" colormap = np.array(cc.glasbey_bw_minc_20_minl_30) # Reorder first colors. colormap[[0, 1, 2, 3, 4, 5]] = colormap[[3, 0, 4, 5, 2, 1]] # Replace first two colors. colormap[0] = [0.03137, 0.5725, 0.9882] colormap[1] = [1.0000, 0.0078, 0.0078] return colormap
ca6275fc60efe198be5a89662d791f6c47e45b24
11,102
def poly_to_box(poly): """Convert a polygon into an array of tight bounding box.""" box = np.zeros(4, dtype=np.float32) box[0] = min(poly[:, 0]) box[2] = max(poly[:, 0]) box[1] = min(poly[:, 1]) box[3] = max(poly[:, 1]) return box
4fb8cea86494c34832f43dbf7f942a214dc2e010
11,103
import torch def default_collate(batch): """Puts each data field into a tensor with outer dimension batch size""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): return torch.stack(batch, 0) elif ( elem_type.__module__ == "numpy" and elem_type.__name__ != "str_" and elem_type.__name__ != "string_" ): # pragma: no cover elem = batch[0] if elem_type.__name__ == "ndarray": return torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith("float") else int return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): # pragma: no cover return torch.LongTensor(batch) elif isinstance(batch[0], float): # pragma: no cover return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): # pragma: no cover return batch elif isinstance(batch[0], container_abcs.Mapping): # pragma: no cover return {key: default_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], container_abcs.Sequence): # pragma: no cover transposed = zip(*batch) return [default_collate(samples) for samples in transposed] raise TypeError((error_msg.format(type(batch[0]))))
576366ac5e57a84a015ffa3e5e80e8d4b62ac329
11,104
def updatestatus(requestdata, authinfo, acldata, supportchan, session): """Update the /Status page of a user.""" if requestdata[2] in acldata['wikis']: wikiurl = str('https://' + acldata['wikis'][requestdata[2]]['url'] + '/w/api.php') sulgroup = acldata['wikis'][requestdata[2]]['sulgroup'] else: return 'Wiki could not be found' if requestdata[0] in acldata['users']: if sulgroup in acldata['users'][requestdata[0]]['groups']: request = [acldata['users'][requestdata[0]]['groups'][sulgroup], requestdata[3]] else: return f"Data not found for {sulgroup} in {requestdata[0]}, Keys were: {acldata['users'][requestdata[0]].keys()}" elif requestdata[1][0] in acldata['sulgroups'][sulgroup]['cloaks']: request = [requestdata[1][1], requestdata[3]] else: ERRNOAUTH = "You don't seem to be authorised to use this plugin. Check you are signed into NickServ and try again." if supportchan is None: return ERRNOAUTH return f'{ERRNOAUTH} If this persists, ask for help in {supportchan}.' return mwapi.main( performer=request[0], target=str('User:' + (str(request[0]) + '/Status')), action='create', reason=str('Updating status to ' + str(request[1]) + ' per ' + str(request[0])), url=wikiurl, authinfo=[authinfo[0], authinfo[1]], content=str(request[1]), session=session, )
f305f1c4ceb6b4cfd949a1005a961b710e81740f
11,105
def sortByTimeStamps(paths): """Sorts the given list of file paths by their time-stamp :paths: The file paths to sort by time-stamp :returns: A sorted list of file paths """ sortedPaths = [] timeStamps = [] # Extract the YYYYMMDD & HHMMSS timestamps from the file paths for p in paths: timeStamp = getTimeStamps(p) timeStamps.append((int(timeStamp[0]), int(timeStamp[1]))) # Sort the timestamps in ascending order FIX FOR TUPLE timeStamps = sorted(timeStamps, key = lambda x: (int(x[0]), int(x[1]))) # Sort the paths by comparing to the sorted timestamps for t in timeStamps: for p in paths: timeStamp = getTimeStamps(p) if (int(timeStamp[0]), int(timeStamp[1])) == t: sortedPaths.append(p) return sortedPaths
01d60e0f3d793ca17f04462911406d03a6c3ddf0
11,106
def get_xml_nk_bands(xml_tree): """ Function to specifically get kpoint (cartesian) coordinates and corresponding eigenvalues (in Hartree) """ k_points_car = [] k_eigenvalues = [] k_occupations = [] for ks_energies in xml_tree.iter(tag='ks_energies'): k_points_car.append( get_xml_data(ks_energies,'k_point',as_type=float) ) k_eigenvalues.append( get_xml_data(ks_energies,'eigenvalues',as_type=float) ) k_occupations.append( get_xml_data(ks_energies,'occupations',as_type=float) ) k_points_car = np.array(k_points_car) k_eigenvalues = np.array(k_eigenvalues) k_occupations = np.array(k_occupations) return k_points_car, k_eigenvalues, k_occupations
e510995ee468552d395c179aa8713f159b1ad0e1
11,107
def enumerate(server, directory_list, filenames): """ Enumerate directories and files on the web server. """ print('\n[*] Enumerating resources.') to_search = [server] directories = [] resources = [] print('[*] Recursively searching for directories.') while len(to_search) != 0: base_url = to_search.pop(0) print('[*] Searching for directories in {0}'.format(base_url)) to_search.extend(check(base_url, directory_list)) directories.append(base_url) resources.append(base_url) if len(filenames) > 0: print('\n[*] Searching for files.') for url in directories: resources.extend(check(url, filenames, False)) return resources
e9b2eb94b71b48dcc032448369a413cc4c1790ba
11,108
def deep_equals(x, y): """Test two objects for equality in value. Correct if x/y are one of the following valid types: types compatible with != comparison pd.Series, pd.DataFrame, np.ndarray lists, tuples, or dicts of a valid type (recursive) Important note: this function will return "not equal" if types of x,y are different for instant, bool and numpy.bool are *not* considered equal Parameters ---------- x: object y: object Returns ------- bool - True if x and y are equal in value x and y do not need to be equal in reference """ if type(x) != type(y): return False # we now know all types are the same # so now we compare values if isinstance(x, pd.Series): if x.dtype != y.dtype: return False # if columns are object, recurse over entries and index if x.dtype == "object": index_equal = x.index.equals(y.index) return index_equal and deep_equals(list(x.values), list(y.values)) else: return x.equals(y) elif isinstance(x, pd.DataFrame): if not x.columns.equals(y.columns): return False # if columns are equal and at least one is object, recurse over Series if sum(x.dtypes == "object") > 0: return np.all([deep_equals(x[c], y[c]) for c in x.columns]) else: return x.equals(y) elif isinstance(x, np.ndarray): if x.dtype != y.dtype: return False return np.array_equal(x, y, equal_nan=True) # recursion through lists, tuples and dicts elif isinstance(x, (list, tuple)): return _tuple_equals(x, y) elif isinstance(x, dict): return _dict_equals(x, y) elif x != y: return False return True
27f5dc79e5c3b9e8a08a4bbd0db847995f0fa9ef
11,109
import os import pandas def get_local_log(date, which="completed", safeout=False): """ """ filein = get_log_filepath(date, which=which) if not os.path.isfile(filein): if safeout: return None raise IOError(f"No {which}_log locally stored for {date}. see download_log()") return pandas.read_csv(filein)
cc7f16fedf8de4d343e4ab3d4013aa0dc5799133
11,110
def get_asan_options(redzone_size, malloc_context_size, quarantine_size_mb, bot_platform, leaks): """Generates default ASAN options.""" asan_options = {} # Default options needed for all cases. asan_options['alloc_dealloc_mismatch'] = 0 asan_options['print_scariness'] = 1 asan_options['strict_memcmp'] = 0 # Set provided redzone size. if redzone_size: asan_options['redzone'] = redzone_size # This value is used in determining whether to report OOM crashes or not. set_value('REDZONE', redzone_size) # Set maximum number of stack frames to report. if malloc_context_size: asan_options['malloc_context_size'] = malloc_context_size # Set quarantine size. if quarantine_size_mb: asan_options['quarantine_size_mb'] = quarantine_size_mb # Test for leaks if this is an LSan-enabled job type. if get_value('LSAN') and leaks: lsan_options = join_memory_tool_options(get_lsan_options()) set_value('LSAN_OPTIONS', lsan_options) asan_options['detect_leaks'] = 1 else: remove_key('LSAN_OPTIONS') asan_options['detect_leaks'] = 0 # FIXME: Support container overflow on Android. if bot_platform == 'ANDROID': asan_options['detect_container_overflow'] = 0 # Enable stack use-after-return. asan_options['detect_stack_use_after_return'] = 1 asan_options['max_uar_stack_size_log'] = 16 # Other less important default options for all cases. asan_options.update({ 'allocator_may_return_null': 1, 'allow_user_segv_handler': 0, 'check_malloc_usable_size': 0, 'detect_odr_violation': 0, 'fast_unwind_on_fatal': 1, 'print_suppressions': 0, }) # Add common sanitizer options. asan_options.update(COMMON_SANITIZER_OPTIONS) # FIXME: For Windows, rely on online symbolization since llvm-symbolizer.exe # in build archive does not work. asan_options['symbolize'] = int(bot_platform == 'WINDOWS') # For Android, allow user defined segv handler to work. if bot_platform == 'ANDROID': asan_options['allow_user_segv_handler'] = 1 # Check if UBSAN is enabled as well for this ASAN build. # If yes, set UBSAN_OPTIONS and enable suppressions. if get_value('UBSAN'): ubsan_options = get_ubsan_options() # Remove |symbolize| explicitly to avoid overridding ASan defaults. ubsan_options.pop('symbolize', None) set_value('UBSAN_OPTIONS', join_memory_tool_options(ubsan_options)) return asan_options
a3d06902dcad73dd265d865683bd95004babc867
11,111
def get_onelinepred_results(pred_file, thred=0.1): """Parse pred_results from pred_file. Args: # TODO save format of pred_file still unknown pred_file (str): pred_file path thred: boxes with a score below this threshold can be ignored Return: pred_dict (dict(list)) : output predict result. The outer dict means different images, inner list contains xywh class_id(1) score """ if pred_file is None: return None pred_dict = {} lines = open(pred_file, 'r').readlines() for line in lines: split_item = line.strip().split() if len(split_item) < 5: continue image_path = split_item[0] #image key first occur if image_path not in pred_dict: pred_dict[image_path] = list() pred_box = np.array(split_item[1:]).reshape((-1, 9)).astype(float) #if int(pred_cls) < 2: pred_cls = '0' for box in pred_box: cls_id = 1 #int(box[0]) - 1 #skip background score = box[0] # if not (abs(box[8]) < 35 and abs(box[7]) < 35 and abs(box[6]) < 35): continue # if score < thred or box[5] < 0.5: continue pred_dict[image_path].append(box[1:5].tolist()+[cls_id, score]) #box+cls return pred_dict
989ab4160f4e675fd781a751a02e218c98b2355e
11,112
import re def _is_valid_img_uri(uri: str) -> bool: """ Returns True if the string is a base64 JPEG data URI that can be saved in the database (empty URIs are also accepted). """ regex = r"data:image/jpeg;base64," return not uri or bool(re.match(regex, uri))
0836bfa447b42fb7ed24fc897de8fb40c6e593b2
11,113
def update_config(a, b, mode="default"): """Update the configuration a with b.""" if not b: return a from_version = get_config_version(a) to_version = get_config_version(b) if from_version == 1 and to_version == 2: # When updating the configuration to a newer version, we clear all user fields. a = {k: v for k, v in a.items() if k in _non_user_fields} return replace_config(a, b) if mode == "default" or mode == "merge": return merge_config(a, b) if mode == "replace": return replace_config(a, b) raise ValueError("Invalid configuration update mode: %s" % mode)
464adc3a4daeedb246d911caab5477ff4d55841e
11,114
import os def disk_partitions(disk_ntuple, all=False): """Return all mounted partitions as a list of named tuples. If all == False return physical partitions only. """ phydevs = [] if os.path.exists('/proc/filesystems'): my_file = open('/proc/filesystems', 'r') for line in my_file: if not line.startswith('nodev'): phydevs.append(line.strip()) else: print ('path does not exist: /proc/filesystems') retlist = [] if os.path.exists('/etc/mtab'): my_file = open('/etc/mtab', 'r') for line in my_file: if not all and line.startswith('none'): continue fields = line.split() device = fields[0] mountpoint = fields[1] fstype = fields[2] if not all and fstype not in phydevs: continue if device == 'none': device = '' ntuple = disk_ntuple(device, mountpoint, fstype) retlist.append(ntuple) else: print ('path does not exist: /etc/mtab') return retlist
1c46fe7efab860c4fe9f3be7745d3ef2c24eafa1
11,115
def create_app(config_name='DevelopmentConfig'): """Create the Flask application from a given config object type. Args: config_name (string): Config instance name. Returns: Flask Application with config instance scope. """ app = Flask(__name__) {{cookiecutter.package_name | upper}}(app, config_name=config_name) return app
6022c976ffa2bf6afa692bb96c5c53bfed4a7d32
11,116
def label_accuracy_score(hist): """Returns the mean IU (intersection over union) evaluation result computed from the confusion-matrix histogram. """ with np.errstate(divide='ignore', invalid='ignore'): iu = np.diag(hist) / ( hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist) ) mean_iu = np.nanmean(iu) return mean_iu
5e129604d476f17e0cfd7a30f785775266763432
11,117
def get_page_title(title: str): """ Returns the title displayed on browser tabs """ return f'{title} | NeuraHS'
3df2de16325cf0c4c849e7d09111ea87e36c309a
11,118
def make_pointer_union_printer(val): """Factory for an llvm::PointerUnion printer.""" pointer, value = get_pointer_int_pair(val['Val']) if not pointer or not value: return None pointer_type = val.type.template_argument(int(value)) string = 'llvm::PointerUnion containing %s' % pointer_type return make_printer(string, [('pointer', pointer.cast(pointer_type))])
40d12a45a05fb49dd32b1a450b7dff23ab0ece7c
11,119
def get_paramvals_percentile(table, percentile, chi2_arr): """ Isolates the lowest chi^2 values within the given percentile (e.g. 68th) and takes a random sample of 10 Parameters ---------- table: pandas dataframe Mcmc chain dataframe percentile: int Percentile to use chi2_arr: array Array of chi^2 values Returns --------- mcmc_table_pctl: pandas dataframe Random sample of 10 rows among the lowest chi^2 values bf_params: array Best-fit parameters corresponding to the smallest chi^2 """ percentile = percentile/100 table['chi2'] = chi2_arr table = table.sort_values('chi2').reset_index(drop=True) slice_end = int(percentile*len(table)) mcmc_table_pctl = table[:slice_end] # Best fit params are the parameters that correspond to the smallest chi2 bf_params = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\ values[0][:5] # Sample random 10 of lowest chi2 mcmc_table_pctl = mcmc_table_pctl.drop_duplicates().sample(10) return mcmc_table_pctl, bf_params
1d83c54b61446aecf0a7fcbf4d8ae49e96a25b3f
11,120
def get_text_from_span(s, span): """ Return the text from the given (start, end) indices of s (a list of words) """ start, end = span return " ".join(s[start: end])
df58cf8056039b183dc421c94baa22176fe23e84
11,121
import time import struct def __timestamp(): """Generate timestamp data for pyc header.""" today = time.time() ret = struct.pack(b'=L', int(today)) return ret
477c8473026c706785b4091bbbf647b86eaa560f
11,122
def reverse_index(alist, value): """Finding the index of last occurence of an element""" return len(alist) - alist[-1::-1].index(value) -1
21fc4e17a91000085123ea4be42c72cb27a3482c
11,123
def generate_twist(loops, non_interacting=False): """Generate initial configuration to start braid moves where the active end has crossed outside the loops and they have an initial twist. Format: ' │ │ │┃' '┏━━━━━┛' '┃│ │ │ ' '┗━┓│ │ ' ' │┃│ │ ' '┏│┛│ │ ' '┃│ │ │ ' Keyword arguments: non_interacting -- loops which the active end cannot interact with (default False) -- if False, all loops are interactable -- if Integer (n), n loops randomly selected to be non-interactive -- if List (j,k,l), loops j, k and l (from left) are non-interactive """ # we can use the peppino generator for the first part of this configuration # we just add the additional lines spaces = (loops * 2) + 1 row_1, row_2, row_3 = generate_peppino(loops, non_interacting) if row_3[1] == "┆": first_loop = "┆" else: first_loop = "│" # row 4 row_4 = list(row_3) # add first crossing row_4[0] = "┗" row_4[1] = "━" row_4[2] = "┓" # row 5 row_5 = list(row_3) row_5[0] = " " row_5[1] = first_loop row_5[2] = "┃" # row 6 row_6 = list(row_3) row_6[0] = "┏" row_6[1] = first_loop row_6[2] = "┛" # row 7 row_7 = list(row_3) return ( row_1, row_2, row_3, "".join(row_4), "".join(row_5), "".join(row_6), "".join(row_7), )
fdeb58b49d2e559c4d0ccfc24e439057683f7e96
11,124
def get_pathway_id_names_dict(): """ Build a dict mapping pathway names to their IDs :return: pathway_id_names_dict """ # Fixme: This is not analysis specific (I think, KmcL); I believe any analysis should do. # A fix for this is probably wise. analysis = Analysis.objects.get(name='Tissue Comparisons') pals_df = get_cache_df(MIN_HITS, analysis) pathway_id_names_dict = {} for ix, row in pals_df.iterrows(): pathway_id_names_dict[row.pw_name] = ix return pathway_id_names_dict
4764280666108a291809558cf22984d44539a3d3
11,125
def IsShuttingDown(_shutting_down=_shutting_down): """ Whether the interpreter is currently shutting down. For use in finalizers, __del__ methods, and similar; it is advised to early bind this function rather than look it up when calling it, since at shutdown module globals may be cleared. """ return _shutting_down[0]
6cbc5d3388ee8eb0cabbb740fc5e0b8f2ac4714a
11,126
def ellipse_center(a): """ Parameters ---------- a : fitted_ellipse_obj """ b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0] num = b*b-a*c x0=(c*d-b*f)/num y0=(a*f-b*d)/num return np.array([x0,y0])
66487a641c35d2c1c1c8a8c7c0bb129eda55f4c4
11,127
def _to_average_temp(name, temperature_map): """ Converts the list of temperatures associated to a label to a list of average temperatures. If the sensor does not exist, it will return _default_temperature. If the high or critical temperature thresholds are invalid, it will use the values from _default_temperature instead. :param name: Name of the sensor to check. :param temperature_map: Dictionary of temperatures, as returned by psutil.sensors_temperatures :return: List containing the current, high and critical temperatures of the label. """ if name not in temperature_map: return _default_temperature temps = [0.0, 0.0, 0.0] for temp in temperature_map[name]: current = temp.current if temp.current is not None and temp.current > -50.0 else _default_temperature[0] high = temp.high if temp.high is not None and temp.high > 0.0 else _default_temperature[1] critical = temp.critical if temp.critical is not None and temp.critical > 0.0 else _default_temperature[2] temps[0] += current temps[1] += high temps[2] += critical size = float(len(temperature_map[name])) temps[0] = _round(temps[0] / size) temps[1] = _round(temps[1] / size) temps[2] = _round(temps[2] / size) return temps
88c3b5d0bdd64f782a26a7dc11d44dc39e6efc82
11,128
def segments_decode(aseg): """ Decode segments. Parameters ---------- aseg : numpy.ndarray of uint32 Returns ------- segments : list of list of int """ sentinel = 2 ** 32 - 1 # max uint32 value marks the end of a segment segments = [] l = [] for x in list(aseg): if x == sentinel: segments.append(l) l = [] else: l.append(x) return segments
d5edf85ae489b62c8820c3616a75a9ca305f06ec
11,129
def cvGetReal3D(*args): """cvGetReal3D(CvArr arr, int idx0, int idx1, int idx2) -> double""" return _cv.cvGetReal3D(*args)
4130a4f9571bdea1c9e54b5fcf7d1d0f5c3ce083
11,130
def get_wf_double_FF_opt( molecule, pcm_dielectric, linked=False, qchem_input_params=None, name="douple_FF_opt", db_file=">>db_file<<", **kwargs, ): """ Firework 1 : write QChem input for an FF optimization, run FF_opt QCJob, parse directory and insert into db, pass relaxed molecule to fw_spec and on to fw2, Firework 2 : write QChem input for an optimization in the presence of a PCM, using the molecule passed from fw1, run FF_opt QCJob, parse directory and insert into db Args: molecule (Molecule): input molecule to be optimized and run. pcm_dielectric (float): The PCM dielectric constant. max_cores (int): Maximum number of cores to parallelize over. Defaults to 32. qchem_input_params (dict): Specify kwargs for instantiating the input set parameters. Basic uses would be to modify the default inputs of the set, such as dft_rung, basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See pymatgen/io/qchem/sets.py for default values of all input parameters. For instance, if a user wanted to use a more advanced DFT functional, include a pcm with a dielectric of 30, and use a larger basis, the user would set qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set": "6-311++g**"}. However, more advanced customization of the input is also possible through the overwrite_inputs key which allows the user to directly modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to inputs.py to print an actual input file. For instance, if a user wanted to set the sym_ignore flag in the rem section of the input file to true, then they would set qchem_input_params = {"overwrite_inputs": "rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs could be used in conjunction with more typical modifications, as seen in the test_double_FF_opt workflow test. qchem_cmd (str): Command to run QChem. db_file (str): path to file containing the database credentials. kwargs (keyword arguments): additional kwargs to be passed to Workflow Returns: Workflow """ first_qchem_input_params = qchem_input_params or {} # Optimize the molecule in vacuum fw1 = FrequencyFlatteningOptimizeFW( molecule=molecule, name="first_FF_no_pcm", qchem_cmd=">>qchem_cmd<<", max_cores=">>max_cores<<", qchem_input_params=first_qchem_input_params, linked=linked, db_file=db_file, ) # Optimize the molecule in PCM second_qchem_input_params = {"pcm_dielectric": pcm_dielectric} for key in first_qchem_input_params: second_qchem_input_params[key] = first_qchem_input_params[key] fw2 = FrequencyFlatteningOptimizeFW( name="second_FF_with_pcm", qchem_cmd=">>qchem_cmd<<", max_cores=">>max_cores<<", qchem_input_params=second_qchem_input_params, linked=linked, db_file=db_file, parents=fw1, ) fws = [fw1, fw2] wfname = f"{molecule.composition.reduced_formula}:{name}" return Workflow(fws, name=wfname, **kwargs)
d21b04035d41beb3a24e9cbba45c420bd8d9b727
11,131
import sys import traceback def return_stack(): """ Create the stack of the obtained exception :return: string stacktrace. """ exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) return lines[0] + lines[1]
8dd58917ffe378cf88c38429d63ed8d553df4ffb
11,132
def get_element_action_names(element): """Get a list of all the actions the specified accessibility object can perform. Args: element: The AXUIElementRef representing the accessibility object Returns: an array of actions the accessibility object can perform (empty if the accessibility object supports no actions) """ error_code, names = AXUIElementCopyActionNames(element, None) error_messages = { kAXErrorIllegalArgument: "One or both of the arguments is an illegal value.", kAXErrorInvalidUIElement: "The AXUIElementRef is invalid.", kAXErrorFailure: "There was some sort of system memory failure.", kAXErrorCannotComplete: "The function cannot complete " "because messaging has failed in some way.", kAXErrorNotImplemented: "The process does not fully support the accessibility API.", } check_ax_error(error_code, error_messages) return names
f906f9565eb72b060d9e4c69ab052dc6001f192a
11,133
import re def ParseFile(fname): """Parse a microcode.dat file and return the component parts Args: fname: Filename to parse Returns: 3-Tuple: date: String containing date from the file's header license_text: List of text lines for the license file microcodes: Dict of Microcode objects from the file, keyed by name """ re_date = re.compile(r'/\* *(.* [0-9]{4}) *\*/$') re_license = re.compile(r'/[^-*+] *(.*)$') re_name = re.compile(r'/\* *(.*)\.inc *\*/', re.IGNORECASE) microcodes = {} license_text = [] date = '' data = [] name = None with open(fname) as fd: for line in fd: line = line.rstrip() m_date = re_date.match(line) m_license = re_license.match(line) m_name = re_name.match(line) if m_name: if name: microcodes[name] = Microcode(name, data) name = m_name.group(1).lower() data = [] elif m_license: license_text.append(m_license.group(1)) elif m_date: date = m_date.group(1) else: data.append(line) if name: microcodes[name] = Microcode(name, data) return date, license_text, microcodes
2774157dd256f11268a7ea4ee3d941e7aea1ca4f
11,134
import numpy def cal_q_vel(guidance_v): """ For now, optimize with the default reference velocity; switch to the coarse velocity once debugging is mature :return: """ q_vel = numpy.zeros((1, n_t + 1)) if flag_obs == 0: q_vel[0][0] = -ref_v q_vel[0][n_t] = ref_v if flag_obs == 1: for i in range(n_t + 1): if i < 1: q_vel[0][i] = -guidance_v[0][i] elif i >= n_t: q_vel[0][i] = guidance_v[0][i - 1] else: q_vel[0][i] = guidance_v[0][i - 1] - guidance_v[0][i] # print('q_vel:', numpy.shape(q_vel), q_vel) return q_vel
b7551e7b911c5e0fd27a1e90f00c1e1a3a60f53f
11,135
def tf_decode( ref_pts, ref_theta, bin_x, res_x_norm, bin_z, res_z_norm, bin_theta, res_theta_norm, res_y, res_size_norm, mean_sizes, Ss, DELTAs, R, DELTA_THETA, ): """Turns bin-based box3d format into an box_3d Input: ref_pts: (B,p,3) [x,y,z] ref_theta: (B,p) [ry] or a constant value bin_x: (B,p,K), bin assignments along X-axis res_x_norm: (B,p,K), normalized residual corresponds to bin_x bin_z: (B,p,K), bin assignments along Z-axis res_z_norm: (B,p,K), normalized residual corresponds to bin_z bin_theta: (B,p,K), bin assignments for orientation res_theta_norm: (B,p,K), normalized residual corresponds to bin_theta res_y: (B,p,K), residual w.r.t. ref_pts along Y-axis res_size_norm: (B,p,K,3), residual w.r.t. the average object size [l,w,h] mean_sizes, (B,p,K,3), average object size [l,w,h] Ss: XZ search range for different classes [-Ss, +Ss] DELTAs: XZ_BIN_LENs for different classes R: THETA search range [-R, +R] DELTA_THETA: THETA_BIN_LEN = 2 * R / NUM_BIN_THETA Output: boxes_3d: (B,p,K,7) 3D box in box_3d format [x, y, z, l, w, h, ry] """ ndims = ref_pts.shape.ndims dx = (tf.to_float(bin_x) + 0.5) * DELTAs - Ss + res_x_norm * DELTAs dz = (tf.to_float(bin_z) + 0.5) * DELTAs - Ss + res_z_norm * DELTAs if ndims == 3: # rpn K = tf.shape(bin_x)[2] if isinstance(ref_theta, tf.Tensor): # rotate along y all_rys = ref_theta ry_sin = tf.sin(all_rys) ry_cos = tf.cos(all_rys) rot_mats = tf.stack( [ tf.stack([ry_cos, ry_sin], axis=2), tf.stack([-ry_sin, ry_cos], axis=2), ], axis=3, ) rot_mats = tf.tile(tf.expand_dims(rot_mats, 2), [1, 1, K, 1, 1]) dxz_rot = tf.matmul( rot_mats, tf.expand_dims(tf.stack([dx, dz], axis=3), axis=3), transpose_a=True, transpose_b=True, ) dxz_rot = tf.squeeze(tf.matrix_transpose(dxz_rot), axis=3) dx = dxz_rot[:, :, :, 0] dz = dxz_rot[:, :, :, 1] else: assert ref_theta == 0 ref_pts_tiled = tf.tile(tf.expand_dims(ref_pts, axis=2), [1, 1, K, 1]) x = dx + ref_pts_tiled[:, :, :, 0] z = dz + ref_pts_tiled[:, :, :, 2] y = res_y + ref_pts_tiled[:, :, :, 1] elif ndims == 2: # rcnn K = tf.shape(bin_x)[1] if isinstance(ref_theta, tf.Tensor): # rotate along y all_rys = ref_theta ry_sin = tf.sin(all_rys) ry_cos = tf.cos(all_rys) rot_mats = tf.stack( [ tf.stack([ry_cos, ry_sin], axis=1), tf.stack([-ry_sin, ry_cos], axis=1), ], axis=2, ) rot_mats = tf.tile(tf.expand_dims(rot_mats, 1), [1, K, 1, 1]) dxz_rot = tf.matmul( rot_mats, tf.expand_dims(tf.stack([dx, dz], axis=2), axis=2), transpose_a=True, transpose_b=True, ) dxz_rot = tf.squeeze(tf.matrix_transpose(dxz_rot), axis=2) dx = dxz_rot[:, :, 0] dz = dxz_rot[:, :, 1] else: assert ref_theta == 0 ref_pts_tiled = tf.tile(tf.expand_dims(ref_pts, axis=1), [1, K, 1]) x = dx + ref_pts_tiled[:, :, 0] z = dz + ref_pts_tiled[:, :, 2] y = res_y + ref_pts_tiled[:, :, 1] ref_theta = tf.tile(tf.expand_dims(ref_theta, axis=1), [1, K]) theta = ( ref_theta + (tf.to_float(bin_theta) + 0.5) * DELTA_THETA - R + res_theta_norm * 0.5 * DELTA_THETA ) size = mean_sizes + res_size_norm * mean_sizes if ndims == 3: l = size[:, :, :, 0] w = size[:, :, :, 1] h = size[:, :, :, 2] # combine all boxes_3d = tf.stack([x, y, z, l, w, h, theta], axis=3) # y+h/2 elif ndims == 2: l = size[:, :, 0] w = size[:, :, 1] h = size[:, :, 2] # combine all boxes_3d = tf.stack([x, y, z, l, w, h, theta], axis=2) # y+h/2 return boxes_3d
720252aaad2b8d380d30d871e97d47b2c9309a68
11,136
def _histogram_discretize(target, num_bins=gin.REQUIRED): """Discretization based on histograms.""" discretized = np.zeros_like(target) for i in range(target.shape[0]): discretized[i, :] = np.digitize(target[i, :], np.histogram( target[i, :], num_bins)[1][:-1]) return discretized
14108b9208dca586f7fd39dac3a5a17f1e5a2928
11,137
def apply_acl(instance, content): """Apply ACLs.""" any_acl_applied = False if not isinstance(instance, roleable.Roleable): return any_acl_applied instance_acl_dict = {(l.ac_role_id, p.id): l for p, l in instance.access_control_list} person_ids = set() for role_id, data in content.get("access_control_list", {}).iteritems(): person_ids |= {i["id"] for i in data["added"] + data["deleted"]} person_dict = {p.id: p for p in all_models.Person.query.filter( all_models.Person.id.in_(person_ids)) } acr_dict = {r.id: r for r in ACR.get_ac_roles_for(instance.type).values()} for role_id, data in content.get("access_control_list", {}).iteritems(): role_id = int(role_id) if role_id not in acr_dict: continue for add in data["added"]: if (role_id, add["id"]) not in instance_acl_dict: instance.add_person_with_role_id(person_dict[add["id"]], role_id) any_acl_applied = True for delete in data["deleted"]: if (role_id, delete["id"]) in instance_acl_dict: instance.acr_id_acl_map[role_id].remove_person( person_dict[delete["id"]] ) any_acl_applied = True return any_acl_applied
134f4ae98018626712c2f918ce5b501129169a30
11,138
import json def serialize(results): """Serialize a ``QueryDict`` into json.""" serialized = {} for result in results: serialized.update(result.to_dict()) return json.dumps(serialized, indent=4)
1ce996e1172344ba72ccbb9487b51b0efc30fa5c
11,139
def allowed_once (cave, visited): """Only allows small caves to be visited once. Returns False if `cave` is small and already in `visited`. """ return big(cave) or (small(cave) and cave not in visited)
f3619c1d230de50fab539103084457413f30a74e
11,140
from datetime import datetime import json def _serialize_examstruct(exam): """ Serialize the exam structure for, e.g., the cache. The dates, especially, need work before JSON """ assert isinstance(exam, dict) date_fmt = '%Y-%m-%d %H:%M:%S' assert isinstance(exam['start'], datetime) assert isinstance(exam['end'], datetime) safe = exam.copy() safe['start'] = exam['start'].strftime(date_fmt) safe['end'] = exam['end'].strftime(date_fmt) return json.dumps(safe)
3c553986bfd6b565bbdc34218ca01d984d3aab69
11,141
def _analysis_test_impl(ctx): """Implementation function for analysis_test. """ _ignore = [ctx] return [AnalysisTestResultInfo( success = True, message = "All targets succeeded analysis", )]
5f006c817581b771bf3d1f5b3cc7861cd98e8958
11,142
import os def filename_to_scienceurl(filename, suffix=None, source="irsa", verbose=False, check_suffix=True): """ """ _, filefracday, paddedfield, filtercode, ccd_, imgtypecode, qid_, *suffix_ = os.path.basename(filename).split("_") suffix_ = "_".join(suffix_) year,month, day, fracday = filefrac_to_year_monthday_fracday(filefracday) paddedccdid = ccd_.replace("c","") qid = qid_.replace("q","") if suffix is None: suffix = suffix_ return science_path(year, month, day, fracday, paddedfield, filtercode, paddedccdid, qid, # added in input imgtypecode=imgtypecode, suffix=suffix, source=source, verbose=verbose, check_suffix=check_suffix)
c78fa2150b45b2ccd7b347d7cbe24f1760e55450
11,143
import warnings def CD_Joint(CD_J_AS = None, Ypred = None, beta = None, zeta = None, active_set = None, lam = None, P = None, P_interaction = None, Y = None, B = None, B_interaction = None, S = None, S_interaction = None, I = None, interaction_terms = None, r = None, max_iter = None, tol = 1e-4, full_set = None, MaxSuppSize_main = None, MaxSuppSize_interaction = None, verbose = False, path = None): """Cyclic Block Coordinate Descent over the full set of main/interaction effects. Args: CD_J_AS: a callable function that optimizes over a reduced set of main effects, callable. Ypred: numpy array of shape (N, ). beta: coefficients for main/interaction effects, 2 lists of arrays of shapes [ [(Ki+1, 1), ...], [(Kij+1, 1), ...]] zeta: binary vector to track which main effects are in the active set, 2 bool arrays of shape [(1, d), (1, Imax)] active_set: indices of main effects to optimize over, a numpy int array. lam: regularization parameters [lam_1, lam_2], list of floats. P: B^T*B + 2*N*(lam_1*S_i + eps*I) matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...]. eps is a small epsilon for numerical stability. P_interaction: B^T*B + 2*N*(lam_1*S_ij + eps*I) matrices for main effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...]. eps is a small epsilon for numerical stability. Y: training target responses, a float numpy array of shape (N,). B: B-spline transformed sparse matrices for main effects, list of sparse matrices of shapes [(N, Ki+1), ...]. B_interaction: B-spline transformed sparse matrices for interaction effects, list of sparse matrices of shapes [(N, Kij+1), ...]. S: Smoothness matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...]. S_interaction: Smoothness matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...]. I: number of possible main/interaction effects, int scalers. interaction_terms: list of interaction effects to consider if only a subset need to be considered, a 2D numpy array of of shape (Imax, 2). r: relative scaling factor for L0 penalty between main and interaction effects. We consider r=1.0 (corresponds to alpha symbol in the paper), float scaler. max_iter: maximum number of Cyclic BCD on the active set, int scaler. tol: relative loss termination criteria for stopping, a float scalar. full_set: indices of all main effects, a numpy int array. main_terms: list of main effects to consider if only a subset need to be considered, not supported yet. MaxSuppSize_main: Stop L0 regularization if the active set of main effects is larger than the MaxSuppSize_main and move to next smoothing lambda setting and start L0 regularization, int scaler. MaxSuppSize_interaction: Stop L0 regularization if the active set of interaction effects is larger than the MaxSuppSize_interaction and move to next smoothing lambda setting and start L0 regularization, int scaler. verbose: for printing optimization steps, bool scaler. path: for logging, str. Returns: Ypred: Updated prediction, numpy array of shape (N, ). beta: Updated coefficients for main effects, list of arrays of shapes [(Ki+1, 1), ...]. zeta: Updated binary vector to track which main effects are in the active set, a bool array of shape (1, d). delta: Updated coefficients for interaction effects, list of arrays of shapes [(Kij+1, 1), ...]. alpha: Updated binary vector to track which interaction effects are in the active set, a bool array of shape (1, Imax). active_set: Updated indices of nonzero main effects, a numpy int array. 
active_interaction_set: Updated indices of nonzero interaction effects, a numpy int array. MaxSuppSize_flag: indicates Maximum Support size is reached, bool scaler. """ N = Y.shape[0] delta = beta[1] beta = beta[0] alpha = zeta[1] zeta = zeta[0] active_interaction_set = active_set[1] active_set = active_set[0] full_interaction_set = full_set[1] full_set = full_set[0] Bspam = B Bspam_interaction = B_interaction Pspam = P Pspam_interaction = P_interaction d = I[0] dinteraction = I[1] MaxSuppSize_flag = 0 eps = 1e-8 warnings.filterwarnings("error") res = Y-Ypred beta_p = [(P.solve((B.transpose()).dot(res))).reshape(-1,1) for B, P in zip(Bspam, Pspam)] res_p = np.array([np.linalg.norm(res-B.dot(bp)) for B, bp in zip(Bspam, beta_p)]) active_set = np.arange(d) # if active_set is None: # A = int(np.ceil(0.1*d)) # active_set = res_p.argsort()[:A] # else: # A = np.minimum(np.maximum(int(np.ceil(0.2*len(active_set))),10), 50) # active_set = np.union1d(active_set, res_p.argsort()[:A]) res = Y-Ypred delta_p = [(P.solve((B.transpose()).dot(res))).reshape(-1,1) for B, P in zip(Bspam_interaction, Pspam_interaction)] res_p = np.array([np.linalg.norm(res-B.dot(dp)) for B, dp in zip(Bspam_interaction, delta_p)]) if active_interaction_set is None: A = int(np.ceil(0.01*dinteraction)) active_interaction_set = res_p.argsort()[:A] else: A = np.minimum(np.maximum(int(np.ceil(0.2*len(active_interaction_set))),10), 50) active_interaction_set = np.union1d(active_interaction_set, res_p.argsort()[:A]) ''' Coordinate Descent over full set ''' for it in range(max_iter): Ypred, beta, zeta, delta, alpha = CD_J_AS(Ypred = Ypred, beta = [beta, delta], zeta = [zeta, alpha], active_set = [active_set, active_interaction_set], lam = [lam[0], lam[1]], P = Pspam, P_interaction = Pspam_interaction) active_set = np.where(zeta[0,:] == 1)[0] active_interaction_set = np.where(alpha[0,:] == 1)[0] if (len(np.where(zeta[0,:] == 1)[0]) > MaxSuppSize_main) or (len(np.where(alpha[0,:] == 1)[0]) > MaxSuppSize_interaction): MaxSuppSize_flag = 1 break J = 0.5*mean_squared_error(Y, Ypred)+\ lam[0]*sum([(np.transpose(beta[k])).dot(S[k].dot(beta[k]))[0,0] for k in active_set])+\ lam[0]*sum([(np.transpose(delta[k])).dot(S_interaction[k].dot(delta[k]))[0,0] for k in active_interaction_set])+\ eps*sum([np.dot(beta[k][:,0],beta[k][:,0]) for k in active_set])+\ eps*sum([np.dot(delta[k][:,0],delta[k][:,0]) for k in active_interaction_set])+\ lam[1]*(np.count_nonzero(zeta[0,:]))+\ r*lam[1]*(np.count_nonzero(alpha[0,:])) if verbose == True: display(Math(r'Iteration: {}, Obj: {:.0f}, '.format(it, J)+', \sum_{j \in S^c} z_j: '+'{} \leq {}.'.format(np.count_nonzero(zeta[0,:]), len(active_set))+'\sum_{ij \in S^c} z_{ij}: '+'{} \leq {}.'.format(np.count_nonzero(alpha[0,:]),len(active_interaction_set)))) for j in [x for x in full_set if x not in active_set]: if zeta[0,j]==1: Ypred -= Bspam[j].dot(beta[j]) res = Y-Ypred beta[j], zeta[:,j] = utilities.solve(B=Bspam[j], P=Pspam[j], y=res, beta=beta[j], S=S[j], lam=[lam[0], lam[1]]) if zeta[0,j]==1: Ypred += Bspam[j].dot(beta[j]) for j in [x for x in full_interaction_set if x not in active_interaction_set]: if alpha[0,j]==1: Ypred -= Bspam_interaction[j].dot(delta[j]) res = Y-Ypred delta[j], alpha[:,j] = utilities.solve(B=Bspam_interaction[j], P=Pspam_interaction[j], y=res, beta=delta[j], S=S_interaction[j], lam=[lam[0], r*lam[1]]) if alpha[0,j]==1: Ypred += Bspam_interaction[j].dot(delta[j]) if np.count_nonzero(zeta[0,:])==active_set.shape[0] and np.count_nonzero(alpha[0,:])==active_interaction_set.shape[0]: if 
np.sum(sorted(active_set) == np.where(zeta[0,:] == 1)[0])==active_set.shape[0] and np.sum(sorted(active_interaction_set) == np.where(alpha[0,:] == 1)[0])==active_interaction_set.shape[0]: #print('Active set converged') active_set = np.where(zeta[0,:] == 1)[0] active_interaction_set = np.where(alpha[0,:] == 1)[0] break active_set = np.where(zeta[0,:] == 1)[0] active_interaction_set = np.where(alpha[0,:] == 1)[0] # for i in active_set: # Pspam[i] = sp.linalg.splu((Bspam[i].transpose()).dot(Bspam[i])+2*N*(lam[0]*S[i]+eps*sp.csr_matrix(np.identity(Bspam[i].shape[1])))) # for i in active_interaction_set: # Pspam_interaction[i] = sp.linalg.splu((Bspam_interaction[i].transpose()).dot(Bspam_interaction[i])+2*N*(lam[0]*S_interaction[i]+eps*sp.csr_matrix(np.identity(Bspam_interaction[i].shape[1])))) if(it == max_iter-1): with open(path+'/Warning.txt', "a") as f: f.write('Warning: CD over full set did not converge within the chosen max_iter!') f.write('\lambda_1: {:.7f},\lambda_2: {:.7f}'.format(lam[0], lam[1])) return Ypred, beta, zeta, delta, alpha, active_set, active_interaction_set, MaxSuppSize_flag
780bbd6a44dcfacf55a22390a6f7ee8c98e2d2f0
11,144
from typing import Tuple def testAllCallbacksSmokeTest( args_count: int, type_checker: TypeCheckerFixture ) -> None: """ Parametrized test to do basic checking over all Callbacks (except Callback0). We generate functions with too much arguments, too few, and correct number, and check that the errors are as expected. This should be enough to catch copy/paste errors when declaring the Callback overloads. """ def gen_signature_and_args(count: int) -> Tuple[str, str, str]: # Generates "v1: int, v2: int" etc signature = ", ".join(f"v{i}: int" for i in range(count)) # Generates "10, 20" etc args = ", ".join(f"{i+1}0" for i in range(count)) # Generates "int, int" etc types = ", ".join("int" for _ in range(count)) return signature, args, types sig_too_few, args_too_few, types_too_few = gen_signature_and_args(args_count - 1) sig_too_many, args_too_many, types_too_many = gen_signature_and_args(args_count + 1) sig_ok, args_ok, types_ok = gen_signature_and_args(args_count) type_checker.make_file( f""" from oop_ext.foundation.callback import Callback{args_count} c = Callback{args_count}[{types_ok}]() def too_few_func({sig_too_few}) -> None: ... c.Register(too_few_func) c({args_too_few}) def too_many_func({sig_too_many}) -> None: ... c.Register(too_many_func) c({args_too_many}) def ok_func({sig_ok}) -> None: ... c.Register(ok_func) c({args_ok}) """ ) result = type_checker.run() result.assert_errors( [ "has incompatible type", "Missing positional argument", "has incompatible type", "Too many arguments", ] )
8459b040f2c7dc145a6a41ddebd4edb24873d704
11,145
def transform_unnamed_cols_range(df: pd.DataFrame, columns_range: range, new_column_name_prefix: str, inplace=False) -> object: """ This function transforms a range of columns based assuming the presence of following schema in dataframe: |base_column_name|Unnamed_n|Unnamed_n+1|Unnamed_n+2|--- |option_1 |NaN |NaN |NaN |--- |----------------|NaN |option_3 |NaN |--- |----------------|option_2 |NaN |NaN |--- |----------------|NaN |NaN |option_4 |--- Without a precise order, only one cell will be checked as "option_x" and that the following schema will be given as output: |base_column_name_option_1|base_column_name_option_2 |base_column_name_option_3|base_column_name_option_4|--- Also, it will replace cell values from this columns with binary data (1, 0) according to the presence or not of the corresponding categorical value. :param df: input dataframe to be processed :param columns_range: range of columns from input dataframe to be transformed :param new_column_name_prefix: new column_name to be added as base_name to rename map :param inplace: If False, return a copy. Otherwise, do operation inplace and return None. :return: input dataframe with Unnamed columns dropped and string values transformed to binary values (0,1) """ # extracting columns of interest df_target_columns = df.iloc[:, columns_range] return _even_out_categorical_as_binaries(df, df_target_columns.columns, new_column_name_prefix=new_column_name_prefix, inplace=inplace)
c54394531cec3aeef6e1717d3db0be17852ade9b
11,146
def shingles(tokens, n): """ Return n-sized shingles from a list of tokens. >>> assert list(shingles([1, 2, 3, 4], 2)) == [(1, 2), (2, 3), (3, 4)] """ return zip(*[tokens[i:-n + i + 1 or None] for i in range(n)])
93e8f3828bf4b49397e09cb46565199dcd7a68be
11,147
import json from os.path import dirname, join def load_json(filename): """Load JSON file as dict.""" with open(join(dirname(__file__), filename), "rb") as fp: return json.load(fp)
3ce3a92b4a11a005709ea3fab003d73133627183
11,148
def getLayerList(layer_list, criterionFn): """Returns a list of all of the layers in the stack that match the given criterion function, including substacks.""" matching_layer = [] for layer in layer_list: if criterionFn(layer): matching_layer.append(layer) if hasattr(layer, 'layerStack'): matching_layer.extend(getLayerList(layer.layerStack().layerList(), criterionFn)) if layer.hasMaskStack(): matching_layer.extend(getLayerList(layer.maskStack().layerList(), criterionFn)) if hasattr(layer, 'hasAdjustmentStack') and layer.hasAdjustmentStack(): matching_layer.extend(getLayerList(layer.adjustmentStack().layerList(), criterionFn)) return matching_layer
5e09065b350f1305a2fcd45379751fac6552031e
11,149
def getBiLinearMap(edge0, edge1, edge2, edge3): """Get the UV coordinates on a square defined from spacing on the edges""" if len(edge0) != len(edge1): raise ValueError("getBiLinearMap: The len of edge0 and edge1 are not the same") if len(edge2) != len(edge3): raise ValueError("getBiLinearMap: The len of edge2 and edge3 are not the same") N = len(edge0) M = len(edge2) UV = np.zeros((N, M, 2)) UV[:, 0, 0] = edge0 UV[:, 0, 1] = 0.0 UV[:, -1, 0] = edge1 UV[:, -1, 1] = 1.0 UV[0, :, 0] = 0.0 UV[0, :, 1] = edge2 UV[-1, :, 0] = 1.0 UV[-1, :, 1] = edge3 for i in range(1, N - 1): x1 = edge0[i] y1 = 0.0 x2 = edge1[i] y2 = 1.0 for j in range(1, M - 1): x3 = 0 y3 = edge2[j] x4 = 1.0 y4 = edge3[j] UV[i, j] = calcIntersection(x1, y1, x2, y2, x3, y3, x4, y4) return UV
a75626a846c18418db8dbb98afdb25ab0c903969
11,150
import argparse def _parse_args(): """parse arguments""" parser = argparse.ArgumentParser(description='train and export wdsr on modelarts') # train output path parser.add_argument('--train_url', type=str, default='', help='where training log and ckpts saved') # dataset dir parser.add_argument('--data_url', type=str, default='', help='where datasets located') # train config parser.add_argument('--data_train', type=str, default='DIV2K', help='train dataset name') parser.add_argument('--device_target', type=str, default='Ascend', help='target device to run') parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train') parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training') parser.add_argument('--lr', type=float, default=1e-4, help='learning rate') parser.add_argument('--init_loss_scale', type=float, default=65536., help='scaling factor') parser.add_argument('--loss_scale', type=float, default=1024.0, help='loss_scale') parser.add_argument('--scale', type=str, default='2+3+4', help='super resolution scale') parser.add_argument('--ckpt_save_path', type=str, default='ckpt', help='path to save ckpt') parser.add_argument('--ckpt_save_interval', type=int, default=10, help='save ckpt frequency, unit is epoch') parser.add_argument('--ckpt_save_max', type=int, default=5, help='max number of saved ckpt') parser.add_argument('--task_id', type=int, default=0) # export config parser.add_argument("--export_batch_size", type=int, default=1, help="batch size") parser.add_argument("--export_file_name", type=str, default="wdsr", help="output file name.") parser.add_argument("--export_file_format", type=str, default="AIR", choices=['MINDIR', 'AIR', 'ONNX'], help="file format") args, _ = parser.parse_known_args() return args
8147e3d2c7bc60cb2ed379308118bcf7ef8157b6
11,151
def _parse_orientation(response: HtmlResponse): """Parse Orientation. Returns None if not available or is unknown. """ value = response.css('th:contains("Ausrichtung") + td ::text').get() if value: if value == "unbekannt" or value == "verschieden": return None fk_value = { "Nord": "N", "Nordost": "NO", "Ost": "O", "Südost": "SO", "Süd": "S", "Südwest": "SW", "West": "W", "Nordwest": "NW", } return Orientation.objects.get(name=fk_value[value]) else: return None
338fb6dbc8e3f1c0e116f766f86a01b110c922f2
11,152
def binaryread(file, vartype, shape=(1,), charlen=16): """ Uses numpy to read from binary file. This was found to be faster than the struct approach and is used as the default. """ # read a string variable of length charlen if vartype == str: result = file.read(charlen * 1) else: # find the number of values nval = np.prod(shape) result = np.fromfile(file, vartype, nval) if nval == 1: result = result # [0] else: result = np.reshape(result, shape) return result
221e0a71271eea4a31423a94244c12784af7fef2
11,153
def subsample_data(neuron_data, sample_size = 10000): """ Acquires a subsample of the Neuron dataset. This function samples a set of neurons without replacement. Params ----------- Returns ----------- rand_ix (array-like): Array containing the chosen indices sample_neurons (array-like ): Array with shape (sample_size, neuron_data.shape[1]) containing a subset of the neuron traces. """ # Get random indices sampling without replacement rand_ix = np.random.choice( np.arange(neuron_data.shape[0]), size= sample_size, replace=False ) # Get subsample by choosing indices along rows sample_neurons = neuron_data[rand_ix, :] return rand_ix, sample_neurons
801d0d618576e14b67b33bf9071c135409362bfe
11,154
import sys def appGet(*args, **kwargs): """ .. deprecated:: 0.42.0 Use :func:`app_get()` instead. """ print("dxpy.appGet is deprecated; please use app_get instead.", file=sys.stderr) return app_get(*args, **kwargs)
fd1f11b7a0d1af18faa4177e537fae8c7146eeae
11,155
def connect_db(): """Connects to the specific database.""" mongo = MongoClient(DATABASE_URL,replicaset=MONGO_REPLICASET) #if COLLECTION_NAME in mongo[DATABASE_NAME].collection_names(): collection = mongo[DATABASE_NAME][COLLECTION_NAME] #else: # mongo[DATABASE_NAME].create_collection(COLLECTION_NAME) # collection = mongo[DATABASE_NAME][COLLECTION_NAME] # collection.createIndex( { "timestamp": 1 }, { 'unique': True } ) return collection
0e037a2bfb8687d4ff2b477a59c3f5ba99335c44
11,156
def transient(func): """ decorator to make a function execution transient. meaning that before starting the execution of the function, a new session with a new transaction will be started, and after the completion of that function, the new transaction will be rolled back without the consideration or affecting the parent transaction which by default is scoped to request. the corresponding new session will also be removed after function execution. note that you *should not* commit, flush or rollback anything inside a transient function, the `@transient` decorator will handle rollback operation when needed. otherwise, unexpected behaviors may occur. also note that you *should not* remove the corresponding session from session factory when using `@transient` decorator. the removal operation will be handled by decorator itself and if you remove session manually, it will cause broken chain of sessions and unexpected behaviour. this decorator also supports multiple `@transient` usage in a single call hierarchy. for example: def service_root(): store = get_current_store() value = EntityRoot() store.add(value) service_a() @atomic def service_a(): store = get_current_store() value = EntityA() store.add(value) service_b() @transient def service_b(): store = get_current_store() value = EntityB() store.add(value) service_c() @transient def service_c(): value = EntityC() value.save() in the above example, if the call hierarchy starts with `service_root()`, at the end, the data of `service_root` and `service_a` will be persisted into database. but the data of `service_b` and `service_c` will not be persisted because they are decorated as transient. :param function func: function. :returns: function result. """ def decorator(*args, **kwargs): """ decorates the given function and makes its execution transient. :param object args: function arguments. :param object kwargs: function keyword arguments. :returns: function result. """ store = database_services.get_atomic_store() try: result = func(*args, **kwargs) return result finally: store.rollback() factory = database_services.get_current_session_factory() factory.remove(atomic=True) return update_wrapper(decorator, func)
454c808d15bbdddd800db70ec56d228f432921f8
11,157
def make_mapping(environ, start_response): """ Establish a mapping, storing the provided URI as a field on a tiddler in the PRIVATEER bag. Accepted data is either a JSON dictionary with a uri key or a POST CGI form with a uri query parameter. Respond with a location header containing the uri of the mapping. """ uri = None try: content_type = environ['tiddlyweb.type'] except KeyError: content_type = None if content_type == 'application/json': try: length = environ['CONTENT_LENGTH'] content = environ['wsgi.input'].read(int(length)) data = simplejson.loads(content) uri = data['uri'] except (KeyError, IOError, simplejson.JSONDecodeError) as exc: raise HTTP400('Unable to parse input: %s' % exc) else: try: uri = environ['tiddlyweb.query']['uri'][0] except (KeyError, IndexError) as exc: raise HTTP400('Unable to parse input: %s' % exc) if uri: title_uuid = _make_mapping_tiddler(environ, uri) else: raise HTTP400('No uri for mapping provided') start_response('201 Created', [ ('Location', _mapping_uri(environ, title_uuid))]) return []
e90a72bce2132d703504230d41f3e807ea77d7a2
11,158
def create_space_magnitude_region(region, magnitudes): """Simple wrapper to create space-magnitude region """ if not isinstance(region, (CartesianGrid2D, QuadtreeGrid2D)): raise TypeError("region must be CartesianGrid2D or QuadtreeGrid2D") # bind to region class if magnitudes is None: raise ValueError("magnitudes should not be None if creating space-magnitude region.") region.magnitudes = magnitudes region.num_mag_bins = len(region.magnitudes) return region
64f4606c74ad38bd34ade7673074124e3d3faa48
11,159
import matplotlib.pyplot as plt def autocorrelation_plot(series, label, lower_lim=1, n_samples=None, ax=None, **kwds): """Autocorrelation plot for time series. Parameters: ----------- series: Time series ax: Matplotlib axis object, optional kwds : keywords Options to pass to matplotlib plotting method Returns: ----------- ax: Matplotlib axis object """ n = len(series) data = np.asarray(series) if ax is None: ax = plt.gca(xlim=(lower_lim, n_samples), ylim=(-1.0, 1.0)) mean = np.mean(data) c0 = np.sum((data - mean) ** 2) / float(n) def r(h): return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0 x = (np.arange(n) + 1).astype(int) y = lmap(r, x) z95 = 1.959963984540054 z99 = 2.5758293035489004 ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey') ax.axhline(y=z95 / np.sqrt(n), color='grey') ax.axhline(y=0.0, color='black') ax.axhline(y=-z95 / np.sqrt(n), color='grey') ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey') ax.set_xlabel("Lag") ax.set_ylabel("Autocorrelation") if n_samples: ax.plot(x[:n_samples], y[:n_samples], label=label, **kwds) else: ax.plot(x, y, label=label, **kwds) if 'label' in kwds: ax.legend() ax.grid() return ax
18254d8bf263c50d059921b682606700dd51ab5a
11,160
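The r(h) estimator inside autocorrelation_plot is just the lag-h sample autocovariance divided by the lag-0 variance. A minimal standalone sketch of that computation on a synthetic AR(1) series (numpy only; nothing here comes from the original module):

import numpy as np

rng = np.random.default_rng(0)
series = np.empty(500)
series[0] = rng.normal()
for i in range(1, len(series)):
    series[i] = 0.7 * series[i - 1] + rng.normal()   # AR(1) with phi = 0.7

n = len(series)
mean = series.mean()
c0 = np.sum((series - mean) ** 2) / n

def r(h):
    # autocovariance at lag h divided by the lag-0 variance
    return ((series[:n - h] - mean) * (series[h:] - mean)).sum() / n / c0

print([round(r(h), 3) for h in range(1, 6)])   # roughly 0.7, 0.49, 0.34, ...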
def get_productivity(coin_endowments): """Returns the total coin inside the simulated economy. Args: coin_endowments (ndarray): The array of coin endowments for each of the agents in the simulated economy. Returns: Total coin endowment (float). """ return np.sum(coin_endowments)
e6dfe2485bce54599bc919d9a2b2235b90166702
11,161
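A quick illustrative check of get_productivity, assuming the function above and numpy (as np) are in scope:

import numpy as np

coin_endowments = np.array([3.0, 5.0, 2.0])
assert get_productivity(coin_endowments) == 10.0   # total coin in the economy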
def prefix_attrs(source, keys, prefix): """Rename some of the keys of a dictionary by adding a prefix. Parameters ---------- source : dict Source dictionary, for example data attributes. keys : sequence Names of keys to prefix. prefix : str Prefix to prepend to keys. Returns ------- dict Dictionary of attributes with some keys prefixed. """ out = {} for key, val in source.items(): if key in keys: out[f"{prefix}{key}"] = val else: out[key] = val return out
e1c8102fddf51cd7af620f9158419bff4b3f0c57
11,162
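A short usage sketch for prefix_attrs, assuming the function above is importable; the attribute names are invented for illustration:

source = {"units": "K", "long_name": "temperature", "cell_methods": "time: mean"}
out = prefix_attrs(source, keys=["units", "long_name"], prefix="original_")
# out == {"original_units": "K", "original_long_name": "temperature",
#         "cell_methods": "time: mean"}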
import logging def add_polygon_to_image(image: np.ndarray, object: dict) -> np.ndarray: """ Add the polynom of the given object to the image. Since using CV, order is (B,G,R) Parameters ---------- img : np.ndarray Opencv image object : dict Dictionary of the polynom with meta infos. Returns ------- np.ndarray image with the polynom in (B,G,R) """ x_points = object['points']['x_points'] y_points = object['points']['y_points'] pts = np.array([a for a in zip(x_points, y_points)]) pts = pts[:, None, :] logging.debug(f'Polylines with shape {pts.shape} : {pts}') cv.polylines(image, [pts], True, (0, 0, 255), 5) return image
e598e80ab23df2e15c96d6ed1453b95ec10e5451
11,163
def add(coefficient_1, value_1, coefficient_2, value_2): """Provides an addition algebra for various types, including scalars and histogram objects. Incoming values are not modified. Args: coefficient_1: The first coefficient, a scalar value_1: The first value, a histogram or scalar coefficient_2: The second coefficient, a scalar value_2: The second value, a histogram or scalar Returns: The value of the expression: ((coefficient_1 * value_1) + (coefficient_2 * value_2)) """ # Verify that the incoming types match if type(value_1) != type(value_2): raise ValueError('values must be of the same type') # Handle based on type if isinstance(value_1, TH1): # Create the result result = value_1.Clone(uuid4().hex) # Add the histograms result.Add(value_1, value_2, coefficient_1, coefficient_2) else: # Create the result result = ((coefficient_1 * value_1) + (coefficient_2 * value_2)) # All done return result
70bccba3d504325a66090104ffc4d464649f2b32
11,164
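For the scalar branch of add (the TH1 branch needs PyROOT, whose import is not shown above), the computed value is simply coefficient_1 * value_1 + coefficient_2 * value_2. An illustrative call, assuming the function and its TH1 name are in scope:

# (2.0 * 3.0) + (-1.0 * 1.0) == 5.0
result = add(2.0, 3.0, -1.0, 1.0)
assert result == 5.0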
import time def kotlin_object_type_summary(lldb_val, internal_dict = {}): """Hook that is run by lldb to display a Kotlin object.""" start = time.monotonic() log(lambda: f"kotlin_object_type_summary({lldb_val.unsigned:#x}: {lldb_val.GetTypeName()})") fallback = lldb_val.GetValue() if lldb_val.GetTypeName() != "ObjHeader *": if lldb_val.GetValue() is None: bench(start, lambda: "kotlin_object_type_summary:({:#x}) = NULL".format(lldb_val.unsigned)) return NULL bench(start, lambda: "kotlin_object_type_summary:({:#x}) = {}".format(lldb_val.unsigned, lldb_val.signed)) return lldb_val.value if lldb_val.unsigned == 0: bench(start, lambda: "kotlin_object_type_summary:({:#x}) = NULL".format(lldb_val.unsigned)) return NULL tip = internal_dict["type_info"] if "type_info" in internal_dict.keys() else type_info(lldb_val) if not tip: bench(start, lambda: "kotlin_object_type_summary:({0:#x}) = falback:{0:#x}".format(lldb_val.unsigned)) return fallback value = select_provider(lldb_val, tip, internal_dict) bench(start, lambda: "kotlin_object_type_summary:({:#x}) = value:{:#x}".format(lldb_val.unsigned, value._valobj.unsigned)) start = time.monotonic() str0 = value.to_short_string() bench(start, lambda: "kotlin_object_type_summary:({:#x}) = str:'{}...'".format(lldb_val.unsigned, str0[:3])) return str0
79883644017bfc35c77a17a3e5da4b5913864ef2
11,165
import argparse import os import sys def parse_arguments(root_dir): """ Will parse the command line arguments arnd return the arg object. """ # Create top level parser. TODO: add description, usage, etc parser = argparse.ArgumentParser(prog="aware.py", description="Probabilistic demultiplexer for Illumina bcl files. Works " "with single or dual-indexed reads, and single or pair-" "end reads. (github.com/edm1/aware-demultiplexer)", epilog="Enter sub-command to see specific options.", usage="pypy3 aware.py [-h] [-v] <subcommand> [options]") subparsers = parser.add_subparsers(title="The aware.py sub-commands include", prog="pypy3 aware.py", metavar="<subcommand>") # Create parser for the bcl2fastq (extracting reads from illumina folder) parser_b2f = subparsers.add_parser('bcl2fastq', description="Wrapper for picard-tools. Extracts multiplexed reads and " "barcodes from Illumina bcl files.", help="Extracts multiplexed reads and barcodes from Illumina bcl files.") # Required positional arguments parser_b2f.add_argument('baseCallDir', metavar='<baseCallDir>', type=str, help='Directory containing base call intensitites') parser_b2f.add_argument('runParamXML', metavar='<runParameters.xml>', type=str, help='runParameters.xml file') parser_b2f.add_argument('lane', metavar='<lane>', type=int, help='Lane number') # Optional arguments parser_b2f.add_argument('--outDir', '-o', metavar='<str>', type=str, default=os.path.join(root_dir, "output"), help='Location to create output files. (output)') parser_b2f.add_argument('--numCPU', '-p', metavar='<int>', type=int, default=1, help='Number of CPUs to use. (1)') parser_b2f.add_argument('--readsPerTile', '-r', metavar='<int>', type=int, default=120000, help=('Max number of reads in RAM per tile, reduce if ' 'you have problems with memory. (120000)')) parser_b2f.add_argument('--MaxInRam', '-m', metavar='<int>', type=int, default=500000, help=('Maximum number of records that are stored in the' ' RAM. (500000)')) parser_b2f.add_argument('--JavaRAM', '-mem', metavar='<int>', type=int, default=2, help='Amount of RAM (GB) allocated to the Java heap. (2)') parser_b2f.add_argument('--PicardJar', '-jar', metavar='<path>', type=str, default=os.path.join(root_dir, 'libs/picard.jar'), help='Location of picard.jar (libs/picard.jar)') # Add function to call if selected parser_b2f.set_defaults(func=basecalls2fastq.run) # Create parser for the demultiplexer parser_demux = subparsers.add_parser('demux', description="Demultiplexes multiplexed fastqs that are extracted " "by sub-command bcl2fastq.", help="Demultiplex the fastqs extracted by bcl2fastq using indexes " "provided in sampleSheet.csv.") # Required positional args parser_demux.add_argument('inDir', metavar='<inDir>', type=str, help='Directory created by bcl2fastq in output folder.') parser_demux.add_argument('sampleSheet', metavar='<SampleSheet.csv>', type=str, help='MiSeq SampleSheet.csv file, containing index info.') # Optional args parser_demux.add_argument('--uniqID', '-u', metavar='<str>', type=str, default=None, help='Unique ID to append to output folder. (None)') # parser_demux.add_argument('--numCPU', '-p', metavar='<int>', type=int, # default=1, help='Number of CPUs to use. (1)') parser_demux.add_argument('--minProb', '-min', metavar='<float>', type=float, default=0.05, help=('Minimum probability of a match else' ' discard. 
(0.05)')) parser_demux.add_argument('--phredOffset', '-s', metavar='<int>', type=int, required=False, default=33, help='FASTQ phred score offset (33)') parser_demux.add_argument('--indexQual', '-i', metavar='<int>', type=int, default=30, help='Phred-score given to barcode indexes (30)') # Add function to call if selected parser_demux.set_defaults(func=demultiplexer.run) # Add version number to the parser parser.add_argument('-v', '--version', action='version', version='v1.0.3') # Parse the arguments args = parser.parse_args() # Workaround for sub-parser bug (http://bugs.python.org/issue16308) try: a = getattr(args, "func") except AttributeError: parser.print_help() sys.exit(0) # Parse the arguments return args
6fc9c8e9e138d0745d41f098c8833aff217cb78d
11,166
def _grep_first_pair_of_parentheses(s): """ Return the first matching pair of parentheses in a code string. INPUT: A string OUTPUT: A substring of the input, namely the part between the first (outmost) matching pair of parentheses (including the parentheses). Parentheses between single or double quotation marks do not count. If no matching pair of parentheses can be found, a ``SyntaxError`` is raised. EXAMPLES:: sage: from sage.misc.sageinspect import _grep_first_pair_of_parentheses sage: code = 'def foo(a="\'):", b=4):\n return' sage: _grep_first_pair_of_parentheses(code) '(a="\'):", b=4)' sage: code = 'def foo(a="%s):", \'b=4):\n return'%("'") sage: _grep_first_pair_of_parentheses(code) Traceback (most recent call last): ... SyntaxError: The given string does not contain balanced parentheses """ out = [] single_quote = False double_quote = False escaped = False level = 0 for c in s: if level>0: out.append(c) if c=='(' and not single_quote and not double_quote and not escaped: level += 1 elif c=='"' and not single_quote and not escaped: double_quote = not double_quote elif c=="'" and not double_quote and not escaped: single_quote = not single_quote elif c==')' and not single_quote and not double_quote and not escaped: if level == 1: return '('+''.join(out) level -= 1 elif c=="\\" and (single_quote or double_quote): escaped = not escaped else: escaped = False raise SyntaxError("The given string does not contain balanced parentheses")
7441c1b8734c211b9b320e195155719452cf7407
11,167
import requests def login(): """ """ url = "http://127.0.0.1:5001/rest/login" data = {"username": "kivanc", "password": "1234"} r = requests.post(url, json=data) output = r.json() return output["access_token"]
a2b4bd68110fd053c48988f7cc490c88f148bc1f
11,168
def get_all_ops(ifshortcut=True, ifse=True, strides=[1, 2, 2, 2, 1, 2, 1]): """Get all possible ops of current search space Args: ifshortcut: bool, shortcut or not ifse: bool, se or not strides: list, list of strides for bottlenecks Returns: op_params: list, a list of all possible params """ op_params = [] # conv1_1 op_params.append(('conv', 0, 0, 1, image_shape[0], image_shape[1], image_shape[2], 32, 1, 3, 1, 2, 1)) op_params.append(('batch_norm', 'None', 1, 32, int(image_shape[1] / 2), int(image_shape[2] / 2))) op_params.append(('activation', 'relu6', 1, 32, int(image_shape[1] / 2), int(image_shape[2] / 2))) # bottlenecks, TODO: different h and w for images in_c, in_shape = [32], int(image_shape[1] / 2) for i in range(len(NAS_FILTER_SIZE) + 2): if i == 0: expansion, kernels, num_filters, s = [1], [3], [16], strides[i] elif i == len(NAS_FILTER_SIZE) + 1: expansion, kernels, num_filters, s = [6], [3], [320], strides[i] else: expansion, kernels, num_filters, s = NAS_FILTERS_MULTIPLIER, \ NAS_KERNEL_SIZE, \ NAS_FILTER_SIZE[i-1], \ strides[i] # first block tmp_ops = ops_of_inverted_residual_unit( in_c, in_shape, expansion, kernels, num_filters, s, False, ifse) op_params = op_params + tmp_ops in_c, in_shape = num_filters, int(in_shape / s) # repeated block: possibly more ops, but it is ok tmp_ops = ops_of_inverted_residual_unit(in_c, in_shape, expansion, kernels, num_filters, 1, ifshortcut, ifse) op_params = op_params + tmp_ops # last conv op_params.append(('conv', 0, 0, 1, 320, in_shape, in_shape, 1280, 1, 1, 0, 1, 1)) op_params.append(('batch_norm', 'None', 1, 1280, in_shape, in_shape)) op_params.append(('activation', 'relu6', 1, 1280, in_shape, in_shape)) op_params.append(('pooling', 1, 1, 1280, in_shape, in_shape, in_shape, 0, 1, 0, 3)) # fc, converted to 1x1 conv op_params.append(('conv', 0, 0, 1, 1280, 1, 1, class_dim, 1, 1, 0, 1, 1)) op_params.append(('eltwise', 2, 1, 1000, 1, 1)) op_params.append(('softmax', -1, 1, 1000, 1, 1)) op_params.append(('eltwise', 1, 1, 1, 1, 1)) op_params.append(('eltwise', 2, 1, 1, 1, 1)) return list(set(op_params))
7830a330da8709179096fb8b6e789107e0de66cf
11,169
import tqdm import torch def evaluation_per_relation(triples: dict, model: EvaluationModel, batch_size: int = 4): """ :param triples: It should be a dict in form (Relation id):[(s_1,p_1,o_1)...(s_n,p_n,o_n)] """ # Evaluate per relation and store scores/evaluation measures score_per_rel = dict() for k in tqdm.tqdm(triples.keys()): # use API to evaluate model and generate model output for error analysis sub = torch.tensor(triples[k][:, 0]).cuda() pra = torch.tensor(triples[k][:, 1]).cuda() obj = torch.tensor(triples[k][:, 2]).cuda() score_per_rel[k] = model.evaluate_only_metrics(sub, pra, obj, batch_size=batch_size) return score_per_rel
73262587c181fa285b97479110f49ea4dd178946
11,170
import datetime

def check_upload():
    """
    Check whether today's code has already been uploaded.
    :return: the matching record row, or None if nothing was uploaded today.
    """
    ctime = datetime.date.today()  # today's date
    data = db_helper.fetchone(
        'select id from record where ctime = %s and user_id = %s',
        (ctime, session['user_info']['id']))
    return data
2dfceb7cc91668b3a41920b931e946188332c6e4
11,171
def get_package_object(): """Gets a sample package for the submission in Dev Center.""" package = { # The file name is relative to the root of the uploaded ZIP file. "fileName" : "bin/super_dev_ctr_api_sim.appxupload", # If you haven't begun to upload the file yet, set this value to "PendingUpload". "fileStatus" : "PendingUpload" } return package
d65329372f356325c08ecb814f48ad856b9509bc
11,172
def check_for_cmd():
    """
    Returns tuple of [Type] [Data] where type is the shuffle type and data will
    contain either the random shuffle parameters or the top deck order required
    """
    try:
        with open(CMD_FILE, 'r+') as f:
            data = f.readline()
            f.truncate(0)
            # DEBUGGING TODO REMOVE
            # data = 'HOLD,4,true,A,Diamond,Q,Heart,K,,,Diamond,A,Club,7,,,,6,Heart,9,,A,,A,,,Spade,,Spade,,,,,,,,,,,,,,,,\n'
            # Clean and format data
            data = data.replace('\n', '')
            data = data.replace('Diamond', 'D')
            data = data.replace('Heart', 'H')
            data = data.replace('Club', 'C')
            data = data.replace('Spade', 'S')
            rawdata = data.split(',')
            # Process data based on shuffle type key
            if rawdata[0] in SHUFFLES:
                shuffletype = SHUFFLES.index(rawdata[0])
                if shuffletype == 0:
                    return (rawdata[0], format_rand(rawdata[1:]))
                elif shuffletype == 1:
                    return (rawdata[0], format_bjack(rawdata[1:]))
                elif shuffletype == 2:
                    return (rawdata[0], format_holdem(rawdata[1:]))
    except Exception:
        pass
    return (None, None)
42fe4831f910751c8f7760a5a7eed5dc0580d7b4
11,173
from satchmo_store.shop.models import Config def _set_quantity(request, force_delete=False): """Set the quantity for a specific cartitem. Checks to make sure the item is actually in the user's cart. """ cart = Cart.objects.from_request(request, create=False) if isinstance(cart, NullCart): return (False, None, None, _("No cart to update.")) cartplaces = config_value('SHOP', 'CART_PRECISION') if force_delete: qty = Decimal('0') else: try: roundfactor = config_value('SHOP', 'CART_ROUNDING') qty = round_decimal(request.POST.get('quantity', 0), places=cartplaces, roundfactor=roundfactor, normalize=True) except RoundedDecimalError, P: return (False, cart, None, _("Bad quantity.")) if qty < Decimal('0'): qty = Decimal('0') try: itemid = int(request.POST.get('cartitem')) except (TypeError, ValueError): return (False, cart, None, _("Bad item number.")) try: cartitem = CartItem.objects.get(pk=itemid, cart=cart) except CartItem.DoesNotExist: return (False, cart, None, _("No such item in your cart.")) if qty == Decimal('0'): cartitem.delete() cartitem = NullCartItem(itemid) else: config = Config.objects.get_current() if config_value('PRODUCT','NO_STOCK_CHECKOUT') == False: stock = cartitem.product.items_in_stock log.debug('checking stock quantity. Have %d, need %d', stock, qty) if stock < qty: return (False, cart, cartitem, _("Not enough items of '%s' in stock.") % cartitem.product.translated_name()) cartitem.quantity = round_decimal(qty, places=cartplaces) cartitem.save() satchmo_cart_changed.send(cart, cart=cart, request=request) return (True, cart, cartitem, "")
066314ac9689739ab12518e1049122390808221c
11,174
import os def load_typos_file(file_name, char_vocab = {}, filter_OOA_chars = False): """ Loads typos from a given file. Optionally, filters all entries that contain out-of-alphabet characters. """ basename, ext = os.path.splitext(file_name) replacement_rules = list() if ext == ".tsv": typos = load_typos_moe(file_name) else: typos = load_typos_belinkov_bisk(file_name) if "extracted" in basename: print("> applying replacement rules..") replacement_rules.append((chr(172), ' ')) typos = _normalize_typos(typos, replacement_rules) if filter_OOA_chars: typos = _filter_typos(typos, char_vocab) return typos
6d5a54c0cea6751affa4ccd343632d1d7e20fd21
11,175
def load_config(config_file="config.yaml"):
    """Load config file to initialize fragment factories.
    A config file is a YAML file.
    Example config file:
    # config.yaml
    name: My LDF server
    maintainer: chuck Norris <[email protected]>
    datasets:
    - name: DBpedia-2016-04
      description: DBpedia dataset, version 2016-04
      backend: hdt-file
      file: /home/chuck-norris/dbpedia-2016-04.hdt
    - name: Chuck-Norris-facts
      description: Best Chuck Norris facts ever
      backend: rdf-file
      format: nt
      file: /home/chuck-norris/facts.nt
    """
    config = load(open(config_file))
    # set page size, i.e. the number of triples per page
    quota = config['quota'] if 'quota' in config else 75
    max_results = config['max_results'] if 'max_results' in config else inf
    config['quota'] = quota
    for c in config["datasets"]:
        if 'quota' not in c:
            c['quota'] = quota
        if 'max_results' not in c:
            c['max_results'] = max_results
        if 'queries' not in c:
            c['queries'] = []
    # build graphs
    graphs = {c["name"]: Graph(c) for c in config["datasets"]}
    return (config, graphs)
b5ee03a3b30f4374da05469cd3a289566eb26540
11,176
import os import socket import timeit import time def execute_actor(actor_id, worker_id, execution_id, image, msg, user=None, d={}, privileged=False, mounts=[], leave_container=False, fifo_host_path=None, socket_host_path=None): """ Creates and runs an actor container and supervises the execution, collecting statistics about resource consumption from the Docker daemon. :param actor_id: the dbid of the actor; for updating worker status :param worker_id: the worker id; also for updating worker status :param execution_id: the id of the execution. :param image: the actor's image; worker must have already downloaded this image to the local docker registry. :param msg: the message being passed to the actor. :param user: string in the form {uid}:{gid} representing the uid and gid to run the command as. :param d: dictionary representing the environment to instantiate within the actor container. :param privileged: whether this actor is "privileged"; i.e., its container should run in privileged mode with the docker daemon mounted. :param mounts: list of dictionaries representing the mounts to add; each dictionary mount should have 3 keys: host_path, container_path and format (which should have value 'ro' or 'rw'). :param fifo_host_path: If not None, a string representing a path on the host to a FIFO used for passing binary data to the actor. :param socket_host_path: If not None, a string representing a path on the host to a socket used for collecting results from the actor. :return: result (dict), logs (str) - `result`: statistics about resource consumption; `logs`: output from docker logs. """ logger.debug("top of execute_actor()") # initial stats object, environment, binds and volumes result = {'cpu': 0, 'io': 0, 'runtime': 0 } # instantiate docker client cli = docker.APIClient(base_url=dd, version="auto") # don't try to pass binary messages through the environment as these can cause # broken pipe errors. the binary data will be passed through the FIFO momentarily. if not fifo_host_path: d['MSG'] = msg binds = {} volumes = [] # if container is privileged, mount the docker daemon so that additional # containers can be started. logger.debug("privileged: {}".format(privileged)) if privileged: binds = {'/var/run/docker.sock':{ 'bind': '/var/run/docker.sock', 'ro': False }} volumes = ['/var/run/docker.sock'] # add a bind key and dictionary as well as a volume for each mount for m in mounts: binds[m.get('host_path')] = {'bind': m.get('container_path'), 'ro': m.get('format') == 'ro'} volumes.append(m.get('host_path')) host_config = cli.create_host_config(binds=binds, privileged=privileged) # write binary data to FIFO if it exists: if fifo_host_path: try: fifo = os.open(fifo_host_path, os.O_RDWR) os.write(fifo, msg) except Exception as e: logger.error("Error writing the FIFO. Exception: {}".format(e)) os.remove(fifo_host_path) raise DockerStartContainerError("Error writing to fifo: {}".format(e)) # set up results socket try: server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) server.bind(socket_host_path) server.settimeout(RESULTS_SOCKET_TIMEOUT) except Exception as e: logger.error("could not instantiate or bind socket. 
Exception: {}".format(e)) raise e # instantiate the results channel: results_ch = ExecutionResultsChannel(actor_id, execution_id) # create and start the container logger.debug("Final container environment: {}".format(d)) logger.debug("Final binds: {} and host_config: {} for the container.".format(binds, host_config)) container = cli.create_container(image=image, environment=d, user=user, volumes=volumes, host_config=host_config) # get the URC time stampe start_time = get_current_utc_time() # start the timer to track total execution time. start = timeit.default_timer() logger.debug("right before cli.start: {}".format(start)) try: cli.start(container=container.get('Id')) except Exception as e: # if there was an error starting the container, user will need to debug logger.info("Got exception starting actor container: {}".format(e)) raise DockerStartContainerError("Could not start container {}. Exception {}".format(container.get('Id'), str(e))) # local bool tracking whether the actor container is still running running = True logger.debug("right before creating stats_cli: {}".format(timeit.default_timer())) # create a separate cli for checking stats objects since these should be fast and we don't want to wait stats_cli = docker.APIClient(base_url=dd, timeout=1, version="auto") logger.debug("right after creating stats_cli: {}".format(timeit.default_timer())) # under load, we can see UnixHTTPConnectionPool ReadTimeout's trying to create the stats_obj # so here we are trying up to 3 times to create the stats object for a possible total of 3s # timeouts ct = 0 while ct < 3: try: stats_obj = stats_cli.stats(container=container.get('Id'), decode=True) break except ReadTimeout: ct += 1 except Exception as e: logger.error("Unexpected exception creating stats_obj. Exception: {}".format(e)) # in this case, we need to kill the container since we cannot collect stats; running = False logger.debug("right after creating stats_obj: {}".format(timeit.default_timer())) while running: datagram = None try: datagram = server.recv(MAX_RESULT_FRAME_SIZE) except socket.timeout: pass except Exception as e: logger.error("got exception from server.recv: {}".format(e)) logger.debug("right after try/except datagram block: {}".format(timeit.default_timer())) if datagram: try: results_ch.put(datagram) except Exception as e: logger.error("Error trying to put datagram on results channel. Exception: {}".format(e)) logger.debug("right after results ch.put: {}".format(timeit.default_timer())) try: logger.debug("waiting on a stats obj: {}".format(timeit.default_timer())) stats = next(stats_obj) logger.debug("got the stats obj: {}".format(timeit.default_timer())) except ReadTimeoutError: # this is a ReadTimeoutError from docker, not requests. container is finished. logger.debug("next(stats) just timed out: {}".format(timeit.default_timer())) # container stopped before another stats record could be read, just ignore and move on running = False break try: result['cpu'] += stats['cpu_stats']['cpu_usage']['total_usage'] except KeyError as e: logger.info("Got a KeyError trying to fetch the cpu object: {}".format(e)) try: result['io'] += stats['networks']['eth0']['rx_bytes'] except KeyError as e: logger.info("Got KeyError exception trying to grab the io object. 
running: {}; Exception: {}".format(running, e)) if running: logger.debug("about to check container status: {}".format(timeit.default_timer())) # we need to wait for the container id to be available i = 0 while i < 10: try: c = cli.containers(all=True, filters={'id': container.get('Id')})[0] break except IndexError: logger.error("Got an IndexError trying to get the container object.") time.sleep(0.1) i += 1 logger.debug("done checking status: {}; i: {}".format(timeit.default_timer(), i)) if i == 10: logger.error("Never could retrieve the container object! container id: {}".format(container.get('Id'))) try: cli.stop(container.get('Id')) except Exception as e: logger.error("Got another exception trying to stop the actor container. Exception: {}".format(e)) finally: running = False continue state = c.get('State') if not state == 'running': logger.debug("container finished, final state: {}".format(state)) running = False else: # container still running; check if we are beyond the max_run_time runtime = timeit.default_timer() - start if max_run_time > 0 and max_run_time < runtime: logger.info("hit runtime limit: {}".format(timeit.default_timer())) cli.stop(container.get('Id')) running = False logger.debug("right after checking container state: {}".format(timeit.default_timer())) logger.info("container stopped:{}".format(timeit.default_timer())) stop = timeit.default_timer() # get info from container execution, including exit code try: container_info = cli.inspect_container(container.get('Id')) try: container_state = container_info['State'] try: exit_code = container_state['ExitCode'] except KeyError as e: logger.error("Could not determine ExitCode for container {}. e: {}".format(container.get('Id'), e)) exit_code = 'undetermined' except KeyError as e: logger.error("Could not determine final state for container {}. e: {} ".format(container.get('Id')), e) container_state = {'unavailable': True} except docker.errors.APIError as e: logger.error("Could not inspect container {}. e: {}".format(container.get('Id'), e)) logger.debug("right after getting container_info: {}".format(timeit.default_timer())) # get logs from container logs = cli.logs(container.get('Id')) logger.debug("right after getting container logs: {}".format(timeit.default_timer())) # get any additional results from the execution: while True: datagram = None try: datagram = server.recv(MAX_RESULT_FRAME_SIZE) except socket.timeout: break except Exception as e: logger.error("Got exception from server.recv: {}".format(e)) if datagram: try: results_ch.put(datagram) except Exception as e: logger.error("Error trying to put datagram on results channel. 
Exception: {}".format(e)) logger.debug("right after getting last execution results from datagram socket: {}".format(timeit.default_timer())) if socket_host_path: server.close() os.remove(socket_host_path) logger.debug("right after removing socket: {}".format(timeit.default_timer())) # remove container, ignore errors if not leave_container: try: cli.remove_container(container=container) logger.info("Container removed.") except Exception as e: logger.error("Exception trying to remove actor: {}".format(e)) else: logger.debug("leaving actor container since leave_container was True.") logger.debug("right after removing actor container: {}".format(timeit.default_timer())) if fifo_host_path: os.close(fifo) os.remove(fifo_host_path) result['runtime'] = int(stop - start) logger.debug("right after removing fifo; about to return: {}".format(timeit.default_timer())) return result, logs, container_state, exit_code, start_time
aab6a06de32737d6146945f365433e4b946ee659
11,177
def find_start_end(grid):
    """
    Finds the source and destination block indexes from the list.
    Args
        grid: <list> the world grid blocks represented as a list of blocks (see Tutorial.pdf)
    Returns
        start: <int> source block index in the list
        end: <int> destination block index in the list
        air_block: <list> indexes of all air blocks in the list
        diamond_block: <list> indexes of all diamond blocks in the list
    """
    #------------------------------------
    #
    # Fill and submit this code
    #
    # return (None, None)
    #-------------------------------------
    counter = 0
    eb_index = None
    rb_index = None
    air_block = []
    diamond_block = []
    state = []
    for i in grid:
        if i == 'diamond_block':
            diamond_block.append(counter)
        if i == 'air':
            air_block.append(counter)
        if i == 'emerald_block':
            eb_index = counter
        if i == 'redstone_block':
            rb_index = counter
            state.append(counter)
        counter += 1
    return (eb_index, rb_index, air_block, diamond_block)
d617af3d6ebf9a2c9f42250214e3fe52d2017170
11,178
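An illustrative call of find_start_end on a toy grid, showing the four returned values (indices are positions in the flat block list); assumes the function above is in scope:

grid = ['air', 'emerald_block', 'air', 'diamond_block',
        'diamond_block', 'air', 'redstone_block', 'air']
start, end, air_blocks, diamond_blocks = find_start_end(grid)
# start == 1, end == 6, air_blocks == [0, 2, 5, 7], diamond_blocks == [3, 4]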
from typing import Optional import inspect def find_method_signature(klass, method: str) -> Optional[inspect.Signature]: """Look through a class' ancestors and fill out the methods signature. A class method has a signature. But it might now always be complete. When a parameter is not annotated, we might want to look through the ancestors and determine the annotation. This is very useful when you have a base class that has annotations, and child classes that are not. Examples -------- >>> class Parent: ... ... def foo(self, x: int) -> int: ... ... >>> find_method_signature(Parent, 'foo') <Signature (self, x: int) -> int> >>> class Child(Parent): ... ... def foo(self, x, y: float) -> str: ... ... >>> find_method_signature(Child, 'foo') <Signature (self, x: int, y: float) -> str> """ m = getattr(klass, method) sig = inspect.signature(m) params = [] for param in sig.parameters.values(): if param.name == "self" or param.annotation is not param.empty: params.append(param) continue for ancestor in inspect.getmro(klass): try: ancestor_meth = inspect.signature(getattr(ancestor, m.__name__)) except AttributeError: break try: ancestor_param = ancestor_meth.parameters[param.name] except KeyError: break if ancestor_param.annotation is not param.empty: param = param.replace(annotation=ancestor_param.annotation) break params.append(param) return_annotation = sig.return_annotation if return_annotation is inspect._empty: for ancestor in inspect.getmro(klass): try: ancestor_meth = inspect.signature(getattr(ancestor, m.__name__)) except AttributeError: break if ancestor_meth.return_annotation is not inspect._empty: return_annotation = ancestor_meth.return_annotation break return sig.replace(parameters=params, return_annotation=return_annotation)
17d3e7d554720766ca62cb4ad7a66c42f947fc1c
11,179
def format_long_calc_line(line: LongCalcLine) -> LongCalcLine: """ Return line with .latex attribute formatted with line breaks suitable for positioning within the "\aligned" latex environment. """ latex_code = line.latex long_latex = latex_code.replace("=", "\\\\&=") # Change all... long_latex = long_latex.replace("\\\\&=", "&=", 1) # ...except the first one line_break = "\\\\\n" comment_space = "" comment = "" if line.comment: comment_space = "\\;" comment = format_strings(line.comment, comment=True) line.latex = f"{long_latex}{comment_space}{comment}{line_break}" return line
a6f19b7f3a1876f3b6b0c88baddfd02b16901b41
11,180
from riddle import emr, feature_importance from riddle.models import MLP import time import pickle def run(data_fn, prop_missing=0., max_num_feature=-1, feature_selection='random', k=10, data_dir='_data', out_dir='_out'): """Run RIDDLE classification interpretation pipeline. Arguments: data_fn: string data file filename prop_missing: float proportion of feature observations which should be randomly masked; values in [0, 1) max_num_feature: int maximum number of features to use feature_selection: string feature selection method; values = {'random', 'frequency', 'chi2'} k: int number of partitions for k-fold cross-validation interpret_model: bool whether to interpret the trained model for first k-fold partition which_half: str which half of experiments to do; values = {'first', 'last', 'both'} data_dir: string directory where data files are located cache_dir: string directory where cached files (e.g., saved parameters) are located out_dir: string outer directory where outputs (e.g., results) should be saved """ start = time.time() base_out_dir = get_base_out_dir(out_dir, 'riddle', data_fn, prop_missing, max_num_feature, feature_selection) recursive_mkdir(base_out_dir) # get common data x_unvec, y, idx_feat_dict, idx_class_dict, icd9_descript_dict, perm_indices = ( get_preprocessed_data(data_dir, data_fn, prop_missing=prop_missing)) num_feature = len(idx_feat_dict) num_class = len(idx_class_dict) list_sums_D, list_sums_D2, list_sums_contribs = [], [], [] for k_idx in range(k): full_out_dir = '{}/k_idx={}'.format(base_out_dir, k_idx) print('\nPartition k = {}'.format(k_idx)) x_train_unvec, y_train, _, _, x_test_unvec, y_test = emr.get_k_fold_partition( x_unvec, y, k_idx=k_idx, k=k, perm_indices=perm_indices) if max_num_feature > 0: # select features and re-encode feat_encoding_dict, idx_feat_dict = select_features( x_train_unvec, y_train, idx_feat_dict, method=feature_selection, num_feature=num_feature, max_num_feature=max_num_feature) x_test_unvec = subset_reencode_features( x_test_unvec, feat_encoding_dict) num_feature = max_num_feature # interpret start = time.time() temp_mlp = MLP(num_feature=num_feature, num_class=num_class) hdf5_path = full_out_dir + '/model.h5' sums_D, sums_D2, sums_contribs, pairs = \ feature_importance.get_diff_sums( hdf5_path, x_test_unvec, process_x_func=temp_mlp.process_x, num_feature=num_feature, num_class=num_class) with open(full_out_dir + '/sums_D.pkl', 'wb') as f: pickle.dump(sums_D, f) with open(full_out_dir + '/sums_D2.pkl', 'wb') as f: pickle.dump(sums_D2, f) with open(full_out_dir + '/sums_contribs.pkl', 'wb') as f: pickle.dump(sums_contribs, f) list_sums_D.append(sums_D) list_sums_D2.append(sums_D2) list_sums_contribs.append(sums_contribs) def compute_total_sums(list_sums): total_sums = list_sums[0] for i in range(1, len(list_sums)): for j in range(len(total_sums)): total_sums[j] = np.add(total_sums[j], list_sums[i][j]) return total_sums total_sums_D = compute_total_sums(list_sums_D) total_sums_D2 = compute_total_sums(list_sums_D2) total_sums_contribs = compute_total_sums(list_sums_contribs) num_sample = len(x_unvec) run_interpretation_summary( x_unvec, y, total_sums_D, total_sums_D2, total_sums_contribs, idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict, icd9_descript_dict=icd9_descript_dict, pairs=pairs, num_sample=num_sample, full_out_dir=base_out_dir) print('Computed DeepLIFT scores and analysis in {:.4f} seconds' .format(time.time() - start)) print('-' * 72) print()
ac28216cbea67b0bdc6d2b3f617c24c975623415
11,181
def h_lgn(t, mu, sigma, normalize=False): """ Log-normal density Args: t: input argument (array) mu: mean parameter (-infty,infty) sigma: std parameter > 0 normalize: trapz integral normalization over t Returns: function values """ y = np.zeros(len(t)) y[t>0] = 1/(t[t>0]*sigma*np.sqrt(2*np.pi)) * np.exp(-(np.log(t[t>0]) - mu)**2 / (2*sigma**2)) y[np.isinf(y) | np.isnan(y)] = 0 # Protect underflows if normalize: y /= np.abs(trapz(x=t, y=y)) # abs for numerical protection return y
63bd6ea48f5ea28c5631b3ce259066d3624d038b
11,182
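The un-normalized branch of h_lgn is the standard log-normal density, which scipy parametrizes as lognorm(s=sigma, scale=exp(mu)). A hedged numerical cross-check, assuming h_lgn above is in scope:

import numpy as np
from scipy.stats import lognorm

t = np.linspace(0.01, 10.0, 200)
mu, sigma = 0.5, 0.8
reference = lognorm.pdf(t, s=sigma, scale=np.exp(mu))
# h_lgn(t, mu, sigma, normalize=False) should agree with `reference`
# to floating point precision, e.g. np.allclose(h_lgn(t, mu, sigma), reference).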
from .background import set_background_alignment import copy def align_background(data, align='auto'): """ Determine the Qz value associated with the background measurement. The *align* flag determines which background points are matched to the sample points. It can be 'sample' if background is measured using an offset from the sample angle, or 'detector' if it is offset from detector angle. If *align* is 'auto', then use 'Qz_target' to align the background scan. For 'auto' alignment without Qz_target set, we can only distinguish relative and constant offsets, and cannot determine which of sample and detector is offset from the specular condition, so we must rely on convention. If the offset is constant for each angle, then it is assumed to be a sample offset. If the the offset is proportional to the angle (and therefore offset divided by angle is constant), then it is assumed to be a detector offset. If neither condition is met, it is assumed to be a sample offset. The 'auto' test is robust: 90% of the points should be within 5% of the median value of the vector for the offset to be considered a constant. **Inputs** data (refldata) : background data with unknown $q$ align (opt:auto|sample|detector) : angle which determines $q_z$ **Returns** output (refldata) : background with known $q$ 2015-12-17 Paul Kienzle 2020-10-16 Paul Kienzle rename 'offset' to 'align' """ data = copy(data) set_background_alignment(data, align) return data
a8b33aa5440cf6c212d964d58720bef771fe2083
11,183
def get_bounds_from_config(b, state, base_units): """ Method to take a 3- or 4-tuple state definition config argument and return tuples for the bounds and default value of the Var object. Expects the form (lower, default, upper, units) where units is optional Args: b - StateBlock on which the state vars are to be constructed state - name of state var as a string (to be matched with config dict) base_units - base units of state var to be used if conversion required Returns: bounds - 2-tuple of state var bounds in base units default_val - default value of state var in base units """ try: var_config = b.params.config.state_bounds[state] except (KeyError, TypeError): # State definition missing return (None, None), None if len(var_config) == 4: # Units provided, need to convert values bounds = (pyunits.convert_value(var_config[0], from_units=var_config[3], to_units=base_units), pyunits.convert_value(var_config[2], from_units=var_config[3], to_units=base_units)) default_val = pyunits.convert_value(var_config[1], from_units=var_config[3], to_units=base_units) else: bounds = (var_config[0], var_config[2]) default_val = var_config[1] return bounds, default_val
c9e757a2032178e656f7bbc27519bd1650eb9a79
11,184
def read_train_data(): """ train_data.shape = (73257, 32, 32, 3) train_label.shape = (73257,) extra_data.shape = (531131, 32, 32, 3) extra_label.shape = (531131,) data.shape = (604388, 32, 32, 3) labels.shape = (604388,) """ train_data, train_label = read_images(full_data_dir+'train_32x32.mat') extra_data, extra_label = read_images(full_data_dir+'extra_32x32.mat') data = np.concatenate( (train_data, extra_data) ) label = np.concatenate( (train_label, extra_label) ) return data, label
d6e5c06ceb3a95e20e8ae301d83e1f480fc48591
11,185
def laguerreFunction(n, alpha, t, normalized=True): """Evaluate Laguerre function using scipy.special""" if normalized: Z = np.exp( .5*sps.gammaln(n+1) - .5*sps.gammaln(n+alpha+1) ) else: Z = 1 return Z * np.sqrt(mu(alpha,t)) * sps.eval_genlaguerre(n, alpha, t)
6c48f3ddaed9db7d748ad8fc972a132795ad3916
11,186
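laguerreFunction relies on a project-local weight mu(alpha, t) that is not shown above. Assuming it is the usual generalized Laguerre weight t**alpha * exp(-t), the normalized functions are orthonormal on [0, inf); a self-contained sketch checking the n = 2, alpha = 0 norm under that assumption:

import numpy as np
import scipy.special as sps

def weight(alpha, t):
    return t**alpha * np.exp(-t)   # assumed form of the project-local mu(alpha, t)

def laguerre_fn(n, alpha, t):
    Z = np.exp(0.5 * sps.gammaln(n + 1) - 0.5 * sps.gammaln(n + alpha + 1))
    return Z * np.sqrt(weight(alpha, t)) * sps.eval_genlaguerre(n, alpha, t)

t = np.linspace(0.0, 60.0, 200001)
f = laguerre_fn(2, 0.0, t)
norm = np.sum(f ** 2) * (t[1] - t[0])   # crude quadrature of the squared norm
print(norm)   # close to 1.0 if the assumed weight matches mu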
def end(s): """Select the mobile or weight hanging at the end of a side.""" assert is_side(s), "must call end on a side" return branches(s)[0]
2bcbc61e989287d714e9401660e58bd2f54c6fe6
11,187
def get_ca_pos_from_atoms(df, atoms): """Look up alpha carbon positions of provided atoms.""" ca = df[df['atom_name'] == 'CA'].reset_index() nb = ca.reindex(atoms) nb = nb.reset_index().set_index('index') return nb
c069db751d94f6626be5d56e7b286ef3c873c04e
11,188
def split_inline_box(context, box, position_x, max_x, skip_stack, containing_block, containing_page, absolute_boxes, fixed_boxes, line_placeholders, waiting_floats, line_children): """Same behavior as split_inline_level.""" # In some cases (shrink-to-fit result being the preferred width) # max_x is coming from Pango itself, # but floating point errors have accumulated: # width2 = (width + X) - X # in some cases, width2 < width # Increase the value a bit to compensate and not introduce # an unexpected line break. The 1e-9 value comes from PEP 485. max_x *= 1 + 1e-9 is_start = skip_stack is None initial_position_x = position_x initial_skip_stack = skip_stack assert isinstance(box, (boxes.LineBox, boxes.InlineBox)) left_spacing = (box.padding_left + box.margin_left + box.border_left_width) right_spacing = (box.padding_right + box.margin_right + box.border_right_width) content_box_left = position_x children = [] waiting_children = [] preserved_line_break = False first_letter = last_letter = None float_widths = {'left': 0, 'right': 0} float_resume_at = 0 if box.style['position'] == 'relative': absolute_boxes = [] if is_start: skip = 0 else: skip, skip_stack = skip_stack for i, child in enumerate(box.children[skip:]): index = i + skip child.position_y = box.position_y if child.is_absolutely_positioned(): child.position_x = position_x placeholder = AbsolutePlaceholder(child) line_placeholders.append(placeholder) waiting_children.append((index, placeholder)) if child.style['position'] == 'absolute': absolute_boxes.append(placeholder) else: fixed_boxes.append(placeholder) continue elif child.is_floated(): child.position_x = position_x float_width = shrink_to_fit(context, child, containing_block.width) # To retrieve the real available space for floats, we must remove # the trailing whitespaces from the line non_floating_children = [ child_ for _, child_ in (children + waiting_children) if not child_.is_floated()] if non_floating_children: float_width -= trailing_whitespace_size( context, non_floating_children[-1]) if float_width > max_x - position_x or waiting_floats: # TODO: the absolute and fixed boxes in the floats must be # added here, and not in iter_line_boxes waiting_floats.append(child) else: child = float_layout(context, child, containing_block, containing_page, absolute_boxes, fixed_boxes) waiting_children.append((index, child)) # Translate previous line children dx = max(child.margin_width(), 0) float_widths[child.style['float']] += dx if child.style['float'] == 'left': if isinstance(box, boxes.LineBox): # The parent is the line, update the current position # for the next child. When the parent is not the line # (it is an inline block), the current position of the # line is updated by the box itself (see next # split_inline_level call). 
position_x += dx elif child.style['float'] == 'right': # Update the maximum x position for the next children max_x -= dx for _, old_child in line_children: if not old_child.is_in_normal_flow(): continue if ((child.style['float'] == 'left' and box.style['direction'] == 'ltr') or (child.style['float'] == 'right' and box.style['direction'] == 'rtl')): old_child.translate(dx=dx) float_resume_at = index + 1 continue elif child.is_running(): running_name = child.style['position'][1] page = context.current_page context.running_elements[running_name][page].append(child) continue last_child = (index == len(box.children) - 1) available_width = max_x child_waiting_floats = [] new_child, resume_at, preserved, first, last, new_float_widths = ( split_inline_level(context, child, position_x, available_width, skip_stack, containing_block, containing_page, absolute_boxes, fixed_boxes, line_placeholders, child_waiting_floats, line_children)) if last_child and right_spacing and resume_at is None: # TODO: we should take care of children added into absolute_boxes, # fixed_boxes and other lists. if box.style['direction'] == 'rtl': available_width -= left_spacing else: available_width -= right_spacing new_child, resume_at, preserved, first, last, new_float_widths = ( split_inline_level(context, child, position_x, available_width, skip_stack, containing_block, containing_page, absolute_boxes, fixed_boxes, line_placeholders, child_waiting_floats, line_children)) if box.style['direction'] == 'rtl': max_x -= new_float_widths['left'] else: max_x -= new_float_widths['right'] skip_stack = None if preserved: preserved_line_break = True can_break = None if last_letter is True: last_letter = ' ' elif last_letter is False: last_letter = ' ' # no-break space elif box.style['white_space'] in ('pre', 'nowrap'): can_break = False if can_break is None: if None in (last_letter, first): can_break = False else: can_break = can_break_text( last_letter + first, child.style['lang']) if can_break: children.extend(waiting_children) waiting_children = [] if first_letter is None: first_letter = first if child.trailing_collapsible_space: last_letter = True else: last_letter = last if new_child is None: # May be None where we have an empty TextBox. assert isinstance(child, boxes.TextBox) else: if isinstance(box, boxes.LineBox): line_children.append((index, new_child)) # TODO: we should try to find a better condition here. trailing_whitespace = ( isinstance(new_child, boxes.TextBox) and not new_child.text.strip()) margin_width = new_child.margin_width() new_position_x = new_child.position_x + margin_width if new_position_x > max_x and not trailing_whitespace: if waiting_children: # Too wide, let's try to cut inside waiting children, # starting from the end. # TODO: we should take care of children added into # absolute_boxes, fixed_boxes and other lists. waiting_children_copy = waiting_children[:] break_found = False while waiting_children_copy: child_index, child = waiting_children_copy.pop() # TODO: should we also accept relative children? if (child.is_in_normal_flow() and can_break_inside(child)): # We break the waiting child at its last possible # breaking point. # TODO: The dirty solution chosen here is to # decrease the actual size by 1 and render the # waiting child again with this constraint. We may # find a better way. 
max_x = child.position_x + child.margin_width() - 1 child_new_child, child_resume_at, _, _, _, _ = ( split_inline_level(context, child, child.position_x, max_x, None, box, containing_page, absolute_boxes, fixed_boxes, line_placeholders, waiting_floats, line_children)) # As PangoLayout and PangoLogAttr don't always # agree, we have to rely on the actual split to # know whether the child was broken. # https://github.com/Kozea/WeasyPrint/issues/614 break_found = child_resume_at is not None if child_resume_at is None: # PangoLayout decided not to break the child child_resume_at = (0, None) # TODO: use this when Pango is always 1.40.13+: # break_found = True children = children + waiting_children_copy if child_new_child is None: # May be None where we have an empty TextBox. assert isinstance(child, boxes.TextBox) else: children += [(child_index, child_new_child)] # As this child has already been broken # following the original skip stack, we have to # add the original skip stack to the partial # skip stack we get after the new rendering. # Combining skip stacks is a bit complicated # We have to: # - set `child_index` as the first number # - append the new stack if it's an absolute one # - otherwise append the combined stacks # (resume_at + initial_skip_stack) # extract the initial index if initial_skip_stack is None: current_skip_stack = None initial_index = 0 else: initial_index, current_skip_stack = ( initial_skip_stack) # child_resume_at is an absolute skip stack if child_index > initial_index: resume_at = (child_index, child_resume_at) break # combine the stacks current_resume_at = child_resume_at stack = [] while current_skip_stack and current_resume_at: skip, current_skip_stack = ( current_skip_stack) resume, current_resume_at = ( current_resume_at) stack.append(skip + resume) if resume != 0: break resume_at = current_resume_at while stack: resume_at = (stack.pop(), resume_at) # insert the child index resume_at = (child_index, resume_at) break if break_found: break if children: # Too wide, can't break waiting children and the inline is # non-empty: put child entirely on the next line. resume_at = (children[-1][0] + 1, None) child_waiting_floats = [] break position_x = new_position_x waiting_children.append((index, new_child)) waiting_floats.extend(child_waiting_floats) if resume_at is not None: children.extend(waiting_children) resume_at = (index, resume_at) break else: children.extend(waiting_children) resume_at = None is_end = resume_at is None new_box = box.copy_with_children( [box_child for index, box_child in children]) new_box.remove_decoration(start=not is_start, end=not is_end) if isinstance(box, boxes.LineBox): # We must reset line box width according to its new children in_flow_children = [ box_child for box_child in new_box.children if box_child.is_in_normal_flow()] if in_flow_children: new_box.width = ( in_flow_children[-1].position_x + in_flow_children[-1].margin_width() - new_box.position_x) else: new_box.width = 0 else: new_box.position_x = initial_position_x if box.style['box_decoration_break'] == 'clone': translation_needed = True else: translation_needed = ( is_start if box.style['direction'] == 'ltr' else is_end) if translation_needed: for child in new_box.children: child.translate(dx=left_spacing) new_box.width = position_x - content_box_left new_box.translate(dx=float_widths['left'], ignore_floats=True) line_height, new_box.baseline = strut_layout(box.style, context) new_box.height = box.style['font_size'] half_leading = (line_height - new_box.height) / 2. 
# Set margins to the half leading but also compensate for borders and # paddings. We want margin_height() == line_height new_box.margin_top = (half_leading - new_box.border_top_width - new_box.padding_top) new_box.margin_bottom = (half_leading - new_box.border_bottom_width - new_box.padding_bottom) if new_box.style['position'] == 'relative': for absolute_box in absolute_boxes: absolute_layout(context, absolute_box, new_box, containing_page, fixed_boxes) if resume_at is not None: if resume_at[0] < float_resume_at: resume_at = (float_resume_at, None) return ( new_box, resume_at, preserved_line_break, first_letter, last_letter, float_widths)
b1c7c0b7b831e8a7b4cb8d743690abfedc90685e
11,189
def LoadComponent(self,filename): # real signature unknown; restored from __doc__ """ LoadComponent(self: object,filename: str) -> object LoadComponent(self: object,stream: Stream) -> object LoadComponent(self: object,xmlReader: XmlReader) -> object LoadComponent(self: object,filename: TextReader) -> object LoadComponent(self: object,reader: XamlXmlReader) -> object """ return object()
17b893a6e91f4ef62b8ba18646d9dc2005c52ccd
11,190
def split_bits(word : int, amounts : list): """ takes in a word and a list of bit amounts and returns the bits in the word split up. See the doctests for concrete examples >>> [bin(x) for x in split_bits(0b1001111010000001, [16])] ['0b1001111010000001'] >>> [bin(x) for x in split_bits(0b1001111010000001, [8,8])] ['0b10011110', '0b10000001'] not the whole 16 bits! >>> [bin(x) for x in split_bits(0b1001111010000001, [8])] Traceback (most recent call last): AssertionError: expected to split exactly one word This is a test splitting MOVE.B (A1),D4 >>> [bin(x) for x in split_bits(0b0001001010000100, [2,2,3,3,3,3])] ['0b0', '0b1', '0b1', '0b10', '0b0', '0b100'] """ nums = [] pos = 0 for amount in amounts: # get a group of "amount" 1's mask = 2**amount - 1 # shift mask to the left so it aligns where the last # iteration ended off shift = 16 - amount - pos mask = mask << shift # update location in the word pos += amount # extract the relavent bits bits = word & mask # shift back and insert the list to be returned nums.append(bits >> shift) assert pos == 16, 'expected to split exactly one word' return nums
556a389bb673af12a8b11d8381914bf56f7e0599
11,191
def global_tracer(ot_tracer): """A function similar to one OpenTracing users would write to initialize their OpenTracing tracer. """ set_global_tracer(ot_tracer) return ot_tracer
87207d92179a0b23f20806e3e93ec7e78b1b31f1
11,192
import requests def getListProjectsInGroup(config, grp): """ Get list of issue in group """ print("Retrieve project of group: %s " % grp.name) data = None __prjLst = gitlabProjectList(grp) if (DUMMY_DATA): testFile = getFullFilePath(ISSUES_GRP_TEST_FILE) with open (testFile, 'rt') as f: data = f.read() f.close() else: # retrieve data from server url = getApiUrl(config, "groups/%s/projects" % grp.id) logD("URL " + url) token = config.getToken() hdrs = {"PRIVATE-TOKEN":config.getToken()} __totalPage = 0 __page = 1 while True: logD("Page %d" % (__page)) params = {'page': __page} logD("header %s" % hdrs) resp = requests.get(url, headers=hdrs, params=params) logD("resp status_code %s" % resp.status_code) if (resp.status_code == 200): data = resp.content logD (resp.headers) if (len(resp.headers.get('X-Next-Page')) > 0): __page = int(resp.headers.get('X-Next-Page')) else: __page = 0 logD("next page %d" % (__page)) else: __page = 0 break if (data is not None) and len(data) > 0: logD("data %s" % data) __prjLst.parseData(data) __totalPage += 1 if (config.getMaxProject() is not None) and (__prjLst.getLen() >= config.getMaxProject()): print("Reach max %s/%s" % (__prjLst.getLen(), config.getMaxProject())) break if (__page == 0): #ok, reach end, out break if (__totalPage > 500): # 500 pages? no way, something wrong, out print("SOMETHING WRONG, total is to big, out") break print("Total pages %d" % (__totalPage)) return __prjLst
1c926e8b855cba502229ab1c31c9706c20882a1c
11,193
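The pagination loop above follows GitLab's X-Next-Page response header. A stripped-down sketch of the same pattern with plain requests, free of the project-local config/gitlabProjectList helpers (base_url, group_id and token are placeholders):

import requests

def fetch_all_group_projects(base_url, group_id, token):
    projects, page = [], 1
    while page:
        resp = requests.get(
            f"{base_url}/api/v4/groups/{group_id}/projects",
            headers={"PRIVATE-TOKEN": token},
            params={"page": page, "per_page": 100},
        )
        resp.raise_for_status()
        projects.extend(resp.json())
        # GitLab sets X-Next-Page to '' on the last page
        next_page = resp.headers.get("X-Next-Page", "")
        page = int(next_page) if next_page else 0
    return projects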
def group_naptan_datatypes(gdf, naptan_column='LocalityName'):
    """Groups naptan stop data into subsets by the given naptan column and
    reduces each group to the mean (centroid) of its latitude/longitude values.
    Args:
        gdf (DataFrame): naptan stop records, including 'Latitude' and
            'Longitude' columns.
        naptan_column (str, optional): column to group on. Defaults to 'LocalityName'.
    Returns:
        DataFrame: one centroid row per group, with point geometry; also
        written to '<naptan_column>.csv'.
    """
    # collapse dataset to minimum, keeping possibly usable datasets
    gdf2 = gdf[['LocalityName', 'NptgLocalityCode', 'AreaName', 'StopAreaCode',
                'Latitude', 'Longitude']]
    # calculates the centroid of each given naptan segment.
    gdf3 = gdf2.groupby([naptan_column], as_index=False)[
        ['Latitude', 'Longitude']].apply(lambda x: np.mean(x, axis=0))
    # convert the lat lon into centroid geometry points.
    gdf4 = geo.calculate_naptan_geometry(gdf3)
    # save output to csv.
    gdf4.to_csv(f'{naptan_column}.csv', encoding='utf-8', sep=',')
    return gdf4
d4cca1180f1b3d6622c7c2fd5df1cdd1b369c5b3
11,194
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None, verify=False, cert=list()): """ Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact """ return utils._make_api_request(api_url, '/facts/{0}/{1}'.format(fact_name, fact_value), verify, cert)
bdce7473bff944609ffdb191948b008ca4a1422a
11,195
def produce_phase(pipeline_run): """Produce result with Produce phase data.""" scores = pipeline_run['run']['results']['scores'] if len(scores) > 1: raise ValueError('This run has more than one score!') scores = scores[0] return { 'metric': scores['metric']['metric'], 'context': pipeline_run['context'], 'normalized_score': scores['normalized'] }
7ed003281eac240a407dac1d03a5e3f5a6e5b2cd
11,196
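An illustrative input/output pair for produce_phase (assuming the function above is in scope); the dict shape simply mirrors what the function reads, with a single score entry:

pipeline_run = {
    'context': 'TESTING',
    'run': {'results': {'scores': [
        {'metric': {'metric': 'F1_MACRO'}, 'value': 0.61, 'normalized': 0.61},
    ]}},
}
summary = produce_phase(pipeline_run)
# summary == {'metric': 'F1_MACRO', 'context': 'TESTING', 'normalized_score': 0.61}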
import json from unittest.mock import patch async def init_integration(hass: HomeAssistant, use_nickname=True) -> MockConfigEntry: """Set up the Mazda Connected Services integration in Home Assistant.""" get_vehicles_fixture = json.loads(load_fixture("mazda/get_vehicles.json")) if not use_nickname: get_vehicles_fixture[0].pop("nickname") get_vehicle_status_fixture = json.loads( load_fixture("mazda/get_vehicle_status.json") ) config_entry = MockConfigEntry(domain=DOMAIN, data=FIXTURE_USER_INPUT) config_entry.add_to_hass(hass) client_mock = MagicMock( MazdaAPI( FIXTURE_USER_INPUT[CONF_EMAIL], FIXTURE_USER_INPUT[CONF_PASSWORD], FIXTURE_USER_INPUT[CONF_REGION], aiohttp_client.async_get_clientsession(hass), ) ) client_mock.get_vehicles = AsyncMock(return_value=get_vehicles_fixture) client_mock.get_vehicle_status = AsyncMock(return_value=get_vehicle_status_fixture) client_mock.lock_doors = AsyncMock() client_mock.unlock_doors = AsyncMock() with patch( "homeassistant.components.mazda.config_flow.MazdaAPI", return_value=client_mock, ), patch("homeassistant.components.mazda.MazdaAPI", return_value=client_mock): assert await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() return client_mock
96756b011f66786c0c8a8704446546c0751de13f
11,197
def get_app_domain(): """ Returns the full URL to the domain. The output from this function gets generally appended with a path string. """ url = settings.INCOMEPROPERTYEVALUATOR_APP_HTTP_PROTOCOL url += settings.INCOMEPROPERTYEVALUATOR_APP_HTTP_DOMAIN return url
0a9c58a179c281072104fb5b7859b2d0ef8426ae
11,198
import inspect def deprecated(removal_version, hint_message=None, subject=None, ensure_stderr=False): """Marks a function or method as deprecated. A removal version must be supplied and it must be greater than the current 'pantsbuild.pants' version. When choosing a removal version there is a natural tension between the code-base, which benefits from short deprecation cycles, and the user-base which may prefer to deal with deprecations less frequently. As a rule of thumb, if the hint message can fully convey corrective action succinctly and you judge the impact to be on the small side (effects custom tasks as opposed to effecting BUILD files), lean towards the next release version as the removal version; otherwise, consider initiating a discussion to win consensus on a reasonable removal version. :param str removal_version: The pantsbuild.pants version which will remove the deprecated function. :param str hint_message: An optional hint pointing to alternatives to the deprecation. :param str subject: The name of the subject that has been deprecated for logging clarity. Defaults to the name of the decorated function/method. :param bool ensure_stderr: Forwarded to `ensure_stderr` in warn_or_error(). :raises DeprecationApplicationError if the @deprecation is applied improperly. """ validate_deprecation_semver(removal_version, 'removal version') def decorator(func): if not inspect.isfunction(func): raise BadDecoratorNestingError('The @deprecated decorator must be applied innermost of all ' 'decorators.') func_full_name = '{}.{}'.format(func.__module__, func.__name__) @wraps(func) def wrapper(*args, **kwargs): warn_or_error(removal_version, subject or func_full_name, hint_message, ensure_stderr=ensure_stderr) return func(*args, **kwargs) return wrapper return decorator
84b2ef33a40d8f28eba27e29679338093875eb25
11,199
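A hypothetical usage sketch for the @deprecated decorator above; '999.0.0.dev0' is only an illustrative removal version chosen to satisfy the "greater than the current version" rule described in the docstring:

@deprecated('999.0.0.dev0', hint_message='Use new_parse_config() instead.')
def parse_config(path):
    ...

parse_config('pants.toml')   # emits a deprecation warning, then runs normally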