content: stringlengths 35 to 762k
sha1: stringlengths 40 to 40
id: int64, 0 to 3.66M
from typing import Any


def get_node_data(workspace: str, graph: str, table: str, node: str) -> Any:
    """Return the attributes associated with a node."""
    return Workspace(workspace).graph(graph).node_attributes(table, node)
0ac48d715fd31876b62d837d5b18b2ee75c791dd
3,800
def siso_optional(fn, h_opt, scope=None, name=None): """Substitution module that determines to include or not the search space returned by `fn`. The hyperparameter takes boolean values (or equivalent integer zero and one values). If the hyperparameter takes the value ``False``, the input is simply put in the output. If the hyperparameter takes the value ``True``, the search space is instantiated by calling `fn`, and the substitution module is replaced by it. Args: fn (() -> (dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output])): Function returning a graph fragment corresponding to a sub-search space. h_opt (deep_architect.core.Hyperparameter): Hyperparameter for whether to include the sub-search space or not. scope (deep_architect.core.Scope, optional): Scope in which the module will be registered. If none is given, uses the default scope. name (str, optional): Name used to derive an unique name for the module. If none is given, uses the class name to derive the name. Returns: (dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output]): Tuple with dictionaries with the inputs and outputs of the substitution module. """ def substitution_fn(dh): return fn() if dh["opt"] else identity() return substitution_module(_get_name(name, "SISOOptional"), substitution_fn, {'opt': h_opt}, ['in'], ['out'], scope)
187a292c8dba59d5d4d7f67d54cdd087ee2b6582
3,801
def saconv3x3_block(in_channels, out_channels, stride=1, pad=1, **kwargs):
    """
    3x3 version of the Split-Attention convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    """
    return SAConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=3,
        stride=stride,
        pad=pad,
        **kwargs)
bda938d53bbb56a7035ae50125743e4eb9aa709b
3,802
def add_hook(**_kwargs):
    """Creates and adds the import hook in sys.meta_path"""
    hook = import_hook.create_hook(
        transform_source=transform_source,
        hook_name=__name__,
        extensions=[".pyfr"],
    )
    return hook
20c7e37aead055e32bfcb520a579b66069a3e26c
3,803
def mul(n1, n2):
    """ multiply two numbers """
    return n1 * n2
c137432dd2e5c6d4dbded08546e3d54b98fe03df
3,804
import torch
from torchvision import transforms  # provides transforms.Normalize


def pytorch_normalze(img):
    """
    https://github.com/pytorch/vision/issues/223
    return appr -1~1 RGB
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    img = normalize(torch.from_numpy(img))
    return img.numpy()
7667d6fa3da69d89973bb804ad08a139ae7f3564
3,805
def get_nic_capacity(driver_info, ilo_fw): """Gets the FRU data to see if it is NIC data Gets the FRU data in loop from 0-255 FRU Ids and check if the returned data is NIC data. Couldn't find any easy way to detect if it is NIC data. We should't be hardcoding the FRU Id. :param driver_info: Contains the access credentials to access the BMC. :param ilo_fw: a tuple containing major and minor versions of firmware :returns: the max capacity supported by the NIC adapter. """ i = 0x0 value = None ilo_fw_rev = get_ilo_version(ilo_fw) or DEFAULT_FW_REV # Note(vmud213): iLO firmware versions >= 2.3 support reading the FRU # information in a single call instead of iterating over each FRU id. if ilo_fw_rev < MIN_SUGGESTED_FW_REV: for i in range(0xff): # Note(vmud213): We can discard FRU ID's between 0x6e and 0xee # as they don't contain any NIC related information if (i < 0x6e) or (i > 0xee): cmd = "fru print %s" % hex(i) out = _exec_ipmitool(driver_info, cmd) if out and 'port' in out and 'Adapter' in out: value = _parse_ipmi_nic_capacity(out) if value is not None: break else: continue else: cmd = "fru print" out = _exec_ipmitool(driver_info, cmd) if out: for line in out.split('\n'): if line and 'port' in line and 'Adapter' in line: value = _parse_ipmi_nic_capacity(line) if value is not None: break return value
cc20e1b35a47bec1242ed5dba60da8473527ca4f
3,806
import re


def isValidInifileKeyName(key):
    """
    Check that this key name is valid to be used in inifiles,
    and to be used as a python property name on a q or i object
    """
    return re.match(r"^[\w_]+$", key)
9e68b987d6ac9af3c40e053c2347b01f737f0665
3,807
def installed_pkgs():
    """
    Return the list of installed packages on the machine

    Returns:
        list: List of installed packages

    CLI Example:

    .. code-block:: bash

        salt '*' macpackage.installed_pkgs
    """
    cmd = "pkgutil --pkgs"
    return __salt__["cmd.run"](cmd).split("\n")
b9a66600327ea8eb0ec63745cacd8509a0f757d9
3,808
import math def extract_feature(audio, sr=44100): """ extract feature like below: sig: rmse: silence: harmonic: pitch: audio: audio file or audio list return feature_list: np of [n_samples, n_features] """ feature_list = [] y = [] if isinstance(audio, str): y, _ = librosa.load(audio, sr) elif isinstance(audio, np.ndarray): y = audio # 1. sig sig_mean = np.mean(abs(y)) feature_list.append(sig_mean) # sig_mean feature_list.append(np.std(y)) # sig_std # 2. rmse rmse = librosa.feature.rms(y + 0.0001)[0] feature_list.append(np.mean(rmse)) # rmse_mean feature_list.append(np.std(rmse)) # rmse_std # 3. silence silence = 0 for e in rmse: if e <= 0.4 * np.mean(rmse): silence += 1 silence /= float(len(rmse)) feature_list.append(silence) # silence # 4. harmonic y_harmonic = librosa.effects.hpss(y)[0] feature_list.append(np.mean(y_harmonic) * 1000) # harmonic (scaled by 1000) # 5. pitch (instead of auto_correlation) cl = 0.45 * sig_mean center_clipped = [] for s in y: if s >= cl: center_clipped.append(s - cl) elif s <= -cl: center_clipped.append(s + cl) elif np.abs(s) < cl: center_clipped.append(0) # auto_corrs = librosa.core.autocorrelate(np.array(center_clipped)) pitch, _, _ = librosa.pyin(y, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7')) pitch = [0 if math.isnan(p) else p for p in pitch] feature_list.append(np.mean(pitch)) feature_list.append(np.std(pitch)) return np.array(feature_list).reshape(1, -1)
d4eca914605bc87c57dbaf846a9a01d79a953c56
3,809
import time def run_with_config(sync, config): """ Execute the cartography.sync.Sync.run method with parameters built from the given configuration object. This function will create a Neo4j driver object from the given Neo4j configuration options (URI, auth, etc.) and will choose a sensible update tag if one is not specified in the given configuration. :type sync: cartography.sync.Sync :param sync: A sync task to run. :type config: cartography.config.Config :param config: The configuration to use to run the sync task. """ neo4j_auth = None if config.neo4j_user or config.neo4j_password: neo4j_auth = (config.neo4j_user, config.neo4j_password) try: neo4j_driver = GraphDatabase.driver( config.neo4j_uri, auth=neo4j_auth, ) except neobolt.exceptions.ServiceUnavailable as e: logger.debug("Error occurred during Neo4j connect.", exc_info=True) logger.error( ( "Unable to connect to Neo4j using the provided URI '%s', an error occurred: '%s'. Make sure the Neo4j " "server is running and accessible from your network." ), config.neo4j_uri, e, ) return except neobolt.exceptions.AuthError as e: logger.debug("Error occurred during Neo4j auth.", exc_info=True) if not neo4j_auth: logger.error( ( "Unable to auth to Neo4j, an error occurred: '%s'. cartography attempted to connect to Neo4j " "without any auth. Check your Neo4j server settings to see if auth is required and, if it is, " "provide cartography with a valid username and password." ), e, ) else: logger.error( ( "Unable to auth to Neo4j, an error occurred: '%s'. cartography attempted to connect to Neo4j with " "a username and password. Check your Neo4j server settings to see if the username and password " "provided to cartography are valid credentials." ), e, ) return default_update_tag = int(time.time()) if not config.update_tag: config.update_tag = default_update_tag return sync.run(neo4j_driver, config)
b5659863ed31f1a39a0d8c95b33eb02d5ea8d77d
3,810
from typing import OrderedDict import six def BuildPartialUpdate(clear, remove_keys, set_entries, field_mask_prefix, entry_cls, env_builder): """Builds the field mask and patch environment for an environment update. Follows the environments update semantic which applies operations in an effective order of clear -> remove -> set. Leading and trailing whitespace is stripped from elements in remove_keys and the keys of set_entries. Args: clear: bool, If true, the patch removes existing keys. remove_keys: iterable(string), Iterable of keys to remove. set_entries: {string: string}, Dict containing entries to set. field_mask_prefix: string, The prefix defining the path to the base of the proto map to be patched. entry_cls: AdditionalProperty, The AdditionalProperty class for the type of entry being updated. env_builder: [AdditionalProperty] -> Environment, A function which produces a patch Environment with the given list of entry_cls properties. Returns: (string, Environment), a 2-tuple of the field mask defined by the arguments and a patch environment produced by env_builder. """ remove_keys = set(k.strip() for k in remove_keys or []) # set_entries is sorted by key to make it easier for tests to set the # expected patch object. set_entries = OrderedDict( (k.strip(), v) for k, v in sorted(six.iteritems(set_entries or {}))) if clear: entries = [ entry_cls(key=key, value=value) for key, value in six.iteritems(set_entries) ] return field_mask_prefix, env_builder(entries) field_mask_entries = [] seen_keys = set() for key in remove_keys: field_mask_entries.append('{}.{}'.format(field_mask_prefix, key)) seen_keys.add(key) entries = [] for key, value in six.iteritems(set_entries): entries.append(entry_cls(key=key, value=value)) if key not in seen_keys: field_mask_entries.append('{}.{}'.format(field_mask_prefix, key)) # Sorting field mask entries makes it easier for tests to set the expected # field mask since dictionary iteration order is undefined. field_mask_entries.sort() return ','.join(field_mask_entries), env_builder(entries)
320c589cd45dcec9a3ebba4b295075e23ef805ed
3,811
import io import json import sys def readJSON( json_path: FilePath, file_text: str = "", conf_file_name: str = "" ) -> object: """Reads the JSON from the given file and saves it to a class object with the JSON elements as attributes. If an error occurs, the program is exited with an error message! The JSON must have an element `file_version` that has a value of at least `CFG_VERSION`, if not, the program is exited with an error message. Args: json_path (FilePath): The path to the JSON file to read. file_text (str, optional): The name of the JSON configuration file for logging proposes. Defaults to "", which will be logged as 'a', like in "Writing _a_ JSON configuration file ...". conf_file_name (str, optional): The string that has to be the value of `file_name` in the JSON file, if not, the program exits. Defaults to "". Returns: object: A class instance with the JSON elements as attributes. """ _logger.warning( 'Parsing {text} config file "{path}"'.format(text=file_text, path=json_path) ) try: with io.open(json_path, mode="r", encoding="utf-8") as file: ret_val = json.load(file, object_hook=lambda dict: SimpleNamespace(**dict)) except Exception as exp: _logger.critical( 'error "{error}" parsing file "{path}"'.format(error=exp, path=json_path) ) sys.exit(EXT_ERR_LD_FILE) try: checkConfigName(json_path, conf_file_name, ret_val) checkConfigVersion(json_path, ret_val) except Exception as excp: _logger.critical( 'error "{error}" parsing file "{path}", JSON file not valid'.format( error=excp, path=json_path ) ) sys.exit(EXT_ERR_NOT_VLD) try: setOrigFile(json_path, ret_val) except Exception as excp: _logger.critical( 'error "{error}" generating JSON file "{file}" checksum'.format( error=excp, file=json_path ) ) return ret_val
5c1fba7e9967592f9a61eb0cbefedb1db5384276
3,812
def create_schema_usb():
    """Create schema usb."""
    return vol.Schema(CONFIG_SCHEMA_USB)
e543a5950788ad629ed3986cc7a6c5a58931a478
3,813
def _build_field_queries(filters):
    """
    Builds field queries.

    Same as _build_field_query but expects a dict of field/values and
    returns a list of queries.
    """
    return [
        _build_field_query(field, value)
        for field, value in filters.items()
    ]
9b1241cce6c421a79cd5ea26dd134d5fd93d6fde
3,814
def bycode(ent, group):
    """
    Get the data with the given group code from an entity.

    Arguments:
        ent: An iterable of (group, data) tuples.
        group: Group code that you want to retrieve.

    Returns:
        The data for the given group code. Can be a list of items if
        the group code occurs multiple times.
    """
    data = [v for k, v in ent if k == group]
    if len(data) == 1:
        return data[0]
    return data
c5b92f2bbd1cd5bc383a1102ccf54031222d82c3
3,815
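A minimal usage sketch for `bycode`, assuming the function above is in scope; the entity tuples are made-up DXF-style (group, data) pairs:

entity = [(0, "LINE"), (8, "LAYER_1"), (10, 1.5), (10, 3.0)]

print(bycode(entity, 8))    # single occurrence -> "LAYER_1"
print(bycode(entity, 10))   # repeated group code -> [1.5, 3.0]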
import numbers import os def subtract_bg(inputs, out_names=None, x_order=None, y_order=None, reprocess=None): """ Model the instrumental background or scattered light level as a function of position in the input files (based on the counts within specified nominally-unilluminated regions) and subtract the result from each input to remove the estimated contamination. Parameters ---------- inputs : DataFileList or DataFile Input, bias-subtracted images, in the raw data format, each of which must have an entry named 'bg_reg' in its `cals` dictionary, specifying the unilluminated detector regions to use for background estimation; see ``background_regions``. out_names : `str`-like or list of `str`-like, optional Names of output images containing the background-subtracted spectra. If None (default), the names of the DataFile instances returned will be constructed from those of the corresponding input files, prefixed with 'b' as in the Gemini IRAF package. x_order, y_order : int or list of int, optional Order of the Legendre surface fit along rows and columns, respectively, for each CCD (or all CCDs if a single integer). With the default of None, orders of [5,9,5] or [5,5,9,5,5,5] are used for x and [5,7,5] or [5,5,7,5,5,5] for columns, as appropriate. The index of the higher number may need adjusting by the user to match the CCD where the IFU slits overlap (if applicable). This logic will probably be made a bit more intelligent in a future version. See "help gfscatsub" in IRAF for more detailed information. Returns ------- outimage : DataFileList The background-subtracted images produced by gfscatsub. Package 'config' options ------------------------ reprocess : bool or None Re-generate and overwrite any existing output files on disk or skip processing and re-use existing results, where available? The default of None instead raises an exception where outputs already exist (requiring the user to delete them explicitly). The processing is always performed for outputs that aren't already available. """ # Here we have to expand out the output filenames instead of letting # run_task do it because it currently doesn't recognize text files as main # inputs. This should be replaced by run-task-like decorator functionality # in the longer run. # Convert inputs to a DataFileList if needed: inputs = to_datafilelist(inputs) # Use default prefix if the output filenames are unspecified: prefix = 'b' if not out_names: out_names = [FileName(indf, prefix=prefix) for indf in inputs] elif len(out_names) != len(inputs): raise ValueError('inputs & out_names have unmatched lengths') # Get lists of bg regions to use from the input file "cals" dictionaries: try: bg_reg_list = [df.cals['bg_reg'] for df in inputs] except KeyError: raise KeyError('one or more inputs is missing associated list of '\ 'background regions') # Avoid raising obscure errors if the wrong thing gets attached as bg_reg. # To do: consider writing a more generic type-checking function. 
if not all(bg_reg and hasattr(bg_reg, '__iter__') and \ all(reg and hasattr(reg, '__iter__') and \ all(isinstance(n, (int, str)) for n in reg) \ for reg in bg_reg \ ) for bg_reg in bg_reg_list ): raise ValueError('cals[\'bg_reg\'] should be a list of limit lists') # Loop over the inputs explicitly, since run_task currently can't recognize # lists of text files as inputs: mode = 'update' if not reprocess else 'overwrite' outputs = DataFileList(mode=mode) for indf, bg_reg, outname in zip(inputs, bg_reg_list, out_names): # Save the background regions for each instance as a temporary file # for IRAF: gapfn = new_filename(base=indf.filename.base+'_gaps', ext='') with open(gapfn, 'w') as gapfile: for reg in bg_reg: gapfile.write('{0}\n'.format(' '.join(str(n) for n in reg))) # Generate default orders appropriate for the number of detectors in # each DataFile, if unspecified: len_df = len(indf) if x_order is None: xorder = [5] * len_df xorder[(len_df-1)//2] = 9 else: if isinstance(x_order, (numbers.Integral, str)): xorder = (x_order,) else: xorder = x_order if y_order is None: yorder = [5] * len_df yorder[(len_df-1)//2] = 7 else: if isinstance(y_order, (numbers.Integral, str)): yorder = (y_order,) else: yorder = y_order # Convert list of orders to comma-separated IRAF syntax: xorder = ','.join(str(n) for n in xorder) yorder = ','.join(str(n) for n in yorder) result = run_task( 'gemini.gmos.gfscatsub', inputs={'image' : indf}, outputs={'outimage' : outname}, prefix=None, suffix=None, comb_in=False, MEF_ext=False, path_param=None, reprocess=reprocess, mask=gapfn, xorder=xorder, yorder=yorder, cross=True ) # Finished with the temporary file: os.remove(gapfn) # Accumulate the output DataFileList, copying the dictionary of cals # from each input to the output until persistence is implemented, since # the same ones are usually needed at the next step: outdf = result['outimage'][0] outdf.cals.update(indf.cals) outputs.append(outdf) return outputs
81a3c4bf73fe9361538312a767cea353091b6a1a
3,816
from typing import List
from typing import Tuple


def get_midi_programs(midi: MidiFile) -> List[Tuple[int, bool]]:
    """ Returns the list of programs of the tracks of a MIDI, keeping the same order.
    It returns it as a list of tuples (program, is_drum).

    :param midi: the MIDI object to extract tracks programs
    :return: the list of track programs, as a list of tuples (program, is_drum)
    """
    return [(int(track.program), track.is_drum) for track in midi.instruments]
7249baa46b80b8b42400068edacf5ce9e829c71f
3,817
def is_depth_wise_conv(module):
    """Determine whether a Conv2d module is a depth-wise convolution."""
    if hasattr(module, "groups"):
        return module.groups != 1 and module.in_channels == module.out_channels
    elif hasattr(module, "group"):
        return module.group != 1 and module.in_channels == module.out_channels
27127f54edbf8d0653cab6c7dbfb1448f33ecab4
3,818
def list_all_routed():
    """
    List all the notifications that have been routed to any repository, limited by
    the parameters supplied in the URL. See the API documentation for more details.

    :return: a list of notifications appropriate to the parameters
    """
    return _list_request()
d67141d6fa5908d99292d898a5a77df3e80d47aa
3,819
import os


def read(file_name):
    """Read in the supplied file name from the root directory.

    Args:
        file_name (str): the name of the file

    Returns: the content of the file
    """
    this_dir = os.path.dirname(__file__)
    file_path = os.path.join(this_dir, file_name)
    with open(file_path) as f:
        return f.read()
252b9d70febf6bbf36987b1e435501bcf8ce1ce8
3,820
from typing import Dict from typing import Any from typing import Optional def prepare_stdin( method: str, basis: str, keywords: Dict[str, Any], charge: int, mult: int, geoopt: Optional[str] = "" ) -> str: """Prepares a str that can be sent to define to produce the desired input for Turbomole.""" # Load data from keywords unrestricted = keywords.get("unrestricted", False) grid = keywords.get("grid", "m3") methods_flat = list(it.chain(*[m for m in METHODS.values()])) if method not in methods_flat: raise InputError(f"Method {method} not in supported methods " f"{methods_flat}!") # This variable may contain substitutions that will be made to # the control file after it was created from a define call, e.g. # setting XC functionals that aren't hardcoded in define etc. subs = None def occ_num_mo_data(charge: int, mult: int, unrestricted: Optional[bool] = False) -> str: """Handles the 'Occupation Number & Molecular Orbital' section of define. Sets appropriate charge and multiplicity in the system and decided between restricted and unrestricted calculation. RHF and UHF are supported. ROHF could be implemented later on by using the 's' command to list the available MOs and then close the appropriate number of MOs to doubly occupied MOs by 'c' by comparing the number of total MOs and the desired multiplicity.""" # Do unrestricted calculation if explicitly requested or mandatory unrestricted = unrestricted or (mult != 1) unpaired = mult - 1 charge = int(charge) occ_num_mo_data_stdin = f"""eht y {charge} y """ if unrestricted: # Somehow Turbomole/define asks us if we want to write # natural orbitals... we don't want to. occ_num_mo_data_stdin = f"""eht y {charge} n u {unpaired} * n """ return occ_num_mo_data_stdin def set_method(method, grid): if method == "hf": method_stdin = "" elif method in METHODS["ricc2"]: # Setting geoopt in $ricc2 will make the ricc2 module to produce # a gradient. # Drop the 'ri'-prefix of the method string. geoopt_stdin = f"geoopt {method[2:]} ({geoopt})" if geoopt else "" method_stdin = f"""cc freeze * cbas * ricc2 {method} list models {geoopt_stdin} list geoopt * * """ elif method in METHODS["dft_hardcoded"]: method_stdin = f"""dft on func {method} grid {grid} """ # TODO: Handle xcfuncs that aren't defined in define, e.g. # new functionals introduced in 7.4 from libxc. ... # Maybe the best idea would be to not set the functional here # but just turn on DFT and add it to the control file later on. elif method in METHODS["dft_libxc"]: raise InputError("libxc functionals are not supported right now.") return method_stdin # Resolution of identity def set_ri(keywords): # TODO: senex/RIJCOSX? ri_kws = {ri_kw: keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]} ri_stdins = {"rijk": "rijk\non\n\n", "ri": "ri\non\n\n", "marij": "marij\n\n"} ri_stdin = "\n".join([ri_stdins[ri_kw] for ri_kw, use in ri_kws.items() if use]) return ri_stdin # ri_stdin = "" # # Use either RIJK or RIJ if requested. # if ri_kws["rijk"]: # ri_stdin = """rijk # on # """ # elif ri_kws["rij"]: # ri_stdin = """rij # on # """ # # MARIJ can be used additionally. # if ri_kws["marij"]: # ri_stdin += """marij # """ # return ri_stdin # Dispersion correction def set_dsp(keywords): # TODO: set_ri and set_dsp are basically the same funtion. Maybe # we could abstract this somehow? 
dsp_kws = {dsp_kw: keywords.get(dsp_kw, False) for dsp_kw in KEYWORDS["dsp"]} dsp_stdins = {"d3": "dsp\non\n\n", "d3bj": "dsp\nbj\n\n"} dsp_stdin = "\n".join([dsp_stdins[dsp_kw] for dsp_kw, use in dsp_kws.items() if use]) return dsp_stdin kwargs = { "init_guess": occ_num_mo_data(charge, mult, unrestricted), "set_method": set_method(method, grid), "ri": set_ri(keywords), "dsp": set_dsp(keywords), "title": "QCEngine Turbomole", "scf_conv": 8, "scf_iters": 150, "basis": basis, } stdin = """ {title} a coord * no b all {basis} * {init_guess} {set_method} {ri} {dsp} scf conv {scf_conv} iter {scf_iters} * """.format( **kwargs ) return stdin, subs
a4c70cfa97530108c2e969f424fa0f02c32fd927
3,821
def Lstart(gridname='BLANK', tag='BLANK', ex_name='BLANK'):
    """
    This adds more run-specific entries to Ldir.
    """
    # put top level information from input into a dict
    Ldir['gridname'] = gridname
    Ldir['tag'] = tag
    Ldir['ex_name'] = ex_name
    # and add a few more things
    Ldir['gtag'] = gridname + '_' + tag
    Ldir['gtagex'] = gridname + '_' + tag + '_' + ex_name
    Ldir['grid'] = Ldir['data'] / 'grids' / gridname
    Ldir['forecast_days'] = 3
    Ldir['ds_fmt'] = ds_fmt
    Ldir['roms_time_units'] = roms_time_units
    Ldir['modtime0'] = modtime0

    return Ldir.copy()
    # the use of copy() means different calls to Lstart (e.g. when importing
    # plotting_functions) do not overwrite each other
92d992c3a7eba7bbba9146018060bca7844d4a78
3,822
def rfe_w2(x, y, p, classifier): """RFE algorithm, where the ranking criteria is w^2, described in [Guyon02]_. `classifier` must be an linear classifier with learn() and w() methods. .. [Guyon02] I Guyon, J Weston, S Barnhill and V Vapnik. Gene Selection for Cancer Classification using Support Vector Machines. Machine Learning, 2002. :Parameters: x: 2d array_like object (N,P) training data y : 1d array_like object integer (N) class labels (only two classes) p : float [0.0, 1.0] percentage of features (upper rounded) to remove at each iteration (p=0 one variable) classifier : object with learn() and w() methods object :Returns: ranking : 1d numpy array int feature ranking. ranking[i] contains the feature index ranked in i-th position. """ if (p < 0.0) or (p > 1.0): raise ValueError("parameter p must be in [0.0, 1.0]") if not (hasattr(classifier, 'learn') and hasattr(classifier, 'w')): raise ValueError("parameter classifier must have learn() and w() methods") xarr = np.asarray(x, dtype=np.float) yarr = np.asarray(y, dtype=np.int) if xarr.ndim != 2: raise ValueError("x must be a 2d array_like object") if yarr.ndim != 1: raise ValueError("y must be an 1d array_like object") if xarr.shape[0] != yarr.shape[0]: raise ValueError("x, y shape mismatch") labels = np.unique(yarr) if labels.shape[0] != 2: raise ValueError("number of classes must be = 2") idxglobal = np.arange(xarr.shape[1], dtype=np.int) ranking = [] while True: nelim = np.max((int(np.ceil(idxglobal.shape[0] * p)), 1)) xi = xarr[:, idxglobal] classifier.learn(xi, yarr) w = classifier.w() idxsorted = np.argsort(w**2) # indexes to remove idxelim = idxglobal[idxsorted[:nelim]][::-1] ranking.insert(0, idxelim) # update idxglobal idxglobal = idxglobal[idxsorted[nelim:]] idxglobal.sort() if len(idxglobal) <= 1: ranking.insert(0, idxglobal) break return np.concatenate(ranking)
9176ee36c1180ab862b23be9d9a09584abea50ca
3,823
from typing import List def compress_timeline(timeline: List, salt: bytes) -> List: """ Compress the verbose Twitter feed into a small one. Just keep the useful elements. The images are downloaded per-request. Args: timeline (List): The Twitter timeline. salt (bytes): The salt to apply on the filename. Returns: List: The timeline with less information and links to the (locally) stored images. """ compressed_timeline = [] for tweet in timeline: profile_image_url = tweet["user"]["profile_image_url_https"] compressed_tweet = { "created_at": tweet["created_at"], "text": tweet["text"], "id_str": tweet["id_str"], "user": { "name": tweet["user"]["name"], "screen_name": tweet["user"]["screen_name"], "profile_image_origin": encode_media_origin(profile_image_url), "profile_image_filename": create_media_filename( profile_image_url, salt ), }, } if tweet["retweeted"]: original_source = tweet["retweeted_status"]["user"] profile_image_url = original_source["profile_image_url_https"] compressed_tweet["retweeted_status"] = { "user": { "name": original_source["name"], "screen_name": original_source["screen_name"], "profile_image_origin": encode_media_origin(profile_image_url), "profile_image_filename": create_media_filename( profile_image_url, salt ), } } compressed_timeline.append(compressed_tweet) return compressed_timeline
aff1364714d7e83685ab2257167fcd8bc7e10436
3,824
import pandas as pd


def createFinalCompactedData(compacted_data, elevations):
    """
    This function creates a dataframe that combines the RGB data and the
    elevations data into a dataframe that can be used for analysis

    Parameters
    ----------
    compacted_data : list of compacted data returned from condensePixels.
    elevations : list of elevations from getUSGSElevations.

    Returns
    -------
    final_compacted_data : dataframe of merged data.
    """
    lats = []
    lons = []
    reds = []
    greens = []
    blues = []
    els = []
    for i in range(len(compacted_data)):
        for j in range(len(compacted_data[0])):
            reds.append(compacted_data[i][j][0])
            greens.append(compacted_data[i][j][1])
            blues.append(compacted_data[i][j][2])
            lats.append(compacted_data[i][j][3])
            lons.append(compacted_data[i][j][4])
            els.append(elevations[i][j])
    final_compacted_data = pd.DataFrame({'Lat': lats, 'Lon': lons, 'Elevation': els,
                                         'Red': reds, 'Green': greens, 'Blue': blues})
    return final_compacted_data
0d8b6a5e10504c32988e05e7450ebcf077305949
3,825
def get_sorted_nodes_edges(bpmn_graph):
    """
    Assure an ordering as-constant-as-possible

    Parameters
    --------------
    bpmn_graph
        BPMN graph

    Returns
    --------------
    nodes
        List of nodes of the BPMN graph
    edges
        List of edges of the BPMN graph
    """
    graph = bpmn_graph.get_graph()
    graph_nodes = list(graph.nodes(data=False))
    graph_edges = list(graph.edges(data=False))
    bfs = bfs_bpmn(graph_nodes, graph_edges)
    graph_nodes = sort_nodes_given_bfs(graph_nodes, bfs)
    graph_edges = sort_edges_given_bfs(graph_edges, bfs)
    return graph_nodes, graph_edges
879d7e8e3e5e4e9a8db3fc01622b96dde2b7af25
3,826
from typing import Optional from typing import Dict from typing import Any def list_commits( access_key: str, url: str, owner: str, dataset: str, *, revision: Optional[str] = None, offset: Optional[int] = None, limit: Optional[int] = None, ) -> Dict[str, Any]: """Execute the OpenAPI `GET /v2/datasets/{owner}/{dataset}/commits`. Arguments: access_key: User's access key. url: The URL of the graviti website. owner: The owner of the dataset. dataset: Name of the dataset, unique for a user. revision: The information to locate the specific commit, which can be the commit id, the branch name, or the tag name. offset: The offset of the page. The default value of this param in OpenAPIv2 is 0. limit: The limit of the page. The default value of this param in OpenAPIv2 is 24. Returns: The response of OpenAPI. Examples: >>> list_commits( ... "ACCESSKEY-********", ... "https://api.graviti.com", ... "czhual", ... "MNIST", ... ) { "commits": [ { "commit_id": "85c57a7f03804ccc906632248dc8c359", "parent_commitId": "784ba0d3bf0a41f6a7bfd771d8c00fcb", "title": "upload data", "description": "", "committer": "Gravitier", "committed_at": "2021-03-03T18:58:10Z" } ], "offset": 0, "record_size": 1, "total_count": 1 } """ url = f"{url}/v2/datasets/{owner}/{dataset}/commits" params: Dict[str, Any] = {} if offset is not None: params["offset"] = offset if limit is not None: params["limit"] = limit if revision is not None: params["revision"] = revision return open_api_do("GET", access_key, url, params=params).json()
be3899be0b77de069c7d32ca39aaec2039fe89e4
3,827
import heapq


def dijkstra(graph, start, end=None):
    """
    Find shortest paths from the start vertex to all vertices nearer
    than or equal to the end.

    The input graph G is assumed to have the following representation: A vertex
    can be any object that can be used as an index into a dictionary. G is a
    dictionary, indexed by vertices. For any vertex v, G[v] is itself a
    dictionary, indexed by the neighbors of v. For any edge v->w, G[v][w] is
    the length of the edge.

    The output is a pair (D,P) where D[v] is the distance from start to v and
    P[v] is the predecessor of v along the shortest path from s to v.

    Original by David Eppstein, UC Irvine, 4 April 2002
    http://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/

    >>> G = DirectedGraph({'s':{'u':10, 'x':5}, 'u':{'v':1, 'x':2}, 'v':{'y':4}, 'x':{'u':3, 'v':9, 'y':2}, \
                           'y':{'s':7, 'v':6}})
    >>> distances, predecessors = dijkstra(G, 's', 'v')
    >>> sorted(distances.items())
    [('s', 14), ('u', 8), ('v', 9), ('x', 5), ('y', 7)]
    >>> sorted(predecessors.items())
    [('s', 'y'), ('u', 'x'), ('v', 'u'), ('x', 's'), ('y', 'x')]
    """
    distances = {}      # dictionary of final distances
    predecessors = {}   # dictionary of predecessors (previous node)
    queue = []          # queue
    heapq.heappush(queue, (0, start))

    while len(queue) > 0:
        distance, node = heapq.heappop(queue)
        if node in distances and distance > distances[node]:
            continue
        if node == end:
            break

        # Loop through neighbours
        edges = graph.edges(node, distance=distance)
        for neighbour, length in edges.items():
            total = distance + length
            if neighbour in distances:
                if total >= distances[neighbour]:
                    continue
            distances[neighbour] = total
            predecessors[neighbour] = node
            heapq.heappush(queue, (total, neighbour))

    return distances, predecessors
b2a1ee983534c0a4af36ae7e3490c3b66949609b
3,828
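A small usage sketch for `dijkstra`, assuming the function above is in scope. Because the snippet calls `graph.edges(node, distance=...)`, the example wraps a plain adjacency dict in a hypothetical `TinyGraph` class that provides that method:

class TinyGraph:
    """Adjacency-dict wrapper exposing the edges() interface dijkstra() expects."""

    def __init__(self, adj):
        self.adj = adj

    def edges(self, node, distance=None):
        # 'distance' is accepted to match the call site but is not needed here
        return self.adj.get(node, {})


g = TinyGraph({'s': {'u': 10, 'x': 5}, 'u': {'v': 1, 'x': 2}, 'v': {'y': 4},
               'x': {'u': 3, 'v': 9, 'y': 2}, 'y': {'s': 7, 'v': 6}})
distances, predecessors = dijkstra(g, 's')
print(distances['v'])       # 9, via s -> x -> u -> v
print(predecessors['v'])    # 'u'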
import sys
import pwd
import os


def get_owner_from_path(path):
    """Get the username of the owner of the given file"""
    if "pwd" in sys.modules:
        # On unix
        return pwd.getpwuid(os.stat(path).st_uid).pw_name

    # On Windows (win32security comes from the pywin32 package)
    import win32security

    f = win32security.GetFileSecurity(path, win32security.OWNER_SECURITY_INFORMATION)
    username, _, _ = win32security.LookupAccountSid(
        None, f.GetSecurityDescriptorOwner()
    )
    return username
df038aff54d44654403beee0af63ac0aed9385a4
3,829
def tournament_selection(pop, size):
    """
    tournament selection
    individuals eliminate one another until the desired breeding size is reached
    """
    participants = [ind for ind in pop.population]
    breeding = []
    # could implement different rounds here
    # but I think that's almost the same as calling tournament different times with smaller sizes
    for i in range(size):
        a, b = rng.choice(participants, 2)
        if a > b:
            breeding.append(a)
            participants.remove(a)
        else:
            breeding.append(b)
            participants.remove(b)
    return breeding
78bebc2de25d0744f3f8dabd67f70136d5f020b5
3,830
import math


def bond_number(r_max, sigma, rho_l, g):
    """
    calculates the Bond number for the largest droplet according to
    Cha, H.; Vahabi, H.; Wu, A.; Chavan, S.; Kim, M.-K.; Sett, S.; Bosch, S. A.;
    Wang, W.; Kota, A. K.; Miljkovic, N. Dropwise Condensation on Solid Hydrophilic
    Surfaces. Science Advances 2020, 6 (2), eaax0746.
    https://doi.org/10.1126/sciadv.aax0746
    """
    l_y = math.sqrt(sigma / (rho_l * g))
    bond = r_max**2 / l_y**2

    return bond
2098a762dd7c2e80ff4a570304acf7cfbdbba2e5
3,831
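A quick numeric check of `bond_number`, assuming the function above is in scope; the property values are rough textbook figures for water at room temperature, not values taken from the cited paper:

sigma = 0.072   # surface tension of water, N/m (approximate)
rho_l = 998.0   # liquid density of water, kg/m^3 (approximate)
g = 9.81        # gravitational acceleration, m/s^2
r_max = 1e-3    # 1 mm droplet radius

print(bond_number(r_max, sigma, rho_l, g))  # ~0.14, so surface tension dominates gravity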
def spatial_conv(inputs, conv_type, kernel, filters, stride, is_training, activation_fn='relu', data_format='channels_last'): """Performs 1x1 conv followed by 2d or depthwise conv. Args: inputs: `Tensor` of size `[batch*time, height, width, channels]`. Only supports 'channels_last' as the data format. conv_type: 'string' of "std", "depth", "maxpool", or "avgpool" this selects the spatial conv/pooling method. kernel: `int` kernel size to be used for `conv2d` or max_pool2d` operations. Should be a positive integer. filters: `int` number of filters in the convolution. stride: 'int' temporal stride is_training: 'bool' specifying whether in training mode or not. activation_fn: 'string' the activation function to use (relu or swish) data_format: `str`. Only supports 'channels_last' as the data format. Returns: A `Tensor` of the same data_format """ if kernel == 1: return inputs use_relu = (activation_fn == 'relu') if conv_type == 'std' or conv_type == 'depth': inputs = conv2d(inputs, 1, filters, 1, is_training, use_relu=use_relu) if not use_relu: inputs = hard_swish(inputs) if conv_type == 'std' or conv_type == '1std': inputs = conv2d(inputs, int(kernel), filters, int(stride), is_training, use_relu=use_relu) if not use_relu: inputs = hard_swish(inputs) elif conv_type == 'depth': depth_multiplier = 1 depthwise_kernel_shape = (int(kernel), int(kernel), inputs.shape[-1], depth_multiplier) depthwise_kernel = contrib_framework.model_variable( name='depthwise_kernel', shape=depthwise_kernel_shape, dtype=tf.float32, initializer=contrib_layers.variance_scaling_initializer( factor=2.0, mode='FAN_IN', uniform=False), trainable=True) inputs = tf.nn.depthwise_conv2d( inputs, tf.cast(depthwise_kernel, inputs.dtype), strides=[1, int(stride), int(stride), 1], padding='SAME', rate=[1, 1], data_format='NHWC' if data_format == 'channels_last' else 'NCHW') inputs = bn.batch_norm_relu( inputs, is_training, relu=use_relu, data_format=data_format) if not use_relu: inputs = hard_swish(inputs) elif conv_type == 'maxpool': inputs = tf.layers.max_pooling2d( inputs, int(kernel), int(stride), padding='same', data_format=data_format) elif conv_type == 'avgpool': inputs = tf.layers.average_pooling2d( inputs, int(kernel), int(stride), padding='same', data_format=data_format) return inputs
e87820eaa5b8ed13157fe0790c4e09b1bc546a0d
3,832
async def timeron(websocket, battleID):
    """Start the timer on a Metronome Battle."""
    return await websocket.send(f'{battleID}|/timer on')
f1601694e2c37d41adcc3983aa535347dc13db71
3,833
import numpy


def to_unit_vector(this_vector):
    """
    Convert a numpy vector to a unit vector

    Arguments:
        this_vector: a (3,) numpy array

    Returns:
        new_vector: a (3,) array with the same direction but unit length
    """
    norm = numpy.linalg.norm(this_vector)
    # norm is guaranteed positive by the assert, so no zero-length fallback is needed
    assert norm > 0.0, "vector norm must be greater than 0"
    return this_vector / norm
ae46bf536b8a67a1be1e98ae051eebf1f8696e37
3,834
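A short usage sketch for `to_unit_vector`, assuming the function above is in scope:

import numpy

v = numpy.array([3.0, 0.0, 4.0])
u = to_unit_vector(v)
print(u)                        # [0.6 0.  0.8]
print(numpy.linalg.norm(u))     # approximately 1.0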
import base64


def decode(msg):
    """
    Convert data per pubsub protocol / data format

    Args:
        msg: The msg from Google Cloud

    Returns:
        data: The msg data as a string
    """
    if 'data' in msg:
        data = base64.b64decode(msg['data']).decode('utf-8')
        return data
32e85b3f0c18f3d15ecb0779825941024da75909
3,835
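A round-trip sketch for `decode`, assuming the function above is in scope; the message dict mimics the base64-encoded `data` field of a Pub/Sub push payload:

import base64

msg = {"data": base64.b64encode("hello pubsub".encode("utf-8")).decode("ascii")}
print(decode(msg))   # "hello pubsub"
print(decode({}))    # None, since there is no 'data' key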
def pivot_longer_by_humidity_and_temperature(df: pd.DataFrame) -> pd.DataFrame: """ Reshapes the dataframe by collapsing all of the temperature and humidity columns into an temperature, humidity, and location column Parameters ---------- df : pd.DataFrame The cleaned and renamed dataframe from add_date_features(). Returns ------- pd.DataFrame A much longer dataframe with an exposed location column to perform operations on. """ # Need to melt both variables individually, which creates # a ton of meaningless rows in the second melt. temporary_df = df.melt( id_vars=[colname for colname in df.columns if "temp" not in colname], var_name="temperature_location", value_name="temperature", ignore_index=False, ) temporary_df = temporary_df.melt( id_vars=[ colname for colname in temporary_df.columns if "humidity" not in colname ], var_name="humidity_location", value_name="humidity", ignore_index=False, ) temporary_df["temperature_location"] = temporary_df[ "temperature_location" ].str.replace("temperature_", "") temporary_df["humidity_location"] = temporary_df["humidity_location"].str.replace( "humidity_", "" ) # We know all measurements come from slices of time that contain a measurement of both humidity # and temperature from one location, so if we combine the location columns we can drop # the extra rows created during the second melt. df = temporary_df[ temporary_df["temperature_location"] == temporary_df["humidity_location"] ] df = df.drop(columns=["humidity_location"]).rename( columns={"temperature_location": "measurement_location"} ) return df
d60b92b523c31b3f7db799f58a42bd9ca810d258
3,836
def add_counter_text(img, box_shape, people_in): """ Add person counter text on the image Args: img (np.array): Image box_shape (tuple): (width, height) of the counter box people_in (int): Number representing the amount of people inside the space Returns: (np.array): Updated image """ box_width, box_height = box_shape img_pil = Image.fromarray(img) draw = ImageDraw.Draw(img_pil) # set in/capacity numbers text_in = "{}".format(people_in) text_cap = "{}".format(CAPACITY) # import constants for re-use TEXT_COUNTER_UP = TEXT_CONF["TEXT_COUNTER_UP"] TEXT_COUNTER_DOWN = TEXT_CONF["TEXT_COUNTER_DOWN"] # get shapes for parts of text w_up, h_up = draw.textsize(TEXT_COUNTER_UP, stroke_width=1, font=FONT_SMALL) w_down, h_down = draw.textsize(TEXT_COUNTER_DOWN, stroke_width=1, font=FONT_SMALL) w_in, h_in = draw.textsize(text_in, stroke_width=1, font=FONT_SMALL) w_cap, h_cap = draw.textsize(text_cap, stroke_width=1, font=FONT_SMALL) w_slash, h_slash = draw.textsize(" / ", stroke_width=1, font=FONT_SMALL) # calculate coordinates for each part of the text textX_up = int((box_width - w_up) / 2) textY_up = int(0.05 * box_height) textX_down = int((box_width - w_down) / 2) textY_down = int(0.1 * box_height + h_up) textX_in = int((box_width - w_slash) / 2 - w_in) textY_stat = int(0.2 * box_height + h_down + h_up) textX_slash = int((box_width - w_slash) / 2) textX_cap = int((box_width + w_slash) / 2) # add text on image draw.text( (textX_up, textY_up), TEXT_COUNTER_UP, font=FONT_SMALL, fill=WHITE, stroke_width=1, ) draw.text( (textX_down, textY_down), TEXT_COUNTER_DOWN, font=FONT_SMALL, fill=WHITE, stroke_width=1, ) draw.text( (textX_in, textY_stat), text_in, font=FONT_SMALL, fill=(0, 255, 0), stroke_width=1, ) draw.text( (textX_slash, textY_stat), " / ", font=FONT_SMALL, fill=WHITE, stroke_width=1 ) draw.text( (textX_cap, textY_stat), text_cap, font=FONT_SMALL, fill=WHITE, stroke_width=1 ) img = np.array(img_pil, dtype="uint8") return img
ca182338a7dc11596b8375d788036d5de50381e2
3,837
import bpy  # Blender Python API (provides bpy.context)


def create_override(override):
    """Takes override arguments as dictionary and applies them to a copy of the current context"""
    override_context = bpy.context.copy()
    for key, value in override.items():
        override_context[key] = value
    return override_context
25ecb761d8e9225081752fef10d2a6a885ba14d2
3,838
import os
import mimetypes


def get_result_file(request):
    """Return the content of the transformed code.
    """
    resdir = get_resultdir(request)
    workdir = os.path.basename(request.session['workdir'])  # sanitized
    name = os.path.basename(request.matchdict.get('name', 'result-%s.txt' % workdir))
    ext = os.path.splitext(name)[1]
    path = os.path.join(resdir, name)
    request.response.headers['Content-type'] = mimetypes.types_map.get(
        ext, 'text/plain;charset=utf-8')
    if ext == '.txt':
        # Open text file as an attachment
        request.response.headers['Content-disposition'] = str(
            'attachment; filename=%s' % name)
    with open(path) as f:
        return f.read()
83b418f0ce0ce99fcba337db6fe0122e5a38606c
3,839
import json


def load_or_make_json(file, *, default=None):
    """Loads a JSON file, or makes it if it does not exist."""
    if default is None:
        default = {}

    return __load_or_make(file, default, json.load, json.dump)
3045cf141d26313fe8ffe60d6e74ff7af18ddce2
3,840
import warnings def plot_predictions(image, df, color=None, thickness=1): """Plot a set of boxes on an image By default this function does not show, but only plots an axis Label column must be numeric! Image must be BGR color order! Args: image: a numpy array in *BGR* color order! Channel order is channels first df: a pandas dataframe with xmin, xmax, ymin, ymax and label column color: color of the bounding box as a tuple of BGR color, e.g. orange annotations is (0, 165, 255) thickness: thickness of the rectangle border line in px Returns: image: a numpy array with drawn annotations """ if image.shape[0] == 3: raise ValueError("Input images must be channels last format [h, w, 3] not channels first [3, h, w], use np.rollaxis(image, 0, 3) to invert") if image.dtype == "float32": image = image.astype("uint8") image = image.copy() if not color: if not ptypes.is_numeric_dtype(df.label): warnings.warn("No color was provided and the label column is not numeric. Using a single default color.") color=(0,165,255) for index, row in df.iterrows(): if not color: color = label_to_color(row["label"]) cv2.rectangle(image, (int(row["xmin"]), int(row["ymin"])), (int(row["xmax"]), int(row["ymax"])), color=color, thickness=thickness, lineType=cv2.LINE_AA) return image
c666b1a92eefbc04abc7da1c3a4bc6cccde93769
3,841
from sympy import S, solveset, diff
from sympy.calculus.util import continuous_domain
from sympy.sets import EmptySet


def stationary_points(f, symbol, domain=S.Reals):
    """
    Returns the stationary points of a function (where derivative of the
    function is 0) in the given domain.

    Parameters
    ==========

    f : Expr
        The concerned function.
    symbol : Symbol
        The variable for which the stationary points are to be determined.
    domain : Interval
        The domain over which the stationary points have to be checked.
        If unspecified, S.Reals will be the default domain.

    Examples
    ========

    >>> from sympy import Symbol, S, sin, log, pi, pprint, stationary_points
    >>> from sympy.sets import Interval
    >>> x = Symbol('x')

    >>> stationary_points(1/x, x, S.Reals)
    EmptySet()

    >>> pprint(stationary_points(sin(x), x), use_unicode=False)
              pi                              3*pi
    {2*n*pi + -- | n in Integers} U {2*n*pi + ---- | n in Integers}
              2                                2

    >>> stationary_points(sin(x),x, Interval(0, 4*pi))
    {pi/2, 3*pi/2, 5*pi/2, 7*pi/2}

    """
    if isinstance(domain, EmptySet):
        return S.EmptySet

    domain = continuous_domain(f, symbol, domain)
    stat_points = solveset(diff(f, symbol), symbol, domain)

    return stat_points
21011d7925c136de43f962a56edd5ffcc09c144f
3,842
from tabulate import tabulate  # assumed: tables are rendered with the 'tabulate' package


def _create_table(data_list, headers):
    """
    Create a table for given data list and headers.

    Args:
        data_list(list): list of dicts, which keys have to cover headers
        headers(list): list of headers for the table

    Returns:
        new_table(tabulate): created table, ready to print
    """
    list_table = list()
    for row in data_list:
        row_data = list()
        for header in headers:
            if header.lower() in row:
                row_data.append(row[header.lower()])
            else:
                row_data.append(None)
        list_table.append(row_data)
    new_table = tabulate(list_table, headers=headers)

    return new_table
d072857776c16128808b7e2b4b64075cc4894199
3,843
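A usage sketch for `_create_table`, assuming the function above is in scope and that the tabulate package is the renderer:

rows = [
    {"name": "alpha", "size": 3},
    {"name": "beta"},                # missing 'size' -> rendered as an empty cell
]
print(_create_table(rows, headers=["Name", "Size"]))  # prints a two-column text table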
def _validate_attribute_id(this_attributes, this_id, xml_ids, enforce_consistency, name):
    """ Validate attribute id. """
    # the given id is None and we don't have setup attributes
    # -> increase current max id for the attribute by 1
    if this_id is None and this_attributes is None:
        this_id = max(xml_ids) + 1

    # the given id is None and we do have setup attributes
    # -> set id to the id present in the setup
    elif this_id is None and this_attributes is not None:
        this_id = this_attributes[name]

    # the given id is not None and we do have setup attributes
    # -> check that the ids match (unless we are in over-write mode)
    elif this_id is not None and this_attributes is not None:
        if (this_id != this_attributes[name]) and enforce_consistency:
            raise ValueError("Expect id %i for attribute %s, got %i"
                             % (this_attributes[name], name, this_id))

    return this_id
e85201c85b790576f7c63f57fcf282a985c22347
3,844
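A small sketch of the three branches of `_validate_attribute_id`, assuming the function above is in scope; the attribute dicts and xml ids are made up:

xml_ids = [0, 1, 2]

# no explicit id, no setup attributes -> next free id
print(_validate_attribute_id(None, None, xml_ids, True, "color"))            # 3

# no explicit id, setup attributes present -> reuse the setup id
print(_validate_attribute_id({"color": 7}, None, xml_ids, True, "color"))    # 7

# explicit id that contradicts the setup while consistency is enforced -> ValueError
try:
    _validate_attribute_id({"color": 7}, 5, xml_ids, True, "color")
except ValueError as err:
    print(err)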
def Arrows2D(startPoints, endPoints=None, shaftLength=0.8, shaftWidth=0.09, headLength=None, headWidth=0.2, fill=True, c=None, cmap=None, alpha=1): """ Build 2D arrows between two lists of points `startPoints` and `endPoints`. `startPoints` can be also passed in the form ``[[point1, point2], ...]``. Color can be specified as a colormap which maps the size of the arrows. :param float shaftLength: fractional shaft length :param float shaftWidth: fractional shaft width :param float headLength: fractional head length :param float headWidth: fractional head width :param bool fill: if False only generate the outline :param c: color :param float alpha: set transparency :Example: .. code-block:: python from vedo import Grid, Arrows2D g1 = Grid(sx=1, sy=1) g2 = Grid(sx=1.2, sy=1.2).rotateZ(4) arrs2d = Arrows2D(g1, g2, c='jet') arrs2d.show(axes=1, bg='white') |quiver| """ if isinstance(startPoints, Points): startPoints = startPoints.points() if isinstance(endPoints, Points): endPoints = endPoints.points() startPoints = np.array(startPoints) if endPoints is None: strt = startPoints[:,0] endPoints = startPoints[:,1] startPoints = strt else: endPoints = np.array(endPoints) if headLength is None: headLength = 1 - shaftLength arr = Arrow2D((0,0,0), (1,0,0), shaftLength, shaftWidth, headLength, headWidth, fill) orients = endPoints - startPoints if orients.shape[1] == 2: # make it 3d orients = np.c_[np.array(orients), np.zeros(len(orients))] pts = Points(startPoints) arrg = Glyph(pts, arr.polydata(False), orientationArray=orients, scaleByVectorSize=True, c=c, alpha=alpha).flat().lighting('off') if c is not None: arrg.color(c) arrg.name = "Arrows2D" return arrg
d2276def355c56c6fe494c29bab04cd6f1e28221
3,845
def filter_characters(results: list) -> str:
    """Filters unwanted and duplicate characters.

    Args:
        results: List of top 1 results from inference.

    Returns:
        Final output string to present to user.
    """
    text = ""
    for i in range(len(results)):
        if results[i] == "$":
            continue
        elif i + 1 < len(results) and results[i] == results[i + 1]:
            continue
        else:
            text += results[i]
    return text
6b2ca1446450751258e37b70f2c9cbe5110a4ddd
3,846
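A usage sketch for `filter_characters`, assuming the function above is in scope; "$" plays the role of the blank/unwanted symbol and consecutive duplicates are collapsed, as in a simple CTC-style decode:

results = ["h", "h", "$", "e", "l", "$", "l", "o", "o"]
print(filter_characters(results))   # "hello"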
from Bio import SeqIO, pairwise2
from Bio.pairwise2 import format_alignment


def seq_alignment_files(file1, file2, outputfile=""):
    """This command takes 2 fasta files as input, each file contains a single sequence.
    It reads the 2 sequences from files and gets all their alignments along with the score.
    The -o is an optional parameter if we need the output to be written on a file
    instead of the screen.
    """
    try:
        seq1 = SeqIO.read(file1, 'fasta')
        seq2 = SeqIO.read(file2, 'fasta')
    except OSError as Error:
        print(Error)
        return 'Please Enter a valid File name'
    alignments = pairwise2.align.globalxx(seq1, seq2)  # global alignment
    if outputfile == '':
        for alignment in alignments:
            print(alignment)
            print(format_alignment(*alignment))
    else:
        output_alignment(alignments, outputfile)
        print('Alignment Done to File ', outputfile)
b225d97e29040040755cc3f2260b60f90c390bce
3,847
def main(Block: type[_Block], n: int, difficulty: int) -> list[tuple[float, int]]:
    """Test can hash a block"""
    times_and_tries = []
    for i in range(n):
        block = Block(rand_block_hash(), [t], difficulty=difficulty)
        # print(f"starting {i}... ", end="", flush=True)
        with time_it() as timer:
            block.hash()
        # print(f"took {timer.interval:.3g} seconds and {block.nonce+1} tries")
        times_and_tries.append((timer.interval, block.nonce + 1))
    return times_and_tries
27c729604b3f3441e1ceb5f6d6d28f47d64fdb13
3,848
from typing import Union
from typing import SupportsFloat


def is_castable_to_float(value: Union[SupportsFloat, str, bytes, bytearray]) -> bool:
    """
    Checks whether the object can be converted to float.

    Arguments  : value : the value to check
    Returns    : True|False
    Exceptions : none

    >>> is_castable_to_float(1)
    True
    >>> is_castable_to_float('1')
    True
    >>> is_castable_to_float('1.0')
    True
    >>> is_castable_to_float('1,0')
    False
    >>> is_castable_to_float('True')
    False
    >>> is_castable_to_float(True)
    True
    >>> is_castable_to_float('')
    False
    >>> is_castable_to_float(None)   # noqa
    False

    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
e3882c0e64da79dc9a0b74b4c2414c7bf29dd6c9
3,849
from operator import itemgetter


def list_unique(hasDupes):
    """Return the sorted unique values from a list"""
    # order preserving
    d = dict((x, i) for i, x in enumerate(hasDupes))
    return [k for k, _ in sorted(d.items(), key=itemgetter(1))]
0ba0fcb216400806aca4a11d5397531dc19482f6
3,850
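A quick check of `list_unique`, assuming the function above is in scope; because later duplicates overwrite earlier indices in the dict, the result follows last-occurrence order:

print(list_unique(["b", "a", "b", "c", "a"]))   # ['b', 'c', 'a']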
def filter_by_networks(object_list, networks):
    """Returns a copy of object_list with all objects that are not in
    the network removed.

    Parameters
    ----------
    object_list: list
        List of datamodel objects.
    networks: string or list
        Network or list of networks to check for.

    Returns
    -------
    filtered
        List of filtered datamodel objects.
    """
    filtered = [obj for obj in object_list if check_network(networks, obj)]
    return filtered
9ffb2cedd1508e5924f3a2894a2f842bc5673440
3,851
def get_all_data(year=year, expiry=1, fielding=False, chadwick=False):
    """Grab all data and write core files.

    Options for fielding data and bio data for rookies/master.
    """
    name_url_pairs = url_maker(year=year, fielding=fielding)

    # if debugging warn about the webviews
    if module_log.isEnabledFor(logging.DEBUG):
        print("ALERT: Spynner windows should open.")
        print("ALERT: This throws more AttributeError(s).")
        print("ALERT: No need to worry. They're uncaught but it all works.")

    # loop over tuples and call get_data for each pair
    for pair in name_url_pairs:
        get_data(pair[1], pair[0], year)

    # either do chadwick or not
    if chadwick is True:
        get_biographical()

    # Check if data is there, new and in range of len
    past_due, exists = check_files(year, expiry, fielding=fielding)  # , chadwick=chadwick)

    if past_due is False and exists is True:
        module_log.info("Files now up to date.")

    return past_due, exists
2bd4be520941dab31fc1c0b410fedad25c08fea9
3,852
def score_per_year_by_country(country):
    """Returns the Global Terrorism Index (GTI) per year of the given country."""
    cur = get_db().execute('''SELECT iyear, (
                              1*COUNT(*) +
                              3*SUM(nkill) +
                              0.5*SUM(nwound) +
                              2*SUM(case propextent when 1.0 then 1 else 0 end) +
                              2*SUM(case propextent when 2.0 then 1 else 0 end) +
                              2*SUM(case propextent when 3.0 then 1 else 0 end) +
                              2*SUM(case propextent when 4.0 then 1 else 0 end))
                              FROM Attacks
                              WHERE iso_code="{}"
                              GROUP BY iyear'''.format(country))
    score = cur.fetchall()
    cur.close()
    return jsonify(score)
ac8992a0bd2227b7b9f5622b9395e4c7933af35a
3,853
def build(options, is_training):
    """Builds a model based on the options.

    Args:
        options: A model_pb2.Model instance.

    Returns:
        A model instance.

    Raises:
        ValueError: If the model proto is invalid or cannot find a registered entry.
    """
    if not isinstance(options, model_pb2.Model):
        raise ValueError('The options has to be an instance of model_pb2.Model.')

    for extension, model_proto in options.ListFields():
        if extension in MODELS:
            return MODELS[extension](model_proto, is_training)

    raise ValueError('Invalid model config!')
99fc2f283075091254743a9d70ecab3d7a65066d
3,854
def string_to_rdkit(frmt: str, string: str, **kwargs) -> RDKitMol:
    """
    Convert string representation of molecule to RDKitMol.

    Args:
        frmt: Format of string.
        string: String representation of molecule.
        **kwargs: Other keyword arguments for conversion function.

    Returns:
        RDKitMol corresponding to string representation.
    """
    try:
        converter = RDKIT_STRING_TO_MOL_CONVERTERS[frmt.lower()]
    except KeyError:
        raise ValueError(f'{frmt} is not a recognized RDKit format')
    else:
        remove_hs = kwargs.pop('removeHs', False)  # Don't remove hydrogens by default
        rdkit_mol = converter(string, removeHs=remove_hs, **kwargs)
    return RDKitMol(rdkit_mol)
34803a46d5228644bb3db614aca5580bcb286655
3,855
from datetime import datetime


def clean_datetime_remove_ms(atime):
    """
    Zero out the sub-second (millisecond/microsecond) part of a datetime object.

    :param atime: the datetime to truncate
    :return: a new datetime with second precision
    """
    return datetime(atime.year, atime.month, atime.day,
                    atime.hour, atime.minute, atime.second)
94a47ad8802b3eb4d58d332d71bb3d3e0c67d947
3,856
def perDay(modified):
    """Auxiliary in provenance filtering: chunk the trails into daily bits."""
    chunks = {}
    for m in modified:
        chunks.setdefault(dt.date(m[1]), []).append(m)
    return [chunks[date] for date in sorted(chunks)]
ce9fe31c39c9c6c5e0753aa2dc6dc5113fb199e4
3,857
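A usage sketch for `perDay`; it assumes the function above is in scope and that `dt` in the snippet refers to `datetime.datetime` (so `dt.date(m[1])` extracts the calendar date of each trail entry):

from datetime import datetime as dt

modified = [
    ("fileA", dt(2021, 5, 1, 9, 30)),
    ("fileB", dt(2021, 5, 1, 17, 0)),
    ("fileC", dt(2021, 5, 2, 8, 15)),
]
print(perDay(modified))   # two chunks: the two May 1 entries together, then the May 2 entry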
import argparse


def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('im', help="Input image", default='000456.jpg')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--prototxt', dest='prototxt', help='Prototxt of Network')
    parser.add_argument('--weights', dest='caffemodel', help='Weights of trained network')
    parser.add_argument('--labels', dest='labels', help='file contain labels', default=None)
    parser.add_argument('--cf', dest='min_cf', help='cutoff confidence score',
                        default=0.8, type=float)
    parser.add_argument('--output', dest='destination',
                        help='Output location of image detections', default=None)

    args = parser.parse_args()
    return args
40764cbb9987560e18e22e3bec9d4ce993e8b789
3,858
def login(): """The screen to log the user into the system.""" # call create_all to create database tables if this is the first run db.create_all() # If there are no users, create a default admin and non-admin if len(User.query.all()) == 0: create_default_users() # Redirect the user if already logged in if current_user.is_authenticated: # Send admins and non-admins to different pages if current_user.admin: return redirect(url_for('admin.admin_home')) else: return redirect(url_for('export.export_home')) form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(username=form.username.data).first() if user is None or not user.check_password(form.password.data): flash("Invalid username or password") return redirect(url_for('login.login')) login_user(user) current_app.logger.info(f"Logged in {user}") # If the user was redirected here, send the user back to the original page next_page = request.args.get('next') if not next_page or url_parse(next_page).netloc != '': # If no next page given, default to these pages if user.admin: next_page = url_for('admin.admin_home') else: next_page = url_for('export.export_home') return redirect(next_page) nav_bar_title = "Login" return render_template('login/login.html', title='Sign in', form=form, nav_bar_title=nav_bar_title)
0912dca53b40677da9a9443c4500badf05fff8a8
3,859
def freight_sep_2014():
    """Find the number of freight of the month"""
    for i in fetch_data_2014():
        if i[1] == "Freight" and i[4] == "September":
            num_0 = i[6]
    return int(num_0)
b7f770362f7a85ffc92591a48660d01d7f784dc1
3,860
from typing import Union import os from typing import Any from typing import Optional from typing import List from pathlib import Path def convexhull( input_path: Union[str, "os.PathLike[Any]"], output_path: Union[str, "os.PathLike[Any]"], input_layer: Optional[str] = None, output_layer: Optional[str] = None, columns: Optional[List[str]] = None, explodecollections: bool = False, nb_parallel: int = -1, batchsize: int = -1, force: bool = False, ): """ Applies a convexhull operation on the input file. The result is written to the output file specified. Args: input_path (PathLike): the input file output_path (PathLike): the file to write the result to input_layer (str, optional): input layer name. Optional if the input file only contains one layer. output_layer (str, optional): input layer name. Optional if the input file only contains one layer. columns (List[str], optional): If not None, only output the columns specified. Defaults to None. explodecollections (bool, optional): True to output only simple geometries. Defaults to False. nb_parallel (int, optional): the number of parallel processes to use. Defaults to -1: use all available processors. batchsize (int, optional): indicative number of rows to process per batch. A smaller batch size, possibly in combination with a smaller nb_parallel, will reduce the memory usage. Defaults to -1: (try to) determine optimal size automatically. force (bool, optional): overwrite existing output file(s). Defaults to False. """ logger.info(f"Start convexhull on {input_path}") return _geoops_sql.convexhull( input_path=Path(input_path), output_path=Path(output_path), input_layer=input_layer, output_layer=output_layer, columns=columns, explodecollections=explodecollections, nb_parallel=nb_parallel, batchsize=batchsize, force=force, )
958b482e7faf13f4fc63193e784ec8c20959e295
3,861
import pandas as pd


def piotroski_f(df_cy, df_py, df_py2):
    """Calculate the Piotroski F-score components for each stock and return them as a DataFrame."""
    f_score = {}
    tickers = df_cy.columns
    for ticker in tickers:
        ROA_FS = int(df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2) > 0)
        CFO_FS = int(df_cy.loc["CashFlowOps",ticker] > 0)
        ROA_D_FS = int(df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2) > df_py.loc["NetIncome",ticker]/((df_py.loc["TotAssets",ticker]+df_py2.loc["TotAssets",ticker])/2))
        CFO_ROA_FS = int(df_cy.loc["CashFlowOps",ticker]/df_cy.loc["TotAssets",ticker] > df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2))
        LTD_FS = int((df_cy.loc["LTDebt",ticker] + df_cy.loc["OtherLTDebt",ticker]) < (df_py.loc["LTDebt",ticker] + df_py.loc["OtherLTDebt",ticker]))
        CR_FS = int((df_cy.loc["CurrAssets",ticker]/df_cy.loc["CurrLiab",ticker]) > (df_py.loc["CurrAssets",ticker]/df_py.loc["CurrLiab",ticker]))
        DILUTION_FS = int(df_cy.loc["CommStock",ticker] <= df_py.loc["CommStock",ticker])
        GM_FS = int((df_cy.loc["GrossProfit",ticker]/df_cy.loc["TotRevenue",ticker]) > (df_py.loc["GrossProfit",ticker]/df_py.loc["TotRevenue",ticker]))
        ATO_FS = int(df_cy.loc["TotRevenue",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2) > df_py.loc["TotRevenue",ticker]/((df_py.loc["TotAssets",ticker]+df_py2.loc["TotAssets",ticker])/2))
        f_score[ticker] = [ROA_FS, CFO_FS, ROA_D_FS, CFO_ROA_FS, LTD_FS, CR_FS, DILUTION_FS, GM_FS, ATO_FS]
    f_score_df = pd.DataFrame(f_score, index=["PosROA", "PosCFO", "ROAChange", "Accruals", "Leverage", "Liquidity", "Dilution", "GM", "ATO"])
    return f_score_df
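# Usage sketch with made-up fundamentals for a single hypothetical ticker "AAA";
# the row labels mirror the fields the function reads (current year, prior year,
# and the year before that).
import pandas as pd

rows = ["NetIncome", "CashFlowOps", "TotAssets", "LTDebt", "OtherLTDebt",
        "CurrAssets", "CurrLiab", "CommStock", "GrossProfit", "TotRevenue"]
cy = pd.DataFrame({"AAA": [120, 150, 1000, 200, 20, 400, 250, 50, 300, 900]}, index=rows)
py = pd.DataFrame({"AAA": [100, 130, 950, 220, 25, 380, 260, 50, 270, 850]}, index=rows)
py2 = pd.DataFrame({"AAA": [90, 110, 900, 230, 30, 350, 240, 50, 250, 800]}, index=rows)

print(piotroski_f(cy, py, py2))  # one column of 0/1 flags per ticker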
119a3dd426fbe5e8b5106cbebebf4b000799a839
3,862
from typing import Dict def evaluate_circuit( instances: Dict[str, SType], connections: Dict[str, str], ports: Dict[str, str], ) -> SDict: """evaluate a circuit for the given sdicts.""" # it's actually easier working w reverse: reversed_ports = {v: k for k, v in ports.items()} block_diag = {} for name, S in instances.items(): block_diag.update( {(f"{name},{p1}", f"{name},{p2}"): v for (p1, p2), v in sdict(S).items()} ) sorted_connections = sorted(connections.items(), key=_connections_sort_key) all_connected_instances = {k: {k} for k in instances} for k, l in sorted_connections: name1, _ = k.split(",") name2, _ = l.split(",") connected_instances = ( all_connected_instances[name1] | all_connected_instances[name2] ) for name in connected_instances: all_connected_instances[name] = connected_instances current_ports = tuple( p for instance in connected_instances for p in set([p for p, _ in block_diag] + [p for _, p in block_diag]) if p.startswith(f"{instance},") ) block_diag.update(_interconnect_ports(block_diag, current_ports, k, l)) for i, j in list(block_diag.keys()): is_connected = i == k or i == l or j == k or j == l is_in_output_ports = i in reversed_ports and j in reversed_ports if is_connected and not is_in_output_ports: del block_diag[i, j] # we're no longer interested in these port combinations circuit_sdict: SDict = { (reversed_ports[i], reversed_ports[j]): v for (i, j), v in block_diag.items() if i in reversed_ports and j in reversed_ports } return circuit_sdict
7dd6d019845dbf7f69c6324143d88d4d48af9dea
3,863
from rdkit import Chem


def canonical_smiles_from_smiles(smiles, sanitize=True):
    """ Apply canonicalisation with rdkit

    Parameters
    ------------
    smiles : str
    sanitize : bool
        Whether to apply rdkit sanitisation, default yes.

    Returns
    ---------
    canonical_smiles : str
        Returns None if canonicalisation fails
    """
    try:
        mol = Chem.MolFromSmiles(smiles, sanitize=sanitize)
        mol.UpdatePropertyCache()
        # mol = Chem.AddHs(mol)
        Chem.GetSSSR(mol)
        return Chem.MolToSmiles(mol, canonical=True, allHsExplicit=True,
                                kekuleSmiles=False, allBondsExplicit=True,
                                isomericSmiles=True)
    except Exception:
        return None
0c4dc4583d9a12439b915412cab8458e380a4e6c
3,864
def get_ref(struct, ref, leaf=False): """ Figure out if a reference (e.g., "#/foo/bar") exists within a given structure and return it. """ if not isinstance(struct, dict): return None parts = ref_parts(ref) result = {} result_current = result struct_current = struct for part in parts: if part not in struct_current: return None result_current[part] = {} result_current = result_current[part] struct_current = struct_current[part] if leaf: return struct_current result_current.update(struct_current) return result
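# Usage sketch. get_ref() relies on a ref_parts() helper that is not shown in
# this snippet; the minimal version below (splitting "#/foo/bar" into
# ["foo", "bar"]) is an assumption made only so the example can run.
def ref_parts(ref):
    return [p for p in ref.lstrip('#').split('/') if p]

doc = {'foo': {'bar': {'baz': 1}}}
print(get_ref(doc, '#/foo/bar'))             # {'foo': {'bar': {'baz': 1}}}
print(get_ref(doc, '#/foo/bar', leaf=True))  # {'baz': 1}
print(get_ref(doc, '#/foo/missing'))         # None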
61ebb2561c2c79c58c297c91ac266e9e786a5b7f
3,865
import os


def delete_files(dpath: str, label: str = '') -> str:
    """
    Delete all files in a directory except those whose names contain `label`.

    If the directory path doesn't exist, return "The path doesn't exist";
    otherwise return a string with the total number of files in the directory
    and the number of deleted files.

    Args:
        dpath
            Type: string
            Description: Directory path
        label
            Type: string
            Description: Characters or a name to match against file names in
            the directory. Files whose names contain `label` are not deleted.

    Returns:
        Type: string
        Description: The "The path doesn't exist" string, or a string with the
        count of all files in the directory and the count of deleted files.
    """
    directory = os.path.abspath(dpath)
    print(directory)
    # Test whether the path exists
    if not os.path.exists(dpath):
        return "The path doesn't exist"
    else:
        # Make list of files
        files = os.listdir(directory)
        all_files_count = len(files)
        delete_files_count = 0
        for file in files:
            if file.find(label) == -1:
                os.remove(os.path.join(directory, file))
                delete_files_count += 1
        return "All files: {} Delete files: {}".format(all_files_count,
                                                       delete_files_count)
c8b95d9b14d698145667383bbfe88045330e5cb0
3,866
def edit_maker_app(
    operator,
    app_maker_code,
    app_name="",
    app_url="",
    developer="",
    app_tag="",
    introduction="",
    add_user="",
    company_code="",
):
    """
    @summary: Edit a maker app
    @param operator: English ID of the operator
    @param app_maker_code: maker app code
    @param app_name: app name; optional, the name is left unchanged if empty
    @param app_url: app URL; optional, the URL is left unchanged if empty
    @param developer: list of developer English IDs separated by semicolons ";";
        optional, the developers are left unchanged if empty. Pass the complete
        updated developer list.
    @param app_tag: optional String, light app category
    @param introduction: optional String, light app description
    @param add_user: redundant field, kept for multi-version compatibility
    @param company_code: redundant field, kept for multi-version compatibility
    @return: {'result': True, 'message': u"APP Maker updated successfully"}
             {'result': False, 'message': u"Failed to update APP Maker"}
    """
    data = {
        "bk_app_code": settings.APP_CODE,
        "bk_app_secret": settings.SECRET_KEY,
        "light_app_code": app_maker_code,
        "app_name": app_name,
    }
    if app_url:
        data["app_url"] = app_url
    if developer:
        data["developers"] = developer.split(",")
    if app_tag:
        data["app_tag"] = app_tag
    if introduction:
        data["introduction"] = introduction

    resp = _request_paasv3_light_app_api(url=LIGHT_APP_API, method="patch", data=data)
    return resp
abb2d57235e6c231b96182f989606060f8ebb4ab
3,867
def fifo(): """ Returns a callable instance of the first-in-first-out (FIFO) prioritization algorithm that sorts ASDPs by timestamp Returns ------- prioritize: callable a function that takes an ASDP type name and a dict of per-type ASDPDB metadata, as returned by `asdpdb.load_asdp_metadata_by_type`, and returns a list of dicts containing ordered ASDPs with metadata (in the format expected by `asdpdb.save_asdp_ordering`) """ def prioritize(asdp_type, metadata): # Extract metadata entries ids = metadata['asdp_id'] sue = metadata['sue'] ts = metadata['timestamp'] untransmitted = metadata['downlink_status'] n_untransmitted = np.sum(untransmitted) if n_untransmitted == 0: logger.info(f'No untransmitted {asdp_type} products to prioritize') return [] size_bytes = metadata['asdp_size_bytes'] sue_per_byte = sue / size_bytes # Fill in bad values with zeros sue_per_byte[np.isnan(sue_per_byte)] = 0.0 sue_per_byte[np.isinf(sue_per_byte)] = 0.0 order = np.argsort(ts) for cand_id in order: if untransmitted[cand_id]: logger.info( f'Selected ASDP {ids[cand_id]}, ' f'initial SUE = {sue_per_byte[cand_id]:.2e}' ) products = [ { 'asdp_id': ids[cand_id], 'initial_sue': sue[cand_id], 'final_sue': sue[cand_id], 'initial_sue_per_byte': sue_per_byte[cand_id], 'final_sue_per_byte': sue_per_byte[cand_id], 'size_bytes': size_bytes[cand_id], 'timestamp': ts[cand_id], } for cand_id in order if untransmitted[cand_id] ] return products return prioritize
8f0d24c43a15467c9e6b9f195d12978664867bd3
3,868
def super(d, t): """Pressure p and internal energy u of supercritical water/steam as a function of density d and temperature t (deg C).""" tk = t + tc_k tau = tstar3 / tk delta = d / dstar3 taupow = power_array(tau, tc3) delpow = power_array(delta, dc3) phidelta = nr3[0] * delpow[-1] + sum([n * i * delpow[i - 1] * taupow[j] for (i, j, n) in zip(ir3, jr3, nr3)]) phitau = sum([n * delpow[i] * j * taupow[j - 1] for (i, j, n) in zip(ir3, jr3, nr3)]) rt = rconst * tk p = d * rt * delta * phidelta u = rt * tau * phitau return (p, u)
937d58264b94b041aafa63b88d5fd4498d4acb8e
3,869
import tty import logging import json def ls(query=None, quiet=False): """List and count files matching the query and compute total file size. Parameters ---------- query : dict, optional (default: None) quiet : bool, optional Whether to suppress console output. """ tty.screen.status('Searching ...', mode='static') if query is None: query = CONFIG['GENERAL']['QUERY'] file_list = scihub.search(query, verbose=True) size = 0.0 for f in file_list: size += f['size'] if not quiet: msg = 'Found {0:d} files ({1}).'.format(len(file_list), utils.b2h(size)) logging.info(msg) tty.screen.result(msg) for f in file_list: msg = '{:>8} {}'.format(utils.b2h(f['size']), f['filename']) # tty.update(f['filename'],msg) logging.info(f['filename']) # # Write file_list to JSON file # so it can be read later by the get() and store() commands. # if 'OUT_FILE' in CONFIG['GENERAL'] and \ CONFIG['GENERAL']['OUT_FILE'] is not None: with open(CONFIG['GENERAL']['OUT_FILE'], 'w') as f: json.dump(file_list, f, default=str, indent=2) return file_list
acbf576170f34cfc09e4a3a8d64c1c313a7d3b51
3,870
def _create_full_gp_model(): """ GP Regression """ full_gp_model = gpflow.models.GPR( (Datum.X, Datum.Y), kernel=gpflow.kernels.SquaredExponential(), mean_function=gpflow.mean_functions.Constant(), ) opt = gpflow.optimizers.Scipy() opt.minimize( full_gp_model.training_loss, variables=full_gp_model.trainable_variables, options=dict(maxiter=300), ) return full_gp_model
bebe02e89e4ad17c5832cfced8f7cd1dce9a3b11
3,871
def read_file_header(fd, endian): """Read mat 5 file header of the file fd. Returns a dict with header values. """ fields = [ ('description', 's', 116), ('subsystem_offset', 's', 8), ('version', 'H', 2), ('endian_test', 's', 2) ] hdict = {} for name, fmt, num_bytes in fields: data = fd.read(num_bytes) hdict[name] = unpack(endian, fmt, data) hdict['description'] = hdict['description'].strip() v_major = hdict['version'] >> 8 v_minor = hdict['version'] & 0xFF hdict['__version__'] = '%d.%d' % (v_major, v_minor) return hdict
d994f74a889cedd7e1524102ffd1c62bb3764a0f
3,872
def shape_padleft(t, n_ones=1): """Reshape `t` by left-padding the shape with `n_ones` 1s. See Also -------- shape_padaxis shape_padright Dimshuffle """ _t = aet.as_tensor_variable(t) pattern = ["x"] * n_ones + [i for i in range(_t.type.ndim)] return _t.dimshuffle(pattern)
44e68fed0ea7497ba244ad83fbd4ff53cec22f24
3,873
import numpy as np


def zad1(x):
    """
    Select all elements that come right after a zero.
    If there are none, return None.
    Otherwise, return their maximum.
    """
    zeros = (x[:-1] == 0)
    if np.sum(zeros):
        elements_to_compare = x[1:][zeros]
        return np.max(elements_to_compare)
    return None
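# Quick usage sketch on a small array: the zeros sit before 7 and 3, so the
# values that follow a zero are 7 and 3 and the maximum is 7.
import numpy as np

print(zad1(np.array([1, 0, 7, 2, 0, 3])))  # 7
print(zad1(np.array([1, 2, 3])))           # None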
e54f99949432998bf852afb8f7591af0af0b8b59
3,874
import skopt.space


def skopt_space(hyper_to_opt):
    """Create space of hyperparameters for the gaussian processes optimizer.

    This function creates the space of hyperparameters following skopt syntax.

    Parameters:
        hyper_to_opt (dict): dictionary containing the configuration of the
            hyperparameters to optimize. This dictionary must follow the
            syntax:

            .. code:: python

                hyper_to_opt = {'hyperparam_1': {'type': ...,
                                                 'range': ...,
                                                 'step': ...},
                                'hyperparam_2': {'type': ...,
                                                 'range': ...,
                                                 'step': ...},
                                ...
                                }

            See the official documentation for more details.

    Returns:
        list: space of hyperparameters following the syntax required by the
            gaussian processes optimization algorithm.

    Example::

        hyper_to_opt = {'cnn_rnn_dropout':{
                            'type': 'uniform',
                            'range': [0,1]},
                        'optimizer_type':{
                            'type': 'choice',
                            'range': ['Adadelta', 'Adam', 'RMSProp', 'SGD']},
                        'base_learning_rate':{
                            'type': 'loguniform',
                            'range': [-5, 0]},
                        'layer1_filters':{
                            'type': 'quniform',
                            'range': [16, 64],
                            'step': 1}}

    Raises:
        KeyError: if ``type`` is other than ``uniform``, ``quniform``,
            ``loguniform`` or ``choice``.
    """
    space = []

    # loop over the hyperparameters to optimize dictionary and add each
    # hyperparameter to the space
    for key, items in hyper_to_opt.items():
        if items['type'] == 'uniform':
            space.append(skopt.space.Real(items['range'][0],
                                          items['range'][1],
                                          name=key))
        elif items['type'] == 'quniform':
            space.append(skopt.space.Integer(items['range'][0],
                                             items['range'][1],
                                             name=key))
        elif items['type'] == 'loguniform':
            space.append(skopt.space.Real(items['range'][0],
                                          items['range'][1],
                                          name=key,
                                          prior='log-uniform'))
        elif items['type'] == 'choice':
            space.append(skopt.space.Categorical(items['range'],
                                                 name=key))
        else:
            raise KeyError('The gaussian processes optimizer supports only '
                           'uniform, quniform, loguniform and choice space types')

    return space
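# Usage sketch with a small, made-up search space (parameter names are
# illustrative only); scikit-optimize must be installed for this to run.
hyper_to_opt = {
    'dropout': {'type': 'uniform', 'range': [0, 1]},
    'optimizer_type': {'type': 'choice', 'range': ['Adam', 'SGD']},
    'learning_rate': {'type': 'loguniform', 'range': [1e-5, 1e-1]},
    'n_filters': {'type': 'quniform', 'range': [16, 64], 'step': 1},
}
space = skopt_space(hyper_to_opt)
print([dim.name for dim in space])
# ['dropout', 'optimizer_type', 'learning_rate', 'n_filters']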
bdfbc685b5fd51f8f28cb9b308d3962179d15c7e
3,875
import argparse import sys def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Train a R2CNN network') parser.add_argument('--img_dir', dest='img_dir', help='images path', default='/mnt/USBB/gx/DOTA/DOTA_clip/val/images/', type=str) parser.add_argument('--image_ext', dest='image_ext', help='image format', default='.png', type=str) parser.add_argument('--test_annotation_path', dest='test_annotation_path', help='test annotate path', default=cfgs.TEST_ANNOTATION_PATH, type=str) parser.add_argument('--gpu', dest='gpu', help='gpu index', default='0', type=str) if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args
c0ca3c7aadd82e61cb6ff2f078d3b308822206ba
3,876
import torch def process_text_embedding(text_match, text_diff): """ Process text embedding based on embedding type during training and evaluation Args: text_match (List[str]/Tensor): For matching caption, list of captions for USE embedding and Tensor for glove/fasttext embeddings text_diff (List[str]/Tensor): For non-matching caption, list of captions for USE embedding and Tensor for glove/fasttext embeddings Returns: text_match (Tensor): Processed text-embedding for matching caption text_diff (Tensor): Processed text-embedding for non-matching caption """ if embed_type == 'use': text_match = torch.tensor(use_embed(text_match).numpy()) text_diff = torch.tensor(use_embed(text_diff).numpy()) text_match = text_match.to(device) text_diff = text_diff.to(device) return text_match, text_diff
6f052cc29186f8bcc1598780bf7f437098774498
3,877
import requests
import json
import base64


def x5u_vulnerability(jwt=None, url=None, crt=None, pem=None, file=None):
    """
    Check x5u vulnerability.

    Parameters
    ----------
    jwt: str
        your jwt.
    url: str
        your url.
    crt: str
        crt path file
    pem: str
        pem file name
    file: str
        jwks file name

    Returns
    -------
    str
        your new jwt.
    """
    if not is_valid_jwt(jwt):
        raise InvalidJWT("Invalid JWT format")

    if file is None:
        file = "jwks_with_x5c.json"

    jwt_json = jwt_to_json(jwt)
    if "x5u" not in jwt_json[HEADER]:
        raise InvalidJWT("Invalid JWT format x5u missing")

    if crt is None or pem is None:
        crt, pem = create_crt()

    with open(crt) as f:
        content = f.read()

    x5u = requests.get(jwt_json[HEADER]["x5u"]).json()
    x5u["keys"][0]["x5c"] = (
        content.replace("-----END CERTIFICATE-----", "")
        .replace("-----BEGIN CERTIFICATE-----", "")
        .replace("\n", "")
    )
    if ".json" not in file:
        file += ".json"
    if not url.endswith("/"):
        url += "/"
    jwt_json[HEADER]["x5u"] = f"{url}{file}"

    f = open(file, "w")
    f.write(json.dumps(x5u))
    f.close()

    s = encode_jwt(jwt_json)

    key = crypto.load_privatekey(crypto.FILETYPE_PEM, open(pem).read())
    priv = key.to_cryptography_key()
    sign = priv.sign(
        bytes(s, encoding="UTF-8"),
        algorithm=hashes.SHA256(),
        padding=padding.PKCS1v15(),
    )
    return s + "." + base64.urlsafe_b64encode(sign).decode("UTF-8").rstrip("=")
0424072951e99d0281a696b94889538c1d17ed81
3,878
from typing import List import os from typing import Dict def from_kitti( data_dir: str, data_type: str, ) -> List[Frame]: """Function converting kitti data to Scalabel format.""" if data_type == "detection": return from_kitti_det(data_dir, data_type) frames = [] img_dir = osp.join(data_dir, "image_02") label_dir = osp.join(data_dir, "label_02") cali_dir = osp.join(data_dir, "calib") oxt_dir = osp.join(data_dir, "oxts") assert osp.exists(img_dir), f"Folder {img_dir} is not found" vid_names = sorted(os.listdir(img_dir)) global_track_id = 0 for vid_name in vid_names: trackid_maps: Dict[str, int] = {} img_names = sorted( [ f.path for f in os.scandir(osp.join(img_dir, vid_name)) if f.is_file() and f.name.endswith("png") ] ) projection = read_calib(cali_dir, int(vid_name)) if osp.exists(label_dir): label_file = osp.join(label_dir, f"{vid_name}.txt") labels_dict, trackid_maps, global_track_id = parse_label( data_type, label_file, trackid_maps, global_track_id ) for fr, img_name in enumerate(sorted(img_names)): with Image.open(img_name) as img: width, height = img.size image_size = ImageSize(height=height, width=width) fields = read_oxts(oxt_dir, int(vid_name)) poses = [KittiPoseParser(fields[i]) for i in range(len(fields))] rotation = tuple( R.from_matrix(poses[fr].rotation).as_euler("xyz").tolist() ) position = tuple( np.array(poses[fr].position - poses[0].position).tolist() ) cam2global = Extrinsics(location=position, rotation=rotation) intrinsics = Intrinsics( focal=(projection[0][0], projection[1][1]), center=(projection[0][2], projection[1][2]), ) if osp.exists(label_dir): if not fr in labels_dict: labels = [] else: labels = labels_dict[fr] else: labels = [] img_name = data_type + img_name.split(data_type)[-1] video_name = "/".join(img_name.split("/")[:-1]) f = Frame( name=img_name.split("/")[-1], videoName=video_name, frameIndex=fr, size=image_size, extrinsics=cam2global, intrinsics=intrinsics, labels=labels, ) frames.append(f) return frames
805224b4166238150f6f56c3d14220563a931a64
3,879
def get_all_interactions(L, index_1=False): """ Returns a list of all epistatic interactions for a given sequence length. This sets of the order used for beta coefficients throughout the code. If index_1=True, then returns epistatic interactions corresponding to 1-indexing. """ if index_1: pos = range(1, L+1) else: pos = range(L) all_U = list(powerset(pos)) return all_U
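# Usage sketch. get_all_interactions() depends on a powerset() helper that is
# not part of this snippet; the itertools-based recipe below is the usual
# definition and is assumed here so the example can run.
from itertools import chain, combinations

def powerset(iterable):
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

print(get_all_interactions(2))
# [(), (0,), (1,), (0, 1)]
print(get_all_interactions(2, index_1=True))
# [(), (1,), (2,), (1, 2)]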
f8a151e5d44f2e139820b3d06af3995f60945dd2
3,880
import xml import math def convertSVG(streamOrPath, name, defaultFont): """ Loads an SVG and converts it to a DeepSea vector image FlatBuffer format. streamOrPath: the stream or path for the SVG file. name: the name of the vector image used to decorate material names. defaultFont: the default font to use. The binary data is returned. """ svg = minidom.parse(streamOrPath) materials = Materials(name) commands = [] for rootNode in svg.childNodes: if rootNode.nodeType != xml.dom.Node.ELEMENT_NODE: continue if rootNode.tagName == 'svg': if rootNode.hasAttribute('viewBox'): box = rootNode.getAttribute('viewBox').split() if len(box) != 4: raise Exception("Invalid view box '" + rootNode.getAttribute('viewbox') + "'") if sizeFromString(box[0], 0.0) != 0.0 or sizeFromString(box[1], 0.0) != 0.0: raise Exception("View box must have an origin of (0, 0)") size = (sizeFromString(box[2], 0.0), sizeFromString(box[3], 0.0)) elif rootNode.hasAttribute('width') and rootNode.hasAttribute('height'): size = (sizeFromString(rootNode.getAttribute('width'), 0.0), sizeFromString(rootNode.getAttribute('height'), 0.0)) else: raise Exception("No size set on SVG.") diagonalSize = math.sqrt(size[0]*size[0] + size[1]*size[1])/math.sqrt(2) for node in rootNode.childNodes: if node.nodeType != xml.dom.Node.ELEMENT_NODE: continue if node.tagName == 'defs': readMaterials(node, materials, size, diagonalSize) else: commands.extend(readShapes(node, defaultFont, materials, size, diagonalSize, \ Transform())) break builder = flatbuffers.Builder(0) materials.write(builder) commandOffsets = [] for command in commands: commandOffsets.extend(command(builder)) VectorImage.StartCommandsVector(builder, len(commandOffsets)) for offset in reversed(commandOffsets): builder.PrependUOffsetTRelative(offset) commandsOffset = builder.EndVector() VectorImage.Start(builder) materials.writeToVectorImage(builder) VectorImage.AddCommands(builder, commandsOffset) VectorImage.AddSize(builder, CreateVector2f(builder, size[0], size[1])) builder.Finish(VectorImage.End(builder)) return builder.Output()
f71b22af076a466f951815e73f83ea989f920cdf
3,881
def to_accumulo(df, config: dict, meta: dict, compute=True, scheduler=None):
    """
    Parallel write of Dask DataFrame to Accumulo Table

    Parameters
    ----------
    df : Dataframe
        The dask.Dataframe to write to Accumulo
    config : dict
        Accumulo configuration to use to connect to accumulo
    meta : dict
        Data model to apply to dataframe
    compute : bool
        Should compute be called; immediately call write if True, delayed otherwise
    scheduler : str
        The scheduler to use, like “threads” or “processes”

    Returns
    -------
    The number of Accumulo rows written if they were computed right away.
    If not, the delayed tasks associated with the writing of the table
    """
    dfs = df.to_delayed()
    values = [delayed(pandas_write_dataframe)(config, d, meta) for d in dfs]

    if compute:
        return sum(delayed(values).compute(scheduler=scheduler))
    else:
        return values
016ee1cc516b8fd6c055902002a196b30ceb0e07
3,882
def compute_euclidean_distance(x, y): """ Computes the euclidean distance between two tensorflow variables """ d = tf.reduce_sum(tf.square(x-y),axis=1,keep_dims=True) return d
26171d3a0c719d0744ab163b33590f4bb1f92480
3,883
import random
import socket
import struct


def vpn_ping(address, port, timeout=0.05, session_id=None):
    """Sends a vpn negotiation packet and returns the server session.

    Returns False on a failure. Basic packet structure is below.

    Client packet (14 bytes)::

         0 1      8 9  13
        +-+--------+-----+
        |x| cli_id |?????|
        +-+--------+-----+
        x = packet identifier 0x38
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding

    Server packet (26 bytes)::

         0 1      8 9  13 14    21 22 25
        +-+--------+-----+--------+----+
        |x| srv_id |?????| cli_id |????|
        +-+--------+-----+--------+----+
        x = packet identifier 0x40
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding
        bit 9 was 1 and the rest were 0 in testing

    """
    if session_id is None:
        session_id = random.randint(0, 0xffffffffffffffff)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = struct.pack("!BQxxxxxx", 0x38, session_id)
    sock.sendto(data, (address, port))
    sock.settimeout(timeout)
    try:
        received = sock.recv(2048)
    except socket.timeout:
        return False
    finally:
        sock.close()
    fmt = "!BQxxxxxQxxxx"
    if len(received) != struct.calcsize(fmt):
        print(struct.calcsize(fmt))
        return False
    (identifier, server_sess, client_sess) = struct.unpack(fmt, received)
    if identifier == 0x40 and client_sess == session_id:
        return server_sess
    return False
dcc4d8cf347486b0f10f1dd51d230bd6fb625551
3,884
def is_admin(): """Checks if author is a server administrator, or has the correct permission tags.""" async def predicate(ctx): return ( # User is a server administrator. ctx.message.channel.permissions_for(ctx.message.author).administrator # User is a developer. or (ctx.author.id == developer_id) # User has a permission tag. or (discord.utils.get(ctx.author.roles, name=str(f"fox:{ctx.command.name}"))) ) return commands.check(predicate)
70a87d8ae4970b05aa39339fec2aa1ade43d238a
3,885
def send_message(chat_id): """Send a message to a chat If a media file is found, send_media is called, else a simple text message is sent """ files = request.files if files: res = send_media(chat_id, request) else: message = request.form.get("message", default="Empty Message") res = g.driver.chat_send_message(chat_id, message) if res: return jsonify(res) else: return False
df77e115497cfc975b9fad6f9a3b43648349133e
3,886
def get_neighbours(sudoku, row, col):
    """Return 3 lists of neighbours of the given cell (e.g. the row of that
    cell), excluding the cell itself."""
    row_neighbours = [sudoku[row][y] for y in range(9) if y != col]
    col_neighbours = [sudoku[x][col] for x in range(9) if x != row]
    sqr_neighbours = [sudoku[x][y] for x in range(9) if x//3 == row//3
                      for y in range(9) if y//3 == col//3
                      if x != row or y != col]
    return row_neighbours, col_neighbours, sqr_neighbours
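# Usage sketch on a toy 9x9 grid: for the cell at row 0, column 0, the row
# neighbours are the other 8 values in row 0, and the square neighbours are the
# other 8 cells of the top-left 3x3 box.
grid = [[(r * 9 + c) % 10 for c in range(9)] for r in range(9)]
row_n, col_n, sqr_n = get_neighbours(grid, 0, 0)
print(len(row_n), len(col_n), len(sqr_n))  # 8 8 8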
b10766fc8925b54d887925e1a684e368c0f3b550
3,887
import numpy as np
import torch
import PIL.Image


def to_ndarray(image):
    """
    Convert torch.Tensor or PIL.Image.Image to ndarray.

    :param image: (torch.Tensor or PIL.Image.Image) image to convert to ndarray
    :rtype (ndarray): image as ndarray
    """
    if isinstance(image, torch.Tensor):
        return image.numpy()
    if isinstance(image, PIL.Image.Image):
        return np.array(image)
    raise TypeError("to_ndarray: expect torch.Tensor or PIL.Image.Image")
f12444779e2d2eb78e3823821c8c6acec7c601a6
3,888
def make_map_exposure_true_energy(pointing, livetime, aeff, ref_geom, offset_max): """Compute exposure WcsNDMap in true energy (i.e. not convolved by Edisp). Parameters ---------- pointing : `~astropy.coordinates.SkyCoord` Pointing direction livetime : `~astropy.units.Quantity` Livetime aeff : `~gammapy.irf.EffectiveAreaTable2D` Effective area table ref_geom : `~gammapy.maps.WcsGeom` Reference WcsGeom object used to define geometry (space - energy) offset_max : `~astropy.coordinates.Angle` Maximum field of view offset. Returns ------- expmap : `~gammapy.maps.WcsNDMap` Exposure cube (3D) in true energy bins """ offset = make_separation_map(ref_geom, pointing).quantity # Retrieve energies from WcsNDMap # Note this would require a log_center from the geometry # Or even better edges, but WcsNDmap does not really allows it. energy = ref_geom.axes[0].center * ref_geom.axes[0].unit exposure = aeff.data.evaluate(offset=offset, energy=energy) exposure *= livetime # We check if exposure is a 3D array in case there is a single bin in energy # TODO: call np.atleast_3d ? if len(exposure.shape) < 3: exposure = np.expand_dims(exposure, 0) # Put exposure outside offset max to zero # This might be more generaly dealt with a mask map exposure[:, offset >= offset_max] = 0 data = exposure.to('m2 s') return WcsNDMap(ref_geom, data)
fa3def16e0509f50a21936aef0784bca91a84d07
3,889
def calc_random_piv_error(particle_image_diameter): """ Caclulate the random error amplitude which is proportional to the diameter of the displacement correlation peak. (Westerweel et al., 2009) """ c = 0.1 error = c*np.sqrt(2)*particle_image_diameter/np.sqrt(2) return error
91b02b658c0c6476739695017925c44c92bf67c8
3,890
def resolve(name, module=None): """Resolve ``name`` to a Python object via imports / attribute lookups. If ``module`` is None, ``name`` must be "absolute" (no leading dots). If ``module`` is not None, and ``name`` is "relative" (has leading dots), the object will be found by navigating relative to ``module``. Returns the object, if found. If not, propagates the error. """ name = name.split('.') if not name[0]: if module is None: raise ValueError("relative name without base module") module = module.split('.') name.pop(0) while not name[0]: module.pop() name.pop(0) name = module + name used = name.pop(0) found = __import__(used) for n in name: used += '.' + n try: found = getattr(found, n) except AttributeError: __import__(used) found = getattr(found, n) return found
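# Usage sketch: resolve a dotted name to the object it refers to, both as an
# absolute name and as a name relative to a base module.
path_join = resolve('os.path.join')
print(path_join('a', 'b'))  # a/b (or a\b on Windows)
print(resolve('.path.join', module='os') is path_join)  # True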
d778ff9e4ea821be6795cc9007552e6c0afeb565
3,891
def fibonacci(n:int) -> int: """Return the `n` th Fibonacci number, for positive `n`.""" if 0 <= n <= 1: return n n_minus1, n_minus2 = 1,0 result = None for f in range(n - 1): result = n_minus2 + n_minus1 n_minus2 = n_minus1 n_minus1 = result return result
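# Quick check of the sequence produced by fibonacci():
print([fibonacci(n) for n in range(10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]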
4be929f69dc9c35679af580767bfe047fc1963e9
3,892
import pandas as pd
from sqlalchemy import select, asc


def get_budget(product_name, sdate):
    """
    Budget for a product, limited to data available at the database

    :param product_name:
    :param sdate: starting date
    :return: pandas series
    """
    db = DB('forecast')
    table = db.table('budget')
    sql = select([table.c.budget]).where(table.c.product_name == product_name).order_by(asc('month'))
    ans = db.query(sql).fetchall()
    ret = []
    for row in ans:
        ret.append(float(row[0]))
    date_index = pd.date_range(start=sdate, periods=len(ret), freq='M')
    return pd.Series(data=ret, index=date_index)
a17ae7db2734c2c877a41eb0986016a4f0241f07
3,893
def _residual_block_basic(filters, kernel_size=3, strides=1, use_bias=False, name='res_basic', kernel_initializer='he_normal', kernel_regularizer=regulizers.l2(1e-4)): """ Return a basic residual layer block. :param filters: Number of filters. :param kernel_size: Kernel size. :param strides: Convolution strides :param use_bias: Flag to use bias or not in Conv layer. :param kernel_initializer: Kernel initialisation method name. :param kernel_regularizer: Kernel regularizer. :return: Callable layer block """ def layer_fn(x): x_conv1 = _res_conv( filters=filters, kernel_size=kernel_size, padding='same', strides=strides, use_relu=True, use_bias=use_bias, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, name=name + '_cbr_1')(x) x_residual = _res_conv( filters=filters, kernel_size=kernel_size, padding='same', strides=1, use_relu=False, use_bias=use_bias, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, name=name + '_cbr_2')(x_conv1) merge = _merge_with_shortcut(kernel_initializer, kernel_regularizer,name=name)(x, x_residual) merge = Activation('relu')(merge) return merge return layer_fn
87c041f58de71d7bd2d3fcbe97ec35b8fa057468
3,894
def console_script(tmpdir): """Python script to use in tests.""" script = tmpdir.join('script.py') script.write('#!/usr/bin/env python\nprint("foo")') return script
be6a38bec8bb4f53de83b3c632ff3d26d88ef1c7
3,895
def parse_tpl_file(tpl_file): """ parse a PEST-style template file to get the parameter names Args: tpl_file (`str`): path and name of a template file Returns: [`str`] : list of parameter names found in `tpl_file` Example:: par_names = pyemu.pst_utils.parse_tpl_file("my.tpl") """ par_names = set() with open(tpl_file, "r") as f: try: header = f.readline().strip().split() assert header[0].lower() in [ "ptf", "jtf", ], "template file error: must start with [ptf,jtf], not:" + str(header[0]) assert ( len(header) == 2 ), "template file error: header line must have two entries: " + str(header) marker = header[1] assert len(marker) == 1, ( "template file error: marker must be a single character, not:" + str(marker) ) for line in f: par_line = set(line.lower().strip().split(marker)[1::2]) par_names.update(par_line) # par_names.extend(par_line) # for p in par_line: # if p not in par_names: # par_names.append(p) except Exception as e: raise Exception( "error processing template file " + tpl_file + " :\n" + str(e) ) # par_names = [pn.strip().lower() for pn in par_names] # seen = set() # seen_add = seen.add # return [x for x in par_names if not (x in seen or seen_add(x))] return [p.strip() for p in list(par_names)]
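# Usage sketch: write a tiny PEST template file to disk and read the parameter
# names back out of it.
import os
import tempfile

tpl_text = "ptf ~\n  ~ par1   ~   ~ par2 ~\n  ~ par3 ~\n"
tmp = os.path.join(tempfile.gettempdir(), "example.tpl")
with open(tmp, "w") as f:
    f.write(tpl_text)

print(sorted(parse_tpl_file(tmp)))  # ['par1', 'par2', 'par3']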
01ed281f4ee9f1c51032d4f3655bd3e17b73bbb2
3,896
import datetime


def _save_downscaled(
        item: Item,
        image: Image,
        ext: str,
        target_type: str,
        target_width: int,
        target_height: int,
) -> Media:
    """Common downscale function."""
    if ext != 'jpg':
        image = image.convert('RGB')

    # TODO - these parameters are only for jpg
    kwargs = {
        'quality': 80,
        'progressive': True,
        'optimize': True,
        'subsampling': 0,
    }

    width, height = calculate_size(
        original_width=image.width,
        original_height=image.height,
        target_width=target_width,
        target_height=target_height,
    )

    smaller_image = image.resize((width, height))

    return Media(
        item_uuid=item.uuid,
        created_at=datetime.datetime.now(tz=datetime.timezone.utc),
        processed_at=None,
        status='init',
        type=target_type,
        ext='jpg',
        content=image_to_bytes(smaller_image, **kwargs),
    )
4200aa5812372d780fc1a4a6e4fa9752f4ab7993
3,897
def get_single_image_results(pred_boxes, gt_boxes, iou_thr): """Calculates number of true_pos, false_pos, false_neg from single batch of boxes. Args: gt_boxes (list of list of floats): list of locations of ground truth objects as [xmin, ymin, xmax, ymax] pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`) and 'scores' iou_thr (float): value of IoU to consider as threshold for a true prediction. Returns: dict: true positives (int), false positives (int), false negatives (int) """ all_pred_indices = range(len(pred_boxes)) all_gt_indices = range(len(gt_boxes)) if len(all_pred_indices) == 0: tp = 0 fp = 0 fn = len(gt_boxes) return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn} if len(all_gt_indices) == 0: tp = 0 fp = len(pred_boxes) fn = 0 return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn} gt_idx_thr = [] pred_idx_thr = [] ious = [] for ipb, pred_box in enumerate(pred_boxes): for igb, gt_box in enumerate(gt_boxes): iou = calc_iou_individual(pred_box, gt_box) if iou > iou_thr: gt_idx_thr.append(igb) pred_idx_thr.append(ipb) ious.append(iou) args_desc = np.argsort(ious)[::-1] if len(args_desc) == 0: # No matches tp = 0 fp = len(pred_boxes) fn = len(gt_boxes) else: gt_match_idx = [] pred_match_idx = [] for idx in args_desc: gt_idx = gt_idx_thr[idx] pr_idx = pred_idx_thr[idx] # If the boxes are unmatched, add them to matches if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx): gt_match_idx.append(gt_idx) pred_match_idx.append(pr_idx) tp = len(gt_match_idx) fp = len(pred_boxes) - len(pred_match_idx) fn = len(gt_boxes) - len(gt_match_idx) return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
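# Usage sketch. The snippet calls a calc_iou_individual() helper that is not
# shown here; the simple corner-format IoU below is an assumption standing in
# for it so the example can run end to end.
import numpy as np

def calc_iou_individual(pred_box, gt_box):
    x1 = max(pred_box[0], gt_box[0]); y1 = max(pred_box[1], gt_box[1])
    x2 = min(pred_box[2], gt_box[2]); y2 = min(pred_box[3], gt_box[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_p = (pred_box[2] - pred_box[0]) * (pred_box[3] - pred_box[1])
    area_g = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
    return inter / (area_p + area_g - inter)

gt = [[0, 0, 10, 10], [20, 20, 30, 30]]
pred = [[1, 1, 10, 10], [50, 50, 60, 60]]
print(get_single_image_results(pred, gt, iou_thr=0.5))
# {'true_pos': 1, 'false_pos': 1, 'false_neg': 1}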
3f3bc93641e2f7d04a21fed9a8d0c40fcbc9eacc
3,898
def get_list(caller_id): """ @cmview_user @response{list(dict)} PublicIP.dict property for each caller's PublicIP """ user = User.get(caller_id) ips = PublicIP.objects.filter(user=user).all() return [ip.dict for ip in ips]
41f7855eb258df444b29dc85860e5e85ae6de441
3,899