content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
def state(state_vec):
    """Qiskit wrapper of qobj"""
    return gen_operator.state(state_vec)
74094c7c6e3c33cff28777f54d8852245c31f276
10,149
def get_games_for_platform(platform_id):
    """Return the list of all the games for a given platform"""
    controller = GameController
    return controller.get_list_by_platform(MySQLFactory.get(), platform_id)
83855b6cdb4d39442e255d14d2f94d76a702a0ea
10,151
def validate_dataset(elem: object) -> Dataset:
    """Check that `elem` is a :class:`~pydicom.dataset.Dataset` instance."""
    if not isinstance(elem, Dataset):
        raise TypeError('Sequence contents must be Dataset instances.')
    return elem
d4744b06f0ccdc8dca0deab57585706c1ee91db9
10,152
def conv3x3(in_channels, out_channels, stride=1):
    """3x3 convolution"""
    weight_shape = (out_channels, in_channels, 3, 3)
    weight = Tensor(np.ones(weight_shape).astype(np.float32))
    conv = Conv2d(in_channels, out_channels,
                  kernel_size=3, stride=stride, padding=0,
                  weight_init=weight, has_bias=False, pad_mode="same")
    conv.conv2d.shard(strategy_weight)
    return conv
011a3f74e8665669f9ecf5d4b9e8abf14f52e053
10,154
def _workflow_complete(workflow_stage_dict: dict):
    """Check if the workflow is complete.

    This function checks if the entire workflow is complete.
    This function is used by `execute_processing_block`.

    Args:
        workflow_stage_dict (dict): Workflow metadata dictionary.

    Returns:
        bool, True if the workflow is complete, otherwise False.

    """
    # Check if all stages are complete, if so end the PBC by breaking
    # out of the while loop
    complete_stages = []
    for _, stage_config in workflow_stage_dict.items():
        complete_stages.append((stage_config['status'] == 'complete'))
    if all(complete_stages):
        LOG.info('PB workflow complete!')
        return True
    return False
4e5be4c4768d82e8b1e76d1964c3effb2e604dd2
10,155
def get_name(f, opera_format=True):
    """Load dataset and extract radar name from it"""
    ds = xr.open_dataset(f)
    if hasattr(ds, 'source'):
        radar = ds.source
    else:
        filename = osp.splitext(osp.basename(f))[0]
        radar = filename.split('_')[-1]
    if opera_format:
        if '/' in radar:
            radar = (radar[:2] + radar[-3:]).lower()
    else:
        if radar.islower():
            radar = radar[:2] + '/' + radar[-3:]
    return radar
8c50bebfde1300aa6de55981537cbc23171e6ee8
10,157
import six


def range_join(numbers, to_str=False, sep=",", range_sep=":"):
    """
    Takes a sequence of positive integer numbers given either as integer or string types, and
    returns a sequence of 1- and 2-tuples, denoting either single numbers or inclusive start and
    stop values of possible ranges. When *to_str* is *True*, a string is returned in a format
    consistent to :py:func:`range_expand` with ranges constructed by *range_sep* and merged with
    *sep*.

    Example:

    .. code-block:: python

        range_join([1, 2, 3, 5])
        # -> [(1, 3), (5,)]

        range_join([1, 2, 3, 5, 7, 8, 9])
        # -> [(1, 3), (5,), (7, 9)]

        range_join([1, 2, 3, 5, 7, 8, 9], to_str=True)
        # -> "1:3,5,7:9"
    """
    if not numbers:
        return "" if to_str else []

    # check type, convert, make unique and sort
    _numbers = []
    for n in numbers:
        if isinstance(n, six.string_types):
            try:
                n = int(n)
            except ValueError:
                raise ValueError("invalid number format '{}'".format(n))
        if isinstance(n, six.integer_types):
            _numbers.append(n)
        else:
            raise TypeError("cannot handle non-integer value '{}' in numbers to join".format(n))
    numbers = sorted(set(_numbers))

    # iterate through numbers, keep track of last starts and stops and fill a list of range tuples
    ranges = []
    start = stop = numbers[0]
    for n in numbers[1:]:
        if n == stop + 1:
            stop += 1
        else:
            ranges.append((start,) if start == stop else (start, stop))
            start = stop = n
    ranges.append((start,) if start == stop else (start, stop))

    # convert to string representation
    if to_str:
        ranges = sep.join(
            (str(r[0]) if len(r) == 1 else "{1}{0}{2}".format(range_sep, *r))
            for r in ranges
        )

    return ranges
c1b2d10ec1b47fa5c917fccead2ef8d5fc506370
10,158
def power_spectrum(x, fs, N=None):
    """
    Power spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    The power spectrum, or single-sided autospectrum, contains the squared RMS
    amplitudes of the signal.

    A power spectrum is a spectrum with squared RMS values. The power spectrum
    is calculated from the autospectrum of the signal.

    .. warning:: Does not include scaling to reference value!

    .. seealso:: :func:`auto_spectrum`
    """
    N = N if N else x.shape[-1]
    f, a = auto_spectrum(x, fs, N=N)
    a = a[..., N//2:]
    f = f[..., N//2:]
    a *= 2.0
    a[..., 0] /= 2.0       # DC component should not be doubled.
    if not N % 2:          # if not uneven
        a[..., -1] /= 2.0  # And neither should fs/2 be.
    return f, a
f665c529541420ada0ae4819e53de1e73035d83f
10,159
def CreateInstanceTemplate(task, task_dir):
    """Create the Compute Engine instance template that will be used to create
    the instances.
    """
    backend_params = task.BackendParams()
    instance_count = backend_params.get('instance_count', 0)
    if instance_count <= 0:
        clovis_logger.info('No template required.')
        return True
    bucket = backend_params.get('storage_bucket')
    if not bucket:
        clovis_logger.error('Missing bucket in backend_params.')
        return False
    return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,
                                          task_dir)
558e4ed3152bb87a51bd2bb7dd107af5dd76bcd1
10,160
def get_config_string(info_type, board_num, dev_num, config_item, max_config_len):
    """Returns configuration or device information as a null-terminated string.

    Parameters
    ----------
    info_type : InfoType
        The configuration information for each board is grouped into different
        categories. This parameter specifies which category you want. Always
        set this parameter to InfoType.BOARDINFO.
    board_num : int
        The number associated with the board when it was installed with
        InstaCal or created with :func:`.create_daq_device`.
    dev_num : int
        The purpose of the dev_num parameter depends on the value of the
        config_item parameter. It can serve as a channel number, an index into
        the config_item, or it can be ignored. Unless otherwise noted in the
        "config_item parameter values" section below, this value is ignored.
    config_item : BoardInfo
        The type of information to read from the device. Set it to one of the
        constants listed in the "config_item parameter values" section below.
    max_config_len : int
        The maximum number of bytes to be read from the device into config_val.

    Returns
    -------
    string
        The specified configuration item

    .. table:: **config_item parameter values**

        ============ =============================================================================
        config_item  Description
        ============ =============================================================================
        DEVMACADDR   MAC address of an Ethernet device.
        ------------ -----------------------------------------------------------------------------
        DEVSERIALNUM Factory serial number of a USB or Bluetooth device.

                     dev_num specifies either a base board (0) or an expansion board (1).
        ------------ -----------------------------------------------------------------------------
        DEVUNIQUEID  Unique identifier of a discoverable device, such as the serial number of
                     a USB device or MAC address of an Ethernet device.
        ------------ -----------------------------------------------------------------------------
        DEVVERSION   Firmware version and FPGA version installed on a device. Use this setting
                     in conjunction with one of these dev_num settings:

                     - MAIN (main firmware version)
                     - MEASUREMENT (measurement firmware version)
                     - MEASUREMENT_EXP (expansion board measurement firmware version)
                     - RADIO (radio firmware version)
                     - FPGA (FPGA version)
        ------------ -----------------------------------------------------------------------------
        USERDEVID    User-configured string identifier of up to maxConfigLen character/bytes
                     from an Ethernet, Bluetooth, or USB device.
        ============ =============================================================================
    """
    config_val = create_string_buffer(max_config_len)
    _check_err(_cbw.cbGetConfigString(
        info_type, board_num, dev_num, config_item, config_val,
        byref(c_int(max_config_len))))
    return config_val.value.decode('utf-8')
72a35d984cb35e38a5e0742c7d790dc72ccbc928
10,161
import inspect


def _get_kwargs(func, locals_dict, default=None):
    """
    Convert a function's args to a kwargs dict containing entries that are not identically default.

    Parameters
    ----------
    func : function
        The function whose args we want to convert to kwargs.
    locals_dict : dict
        The locals dict for the function.
    default : object
        Don't include arguments whose values are this object.

    Returns
    -------
    dict
        The non-default keyword args dict.
    """
    return {n: locals_dict[n] for n in inspect.signature(func).parameters
            if locals_dict[n] is not default}
ae0a06cb4e17b5512a03e89d7ca2119c58ea762b
10,162
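A quick usage sketch for the `_get_kwargs` snippet above (`f` is a hypothetical function, called from inside the function whose locals are being converted):

def f(a, b=2, c=3):
    # collect every argument whose value is not the `None` sentinel
    return _get_kwargs(f, locals())

print(f(1))          # {'a': 1, 'b': 2, 'c': 3}
print(f(1, c=None))  # {'a': 1, 'b': 2} -- c is identically None, so it is dropped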
from datetime import date


def iso_to_date(iso_str: str):
    """Convert a date string with ISO formatting to a datetime date object"""
    if not iso_str:
        return None
    return date(*map(int, iso_str.split('-')))
a0d0541298ed538d7df9940ceef7b2bac121af27
10,163
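A minimal check of the `iso_to_date` helper above:

print(iso_to_date("2021-03-05"))  # 2021-03-05 (a datetime.date)
print(iso_to_date(""))            # None -- empty strings short-circuit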
from functools import wraps

from toolz import curry  # assumed source of `curry`; any currying decorator with this behavior works


def call_with_error(error_type):
    """Collects a bunch of errors and returns them all at once.

    Decorator that collects the errors in the decorated function so that the
    user can see everything they need to fix at once. All errors are thrown
    with the same error type. The decorated function must have an `error`
    keyword parameter. The `error` parameter is then ignored if the end user
    passes in that argument.

    Parameters
    ----------
    error_type: type
        The type of error to throw. For example, `ValueError`.

    Returns
    -------
    Callable[[Callable[[Any], Any]], Callable[[Any], Any]]
        Returns a decorator

    Example
    -------
    >>> @call_with_error(ValueError)
    ... def func(a: int, b: int, error: Callable[[str], None]) -> int:
    ...     if a < 0:
    ...         error("a must be zero or greater")
    ...     if b < 0:
    ...         error("b must be zero or greater")
    ...     return a + b
    >>> func(-1, 0)
    ValueError("a must be zero or greater")
    >>> func(0, -1)
    ValueError("b must be zero or greater")
    >>> func(-1, -1)
    ValueError("a must be zero or greater\nb must be zero or greater")
    """
    def _call_with_error(f):
        @curry
        def error(log, msg):
            log.append(msg)

        @wraps(f)
        def wrapped(*args, **kwargs):
            log = []
            result = f(*args, error=error(log), **kwargs)
            if len(log) > 0:
                raise error_type("\n".join(log))
            return result

        return wrapped

    return _call_with_error
9a64fb630b0a491bc9e01d77ebe35199df47ab55
10,164
def _get_metadata_and_fingerprint(instance_name, project, zone):
    """Return the metadata values and fingerprint for the given instance."""
    instance_info = _get_instance_info(instance_name, project, zone)
    if not instance_info:
        logs.log_error('Failed to fetch instance metadata')
        return None, None

    fingerprint = instance_info['metadata']['fingerprint']
    metadata_items = instance_info['metadata']['items']
    return metadata_items, fingerprint
7049805d538c7942dc8249e91c27faa2c1867936
10,165
def solve_token_pair_and_fee_token_economic_viable(
    token_pair, accounts, b_orders, s_orders, f_orders, fee, xrate=None
):
    """Match orders between token pair and the fee token, taking into account
    all side constraints, including economic viability.

    If xrate is given, then it will be used instead of trying to find the optimal xrate.

    Sets b_orders/s_orders/f_orders (integral) buy_amounts for the best execution.
    Also returns the (integral) prices found.
    """
    b_buy_token, s_buy_token = token_pair

    orders, prices = TRIVIAL_SOLUTION

    # Search for an economically viable solution.
    while len(b_orders) > 0 or len(s_orders) > 0:
        # Solve current problem.
        orders, prices = solve_token_pair_and_fee_token(
            token_pair, accounts, b_orders, s_orders, f_orders, fee, xrate
        )

        # If solution is economically viable, exit.
        # Hopefully, in the large majority of cases this will occur in the first iteration.
        if is_economic_viable(orders, prices, fee, IntegerTraits) or is_trivial(orders):
            break

        # If solution cannot be made economically viable (assuming prices wouldn't change)
        if len(compute_approx_economic_viable_subset(
            orders, prices, fee, IntegerTraits
        )) == 0:
            orders, prices = TRIVIAL_SOLUTION
            break

        # Note: to increase performance, we could consider removing all orders that do not
        # satisfy min_abs_fee_per_order here at once, instead of removing one at a time as
        # it is currently. The advantage of removing one by one is that it will not remove
        # more than needed (note that prices, and hence order fees, keep changing).

        # Find and remove the order paying the least fee.
        b_order_with_min_buy_amount = min(
            [o for o in b_orders if o.buy_amount > 0], key=lambda o: o.buy_amount
        )
        s_order_with_min_buy_amount = min(
            [o for o in s_orders if o.buy_amount > 0], key=lambda o: o.buy_amount
        )
        if b_order_with_min_buy_amount.buy_amount * prices[b_buy_token] \
                < s_order_with_min_buy_amount.buy_amount * prices[s_buy_token]:
            b_orders = [o for o in b_orders if o.id != b_order_with_min_buy_amount.id]
        else:
            s_orders = [o for o in s_orders if o.id != s_order_with_min_buy_amount.id]

    # Make sure the solution is correct.
    validate(accounts, orders, prices, fee)

    return orders, prices
f6fcf5bccdf498f29852614751cf120a8e2addd4
10,166
def two_categorical(df, x, y, plot_type="Cross tab"):
    """Plot two categorical columns against each other.

    plot_type is one of ['Cross tab', 'Stacked bar'].
    """
    if plot_type is None:
        plot_type = 'Cross tab'
    if plot_type == 'Stacked bar':
        df_cross = pd.crosstab(df[x], df[y])
        data = []
        for col in df_cross.columns:  # avoid shadowing the `x` argument
            data.append(go.Bar(name=str(col), x=df_cross.index, y=df_cross[col]))
        fig = go.Figure(data)
        fig.update_layout(barmode='stack')
        return fig
    if plot_type == "Cross tab":
        df_cross = pd.crosstab(df[x], df[y])
        return df_cross
5c0908055848d9de02920f8e2718de0918f4b460
10,167
def yices_bvconst_one(n):
    """Set low-order bit to 1, all the other bits to 0.

    Error report:
    if n = 0
        code = POS_INT_REQUIRED
        badval = n
    if n > YICES_MAX_BVSIZE
        code = MAX_BVSIZE_EXCEEDED
        badval = n.
    """
    # let yices deal with int32_t excesses
    if n > MAX_INT32_SIZE:
        n = MAX_INT32_SIZE
    return libyices.yices_bvconst_one(n)
6a70c4773a6558e068d2bbafb908657d5b5b4d1d
10,168
def showItem(category_id):
    """Show all Items"""
    category = session.query(Category).filter_by(id=category_id).one()
    items = session.query(Item).filter_by(category_id=category_id).all()
    return render_template('item.html', items=items, category=category)
230cc9b7e8043b0bb3e78866b2a27a0aec287828
10,169
def get_standard_t_d(l, b, d):
    """
    Use NE2001 to estimate scintillation time at 1 GHz and 1 km/s transverse velocity.

    Parameters
    ----------
    l : float
        Galactic longitude
    b : float
        Galactic latitude
    d : float
        Distance in kpc

    Returns
    -------
    t_d : float
        Scintillation timescale in s
    """
    return query_ne2001(l, b, d, field='SCINTIME')
e1743ae75a6893376c5e13126deda6f0eb41d38f
10,170
def match(pattern: str, text: str) -> bool:
    """Match a pattern against text of the same length."""
    if not pattern:  # empty pattern matches everything (base case)
        return True
    elif pattern == "$" and text == "":
        return True
    elif len(pattern) > 1 and pattern[1] == "?":
        return _match_question(pattern, text)
    elif len(pattern) > 1 and pattern[1] == "*":
        return _match_star(pattern, text)
    else:
        return match_one(pattern[0], text[0]) and match(pattern[1:], text[1:])
0dc71f00323502de7c1a2e00c502c99d75f56fc1
10,172
def config_func(tools, index, device_id, config_old: dict, config_new: dict):
    """
    CANedge configuration update function
    :param tools: A collection of tools used for device configuration
    :param index: Consecutive device index (from 0)
    :param device_id: Device ID
    :param config_old: The current device configuration
    :param config_new: Default new device configuration
    :return: Updated configuration
    """
    # This is an example of how to upgrade existing access point and S3 credentials from plain to
    # encrypted form. Note that below assumes that the existing configuration holds the information
    # in unencrypted form. Devices already using encrypted credentials are skipped (no configuration
    # returned)

    # New configuration uses same structure. The old configuration can safely be copied to the new.
    config_new = config_old

    # Only update configurations with unencrypted credentials
    if config_new["connect"]["wifi"]["keyformat"] == 0 and config_new["connect"]["s3"]["server"]["keyformat"] == 0:

        # Set the server kpub
        config_new["general"]["security"] = {"kpub": tools.security.user_public_key_base64}

        # Set the access point key format to 1 (encrypted)
        config_new["connect"]["wifi"]["keyformat"] = 1

        # Loop each accesspoint in list
        for ap in config_new["connect"]["wifi"]["accesspoint"]:
            # Encrypt the wifi password
            unencrypted_wifi_pwd = ap["pwd"]
            ap["pwd"] = tools.security.encrypt_encode(unencrypted_wifi_pwd)

        # Encrypt the S3 secret key
        unencrypted_s3_secretkey = config_new["connect"]["s3"]["server"]["secretkey"]
        config_new["connect"]["s3"]["server"]["keyformat"] = 1
        config_new["connect"]["s3"]["server"]["secretkey"] = tools.security.encrypt_encode(unencrypted_s3_secretkey)

        return config_new
c0585c3a268fb40e3e00a2613e03001bc561566a
10,173
import pickle as pk  # assumed: `pk` is the pickle module, given the .pkl files used below


def load_figure(file_path: str) -> matplotlib.figure.Figure:
    """Fully loads the saved figure to be able to be modified.

    It can be easily shown by: fig_object.show()

    Args:
        file_path: String file path without file extension.

    Returns:
        Figure object.

    Raises:
        None.
    """
    with open(file_path + '.pkl', 'rb') as handle:
        fig_object = pk.load(handle)
    return fig_object
76dcc0a27a3ae04e574a3d69fb431eedbc0c618a
10,174
def cluster_vectors(vectors, k=500, n_init=100, **kwargs):
    """Cluster the vectors with k-means (k-means++ initialisation)."""
    kwargs.pop('n_clusters', None)
    kwargs.pop('init', None)
    kwargs.pop('n_init', None)
    return KMeans(n_clusters=k, init='k-means++', n_init=n_init, **kwargs).fit(vectors)
28984811ea58a2a2c123d36cdb5e56c1d5b8d0db
10,175
import math

import torch


def positional_encoding(d_model, length):
    """
    :param d_model: dimension of the model
    :param length: length of positions
    :return: length*d_model position matrix
    """
    if d_model % 2 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with "
                         "odd dim (got dim={:d})".format(d_model))
    pe = torch.zeros(length, d_model)
    position = torch.arange(0, length).unsqueeze(1)
    div_term = torch.exp((torch.arange(0, d_model, 2, dtype=torch.float) *
                          -(math.log(10000.0) / d_model)))
    pe[:, 0::2] = torch.sin(position.float() * div_term)
    pe[:, 1::2] = torch.cos(position.float() * div_term)
    return pe
de41f0c99b46f16dbe300d59527e11b98a0b1f14
10,176
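A short shape check for the `positional_encoding` snippet above:

pe = positional_encoding(d_model=8, length=4)
print(pe.shape)  # torch.Size([4, 8])
print(pe[0])     # position 0: the sin entries are 0, the cos entries are 1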
def invert_hilbert_QQ(n=40, system='sage'):
    """
    Runs the benchmark for calculating the inverse of the Hilbert matrix over
    rationals of dimension n.

    INPUT:

    - ``n`` - matrix dimension (default: ``40``)
    - ``system`` - either 'sage' or 'magma' (default: 'sage')

    EXAMPLES::

        sage: import sage.matrix.benchmark as b
        sage: ts = b.invert_hilbert_QQ(30)
        sage: tm = b.invert_hilbert_QQ(30, system='magma')  # optional - magma
    """
    if system == 'sage':
        A = hilbert_matrix(n)
        t = cputime()
        d = A**(-1)
        return cputime(t)
    elif system == 'magma':
        code = """
h := HilbertMatrix(%s);
tinit := Cputime();
d := h^(-1);
s := Cputime(tinit);
delete h;
""" % n
        if verbose:
            print(code)
        magma.eval(code)
        return float(magma.eval('s'))
153e7400467a57cf07f839042d31d4800fe161bd
10,179
def getModelListForEnumProperty(self, context):
    """Returns a list of (str, str, str) elements which contains the models
    contained in the currently selected model category.
    If there are no model categories (i.e. '-') return ('-', '-', '-').

    Args:
        context:

    Returns:

    """
    category = context.window_manager.category
    if category == '-' or category == '':
        return [('-',) * 3]
    return sorted(model_previews[category].enum_items)
46f642933dd220b0f71431ff4a9cb7410858fbf0
10,180
import logging
from subprocess import PIPE, Popen


def run_wcs(*args, **kwargs):
    """
    Set up the environment and run the bundled wcs.exe (from the Talon
    distribution) using the supplied command line arguments
    """
    # Pull out keyword args that we are interested in
    write_stdout_to_console = kwargs.get("write_stdout_to_console", False)

    # Override the TELHOME environment variable so that we can use relative
    # paths when specifying the location of the GSC directory and ip.cfg
    environment = dict(TELHOME=paths.talon_wcs_path())

    stdout_destination = PIPE
    if write_stdout_to_console:
        stdout_destination = None

    # Make sure all passed-in arguments are strings
    args = [str(x) for x in args]

    args = [
        WCS_EXE,
        # wcs.exe will use the last-specified values for -i and -c, so
        # we'll provide defaults below but they can be overridden by values
        # coming in via the *args array
        "-i", "ip.cfg",  # Specify the path to ip.cfg (relative to TELHOME)
        "-c", "gsc",     # Specify the path to the GSC catalog (relative to TELHOME)
    ] + list(args)       # Include additional args specified by the user

    process = Popen(args, env=environment, stdout=stdout_destination, stderr=PIPE)

    # Obtain stdout and stderr output from the wcs tool
    (stdout, stderr) = process.communicate()
    # Wait for process to complete and obtain the exit code
    exit_code = process.wait()

    if not write_stdout_to_console:
        logging.info(stdout.decode("utf-8"))

    if exit_code != 0:
        logging.info("Error finding WCS solution.\n" +
                     "Exit code: " + str(exit_code) + "\n" +
                     "Error output: " + stderr.decode("utf-8"))
        return False

    return True
51461fcbc4ed5a08063d630aea926d312b9da825
10,182
def store_exposure_fp(fp, exposure_type):
    """
    Preserve the original exposure file extension if it is in a pandas-supported
    compressed format.

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer' and
        filepath_or_buffer is path-like, then detect compression from the
        following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
        decompression). If using 'zip', the ZIP file must contain only one data
        file to be read in. Set to None for no decompression.

        New in version 0.18.1: support for 'zip' and 'xz' compression.
    """
    compressed_ext = ('.gz', '.bz2', '.zip', '.xz')
    filename = SOURCE_FILENAMES[exposure_type]
    if fp.endswith(compressed_ext):
        return '.'.join([filename, fp.rsplit('.')[-1]])
    else:
        return filename
c187e79d4cce7ea79b66671a2e7378a35de4841f
10,184
def velocity_genes(data, vkey='velocity', min_r2=0.01, highly_variable=None, copy=False):
    """Estimates velocities in a gene-specific manner.

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` (default: `'velocity'`)
        Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
    min_r2: `float` (default: 0.01)
        Minimum threshold for coefficient of determination
    highly_variable: `bool` (default: `None`)
        Whether to include highly variable genes only.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to `adata`.

    Returns
    -------
    Updates `adata` attributes
    velocity_genes: `.var`
        genes to be used for further velocity analysis (velocity graph and embedding)
    """
    adata = data.copy() if copy else data
    if vkey + '_genes' not in adata.var.keys():
        velocity(data, vkey)
    adata.var[vkey + '_genes'] = np.array(adata.var[vkey + '_genes'], dtype=bool) \
        & (adata.var[vkey + '_r2'] > min_r2)

    if highly_variable and 'highly_variable' in adata.var.keys():
        adata.var[vkey + '_genes'] &= adata.var['highly_variable']

    logg.info('Number of obtained velocity_genes:', np.sum(adata.var[vkey + '_genes']))

    return adata if copy else None
7d2a0b86d2fb4402cdef9ab56fe2638f89d09fac
10,185
def update_graphics_labels_from_node_data(node, n_id_map, add_new_props):
    """Updates the graphics labels so they match the node-data"""
    try:
        gfx = select_child(node, n_id_map, 'nodegraphics').getchildren()[0].getchildren()
    except Exception:
        return None
    node_label = select_child(node, n_id_map, 'labelcount').text
    node_props = select_child(node, n_id_map, 'node_prop_text').text
    # Nodes have either 0, 1, or 2 node labels. If 1, it's just title and count.
    # If 2, the first one is title and count, the second is properties and counts.
    i = 0
    for elem in gfx:
        if elem.tag.endswith('NodeLabel'):
            if i == 0:
                elem.text = node_label
                i += 1
            # not all nodes have a props-label
            elif i == 1 and node_props:
                # Add all properties to the label text, even if new
                elem.text = node_props
c2d3104dbc3a20ff6c34de754ff681b176091787
10,186
def ProcuraPalavra(dicionário, palavra):
    """Look up the possible words that could replace the given word and return them in a list."""
    # First of all, upper-case the word so the comparisons work
    palavra = palavra.upper()
    # First we handle the case where a first letter has been selected,
    # which narrows down our search
    if palavra[0] != '*':
        # Find the point in the dictionary where our letter starts
        for i in range(len(dicionário)):
            if i % 100 == 0:
                print('Procurando Letra no dicionário...')
            if dicionário[i][0] == palavra[0]:
                break
        # And also the point in the dictionary where our letter ends
        for j in range(i, len(dicionário)):
            if j % 100 == 0:
                print('Procurando Letra no dicionário...')
            if dicionário[j][0] != palavra[0]:
                break
        return SeparaPorTamanho(dicionário[i:j], palavra)
    else:
        return SeparaPorTamanho(dicionário, palavra)
1a283aec6670c0e2fe6ca6f4366ef43c6ba97e9f
10,189
import json

import six


def _build_auth_record(response):
    """Build an AuthenticationRecord from the result of an MSAL ClientApplication token request"""
    try:
        id_token = response["id_token_claims"]

        if "client_info" in response:
            client_info = json.loads(_decode_client_info(response["client_info"]))
            home_account_id = "{uid}.{utid}".format(**client_info)
        else:
            # MSAL uses the subject claim as home_account_id when the STS doesn't provide client_info
            home_account_id = id_token["sub"]

        # "iss" is the URL of the issuing tenant e.g. https://authority/tenant
        issuer = six.moves.urllib_parse.urlparse(id_token["iss"])
        # tenant which issued the token, not necessarily user's home tenant
        tenant_id = id_token.get("tid") or issuer.path.strip("/")

        # AAD returns "preferred_username", ADFS returns "upn"
        username = id_token.get("preferred_username") or id_token["upn"]

        return AuthenticationRecord(
            authority=issuer.netloc,
            client_id=id_token["aud"],
            home_account_id=home_account_id,
            tenant_id=tenant_id,
            username=username,
        )
    except (KeyError, ValueError) as ex:
        auth_error = ClientAuthenticationError(
            message="Failed to build AuthenticationRecord from unexpected identity token"
        )
        six.raise_from(auth_error, ex)
96aed71945c354e41cafd89bf5d7a7d62c31a40a
10,191
def loc_data_idx(loc_idx):
    """
    Return a tuple of slices containing the unflipped idx corresponding to loc_idx.
    By 'unflipped' we mean that if a slice has a negative step, we wish to
    retrieve the corresponding indices but not in reverse order.

    Examples
    --------
    >>> loc_data_idx(slice(11, None, -3))
    (slice(2, 12, 3),)
    """
    retval = []
    for i in as_tuple(loc_idx):
        if isinstance(i, slice) and i.step is not None and i.step == -1:
            if i.stop is None:
                retval.append(slice(0, i.start + 1, -i.step))
            else:
                retval.append(slice(i.stop + 1, i.start + 1, -i.step))
        elif isinstance(i, slice) and i.step is not None and i.step < -1:
            if i.stop is None:
                lmin = i.start
                while lmin >= 0:
                    lmin += i.step
                retval.append(slice(lmin - i.step, i.start + 1, -i.step))
            else:
                retval.append(slice(i.stop + 1, i.start + 1, -i.step))
        elif is_integer(i):
            retval.append(slice(i, i + 1, 1))
        else:
            retval.append(i)
    return as_tuple(retval)
d30b1b27957e24ff30caf19588487645ac198dc8
10,192
def eat_descriptor(descr):
    """
    Read the head of a field/method descriptor. Returns a pair of strings, where
    the first one is a human-readable string representation of the first found
    type, and the second one is the tail of the parameter.
    """
    array_dim = 0
    while descr[0] == '[':
        array_dim += 1
        descr = descr[1:]
    if descr[0] == 'L':
        end = descr.find(';')
        if end == -1:  # str.find returns -1 rather than raising
            raise ParserError("Not a valid descriptor string: " + descr)
        type = descr[1:end]
        descr = descr[end:]
    else:
        global code_to_type_name
        try:
            type = code_to_type_name[descr[0]]
        except KeyError:
            raise ParserError("Not a valid descriptor string: %s" % descr)
    return (type.replace("/", ".") + array_dim * "[]", descr[1:])
411ce48ce250fe15438cd89f43b91ee9b87908a6
10,193
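Illustrative calls for the `eat_descriptor` snippet above, assuming `code_to_type_name` maps the standard JVM primitive codes (e.g. 'I' -> 'int'):

print(eat_descriptor("Ljava/lang/String;I"))  # ('java.lang.String', 'I')
print(eat_descriptor("[I"))                   # ('int[]', '')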
def legendre(a, p):
    """Legendre symbol"""
    tmp = pow(a, (p - 1) // 2, p)
    return -1 if tmp == p - 1 else tmp
66b86dce23ae10ba226ffb19942b98550bb7c218
10,194
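A quick sanity check of the `legendre` helper above (the quadratic residues mod 7 are {1, 2, 4}):

print(legendre(2, 7))  # 1  (3*3 = 9 = 2 mod 7, so 2 is a residue)
print(legendre(3, 7))  # -1 (3 is a non-residue mod 7)
print(legendre(7, 7))  # 0  (a divisible by p)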
def precheck_arguments(args):
    """Make sure the argument choices are valid"""
    any_filelist = (len(args.filelist_name[0]) > 0 or
                    len(args.output_dir[0]) > 0 or
                    args.num_genomes[0] > 0)
    if len(args.filelist_name[0]) > 0 and len(args.output_dir[0]) == 0:
        print("Error: Need to specify output directory with -O if using -F")
        exit(1)
    if len(args.filelist_name[0]) == 0 and len(args.output_dir[0]) > 0:
        print("Error: Need to specify a filelist with -F if using -O")
        exit(1)
    if len(args.input_fasta[0]) > 0 and any_filelist:
        print("Error: When using -i flag, cannot use any of other options that imply multiple files")
        exit(1)
    if len(args.input_fasta[0]) > 0 and not any_filelist:
        return "single"
    elif any_filelist and len(args.input_fasta[0]) == 0:
        return "multi"
    else:
        print("Error: Need to specify either -i or the combination of -F and -O")
        exit(1)
984865d214cca63eae8bacf5bc7be238e7209ddb
10,196
def get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors)
6d72ea1ffcdf20bbf05f75f6084a9027e771196a
10,197
def get_expr_fields(self):
    """Get the fields referenced by switch or list expression."""
    def get_expr_field_names(expr):
        if expr.op is None:
            if expr.lenfield_name is not None:
                return [expr.lenfield_name]
            else:
                # constant value expr
                return []
        else:
            if expr.op == '~':
                return get_expr_field_names(expr.rhs)
            elif expr.op == 'popcount':
                return get_expr_field_names(expr.rhs)
            elif expr.op == 'sumof':
                # sumof expr references another list,
                # we need that list's length field here
                field = None
                for f in expr.lenfield_parent.fields:
                    if f.field_name == expr.lenfield_name:
                        field = f
                        break
                if field is None:
                    raise Exception("list field '%s' referenced by sumof not found" % expr.lenfield_name)
                # referenced list + its length field
                return [expr.lenfield_name] + get_expr_field_names(field.type.expr)
            elif expr.op == 'enumref':
                return []
            else:
                return get_expr_field_names(expr.lhs) + get_expr_field_names(expr.rhs)
    # get_expr_field_names()

    # resolve the field names with the parent structure(s)
    unresolved_fields_names = get_expr_field_names(self.expr)

    # construct prefix from self
    prefix = [('', '', p) for p in self.parents]
    if self.is_container:
        prefix.append(('', '', self))

    all_fields = _c_helper_resolve_field_names(prefix)
    resolved_fields_names = list(filter(lambda x: x in all_fields.keys(), unresolved_fields_names))
    if len(unresolved_fields_names) != len(resolved_fields_names):
        raise Exception("could not resolve all fields for %s" % self.name)

    resolved_fields = [all_fields[n][1] for n in resolved_fields_names]
    return resolved_fields
103aa0ac54be37b23d9695dddfda9972a9f0d7f0
10,198
import math


def add_bias_towards_void(transformer_class_logits, void_prior_prob=0.9):
    """Adds init bias towards the void (no object) class to the class logits.

    We initialize the void class with a large probability, similar to Section 3.3
    of the Focal Loss paper.

    Reference:
        Focal Loss for Dense Object Detection, ICCV 2017.
        https://arxiv.org/abs/1708.02002
        Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, Piotr Dollár.

    Args:
        transformer_class_logits: A [batch, num_mask_slots, num_classes] tensor,
            the class logits predicted by the transformer. It concats
            (num_classes - 1) non-void classes, including both thing classes and
            stuff classes, and the void class (the last channel). If the dataset
            class IDs do not follow this order, MaX-DeepLab loss functions will
            handle the mapping and thus the architecture still supports any dataset.
        void_prior_prob: A float, the desired probability (after softmax) of the
            void class at initialization. Defaults to 0.9 as in MaX-DeepLab.

    Returns:
        updated_transformer_class_logits: A [batch, num_mask_slots, num_classes]

    Raises:
        ValueError: If the rank of transformer_class_logits is not 3.
    """
    class_logits_shape = transformer_class_logits.get_shape().as_list()
    if len(class_logits_shape) != 3:
        raise ValueError('Input transformer_class_logits should have rank 3.')

    init_bias = [0.0] * class_logits_shape[-1]
    init_bias[-1] = math.log(
        (class_logits_shape[-1] - 1) * void_prior_prob / (1 - void_prior_prob))

    # Broadcasting the 1D init_bias to the 3D transformer_class_logits.
    return transformer_class_logits + tf.constant(init_bias, dtype=tf.float32)
f5b3439fc7fbc987bbcbc3b64fd689208db7c5e6
10,199
def trajnet_batch_multi_eval(preds, gt, seq_start_end):
    """Calculate Top-k ADE, Top-k FDE for a batch of samples.

    pred = Num_modes x Num_ped x Num_timesteps x 2
    gt = Num_ped x Num_timesteps x 2
    seq_start_end (batch delimiter) = Num_batches x 2
    """
    s_topk_ade = 0
    s_topk_fde = 0
    for (start, end) in seq_start_end:
        s_preds = [pred[start:end] for pred in preds]
        s_topk_ade += topk_ade(s_preds, gt[start:end])
        s_topk_fde += topk_fde(s_preds, gt[start:end])
    return s_topk_ade, s_topk_fde
ff93309e61d871a2d337810cc1836950f883c184
10,200
def disemvowel(sentence):
    """Disemvowel: Given a sentence, return the sentence with all vowels removed.

    >>> disemvowel('the quick brown fox jumps over the lazy dog')
    'th qck brwn fx jmps vr th lzy dg'
    """
    vowels = ('a', 'e', 'i', 'o', 'u')
    for x in sentence:
        if x in vowels:
            sentence = sentence.replace(x, "")
    return sentence
d9b6d873c29e82cb65e43f71e2b6298af18b25fd
10,201
def runPolyReg(xValueList, yValueList, degrees):
    """Performs *Polynomial Regression* based on the arguments provided.

    Note that we split the data by the *first* 80 percent of the data and then
    the *last* 20 percent of the data, rather than randomly splitting the data
    by 80/20 for the train/test split.

    Args:
        xValueList (list of floats) : List of X values used for polynomial
            regression. Offset 1 day earlier than the y values so we have
            something to predict. Prepared by *prepDataSets*. Can change based
            on the values saved in the configuration file.
        yValueList (list of floats) : Close values tied to the X value list
            for the following day.
        degrees (int) : Degree the polynomial will be operating at.

    :return:
        model: The actual machine learning model.
        float: the RMSE score for the model.
    """
    splitValue = int(len(xValueList) * 0.2)
    xTrain, xTest, yTrain, yTest = (
        xValueList.iloc[:-splitValue],
        xValueList.iloc[-splitValue:],  # last 20 percent, per the docstring
        yValueList[:-splitValue],
        yValueList[-splitValue:],
    )
    polyreg = make_pipeline(PolynomialFeatures(degree=degrees), LinearRegression())
    polyreg.fit(xTrain, yTrain)
    yPred = polyreg.predict(xTest)
    results = metrics.rmse_score(yTest, yPred)
    return (polyreg, results)
25d4699f720d943dc49264edc12f2246df51f053
10,202
def unfold_phi_vulpiani(phidp, kdp):
    """Alternative phase unfolding which completely relies on :math:`K_{DP}`.

    This unfolding should be used in order to iteratively reconstruct
    :math:`Phi_{DP}` and :math:`K_{DP}` (see :cite:`Vulpiani2012`).

    Parameters
    ----------
    phidp : :class:`numpy:numpy.ndarray`
        array of floats
    kdp : :class:`numpy:numpy.ndarray`
        array of floats
    """
    # unfold phidp
    shape = phidp.shape
    phidp = phidp.reshape((-1, shape[-1]))
    kdp = kdp.reshape((-1, shape[-1]))

    for beam in range(len(phidp)):
        below_th3 = kdp[beam] < -20
        try:
            idx1 = np.where(below_th3)[0][2]
            phidp[beam, idx1:] += 360
        except Exception:
            pass

    return phidp.reshape(shape)
72386a05500c4ba11385e3b57288655e0a207352
10,203
def get_result_df(session):
    """
    query the match table and put results into pandas dataframe,
    to train the team-level model.
    """
    df_past = pd.DataFrame(
        np.array(
            [
                [s.fixture.date, s.fixture.home_team, s.fixture.away_team,
                 s.home_score, s.away_score]
                for s in session.query(Result).all()
            ]
        ),
        columns=["date", "home_team", "away_team", "home_goals", "away_goals"],
    )
    df_past["home_goals"] = df_past["home_goals"].astype(int)
    df_past["away_goals"] = df_past["away_goals"].astype(int)
    df_past["date"] = pd.to_datetime(df_past["date"])
    return df_past
364d9e7f9ef1a97018402fa964f246954f51f945
10,204
def permute1d(preserve_symmetry=True):
    """Choose order to rearrange rows or columns of puzzle."""
    bp = block_permutation(preserve_symmetry)
    ip = [block_permutation(False), block_permutation(preserve_symmetry)]
    if preserve_symmetry:
        ip.append([2 - ip[0][2], 2 - ip[0][1], 2 - ip[0][0]])
    else:
        ip.append(block_permutation(False))
    return [bp[i] * 3 + ip[i][j] for i in [0, 1, 2] for j in [0, 1, 2]]
a9ccd2cb486e0ee3d50840c6ab41871396f3ca93
10,205
from itertools import islice
from typing import Iterable, List, TypeVar

T_ = TypeVar("T_")


def take(n: int, iterable: Iterable[T_]) -> List[T_]:
    """Return first n items of the iterable as a list"""
    return list(islice(iterable, n))
491cdaaa20ad67b480ea92acaeb53e4edf2b4d56
10,208
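Usage sketch for the `take` snippet above:

from itertools import count

print(take(3, count(10)))        # [10, 11, 12]
print(take(2, ["a", "b", "c"]))  # ['a', 'b']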
def abs(rv):
    """Returns the absolute value of a random variable"""
    return rv.abs()
6bf2f8420f8a5e883dfddfc9a93106662a8f1a74
10,209
def compute_ssm(X, metric="cosine"): """Computes the self-similarity matrix of X.""" D = distance.pdist(X, metric=metric) D = distance.squareform(D) for i in range(D.shape[0]): for j in range(D.shape[1]): if np.isnan(D[i, j]): D[i, j] = 0 D /= D.max() return 1 - D
646d9af2134db13b69391817ddfeace0fef1217d
10,210
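A small usage sketch for the `compute_ssm` snippet above (assumes `import numpy as np` and `from scipy.spatial import distance`, which the snippet relies on):

X = np.random.rand(10, 4)  # 10 observations, 4 features
S = compute_ssm(X)
print(S.shape)                       # (10, 10)
print(np.allclose(np.diag(S), 1.0))  # True -- every row is maximally similar to itself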
def escape(instruction):
    """
    Escape used dot graph characters in given instruction so they will be
    displayed correctly.
    """
    instruction = instruction.replace('<', r'\<')
    instruction = instruction.replace('>', r'\>')
    instruction = instruction.replace('|', r'\|')
    instruction = instruction.replace('{', r'\{')
    instruction = instruction.replace('}', r'\}')
    instruction = instruction.replace(' ', ' ')
    return instruction
936ed1d6c55650bf5f9ce52af8f113a9d466a534
10,211
from collections import namedtuple


def _json_object_hook(d):
    """
    JSON to object helper
    :param d: data
    :return: namedtuple
    """
    keys = []
    for k in d.keys():
        if k[0].isdigit():
            k = 'd_{}'.format(k)
        keys.append(k)
    return namedtuple('X', keys)(*d.values())
a4a534a975d6faff440f66065d4954e2a5a91ff2
10,212
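Usage sketch for the `_json_object_hook` snippet above as a `json.loads` hook:

import json

obj = json.loads('{"name": "sensor", "2nd": 5}', object_hook=_json_object_hook)
print(obj.name, obj.d_2nd)  # sensor 5 -- keys starting with a digit get a 'd_' prefix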
def _fourier_interpolate(x, y):
    """Simple linear interpolation for FFTs"""
    xs = np.linspace(x[0], x[-1], len(x))
    intp = interp1d(x, y, kind="linear", fill_value="extrapolate")
    ys = intp(xs)
    return xs, ys
cfe663b9e261bbaea2ab6fe58366f4ec3726468c
10,213
import hashlib


def compute_hash_json_digest(*args, **kwargs):
    """compute json hash of given args and kwargs and return md5 hex digest"""
    as_json = compute_hash_json(*args, **kwargs)
    return hashlib.md5(as_json).hexdigest()
98dfedb000e2780dba5007d9fe6abd7a74a43a31
10,214
def hello_world():
    """Print welcome message as the response body."""
    return '{"info": "Refer to internal http://metadata-db for more information"}'
ecb2208053e4ff530bcc0dcc117172449a51afbd
10,215
from datetime import datetime

import google.protobuf.timestamp_pb2


def build_timestamp(timestamp=None) -> google.protobuf.timestamp_pb2.Timestamp:
    """Convert Python datetime to Protobuf Timestamp"""
    # https://github.com/protocolbuffers/protobuf/issues/3986
    proto_timestamp = google.protobuf.timestamp_pb2.Timestamp()
    # FromDatetime mutates in place and returns None, so return the message itself
    proto_timestamp.FromDatetime(timestamp or datetime.utcnow())
    return proto_timestamp
ae2278b66c200f007240ca5f683a60ebc1ebddf2
10,217
from six.moves import cPickle  # assumed import; plain `import cPickle` on Python 2 also works


def read_blosum():
    """Read blosum dict and delete some keys and values."""
    with open('./psiblast/blosum62.pkl', 'rb') as f:
        blosum_dict = cPickle.load(f)
    temp = blosum_dict.pop('*')
    temp = blosum_dict.pop('B')
    temp = blosum_dict.pop('Z')
    temp = blosum_dict.pop('X')
    temp = blosum_dict.pop('alphas')
    for key in blosum_dict:
        for i in range(4):
            temp = blosum_dict[key].pop()
    return blosum_dict
ddbf71c03e05bd156ad688a9fe9692da1d0a3dc4
10,219
from typing import List, Tuple


def parse_spans_bio_with_errors(seq: List[str]) -> Tuple[List[Span], List[Error]]:
    """Parse a sequence of BIO labels into a list of spans but return any violations of the encoding scheme.

    Note:
        In the case where labels violate the span encoding scheme, for example the
        tag is a new type (like ``I-ORG``) in the middle of a span of another type
        (like ``PER``) without a proper starting token (``B-ORG``) we will finish
        the initial span and start a new one, resulting in two spans. This follows
        the ``conlleval.pl`` script.

    Note:
        Spans are returned sorted by their starting location. Due to the fact that
        spans are not allowed to overlap there is no resolution policy when two
        spans have the same starting location.

    Note:
        Errors are returned sorted by the location where the violation occurred.
        In the case a single transition triggered multiple errors they are sorted
        lexically based on the error type.

    Args:
        seq: The sequence of labels

    Returns:
        A list of spans and a list of errors.
    """
    errors = []
    spans = []
    # This tracks the type of the span we are building out
    span = None
    # This tracks the tokens of the span we are building out
    tokens = []
    for i, s in enumerate(seq):
        func = extract_function(s)
        _type = extract_type(s)
        # A `B` ends a span and starts a new one
        if func == BIO.BEGIN:
            # Save out the old span
            if span is not None:
                spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
            # Start the new span
            span = _type
            tokens = [i]
        # An `I` will continue a span when types match and start a new one otherwise.
        elif func == BIO.INSIDE:
            # A span is already being built
            if span is not None:
                # The types match so we just add to the current span
                if span == _type:
                    tokens.append(i)
                # Types mismatch so create a new span
                else:
                    # Log error from type mismatch
                    LOGGER.warning("Illegal Label: I doesn't match previous token at %d", i)
                    errors.append(Error(i, "Illegal Transition", s, safe_get(seq, i - 1), safe_get(seq, i + 1)))
                    # Save out the previous span
                    spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
                    # Start a new span
                    span = _type
                    tokens = [i]
            # No span was being built so start a new one with this I
            else:
                # Log error from starting with I
                LOGGER.warning("Illegal Label: starting a span with `I` at %d", i)
                errors.append(Error(i, "Illegal Start", s, safe_get(seq, i - 1), safe_get(seq, i + 1)))
                span = _type
                tokens = [i]
        # An `O` will cut off a span being built out.
        else:
            if span is not None:
                spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
            # Set so no span is being built
            span = None
            tokens = []
    # If we fell off the end, save the entity that we were making.
    if span is not None:
        spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
    return sort_spans(spans), sort_errors(errors)
6cea777cfb8bf96325f2695af2c48cc22c4884cf
10,220
from difflib import SequenceMatcher
from typing import Sequence, Tuple


def find_best_similar_match(i1: int, i2: int, j1: int, j2: int,
                            a: Sequence, b: Sequence,
                            sm: SequenceMatcher = None) -> Tuple[int, int, float]:
    """
    Finds most similar pair of elements in sequences bounded by indexes a[i1:i2], b[j1:j2].

    :param i1: starting index in "a" sequence.
    :param i2: ending index in "a" sequence.
    :param j1: starting index in "b" sequence.
    :param j2: ending index in "b" sequence.
    :param a: first sequence.
    :param b: second sequence.
    :param sm: SequenceMatcher object. Creates new difflib.SequenceMatcher instance if not passed.
    :return: Tuple (best_i, best_j, best_ratio) where:
        best_i: is index of most similar element in sequence "a".
        best_j: is index of most similar element in sequence "b".
        best_ratio: similarity ratio of elements a[best_i] and b[best_j], where 1.0 means
            elements are identical and 0.0 means that elements are completely different.
    """
    best_ratio = 0.0
    best_i = best_j = None
    if not sm:
        sm = SequenceMatcher()
    for i in range(i1, i2):
        sm.set_seq1(a[i])
        for j in range(j1, j2):
            if a[i] == b[j]:
                continue
            sm.set_seq2(b[j])
            if sm.real_quick_ratio() > best_ratio and sm.quick_ratio() > best_ratio \
                    and sm.ratio() > best_ratio:
                best_i = i
                best_j = j
                best_ratio = sm.ratio()
    return best_i, best_j, best_ratio
ca6e73c2315e2d2419b631cb505131f3daabea4b
10,221
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
    """
    Basic conv transpose block for Encoder-Decoder upsampling.
    Apply successively Transposed Convolution, BatchNormalization, ReLU nonlinearity
    """
    net = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)
    net = tf.nn.relu(slim.batch_norm(net, fused=True))
    return net
787104a3015bd901105383b203551573f9f07fcb
10,222
def make_random_password(self, length=10,
                         allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                       'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                       '23456789'):
    """
    Generate a random password with the given length and given
    allowed_chars. The default value of allowed_chars does not have "I" or
    "O" or letters and digits that look similar -- just to avoid confusion.
    """
    return get_random_string(length, allowed_chars)
be155b2537b062a396ed1d5aed6367857b21d49e
10,224
def autocov_vector(x, nlags=None):
    r"""
    This method computes the following function

    .. math::

        R_{xx}(k) = E\{ x(t)x^{*}(t-k) \} = E\{ x(t+k)x^{*}(t) \}

        k \in \{0, 1, ..., nlags-1\}

    (* := conjugate transpose)

    Note: this is related to the other commonly used definition for vector autocovariance

    .. math::

        R_{xx}^{(2)}(k) = E\{ x(t-k)x^{*}(t) \} = R_{xx}^{*}(k) = R_{xx}(-k)

    Parameters
    ----------
    x : ndarray (nc, N)
    nlags : int, optional
        compute lags for k in {0, ..., nlags-1}

    Returns
    -------
    rxx : ndarray (nc, nc, nlags)
    """
    return crosscov_vector(x, x, nlags=nlags)
8725b2695b51c014e8234605bc5e64ad1ca0c26b
10,225
def sequence_masking(x, mask, mode=0, axis=None, heads=1):
    """Conditionally mask a sequence tensor.

    mask: a 0-1 matrix of shape (batch_size, sequence);
    mode: if 0, multiply x by the mask directly;
          if 1, subtract a large positive number from the padded positions.
    axis: the axis of the sequence dimension, defaults to 1;
    heads: how many times the batch dimension has to be repeated.
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if heads != 1:
            mask = K.expand_dims(mask, 1)
            mask = K.tile(mask, (1, heads, 1))
            mask = K.reshape(mask, (-1, K.shape(mask)[2]))
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, "axis must be greater than 0"
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12
ac7e0da24eca87ab3510c1c274f0caeb2d527816
10,226
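A small sketch of the `sequence_masking` snippet above; `K` is the Keras backend the snippet assumes (e.g. `from tensorflow.keras import backend as K`):

x = K.ones((2, 3, 4))                          # batch of 2, sequence length 3, feature dim 4
mask = K.constant([[1, 1, 0], [1, 0, 0]])      # 0 marks padded steps
y = sequence_masking(x, mask, mode=0, axis=1)  # padded steps are zeroed out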
def __long_description() -> str:
    """Returns project long description."""
    return f"{__readme()}\n\n{__changelog()}"
53260637e4e4f1e59e6a67238577fb6969e7769c
10,228
def captains_draft(path=None, config=None):
    """Similar to captains mode, with a pool of 27 heroes and only 3 bans per team"""
    game = _default_game(path, config=config)
    game.options.game_mode = int(DOTA_GameMode.DOTA_GAMEMODE_CD)
    return game
05af49626cff0827ff1b78ffb1da082bba160d29
10,229
def create(width, height, pattern=None):
    """Create an image optionally filled with the given pattern.

    :note: You can make no assumptions about the return type; usually it will
        be ImageData or CompressedImageData, but patterns are free to return
        any subclass of AbstractImage.

    :Parameters:
        `width` : int
            Width of image to create
        `height` : int
            Height of image to create
        `pattern` : ImagePattern or None
            Pattern to fill image with. If unspecified, the image will
            initially be transparent.

    :rtype: AbstractImage
    """
    if not pattern:
        pattern = SolidColorImagePattern()
    return pattern.create_image(width, height)
dcd287353c84924afcdd0a56e9b51f00cde7bb85
10,230
import logging


def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
    """Computes conformer.

    Args:
        smile: Smile string.
        max_iter: Maximum number of iterations to perform when optimising MMFF
            force field. If set to <= 0, energy optimisation is not performed.

    Returns:
        A tuple containing index, fingerprint and conformer.

    Raises:
        RuntimeError: If unable to convert smile string to RDKit mol.
    """
    mol = rdkit.Chem.MolFromSmiles(smile)
    if not mol:
        raise RuntimeError('Unable to convert smile to molecule: %s' % smile)
    conformer_failed = False
    try:
        mol = generate_conformers(
            mol,
            max_num_conformers=1,
            random_seed=45,
            prune_rms_thresh=0.01,
            max_iter=max_iter)
    except IOError as e:
        logging.exception('Failed to generate conformers for %s . IOError %s.', smile, e)
        conformer_failed = True
    except ValueError:
        logging.error('Failed to generate conformers for %s . ValueError', smile)
        conformer_failed = True
    except:  # pylint: disable=bare-except
        logging.error('Failed to generate conformers for %s.', smile)
        conformer_failed = True

    atom_features_list = []
    conformer = None if conformer_failed else list(mol.GetConformers())[0]
    for atom in mol.GetAtoms():
        atom_features_list.append(atom_to_feature_vector(atom, conformer))
    conformer_features = np.array(atom_features_list, dtype=np.float32)
    return conformer_features
0b923d7616741312d8ac129d7c7c99081a2c3f97
10,231
def get_api_key():
    """Load API key."""
    with open('mailgun_api_key.txt', 'r') as api_key_file:
        api_key = api_key_file.read()
    return api_key.strip()
55c87d15d616f0f6dfbc727253c2222128b63560
10,232
def bitserial_conv2d_strategy_hls(attrs, inputs, out_type, target):
    """bitserial_conv2d hls strategy"""
    strategy = _op.OpStrategy()
    layout = attrs.data_layout
    if layout == "NCHW":
        strategy.add_implementation(
            wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
            wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nchw),
            name="bitserial_conv2d_nchw.hls",
        )
    elif layout == "NHWC":
        strategy.add_implementation(
            wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
            wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nhwc),
            name="bitserial_conv2d_nhwc.hls",
        )
    else:
        raise ValueError("Data layout {} not supported.".format(layout))
    return strategy
f009b1f7ac073573877b1ddab616868cdf1d42c7
10,233
def get_fpga_bypass_mode(serverid):
    """Read back FPGA bypass mode setting"""
    try:
        interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "mode"])
        return parse_get_fpga_bypass_mode(interface, "mode")
    except Exception as e:
        return set_failure_dict("get_fpga_bypass_mode() Exception {0}".format(e),
                                completion_code.failure)
b572a372c6c73bb0f65686b3235e3362d31e8655
10,235
def lookup_complement(binding):
    """
    Extracts a complement link from the scope of the given binding.

    Returns an instance of :class:`htsql.core.tr.binding.Recipe`
    or ``None`` if a complement link is not found.

    `binding` (:class:`htsql.core.tr.binding.Binding`)
        A binding node.
    """
    probe = ComplementProbe()
    return lookup(binding, probe)
104f7b0139a8ca6390cb90dc10529d3be9a723ea
10,236
import itertools


def flatten(colours):
    """Flatten the cubular array into one long list."""
    return list(itertools.chain.from_iterable(itertools.chain.from_iterable(colours)))
41576ef947354c30d1995fefdd30ad86bddbfe6f
10,237
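Quick check of the `flatten` snippet above on a 2x2x2 "cube":

colours = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
print(flatten(colours))  # [1, 2, 3, 4, 5, 6, 7, 8]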
import numpy


def create_word_search_board(number: int):
    """
    This function creates a numpy array of zeros, with dimensions of
    number x number, which is set by the user. The array is then iterated
    through, and zeros are replaced with -1's to avoid confusion with the
    alphabet (A) beginning at 0.
    """
    board = numpy.zeros((number, number))
    for i in range(len(board)):
        for x in range(number):
            board[i][x] = -1
    return board
31f22d56c947f61840ba87d028eb7de275d33cc9
10,239
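Usage sketch for the `create_word_search_board` snippet above:

board = create_word_search_board(3)
print(board)
# [[-1. -1. -1.]
#  [-1. -1. -1.]
#  [-1. -1. -1.]]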
def get_parent_choices(menu, menu_item=None):
    """
    Returns a flat list of tuples (possible_parent.pk, possible_parent.caption_with_spacer).
    If 'menu_item' is not given or None, returns every item of the menu. If given, intentionally
    omits it and its descendants in the list.
    """
    def get_flat_tuples(menu_item, excepted_item=None):
        if menu_item == excepted_item:
            return []
        else:
            choices = [(menu_item.pk, mark_safe(menu_item.caption_with_spacer()))]
            if menu_item.has_children():
                for child in menu_item.children():
                    choices += get_flat_tuples(child, excepted_item)
            return choices

    return get_flat_tuples(menu.root_item, menu_item)
c88ca93f7e8a7907425a51323ba53bb75bdf29c2
10,240
def _update_jacobian(state, jac):
    """
    we update the jacobian using J(t_{n+1}, y^0_{n+1}) following the scipy bdf
    implementation rather than J(t_n, y_n) as per [1]
    """
    J = jac(state.y0, state.t + state.h)
    n_jacobian_evals = state.n_jacobian_evals + 1
    LU = jax.scipy.linalg.lu_factor(state.M - state.c * J)
    n_lu_decompositions = state.n_lu_decompositions + 1
    return state._replace(
        J=J,
        n_jacobian_evals=n_jacobian_evals,
        LU=LU,
        n_lu_decompositions=n_lu_decompositions,
    )
31570ad29dca3ee01281819865e6efe1aec4050d
10,241
from typing import List, Tuple


def reduce_pad(sess: tf.Session, op_tensor_tuple: Tuple[Op, List[tf.Tensor]], _) \
        -> (str, tf.Operation, tf.Operation):
    """
    Pad module reducer
    :param sess: current tf session
    :param op_tensor_tuple: tuple containing the op to reduce, and a list of input tensors to the op
    """
    name = "reduced_" + op_tensor_tuple[0].dotted_name
    pad_op = op_tensor_tuple[0].get_module()

    # Get padding tensor dimensions
    # Padding dimension information is captured in an input tensor to the pad op, index 1 of pad op
    # inputs. Dimensions of this tensor are always (N, 2), where N is the dimensionality of the input
    # tensor coming into pad. The value of padding[N][0] gives the amount to pad in dimension N prior
    # to the contents of the input to pad, while padding[N][1] gives the amount to pad in dimension N
    # after the contents of the input.
    # Currently we do not support reducing a pad op that modifies the channel dimension, which is the
    # last dimension, indexed by -1 below. So check to make sure that indices [-1][0] and [-1][1]
    # remain 0 (no padding).
    padding_tensor_eval = sess.run(pad_op.inputs[1])
    if padding_tensor_eval[-1][0] != 0 or padding_tensor_eval[-1][1] != 0:
        raise NotImplementedError("Attempting to reduce pad operation that modifies channel size, "
                                  "not supported.")
    new_padding_tensor = tf.constant(padding_tensor_eval)  # No need to actually modify padding tensor

    # Get constant value for padding
    # If pad op takes a non default constant value (default = 0), it appears as a third input tensor
    # to pad op, index 2
    const_val = 0
    if len(pad_op.inputs) > 2:
        const_val = sess.run(pad_op.inputs[2])

    # Get mode
    # Mode can be 'CONSTANT', 'SYMMETRIC', or 'REFLECT'. 'CONSTANT' is default, and will not appear
    # as a mode attribute if it is the case.
    try:
        mode = pad_op.get_attr('mode')
        mode = mode.decode('utf-8')
    except ValueError:
        mode = 'CONSTANT'

    new_tensor = tf.pad(op_tensor_tuple[1][0],
                        new_padding_tensor,
                        constant_values=const_val,
                        mode=mode,
                        name=name)
    module = sess.graph.get_operation_by_name(name)

    return name, new_tensor.op, module
29d7e8daf85a9fe8fee118fd5ec5dc00018120a9
10,242
def parse_fastq(fh):
    """Parse reads from a FASTQ filehandle. For each read, we return a name,
    nucleotide-string, quality-string triple."""
    reads = []
    while True:
        first_line = fh.readline()
        if len(first_line) == 0:
            break  # end of file
        name = first_line[1:].rstrip()
        seq = fh.readline().rstrip()
        fh.readline()  # ignore line starting with +
        qual = fh.readline().rstrip()
        reads.append((name, seq, qual))
    return reads
d33d3efebdd1c5f61e25397328c6b0412f1911dd
10,243
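A quick check of the `parse_fastq` snippet above with an in-memory FASTQ record:

import io

fh = io.StringIO("@read1\nACGT\n+\nIIII\n")
print(parse_fastq(fh))  # [('read1', 'ACGT', 'IIII')]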
def minhash_256(features):
    # type: (List[int]) -> bytes
    """
    Create 256-bit minimum hash digest.

    :param List[int] features: List of integer features
    :return: 256-bit binary from the least significant bits of the minhash values
    :rtype: bytes
    """
    return compress(minhash(features), 4)
1dba3d02dd05bfd2358211fa97d99ce136cc198d
10,244
def coalesce(*values):
    """Returns the first not-None argument, or None"""
    return next((v for v in values if v is not None), None)
245177f43962b4c03c2347725a2e87f8eb5dc08a
10,245
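Quick check of the `coalesce` snippet above; note that falsy-but-not-None values are returned:

print(coalesce(None, "", 0, "x"))  # '' (empty string is not None)
print(coalesce(None, None))        # None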
from mitsuba.core.xml import load_string


def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals):
    """Tests the OBJ & PLY loaders with combinations of vertex / face normals,
    presence and absence of UVs, etc.
    """
    def test():
        shape = load_string("""
            <shape type="{0}" version="2.0.0">
                <string name="filename" value="resources/data/tests/{0}/rectangle_{1}.{0}" />
                <boolean name="face_normals" value="{2}" />
            </shape>
        """.format(mesh_format, features, str(face_normals).lower()))
        assert shape.has_vertex_normals() == (not face_normals)

        positions = shape.vertex_positions_buffer()
        normals = shape.vertex_normals_buffer()
        texcoords = shape.vertex_texcoords_buffer()
        faces = shape.faces_buffer()

        (v0, v2, v3) = [positions[i * 3:(i + 1) * 3] for i in [0, 2, 3]]

        assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3)
        assert ek.allclose(v2, [ 2.85, 0.0,  0.599999], atol=1e-3)
        assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3)

        if 'uv' in features:
            assert shape.has_vertex_texcoords()
            (uv0, uv2, uv3) = [texcoords[i * 2:(i + 1) * 2] for i in [0, 2, 3]]
            # For OBJs (and .serialized generated from OBJ), UV.y is flipped.
            if mesh_format in ['obj', 'serialized']:
                assert ek.allclose(uv0, [0.950589, 1 - 0.988416], atol=1e-3)
                assert ek.allclose(uv2, [0.025105, 1 - 0.689127], atol=1e-3)
                assert ek.allclose(uv3, [0.950589, 1 - 0.689127], atol=1e-3)
            else:
                assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3)
                assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3)
                assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3)

        if shape.has_vertex_normals():
            for n in [normals[i * 3:(i + 1) * 3] for i in [0, 2, 3]]:
                assert ek.allclose(n, [0.0, 1.0, 0.0])

    return fresolver_append_path(test)()
a0117fe48b53e448181014e006ce13368c777d90
10,246
import torch def euc_reflection(x, a): """ Euclidean reflection (also hyperbolic) of x Along the geodesic that goes through a and the origin (straight line) """ xTa = torch.sum(x * a, dim=-1, keepdim=True) norm_a_sq = torch.sum(a ** 2, dim=-1, keepdim=True).clamp_min(MIN_NORM) proj = xTa * a / norm_a_sq return 2 * proj - x
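# Quick check: reflecting across the line through `a` preserves the norm.
# MIN_NORM is a module constant not shown above; a small epsilon is assumed.
MIN_NORM = 1e-15

x = torch.tensor([1.0, 2.0])
a = torch.tensor([0.0, 1.0])  # reflect across the y-axis line
r = euc_reflection(x, a)
assert torch.allclose(r, torch.tensor([-1.0, 2.0]))
assert torch.allclose(x.norm(), r.norm())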
83b5a8559e783b24d36a18fb30059dce82bf9cf7
10,247
try:
    import httplib  # Python 2
except ImportError:
    import http.client as httplib  # Python 3

def is_online():
    """Check if the host is online by issuing a HEAD request to google.com"""
    conn = httplib.HTTPSConnection("www.google.com", timeout=1)
    try:
        conn.request("HEAD", "/")
        return True
    except Exception:
        return False
    finally:
        conn.close()
4dd9d2050c94674ab60e0dfbcfa0c713915aa2f3
10,248
def text_value(s):
    r"""Convert a raw Text property value to the string it represents.

    Returns an 8-bit string, in the encoding of the original SGF string.

    This interprets escape characters, and does whitespace mapping:

    - linebreak (LF, CR, LFCR, or CRLF) is converted to \n
    - any other whitespace character is replaced by a space
    - backslash followed by linebreak disappears
    - other backslashes disappear (but double-backslash -> single-backslash)

    """
    s = _newline_re.sub(b"\n", s)
    s = s.translate(_whitespace_table)
    is_escaped = False
    result = []
    for chunk in _chunk_re.findall(s):
        if is_escaped:
            if chunk != b"\n":
                result.append(chunk)
            is_escaped = False
        elif chunk == b"\\":
            is_escaped = True
        else:
            result.append(chunk)
    return b"".join(result)
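# Hedged usage sketch for text_value: the module-level helpers below
# (_newline_re, _whitespace_table, _chunk_re) are not shown in the original,
# so these definitions are assumptions modeled on typical SGF parsers.
import re

_newline_re = re.compile(br"\n\r|\r\n|\n|\r")
_whitespace_table = bytes.maketrans(b"\t\f\v", b"   ")
_chunk_re = re.compile(br"\\|\n|[^\\\n]+")

assert text_value(b"ab\\\ncd") == b"abcd"  # backslash + linebreak disappears
assert text_value(b"a\\\\b") == b"a\\b"    # double backslash -> single
assert text_value(b"a\r\nb") == b"a\nb"    # CRLF normalized to \n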
24d40367dbefcfbdd0420eb466cf6d09657b2768
10,249
def modifica_immobile_pw():
    """Receive the ID of the property (Immobile) to modify, and update one
    attribute chosen by the user.
    """
    s = input("Do you want the list of properties to pick the property ID to modify? (Y/N) ")
    if s.lower() in ("s", "y"):
        stampa_immobili_pw()
    s = input("Give me the property ID to modify - ")
    immo = Immobile.select().where(Immobile.id == int(s)).get()
    scel = input("What do you want to modify?\ni=owner ID -\nd=address -\np=price -\nc=energy class ")
    if scel == "i":
        # immo is a single model instance here (fetched via .get()), not a list
        id_cliente = input("Give me the new client ID of the owner - ")
        immo.cliente_id = int(id_cliente)
    elif scel == "d":
        new_indirizzo = input("Give me the new address of the property - ")
        immo.indirizzo = new_indirizzo
    elif scel == "p":
        new_prezzo = input("Give me the new price of the property - ")
        immo.prezzo = int(new_prezzo)
    elif scel == "c":
        new_classe = input("Give me the new energy class of the property - ")
        immo.classe_energ = new_classe
    immo.save()
    return True
99905d61d91178092dba8860265b2034b3f8430b
10,250
def hpat_pandas_series_len(self): """ Pandas Series operator :func:`len` implementation .. only:: developer Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_len Parameters ---------- series: :class:`pandas.Series` Returns ------- :obj:`int` number of items in the object """ _func_name = 'Operator len().' if not isinstance(self, SeriesType): raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self)) def hpat_pandas_series_len_impl(self): return len(self._data) return hpat_pandas_series_len_impl
57bdd2a7f7ae54861943fb44f3bc51f1f6544911
10,251
from typing import List

import numpy as np

def arrays_not_same_size(inputs: List[np.ndarray]) -> bool:
    """Validates that all input arrays are the same size.

    Args:
        inputs (List[np.ndarray]): Input arrays to validate

    Returns:
        True if the arrays are *not* all the same size, False if they all match
    """
    shapes = [i.shape for i in inputs]
    shp_first = shapes[0]
    shp_rest = shapes[1:]
    return not np.array_equiv(shp_first, shp_rest)
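# Example usage (note the name: True means the sizes differ):
a, b = np.zeros((2, 3)), np.zeros((2, 3))
assert arrays_not_same_size([a, b]) is False
assert arrays_not_same_size([a, np.zeros((4, 3))]) is True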
8b9988f49d766bc7a27b79cf6495182e98a8fe18
10,252
from vtk import vtkPNGReader, vtkPNMReader, vtkJPEGReader, vtkTIFFReader

def GetReaderForFile(filename):
    """ Given a filename, return a VTK image reader that can read it,
    or None if no reader recognizes the file """
    r = vtkPNGReader()
    if not r.CanReadFile(filename):
        r = vtkPNMReader()
        if not r.CanReadFile(filename):
            r = vtkJPEGReader()
            if not r.CanReadFile(filename):
                r = vtkTIFFReader()
                if not r.CanReadFile(filename):
                    return None
    r.SetFileName(filename)
    return r
f574417df44f8a43277e62967ec6fd4c986fa85a
10,253
import plotly.express as px
import plotly.graph_objects as go

def build_figure_nn(df, non_private, semantic):
    """
    Build an accuracy-vs-train-size line chart (one line per epsilon) from a
    dataframe filtered to a single semantic and a single model, overlaying
    the non-private results and the naive baseline.
    """
    l = df.query("epsilon > 0").sort_values(["train_size", "epsilon"])
    naive, low, high = get_plot_bounds(df)
    fig = px.line(
        l,
        x="train_size",
        y="accuracy",
        range_y=[low, high],
        color="epsilon",
        hover_data=["n_blocks", "delta", "noise"],
        title=f"{list(l['task'])[0]} {list(l['model'])[0]} {semantic} accuracy",
        log_y=False,
    ).update_traces(mode="lines+markers")
    fig.add_trace(
        go.Scatter(
            x=non_private.sort_values("train_size")["train_size"],
            y=non_private.sort_values("train_size")["accuracy"],
            mode="lines+markers",
            name="Non private",
        )
    )
    fig.add_trace(
        go.Scatter(
            x=l["train_size"],
            y=[naive] * len(l),
            mode="lines",
            name="Naive baseline",
        )
    )
    return fig
5eab366e20eaec721d7155d82e42d9222cacd3b5
10,254
import numpy as np

def get_incomplete_sample_nrs(df):
    """ Return a mapping of topology name -> set of sample numbers for which
    at least one algorithm result is missing """
    topology_incomplete_sample_nr_map = dict()
    n_samples = df.loc[df['sample_idx'].idxmax()]['sample_idx'] + 1
    for ilp_method in np.unique(df['algorithm_complete']):
        dfx = df[df['algorithm_complete'] == ilp_method]
        dfg_tops = dfx.groupby(by='topology_name')
        for key, group in dfg_tops:
            if n_samples > group.shape[0]:
                if key not in topology_incomplete_sample_nr_map:
                    topology_incomplete_sample_nr_map[key] = set()
                for s_nr in range(n_samples):
                    if s_nr not in list(group['sample_idx']):
                        topology_incomplete_sample_nr_map[key].add(s_nr)
    return topology_incomplete_sample_nr_map
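# Small synthetic example (column names as assumed by the function):
# topology "top_b" is missing sample 1, so it is reported.
import pandas as pd

df = pd.DataFrame({
    "sample_idx": [0, 1, 0],
    "algorithm_complete": ["ilp"] * 3,
    "topology_name": ["top_a", "top_a", "top_b"],
})
print(get_incomplete_sample_nrs(df))  # {'top_b': {1}}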
2d816d80bb2f0c2686780ca49d0c01e89c69e7b5
10,255
from typing import Optional

from pysam import AlignedSegment

def _read_pos_at_ref_pos(rec: AlignedSegment,
                         ref_pos: int,
                         previous: Optional[bool] = None) -> Optional[int]:
    """
    Returns the read or query position at the reference position.

    If the reference position is not within the span of reference positions to which the
    read is aligned an exception will be raised. If the reference position is within the span
    but is not aligned (i.e. it is deleted in the read) behavior is controlled by the
    "previous" argument.

    Args:
        rec: the AlignedSegment within which to find the read position
        ref_pos: the reference position to be found
        previous: Controls behavior when the reference position is not aligned to any
            read position. True indicates to return the previous read position, False
            indicates to return the next read position and None indicates to return None.

    Returns:
        The read position at the reference position, or None.
    """
    if ref_pos < rec.reference_start or ref_pos >= rec.reference_end:
        raise ValueError(f"{ref_pos} is not within the reference span for read {rec.query_name}")

    pairs = rec.get_aligned_pairs()
    index = 0
    read_pos = None
    for read, ref in pairs:
        if ref == ref_pos:
            read_pos = read
            break
        else:
            index += 1
    # Compare against None explicitly: read position 0 is falsy but valid.
    if read_pos is None and previous is not None:
        if previous:
            while read_pos is None and index > 0:
                index -= 1
                read_pos = pairs[index][0]
        else:
            while read_pos is None and index < len(pairs):
                read_pos = pairs[index][0]
                index += 1
    return read_pos
51270a1c1a5f69b179e3623824632443775ec9c7
10,256
from astropy.io import fits as pf
import numpy as np
import logging

def load_gtis(fits_file, gtistring=None):
    """Load GTIs from the HDU of fits_file named by gtistring (default 'GTI')."""
    gtistring = _assign_value_if_none(gtistring, 'GTI')
    logging.info("Loading GTIs from file %s", fits_file)
    lchdulist = pf.open(fits_file, checksum=True)
    lchdulist.verify('warn')

    gtitable = lchdulist[gtistring].data
    gti_list = np.array([[a, b]
                         for a, b in zip(gtitable.field('START'),
                                         gtitable.field('STOP'))],
                        dtype=np.longdouble)
    lchdulist.close()
    return gti_list
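# Round-trip sketch: write a minimal GTI extension with astropy and load it
# back. `_assign_value_if_none` is a module helper not shown above; the
# definition here is an assumption matching its apparent contract.
def _assign_value_if_none(value, default):
    return default if value is None else value

start, stop = np.array([0.0, 100.0]), np.array([50.0, 150.0])
cols = pf.ColDefs([pf.Column(name="START", format="D", array=start),
                   pf.Column(name="STOP", format="D", array=stop)])
hdu = pf.BinTableHDU.from_columns(cols, name="GTI")
pf.HDUList([pf.PrimaryHDU(), hdu]).writeto("gti_demo.fits", overwrite=True)
print(load_gtis("gti_demo.fits"))  # [[  0.  50.] [100. 150.]]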
c1a8019d052ce437680e6505e65134a5ed66a1a3
10,257
import requests
import demjson
import pandas as pd

def macro_australia_unemployment_rate():
    """
    Eastmoney - economic data - Australia - unemployment rate
    http://data.eastmoney.com/cjsj/foreign_5_2.html
    :return: unemployment rate
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    params = {
        "type": "GJZB",
        "sty": "HKZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "5",
        "stat": "2",
        "_": "1625474966006",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    data_json = demjson.decode(data_text[1:-1])
    temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
    # Column names are kept in Chinese to preserve the public API:
    # 时间 = date, 前值 = previous value, 现值 = current value, 发布日期 = release date
    temp_df.columns = [
        "时间",
        "前值",
        "现值",
        "发布日期",
    ]
    temp_df["前值"] = pd.to_numeric(temp_df["前值"])
    temp_df["现值"] = pd.to_numeric(temp_df["现值"])
    return temp_df
260debcfaf342d08acacfe034da51b3d3162393e
10,258
from typing import List
import math

import numpy as np
import svgpathtools as svg  # assumed alias: path elements (Line, etc.) come from svgpathtools

def _convert_flattened_paths(
    paths: List,
    quantization: float,
    scale_x: float,
    scale_y: float,
    offset_x: float,
    offset_y: float,
    simplify: bool,
) -> "LineCollection":
    """Convert a list of FlattenedPaths to a :class:`LineCollection`.

    Args:
        paths: list of FlattenedPaths
        quantization: maximum length of linear elements to approximate curve paths
        scale_x, scale_y: scale factor to apply
        offset_x, offset_y: offset to apply
        simplify: should Shapely's simplify be run

    Returns:
        new :class:`LineCollection` instance containing the converted geometries
    """

    lc = LineCollection()
    for result in paths:
        # Here we load the sub-parts of the path element. If sub-parts are connected,
        # we merge them into a single line (e.g. a line string). If there are disconnections
        # in the path (e.g. multiple "M" commands), we create several lines.
        sub_paths: List[List[complex]] = []
        for elem in result:
            if isinstance(elem, svg.Line):
                coords = [elem.start, elem.end]
            else:
                # This is a curved element that we approximate with small segments
                step = int(math.ceil(elem.length() / quantization))
                coords = [elem.start]
                coords.extend(elem.point((i + 1) / step) for i in range(step - 1))
                coords.append(elem.end)

            # merge to last sub path if first coordinates match
            if sub_paths:
                if sub_paths[-1][-1] == coords[0]:
                    sub_paths[-1].extend(coords[1:])
                else:
                    sub_paths.append(coords)
            else:
                sub_paths.append(coords)

        for sub_path in sub_paths:
            path = np.array(sub_path)

            # transform
            path += offset_x + 1j * offset_y
            path.real *= scale_x
            path.imag *= scale_y

            lc.append(path)

    if simplify:
        mls = lc.as_mls()
        lc = LineCollection(mls.simplify(tolerance=quantization))

    return lc
876421cd7f89dc5f3d64357e76f302c633e41ba7
10,259
def _CustomSetAttr(self, sAttr, oValue): """ Our setattr replacement for DispatchBaseClass. """ try: return _g_dCOMForward['setattr'](self, ComifyName(sAttr), oValue) except AttributeError: return _g_dCOMForward['setattr'](self, sAttr, oValue)
8a0fea986531aec66564bafcc679fed3b8631c10
10,260
def reduce_to_contemporaneous(ts): """ Simplify the ts to only the contemporaneous samples, and return the new ts + node map """ samples = ts.samples() contmpr_samples = samples[ts.tables.nodes.time[samples] == 0] return ts.simplify( contmpr_samples, map_nodes=True, keep_unary=True, filter_populations=False, filter_sites=False, record_provenance=False, filter_individuals=False, )
7661a58b6f4b95d5cb4b711db39bb28852151304
10,261
def get_name(tree, from_='name'):
    """
    Get the name (token) of the AST node.

    :param dict tree: AST node
    :rtype: str|None
    """
    if 'name' in tree and isinstance(tree['name'], str):
        return tree['name']
    if 'parts' in tree:
        return djoin(tree['parts'])
    if from_ in tree:
        return get_name(tree[from_])
    return None
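# Example usage with plain-dict AST nodes (node shapes assumed for illustration):
assert get_name({"name": "foo"}) == "foo"
assert get_name({"expr": {"name": "bar"}}, from_="expr") == "bar"
assert get_name({}) is None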
b5f1e97eb570859b01bf9489c6b9d4874511fdcc
10,264
import numpy as n

def pcaTable(labels, vec_mean, vec_std, val_mean, val_std):
    """Build a LaTeX table of PCA component means and standard deviations"""
    header = "\\begin{center}\n\\begin{tabular}{| l |" + " c |"*6 + "}\\cline{2-7}\n"
    header += "\\multicolumn{1}{c|}{} & \\multicolumn{2}{c|}{PC1} & \\multicolumn{2}{c|}{PC2} & \\multicolumn{2}{c|}{PC3} \\\\\\cline{2-7}\n"
    header += "\\multicolumn{1}{c|}{} & $\\mu$ & $\\sigma$ & $\\mu$ & $\\sigma$ & $\\mu$ & $\\sigma$ \\\\\\hline\n"
    # interleave mean/std columns: (PC1 mu, PC1 sigma, PC2 mu, PC2 sigma, ...)
    tt = n.zeros((vec_mean.shape[0], 6))
    tt[:, ::2] = vec_mean
    tt[:, 1::2] = vec_std
    tt_ = n.zeros(6)
    tt_[::2] = val_mean
    tt_[1::2] = val_std
    tab_data = n.vstack((tt, tt_))
    footer = "\\hline\\end{tabular}\n\\end{center}"
    table = header + makeTables(labels, tab_data, True) + footer
    return table
646fc1b5344a716b8f30714f112a477063bf91ce
10,265
def render_reference_page(conn: Connection, reference: str) -> str:
    """Create HTML section that lists all notes that cite the reference."""
    sql = """
        SELECT note, Bibliography.html, Notes.html FROM Citations
            JOIN Notes ON Citations.note = Notes.id
                JOIN Bibliography ON Bibliography.key = Citations.reference
                    WHERE reference = ?
                        ORDER BY note
    """
    notes = []
    text = ""
    for note, _text, html in conn.execute(sql, (reference,)):
        assert not text or text == _text
        text = _text
        notes.append(Note(note, get_section_title(html)))
    section = Elem("section",
                   Elem("h1", '@' + reference[4:]),
                   Elem("p", text),
                   note_list(notes),
                   id=reference,
                   title=reference,
                   **{"class": "level1"})
    return render(section)
6ab73d0d85da28676e7bb3cf42b3304cd0d6ad47
10,266
import logging

def normalize_bridge_id(bridge_id: str):
    """Normalize a bridge identifier."""
    bridge_id = bridge_id.lower()

    # zeroconf: properties['id'], field contains a colon after every 2 chars.
    # (The original checked a literal example string instead of bridge_id,
    # which made the condition vacuous; count the colons in the input itself.)
    if len(bridge_id) == 17 and bridge_id.count(":") == 5:
        return bridge_id.replace(':', '')

    # nupnp: contains 4 extra characters in the middle: "fffe"
    if len(bridge_id) == 16 and bridge_id[6:10] == "fffe":
        return bridge_id[0:6] + bridge_id[-6:]

    # SSDP/UPNP and Hue Bridge API contain the right ID.
    if len(bridge_id) == 12:
        return bridge_id

    logging.getLogger(__name__).warning("Received unexpected bridge id: %s", bridge_id)

    return bridge_id
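# All three wire formats normalize to the same 12-character id:
assert normalize_bridge_id("AA:BB:CC:DD:EE:FF") == "aabbccddeeff"
assert normalize_bridge_id("aabbccfffeddeeff") == "aabbccddeeff"
assert normalize_bridge_id("aabbccddeeff") == "aabbccddeeff"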
5370cc49e4c0272da2a471006bbbf3fd5e5521bf
10,267
import imp

def pyc_file_from_path(path):
    """Given a python source path, locate the .pyc.

    See https://www.python.org/dev/peps/pep-3147/#detecting-pep-3147-availability
    and https://www.python.org/dev/peps/pep-3147/#file-extension-checks
    """
    has3147 = hasattr(imp, 'get_tag')
    if has3147:
        return imp.cache_from_source(path)
    else:
        return path + "c"
459011ca1f07a023b139695cd2368767d46ca396
10,268
def get_bytes_per_data_block(header):
    """Calculates the number of bytes in each 128-sample datablock."""
    N = 128  # number of amplifier samples per data block

    # Each data block contains N amplifier samples.
    bytes_per_block = N * 4  # timestamp data

    bytes_per_block += N * 2 * header['num_amplifier_channels']

    # DC amplifier voltage (absent if the DC-data flag was off)
    if header['dc_amplifier_data_saved'] > 0:
        bytes_per_block += N * 2 * header['num_amplifier_channels']

    # Stimulation data, one per enabled amplifier channel
    bytes_per_block += N * 2 * header['num_amplifier_channels']

    # Board analog inputs are sampled at same rate as amplifiers
    bytes_per_block += N * 2 * header['num_board_adc_channels']

    # Board analog outputs are sampled at same rate as amplifiers
    bytes_per_block += N * 2 * header['num_board_dac_channels']

    # Board digital inputs are sampled at same rate as amplifiers
    if header['num_board_dig_in_channels'] > 0:
        bytes_per_block += N * 2

    # Board digital outputs are sampled at same rate as amplifiers
    if header['num_board_dig_out_channels'] > 0:
        bytes_per_block += N * 2

    return bytes_per_block
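# Worked example with a hypothetical header: 32 amplifier channels, DC data
# saved, stim data, and no board ADC/DAC/digital streams.
header = {
    "num_amplifier_channels": 32,
    "dc_amplifier_data_saved": 1,
    "num_board_adc_channels": 0,
    "num_board_dac_channels": 0,
    "num_board_dig_in_channels": 0,
    "num_board_dig_out_channels": 0,
}
# 128*4 timestamp bytes + 3 * (128*2*32) for amplifier, DC, and stim = 25088
print(get_bytes_per_data_block(header))  # 25088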
524e9015dacaf99042dd1493b24a418fff8c6b04
10,269
def recovered():
    """
    Real Name: Recovered
    Original Eqn: INTEG ( RR, 0)
    Units: Person
    Limits: (None, None)
    Type: component

    """
    return integ_recovered()
1a5133a3cc9231e3f7a90b54557ea9e836975eae
10,270