Dataset schema: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import cv2
import numpy as np


def body_contour(binary_image):
    """Helper function to get the body contour (the contour with the largest area)."""
    # `find_contours` is assumed to be a helper defined elsewhere in the module.
    contours = find_contours(binary_image)
    areas = [cv2.contourArea(cnt) for cnt in contours]
    body_idx = np.argmax(areas)
    return contours[body_idx]
0ccfa7340d492f89c6c0090c296e7aede379754a
8,152
def rule_like(rule, pattern):
    """
    Check if JsonLogic rule matches a certain 'pattern'.

    Pattern follows the same structure as a normal JsonLogic rule
    with the following extensions:
    - '@' element matches anything:
        1 == '@'
        "jsonlogic" == '@'
        [1, 2] == '@'
        {'+': [1, 2]} == '@'
        {'+': [1, 2]} == {'@': [1, 2]}
        {'+': [1, 2]} == {'+': '@'}
        {'+': [1, 2]} == {'+': ['@', '@']}
        {'+': [1, 2]} == {'@': '@'}
    - 'number' element matches any numeric value:
        1 == 'number'
        2.34 == 'number'
        [1, 2] == ['number', 'number']
        {'+': [1, 2]} == {'+': ['number', 'number']}
    - 'string' element matches any string value:
        "name" == 'string'
        {'cat': ["json", "logic"]} == {'cat': ['string', 'string']}
    - 'array' element matches an array of any length:
        [] == 'array'
        [1, 2, 3] == 'array'
        {'+': [1, 2]} == {'+': 'array'}

    Use this method to make sure JsonLogic rule is correctly constructed.
    """
    if pattern == rule:
        return True
    if pattern == '@':
        return True
    if pattern == 'number':
        return _is_numeric(rule)
    if pattern == 'string':
        return _is_string(rule)
    if pattern == "array":
        return _is_array(rule)
    if is_logic(pattern):
        if is_logic(rule):
            # Both pattern and rule are a valid JsonLogic rule, go deeper
            pattern_operator = _get_operator(pattern)
            rule_operator = _get_operator(rule)
            if pattern_operator == '@' or pattern_operator == rule_operator:
                # Operators match, go deeper and try matching values
                return rule_like(
                    _get_values(rule, rule_operator, normalize=False),
                    _get_values(pattern, pattern_operator, normalize=False))
        return False  # All above assumptions failed
    if _is_array(pattern):
        if _is_array(rule):
            # Both pattern and rule are arrays, go deeper
            if len(pattern) == len(rule):
                # Length of pattern and rule arrays are the same,
                # go deeper and try matching each value
                return all(
                    rule_like(rule_elem, pattern_elem)
                    for rule_elem, pattern_elem in zip(rule, pattern))
        return False  # All above assumptions failed
    return False
cd058d799cdee4d548c3e2075e1555ea28e594f1
8,153
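A few spot checks of rule_like, assuming the JsonLogic helpers it references (is_logic, _get_operator, _get_values, _is_numeric, _is_string, _is_array) are in scope; they mirror the patterns listed in the docstring:

assert rule_like(1, '@')
assert rule_like(2.34, 'number')
assert rule_like([1, 2, 3], 'array')
assert rule_like({'+': [1, 2]}, {'+': ['number', 'number']})
assert not rule_like({'+': [1, 2]}, {'-': ['number', 'number']})  # operators differ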
def apt_repo(module, *args):
    """run apt-repo with args and return its output"""
    # make args list to use in concatenation
    args = list(args)
    rc, out, err = module.run_command([APT_REPO_PATH] + args)
    if rc != 0:
        module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
    return out
d4572cb9d586b973d461e4ac33709e582c26dda7
8,154
def get_rasterization_params() -> RasterizationParams:
    """
    Construct the RasterizationParams namedtuple from the static configuration file

    :return: the rasterization parameters
    """
    if cfg is None:
        load_cfg()
    # get rasterization section
    rasterization_dict = cfg[compute_dsm_tag][rasterization_tag]
    rasterization_params = RasterizationParams(*rasterization_dict.values())
    return rasterization_params
9adea0ffd838cbf3425cad5a0ab30bfe92829bc7
8,156
def rain_attenuation_probability(lat, lon, el, hs=None, Ls=None, P0=None):
    """
    The following procedure computes the probability of non-zero rain
    attenuation on a given slant path Pr(Ar > 0).

    Parameters
    ----------
    lat : number, sequence, or numpy.ndarray
        Latitudes of the receiver points
    lon : number, sequence, or numpy.ndarray
        Longitudes of the receiver points
    el : sequence, or number
        Elevation angle (degrees)
    hs : number, sequence, or numpy.ndarray, optional
        Height above mean sea level of the earth station (km). If local data
        for the earth station height above mean sea level is not available,
        an estimate is obtained from the maps of topographic altitude given
        in Recommendation ITU-R P.1511.
    Ls : number, sequence, or numpy.ndarray, optional
        Slant path length from the earth station to the rain height (km). If
        data about the rain height is not available, this value is estimated
        automatically using Recommendation ITU-R P.838
    P0 : number, sequence, or numpy.ndarray, optional
        Probability of rain at the earth station, (0 ≤ P0 ≤ 1)

    Returns
    -------
    p: Quantity
        Probability of rain attenuation on the slant path (%)

    References
    ----------
    [1] Propagation data and prediction methods required for the design of
    Earth-space telecommunication systems:
    https://www.itu.int/dms_pubrec/itu-r/rec/p/R-REC-P.618-12-201507-I!!PDF-E.pdf
    """
    type_output = get_input_type(lat)
    lat = prepare_input_array(lat)
    lon = prepare_input_array(lon)
    lon = np.mod(lon, 360)
    el = prepare_quantity(prepare_input_array(el), u.deg, 'Elevation angle')
    hs = prepare_quantity(
        hs, u.km, 'Height above mean sea level of the earth station')
    Ls = prepare_quantity(
        Ls, u.km, 'Slant path length from the earth station to the rain height')
    P0 = prepare_quantity(P0, u.pct, 'Probability of rain at the earth station')
    val = __model.rain_attenuation_probability(lat, lon, el, hs, Ls, P0)
    return prepare_output_array(val, type_output) * 100 * u.pct
728879dc2b51de813f8e1c83a99a8117883c423f
8,157
import itertools


def largest_prime_factor(num):
    """
    Returns the largest prime factor of num.
    """
    prime_factors = []
    for n in itertools.count(2):
        if n > num:
            break
        if num % n == 0:
            prime_factors.append(n)
            while num % n == 0:
                num = num // n  # integer division avoids float drift on large inputs
    return max(prime_factors)
12100b6cdc2e0553295c1803e699544aa930bbfb
8,159
import numpy as np


def compute_eigenvectors(exx, exy, eyy):
    """
    exx, eyy can be 1d arrays or 2D arrays

    :param exx: strain component, float or 1d array
    :param exy: strain component, float or 1d array
    :param eyy: strain component, float or 1d array
    :rtype: list
    """
    e1, e2 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx))  # eigenvalues
    v00, v01 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx))
    v10, v11 = np.zeros(np.shape(exx)), np.zeros(np.shape(exx))  # eigenvectors
    dshape = np.shape(exx)
    if len(dshape) == 1:
        for i in range(len(exx)):
            [e11, e22, v] = eigenvector_eigenvalue(exx[i], exy[i], eyy[i])
            # convention of this code returns negative eigenvalues compared to my other codes
            e1[i], e2[i] = e11, e22
            v00[i], v10[i] = v[0][0], v[1][0]
            v01[i], v11[i] = v[0][1], v[1][1]
    elif len(dshape) == 2:
        for j in range(dshape[0]):
            for i in range(dshape[1]):
                [e11, e22, v] = eigenvector_eigenvalue(exx[j][i], exy[j][i], eyy[j][i])
                e1[j][i], e2[j][i] = e11, e22
                v00[j][i], v01[j][i] = v[0][0], v[0][1]
                v10[j][i], v11[j][i] = v[1][0], v[1][1]
    return [e1, e2, v00, v01, v10, v11]
524fe0cabeda91ca3086c3e46e88f19a919ff489
8,161
from re import fullmatch


def name_looks_valid(name: str) -> bool:
    """
    Guesses if a name field is valid.

    Valid is defined as being at least two words, each beginning with a
    capital letter and ending with a lowercase letter.

    :param name: the name to check
    :return: whether this name is considered valid
    """
    existing_parts = name.split()
    parts_that_look_like_names = list(
        filter(lambda part: fullmatch(r"[A-Z](?:[A-Za-z-']+)?[a-z]", part), existing_parts)
    )
    if len(existing_parts) < 2 or len(parts_that_look_like_names) < 2:
        return False
    if len(parts_that_look_like_names) > 2 or len(existing_parts) == len(parts_that_look_like_names):
        return True
    return False
3f980ac4db9623c599733794253e6563abe698cb
8,162
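A minimal usage sketch for name_looks_valid above; the expected results follow directly from the two-word, capitalized-word rule:

assert name_looks_valid("John Smith")
assert name_looks_valid("John Ronald Reuel Tolkien")
assert not name_looks_valid("John")        # only one word
assert not name_looks_valid("john smith")  # words must start with a capital letter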
from pathlib import Path

import geopandas as gpd
from tqdm import tqdm


def convert_polygons_to_lines(src_polygons, dst_lines, crs=None, add_allone_col=False):
    """Convert polygons to lines.

    Arguments:
        src_polygons {path to geopandas-readable file} -- Filename of the
            polygon vector dataset to be converted to lines.
        dst_lines {[type]} -- Filename where to write the line vector dataset to.

    Keyword Arguments:
        crs {dict or str} -- Output projection parameters as string or in
            dictionary format. This will reproject the data when a crs is
            given (not {None}) (default: {None}).
        add_allone_col {bool} -- Add an additional attribute column with all
            ones. This is useful, e.g. in case you want to use the lines with
            gdal_proximity afterwards (default: {False}).

    Returns:
        int -- Exit code 0 if successful.
    """
    gdf = gpd.read_file(src_polygons)
    geom_coords = gdf["geometry"]  # featureset.get(5)["geometry"]["coordinates"]
    lines = []
    row_ids = []
    for i_row, pol in tqdm(enumerate(geom_coords), total=len(geom_coords)):
        boundary = pol.boundary
        if boundary.type == 'MultiLineString':
            for line in boundary.geoms:  # .geoms is required by shapely >= 2.0
                lines.append(line)
                row_ids.append(i_row)
        else:
            lines.append(boundary)
            row_ids.append(i_row)

    gdf_lines = gdf.drop("geometry", axis=1).iloc[row_ids, :]
    gdf_lines["Coordinates"] = lines
    gdf_lines = gpd.GeoDataFrame(gdf_lines, geometry='Coordinates', crs=gdf.crs)
    if crs is not None:
        gdf_lines = gdf_lines.to_crs(crs)
    if add_allone_col:
        gdf_lines["ALLONE"] = 1
    Path(dst_lines).parent.mkdir(exist_ok=True, parents=True)
    gdf_lines.to_file(dst_lines)
    return 0
7340eccc3b02f70d38967f3c325c968bcec67f26
8,163
def format_decimal(amount):
    """ jinja2 filter function for decimal number treatment """
    amt_whole = int(amount)
    amt_whole_len = len(str(amt_whole))
    if amount < 1:
        amt_str = '{:0.15f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 4:
        amt_str = '{:0.3f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 6:
        amt_str = '{:0.2f}'.format(amount).rstrip("0").rstrip(".")
    elif amt_whole_len < 9:
        amt_str = '{:0.1f}'.format(amount).rstrip("0").rstrip(".")
    else:
        amt_str = '{}'.format(amt_whole)
    return amt_str
55ee4b6134abd409ade396233fa07061d0a30764
8,164
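A quick usage sketch for format_decimal, showing how the precision scales with the magnitude of the amount:

print(format_decimal(0.000123))      # 0.000123
print(format_decimal(12.3))          # 12.3
print(format_decimal(12345.678))     # 12345.68
print(format_decimal(1234567890.5))  # 1234567890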
def remove_special_char(df, col):
    """Removes special characters such as % and $ from numeric variables
    and converts them into float"""
    df[col] = df[col].replace(regex=True, to_replace=r'[^0-9.\-]', value=r'')
    df[col] = df[col].astype("float")
    return df[col]
c6c4c86eb480d2f045e40b3eb831d0b8d5381d33
8,165
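A minimal usage sketch for remove_special_char with a throwaway DataFrame:

import pandas as pd

df = pd.DataFrame({'price': ['$1,200.50', '95%', '-3.5']})
print(remove_special_char(df, 'price').tolist())  # [1200.5, 95.0, -3.5]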
import numpy as np


def getNonlinearInfo(numHiddenLayers, numBinary, unaryPerBinary):
    """
    Generates a 2D list to be used as a nonlinearInfo argument in building an
    EQL/EQL-div model

    # Arguments
        numHiddenLayers: integer, number of hidden layers (i.e. layers
            including nonlinear keras layer components)
        numBinary: list of integers, available numbers to be used as number of
            binary functions in a nonlinear layer component
        unaryPerBinary: integer, number of unary functions per binary function
            in a nonlinear layer component

    # Returns
        A 2D list of integers with dimension numHiddenLayers x 2. Rows
        represent layers, first column is number of unary functions, second
        column is number of binary functions
    """
    nonlinearInfo = [0 for i in range(numHiddenLayers)]
    for i in range(numHiddenLayers):
        v = np.random.choice(numBinary)  # binary nodes
        u = unaryPerBinary * v  # unary nodes
        nonlinearInfo[i] = [u, v]
    return nonlinearInfo
e62f8d016501ad48aeae09ebd8e61b659618e0b0
8,166
import types  # project-local `types` module (the stdlib `types` has no Gateway)


def construct_magmad_gateway_payload(gateway_id: str, hardware_id: str) -> types.Gateway:
    """
    Returns a default development magmad gateway entity given a desired
    gateway ID and a hardware ID pulled from the hardware secrets.

    Args:
        gateway_id: Desired gateway ID
        hardware_id: Hardware ID pulled from the VM

    Returns:
        Gateway object with fields filled in with reasonable default values
    """
    return types.Gateway(
        name='TestGateway',
        description='Test Gateway',
        tier='default',
        id=gateway_id,
        device=types.GatewayDevice(
            hardware_id=hardware_id,
            key=types.ChallengeKey(
                key_type='ECHO',
            ),
        ),
        magmad=types.MagmadGatewayConfigs(
            autoupgrade_enabled=True,
            autoupgrade_poll_interval=60,
            checkin_interval=60,
            checkin_timeout=30,
        ),
    )
87826b72fd2f33a4a862ffbacbbce14f206dc086
8,167
def script_rename_number(config):
    """ The scripting version of `rename_number`. This function applies the
    rename to the entire directory. It also adds the tags to the header file
    of each fits.

    Parameters
    ----------
    config : ConfigObj
        The configuration object that is to be used for this function.

    Returns
    -------
    None
    """
    # Extract the configuration parameters.
    data_directory = core.config.extract_configuration(
        config_object=config, keys=['data_directory'])
    begin_garbage = core.config.extract_configuration(
        config_object=config, keys=['renaming', 'begin_garbage'])

    # Obtain the labels.
    labels, raw = rename_number(data_directory=data_directory,
                                begin_garbage=begin_garbage)

    # Add to all file headers. Assume that the order has not
    # changed between renaming steps.
    core.error.ifas_info("Adding the file number under the `NUMBER` card "
                         "in the headers of the fits files in {data_dir} "
                         "based on the file order."
                         .format(data_dir=data_directory))
    fits_files = core.io.get_fits_filenames(data_directory=data_directory)
    for (filedex, headerdex) in zip(fits_files, raw):
        __ = core.io.append_astropy_header_card(
            file_name=filedex, header_cards={'NUMBER': headerdex})

    # Finally rename the files based on parallel appending. Glob
    # provides the directory.
    core.error.ifas_info("Appending the file number to the end of "
                         "the files in {data_dir}."
                         .format(data_dir=data_directory))
    core.io.rename_by_parallel_append(file_names=fits_files,
                                      appending_names=labels,
                                      directory=None)
    return None
bd0c14cbec43644ed5b6e7b9e23e1c1f71f51984
8,169
def jasper10x4(**kwargs):
    """
    Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural
    Acoustic Model,' https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs)
4b8e210a619a28ca0d28ee6ba85fd7a7acf15335
8,170
def swap(lst, idx1, idx2):
    """
    >>> swap([0, 1, 2], 0, 1)
    [1, 0, 2]
    >>> swap([0, 1, 2], 0, 0)
    [0, 1, 2]
    """
    # print("Swapping [{}, {}] from {}".format(idx1, idx2, lst))
    lst[idx1], lst[idx2] = lst[idx2], lst[idx1]
    # print("resulting to {}".format(lst))
    return lst
81dee804db05eedaa1a9b5611e836a4c1da89b4b
8,171
def substring_index(column, delim=' ', cnt=1):
    """
    Returns the substring from string ``column`` before ``cnt`` occurrences of
    the delimiter ``delim``. If ``cnt`` is positive, everything to the left of
    the final delimiter (counting from the left) is returned. If ``cnt`` is
    negative, everything to the right of the final delimiter (counting from
    the right) is returned. substring_index performs a case-sensitive match
    when searching for ``delim``.
    """
    return _with_expr(exprs.SubstringIndex, column, delim, cnt)
b17fe73e19ece0d9e2511f8b45c43accb65f4138
8,172
def askPrize(mon: int) -> str:
    """
    Args:
        mon: the invoice period (month) to query
    Returns:
        the query result string
    """
    (date, data) = initData(mon)
    date = f"{date}月\n"
    ssp_prize = f"特別獎:{data[0]}\n"
    sp_prize = f"特獎:{data[1]}\n"
    first_prize = f"頭獎:{data[2]}、{data[3]}、{data[4]}\n"
    six_prize = f"六獎:{data[2][5:]}、{data[3][5:]}、{data[4][5:]}、{data[5]}\n"
    return date + ssp_prize + sp_prize + first_prize + six_prize
1fcd388a38823a53719e8b50b02d2758b8ebe6dc
8,173
import pickle

import pandas as pd


def get_letters_df(letters_dict_pickle):
    """Get the letters Pandas Dataframe

    Parameters
    ----------
    letters_dict_pickle: string
        Path to the dict with the letters text

    Returns
    -------
    Pandas DataFrame
        Pandas DataFrame with a column with the tokens
    """
    with open(letters_dict_pickle, 'rb') as handle:
        letters_dict = pickle.load(handle)
    letters_df = pd.DataFrame(letters_dict, index=[const.LETTER_TEXT]).T
    # `tokenize` must be a callable tokenizer available in this module
    # (the stdlib `tokenize` module is not callable).
    letters_df[const.TOKENIZED] = letters_df[const.LETTER_TEXT].apply(tokenize)
    return letters_df
f6c40627ae917d51ce30cd572bb02c378ca7f7e2
8,174
def lorenzmod1(XYZ, t, a=0.1, b=4, dz=14, d=0.08):
    """
    The Lorenz Mod 1 Attractor.
    x0 = (0,1,0)
    """
    x, y, z = XYZ
    x_dt = -a * x + y**2 - z**2 + a * dz
    y_dt = x * (y - b * z) + d
    z_dt = -z + x * (b * y + z)
    return x_dt, y_dt, z_dt
17dbd87b25968ca0e24b6e6fc602007932983f54
8,175
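A usage sketch for lorenzmod1: since it has the (state, t) signature expected by scipy.integrate.odeint, integrating the attractor from the suggested x0 = (0, 1, 0) is one call:

import numpy as np
from scipy.integrate import odeint

t = np.linspace(0, 100, 10000)
trajectory = odeint(lorenzmod1, (0, 1, 0), t)
print(trajectory.shape)  # (10000, 3) -- columns are x, y, z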
import numpy as np


def binomial_p(x, n, p0, reps=10**5, alternative='greater', keep_dist=False, seed=None):
    """
    Parameters
    ----------
    x : int
        number of observed successes, where 0 represents a failure and 1
        represents a success
    n : int
        number of trials
    p0 : float
        hypothesized probability of success in each trial
    reps : int
        number of repetitions (default: 10**5)
    alternative : {'greater', 'less', 'two-sided'}
        alternative hypothesis to test (default: 'greater')
    keep_dist : boolean
        flag for whether to store and return the array of values of the test
        statistics (default: false)
    seed : RandomState instance or {None, int, RandomState instance}
        If None, the pseudorandom number generator is the RandomState instance
        used by `np.random`; If int, seed is the seed used by the random
        number generator; If RandomState instance, seed is the pseudorandom
        number generator

    Returns
    -------
    float
        estimated p-value
    float
        test statistic
    list
        distribution of test statistics (only if keep_dist == True)
    """
    if n < x:
        raise ValueError("Cannot observe more ones than the population size")

    prng = get_prng(seed)

    def generate():
        return prng.binomial(n, p0, 1)[0]

    if keep_dist:
        permutations = np.empty(reps)
        for i in range(reps):
            permutations[i] = generate()
        if alternative == 'two-sided':
            hits_up = np.sum(permutations >= x)
            hits_low = np.sum(permutations <= x)
            p_value = 2 * np.min([hits_up / reps, hits_low / reps, 0.5])
        elif alternative == 'greater':
            p_value = np.mean(permutations >= x)
        else:
            p_value = np.mean(permutations <= x)
        return p_value, x, permutations
    else:
        hits_up = 0
        hits_low = 0
        for i in range(reps):
            ts = generate()
            hits_up += (ts >= x)
            hits_low += (ts <= x)
        if alternative == 'two-sided':
            p_value = 2 * np.min([hits_up / reps, hits_low / reps, 0.5])
        elif alternative == 'greater':
            p_value = hits_up / reps
        else:
            p_value = hits_low / reps
        return p_value, x
486257dfc1c517313556d08c8e2ad4ed3e85980d
8,176
from discord import Member
from discord.ext import commands


def is_guild_owner() -> commands.check:
    """
    Returns True under the following conditions:
    - **ctx.author** is the owner of the guild where this command was called from
    """
    def predicate(ctx):
        if ctx.guild is None:
            raise commands.NoPrivateMessage('This command can only be used in a server.')
        author: Member = ctx.author
        if author.id != ctx.guild.owner_id:  # compare IDs, not Member to id
            raise commands.MissingPermissions('This command can only be run by the owner of this guild.')
        return True
    return commands.check(predicate)
3f3c9a5d5990794bced7b021c646041e514e72ed
8,177
def round_grade(grade: int) -> int:
    """
    Round the grade according to policy.

    Parameters
    ----------
    grade: int
        Raw grade.

    Returns
    -------
    rounded_grade: int
        Rounded grade.
    """
    if grade < 38:
        rounded_grade = grade
    else:
        closest_multiple_5 = (grade // 5 + 1) * 5
        if (closest_multiple_5 - grade) >= 3:
            rounded_grade = grade
        else:
            rounded_grade = closest_multiple_5
    return rounded_grade
8f1be9575d98b4ed24ff1e5904a5345d7ebc3e48
8,178
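Spot checks for round_grade, matching the usual grading policy (no rounding below 38; round up only when within 2 of the next multiple of 5):

for g in (37, 38, 41, 73):
    print(g, '->', round_grade(g))  # 37 -> 37, 38 -> 40, 41 -> 41, 73 -> 75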
import tensorflow as tf


def patch_indices(i_max: int, j_max: int, ps: int, pstr: int):
    """
    Given the sizes i_max and j_max of an image, it extracts the top-left
    corner pixel location of all the patches of size (ps,ps) and distant
    "pstr" pixels away from each other. If pstr < ps, the patches are
    overlapping.

    Input:
        i_max, j_max - int, sizes of the image
        ps - int, patch size
        pstr - int, patch stride
    Output:
        idx - int, array of [total_num_patches, 2], pixel locations
    """
    idx = []
    for i in range(0, i_max - ps + 1, pstr):
        for j in range(0, j_max - ps + 1, pstr):
            idx.append([i, j])
    return tf.convert_to_tensor(idx)
6b760311513b3ded56f85690bab6622c999cc40d
8,179
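A usage sketch for patch_indices (TensorFlow 2.x eager mode assumed), extracting non-overlapping 2x2 patch corners from a 4x4 image:

idx = patch_indices(4, 4, ps=2, pstr=2)
print(idx.numpy().tolist())  # [[0, 0], [0, 2], [2, 0], [2, 2]]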
# imports assume the tf.keras API; standalone Keras works the same way
from tensorflow.keras.layers import (Conv2D, Dense, Dropout, Flatten,
                                     InputLayer, Lambda, MaxPooling2D)
from tensorflow.keras.models import Sequential


def model_fn():
    """
    Defines a convolutional neural network for steering prediction.
    """
    model = Sequential()
    # Input layer and normalization
    model.add(InputLayer(input_shape=(20, 80, 1)))
    model.add(Lambda(lambda x: (x / 255.0) - 0.5))
    # Convolutional layer 1
    model.add(Conv2D(filters=48, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Convolutional layer 2
    model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Dropout for regularization.
    model.add(Dropout(0.2))
    # Fully connected layer
    model.add(Flatten())
    model.add(Dense(100))
    # Predicted steering
    model.add(Dense(1))
    print(model.summary())
    return model
42b85775635ee71ac6ed76f64170776eb36b5953
8,180
def authenticate(request):
    """Return the user model instance associated with the given request

    If no user is retrieved, return an instance of `AnonymousUser`
    """
    token, _ = get_token_from_request(request)
    jwt_info = {
        'token': token,
        'case': TokenCase.OK,
        'payload': None,
    }
    if not token:
        jwt_info['case'] = TokenCase.NO_TOKEN
        return get_user(), jwt_info
    try:
        payload = decode(token)
        user_pk = payload[JWT_CONFIG.user_pk_key]
        return get_user(user_pk=user_pk), jwt_info
    except jwt.ExpiredSignatureError:
        jwt_info['case'] = TokenCase.EXPIRED
    except jwt.DecodeError:
        jwt_info['case'] = TokenCase.DECODE_ERROR
    except jwt.InvalidTokenError:
        jwt_info['case'] = TokenCase.INVALID_TOKEN
    except KeyError:
        jwt_info['case'] = TokenCase.MISSING_KEY
    return get_user(), jwt_info
15d2d4343673cd30f2b201b834bd26889813d4ab
8,181
def _urpc_test_func_2(buf):
    """!
    @brief u-RPC variable length data test function.
    @param buf A byte string buffer
    @return The same byte string repeated three times
    """
    return buf * 3
f13f7dcf45eaa0706b69eb09c63d29ba2bbd3d60
8,182
from datetime import datetime


def main():
    """
    Use Netmiko to connect to each of the devices. Execute 'show version' on
    each device. Record the amount of time required to do this.
    """
    start_time = datetime.now()
    for device in devices:
        print()
        print('#' * 40)
        output = show_version(device)
        print(output)
        print()
        print('#' * 40)
    print("\nElapsed time: " + str(datetime.now() - start_time))
    return None
9182a63ea3e6a98d995c4ae00126175164cd6dfc
8,183
def versionString(version):
    """Create version string."""
    ver = [str(v) for v in version]
    numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]
    # join the numeric part with dots, then dash-append any remaining parts
    return '-'.join(['.'.join(numbers)] + rest)
2feec3f8ac5a1f2b848d0805dfa0c3ff53a44ead
8,186
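Spot checks for versionString; a trailing zero patch level is dropped and any extra components are dash-separated:

print(versionString((1, 2, 0)))          # 1.2
print(versionString((1, 2, 3)))          # 1.2.3
print(versionString((1, 2, 3, 'beta')))  # 1.2.3-beta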
import builtins
import warnings


def any(iterable, pred):
    """Returns True if ANY element in the given iterable is True for the
    given pred function"""
    warnings.warn(
        "pipe.any is deprecated, use the builtin any(...) instead.",
        DeprecationWarning,
        stacklevel=4,
    )
    return builtins.any(pred(x) for x in iterable)
32f48ab7a6be329b8758ba3dbbe6721923890e11
8,187
import numpy as np
import pandas as pd


def build_prev_df_n(dispositions) -> pd.DataFrame:
    """Build admissions dataframe from Parameters."""
    # `n_days` is a module-level constant in the source project
    days = np.array(range(0, n_days))
    data_dict = dict(
        zip(
            ["day", "hosp", "icu", "vent"],
            [days] + [disposition for disposition in dispositions],
        )
    )
    projection = pd.DataFrame.from_dict(data_dict)
    # New cases
    projection_admits = projection.iloc[:-1, :] - projection.shift(1)
    projection_admits["day"] = range(projection_admits.shape[0])
    projection_admits.loc[0, 'hosp'] = 25
    return projection_admits
e17ca1ab78e16aeaeac0afa5a1a9fa193cb9777f
8,189
# `html` is the viewdom tagged-template helper, not the stdlib `html` module
from viewdom import html, VDOMNode


def main() -> VDOMNode:
    """Main entry point."""
    vdom = html("<{Heading} />")
    return vdom
c29d55ec4d469373e5504cc089e8370d0573a719
8,190
def GT(x=None, y=None):
    """
    Compares two values and returns:
        true when the first value is greater than the second value.
        false when the first value is less than or equivalent to the second value.

    See https://docs.mongodb.com/manual/reference/operator/aggregation/gt/
    for more details

    :param x: first value or expression
    :param y: second value or expression
    :return: Aggregation operator
    """
    if x is None and y is None:
        return {'$gt': []}
    return {'$gt': [x, y]}
62a4321d5d36306b9cc5b910e7eac0eec4d914f3
8,191
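A usage sketch for GT, e.g. for use inside an aggregation pipeline's $project stage:

print(GT('$qty', 250))  # {'$gt': ['$qty', 250]}
print(GT())             # {'$gt': []}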
def pymongo_formatter(credentials):
    """Returns a DSN for a pymongo-MongoDB connection.

    Note that the username and password will still be needed separately in the
    constructor.

    Args:
        credentials (dict): The credentials dictionary from the relationships.

    Returns:
        (string) A formatted pymongo DSN.
    """
    return '{0}:{1}/{2}'.format(
        credentials['host'],
        credentials['port'],
        credentials['path']
    )
69216575258f297c368ec3015c1c14569bb82cd2
8,192
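A minimal usage sketch for pymongo_formatter with a hypothetical credentials dict:

creds = {'host': 'mongodb.internal', 'port': 27017, 'path': 'main'}
print(pymongo_formatter(creds))  # mongodb.internal:27017/main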
import numpy as np


def sigma_disp_over_vcirc(gal, R=None):
    """The velocity dispersion over circular velocity computed at R=x*Rs [km/s].
    Isotropic NFW is assumed.

    :param R: radius [kpc]
    :param gal: galaxy object
    """
    # get Rs
    (rho, rs, c) = reconstruct_density_DM(gal, DM_profile='NFW')

    # make array of r, preferably with gal.R
    if R is None:
        x_arr = np.array(gal.R / rs)
        ratio_arr = sigma_over_vcirc(x_arr)
    else:
        R, is_scalar = tl.treat_as_arr(R)
        x_arr = np.array(R / rs)
        ratio_arr = sigma_over_vcirc(x_arr)
        if is_scalar:
            ratio_arr = np.squeeze(ratio_arr)
    return ratio_arr
f05bb1f1a7ca2e0899ab67bb1c2a355236e3e810
8,193
def filters(param: str, default_value: str, base_key: str, key_manager: KeyManager) -> list:
    """Filter combo box selector for parameter"""
    update_type = '|filters|'
    row = combo_row(param, default_value, base_key, key_manager, update_type)
    return row
6645d369116d10cc4392810c9228f0e72fc21fd5
8,194
def get_scanner(fs_id):
    """ get scanner 3T or 1.5T"""
    sc = fs_id.split("_")[2]
    if sc in ("15T", "1.5T", "15t", "1.5t"):
        scanner = "15T"
    elif sc in ("3T", "3t"):
        scanner = "3T"
    else:
        print("scanner for subject " + fs_id + " cannot be identified as either 1.5T or 3T...")
        print("Please double check the IDs in the list of subjects")
        scanner = "false"
    return scanner
f905bd16f3103b0c6c02193d30fb945646afb54c
8,195
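Spot checks for get_scanner with hypothetical IDs (the scanner field is the third underscore-separated token):

print(get_scanner('ADNI_0001_3T_20100101'))   # 3T
print(get_scanner('ADNI_0002_15T_20100101'))  # 15T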
import tensorflow as tf


def _find_query_rank(similarities, library_keys, query_keys):
    """tf.py_func wrapper around _find_query_rank_helper.

    Args:
        similarities: [batch_size, num_library_elements] float Tensor. These
            are not assumed to be sorted in any way.
        library_keys: [num_library_elements] string Tensor, where each column j
            of similarities corresponds to library_key j.
        query_keys: [num_queries] string Tensor

    Returns:
        query_ranks: a dictionary with keys 'highest', 'lowest' and 'avg',
            where each value is a [batch_size] Tensor. The 'lowest' Tensor
            contains for each batch the lowest index of a library key that
            matches the query key for that batch element when the library keys
            are sorted in descending order by similarity score. The 'highest'
            and 'avg' Tensors are defined similarly. The first two are tf.int32
            and the final is a tf.float32. Note that the behavior of these
            metrics is undefined when there are ties within a row of
            similarities.
        best_query_similarities: the value of the similarities evaluated at
            the lowest query rank.
    """
    (highest_rank, lowest_rank, avg_rank, best_query_similarities) = tf.py_func(
        _find_query_rank_helper, [similarities, library_keys, query_keys],
        (tf.int32, tf.int32, tf.float32, tf.float32),
        stateful=False)
    query_ranks = {
        'highest': highest_rank,
        'lowest': lowest_rank,
        'avg': avg_rank
    }
    return query_ranks, best_query_similarities
f3b002b77c77845681b35c3d6f629f6290324a47
8,196
import tensorflow as tf


def random_adjust_brightness(image, max_delta=0.2, seed=None):
    """Randomly adjusts brightness."""
    delta = tf.random_uniform([], -max_delta, max_delta, seed=seed)
    image = tf.image.adjust_brightness(image / 255, delta) * 255
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
    return image
9d371ebb268708b983a523ce71a64103d3e46717
8,197
import re

import tensorflow as tf


def get_assign_ops_and_restore_dict(filename, restore_all=False):
    """Helper function to read variable checkpoints from filename.

    Iterates through all vars in restore_all=False else all trainable vars.
    It attempts to match variables by name and variable shape. Returns a
    possibly empty list of assign_ops, and a possibly empty dictionary for
    tf.train.Saver()
    """
    def check_name_and_shape(name, var, shape_map):
        if name in shape_map:
            # Cannot check variables with unknown sizes such as cudnn rnns
            if str(var.shape) == "<unknown>":
                # Just return True and hope the shapes match
                return True
            if var.shape == shape_map[name]:
                return True
        return False

    assign_ops = []
    restore_dict = {}
    try:
        reader = tf.train.NewCheckpointReader(filename)
        var_to_shape_map = reader.get_variable_to_shape_map()
        variables = tf.trainable_variables()
        if restore_all:
            variables = tf.get_collection(tf.GraphKeys.VARIABLES)
        for var in variables:
            idx = var.name.find(":")
            if idx != -1:
                true_name = var.name[:idx]
            loss_idx = re.search("Loss_Optimization", true_name)
            if 'EmbeddingMatrix' in true_name:
                embed_restore, assign = _restore_embed(var, var_to_shape_map, reader)
                if assign:
                    assign_ops.append(embed_restore)
                else:
                    restore_dict[true_name] = embed_restore
            if check_name_and_shape(true_name, var, var_to_shape_map):
                tensor = reader.get_tensor(true_name)
                if tensor.dtype != var.dtype.as_numpy_dtype():
                    assign_ops.append(var.assign(tf.cast(tensor, var.dtype)))
                else:
                    restore_dict[true_name] = var
            elif loss_idx:
                loss_idx = loss_idx.end()
                if FP32_TEST.search(true_name):
                    true_name = FP32_TEST.sub("", true_name)
                else:
                    true_name = (true_name[:loss_idx]
                                 + "/Loss_Optimization/FP32-master-copy"
                                 + true_name[loss_idx:])
                if check_name_and_shape(true_name, var, var_to_shape_map):
                    tensor = reader.get_tensor(true_name)
                    if tensor.dtype != var.dtype.as_numpy_dtype():
                        assign_ops.append(var.assign(tf.cast(tensor, var.dtype)))
                    else:
                        restore_dict[true_name] = var
            else:
                print("Not restoring {}".format(var.name))
                if true_name not in var_to_shape_map:
                    print("true name [{}] was not in shape map".format(true_name))
                else:
                    if var.shape != var_to_shape_map[true_name]:
                        print(("var.shape [{}] does not match var_to_shape_map[true_name]"
                               "[{}]").format(var.shape, var_to_shape_map[true_name]))
                        print("WARNING: Run will mostly error out due to this")
    except Exception as e:  # pylint: disable=broad-except
        print(str(e))
        if "corrupted compressed block contents" in str(e):
            print("It's likely that your checkpoint file has been compressed "
                  "with SNAPPY.")
        if ("Data loss" in str(e) and
                (any([e in filename for e in [".index", ".meta", ".data"]]))):
            proposed_file = ".".join(filename.split(".")[0:-1])
            v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the filename
*prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
            print(v2_file_error_template.format(proposed_file))
        raise ValueError("Error in loading checkpoint")
    return assign_ops, restore_dict
d93583c914bbe066b6a62d1f2041ab60cd511ab6
8,198
def log_creations(model, **extra_kwargs_for_emit):
    """
    Sets up signal handlers so that whenever an instance of `model` is
    created, an Entry will be emitted. Any further keyword arguments will be
    passed to the constructor of Entry as-is.

    As a special case, if you specify the sentinel value `INSTANCE` as the
    value of a keyword argument, the newly created instance of `model` will
    be passed instead.

    If the value of the keyword argument is a function, it will be called with
    the newly created instance to determine the value of the keyword argument
    to the Entry constructor.

    For examples on usage, see `feedback/handlers/feedback_message.py`.
    """
    meta = model._meta
    entry_type_name = '{app_label}.{model_name}.created'.format(
        app_label=meta.app_label,
        model_name=meta.model_name,
    )

    @receiver(post_save, sender=model, weak=False)
    def on_save_emit_event_log_entry(sender, instance, created, **kwargs):
        if not created:
            return
        kwargs_for_emit = dict()
        for key, value in extra_kwargs_for_emit.items():
            if value is INSTANCE:
                value = instance
            elif callable(value):
                value = value(instance)
            kwargs_for_emit[key] = value
        emit(entry_type_name, **kwargs_for_emit)

    return on_save_emit_event_log_entry
4eee202ccb335c658c1f6bf15b02f00955eb3da7
8,199
def notebook(request, id=0):
    """
    :param request: the incoming HTTP request
    :param id: primary key of the notebook to display
    :return: rendered notebook page
    """
    get_notebook = JupyterNotebooks.objects.get(id=id)
    return render(request, "customdashboard/notebook.html",
                  {'get_notebook': get_notebook})
3d1e3880182f6c8d507391fc66a9c0b41f18e3bc
8,200
from base64 import urlsafe_b64encode
from hashlib import md5

from cryptography.fernet import Fernet


def encrypt_decrypt(data_string, password, mode='encrypt'):
    """Encrypts OR Decrypts data_string w.r.t password based on mode specified

    Parameters:
        data_string: Text that needs to be encoded. passed in string format
        password: a string to encrypt data before encoding into an image.
        mode: 'encrypt' --> encrypts the data
              'decrypt' --> decrypts the data

    Returns:
        Data string either encrypted or decrypted based on mode specified
    """
    _hash = md5(password.encode())
    hash_value = _hash.hexdigest()
    key = urlsafe_b64encode(hash_value.encode())
    cipher = Fernet(key)  # 32-byte key - URLsafe - base64-encoded
    if mode == 'encrypt':
        data_bytes = data_string.encode()
        encrypted_bytes = cipher.encrypt(data_bytes)
        encrypted_data_string = encrypted_bytes.decode()
        return encrypted_data_string
    elif mode == 'decrypt':
        encrypted_bytes = data_string.encode()
        decrypted_bytes = cipher.decrypt(encrypted_bytes)
        decrypted_data_string = decrypted_bytes.decode()
        return decrypted_data_string
    else:
        raise InvalidModeError("Expected 'encrypt' OR 'decrypt'")
c0ecdf2009fe1b40cb9ed86e12904e241eb5ea86
8,201
def compute_alphabet(sequences):
    """
    Returns the alphabet used in a set of sequences.
    """
    alphabet = set()
    for s in sequences:
        alphabet = alphabet.union(set(s))
    return alphabet
cf8f7dc1e31a28fe0910d806d18189aae7d7a85b
8,202
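A one-line usage sketch for compute_alphabet:

print(sorted(compute_alphabet(["gattaca", "gig"])))  # ['a', 'c', 'g', 'i', 't']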
def Diff(a, b):
    """Returns the number of different elements between 2 iterables.

    Args:
        a(iterable): first iterable.
        b(iterable): second iterable.

    Returns:
        int: the number of different elements.
    """
    return sum(map(lambda x, y: bool(x - y), a, b))
0885bd224f956f138e80a4b681ebc581c733cc51
8,204
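Spot checks for Diff, which counts positions where the two iterables differ:

print(Diff([1, 2, 3], [1, 0, 3]))  # 1
print(Diff((1, 1), (1, 1)))        # 0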
def load_description(model):
    """Load description of the <model>."""
    desc = get_available_pkgyaml(model)
    entry = read_mlhubyaml(desc)
    return entry
2a97ee446d0693af704c6b0ebd14376d3e6dea37
8,205
import math

import numpy as np
import vpype as vp
from shapely.geometry import MultiLineString


def generate_star(rect: RectType, line_count: int = 20) -> vp.LineCollection:
    """Generate a set of lines from a random point."""
    orig_x = np.random.uniform(rect[0], rect[0] + rect[2])
    orig_y = np.random.uniform(rect[1], rect[1] + rect[3])
    r = math.hypot(rect[2], rect[3])
    angles = np.linspace(0, 2 * math.pi, num=line_count, endpoint=False)
    phase = np.random.normal(0, math.pi / 4)

    mls = MultiLineString(
        [
            ([orig_x, orig_y], [orig_x + r * math.cos(a), orig_y + r * math.sin(a)])
            for a in angles + phase
        ]
    )
    return vp.LineCollection(mls.intersection(rect_to_polygon(rect)))
a430b486af8606d949a057b4578fdddd9968386b
8,206
def failure(request):
    """Display failure message"""
    message = request.session.get('failure message')
    return HttpResponse(f"Failure! {message if message is not None else ''}")
c9eee874106fce87d6816e3216e3a86d6eef5fab
8,207
import torch


def visualize_image(cam, rgb_img, target_category):
    """
    Visualize output for given image
    """
    input_tensor = preprocess_image(rgb_img)
    grayscale_cam = cam(input_tensor=input_tensor, target_category=target_category)
    grayscale_cam = grayscale_cam[0, :]

    output = cam.activations_and_grads(input_tensor)
    softmax = torch.nn.Softmax(dim=1)
    print("PRED: ", softmax(output).tolist())

    visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
    return visualization
2644eca6cd50078a167b7d791a625ecb13fef17d
8,208
import random


def _get_random_hangul(count=(0xd7a4 - 0xac00)):
    """Generate a sequence of random, unique, valid Hangul characters.

    Returns all possible modern Hangul characters by default.
    """
    valid_hangul = [chr(_) for _ in range(0xac00, 0xd7a4)]
    return random.sample(valid_hangul, count)
3a41edd36cd2aac05e51a121743bcfb61455bd9b
8,210
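A usage sketch for _get_random_hangul, checking that the sample stays inside the modern Hangul syllable block:

sample = _get_random_hangul(5)
print(len(sample), all('가' <= ch <= '힣' for ch in sample))  # 5 True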
def build(dir, **kwargs):
    """run cmake to generate a project buildsystem

    Parameters:
    ----------
    dir          str: Location of the CMake build directory

    Keyword Args:
    ----------
    parallel     int: The maximum number of concurrent processes to use when
                      building. Default: 1 less than the number of available
                      logical cores.
    target       str: Path to directory which CMake will use as the root of
                      build directory.
    config       str: For multi-configuration tools, choose specified
                      configuration
    flags        seq(str): Sequence of flags (or any other unlisted argument).
                      Include preceding dash(es).
    tooloptions  seq(str): Sequence of options to be passed onto the build tool
    env: A mapping that defines the environment variables for the new process
    """
    # prune empty entries
    kwargs = {key: value for key, value in kwargs.items() if value}
    # add defaults if not specified
    if "parallel" not in kwargs:
        kwargs["parallel"] = _getWorkerCount()
    # build cmake arguments
    args = [findexe("cmake"), "--build", dir]
    env = None
    for key, value in kwargs.items():
        if key in ("parallel", "target", "config"):
            args.append(f"--{key}")
            args.append(f"{value}")
        elif key == "flags":
            for f in value:
                args.append(f)
        elif key == "env":
            env = value
        elif key != "tooloptions":  # `is not` compares identity, not equality
            raise KeyError
    if "tooloptions" in kwargs:
        args.append("--")
        for f in kwargs["tooloptions"]:  # was iterating a stale loop variable
            args.append(f)
    return run(args, env=env).check_returncode()
2f21fc901d7c95d3b1a2b37d6ba3584d6cb96efb
8,211
def is_builtin_model(target: type) -> bool:
    """returns ``True`` if the given type is a model subclass"""
    return is_builtin_class_except(target, ["MetaModel", "Model", "DataBag"])
6b1cf3b0fdd0db50c0dde6a2ca3f3bcb8e8328cf
8,212
def runQ(qparsed, params=dict(), nbcircuits=1, nbqubits=None):
    """
    qparsed: qlang circuit (already parsed)
    params: {x: np.array, t: np.array}
    """
    # *** verify if parameters are ok for qparsed circuit ***
    _, vector_parameters = parseqlang.parameters_from_gates(qparsed)
    for pname, dimension in vector_parameters.items():
        if pname not in params:
            raise Exception(f'Vector parameter "{pname}" not provided')
        if params[pname].shape[0] != dimension:
            raise Exception(f"Vector parameter {pname} is of dimension {dimension} "
                            f"but {params[pname].shape[0]} are provided")
        if len(params[pname].shape) == 1:
            nb_rows = 1
        else:
            nb_rows = params[pname].shape[1]
        if nbcircuits == 1 and nb_rows > 1:
            nbcircuits = nb_rows
        elif nbcircuits != nb_rows and nb_rows != 1:
            raise Exception(f"{pname}: got {nb_rows} rows ({nbcircuits} expected)")

    # *** determine nb qubits ***
    qbits = parseqlang.nbqubits_from_gates(qparsed)
    if nbqubits is None:
        nbqubits = qbits
    elif nbqubits < qbits:
        raise Exception(f"{nbqubits} qubits asked, but {qbits} qubits are needed")

    # *** run circuit(s) with manyq ***
    initQreg(nbqubits, n=nbcircuits)
    for gate in qparsed:
        gname = gate[0]
        gparam = gate[1]
        qbit0 = gparam[0]
        if gname in {"CZ", "CX"}:
            qbit1 = gparam[1]
            # two-qubit gate, e.g. CX(qbit0, qbit1)
            globals()[gname](qbit0, qbit1)
            continue
        pname = gparam[1][0]
        pindex = gparam[1][1]
        # parameterized single-qubit gate, e.g. RX(qbit0, x_3)
        globals()[gname](qbit0, params.get(pname)[pindex])
    return Qreg.outQ
d6ca4afd14e56961920033e9e7ab40f4fc4a9ae6
8,213
def simulation_wells(self):
    """Get a list of all simulation wells for a case

    Returns:
        :class:`rips.generated.generated_classes.SimulationWell`
    """
    wells = self.descendants(SimulationWell)
    return wells
ecf13fc524f12be21593c49d8d22c365564716e9
8,215
from typing import Optional


def getSmartMeter() -> Optional[str]:
    """Return smartmeter name used in recording."""
    mapping = getDeviceMapping()
    # Identifier for smartmeter is meter with phase 0
    try:
        return next(key for key in mapping if mapping[key]["phase"] == 0)
    except StopIteration:
        return None
8e947d5d9078886f9bc2b662162304eb2fb6474b
8,216
from typing import Optional

# http.HttpRequest/HttpResponse come from Django, not the stdlib `http` module
from django import http


def view_index(
    request: http.HttpRequest,
    workflow: Optional[models.Workflow] = None,
) -> http.HttpResponse:
    """Render the list of views attached to a workflow.

    :param request: Http request received.
    :param workflow: Workflow being processed
    :return: HTTP response with the table
    """
    # Get the views
    views = workflow.views.values(
        'id', 'name', 'description_text', 'modified')

    # Build the table only if there is anything to show (prevent empty table)
    return render(
        request,
        'table/view_index.html',
        {
            'query_builder_ops': workflow.get_query_builder_ops_as_str(),
            'table': services.ViewTable(views, orderable=False),
        },
    )
2dca3e42a3d2b855d795fc0c61b41c2a2f449724
8,218
import numpy as np


def inverseLPSB(data, mu, g):
    """Compute regularized L-PSB step."""
    mUpd = data.mUpd
    gamma = data.gamma
    gammah = data.gamma + mu
    Q22 = np.tril(data.STY[:mUpd, :mUpd], -1) + np.tril(data.STY[:mUpd, :mUpd], -1).T + \
        np.diag(np.diag(data.STY[:mUpd, :mUpd])) + \
        gamma * np.diag(np.diag(data.STS[:mUpd, :mUpd]))
    Q = np.block([
        [np.zeros((mUpd, mUpd)), np.triu(data.STS[:mUpd, :mUpd])],
        [np.triu(data.STS[:mUpd, :mUpd]).T, Q22]
    ])
    Q += 1 / gammah * np.block([
        [data.STS[:mUpd, :mUpd], data.STY[:mUpd, :mUpd]],
        [data.STY[:mUpd, :mUpd].T, data.YTY[:mUpd, :mUpd]]
    ])
    ATg = np.block([data.S[:, :mUpd].T @ g, data.Y[:, :mUpd].T @ g])
    p = np.linalg.solve(Q, ATg)
    # p = scipy.linalg.solve(Q, ATg, assume_a='sym')
    Ap = data.S[:, :mUpd] @ p[:mUpd] + data.Y[:, :mUpd] @ p[mUpd:]
    d = 1 / gammah**2 * Ap - 1 / gammah * g
    return d
500705d0fd5cb5ccae7fa8dfeaa1548a65fa2203
8,219
import json

from rest_framework.response import Response


def canPlay(request):
    """
    Endpoint that returns the list of cards that can be played (for the Player).

    Example request body:
    {
        "cards_played": [
            {"card_name": "As", "value_non_atout": 0, "value_atout": 0, "id": "A"},
            {"card_name": "7s", "value_non_atout": 0, "value_atout": 0, "id": "7"},
            {"card_name": "8s", "value_non_atout": 0, "value_atout": 0, "id": "8"}
        ],
        "atout": "c",
        "opening_color": "s",
        "remaining_cards": [
            {"card_name": "7d", "value_non_atout": 0, "value_atout": 0, "id": "7"},
            {"card_name": "Kh", "value_non_atout": 4, "value_atout": 4, "id": "K"},
            {"card_name": "Ks", "value_non_atout": 4, "value_atout": 4, "id": "K"},
            {"card_name": "Ac", "value_non_atout": 11, "value_atout": 11, "id": "A"},
            {"card_name": "9c", "value_non_atout": 0, "value_atout": 14, "id": "9"}
        ]
    }
    """
    body = json.loads(request.body)
    cards_played = body['cards_played']
    remaining_cards = body['remaining_cards']
    opening_color = body['opening_color']
    atout = body['atout']
    can_play = []
    cards_atout = []
    order_no_a = ['7', '8', '9', 'J', 'Q', 'K', '10', 'A']  # plain-suit order
    order_a = ['7', '8', 'Q', 'K', '10', 'A', '9', 'J']     # trump order

    def higher_trumps(max_idx):
        """Trumps in hand beating max_idx; if none, any trump may be played."""
        higher = [e for e in cards_atout if order_a.index(e['idc']) > max_idx]
        return higher if higher else cards_atout

    if cards_played:
        if opening_color == atout:
            # Trump was led: must follow with a trump, overtrumping if possible.
            for x in remaining_cards:
                if opening_color in x['card_name']:
                    cards_atout.append(x)
            if not cards_atout:
                can_play = remaining_cards
            else:
                max_idx = order_a.index(cards_played[0]['idc'])
                for card in cards_played[1:]:
                    if atout in card['card_name'] and order_a.index(card['idc']) > max_idx:
                        max_idx = order_a.index(card['idc'])
                can_play = higher_trumps(max_idx)
        else:
            # Follow the opening color if possible.
            for x in remaining_cards:
                if opening_color in x['card_name']:
                    can_play.append(x)
            if not can_play:
                for x in remaining_cards:
                    if atout in x['card_name']:
                        cards_atout.append(x)
                if not cards_atout:
                    can_play = remaining_cards
                else:
                    # The player holds a trump; check who is currently master.
                    if len(cards_played) == 3:
                        if atout in cards_played[1]['card_name']:
                            max_idx = order_a.index(cards_played[1]['idc'])
                            if atout in cards_played[2]['card_name']:
                                if order_a.index(cards_played[2]['idc']) > max_idx:
                                    max_idx = order_a.index(cards_played[2]['idc'])
                                can_play = higher_trumps(max_idx)
                            else:
                                # Partner's trump holds the trick: play anything.
                                can_play = remaining_cards
                        else:
                            if atout in cards_played[2]['card_name']:
                                can_play = higher_trumps(order_a.index(cards_played[2]['idc']))
                            else:
                                if (order_no_a.index(cards_played[2]['idc']) < order_no_a.index(cards_played[1]['idc'])
                                        and order_no_a.index(cards_played[1]['idc']) > order_no_a.index(cards_played[0]['idc'])):
                                    can_play = remaining_cards  # partner is master
                                else:
                                    can_play = cards_atout      # must trump
                    elif len(cards_played) == 1:
                        can_play = cards_atout
                    else:  # two cards played
                        if atout in cards_played[1]['card_name']:
                            can_play = higher_trumps(order_a.index(cards_played[1]['idc']))
                        else:
                            if order_no_a.index(cards_played[1]['idc']) < order_no_a.index(cards_played[0]['idc']):
                                can_play = remaining_cards  # partner is master
                            else:
                                can_play = cards_atout      # must trump
    else:
        can_play = remaining_cards
    return Response(can_play)
b579e0e99eebed68fa55f8eeb287cb2cdf283de6
8,220
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Genie Aladdin component."""
    return True
22f6c6126d8d7b3ce7df124b144b9ccfb4fc30c2
8,221
import importlib.util


def _module_available(module_path: str) -> bool:
    """Testing if given module is available in your env

    >>> _module_available('os')
    True
    >>> _module_available('bla.bla')
    False
    """
    mods = module_path.split('.')
    assert mods, 'nothing given to test'
    # it has to be tested part by part, parent packages first
    for i in range(len(mods)):
        module_path = '.'.join(mods[:i + 1])
        if importlib.util.find_spec(module_path) is None:
            return False
    return True
6673bf25845af6c12494ffbec0dc8bb8ab950ff2
8,222
def _GetPathBeforeFinalDir(uri):
    """
    Returns the part of the path before the final directory component for the
    given URI, handling cases for file system directories, bucket, and bucket
    subdirectories. Example: for gs://bucket/dir/ we'll return 'gs://bucket',
    and for file://dir we'll return file://

    Args:
        uri: StorageUri.

    Returns:
        String name of above-described path, sans final path separator.
    """
    sep = uri.delim
    # If the source uri argument had a wildcard and wasn't expanded by the
    # shell, then uri.names_file() will always be true, so we check for
    # this case explicitly.
    assert ((not uri.names_file()) or ContainsWildcard(uri.object_name))
    if uri.names_directory():
        past_scheme = uri.uri[len('file://'):]
        if past_scheme.find(sep) == -1:
            return 'file://'
        else:
            return 'file://%s' % past_scheme.rstrip(sep).rpartition(sep)[0]
    if uri.names_bucket():
        return '%s://' % uri.scheme
    # Else it names a bucket subdir.
    return uri.uri.rstrip(sep).rpartition(sep)[0]
20cad56a858e8feccfd7154ecff33d9508a7ec80
8,223
import toml


def load_page_details(data, filename=None):
    """
    Raises ValueError with (filename, error)
    """
    try:
        options = toml.loads(data)
    except toml.TomlDecodeError as exc:
        raise ValueError(filename, exc)
    if not isinstance(options, dict):
        raise ValueError(filename, 'page details could not be parsed into a JSON object')
    return options
117bb7d84625475745a30522fda7dccf1bc5a487
8,224
def nested_render(cfg, fully_rendered_cfgs, replacements):
    """
    Template render the provided cfg by recursively replacing {{var}}'s with
    values from the current "namespace".

    The nested config is treated like nested namespaces where the inner
    variables are only available in current block and further nested blocks.

    Said the opposite way: the namespace with available vars that can be used
    includes the current block's vars and parent block vars.

    This means that you can do replacements for top-level (global namespaced)
    config vars anywhere, but you can only use inner configs within that block
    or further nested blocks.

    An example is worth a thousand words:
    ---------------------------------------------------------------------------------
    fence-config.yaml
    --------------------------------------------------------------------------------
    BASE_URL: 'http://localhost/user'
    OPENID_CONNECT:
      fence:
        api_base_url: 'http://other_fence/user'
        client_kwargs:
          redirect_uri: '{{BASE_URL}}/login/fence/login'
        authorize_url: '{{api_base_url}}/oauth2/authorize'
    THIS_WONT_WORK: '{{api_base_url}}/test'
    --------------------------------------------------------------------------------

    "redirect_uri" will become "http://localhost/user/login/fence/login"
        - BASE_URL is in the global namespace so it can be used in this nested cfg

    "authorize_url" will become "http://other_fence/user/oauth2/authorize"
        - api_base_url is in the current namespace, so it is available

    "THIS_WONT_WORK" will become "/test"
        - Why? api_base_url is not in the current namespace and so we cannot
          use that as a replacement. the configuration (instead of failing)
          will replace with an empty string

    Args:
        cfg (TYPE): Description
        fully_rendered_cfgs (TYPE): Description
        replacements (TYPE): Description

    Returns:
        dict: Configurations with template vars replaced
    """
    try:
        for key, value in cfg.iteritems():
            replacements.update(cfg)
            fully_rendered_cfgs[key] = {}
            fully_rendered_cfgs[key] = nested_render(
                value,
                fully_rendered_cfgs=fully_rendered_cfgs[key],
                replacements=replacements,
            )
            # new namespace, remove current vars (no longer available as replacements)
            for old_cfg, value in cfg.iteritems():
                replacements.pop(old_cfg, None)
        return fully_rendered_cfgs
    except AttributeError:
        # it's not a dict, so lets try to render it. But only if it's
        # truthy (which means there's actually something to replace)
        if cfg:
            t = Template(str(cfg))
            rendered_value = t.render(**replacements)
            try:
                cfg = yaml_load(rendered_value)
            except ScannerError:
                # it's not loading into yaml, so let's assume it's a string with special
                # chars such as: {}[],&*#?|:-<>=!%@\)
                #
                # in YAML, we have to "quote" a string with special chars.
                #
                # since yaml_load isn't loading from a file, we need to wrap the Python
                # str in actual quotes.
                cfg = yaml_load('"{}"'.format(rendered_value))
        return cfg
9958e792ef09aa7c88e4c8b7d29a61a8713927a2
8,225
import warnings
from re import sub


def element_by_atomic_number(atomic_number):
    """Search for an element by its atomic number

    Look up an element from a list of known elements by atomic number.
    Return None if no match found.

    Parameters
    ----------
    atomic_number : int
        Element atomic number that need to look for
        if a string is provided, only numbers are considered during the search

    Returns
    -------
    matched_element : element.Element
        Return an element from the periodic table if we find a match,
        otherwise raise GMSOError
    """
    if isinstance(atomic_number, str):
        atomic_number_trimmed = int(sub('[a-z -]', '', atomic_number.lower()).lstrip('0'))
        msg = ('Letters and spaces are not considered when searching by element '
               'atomic number. \n{} became {}'.format(atomic_number, atomic_number_trimmed))
        warnings.warn(msg)
    else:
        atomic_number_trimmed = atomic_number
    matched_element = atomic_dict.get(atomic_number_trimmed)
    if matched_element is None:
        raise GMSOError(f'Failed to find an element with atomic number {atomic_number_trimmed}')
    return matched_element
42a23d0bd2ce1391a74ee8b5d5f97aa5fc8b2d3f
8,226
import numpy as np
from tqdm import tqdm


def get_data_from_db(cursor):
    """
    Get data from the database given a query-instantiated cursor

    :param cursor: query-instantiated database cursor
    :return: tuple of labels and training data
    """
    training_data, labels = [], []
    cols = [desc[0] for desc in cursor.description]
    for record in tqdm(cursor, total=cursor.rowcount):
        record = dict(record)
        record['purposes'] = [purpose_to_english[p] for p in record['purposes']]
        # just duplicate for house_number and year of construction
        record['house_number_vec'] = record['house_number']
        record['year_of_construction_vec'] = record['year_of_construction']

        # one-hot encoding for house number addition
        if record['house_number_addition']:
            hna = np.zeros(shape=(len(record['house_number_addition']), len(VOCABULARY)))
            for idx, char in enumerate(record['house_number_addition']):
                hna[idx, VOCABULARY.index(char.lower())] = 1.
        else:
            hna = np.zeros(shape=(1, len(VOCABULARY)))
        record['house_number_addition_vec'] = hna

        # 'multi-hot' encoding for building purposes
        purposes = np.zeros(shape=(len(PURPOSES,)))
        for purpose in record['purposes']:
            purposes[PURPOSES.index(purpose)] = 1.
        record['purposes_vec'] = purposes

        # character-level vectorization of postal code
        pc = np.zeros((len(record['postal_code']), len(VOCABULARY)))
        for idx, char in enumerate(record['postal_code']):
            pc[idx, VOCABULARY.index(char.lower())] = 1.
        record['postal_code_vec'] = pc

        # building geometry vectorization
        geom = record['geometry_crs84']
        geom = vectorize_wkt(geom)
        record['geometry_vec'] = geom
        record['centroid_vec'] = vectorize_wkt(record['centroid_crs84'])[0, :2]

        # vectorization of neighbouring buildings
        neighbours = record['neighbouring_buildings_crs84']
        neighbours = vectorize_wkt(neighbours)
        record['neighbouring_buildings_vec'] = neighbours

        rd = record['recorded_date']
        record['recorded_date_vec'] = [rd.year, rd.month, rd.day, rd.weekday()]
        rgd = record['registration_date']
        record['registration_date_vec'] = [rgd.year, rgd.month, rgd.day, rgd.weekday()]

        training_data.append(record)
        labels.append({
            'energy_performance_index': record['energy_performance_index'],
            'energy_performance_label': record['energy_performance_label'],
            'energy_performance_vec': ENERGY_CLASSES.index(record['energy_performance_label'])
        })
    return training_data, labels
a3db9af7912fd38e9966f0c95639613cb4dac087
8,227
def parse(input_str, file_path=True):
    """
    Parse a GLM into an omf.feeder tree. This is so we can walk the tree,
    change things in bulk, etc.

    Input can be a file path or GLM string.
    """
    tokens = _tokenize_glm(input_str, file_path)
    return _parse_token_list(tokens)
2e53a6870baae3fa9bfb511b28baf73e697b44a0
8,228
import numpy as np


def pred(model, x_pred_scaled, scaler_y):
    """
    Predict

    :param model: model for prediction
    :param x_pred_scaled: scaled x values we need to predict for
    :param scaler_y: scaler for y values
    :return:
    """
    MAX_PREDICT_SIZE = 10000
    g_mean_full = g_std_full = None
    start = 0
    while start < len(x_pred_scaled):
        end = start + MAX_PREDICT_SIZE
        x_pred_scaled_slice = x_pred_scaled[start:end]
        g_mean_scaled, g_std_scaled = model_gpflow.predict_gpflow(model, x_pred_scaled_slice)
        g_mean = scaler_y.inverse_transform(g_mean_scaled)
        g_std = g_std_scaled * scaler_y.scale_
        if g_mean_full is None:
            g_mean_full = g_mean
            g_std_full = g_std
        else:
            g_mean_full = np.vstack((g_mean_full, g_mean))
            g_std_full = np.vstack((g_std_full, g_std))
        start = end
    return g_mean_full, g_std_full
7a43020a2b4817b3287c849bc0059027346bf5a5
8,229
from xml.etree import ElementTree


def metadataAbstractElementEmptyValuesTest3():
    """
    Empty value for unknown attribute.

    >>> doctestMetadataAbstractElementFunction(
    ...     testMetadataAbstractElementEmptyValue,
    ...     metadataAbstractElementEmptyValuesTest3(),
    ...     requiredAttributes=["required1"],
    ...     optionalAttributes=["optional1"])
    []
    """
    metadata = """<?xml version="1.0" encoding="UTF-8"?>
    <test required1="foo" optional1="foo" unknown1="" />
    """
    return ElementTree.fromstring(metadata)
4f9e0d33948a9ac1ab35bc9cf0c5c3274cfc9d41
8,230
import numpy as np


def orthonormal_initializer(input_size, output_size):
    """from https://github.com/patverga/bran/blob/32378da8ac339393d9faa2ff2d50ccb3b379e9a2/src/tf_utils.py#L154"""
    I = np.eye(output_size)
    lr = .1
    eps = .05 / (output_size + input_size)
    success = False
    tries = 0
    while not success and tries < 10:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        for i in range(100):
            QTQmI = Q.T.dot(Q) - I
            loss = np.sum(QTQmI**2 / 2)
            Q2 = Q**2
            Q -= lr * Q.dot(QTQmI) / (
                np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                tries += 1
                lr /= 2
                break
        success = True
    if success:
        print('Orthogonal pretrainer loss: %.2e' % loss)
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
    return Q.astype(np.float32)
11cc28d6342ed20699c96051a36199c3b8941381
8,231
from matplotlib.lines import Line2D
from matplotlib.patches import Circle


def stick_figure(ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ General function for drawing stick based parts (e.g., ribozyme and
    protease sites).
    """
    # Default options
    color = (0, 0, 0)
    start_pad = 2.0
    end_pad = 2.0
    x_extent = 5.0
    y_extent = 10.0
    linestyle = '-'

    linetype = ""
    shapetype = ""
    if type == "Ribozyme":
        linetype = 'dash'
        headgroup = 'O'
    elif type == "Protease":
        linetype = 'dash'
        headgroup = 'X'
    elif type == "ProteinStability":
        linetype = 'solid'
        headgroup = 'O'
    elif type == "Ribonuclease":
        linetype = 'solid'
        headgroup = 'X'

    # Reset defaults if provided
    if opts != None:
        if 'color' in opts.keys():
            color = opts['color']
        if 'start_pad' in opts.keys():
            start_pad = opts['start_pad']
        if 'end_pad' in opts.keys():
            end_pad = opts['end_pad']
        if 'x_extent' in opts.keys():
            x_extent = opts['x_extent']
        if 'y_extent' in opts.keys():
            y_extent = opts['y_extent']
        if 'linestyle' in opts.keys():
            linestyle = opts['linestyle']
        if 'linewidth' in opts.keys():
            linewidth = opts['linewidth']
        if 'scale' in opts.keys():
            scale = opts['scale']

    # Check direction add start padding
    final_end = end
    final_start = prev_end

    if start > end:
        start = prev_end + end_pad + x_extent
        end = prev_end + end_pad
        final_end = start + start_pad
        rbs_center = (end + ((start - end) / 2.0), -y_extent)
        c1 = Circle(rbs_center, x_extent / 2.0, linewidth=linewidth, edgecolor=color,
                    facecolor=(1, 1, 1), zorder=8)
        x1 = Line2D([start, end], [-y_extent * 1.25, -y_extent / 1.5],
                    linewidth=linewidth, color=color, zorder=12, linestyle='-')
        x2 = Line2D([start, end], [-y_extent / 1.5, -y_extent * 1.25],
                    linewidth=linewidth, color=color, zorder=12, linestyle='-')
        dash1 = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                       [0, -y_extent / 4], linewidth=linewidth, color=color,
                       zorder=8, linestyle=linestyle)
        dash2 = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                       [-y_extent / 2, -y_extent + (x_extent / 2.0)],
                       linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
        solidO = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                        [0, -y_extent + (x_extent / 2.0)], linewidth=linewidth,
                        color=color, zorder=8, linestyle=linestyle)
        solidX = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                        [0, -y_extent], linewidth=linewidth, color=color,
                        zorder=8, linestyle=linestyle)
        if headgroup == "O" and linetype == "dash":
            ax.add_patch(c1)
            ax.add_line(dash1)
            ax.add_line(dash2)
        elif headgroup == "X" and linetype == "dash":
            ax.add_line(x1)
            ax.add_line(x2)
            ax.add_line(dash1)
            ax.add_line(dash2)
        elif headgroup == "O" and linetype == "solid":
            ax.add_patch(c1)
            ax.add_line(solidO)
        elif headgroup == "X" and linetype == "solid":
            ax.add_line(x1)
            ax.add_line(x2)
            ax.add_line(solidX)
    else:
        start = prev_end + start_pad
        end = start + x_extent
        final_end = end + end_pad
        rbs_center = (start + ((end - start) / 2.0), y_extent)
        c1 = Circle(rbs_center, x_extent / 2.0, linewidth=linewidth, edgecolor=color,
                    facecolor=(1, 1, 1), zorder=8)
        x1 = Line2D([start, end], [y_extent * 1.25, y_extent / 1.5],
                    linewidth=linewidth, color=color, zorder=12, linestyle='-')
        x2 = Line2D([start, end], [y_extent / 1.5, y_extent * 1.25],
                    linewidth=linewidth, color=color, zorder=12, linestyle='-')
        dash1 = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                       [0, y_extent / 4], linewidth=linewidth, color=color,
                       zorder=8, linestyle=linestyle)
        dash2 = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                       [y_extent / 2, y_extent - (x_extent / 2.0)],
                       linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
        solidO = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                        [0, y_extent - (x_extent / 2.0)], linewidth=linewidth,
                        color=color, zorder=8, linestyle=linestyle)
        solidX = Line2D([end + ((start - end) / 2.0), end + ((start - end) / 2.0)],
                        [0, y_extent], linewidth=linewidth, color=color,
                        zorder=8, linestyle=linestyle)
        if headgroup == 'O' and linetype == 'dash':
            ax.add_patch(c1)
            ax.add_line(dash1)
            ax.add_line(dash2)
        elif headgroup == "X" and linetype == "dash":
            ax.add_line(x1)
            ax.add_line(x2)
            ax.add_line(dash1)
            ax.add_line(dash2)
        elif headgroup == "O" and linetype == "solid":
            ax.add_patch(c1)
            ax.add_line(solidO)
        elif headgroup == "X" and linetype == "solid":
            ax.add_line(x1)
            ax.add_line(x2)
            ax.add_line(solidX)

    if opts != None and 'label' in opts.keys():
        if final_start > final_end:
            write_label(ax, opts['label'], final_end + ((final_start - final_end) / 2.0), opts=opts)
        else:
            write_label(ax, opts['label'], final_start + ((final_end - final_start) / 2.0), opts=opts)

    if final_start > final_end:
        return prev_end, final_start
    else:
        return prev_end, final_end
68ae3194ad1dd38e0e18f8da3b0b5f1b0e22071a
8,232
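# A minimal usage sketch for stick_figure, assuming matplotlib is available
# and the imports added above; the coordinates and opts are illustrative only
# (no 'label' is passed, since write_label lives elsewhere in the module).
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
opts = {'color': (0.8, 0.1, 0.1)}
prev_end, final_end = stick_figure(ax, 'Ribozyme', 0, start=10, end=20,
                                   prev_end=10, scale=1.0, linewidth=1.0,
                                   opts=opts)
ax.set_xlim(0, 40)
ax.set_ylim(-15, 15)
plt.show()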
def display_datetime(datetime_str, time_zone=None, verbose=True):
    """Return a formatted datetime with TZ (if provided) or 'Error (Missing)'.

    >>> print(datetime.datetime.utcnow().strftime("%Y/%m/%d %a %I:%M %p"))
    2019/05/19 Sun 01:10 AM
    """
    if datetime_str:  # and type(datetime_str) == datetime.datetime.now():
        if verbose:
            return (f'{datetime_str.strftime("%Y/%m/%d %a %I:%M %p")}'
                    f'{f" ({time_zone})" if time_zone else ""}')
        else:
            return (f'{datetime_str.strftime("%a %I:%M %p")}'
                    f'{f" ({time_zone})" if time_zone else ""}')
    else:
        return 'Error (Missing)'
45caa488688e790ae19f8f3f2cda2cb0f250b1fd
8,233
import torch


def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
    """Creates an autoregressive channel mask.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
        in_channels: int
            Number of input channels to layer.
        out_channels: int
            Number of output channels of layer.
        data_channels: int
            Number of channels in the input data, e.g. 3 for RGB images
            (default = 3).

    Output:
        mask: torch.FloatTensor
            Shape (out_channels, in_channels). A mask with 0 in places for
            masked elements.
    """
    in_factor = in_channels // data_channels + 1
    out_factor = out_channels // data_channels + 1

    base_mask = torch.ones([data_channels, data_channels])
    if mask_type == 'A':
        base_mask = base_mask.tril(-1)
    else:
        base_mask = base_mask.tril(0)

    mask_p1 = torch.cat([base_mask] * in_factor, dim=1)
    mask_p2 = torch.cat([mask_p1] * out_factor, dim=0)
    mask = mask_p2[0:out_channels, 0:in_channels]
    return mask
772fa71f63d2f31c80966db0b0eb43a70ac5e9a9
8,234
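# A quick shape check for mask_channels on a small RGB layer. The tiling
# means output channel i may see input channel j only when (j % 3) < (i % 3)
# for type 'A', or (j % 3) <= (i % 3) for type 'B'.
mask_a = mask_channels('A', in_channels=6, out_channels=6, data_channels=3)
mask_b = mask_channels('B', in_channels=6, out_channels=6, data_channels=3)
print(mask_a)  # (6, 6) mask, diagonal channel blocks masked out
print(mask_b)  # (6, 6) mask, diagonal channel blocks kept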
import textwrap


def dedent(text):
    """Remove all common indentation from every line but the 0th.

    This will avoid getting <code> blocks when rendering text via markdown.
    Ignoring the 0th line will also allow the 0th line not to be aligned.

    Args:
        text: A string of text to dedent.

    Returns:
        String dedented by above rules.

    For example:
        assertEquals("bar\nline1\nline2", dedent("bar\n  line1\n  line2"))
        assertEquals("bar\nline1\nline2", dedent("  bar\n  line1\n  line2"))
        assertEquals("bar\n  line1\nline2", dedent("  bar\n    line1\n  line2"))
    """
    text = textwrap.dedent(text)
    text_lines = text.split('\n')
    text_not_first = "\n".join(text_lines[1:])
    text_not_first = textwrap.dedent(text_not_first)
    text = text_lines[0] + "\n" + text_not_first
    return text
b450a873c4c2b667d10c66985d19f8057aa205f9
8,235
import numpy as np


def dpsplit(n, k, sig):
    """Perform the dynamic-programming optimal segmentation, using the sig
    function to determine the cost of a segment. sig(i, j) is the cost of
    the i..j segment; segment costs are added together.
    """
    # Set up the tracking tables
    K = k + 1
    N = n
    segtable = np.zeros((N, K)) + np.nan
    segtable[:, 0] = [sig(0, j + 1) for j in range(N)]
    segindtable = np.zeros((N, K), dtype='int') - 1
    # Fill up the table in a clever order
    for k in range(1, K):
        for j in range(k, N):
            # Fill the (j, k) element
            ans = min(((segtable[l, k - 1] + sig(l + 1, j + 1), l + 1)
                       for l in range(k - 1, j)))
            segtable[j, k] = ans[0]
            segindtable[j, k] = ans[1]
    # Read out the path
    current_pointer = segindtable[-1, K - 1]
    path = [current_pointer]
    for k in range(K - 2, 0, -1):
        current_pointer = segindtable[current_pointer - 1, k]
        path.append(current_pointer)
    return sorted(path + [N]), segtable[-1, K - 1]
db1513ae0a4725b63e62b102dc2c5fdd77fd4ceb
8,237
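# A small demonstration of dpsplit, assuming the Python 3 form above:
# segment a 1-D signal into k+1 pieces using within-segment variance
# (times segment length) as the cost.
import numpy as np

signal = np.array([1.0, 1.1, 0.9, 5.0, 5.2, 4.8, 9.0, 9.1])

def sig(i, j):
    # cost of the half-open segment signal[i:j]
    return signal[i:j].var() * (j - i)

path, total_cost = dpsplit(len(signal), 2, sig)
print(path)  # expected roughly [3, 6, 8]: boundaries between the three levels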
from typing import List


def get_wind_tiles() -> List[Tile]:
    """Return a list of the four wind tiles."""
    return [Tile(Suit.JIHAI.value, Jihai.TON.value),
            Tile(Suit.JIHAI.value, Jihai.NAN.value),
            Tile(Suit.JIHAI.value, Jihai.SHAA.value),
            Tile(Suit.JIHAI.value, Jihai.PEI.value)]
469ec29795291bb8345fa3beccbbf2c6d4bb3101
8,238
import datetime


def datetime_now_filename_string():
    """Create a string representation of now() for use as part of the MHL filename."""
    return datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d_%H%M%S")
37a733ddd93ca1bc4eed82e920222c644c494fcd
8,240
import inspect
from pathlib import Path

import zarr
from tqdm import tqdm


def generate_simulation_dataset(path, runs, **kwargs):
    """Generate and save a simulation dataset.

    Parameters
    ----------
    path : str
        Root path where simulation data will be stored.
    runs : int, array
        If int, the number of runs to use. If array, it must have one
        dimension more than the simulation grid.
    kwargs :
        `run_multiple_sources` keyword arguments.

    Returns
    -------
    dataset : zarr.hierarchy.Group
        Simulation dataset.
    """
    # Convert path to pathlib path
    path = Path(path)

    # Create dataset
    dataset = zarr.open(path.as_posix(), mode='w')

    if not isinstance(runs, int):
        full_speed_array = runs
        runs = len(runs)
    else:
        full_speed_array = None

    # Add dataset attributes
    dataset.attrs['waver'] = True
    dataset.attrs['dataset'] = True
    dataset.attrs['runs'] = runs

    # Add simulation attributes based on kwargs and defaults
    parameters = inspect.signature(run_multiple_sources).parameters
    for param, value in parameters.items():
        if param in kwargs:
            dataset.attrs[param] = kwargs[param]
        else:
            dataset.attrs[param] = value.default

    # Initialize speed and wave arrays
    speed_array = None
    wave_array = None

    # Move through runs
    for run in tqdm(range(runs), leave=False):
        if full_speed_array is not None:
            kwargs['speed'] = full_speed_array[run]

        wave, speed = run_multiple_sources(**kwargs)

        if speed_array is None:
            speed_array = dataset.zeros('speed', shape=(runs,) + speed.shape,
                                        chunks=(1,) + (64,) * speed.ndim)
        if wave_array is None:
            wave_array = dataset.zeros('wave', shape=(runs,) + wave.shape,
                                       chunks=(1,) + (64,) * wave.ndim)

        speed_array[run] = speed
        wave_array[run] = wave

    return dataset
8383f42cfe2604e5f82761bb351b6cf4f16f33aa
8,241
def num_sites(sequence, rule, **kwargs):
    """Count the number of sites where `sequence` can be cleaved using the
    given `rule` (e.g. number of miscleavages for a peptide).

    Parameters
    ----------
    sequence : str
        The sequence of a polypeptide.
    rule : str or compiled regex
        A regular expression describing the site of cleavage. It is
        recommended to design the regex so that it matches only the residue
        whose C-terminal bond is to be cleaved. All additional requirements
        should be specified using `lookaround assertions
        <http://www.regular-expressions.info/lookaround.html>`_.
    labels : list, optional
        A list of allowed labels for amino acids and terminal modifications.

    Returns
    -------
    out : int
        Number of cleavage sites.
    """
    return len(_cleave(sequence, rule, **kwargs)) - 1
dc0840e33206c9db7058a7257a60c59bee0403f8
8,242
from collections import defaultdict
from dis import get_instructions


def get_packages(code: str) -> defaultdict:
    """Extract the packages that were imported in the file being inspected.

    Source for this code: https://stackoverflow.com/questions/2572582/

    Example:
        input: 'from collections import Counter\n import kivy\n from stats import median as stats_median\n'
        output: defaultdict(<class 'list'>,
                    {'import_name': ['collections', 'kivy', 'stats'],
                     'import_from': ['Counter', 'median']})
    """
    instructions = get_instructions(code)
    import_instructions = [i for i in instructions if "IMPORT" in i.opname]

    imports = defaultdict(list)
    for instr in import_instructions:
        imports[instr.opname.lower()].append(instr.argval)

    return imports
977f20e2d3c12993ef26ff8b199f943fb153c79b
8,243
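# A usage sketch for get_packages; get_instructions here is assumed to be
# dis.get_instructions, which accepts a source-code string.
src = 'from collections import Counter\nimport os\n'
print(get_packages(src))
# defaultdict(<class 'list'>,
#             {'import_name': ['collections', 'os'], 'import_from': ['Counter']})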
def beam_name():
    """Session level fixture for beam path."""
    return str(beam_path)
a702d91c62024685d14125123ead41a2a4e38942
8,244
import itertools

import jax
import jax.numpy as jnp


def get_mv_sandwich(a_blade_indices, b_blade_indices, signature, prod="gp"):
    """a b ~a"""
    out_blade_indices = []
    out_signs = []
    out_indices = []
    indices_a = []
    indices_b = []
    indices_a_r = []
    blade_to_index = {}

    for (i_a, index_a), (i_b, index_b), (i_a_r, index_a_r) in itertools.product(
        enumerate(a_blade_indices),
        enumerate(b_blade_indices),
        enumerate(reverse_indices(a_blade_indices))
    ):
        out_sign_1, out_index_1 = reduce_bases(index_a, index_b, signature)
        out_sign_2, out_index = reduce_bases(out_index_1, index_a_r, signature)
        out_sign = out_sign_1 * out_sign_2
        if out_sign != 0 and (
            prod == "gp"
            or (prod == "op" and len(out_index) == abs(len(index_a) + len(index_b)))
            or (prod == "ip" and len(out_index) == abs(len(index_a) - len(index_b)))
        ):
            out_signs.append(out_sign)
            indices_a.append(i_a)
            indices_b.append(i_b)
            indices_a_r.append(i_a_r)
            if out_index in blade_to_index:
                out_indices.append(blade_to_index[out_index])
            else:
                blade_to_index[out_index] = len(blade_to_index)
                out_indices.append(blade_to_index[out_index])
                out_blade_indices.append(out_index)

    if len(out_indices) == 0:
        def _values_mv_sandwich(a_values, b_values):
            return jnp.zeros((), dtype=jnp.float32)
    else:
        out_size = max(out_indices) + 1

        def _values_mv_sandwich(a_values, b_values):
            out_batch_shape = jnp.broadcast_shapes(
                a_values.shape[1:], b_values.shape[1:]
            )
            out_values = jnp.zeros([out_size, *out_batch_shape], dtype=jnp.float32)
            for index_a, index_b, index_a_r, out_sign, out_index in zip(
                indices_a, indices_b, indices_a_r, out_signs, out_indices
            ):
                out_values = out_values.at[out_index].add(
                    out_sign * a_values[index_a] * b_values[index_b]
                    * a_values[index_a_r]
                )
            return out_values

    _values_mv_sandwich_jit = jax.jit(_values_mv_sandwich)
    return _values_mv_sandwich_jit, tuple(out_blade_indices)
8e30f31de944bd6aa19e60fe7a93aceb8ccb73ef
8,245
def ldns_dnssec_create_nsec3(*args):
    """LDNS buffer."""
    return _ldns.ldns_dnssec_create_nsec3(*args)
653d899f7d30e1e272c0a7026e4383e191b3e78f
8,246
def S_difference_values(_data_lista, _data_listb):
    """Return the element-wise differences between two equally long data
    sample lists; an empty list is returned when the lengths differ.
    """
    d_data = []
    dsa = len(_data_lista)
    dsb = len(_data_listb)
    if dsa != dsb:
        return []
    for i in range(dsa):
        d_data.append(_data_lista[i] - _data_listb[i])
    return d_data
40ec82cb7ef53d5e227b3287a9c1d08e78112e09
8,247
def parseFixedZone(s):
    """Convert a +hhmm or -hhmm zone suffix.

    [ s is a string ->
        if s is a time zone suffix of the form "+hhmm" or "-hhmm" ->
          return that zone information as an instance of a class
          that inherits from datetime.tzinfo
        else -> raise SyntaxError ]
    """
    #-- 1 --
    if s.startswith('+'):
        sign = 1
    elif s.startswith('-'):
        sign = -1
    else:
        raise SyntaxError("Expecting zone modifier as {0}hhmm: "
                          "'{1}'".format(s[0], s))

    #-- 2 --
    # [ if s[1:] matches HHMM_PAT ->
    #     hours := the HH part as an int
    #     minutes := the MM part as an int
    #   else -> raise SyntaxError ]
    rawHHMM = s[1:]
    m = HHMM_PAT.match(rawHHMM)
    if m is None:
        raise SyntaxError("Expecting zone modifier as {0}HHMM: "
                          "'{1}'".format(s[0], s))
    else:
        hours = int(rawHHMM[:2])
        minutes = int(rawHHMM[2:])

    #-- 3 --
    return FixedZone(sign * hours, sign * minutes, s)
681b5ad02f228ee40b099a461131de42309e58f0
8,248
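# parseFixedZone relies on module-level HHMM_PAT and FixedZone that are not
# shown in this record; a minimal sketch of what they might look like:
import re
import datetime

HHMM_PAT = re.compile(r'\d{4}$')

class FixedZone(datetime.tzinfo):
    def __init__(self, hours, minutes, name):
        self._offset = datetime.timedelta(hours=hours, minutes=minutes)
        self._name = name
    def utcoffset(self, dt): return self._offset
    def dst(self, dt): return datetime.timedelta(0)
    def tzname(self, dt): return self._name

print(parseFixedZone('+0530').utcoffset(None))  # 5:30:00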
import numpy as np
import matplotlib.pyplot as plt


def tica_eigenvalues_plot(tica, num=12, plot_file=None):
    """Plot the highest eigenvalues over the number of time-lagged
    independent components.

    Parameters
    ----------
    tica : TICA obj
        Time-lagged independent components information.
    num : int, default = 12
        Number of eigenvalues to plot.
    plot_file : str, optional, default = None
        Path and name of the file to save the plot.
    """
    # Plot eigenvalues over component numbers.
    fig, ax = plt.subplots(1, 1, figsize=[4, 3], dpi=300)
    componentnr = np.arange(num) + 1
    eigenvalues = tica.eigenvalues[:num]
    ax.bar(componentnr, eigenvalues)
    ax.set_xlabel('component number')
    ax.set_ylabel('eigenvalue')
    fig.tight_layout()
    # Save the figure to a file.
    if plot_file:
        fig.savefig(plot_file, dpi=300)
    return componentnr, eigenvalues
707ebbacf0dc90e96760ae26d316da4c27bdd997
8,249
import numpy as np
from sklearn import model_selection


def _split_train_test(features, labels, train_set, random_seed):
    """Split the dataset into training and test sets.

    Parameters
    ----------
    features : pandas.DataFrame
        Features of the dataset events.
    labels : pandas.DataFrame
        Labels of the dataset events.
    train_set : {float, list-like}
        If float, it is the fraction of objects that will be used as
        training set. If list, it is the IDs of the objects to use as
        training set.
    random_seed : {int, RandomState instance}
        Random seed or random state instance to use. It allows reproducible
        results.

    Returns
    -------
    X_train : pandas.DataFrame
        Features of the events with which to train the classifier.
    X_test : pandas.DataFrame
        Features of the events with which to test the classifier.
    y_train : pandas.core.series.Series
        Labels of the events with which to train the classifier.
    y_test : pandas.core.series.Series
        Labels of the events with which to test the classifier.
    """
    if np.isscalar(train_set):  # `train_set` was the size of the training set
        X_train, X_test, y_train, y_test = model_selection.train_test_split(
            features, labels, train_size=train_set, random_state=random_seed)
    else:  # `train_set` was a list of object names
        X_train = features.loc[train_set]
        y_train = labels.loc[train_set]
        is_not_train_set = ~features.index.isin(train_set)
        X_test = features[is_not_train_set]
        y_test = labels[is_not_train_set]
    return X_train, X_test, y_train, y_test
67210e0462cdd4be58f5446b6220f921aa8c4ea0
8,251
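# A toy run of _split_train_test, assuming pandas, numpy and scikit-learn
# are importable; the frame contents are illustrative only.
import pandas as pd

features = pd.DataFrame({'a': [1, 2, 3, 4]}, index=['e1', 'e2', 'e3', 'e4'])
labels = pd.Series([0, 1, 0, 1], index=features.index)

# Fractional split (half of the events go to training)
X_tr, X_te, y_tr, y_te = _split_train_test(features, labels, 0.5, random_seed=0)
# Explicit ID split
X_tr, X_te, y_tr, y_te = _split_train_test(features, labels, ['e1', 'e3'],
                                           random_seed=0)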
from nacl.public import PrivateKey


def generate_enc_keypair():
    """Generate a Curve25519 keypair.

    :returns tuple: A byte pair containing the encryption (public) key and
        decryption (private) key.
    """
    private_key = PrivateKey.generate()
    return private_key.public_key.encode(), private_key.encode()
c2ba00b6463d7ab1708dcc43d9cafba2d11af0c6
8,253
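# A usage sketch with PyNaCl, assuming the import shown above; the raw bytes
# can be rebuilt into key objects for a Box-based round trip.
from nacl.public import PrivateKey, PublicKey, Box

alice_enc, alice_dec = generate_enc_keypair()
bob_enc, bob_dec = generate_enc_keypair()
box = Box(PrivateKey(alice_dec), PublicKey(bob_enc))
ciphertext = box.encrypt(b'hello')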
from typing import List, Tuple


def filter_molecular_components(
    components: List[Component],
) -> Tuple[List[Component], List[Component]]:
    """Separate a list of components into molecular and non-molecular components.

    Args:
        components: A list of structure components, generated using
            :obj:`pymatgen.analysis.dimensionality.get_structure_components`.

    Returns:
        The filtered components as a tuple of
        ``(molecular_components, other_components)``.
    """
    molecular_components = [c for c in components if c["dimensionality"] == 0]
    other_components = [c for c in components if c["dimensionality"] != 0]
    return molecular_components, other_components
72a43a5195ef3d35ca8216225dcae7f699c7bbd5
8,254
import types


def argument(name, type):
    """Set the type of a command argument at runtime.

    This is useful for more specific types such as mitmproxy.types.Choice,
    which we cannot annotate directly as mypy does not like that.
    """
    def decorator(f: types.FunctionType) -> types.FunctionType:
        assert name in f.__annotations__
        f.__annotations__[name] = type
        return f
    return decorator
8f93c8e8cd4289d2b4747feb93ecfe3df74350f7
8,255
async def async_google_actions_request_sync(cloud):
    """Request a Google Actions sync."""
    return await cloud.websession.post(
        f"{cloud.google_actions_report_state_url}/request_sync",
        headers={AUTHORIZATION: f"Bearer {cloud.id_token}"},
    )
5d75e4b67bc04878108066660b0f43939a1eab4e
8,256
def convert_time_string_to_secs(string: str) -> int:
    """Take a string in the format '1h30m25s' and convert it to an integer
    number of seconds.

    This function uses the module-level compiled regular expression
    `regexp_time` for matching the string.
    """
    match = regexp_time.match(string)
    if not match:
        raise ValueError("String {0} has an invalid representation".format(string))

    h, m, s, ms, us = match.groups()

    h = int(h) if h else 0
    m = int(m) if m else 0
    s = int(float(s)) if s else 0

    total_time_seconds = h * 3600 + m * 60 + s
    return total_time_seconds
b520fde06640cd3d22a6c619031633bf21383687
8,257
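# convert_time_string_to_secs depends on a module-level compiled pattern
# (regexp_time) not shown in this record; one plausible definition, matching
# forms like '1h30m25s', is sketched here.
import re

regexp_time = re.compile(
    r'(?:(\d+)h)?(?:(\d+)m)?(?:(\d+(?:\.\d+)?)s)?(?:(\d+)ms)?(?:(\d+)us)?$')

print(convert_time_string_to_secs('1h30m25s'))  # 5425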
def polar_cube(c, index, n=512, interp='cubic'):
    """VIMS cube polar projected.

    Parameters
    ----------
    c : pyvims.VIMS
        Cube to interpolate.
    index : int, float, str, list, tuple
        VIMS band or wavelength to plot.
    n : int, optional
        Number of pixels for the grid interpolation.
    interp : str, optional
        Interpolation method.
    """
    # Pixel data
    data = c[index]

    # Choose which pole to display
    n_pole = c.sc_lat > 0

    # Pixel positions in polar projection
    pixels = polar_proj(c.ground_lon, c.ground_lat, n_pole=n_pole)

    # Contour positions in polar projection
    contour = polar_proj(*c.clonlat, n_pole=n_pole)

    # Interpolate data (with mask)
    z, grid, extent = polar_interp(pixels, data, contour, n=n, method=interp)

    return z, grid, extent, pixels, contour, n_pole
6a02932e8685a1cdd43e6831b2a3544bd903a40b
8,258
import gzip
from distutils.version import LooseVersion

import nibabel
from nibabel import gifti
from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter


def _load_surf_files_gifti_gzip(surf_file):
    """Load gzipped surface-data Gifti files.

    This function is used by load_surf_mesh and load_surf_data for
    extracting gzipped files.

    Part of the code can be removed while bumping nibabel 2.0.2.
    """
    with gzip.open(surf_file) as f:
        as_bytes = f.read()
    if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
        parser = gifti.GiftiImage.parser()
        parser.parse(as_bytes)
        gifti_img = parser.img
    else:
        parser = ParserCreate()
        parser.buffer_text = True
        out = Outputter()
        parser.StartElementHandler = out.StartElementHandler
        parser.EndElementHandler = out.EndElementHandler
        parser.CharacterDataHandler = out.CharacterDataHandler
        parser.Parse(as_bytes)
        gifti_img = out.img
    return gifti_img
2fdc083a208f1a288b7d99bd3863bff22d36bf50
8,259
import platform


def get_platform_system():
    """Return platform.system().

    The platform module contains many regexps, so importing it is slow;
    import it only when required.
    """
    return platform.system()
2531f1883d5acd0c192c0061d7cbf29637197706
8,260
import sympy
from sympy import Integer, Float, Rational


def factorial(n):
    """Stop sympy from blindly calculating factorials no matter how large.

    If 'n' is a number of some description, ensure that it is smaller than
    a cutoff, otherwise sympy will simply evaluate it, no matter how long
    that may take to complete!
    - 'n' should be a sympy object that sympy.factorial(...) can use.
    """
    if isinstance(n, (Integer, Float, Rational)) and n > 50:
        raise ValueError("[Factorial]: Too large integer to compute factorial effectively!")
    else:
        return sympy.factorial(n)
73dc223df2b23a93aafb0ec2b897f1668869e07a
8,262
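# A usage sketch for the guarded factorial, assuming the sympy imports above.
from sympy import Integer

print(factorial(Integer(10)))  # 3628800
try:
    factorial(Integer(100))    # above the cutoff of 50
except ValueError as exc:
    print(exc)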
import re


def getSupplier(num):
    """Get the supplier for a card number.

    Attributes:
        @num: card number
    """
    supplier = str()
    for key, value in suppliers.items():
        if bool(re.match(value, num)):
            supplier = key
            break
    if supplier == "":
        supplier = "Unknown"
    return supplier
2572a0595d03cc3056b1155f8a3f0b007ec65b9e
8,263
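# getSupplier depends on a module-level 'suppliers' mapping of name -> regex
# that is not shown in this record; an illustrative, non-exhaustive version:
suppliers = {
    'Visa': r'^4\d{12}(\d{3})?$',
    'MasterCard': r'^5[1-5]\d{14}$',
}
print(getSupplier('4111111111111111'))  # Visa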
from typing import Optional

import torch


def load_torch_hub_model(repo: str, model: str, *args, **kwargs):
    """Try to load a torch hub model and handle the different exceptions
    that could be raised.

    Args:
        repo: The GitHub repository containing the models.
        model: The model name to download.

    The maximum number of download attempts is taken from the module-level
    constant TORCH_HUB_DOWNLOAD_MAX_RETRIES.

    Returns:
        The downloaded torch model.
    """
    error: Optional[Exception] = None
    for _ in range(TORCH_HUB_DOWNLOAD_MAX_RETRIES + 1):
        try:
            try:
                return torch.hub.load(
                    repo,
                    model,
                    *args,
                    **kwargs,
                )
            except RuntimeError:
                # A stale cache can cause spurious failures; retry with a
                # forced re-download.
                return torch.hub.load(
                    repo,
                    model,
                    *args,
                    **kwargs,
                    force_reload=True,
                )
        except Exception as e:
            error = e

    assert error is not None
    raise error
3cc928f1026d276290ed97360a0cfebbcde82bb8
8,264
import numpy as np
from sklearn.metrics import pairwise_distances


def compute_medoid(data):
    """Get the medoid of the data.

    Parameters
    ----------
    data : ndarray
        Data points.

    Returns
    -------
    medoid : ndarray
        The medoid, i.e. the point with minimal total distance to all others.
    """
    dist_mat = pairwise_distances(data)
    return data[np.argmin(dist_mat.sum(axis=0))]
3fd071cd6c48566caa3a52ead26f06621682703d
8,265
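# A toy run of compute_medoid, assuming the numpy/scikit-learn imports above.
import numpy as np

points = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 10.0]])
print(compute_medoid(points))  # [1. 0.] -- smallest total distance to the rest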
from math import pi

import numpy as np


def int_sphere(fx, xgrid):
    r"""Compute integrals over the sphere defined by the logarithmic grid
    provided as input.

    Parameters
    ----------
    fx : array_like
        The function (array) to be integrated.
    xgrid : ndarray
        The logarithmic radial grid.

    Returns
    -------
    I_sph : float
        The value of the integral.

    Notes
    -----
    The integral formula is given by

    .. math:: I = 4 \pi \int \mathrm{d}x \, e^{3x} f(x)
    """
    func_int = 4.0 * pi * np.exp(3.0 * xgrid) * fx
    I_sph = np.trapz(func_int, xgrid)
    return I_sph
d92962cde5200f0c25d8bd0e1011c969e2287125
8,266
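# A sanity check for int_sphere, assuming the imports above: integrating
# f(r) = 1 over a ball of radius R on a logarithmic grid x = log(r) should
# give approximately 4/3 * pi * R^3.
import numpy as np

xgrid = np.linspace(np.log(1e-4), np.log(2.0), 2000)  # R = 2
fx = np.ones_like(xgrid)
print(int_sphere(fx, xgrid), 4.0 / 3.0 * np.pi * 2.0**3)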
from flask import Flask, make_response, redirect, send_from_directory


def run_webserver(destination_root_dir):
    """Run a local web server serving the static files under
    `destination_root_dir`.
    """
    if destination_root_dir.startswith('/'):
        destination_root_dir = destination_root_dir[1:]
    if destination_root_dir.endswith('/'):
        destination_root_dir = destination_root_dir[:-1]

    app = Flask(__name__)

    @app.route('/')
    @app.route('/<path:filename>')
    def serve_static_html(filename='index.html'):
        """Serve static HTML files.

        :type filename: str
        :param filename: Path to the static HTML file.
        """
        if filename.startswith(destination_root_dir):
            filename = filename.replace('{}/'.format(destination_root_dir), '')
            return redirect('/{}'.format(filename))

        response = make_response(
            send_from_directory('/{}'.format(destination_root_dir), filename))
        response.cache_control.no_cache = True
        return response

    app.run()
df5d26ae754009135061abb4a1a2d1cad0937e97
8,268