def decode_hint(hint: int) -> str:
    """Decodes integer hint as a string.

    The format is:
        ⬜ (GRAY)   -> .
        🟨 (YELLOW) -> ?
        🟩 (GREEN)  -> *

    Args:
        hint: An integer representing the hint.

    Returns:
        A string representing the hint.
    """
    hint_str = []
    for _ in range(_WORD_LENGTH):
        hint_chr = hint % 3
        hint //= 3
        if hint_chr == 0:
            hint_str.append(_HINT_NOT_IN_ANY_SPOT)
        elif hint_chr == 1:
            hint_str.append(_HINT_WRONG_SPOT)
        else:
            hint_str.append(_HINT_CORRECT_SPOT)
    return ''.join(hint_str[::-1])
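Usage sketch, with hypothetical values for the module constants _WORD_LENGTH and the three hint characters (inferred from the function body, not confirmed by the source):

# Hypothetical module constants assumed by decode_hint.
_WORD_LENGTH = 5
_HINT_NOT_IN_ANY_SPOT = '.'
_HINT_WRONG_SPOT = '?'
_HINT_CORRECT_SPOT = '*'

# 191 = 2*3^4 + 1*3^3 + 0*3^2 + 0*3 + 2, i.e. base-3 digits 2,1,0,0,2
print(decode_hint(191))  # -> '*?..*'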
4180b847cd252a1e3c762431327b1b6d359dac3d
8,962
import numpy as np


def is_symmetric(arr, i_sym=True, j_sym=True):
    """
    Takes in an array of shape (n, m) and checks if it is symmetric

    Parameters
    ----------
    arr : 1D or 2D array
    i_sym : array symmetric with respect to the 1st axis
    j_sym : array symmetric with respect to the 2nd axis

    Returns
    -------
    a binary array with the symmetry condition for the corresponding
    quadrants. The global validity can be checked with `array.all()`.

    Notes
    -----
    If both **i_sym** = ``True`` and **j_sym** = ``True``, the input array is
    checked for polar symmetry.

    See `issue #34 comment
    <https://github.com/PyAbel/PyAbel/issues/34#issuecomment-160344809>`_
    for the definition of a center of the image.
    """
    Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(arr, reorient=False)

    if i_sym and not j_sym:
        valid_flag = [np.allclose(np.fliplr(Q1), Q0),
                      np.allclose(np.fliplr(Q2), Q3)]
    elif not i_sym and j_sym:
        valid_flag = [np.allclose(np.flipud(Q1), Q2),
                      np.allclose(np.flipud(Q0), Q3)]
    elif i_sym and j_sym:
        valid_flag = [np.allclose(np.flipud(np.fliplr(Q1)), Q3),
                      np.allclose(np.flipud(np.fliplr(Q0)), Q2)]
    else:
        raise ValueError('Checking for symmetry with both i_sym=False '
                         'and j_sym=False does not make sense!')

    return np.array(valid_flag)
488744d34d851b690eb6114b06d754e46b04e36f
8,964
def linear_powspec(k, a):
    """linear power spectrum P(k) - linear_powspec(k in h/Mpc, scale factor)"""
    return _cosmocalc.linear_powspec(k, a)
9abe99ef5251b4b8ef04e7113a30dd26ed86d14a
8,965
import numpy as np


def light_eff(Pmax, Iz, I0, Ik):
    """
    Photosynthetic efficiency based on the light conditions. By definition,
    the efficiency has a value between 0 and 1.

    Parameters
    ----------
    Pmax : numeric
        Maximum photosynthetic rate [-].
    Iz : numeric
        Coral biomass-averaged light-intensity [mol photons m^-2 s^-1].
    I0 : numeric
        Light-intensity at the surface water (but within the water column)
        [mol photons m^-2 s^-1].
    Ik : numeric
        Saturation light-intensity [mol photons m^-2 s^-1].

    Returns
    -------
    PI : numeric
        Photo-efficiency [-].
    """
    # calculations
    try:
        if Ik > 0:
            PI = Pmax * (np.tanh(Iz / Ik) - np.tanh(.01 * I0 / Ik))
        else:
            PI = 0.
    except ValueError:
        # Ik is an array; evaluate element-wise where Ik > 0
        PI = np.zeros(len(Ik))
        PI[Ik > 0] = Pmax[Ik > 0] * (np.tanh(Iz[Ik > 0] / Ik[Ik > 0]) -
                                     np.tanh(.01 * I0 / Ik[Ik > 0]))

    # output
    return PI
a2e0de2cb0791d3afea15f4c78b7d673200504b3
8,966
from time import time

from numpy import arange, array, empty_like


def radial_kernel_evaluate(rmax, kernel, pos, wts, log=null_log,
                           sort_data=False, many_ngb_approx=None):
    """
    Perform evaluation of radial kernel over neighbours. Note you must set-up
    the linear-interpolation kernel before calling this function.

    rmax   - radius to evaluate within
    kernel - kernel table
    pos    - (N,3) array of positions
    wts    - (N,) array of weights

    [many_ngb_approx - guess for number of neighbours. If this is included
    and large, i.e. >140, we will combine the kernels due to particles in
    non-adjacent cells (=monopole approximation for the 1/r^2 force)]

    returns pairs, f

    where
    pairs - the number of pairs within rmax
    f     - An (N,3) array s.t.
            f_i = Sum_j wts_j (pos_j - pos_i) * kernel(|pos_j - pos_i|)
    """
    pos_arr = array(pos)
    num_pts = len(pos)
    if len(pos) != len(wts):
        raise Exception('Number of weights ({:,}) must be the same as number of points ({:,})'.format(len(wts), num_pts))

    stencil = None
    # Choose a stencil based on number of neighbours
    if many_ngb_approx is not None:
        guess_ngb = int(many_ngb_approx)
        if guess_ngb > 400:
            # 7x7x7 stencil (only inner 3x3x3 direct)
            stencil = 7
            ngrid = int(3.0 / rmax)
        elif guess_ngb > 140:
            # 5x5x5 stencil (inner 3x3x3 direct)
            stencil = 5
            ngrid = int(2.0 / rmax)
        else:
            # 3x3x3, all direct
            ngrid = int(1.0 / rmax)
    else:
        ngrid = int(1.0 / rmax)  # 3x3x3 by direct summation

    # Avoid nasty hashing problems, make sure ngrid&3 == 3
    if ngrid & 3 != 3 and ngrid >= 3:
        ngrid = (ngrid // 4) * 4 - 1

    print('Using hash grid of size {:,}^3 bins, binning particles.'.format(ngrid), file=log)
    cells = get_cells(pos_arr, ngrid, log)
    sort_idx, cellbin_data = _bin_id_data(cells, log)

    pos = pos_arr[sort_idx].copy()
    wts = array(wts)[sort_idx].copy()

    print(MU.OKBLUE + 'Kernel evaluations at {:,} positions'.format(num_pts) + MU.ENDC, file=log)
    t0 = time()
    lattice_setup_kernel(rmax, kernel, log)
    pairs, accel = lattice_kernel(pos, cellbin_data, ngrid, masses=wts, stencil=stencil)
    t1 = time()
    dt = t1 - t0

    if stencil is None:
        mean_num_ngb = pairs * 2.0 / num_pts
        print('Within r=%.4f, mean number of neighbours was' % rmax,
              MU.OKBLUE + '%.2f' % mean_num_ngb + MU.ENDC, file=log)
        print('{:,} pairs in'.format(pairs), '%.2f seconds' % dt,
              'i.e. {:,} positions/sec, {:,} kernels/sec'.format(int(num_pts / dt), int(2 * pairs / dt)),
              file=log)
    else:
        print('%dx%dx%d monopole approximation, so no exact count for neighbours\n' % ((stencil,) * 3),
              'but {:,} force-pairs in'.format(pairs), '%.2f seconds' % dt,
              'i.e. {:,} positions/sec, {:,} kernels/sec'.format(int(num_pts / dt), int(2 * pairs / dt)),
              file=log)

    if sort_data:
        # Return the sort index along with sorted positions and masses, and
        # corresponding accelerations. If you want to unsort you need to do
        # it yourself.
        return pairs, sort_idx, pos, wts, accel

    # indices for 'un'-sorting
    unsort = empty_like(sort_idx)
    unsort[sort_idx] = arange(num_pts)
    return pairs, accel[unsort]
41c4600be3a5684c97d69acb4ebe15846dcc4b0d
8,967
def get_referents(source, exclude=None):
    """
    :return: dict storing lists of objects referring to source keyed by type.
    """
    res = {}
    for obj_cls, ref_cls in [
        (models.Language, models.LanguageSource),
        (models.ValueSet, models.ValueSetReference),
        (models.Sentence, models.SentenceReference),
        (models.Contribution, models.ContributionReference),
    ]:
        if obj_cls.mapper_name().lower() in (exclude or []):
            continue
        q = DBSession.query(obj_cls).join(ref_cls).filter(ref_cls.source_pk == source.pk)
        if obj_cls == models.ValueSet:
            q = q.options(
                joinedload_all(models.ValueSet.parameter),
                joinedload_all(models.ValueSet.language))
        res[obj_cls.mapper_name().lower()] = q.all()
    return res
2aeccbbe61cdcb2b3183682a5cce8ed959fc14c9
8,968
def asarray(buffer=None, itemsize=None, shape=None, byteoffset=0,
            bytestride=None, padc=" ", kind=CharArray):
    """Massages a sequence into a chararray.

    If buffer is *already* a chararray of the appropriate kind, it is
    returned unaltered.
    """
    # `array` is the chararray constructor from the surrounding module; the
    # stdlib `array` module is not callable and would not work here.
    if isinstance(buffer, kind) and buffer.__class__ is kind:
        return buffer
    else:
        return array(buffer, itemsize, shape, byteoffset, bytestride,
                     padc, kind)
346eaaa9ece9671f5b2fa0633552f72e40300adc
8,969
import numpy as np
import pandas as pd


def cumulative_gain_curve(df: pd.DataFrame,
                          treatment: str,
                          outcome: str,
                          prediction: str,
                          min_rows: int = 30,
                          steps: int = 100,
                          effect_fn: EffectFnType = linear_effect) -> np.ndarray:
    """
    Orders the dataset by prediction and computes the cumulative gain
    (effect * proportional sample size) curve according to that ordering.

    Parameters
    ----------
    df : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.

    treatment : String
        The name of the treatment column in `df`.

    outcome : String
        The name of the outcome column in `df`.

    prediction : String
        The name of the prediction column in `df`.

    min_rows : Integer
        Minimum number of observations needed to have a valid result.

    steps : Integer
        The number of cumulative steps to iterate when accumulating the effect.

    effect_fn : function (df: pandas.DataFrame, treatment: str, outcome: str) -> int or Array of int
        A function that computes the treatment effect given a dataframe, the
        name of the treatment column and the name of the outcome column.

    Returns
    -------
    cumulative gain curve : Numpy's Array
        The cumulative gain according to the predictions ordering.
    """
    size = df.shape[0]
    n_rows = list(range(min_rows, size, size // steps)) + [size]

    cum_effect = cumulative_effect_curve(df=df, treatment=treatment, outcome=outcome,
                                         prediction=prediction, min_rows=min_rows,
                                         steps=steps, effect_fn=effect_fn)

    return np.array([effect * (rows / size) for rows, effect in zip(n_rows, cum_effect)])
ca493a85d1aa7d74335b1ddb65f2f2a94fcaa152
8,972
def last(*args):
    """Return the last value from any object type - list, tuple, int, string."""
    if len(args) == 1:
        return int(''.join(map(str, args))) if isinstance(args[0], int) else args[0][-1]
    return args[-1]
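A quick sketch of the branching behaviour:

print(last([1, 2, 3]))  # 3
print(last("hello"))    # 'o'
print(last(42))         # 42 (a single int is returned whole)
print(last(1, 2, 3))    # 3 (multiple args: the last argument)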
ad8d836597dd6a5dfe059756b7d8d728f6ea35fc
8,973
def predict(self, celldata):
    """
    This is the method that performs prediction based on a model.
    For now it just returns dummy data.
    :return:
    """
    ai_model = load_model_parameter()
    ret = predict_unseen_data(ai_model, celldata)
    print("celldata: ", celldata)
    print("Classification: ", ret)
    return ret
435be195c765aa3823a710982bdc6f7954a24178
8,976
from typing import List


def statements_to_str(statements: List[ASTNode], indent: int) -> str:
    """Takes a list of statements and returns a string with their C representation."""
    stmt_str_list = list()
    for stmt in statements:
        stmt_str = stmt.to_str(indent + 1)
        if not is_compound_statement(stmt) and not isinstance(stmt, Label):
            stmt_str += ";" + NEW_LINE
        stmt_str_list.append(stmt_str)
    return "".join(stmt_str_list)
01bd0546be8b7a212dbb73fae3c505bbe0086b48
8,977
def _make_filter(class_name: str, title: str):
    """https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-enumwindows"""
    def enum_windows(handle: int, h_list: list):
        if not (class_name or title):
            h_list.append(handle)
        if class_name and class_name not in win32gui.GetClassName(handle):
            return True  # continue enumeration
        if title and title not in win32gui.GetWindowText(handle):
            return True  # continue enumeration
        h_list.append(handle)

    return enum_windows
3b9d5f3fe4afd666cfa7ed43f8abe103b9575249
8,978
def is_float(s):
    """
    Determine if a string can be converted to a floating point number.
    """
    try:
        float(s)
    except (TypeError, ValueError):
        return False
    return True
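Example inputs:

print(is_float("3.14"))  # True
print(is_float("1e-3"))  # True
print(is_float("abc"))   # False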
2df52b4f8e0835d9f169404a6cb4f003ca661fff
8,979
def build_lm_model(config):
    """
    Build a language model according to config["model"].
    """
    if config["model"] == "transformer":
        model = build_transformer_lm_model(config)
    elif config["model"] == "rnn":
        model = build_rnn_lm_model(config)
    else:
        raise ValueError("model not correct!")

    return model
03a84f28ec4f4a7cd847575fcbcf278943b72b8a
8,980
def __virtual__():
    """
    Only load if boto3 libraries exist.
    """
    has_boto_reqs = salt.utils.versions.check_boto_reqs()
    if has_boto_reqs is True:
        __utils__["boto3.assign_funcs"](__name__, "cloudfront")
    return has_boto_reqs
63d2f1102713b8da66e75b28c4c642427fe69e8a
8,981
def search_range(nums, target):
    """
    Find first and last position of target in given array by binary search

    :param nums: given array
    :type nums: list[int]
    :param target: target number
    :type target: int
    :return: first and last position of target
    :rtype: list[int]
    """
    result = [-1, -1]

    left, right = 0, len(nums) - 1
    while left <= right:
        mid = (left + right) // 2
        # note that we move right pointer when nums[mid] == target
        # to find the first occurrence of target
        if nums[mid] >= target:
            right = mid - 1
        else:
            left = mid + 1
    if 0 <= left < len(nums) and nums[left] == target:
        result[0] = left

    left, right = 0, len(nums) - 1
    while left <= right:
        mid = (left + right) // 2
        # note that we move left pointer when nums[mid] == target
        # to find the last occurrence of target
        if nums[mid] > target:
            right = mid - 1
        else:
            left = mid + 1
    if 0 <= right < len(nums) and nums[right] == target:
        result[1] = right

    return result
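Example (the classic "find first and last position" case):

print(search_range([5, 7, 7, 8, 8, 10], 8))  # [3, 4]
print(search_range([5, 7, 7, 8, 8, 10], 6))  # [-1, -1]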
8165e3a2f33741c15494d5d98a82a85c2fb610ff
8,983
def process_mean_results(data, capacity, constellation, scenario, parameters):
    """
    Process results.
    """
    output = []

    adoption_rate = scenario[1]
    overbooking_factor = parameters[constellation.lower()]['overbooking_factor']
    constellation_capacity = capacity[constellation]

    max_capacity = constellation_capacity['capacity_kmsq']
    number_of_satellites = constellation_capacity['number_of_satellites']
    satellite_coverage_area = constellation_capacity['satellite_coverage_area']

    for idx, item in data.iterrows():
        users_per_km2 = item['pop_density_km2'] * (adoption_rate / 100)
        active_users_km2 = users_per_km2 / overbooking_factor

        if active_users_km2 > 0:
            per_user_capacity = max_capacity / active_users_km2
        else:
            per_user_capacity = 0

        output.append({
            'scenario': scenario[0],
            'constellation': constellation,
            'number_of_satellites': number_of_satellites,
            'satellite_coverage_area': satellite_coverage_area,
            'iso3': item['iso3'],
            'GID_id': item['regions'],
            'population': item['population'],
            'area_m': item['area_m'],
            'pop_density_km2': item['pop_density_km2'],
            'adoption_rate': adoption_rate,
            'users_per_km2': users_per_km2,
            'active_users_km2': active_users_km2,
            'per_user_capacity': per_user_capacity,
        })

    return output
0619c397a21d27440988c4b23284e44700ba69eb
8,984
def identify_ossim_kwl(ossim_kwl_file):
    """
    Parse geom file to identify if it is an ossim model.

    :param ossim_kwl_file: ossim keyword list file
    :type ossim_kwl_file: str
    :return: ossim kwl info: ossimmodel or None if not an ossim kwl file
    :rtype: str
    """
    try:
        with open(ossim_kwl_file, encoding="utf-8") as ossim_file:
            content = ossim_file.readlines()
        geom_dict = {}
        for line in content:
            (key, val) = line.split(": ")
            geom_dict[key] = val.rstrip()
        if "type" in geom_dict:
            if geom_dict["type"].strip().startswith("ossim"):
                return geom_dict["type"].strip()
        return None
    except Exception:  # pylint: disable=broad-except
        return None
9a63a8b5e7ece79b11336e71a8afa5a703e3acbc
8,985
import tensorflow as tf


def conv_cond_concat(x, y):
    """
    Concatenate conditioning vector on feature map axis.

    # Arguments
        x: 4D-Tensor
        y: 4D-Tensor

    # Return
        4D-Tensor
    """
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1],
                                         x_shapes[2], y_shapes[3]])])
c30a4328d3a6e8cf2b1e38cf012edca045e9de69
8,986
import numpy as np


def swath_pyresample_gdaltrans(file: str, var: str, subarea: dict, epsilon: float,
                               src_tif: str, dst_tif: str):
    """Reprojects swath data using pyresample and translates the image to an
    EE-ready tif using gdal.

    Parameters
    ----------
    file: str
        file to be resampled and uploaded to GC -> EE
    var: str
        input variable name
    subarea: dict
        string name of the projection to resample the data onto (pyproj supported)
    epsilon: float
        The distance to a found value is guaranteed to be no further than
        (1 + eps) times the distance to the correct neighbour. Allowing for
        uncertainty decreases execution time.
    src_tif: str
        temporary target geotif file
    dst_tif: str
        final geotif output, GDAL processed

    Returns
    -------
    dict: global and var attributes
    """
    # -----------
    # get dataset
    # -----------
    resample_dst = create_dataset(file=file, key=var, subarea=subarea)
    resample_dst['epsilon'] = epsilon

    # ---------------
    # resample swaths
    # ---------------
    if var in ('l2_flags', 'QA_flag'):
        meta = flags_band(dataset=resample_dst, key=var, src_tif=src_tif, dst_tif=dst_tif)
    else:
        attrs = resample_dst.pop(var)
        glob_attrs = resample_dst.pop('glob_attrs')
        proj = resample_dst.pop('proj')
        fill_value = attrs['_FillValue']

        result = swath_resample(swath=resample_dst, trg_proj=proj)
        np.ma.set_fill_value(result, fill_value=fill_value)

        # ---------------------
        # write out the g-tif-f
        # ---------------------
        meta = write_tif(file=src_tif, dataset=result, data_type='Float32',
                         metadata={var: attrs, 'glob_attrs': glob_attrs},
                         area_def=proj)
        gdal_translate(src_tif=src_tif, dst_tif=dst_tif, ot='Float32', nodata=fill_value)

    return meta
b716a6b45cf48457d0c6ca5849997b7c37c6c795
8,988
def getKeyPairPrivateKey(keyPair):
    """Extracts the private key from a key pair.

    @type keyPair: string
    @param keyPair: public/private key pair

    @rtype: base string
    @return: private key PEM text
    """
    return crypto.dump_privatekey(crypto.FILETYPE_PEM, keyPair)
0decc2dbb77343a7a200ace2af9277ee7e5717a5
8,990
def playbook_input(request, playbook_id, config_file=None, template=None):
    """Playbook input view."""
    # Get playbook
    playbook = Playbook.objects.get(pk=playbook_id)

    # Get username
    user = str(request.user)

    # Check user permissions
    if user not in playbook.permissions.users:
        return playbooks(request)

    # Get asset name if provided
    asset_name = request.POST.get('asset_name', None)

    # Get assets
    if playbook.asset_filter != '*':
        inventory = netspot.NetSPOT()
        assets = inventory.search(playbook.asset_filter, key='asset')
    else:
        assets = None

    # Get config if config_file is provided
    config = None
    if config_file:
        with open(config_file, 'r') as file_handle:
            config = file_handle.read().strip()

    variables = PlaybookVariable.objects.filter(playbook=playbook)

    return render(
        request,
        'playbook.htm',
        context={'playbook': playbook.name,
                 'playbook_id': playbook.id,
                 'assets': assets,
                 'asset_name': asset_name,
                 'asset_filter': playbook.asset_filter,
                 'user_auth': playbook.user_auth,
                 'inputs': variables,
                 'config_file': config_file,
                 'config': config,
                 'template': template,
                 'description': playbook.description},
    )
4b01e08414f38bdaad45245043ec30adb876e40e
8,991
def _filter_gtf_df(GTF_df, col, selection, keep_columns, silent=False):
    """
    Filter a GTF on a specific feature type (e.g., genes)

    Parameters:
    -----------
    GTF_df
        pandas DataFrame of a GTF
        type: pd.DataFrame
    col
        colname on which df.loc will be performed
        type: str
    selection
        value in df[col]
        type: str, int, float, etc. (most likely str)
    keep_columns
        A list of strings of colnames to keep. If False (default behavior),
        all cols are kept.
        type: bool
        default: False
    silent
        default: False
        type: bool

    Returns:
    --------
    GTF_filtered
        type: pandas.DataFrame
    """
    msg = _Messages(silent)
    msg.filtering(col, selection)

    return GTF_df.loc[GTF_df[col] == selection][keep_columns]
5f41141d69c0c837e396ec95127500a826013500
8,992
def validation_generator_for_dir(data_dir, model_dict):
    """Create a Keras generator suitable for validation.

    No data augmentation is performed.

    :param data_dir: folder with subfolders for the classes and images therein
    :param model_dict: dict as returned by `create_custom_model`
    :returns: a generator for batches suitable for validating the model
    :rtype: ??
    """
    return _generator_for_dir(test_datagen, data_dir, model_dict)
57b0a83e98438b8e397377a5626094f69ea21083
8,993
def convert_cbaois_to_kpsois(cbaois):
    """Convert coordinate-based augmentables to KeypointsOnImage instances.

    Parameters
    ----------
    cbaois : list of imgaug.augmentables.bbs.BoundingBoxesOnImage or list of imgaug.augmentables.bbs.PolygonsOnImage or list of imgaug.augmentables.bbs.LineStringsOnImage or imgaug.augmentables.bbs.BoundingBoxesOnImage or imgaug.augmentables.bbs.PolygonsOnImage or imgaug.augmentables.bbs.LineStringsOnImage
        Coordinate-based augmentables to convert, e.g. bounding boxes.

    Returns
    -------
    list of imgaug.augmentables.kps.KeypointsOnImage or imgaug.augmentables.kps.KeypointsOnImage
        ``KeypointsOnImage`` instances containing the coordinates of input `cbaois`.
    """
    if not isinstance(cbaois, list):
        return cbaois.to_keypoints_on_image()

    kpsois = []
    for cbaoi in cbaois:
        kpsois.append(cbaoi.to_keypoints_on_image())
    return kpsois
6eee2715de3bfc76fac9bd3c246b0d2352101be1
8,994
def get_query_dsl(query_string, global_filters=None, facets_query_size=20,
                  default_operator='and'):
    """
    Returns an elasticsearch query dsl for a query string.

    param: query_string: an expression of the form
        type: person title:foo AND description:bar
        where type corresponds to an elastic search document type which gets
        added as a filter.
    param: global_filters: a dictionary of the form {user_id: 1234}. This
        gets added as a filter to the query so that the query can be narrowed
        down to fewer documents. It is translated into an elastic search
        term filter.
    """
    global FACETS_QUERY_SIZE, DEFAULT_OPERATOR
    FACETS_QUERY_SIZE = facets_query_size
    DEFAULT_OPERATOR = default_operator
    global_filters = global_filters if global_filters else {}
    expression = tokenizer.tokenize(query_string)
    bool_lists = expression['query']['filtered']['filter']['bool']

    [bool_lists['should'].append({"term": orele})
     for orele in global_filters.get('or', [])]
    [bool_lists['must'].append({"term": andele})
     for andele in global_filters.get('and', [])]
    [bool_lists['must_not'].append({"term": notele})
     for notele in global_filters.get('not', [])]

    if 'sort' in global_filters:
        expression['sort'] = global_filters.get('sort')

    return expression
9f6c1371e0de1f28737415c0454f645748af054f
8,996
def prune_visualization_dict(visualization_dict):
    """
    Get rid of empty entries in visualization dict.

    :param visualization_dict:
    :return:
    """
    new_visualization_dict = {}
    # when the form is left blank the entries of visualization_dict have
    # COLUMN_NAME key that points to an empty list
    for vis_key, vis_dict in visualization_dict.items():
        if vis_dict.get(COLUMN_NAME):
            new_visualization_dict[vis_key] = vis_dict
    return new_visualization_dict
fae81eb69fc25d61282eb151d931d740c51b8bae
8,997
def _LocationListToGoTo( request_data, positions ):
  """Convert a LSP list of locations to a ycmd GoTo response."""
  try:
    if len( positions ) > 1:
      return [
        responses.BuildGoToResponseFromLocation(
          *_PositionToLocationAndDescription( request_data, position ) )
        for position in positions
      ]
    return responses.BuildGoToResponseFromLocation(
      *_PositionToLocationAndDescription( request_data, positions[ 0 ] ) )
  except ( IndexError, KeyError ):
    raise RuntimeError( 'Cannot jump to location' )
2ee39fdadd721920a3737561979308223a64b57a
8,998
def calculate_average_grades_and_deviation(course):
    """Determines the final average grade and deviation for a course."""
    avg_generic_likert = []
    avg_contribution_likert = []
    dev_generic_likert = []
    dev_contribution_likert = []
    avg_generic_grade = []
    avg_contribution_grade = []
    dev_generic_grade = []
    dev_contribution_grade = []

    for __, contributor, __, results, __ in calculate_results(course):
        average_likert = avg([result.average for result in results if result.question.is_likert_question])
        deviation_likert = avg([result.deviation for result in results if result.question.is_likert_question])
        average_grade = avg([result.average for result in results if result.question.is_grade_question])
        deviation_grade = avg([result.deviation for result in results if result.question.is_grade_question])

        (avg_contribution_likert if contributor else avg_generic_likert).append(average_likert)
        (dev_contribution_likert if contributor else dev_generic_likert).append(deviation_likert)
        (avg_contribution_grade if contributor else avg_generic_grade).append(average_grade)
        (dev_contribution_grade if contributor else dev_generic_grade).append(deviation_grade)

    # the final total grade will be calculated by the following formula
    # (GP = GRADE_PERCENTAGE, CP = CONTRIBUTION_PERCENTAGE):
    # final_likert = CP * likert_answers_about_persons + (1-CP) * likert_answers_about_courses
    # final_grade = CP * grade_answers_about_persons + (1-CP) * grade_answers_about_courses
    # final = GP * final_grade + (1-GP) * final_likert

    final_likert_avg = mix(avg(avg_contribution_likert), avg(avg_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
    final_likert_dev = mix(avg(dev_contribution_likert), avg(dev_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
    final_grade_avg = mix(avg(avg_contribution_grade), avg(avg_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
    final_grade_dev = mix(avg(dev_contribution_grade), avg(dev_generic_grade), settings.CONTRIBUTION_PERCENTAGE)

    final_avg = mix(final_grade_avg, final_likert_avg, settings.GRADE_PERCENTAGE)
    final_dev = mix(final_grade_dev, final_likert_dev, settings.GRADE_PERCENTAGE)

    return final_avg, final_dev
95b26efedba076e0b9b54c565fe2e0787d5fbb0e
8,999
import datetime


def get_slurm_params(n, runtime=None, mem=None, n_jobs=None):
    """Get remaining parameters to submit SLURM jobs based on specified
    parameters and number of files to process.

    Parameters
    ----------
    n : int
        Number of files to process.
    runtime : str, None
        Time per run, string formatted 'hours:minutes:seconds'.
    mem : str, None
        Memory, string formatted for SLURM e.g. '1G', '500MB'.
    n_jobs : int, None
        Number of SLURM jobs to launch.

    Returns
    -------
    str
        Time per job.
    str
        Memory per job.
    int
        Number of jobs.
    """
    # TIME ~5s per subject (ADHD200 and fmri dev dataset)
    # MEM 1G overall (cleans up after each subject, peaks around ~500)
    # Tested w/ MIST64 and MIST444
    if mem is None:
        mem = '1G'

    if runtime is None:
        if n_jobs is None:
            if n < 1000:
                n_per_job = 50
            elif n < 10000:
                n_per_job = 200
            else:
                n_per_job = 500
            n_jobs = int(n / n_per_job)
        else:
            n_per_job = int(n / n_jobs)  # round down (add one later to calc for time)

        if n_per_job == 0:
            n_per_job = 1
        sec = 2 * n_per_job * 5  # (seconds)
        if sec < 300:
            sec = 300
        runtime = str(datetime.timedelta(seconds=sec))
    else:
        parts = runtime.split(':')
        if len(parts) == 3:
            sec = int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
        elif len(parts) == 2:
            # 'minutes:seconds'
            sec = int(parts[0]) * 60 + int(parts[1])
        if n_jobs is None:
            n_jobs = int((10 * n) / sec)
            if n_jobs == 0:
                n_jobs = 1

    return runtime, mem, n_jobs
f2bf08430fbde0dcc430fd3e01d6b5ca1bd64487
9,000
def source_open() -> bool:
    """Open a source MS Excel spreadsheet file.

    Returns
    -------
    boolean
        Flag about successful processing.
    """
    try:
        Source.wbook = openpyxl.load_workbook(cmdline.workbook)
    except Exception:
        logger.error(
            'Cannot open the MS Excel workbook %s',
            cmdline.workbook
        )
        return False
    return True
19a2c214131afa6c1126bc1e0a4b4892a13bc32b
9,002
import re


def get_license_match_error(lic, lic_file_path):
    """Returns an Error of the type 'warning' if the FreeRTOS license is
    present in the input file. Otherwise an empty list is returned.
    """
    # Get the words in the license template
    with open('license.templ', 'r') as file:
        template_lic = file.read()

    # Split on non-alphanumeric characters; re.split() will match the
    # empty string, so filter those out.
    template_lic_words = list(filter(None, re.split('[^0-9a-zA-Z]+', template_lic)))
    lic_words = list(filter(None, re.split('[^0-9a-zA-Z]+', lic)))

    same = False
    for i, word in enumerate(lic_words):
        if word == template_lic_words[0]:
            # Element-wise comparison of the two arrays.
            if lic_words[i:i + len(template_lic_words)] == template_lic_words:
                same = True
                break

    if same:
        return [Error(type='warning', info='FreeRTOS license is in file: ' + lic_file_path)]
    return []
d3f53f3d25c4d56b41fb561cf37b845d1efdc9fe
9,004
import queue


def start_workers(size, delete=False, migrate=False):
    """Starts FluxxWorkers.

    :returns: Pair of queues.
    """
    streams = (queue.Queue(), queue.Queue(maxsize=size))
    for _ in range(THREAD_COUNT):
        worker = FluxxWorker(streams, delete, migrate)
        worker.daemon = True
        worker.start()
    return streams
358d8d3bc0d12edbe9e422cdfc206de626fd2a7d
9,005
import numpy as np
import pandas as pd


def harmonizationApply(data, covars, model):
    """
    Applies harmonization model with neuroCombat functions to new data.

    Arguments
    ---------
    data : a numpy array
        data to harmonize with ComBat, dimensions are N_samples x N_features

    covars : a pandas DataFrame
        contains covariates to control for during harmonization
        all covariates must be encoded numerically (no categorical variables)
        must contain a single column "SITE" with site labels for ComBat
        dimensions are N_samples x (N_covariates + 1)

    model : a dictionary of model parameters
        the output of a call to harmonizationLearn()

    Returns
    -------
    bayes_data : a numpy array
        harmonized data, dimensions are N_samples x N_features
    """
    # transpose data as per ComBat convention
    data = data.T

    # prep covariate data
    batch_col = covars.columns.get_loc('SITE')
    cat_cols = []
    num_cols = [covars.columns.get_loc(c) for c in covars.columns if c != 'SITE']
    covars = np.array(covars, dtype='object')

    # load the smoothing model
    smooth_model = model['smooth_model']
    smooth_cols = smooth_model['smooth_cols']

    ### additional setup code from neuroCombat implementation:
    # convert batch col to integer
    covars[:, batch_col] = np.unique(covars[:, batch_col], return_inverse=True)[-1]
    # create dictionary that stores batch info
    (batch_levels, sample_per_batch) = np.unique(covars[:, batch_col], return_counts=True)
    info_dict = {
        'batch_levels': batch_levels.astype('int'),
        'n_batch': len(batch_levels),
        'n_sample': int(covars.shape[0]),
        'sample_per_batch': sample_per_batch.astype('int'),
        'batch_info': [list(np.where(covars[:, batch_col] == idx)[0]) for idx in batch_levels]
    }
    ###

    # check sites are identical in training dataset
    check_sites = info_dict['n_batch'] == model['info_dict']['n_batch']
    if not check_sites:
        raise ValueError('Number of sites in holdout data not identical to training data.')

    # apply ComBat without re-learning model parameters
    design = make_design_matrix(covars, batch_col, cat_cols, num_cols)

    ### additional setup if smoothing is performed
    if smooth_model['perform_smoothing']:
        # create cubic spline basis for smooth terms
        X_spline = covars[:, smooth_cols].astype(float)
        bs_basis = smooth_model['bsplines_constructor'].transform(X_spline)

        # construct formula and dataframe required for gam
        formula = 'y ~ '
        df_gam = {}
        for b in batch_levels:
            formula = formula + 'x' + str(b) + ' + '
            df_gam['x' + str(b)] = design[:, b]
        for c in num_cols:
            if c not in smooth_cols:
                formula = formula + 'c' + str(c) + ' + '
                df_gam['c' + str(c)] = covars[:, c].astype(float)
        formula = formula[:-2] + '- 1'
        df_gam = pd.DataFrame(df_gam)

        # check formulas are identical in training dataset
        check_formula = formula == smooth_model['formula']
        if not check_formula:
            raise ValueError('GAM formula for holdout data not identical to training data.')

        # for matrix operations, a modified design matrix is required
        design = np.concatenate((df_gam, bs_basis), axis=1)
    ###

    s_data, stand_mean, var_pooled = ApplyStandardizationAcrossFeatures(data, design, info_dict, model)
    bayes_data = adjust_data_final(s_data, design, model['gamma_star'], model['delta_star'],
                                   stand_mean, var_pooled, info_dict)

    # transpose data to return to original shape
    bayes_data = bayes_data.T

    return bayes_data
7789d3a75d043df5048a7b0adced771c7e1ddd81
9,006
import re


def from_rkm(code):
    """Convert an RKM code string to a string with a decimal point.

    Parameters
    ----------
    code : str
        RKM code string.

    Returns
    -------
    str
        String with a decimal point and an R value.

    Examples
    --------
    >>> from pyaedt.circuit import from_rkm
    >>> from_rkm('R47')
    '0.47'
    >>> from_rkm('4R7')
    '4.7'
    >>> from_rkm('470R')
    '470'
    >>> from_rkm('4K7')
    '4.7k'
    >>> from_rkm('47K')
    '47k'
    >>> from_rkm('47K3')
    '47.3k'
    >>> from_rkm('470K')
    '470k'
    >>> from_rkm('4M7')
    '4.7M'
    """
    # Matches RKM codes that start with a digit.
    # fd_pattern = r'([0-9]+)([LREkKMGTFmuµUnNpP]+)([0-9]*)'
    fd_pattern = r'([0-9]+)([{}]+)([0-9]*)'.format(''.join(RKM_MAPS.keys()), )

    # Matches RKM codes that end with a digit.
    # ld_pattern = r'([0-9]*)([LREkKMGTFmuµUnNpP]+)([0-9]+)'
    ld_pattern = r'([0-9]*)([{}]+)([0-9]+)'.format(''.join(RKM_MAPS.keys()))

    fd_regex = re.compile(fd_pattern, re.I)
    ld_regex = re.compile(ld_pattern, re.I)

    for regex in [fd_regex, ld_regex]:
        m = regex.match(code)
        if m:
            fd, base, ld = m.groups()
            ps = RKM_MAPS[base]
            if ld:
                return_str = ''.join([fd, '.', ld, ps])
            else:
                return_str = ''.join([fd, ps])
            return return_str
    return code
8cb41a58fab685e5e7de4af533fade1aeee09c2c
9,007
def get_arguments(method, rpc_version):
    """
    Get arguments for method in specified Transmission RPC version.
    """
    if method in ('torrent-add', 'torrent-get', 'torrent-set'):
        args = constants.TORRENT_ARGS[method[-3:]]
    elif method in ('session-get', 'session-set'):
        args = constants.SESSION_ARGS[method[-3:]]
    else:
        raise ValueError('Method "%s" not supported' % (method))
    accessible = []
    for argument, info in args.items():
        valid_version = True
        if rpc_version < info[1]:
            valid_version = False
        if info[2] and info[2] <= rpc_version:
            valid_version = False
        if valid_version:
            accessible.append(argument)
    return accessible
dcd8b3f0e5e93409518d7e9d72ffe954c3b99915
9,008
import functools


def compose_local_noises(*functions: NoiseModel) -> NoiseModel:
    """Helper to compose multiple NoiseModel.

    Args:
        *functions: a list of functions

    Returns:
        The mathematical composition of *functions. The last element is
        applied first. If *functions is [f, g, h], it returns f∘g∘h.
    """
    return functools.reduce(
        lambda f, g: lambda x: f(g(x)), functions, lambda x: x
    )
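A sketch of the composition order, using plain callables as stand-ins for NoiseModel:

double = lambda x: x * 2
inc = lambda x: x + 1
composed = compose_local_noises(double, inc)  # double ∘ inc
print(composed(3))  # inc is applied first: double(3 + 1) == 8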
4b6e90ff2def9a988d8aa66782d990971b8de586
9,009
import copy


def sls_build(repository, tag="latest", base="opensuse/python", mods=None,
              dryrun=False, **kwargs):
    """
    .. versionchanged:: 2018.3.0
        The repository and tag must now be passed separately using the
        ``repository`` and ``tag`` arguments, rather than together in the
        (now deprecated) ``image`` argument.

    Build a Docker image using the specified SLS modules on top of base image

    .. versionadded:: 2016.11.0

    The base image does not need to have Salt installed, but Python is required.

    repository
        Repository name for the image to be built

        .. versionadded:: 2018.3.0

    tag : latest
        Tag name for the image to be built

        .. versionadded:: 2018.3.0

    name
        .. deprecated:: 2018.3.0
            Use both ``repository`` and ``tag`` instead

    base : opensuse/python
        Name or ID of the base image

    mods
        A string containing comma-separated list of SLS with defined states
        to apply to the base image.

    saltenv : base
        Specify the environment from which to retrieve the SLS indicated by
        the `mods` parameter.

    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
        :conf_minion:`pillarenv` option. When neither the
        :conf_minion:`pillarenv` minion config option nor this CLI argument
        is used, all Pillar environments will be merged together.

        .. versionadded:: 2018.3.0

    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs

        .. note::
            Values passed this way will override Pillar values set via
            ``pillar_roots`` or an external Pillar source.

        .. versionadded:: 2018.3.0

    dryrun: False
        when set to True the container will not be committed at the end of
        the build. The dryrun succeed also when the state contains errors.

    **RETURN DATA**

    A dictionary with the ID of the new container. In case of a dryrun,
    the state result is returned and the container gets removed.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.sls_build imgname base=mybase mods=rails,web
    """
    create_kwargs = __utils__["args.clean_kwargs"](**copy.deepcopy(kwargs))
    for key in ("image", "name", "cmd", "interactive", "tty", "extra_filerefs"):
        try:
            del create_kwargs[key]
        except KeyError:
            pass

    # start a new container
    ret = create(image=base, cmd="sleep infinity", interactive=True, tty=True,
                 **create_kwargs)
    id_ = ret["Id"]
    try:
        start_(id_)

        # Now execute the state into the container
        ret = sls(id_, mods, **kwargs)
        # fail if the state was not successful
        if not dryrun and not __utils__["state.check_result"](ret):
            raise CommandExecutionError(ret)
        if dryrun is False:
            ret = commit(id_, repository, tag=tag)
    finally:
        stop(id_)
        rm_(id_)
    return ret
d3d047334ea8b02e61d26b3fc471eb2cedd7a8c5
9,010
import re
from datetime import datetime


def parse_date(date):
    """
    Parses a date string and returns number of seconds from the EPOCH.
    """
    # yyyy-mm-dd [hh:mm:ss[.s][ [+-]hh[:][mm]]]
    p = re.compile(r'''(?P<year>\d{1,4})           # yyyy
                       -                           #
                       (?P<month>\d{1,2})          # mm or m
                       -                           #
                       (?P<day>\d{1,2})            # dd or d
                                                   #
                       (?:                         # [optional time and timezone]
                           (?:\s|T)                #
                           (?P<hour>\d{1,2})       # hh or h
                           :?                      #
                           (?P<min>\d{1,2})?       # mm or m
                           (?:                     # [optional seconds]
                               :                   #
                               (?P<sec>\d{1,2})    # ss or s
                                                   #
                               (?:                 # [optional decisecond]
                                   \.              # .
                                   (?P<dsec>\d)    # s
                               )?                  #
                           )?                      #
                           (?:                     # [optional timezone]
                               \s?                 #
                               ((?:                #
                                   (?P<ho>[+-]?    # [+ or -]
                                   \d{1,2})        # hh or h
                                   :?              # [:]
                                   (?P<mo>\d{2})?  # [mm]
                               )                   #
                               |                   # or
                               (?:UTC)|(?:Z))      # UTC | Z
                           )?                      #
                       )?                          #
                       $                           # EOL
                    ''', re.VERBOSE)

    m = p.match(date)
    if m:
        c = m.groupdict(0)
        for k, v in c.items():
            c[k] = int(v)

        # get timezone offset in seconds
        tz_offset = c['ho'] * HOUR + c['mo'] * MINUTE

        # Some datasets use the date "0000-01-01 00:00:00" as an origin, even
        # though the year zero does not exist in the Gregorian/Julian calendars.
        if c['year'] == 0:
            c['year'] = 1
            year_offset = LEAP_YEAR
        else:
            year_offset = 0

        origin = datetime(c['year'], c['month'], c['day'], c['hour'], c['min'],
                          c['sec'], c['dsec'] * 100000)
        dt = origin - EPOCH
        return dt.days * DAY + dt.seconds + dt.microseconds * MICROSECOND - year_offset - tz_offset

    raise ParserError('Invalid date: %s' % date)
44dbf7c9ded2004118b64827e5c5016dc3967ec6
9,011
def CorrectOrWrong(Input, word):
    """Check if Input is inside word."""
    return Input in word
fa3f06fd156c2523334a057366e88c5b7b376eb1
9,012
import numpy as np
import pandas as pd


def get_fair_metrics(dataset, pred, pred_is_dataset=False):
    """
    Measure fairness metrics.

    Parameters:
    dataset (pandas dataframe): Dataset
    pred (array): Model predictions
    pred_is_dataset, optional (bool): True if prediction is already part of
        the dataset, column name 'labels'.

    Returns:
    fair_metrics: Fairness metrics.
    """
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = ['statistical_parity_difference', 'equal_opportunity_difference',
            'average_abs_odds_difference', 'disparate_impact', 'theil_index']
    obj_fairness = [[0, 0, 0, 1, 0]]
    fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups = [{attr: dataset_pred.privileged_protected_attributes[idx][0]}]
        unprivileged_groups = [{attr: dataset_pred.unprivileged_protected_attributes[idx][0]}]

        classified_metric = ClassificationMetric(dataset,
                                                 dataset_pred,
                                                 unprivileged_groups=unprivileged_groups,
                                                 privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(dataset_pred,
                                               unprivileged_groups=unprivileged_groups,
                                               privileged_groups=privileged_groups)

        acc = classified_metric.accuracy()

        row = pd.DataFrame([[metric_pred.mean_difference(),
                             classified_metric.equal_opportunity_difference(),
                             classified_metric.average_abs_odds_difference(),
                             metric_pred.disparate_impact(),
                             classified_metric.theil_index()]],
                           columns=cols,
                           index=[attr])
        fair_metrics = fair_metrics.append(row)

    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)

    return fair_metrics
1cf4a8655bf569f5d8ddfa530f46c65fe8f2be3f
9,013
from typing import Dict, Sequence, Tuple, Union


def make_params(key_parts: Sequence[str],
                variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
    """
    Map keys to variables. This maps URL-pattern variables to
    a URL's related parts.

    :param key_parts: A list of URL parts
    :param variable_parts: A linked-list
        (ala nested tuples) of URL parts
    :return: The param dict with the values
        assigned to the keys
    :private:
    """
    # The unwrapped variable parts are in reverse order.
    # Instead of reversing those we reverse the key parts
    # and avoid the O(n) space required for reversing the vars
    return dict(zip(reversed(key_parts), _unwrap(variable_parts)))
4da736f2057e06be1ceb51968d6c205cd28b7093
9,014
def load_sentiments(file_name=DATA_PATH + "sentiments.csv"):
    """Read the sentiment file and return a dictionary containing the
    sentiment score of each word, a value from -1 to +1.
    """
    sentiments = {}
    for line in open(file_name):
        word, score = line.split(',')
        sentiments[word] = float(score.strip())
    return sentiments
a98ae77a051ea3b599ee2fd5036e1bd33c1f4d64
9,015
def get_draft_url(url):
    """
    Return the given URL with a draft mode HMAC in its querystring.
    """
    if verify_draft_url(url):
        # Nothing to do. Already a valid draft URL.
        return url

    # Parse querystring and add draft mode HMAC.
    url = urlparse.urlparse(url)
    salt = get_random_string(5)
    # QueryDict requires a bytestring as its first argument
    query = QueryDict(force_bytes(url.query), mutable=True)
    query['edit'] = '%s:%s' % (salt, get_draft_hmac(salt, url.path))

    # Reconstruct URL.
    parts = list(url)
    parts[4] = query.urlencode(safe=':')
    return urlparse.urlunparse(parts)
f8eaaa7daaba2b5bfe448b5386e88d9f738b0f5d
9,017
def make_datum(source: str, img_id: str, sent_id: int, sent: str):
    """
    Create a datum from the provided infos.

    :param source: the dataset of the particular sentence.
    :param img_id: id of the image
    :param sent_id: id of the sentence (of the image)
    :param sent: the sentence
    :return: a dict of datum
    """
    uid = make_uid(img_id, source, sent_id)
    img_path = get_img_path(source, img_id)
    return {
        'uid': uid,
        'img_id': img_id,
        'img_path': img_path,
        'sent': sent,
    }
4814093519aad09e0f81d6e0841d130e1b2e43a4
9,018
def list_for_consumer(req):
    """List allocations associated with a consumer."""
    context = req.environ['placement.context']
    context.can(policies.ALLOC_LIST)
    consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # NOTE(cdent): There is no way for a 404 to be returned here,
    # only an empty result. We do not have a way to validate a
    # consumer id.
    allocations = alloc_obj.get_all_by_consumer_id(context, consumer_id)

    output = _serialize_allocations_for_consumer(context, allocations, want_version)
    last_modified = _last_modified_from_allocations(allocations, want_version)
    allocations_json = jsonutils.dumps(output)

    response = req.response
    response.status = 200
    response.body = encodeutils.to_utf8(allocations_json)
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response
37575bb0d05491d8a2e0933134fa530bf7699b3b
9,019
def get_zcl_attribute_size(code):
    """
    Determine the number of bytes a given ZCL attribute takes up.

    Args:
        code (int): The attribute size code included in the packet.

    Returns:
        int: size of the attribute data in bytes, or -1 for error/no size.
    """
    # (type code, size) pairs; 0x29 is int16 (2 bytes) and 0x38/0x39/0x3a are
    # semi/single/double-precision floats per the ZCL spec (the original data
    # listed 0x29 as 3 bytes and repeated 0x38, which looked like typos).
    opts = (0x00, 0,
            0x08, 1, 0x09, 2, 0x0a, 3, 0x0b, 4, 0x0c, 5, 0x0d, 6, 0x0e, 7, 0x0f, 8,
            0x10, 1,
            0x18, 1, 0x19, 2, 0x1a, 3, 0x1b, 4, 0x1c, 5, 0x1d, 6, 0x1e, 7, 0x1f, 8,
            0x20, 1, 0x21, 2, 0x22, 3, 0x23, 4, 0x24, 5, 0x25, 6, 0x26, 7, 0x27, 8,
            0x28, 1, 0x29, 2, 0x2a, 3, 0x2b, 4, 0x2c, 5, 0x2d, 6, 0x2e, 7, 0x2f, 8,
            0x30, 1, 0x31, 2,
            0x38, 2, 0x39, 4, 0x3a, 8,
            0x41, -1, 0x42, -1, 0x43, -1, 0x44, -1,
            0x48, -1, 0x4c, -1,
            0x50, -1, 0x51, -1,
            0xe0, 4, 0xe1, 4, 0xe2, 4,
            0xe8, 2, 0xe9, 2, 0xea, 4,
            0xf0, 8, 0xf1, 16,
            0xff, 0)
    for i in range(0, len(opts), 2):
        if code == opts[i]:
            return opts[i + 1]
    return -1
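Example lookups:

print(get_zcl_attribute_size(0x21))  # 2  (uint16)
print(get_zcl_attribute_size(0x42))  # -1 (character string: no fixed size)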
99782c86be2413410c6819a59eadf0daba326af2
9,021
def get_mappings():
    """We process the mappings for two separate cases. (1) Variables that vary
    by year, and (2) variables where there are multiple realizations each year.
    """
    # Set up grid for survey years. Note that from 1996 we can only expect
    # information every other year. We start with 1978 as information about
    # 1978 employment histories is collected with the initial interview.
    years = range(1978, 2013)

    # time-constant variables
    dct_full = dict()
    dct_full.update(process_time_constant(years))
    dct_full.update(process_school_enrollment_monthly())
    dct_full.update(process_highest_degree_received())
    dct_full.update(process_multiple_each_year())
    dct_full.update(process_single_each_year())

    # Finishing
    return years, dct_full
a02ac60889ab2ef9524a50ec7eb03fe6a8b54917
9,022
def _get_function_name_and_args(str_to_split):
    """
    Split a string into a meta-function name and list of arguments.

    @param IN str_to_split  String to split
    @return Function name and list of arguments, as a pair
    """
    parts = [s.strip() for s in str_to_split.split(" | ")]
    if len(parts) < 2:
        raise Exception("Invalid meta function string: %s" % str_to_split)

    func_name = parts[0]
    func_args = parts[1:]
    return func_name, func_args
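Example:

name, args = _get_function_name_and_args("join | a | b")
print(name, args)  # join ['a', 'b']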
1dae51c87e727d7fa6a3a8012f9768b9ca3364e7
9,023
import requests


def replicas_on_delete():
    """
    This is a route for ALL NODES.

    A (previous) neighbor node sends POST requests to this route, so that a
    key-value pair replica is deleted in the current NODE.
    """
    # The hash ID of the node-owner of the primary replica
    start_id = request.form['id']
    key = request.form['key']
    k = int(request.form['k'])

    if key in node.storage:
        # Delete the key-value replica from our database
        del node.storage[key]

    if k == 1 or node.next_id == start_id:
        return "Replicas have been deleted!", 200

    data_to_next = {
        'id': start_id,
        'key': key,
        'k': k - 1
    }
    url_next = "http://" + node.next_ip + ":" + str(node.next_port) + "/delete/replicas"
    print("Informing the next neighbor to delete their replica.")
    r = requests.post(url_next, data_to_next)
    if r.status_code != 200:
        print("Something went wrong with deleting the replica in the next node.")
    return r.text, r.status_code
ff8b4cc06ce7a640914bdd58ff897dc060f22d4b
9,025
import numpy as np


def pdf2(sigma_matrix, grid):
    """Calculate PDF of the bivariate Gaussian distribution.

    Args:
        sigma_matrix (ndarray): with the shape (2, 2)
        grid (ndarray): generated by :func:`mesh_grid`, with the shape
            (K, K, 2), K is the kernel size.

    Returns:
        kernel (ndarray): un-normalized kernel.
    """
    inverse_sigma = np.linalg.inv(sigma_matrix)
    kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
    return kernel
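A small sketch of building a compatible (K, K, 2) grid with plain numpy, standing in for the project's mesh_grid helper:

K = 5
ax = np.arange(-(K // 2), K // 2 + 1)
xx, yy = np.meshgrid(ax, ax)
grid = np.stack((xx, yy), axis=-1).astype(float)  # shape (K, K, 2)

sigma = np.array([[1.0, 0.0], [0.0, 1.0]])
print(pdf2(sigma, grid).shape)  # (5, 5)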
7477b33eab034d9ca5cac63fd1eedd4f6789f1ba
9,027
def spell_sql(*args, **kwargs):
    """Build a SELECT over `emotion_data` matching any of the ids in args[0]."""
    if len(args[0]) <= 0:
        return None
    sql = "SELECT * from `emotion_data` WHERE id ={}".format(args[0][0])
    for index in args[0][1:]:
        sql += " or id ={}".format(index)
    return sql
5e5b231be2dabca75abed332864c8ae3d93b750e
9,028
import numpy as np


def is_within_bounds(bounds, point):
    """Returns true if point is within bounds. point is a d-array and bounds
    is a dx2 array. bounds is expected to be an np.array object.
    """
    point = np.array(point)
    if point.shape != (bounds.shape[0],):
        return False
    above_lb = np.all((point - bounds[:, 0] >= 0))
    below_ub = np.all((bounds[:, 1] - point >= 0))
    return above_lb * below_ub
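Example with a 2-D unit box:

bounds = np.array([[0.0, 1.0], [0.0, 1.0]])
print(is_within_bounds(bounds, [0.5, 0.2]))  # True
print(is_within_bounds(bounds, [1.5, 0.2]))  # False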
926c107a808d98f62c0323746112b6f73b5f89fe
9,029
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight.mean(dim=-1)

    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
b19b937f9b774dcac09f8949c2d1762743e7958e
9,030
def list_of_paths():
    """
    It lists all the folders which do not contain PET images
    """
    return ['.DS_Store', 'localizer', 'Space_3D_T2_FLAIR_sag_p2', 'AXIAL_FLAIR',
            'MPRAGE_ADNI_confirmed_REPEATX2', 'Axial_PD-T2_TSE',
            'Axial_PD-T2_TSE_repeat', 'MPRAGE_SAG_ISO_p2_ND',
            'Axial_PD-T2_TSE_confirmed', 'MPRAGESAGISOp2ND',
            'MPRAGE_ADNI_confirmed', 'MPRAGE_ADNI_confirmed_repeat',
            'MPRAGE_SAG_ISO_p2', 'MPRAGE', 'MPRAGE_ADNI_confirmed_REPEAT',
            'Axial_PD-T2_TSE_confirmed_repeat', 'MPRAGE_ADNI_conf_REPEAT',
            'Space_3D_T2_FLAIR_sag_p2_REPEAT', 'MPRAGE_ADNI_confirmed_RPT',
            'Brain_256_1.6_zoom_4_x_4_iter', 'Space_3D_T2_FLAIR_sag_REPEAT',
            'Axial_PD-T2_TSE_RPTconfirmed', 'Axial_PD-T2_TSE_RPT_confirmed',
            'Axial_PD-T2_TSE_confirmed_REPEAT',
            'flair_t2_spc_irprep_ns_sag_p2_1mm_iso', 'localiser']
bc74024d49396f80947b3cb0a45066381b7d3af4
9,031
def convert_onnx_to_ell(path, step_interval_msec=None, lag_threshold_msec=None):
    """
    Convert the importer model into an ELL model, optionally a steppable model
    if step_interval_msec and lag_threshold_msec are provided.
    """
    _logger = logger.get()
    _logger.info("Pre-processing... ")
    converter = convert.OnnxConverter()
    importer_model = converter.load_model(path)
    _logger.info("\n Done pre-processing.")
    try:
        importer_engine = common.importer.ImporterEngine(
            step_interval_msec=step_interval_msec,
            lag_threshold_msec=lag_threshold_msec)
        ell_map = importer_engine.convert_nodes(importer_model)
        ordered_importer_nodes, node_mapping = importer_engine.get_importer_node_to_ell_mapping()
    except Exception as e:
        _logger.error("Error occurred while attempting to convert the model: " + str(e))
        raise
    return ell_map, ordered_importer_nodes
28843c1b588d4c1772c5c4be10e1a535b940703d
9,032
def compute_error_model(model_metadata, X_test, y_test, target, error_metric):
    """Computes the model error based on test data.

    :param model_metadata: a dictionary containing metadata about a model
    :param X_test: a dataframe containing features specific to the model being evaluated
    :param y_test: a dataframe of target labels
    :param target: the column which contains the actual labels for training data
    :param error_metric: error metric to evaluate model performance on (MAE, RMSE, etc.)
    :return: the computed error
    """
    model_pipeline = get_prediction_pipeline(model_metadata)
    pred_prices = model_pipeline.predict(X_test)
    error = compute_error(y_test, pred_prices, error_metric)
    return error
9cb1ede604f863c1eeab12a593c8b62527599d12
9,034
def column(df, s, column) -> ReturnType:
    """Gets the series of the column named `column`."""
    return df.loc[s, column].to_numpy(), 0
8d400c2425a062566e61c23361dd6a1f6e0ba8b7
9,035
def features_to_id(features, intervals):
    """Convert list of features into index using spacings provided in intervals."""
    id = 0
    for k in range(len(intervals)):
        id += features[k] * intervals[k]

    # Allow 0 index to correspond to null molecule 1
    id = id + 1
    return id
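Example with mixed-radix spacings:

# features (2, 1) with intervals (1, 10): 2*1 + 1*10 + 1 = 13
print(features_to_id([2, 1], [1, 10]))  # 13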
74b0b201888a69c045ef140959876dd3e909f20d
9,036
import torch


def index_initial(n_batch, n_ch, tensor=True):
    """Tensor batch and channel index initialization.

    Args:
        n_batch (Int): Number of batch.
        n_ch (Int): Number of channel.
        tensor (bool): Return tensor or numpy array

    Returns:
        Tensor: Batch index
        Tensor: Channel index
    """
    batch_index = []
    for i in range(n_batch):
        batch_index.append([[i]] * n_ch)
    ch_index = []
    for i in range(n_ch):
        ch_index += [[i]]
    ch_index = [ch_index] * n_batch

    if tensor:
        batch_index = torch.tensor(batch_index)
        ch_index = torch.tensor(ch_index)
        if torch.cuda.is_available():
            batch_index = batch_index.cuda()
            ch_index = ch_index.cuda()
    return batch_index, ch_index
52a16ad4afcf931ba4cda9c014d47050970995c5
9,037
import pandas as pd


def load_titanic(test_size=0.2, random_state=1, cache_dir=None, cache_subdir='datasets'):
    """Load titanic database."""
    path = find_path(DatasetEnum.titanic, cache_dir=cache_dir, cache_subdir=cache_subdir)
    df = pd.read_csv(path, sep=",", na_values=["?"], keep_default_na=True)

    # Shuffle DF and compute train/test split
    df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
    idx = int(len(df) * (1 - test_size))
    df_train = df.loc[:idx]
    df_test = df.loc[idx:]

    # Filter columns and build X, y
    y_train = df_train["survived"].values
    del df_train["survived"]

    y_test = df_test["survived"].values
    del df_test["survived"]

    infos = {}

    return df_train, y_train, df_test, y_test, infos
a222a684a55bde482664b0b3072fb04047360f50
9,039
def mock_function_fail(*args, **kwargs):
    """
    Mock a function that 'fails', i.e., returns a 1.
    """
    print("\nmock> f({}) ==> 1".format(args))  # pragma: no cover
    return 1  # pragma: no cover
ec2085e51a0809c9656d1831429858e14baf3f63
9,040
def get_field_result(client_id, field_id, count=1):
    """
    Input: field id and client (card) id.
    Output: the most recent result(s) for the field.
    :return:
    """
    with connection.cursor() as cursor:
        cursor.execute(
            """
            SELECT
                directions_napravleniya.client_id,
                directions_issledovaniya.napravleniye_id,
                directions_issledovaniya.research_id,
                directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s as time_confirmation,
                to_char(directions_issledovaniya.time_confirmation AT TIME ZONE %(tz)s, 'DD.MM.YYYY') as date_confirm,
                directions_paraclinicresult.value,
                directions_paraclinicresult.field_id
            FROM directions_issledovaniya
            LEFT JOIN directions_napravleniya
                ON directions_issledovaniya.napravleniye_id = directions_napravleniya.id
            LEFT JOIN directions_paraclinicresult
                ON directions_issledovaniya.id = directions_paraclinicresult.issledovaniye_id
            WHERE
                directions_napravleniya.client_id = %(client_p)s
                AND directions_paraclinicresult.field_id = %(field_id)s
                AND directions_issledovaniya.time_confirmation IS NOT NULL
            ORDER BY directions_issledovaniya.time_confirmation DESC
            LIMIT %(count_p)s
            """,
            params={'client_p': client_id, 'field_id': field_id, 'count_p': count, 'tz': TIME_ZONE},
        )
        row = cursor.fetchall()
    return row
7191705462f1fceb3dfca866c5fed96fa8019886
9,041
def parse_basic_profile_forms():
    """Parses and validates basic profile forms in the request.

    Returns:
        A dictionary containing user profile.

    Raises:
        ValueError: When validation failed.
    """
    return {
        'display_name': get_form_string('display_name', 32),
        'contact_email': get_form_string('contact_email', 256),
        'member_names': get_form_string('member_names', 4096),
        'nationalities': get_form_string('nationalities', 1024, allow_empty=True),
        'languages': get_form_string('languages', 1024, allow_empty=True),
        'source_url': get_form_string('source_url', 2083, allow_empty=True),
    }
c8409bcc7de6a2c0a320859f90d54215888febf8
9,042
def fixture_success(request):
    """
    Test Cases:

    1. Hitting uncovered route as base user (logged in flow). Will return 200
       since uncovered route is an open endpoint and thus Anonymous users can
       also access it.
    2. Hitting uncovered route as base user and HEAD request
    3. Hitting uncovered route as admin user and GET request
    4. Hitting uncovered route as admin user and HEAD request
    5. Hitting uncovered route as super admin user and GET request
    6. Hitting uncovered route as super admin user and HEAD request
    7. Hitting uncovered route as anonymous user and GET request
    8. Hitting uncovered route as anonymous user and HEAD request
    9. Hitting covered route as admin user and GET request
    10. Hitting covered route as admin user and HEAD request
    11. Hitting covered route as super admin user and POST request
    12. Hitting covered route as super admin user and GET request
    13. Hitting covered route as super admin user and HEAD request
    """
    db.create_all()
    base_user, admin_user, super_admin_user = config_data_setup()

    # (method, url_rule, user) per test case above; every case expects a 200.
    cases = [
        ('GET', '/uncovered_route', base_user),
        ('HEAD', '/uncovered_route', base_user),
        ('GET', '/uncovered_route', admin_user),
        ('HEAD', '/uncovered_route', admin_user),
        ('GET', '/uncovered_route', super_admin_user),
        ('HEAD', '/uncovered_route', super_admin_user),
        ('GET', '/uncovered_route', None),
        ('HEAD', '/uncovered_route', None),
        ('GET', '/covered_route', admin_user),
        ('HEAD', '/covered_route', admin_user),
        ('POST', '/covered_route', super_admin_user),
        ('GET', '/covered_route', super_admin_user),
        ('HEAD', '/covered_route', super_admin_user),
    ]
    data_to_send = [
        {
            'input': {
                'method': method,
                'url_rule': url_rule,
                'user': user,
                'function': app.view_functions[url_rule.lstrip('/')]
            },
            'output': {
                'status_code': 200
            }
        }
        for method, url_rule, user in cases
    ]

    request.addfinalizer(tear_down)
    return app, data_to_send
26603ce9203372e9ced217f75505b149942eee98
9,043
import csv
from typing import Optional


def get_quote_name(quote_number: int) -> Optional[str]:
    """Used to help applications look up quote names based on the number."""
    assert type(quote_number) in (int, type(None))

    if quote_number is None:
        return None

    for key, value in csv.__dict__.items():
        if value == quote_number:
            return key
    else:
        raise ValueError('invalid quote_number: {}'.format(quote_number))
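Example (assuming no earlier csv-module attribute compares equal to the constant):

print(get_quote_name(csv.QUOTE_NONNUMERIC))  # 'QUOTE_NONNUMERIC'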
4a96ee42b37879469a67cb657d97aa321770fd83
9,044
def calc_floodzone(row):
    """Extracts the FEMAZONE of an SFHA based on each row's attributes.

    This function acts on individual rows of a pandas DataFrame using the
    apply built-in.

    Parameters
    ----------
    row : Pandas Series
        A row of a pandas DataFrame

    Returns
    -------
    str
        The flood zone designation for an SFHA
    """
    if row["FLD_ZONE"] == 'AO':
        zone = 'AO' + str(round(row['DEPTH']))
    elif row["FLD_ZONE"] == 'AH':
        zone = 'AH' + str(round(row["STATIC_BFE"]))
    else:
        zone = row["FLD_ZONE"]
    return zone
5bb6f3f7cfc1b6bce41ad7a752845287759c16ad
9,045
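# A hedged usage sketch for calc_floodzone above: apply it row-wise to a
# frame carrying the three columns the function reads. The values here are
# invented for illustration.
import pandas as pd

sfha = pd.DataFrame({
    'FLD_ZONE': ['AO', 'AH', 'AE'],
    'DEPTH': [2.4, None, None],
    'STATIC_BFE': [None, 11.6, None],
})
sfha['FEMAZONE'] = sfha.apply(calc_floodzone, axis=1)
# -> ['AO2', 'AH12', 'AE']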
def trans_you(ori_image, img_db, target_size=(8, 8)):
    """Transfer original image to composition of images.

    Parameters
    ----------
    ori_image : numpy.ndarray
        the original image
    img_db : h5py.File
        image datasets
    target_size : tuple

    Returns
    -------
    res_img : numpy.ndarray
        result image
    """
    tot_pixels = ori_image.shape[0]*ori_image.shape[1]

    image_idx = img_idx(tot_pixels)

    res_img = np.zeros_like(ori_image)
    res_img = imresize(res_img, (res_img.shape[0]*target_size[0],
                                 res_img.shape[1]*target_size[1]))

    for i in range(ori_image.shape[0]):
        for j in range(ori_image.shape[1]):
            idx = image_idx[i*ori_image.shape[1]+j]
            img = get_img(img_db, idx)

            pixel = ori_image[i, j, :]

            img = trans_img(img, pixel, target_size)

            res_img[i*target_size[0]:(i+1)*target_size[0],
                    j*target_size[1]:(j+1)*target_size[1]] = img

        print("[MESSAGE] Row %i is processed." % (i+1))

    return res_img
f9717d2ddc9052bee103010a23328f5445c4edc5
9,046
# NOTE: A (HTML anchor helper) and T (translator) come from the web2py/gluon
# environment injected into this controller, along with DIV, TAG, URL,
# redirect, etc. -- they are not importable from the re module.
def new_assessment():
    """
        RESTful CRUD controller to create a new 'complete' survey
        - although the created form is a fully custom one
    """

    # Load Model
    table = s3db.survey_complete
    s3db.table("survey_series")

    def prep(r):
        if r.interactive:
            viewing = get_vars.get("viewing", None)
            if viewing:
                dummy, series_id = viewing.split(".")
            else:
                series_id = get_vars.get("series", None)
            if not series_id:
                series_id = r.id
            if series_id is None:
                # The URL is bad, without a series id we're lost so list all series
                redirect(URL(c="survey", f="series",
                             args=[], vars={}))
            if len(request.post_vars) > 0:
                id = s3db.survey_save_answers_for_series(series_id,
                                                         None, # Insert
                                                         request.post_vars)
                response.confirmation = \
                    s3.crud_strings["survey_complete"].msg_record_created
            r.method = "create"
        return True
    s3.prep = prep

    def postp(r, output):
        if r.interactive:
            # Not sure why we need to repeat this & can't do it outside the prep/postp
            viewing = get_vars.get("viewing", None)
            if viewing:
                dummy, series_id = viewing.split(".")
            else:
                series_id = get_vars.get("series", None)
            if not series_id:
                series_id = r.id
            if output["form"] is None:
                # The user is not authorised to create so switch to read
                redirect(URL(c="survey", f="series",
                             args=[series_id, "read"],
                             vars={}))
            # This is a bespoke form which confuses CRUD, which displays an
            # error "Invalid form (re-opened in another window?)"
            # So as long as we don't have an error in the form we can
            # delete this error.
            elif response.error and not output["form"]["error"]:
                response.error = None
            s3db.survey_answerlist_dataTable_post(r)
            form = s3db.survey_buildQuestionnaireFromSeries(series_id, None)
            urlimport = URL(c=module, f="complete", args=["import"],
                            vars={"viewing": "%s.%s" % ("survey_series", series_id),
                                  "single_pass": True}
                            )
            buttons = DIV(A(T("Upload Completed Assessment Form"),
                            _href=urlimport,
                            _id="Excel-import",
                            _class="action-btn"
                            ),
                          )
            output["form"] = TAG[""](buttons, form)
        return output
    s3.postp = postp

    return crud_controller(module, "complete",
                           method = "create",
                           rheader = s3db.survey_series_rheader
                           )
a4b1f9ba0a7e70349607f5cc70fdac72d75fb236
9,047
import random
import types

import asyncpg


# tables and _pokemon are this module's own helpers.
async def random_pokemon(connection: asyncpg.Connection, /) -> types.Pokemon:
    """Returns a random :class:`types.Pokemon`."""
    records = await tables.Pokemon.fetch(connection)
    return await _pokemon(connection, random.choice(records))
b60659f236a4cbea998a77df211da92c18e4f0b8
9,048
import re


def remove_space(text):
    """
    Collapses runs of whitespace into single spaces and strips the ends.

    :param str text: text to process
    """
    return re.sub(r"\s+", " ", text).strip()
729d26bb6acbaa8da4c945d2ea6646ebb90f3122
9,049
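# Quick check of remove_space above: tabs, newlines and runs of spaces all
# collapse to single spaces, and the ends are trimmed.
assert remove_space('  hello \t  world \n') == 'hello world'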
import base64


def getFilePathBase():
    """
    Builds a PHP payload that reports the directory of the file behind the
    requested URL.

    :return: PHP code, base64 encoded
    """
    code = """
    @ini_set("display_errors","0");
    @set_time_limit(0);
    @set_magic_quotes_runtime(0);
    header("Content-Type:application/json");
    $res = array();$res["path"] = dirname(__FILE__);
    echo ("<ek>");
    echo json_encode($res);
    echo ("</ek>");
    die();
    """
    return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
afcb1a5bf2972a2b13a32edcd8a9b968742bf7f3
9,050
def extractHeldSimple(q, factoryConfig=None):
    """All Held Glideins: JobStatus == 5

    q: dictionary of Glideins from condor_q
    factoryConfig (FactoryConfig): Factory configuration (NOT USED, for interface)

    Returns:
        list: list of keys of Held Glideins from condor_q
    """
    #  Held==5
    qheld = q.fetchStored(lambda el: el["JobStatus"] == 5)
    return list(qheld.keys())
c942991bb0370b63364c1b8d5644713865d9ea82
9,051
def neighbors(stats1, stats2, max_val=1e5):
    """Match component corners between two stats arrays from
    cv2.connectedComponentsWithStats (rows of x, y, w, h, area).

    Top-left and bottom-right corners are stacked for both inputs, L1
    distances are computed, and the diagonal is penalized with max_val so
    index-equal (self) matches are suppressed. Returns an (N, 2) array
    whose row i holds the nearest-corner indices for box i's top-left and
    bottom-right corners.
    """
    pts1 = np.concatenate(
        (stats1[:, :2], stats1[:, :2] + stats1[:, 2:4]), axis=0)
    pts2 = np.concatenate(
        (stats2[:, :2], stats2[:, :2] + stats2[:, 2:4]), axis=0)
    dist = np.abs(pts1[:, None] - pts2).sum(axis=2)
    eye = np.eye(dist.shape[0], dtype=dist.dtype)
    R = (dist + eye * max_val).argmin(axis=1)
    return R.reshape((2, -1)).T
1b6aecad76f968cd83d40ee6531fcbd6b3b0df6c
9,052
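# A hedged sketch for neighbors above. Passing the same stats twice makes
# the eye * max_val term mask index-equal matches, so each corner is paired
# with the nearest corner of a *different* component. Rows follow the
# cv2.connectedComponentsWithStats layout (x, y, w, h, area); values are
# invented for illustration.
import numpy as np

stats = np.array([[0, 0, 10, 10, 100],
                  [12, 0, 10, 10, 100],
                  [80, 80, 5, 5, 25]])
matches = neighbors(stats, stats)  # (N, 2): nearest-corner indices per box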
from collections import defaultdict
from typing import Optional


def shortest_substring_containing_characters(text: str, char_set: set) -> Optional[str]:
    """ O(n) time & O(k) space sliding-window minimum substring. """
    window_start = 0               # persistent left edge of the sliding window
    best_start, best_end = 0, -1
    count_char = defaultdict(int)  # char and its count inside the window
    found_set = set()
    for index, char in enumerate(text):
        if char in char_set:
            count_char[char] += 1
            found_set.add(char)
        if len(found_set) == len(char_set):
            # shrink from the persistent left edge while the window stays valid
            while text[window_start] not in char_set or count_char[text[window_start]] > 1:
                if text[window_start] in count_char:
                    count_char[text[window_start]] -= 1
                window_start += 1
            if best_end < best_start or (index - window_start) < (best_end - best_start):
                best_start, best_end = window_start, index
    return text[best_start: best_end + 1] if best_end >= best_start else None
4682a01b1a4331dbada7a234c908d1c53639e69a
9,053
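# Classic minimum-window checks for the function above:
assert shortest_substring_containing_characters('ADOBECODEBANC', {'A', 'B', 'C'}) == 'BANC'
assert shortest_substring_containing_characters('a', {'a'}) == 'a'
assert shortest_substring_containing_characters('xyz', {'a'}) is None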
def refine_grid(
    grid,
    cb,
    grid_additions=(50, 50),
    ntrail=2,
    blurs=((), ()),
    metric=None,
    atol=None,
    rtol=None,
    extremum_refinement=None,
    snr=False,
):
    """Refines an existing grid by adding points to it.

    Parameters
    ----------
    grid : array
    cb : callable
        Function to be evaluated (note that noise is handled poorly).
    grid_additions : iterable of ints (even numbers)
        Sequence specifying how many gridpoints to add each time.
    ntrail : int (>= 2)
        Number of points to include in the look-ahead extrapolation.
    blurs : pair of iterables of ints (of same length)
        Blur fractions of absolute residuals to neighbors.
    metric : callable, optional
        Maps each result from ``cb`` to a scalar; results are used as-is
        when not given.
    atol : float
        Absolute tolerance to be fulfilled by all absolute residuals for early exit.
    rtol : float
        Relative tolerance to be fulfilled by all absolute residuals for early exit.
    extremum_refinement : locator (callable), n (int), predicate (callable)
        Between each grid addition a callable for locating the extremum
        (e.g. np.argmax) can be evaluated. The integer specifies how many
        gridpoints should be inserted on each side (one side if on
        boundary) of the extremum.
    snr : bool
        Use signal-to-noise ratio to lower the grid-addition-weight of
        potential noise.

    Returns
    -------
    (grid, results)

    """
    for na in grid_additions:
        if (na % 2) != 0:
            raise ValueError("Need even number of grid points for each addition")
    if extremum_refinement == "max":
        extremum_refinement = (np.argmax, 1, lambda y, i: True)
    elif extremum_refinement == "min":
        extremum_refinement = (np.argmin, 1, lambda y, i: True)

    def add_to(adds, grd, res, ys):
        na = np.sum(adds)
        if na == 0:
            return grd, res, ys
        nextresults = np.empty(grd.size + na, dtype=object)
        nextresults[0] = res[0]
        nexty = np.empty(grd.size + na)
        nexty[0] = ys[0]
        nextgrid = np.empty(grd.size + na)
        nextgrid[0] = grd[0]
        ptr = 1
        yslices = []
        for gi, nloc in enumerate(adds):
            nextgrid[ptr : ptr + nloc + 1] = np.linspace(
                grd[gi], grd[gi + 1], 2 + nloc
            )[1:]
            nextresults[ptr + nloc] = res[gi + 1]
            nexty[ptr + nloc] = ys[gi + 1]
            if nloc > 0:
                yslices.append(slice(ptr, ptr + nloc))
            ptr += nloc + 1
        newresults = cb(np.concatenate([nextgrid[yslc] for yslc in yslices]))
        newy = (
            newresults if metric is None else np.array([metric(r) for r in newresults])
        )
        ystart, ystop = 0, 0
        for yslc in yslices:
            ystop += yslc.stop - yslc.start
            nextresults[yslc] = newresults[ystart:ystop]
            nexty[yslc] = newy[ystart:ystop]
            ystart = ystop
        return nextgrid, nextresults, nexty

    results = cb(grid)
    y = np.array(
        results if metric is None else [metric(r) for r in results], dtype=np.float64
    )
    for na in grid_additions:
        if extremum_refinement:
            extremum_cb, extremum_n, predicate_cb = extremum_refinement
            argext = extremum_cb(y)
            if predicate_cb(y, argext):
                additions = np.zeros(grid.size - 1, dtype=int)
                # insert on both sides of the extremum (one side on a boundary)
                if argext > 0:  # left of
                    additions[argext - 1] = extremum_n
                if argext < grid.size - 1:  # right of
                    additions[argext] = extremum_n
                grid, results, y = add_to(additions, grid, results, y)

        additions = np.zeros(grid.size - 1, dtype=int)
        done = True if atol is not None or rtol is not None else False
        slcs, errs = [], []
        for direction in ("fw", "bw"):
            est, slc = interpolate_ahead(grid, y, ntrail, direction)
            err = np.abs(y[slc] - est)
            if atol is not None:
                done = done and np.all(err < atol)
            if rtol is not None:
                done = done and np.all(err / y[slc] < rtol)
            slcs.append(slc)
            errs.append(err)

        if snr:
            all_errs = np.array(
                [[0.0] * ntrail + errs[0].tolist(), errs[1].tolist() + [0.0] * ntrail]
            )
            min__max = np.amin(all_errs, axis=0) / np.amax(all_errs, axis=0)
            dgrid = np.diff(grid)
            delta = np.empty_like(grid)
            delta[0] = dgrid[0] ** -2
delta[-1] = dgrid[-1] ** -2 delta[1:-1] = 1 / (dgrid[:-1] * dgrid[1:]) lndelta = np.log(delta) normlndelta = lndelta - np.max(lndelta) for i in range(2): errs[i] *= (1.0 + 1e-8) - min__max[slcs[i]] errs[i] *= np.exp(normlndelta[slcs[i]]) for direction, blur, slc, err in zip(("fw", "bw"), blurs, slcs, errs): for ib, b in enumerate(blur, 1): blur_slices = (slice(ib, None), slice(None, -ib)) err[blur_slices[direction == "bw"]] += ( b * err[blur_slices[direction == "fw"]] ) rerr = np.array(np.round(err * na / 2 / np.sum(err)), dtype=int) delta = np.sum(rerr) - na // 2 if delta == 0: pass else: sorted_indices = np.argsort(rerr) for i in sorted_indices[-abs(delta) :]: rerr[i] += 1 if delta < 0 else -1 if np.sum(rerr) - na // 2: raise ValueError("Balancing failed.") additions[ slice(ntrail - 1, None) if direction == "fw" else slice(None, 1 - ntrail) ] += rerr grid, results, y = add_to(additions, grid, results, y) if done: break return grid, results
c84a365bcc271622fd49a01d89303aa2adb1c624
9,054
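# A hedged usage sketch for refine_grid above: resolve a narrow Gaussian
# bump starting from a coarse grid. cb must accept an array of abscissae;
# interpolate_ahead is assumed to be the module-internal helper the
# function already calls.
import numpy as np

coarse = np.linspace(0.0, 10.0, 16)
fine, results = refine_grid(coarse, lambda x: np.exp(-(x - 5.0) ** 2),
                            grid_additions=(20, 20))
# fine.size == 16 + 20 + 20, with the new points clustered around x = 5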
from datetime import datetime, timedelta


def last_week(today: datetime=None, tz=None):
    """
    Returns last week begin (inclusive) and end (exclusive).
    :param today: Some date (defaults current datetime)
    :param tz: Timezone (defaults pytz UTC)
    :return: begin (inclusive), end (exclusive)
    """
    if today is None:
        today = datetime.utcnow()
    begin = today - timedelta(weeks=1, days=today.weekday())
    begin = datetime(year=begin.year, month=begin.month, day=begin.day)
    return localize_time_range(begin, begin + timedelta(days=7), tz)
a210707e2a479fe4e8b98a137c0ade684d4dd6da
9,055
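# A hedged sketch for last_week above; localize_time_range is assumed to
# be this module's own helper (UTC when tz is None).
from datetime import datetime

begin, end = last_week(datetime(2021, 6, 16))  # a Wednesday
# begin -> Monday 2021-06-07 00:00, end -> Monday 2021-06-14 00:00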
def get_velocity_limits():
    """Reads the per-axis velocity limits (a1..a6) from the UI text fields
    and returns them as a dict of floats. pm is the pymel.core module
    imported elsewhere."""
    velocity_limits = {}

    for i in range(6):
        try:
            velocity_limits['a{}'.format(i+1)] = float(pm.textField(
                't_A{}vel'.format(i+1),
                q=True,
                text=True))
        except ValueError:
            pm.error('Robot velocity limits must be floats')

    return velocity_limits
68f58ed715a39478d119af1e1aabe54fa7ec6094
9,056
def decode_item_length(encoded_data: Bytes) -> int:
    """
    Find the length of the rlp encoding for the first object in the
    encoded sequence.
    Here `encoded_data` refers to concatenation of rlp encoding for each
    item in a sequence.

    NOTE - This is a helper function not described in the spec. It was
    introduced as the spec doesn't discuss decoding the RLP encoded data.

    Parameters
    ----------
    encoded_data :
        RLP encoded data for a sequence of objects.

    Returns
    -------
    rlp_length : `int`
    """
    # Can't decode item length for empty encoding
    ensure(len(encoded_data) > 0)

    first_rlp_byte = Uint(encoded_data[0])

    # This is the length of the big endian representation of the length of
    # rlp encoded object byte stream.
    length_length = Uint(0)
    decoded_data_length = 0

    # This occurs only when the raw_data is a single byte whose value < 128
    if first_rlp_byte < 0x80:
        # We return 1 here, as the end formula
        # 1 + length_length + decoded_data_length would be invalid for
        # this case.
        return 1
    # This occurs only when the raw_data is a byte stream with length < 56
    # and doesn't fall into the above cases
    elif first_rlp_byte <= 0xB7:
        decoded_data_length = first_rlp_byte - 0x80
    # This occurs only when the raw_data is a byte stream and doesn't fall
    # into the above cases
    elif first_rlp_byte <= 0xBF:
        length_length = first_rlp_byte - 0xB7
        ensure(length_length < len(encoded_data))
        # Expectation is that the big endian bytes shouldn't start with 0
        # while trying to decode using RLP, in which case it is an error.
        ensure(encoded_data[1] != 0)
        decoded_data_length = Uint.from_be_bytes(
            encoded_data[1 : 1 + length_length]
        )
    # This occurs only when the raw_data is a sequence of objects with
    # length(concatenation of encoding of each object) < 56
    elif first_rlp_byte <= 0xF7:
        decoded_data_length = first_rlp_byte - 0xC0
    # This occurs only when the raw_data is a sequence of objects and
    # doesn't fall into the above cases.
    elif first_rlp_byte <= 0xFF:
        length_length = first_rlp_byte - 0xF7
        ensure(length_length < len(encoded_data))
        # Expectation is that the big endian bytes shouldn't start with 0
        # while trying to decode using RLP, in which case it is an error.
        ensure(encoded_data[1] != 0)
        decoded_data_length = Uint.from_be_bytes(
            encoded_data[1 : 1 + length_length]
        )

    return 1 + length_length + decoded_data_length
d005b8050abaaba76bd5d3a24419f86c462af2b2
9,057
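# Worked checks of the RLP length rules implemented above, assuming the
# module's ensure/Uint helpers behave as in the ethereum/execution-specs
# sources:
assert decode_item_length(b'\x42') == 1          # single byte < 0x80
assert decode_item_length(b'\x83dog') == 4       # short string: 1 prefix + 3 payload
assert decode_item_length(b'\xc2\x01\x02') == 3  # short list: 1 prefix + 2 payload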
def pxor(a1, a2, fmt=None): """Bitwise XOR""" return c2repr(_inconv(a1) ^ _inconv(a2), fmt)
a65ada1901fc5bfa202af5128c3e5b6e54d5f6dc
9,058
from typing import Tuple


def milestone_2_test_1_initial_val(lattice_grid_shape: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
    """
    Return initial conditions

    Args:
        lattice_grid_shape: lattice grid [lx, ly]

    Returns:
        density with 0.5, but one peak in the middle, velocities 0
    """
    density = np.ones(lattice_grid_shape) * 0.5
    # floor division: float indices would raise an IndexError in numpy
    density[lattice_grid_shape[0] // 2, lattice_grid_shape[1] // 2] = 0.6
    velocity = np.ones(lattice_grid_shape) * 0.0
    return density, velocity
89d6ed57e93859182a92946e94adc2d26631f6e3
9,059
def test_element_html_call_get_attribute(monkeypatch, browser_driver): """Calls el_or_xpath WebElement attr get_attribute""" called = [] class FakeWebElement: def get_attribute(self, val): called.append(('get_attribute', val)) return 42 @browser_driver.register class FakeDriver: pass # This is needed to pass type checks in element_html() monkeypatch.setattr(core, 'WebElement', FakeWebElement) b = Browser(FakeDriver()) fake_el = FakeWebElement() retval = b.element_html(fake_el, core.HTMLProperty.outer) assert retval == 42 assert called == [ ('get_attribute', core.HTMLProperty.outer.value) ]
7b3bcc3ba4a8c030b15649b240f75bf9bed71570
9,060
import time


def moving_dictators(session, system_ids):
    """
    Show newly controlling dictators in the last 2 days.
    Show all controlling dictators in monitored systems.

    Subqueries galore, you've been warned.

    Returns: A list of messages to send.
    """
    gov_dic = session.query(Government.id).\
        filter(Government.text.in_(["Anarchy", "Dictatorship"])).\
        scalar_subquery()
    control_state_id = session.query(PowerState.id).\
        filter(PowerState.text == "Control").\
        scalar_subquery()

    current = sqla_orm.aliased(FactionState)
    pending = sqla_orm.aliased(FactionState)
    sys = sqla_orm.aliased(System)
    sys_control = sqla_orm.aliased(System)
    dics = session.query(Influence, sys.name, Faction.name, Government.text,
                         current.text, pending.text,
                         sqla.func.ifnull(sys_control.name, 'N/A').label('control')).\
        join(sys, Influence.system_id == sys.id).\
        join(Faction, Influence.faction_id == Faction.id).\
        join(Government, Faction.government_id == Government.id).\
        join(current, Influence.state_id == current.id).\
        join(pending, Influence.pending_state_id == pending.id).\
        outerjoin(
            sys_control,
            sqla.and_(
                sys_control.power_state_id == control_state_id,
                sys_control.dist_to(sys) < 15
            )
        ).\
        filter(Influence.system_id.in_(system_ids),
               Government.id.in_(gov_dic)).\
        order_by('control', sys.name).\
        all()

    look_for = [sqla.and_(InfluenceHistory.system_id == inf[0].system_id,
                          InfluenceHistory.faction_id == inf[0].faction_id)
                for inf in dics]
    time_window = time.time() - (60 * 60 * 24 * 2)
    inf_history = session.query(InfluenceHistory).\
        filter(sqla.or_(*look_for)).\
        filter(InfluenceHistory.updated_at >= time_window).\
        order_by(InfluenceHistory.system_id, InfluenceHistory.faction_id,
                 InfluenceHistory.updated_at.desc()).\
        all()

    pair_hist = {}
    for hist in inf_history:
        key = "{}_{}".format(hist.system_id, hist.faction_id)
        pair_hist[key] = hist

    lines = [["Control", "System", "Faction", "Gov", "Date", "Inf",
              "Inf (2 days ago)", "State", "Pending State"]]
    for dic in dics:
        key = "{}_{}".format(dic[0].system_id, dic[0].faction_id)
        try:
            lines += [[dic[-1], dic[1][:16], dic[2][:16], dic[3][:3],
                       dic[0].short_date,
                       "{:5.2f}".format(round(dic[0].influence, 2)),
                       "{:5.2f}".format(round(pair_hist[key].influence, 2)),
                       dic[-3], dic[-2]]]
        except KeyError:
            lines += [[dic[-1], dic[1][:16], dic[2][:16], dic[3][:3],
                       dic[0].short_date,
                       "{:5.2f}".format(round(dic[0].influence, 2)), "N/A",
                       dic[-3], dic[-2]]]

    prefix = "\n\n**Inf Movement Anarchies/Dictators**\n"
    prefix += "N/A: Means no previous information, either newly expanded to system or not tracking.\n"
    return cog.tbl.format_table(lines, header=True, prefix=prefix)
9d9808d608190dae0a9f57980312e2ae830c492c
9,061
def get_alt_for_q_with_constant_mach(q, mach, tol=5., SI=False, nmax=20):
    # type: (float, float, float, bool, int) -> float
    """
    Gets the altitude associated with a dynamic pressure.

    Parameters
    ----------
    q : float
        the dynamic pressure lb/ft^2 (SI=Pa)
    mach : float
        the mach to hold constant
    tol : float; default=5.
        tolerance in feet/meters
    SI : bool
        should SI units be used; default=False
    nmax : int; default=20
        maximum number of iterations for the altitude search

    Returns
    -------
    alt : float
        the altitude in ft (SI=m)
    """
    pressure = 2 * q / (1.4 * mach ** 2)  # gamma = 1.4
    alt = get_alt_for_pressure(pressure, tol=tol, SI=SI, nmax=nmax)
    return alt
f9286d7f742a8e8e3f25d63210180dbd7bc2fcc7
9,062
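# Sanity sketch for the function above: q = (gamma / 2) * p * mach^2, so at
# sea level (p ~ 2116.2 lb/ft^2) and Mach 0.8 the inversion should land near
# 0 ft, within tol. get_alt_for_pressure is the module's own helper.
q_sl = 0.7 * 2116.2 * 0.8 ** 2  # ~948 lb/ft^2
alt = get_alt_for_q_with_constant_mach(q_sl, 0.8)  # ~0.0 ft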
def addMetadataFlags(metadataChunk, numberOfMetadataChunks):
    """Prepends a binary flag holding the number of metadata chunks this
    upload has (uint8).

    Arguments:
        metadataChunk {bytes} -- First metadata chunk already encrypted,
            but before signing.
        numberOfMetadataChunks {int} -- Self-explanatory.

    Returns:
        bytes -- Metadata chunk ready to be signed.
    """
    #pylint: disable=E1111
    numberFlag = np.uint8(numberOfMetadataChunks).tobytes()
    fullMetadataChunk = b"".join([numberFlag, metadataChunk])
    return fullMetadataChunk
aeaefd8e1cd62524d435ee95bc272a9a676680c0
9,063
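# Minimal check for addMetadataFlags above: the chunk count lands in the
# first byte, followed by the untouched chunk.
import numpy as np

chunk = b'\x00' * 16
flagged = addMetadataFlags(chunk, 3)
assert flagged[0] == 3 and flagged[1:] == chunk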
def table(a): """get tabular view of obj, if available, else return obj""" if misc.istablarray(a): return a.__view__('table') return a
e04b53f40203fbeeb3104f5e46bab87ab3304269
9,064
def parse_quadrupole(line): """ Quadrupole (type 1) V1: zedge V2: quad gradient (T/m) V3: file ID If > 0, then include fringe field (using Enge function) and V3 = effective length of quadrupole. V4: radius (m) V5: x misalignment error (m) V6: y misalignment error (m) V7: rotation error x (rad) V8: rotation error y (rad) V9: rotation error z (rad) If V9 != 0, skew quadrupole V10: rf quadrupole frequency (Hz) V11: rf quadrupole phase (degree) """ v = v_from_line(line) d={} d['zedge'] = float(v[1]) d['b1_gradient'] = float(v[2]) if float(v[3]) > 0: d['L_effective'] = float(v[3]) else: d['file_id'] = int(v[3]) d['radius'] = float(v[4]) d2 = parse_misalignments(v[5:10]) d.update(d2) if len(v) > 11: d['rf_frequency'] = float(v[10]) d['rf_phase_deg'] = float(v[11]) return(d)
2e9748fb0eabe51383fcb1ff47a7278dda622e44
9,065
def cases_vides(pave):
    """Finds every empty cell that has at least one filled adjacent cell in
    a block, where the block (pave) is a 2-D array of tiles or empty cells.

    Returns an array of Position objects holding the coordinates of those
    empty cells together with their adjacent tiles, keyed by direction."""
    result = []
    for i in range(len(pave)):
        for j in range(len(pave)):
            if pave[i][j] == None:
                position = Position((i, j), None, None, None, None)
                if is_in_array(i + 1, j, pave) and pave[i + 1][j] != None:
                    position.Bot = pave[i + 1][j]
                if is_in_array(i - 1, j, pave) and pave[i - 1][j] != None:
                    position.Top = pave[i - 1][j]
                if is_in_array(i, j + 1, pave) and pave[i][j + 1] != None:
                    position.Right = pave[i][j + 1]
                if is_in_array(i, j - 1, pave) and pave[i][j - 1] != None:
                    position.Left = pave[i][j - 1]
                if position.Top != None or position.Bot != None or position.Left != None or position.Right != None:
                    result.append(position)
    return result
2d2de1651f000f48ab32e484f3f6b465231248b5
9,066
def _create_scalar_tensor(vals, tensor=None): """Create tensor from scalar data""" if not isinstance(vals, (tuple, list)): vals = (vals,) return _create_tensor(np.array(vals), tensor)
ef41eabc66eda8739a78931d53ccc6feb8dfc6bb
9,067
import importlib.util


def is_importable(name):
    """
    Determines if a given package name can be found.

    :param str name: The name of the package
    :returns: True if the package can be found
    :rtype: bool
    """
    return bool(importlib.util.find_spec(name))
548044b06d250af7f49dc3c9b4144490a5bbcc83
9,068
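# Quick checks for is_importable above:
assert is_importable('json') is True
assert is_importable('no_such_package_hopefully') is False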
def make_pipeline(*steps, **kwargs): """Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of estimators A list of estimators. memory : None, str or object with the joblib.Memory interface, default=None Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. Returns ------- p : Pipeline See Also -------- imblearn.pipeline.Pipeline : Class for creating a pipeline of transforms with a final estimator. Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) ... # doctest: +NORMALIZE_WHITESPACE Pipeline(steps=[('standardscaler', StandardScaler()), ('gaussiannb', GaussianNB())]) """ memory = kwargs.pop("memory", None) verbose = kwargs.pop('verbose', False) if kwargs: raise TypeError( 'Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0]) ) return Pipeline(skpipeline._name_estimators(steps), memory=memory, verbose=verbose)
a036c345208333b6f6d9d33998d06b282c9aa711
9,069
import logging def say_hello(name): """ Log client's name which entered our application and send message to it """ logging.info('User %s entered', name) return 'Hello {}'.format(name)
b79865cca34d1430bf47afabf7c96741d59ac560
9,070
import numpy def dual_edges_2(vertices): """ Compute the dual edge vectors of a triangle, expressed in the triangle plane orthonormal basis. :param vertices: The triangle vertices (3 by n matrix with the vertices as rows (where n is the dimension of the space)). :returns: The triangle dual edge vectors (3 by 2 matrix with the coordinates for edge i in row i). :rtype: :class:`Numpy array <numpy.ndarray>` """ t = dual_edges(vertices) t2 = numpy.zeros((3, 2)) for i in range(3): t2[i] = in_triangleplane_coords(vertices, t[i]) return t2
64ff173ef00dc4d916f00f67c7a35da25d81b535
9,071
def merge_dicts(dictionaries):
    """Merges multiple separate dictionaries into a single dictionary.

    Parameters
    ----------
    dictionaries : An iterable container of Python dictionaries.

    Returns
    -------
    merged : A single dictionary that represents the result of merging all
        the dicts in ``dictionaries``; later dicts override earlier ones on
        key collisions.

    Example
    -------
    The primary purpose of this function is to create a single dictionary
    by combining multiple singleton dictionaries, as shown in the following
    example:

    >>> dicts = [{'a': 1}, {'b': 2}, {'c': 3}]
    >>> eb.merge_dicts(dicts)
    {'a': 1, 'c': 3, 'b': 2}
    """
    merged = dictionaries[0].copy()
    for i in range(1, len(dictionaries)):
        merged.update(dictionaries[i])
    return merged
1a2b5f3c539937e2e27a55ce3914f7368f0a7296
9,072
from typing import Union
from typing import Callable


def noise_distribution_to_cost_function(
        noise_distribution: Union[str, Callable]
) -> Callable[[str], str]:
    """
    Parse noise distribution string to a cost function definition amici can
    work with.

    The noise distributions listed in the following are supported. :math:`m`
    denotes the measurement, :math:`y` the simulation, and :math:`\\sigma` a
    distribution scale parameter
    (currently, AMICI only supports a single distribution parameter).

    - `'normal'`, `'lin-normal'`: A normal distribution:

      .. math::
         \\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma}\\
         exp\\left(-\\frac{(m-y)^2}{2\\sigma^2}\\right)

    - `'log-normal'`: A log-normal distribution (i.e. log(m) is
      normally distributed):

      .. math::
         \\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma m}\\
         exp\\left(-\\frac{(\\log m - \\log y)^2}{2\\sigma^2}\\right)

    - `'log10-normal'`: A log10-normal distribution (i.e. log10(m) is
      normally distributed):

      .. math::
         \\pi(m|y,\\sigma) = \\frac{1}{\\sqrt{2\\pi}\\sigma m \\log(10)}\\
         exp\\left(-\\frac{(\\log_{10} m - \\log_{10} y)^2}{2\\sigma^2}\\right)

    - `'laplace'`, `'lin-laplace'`: A Laplace distribution:

      .. math::
         \\pi(m|y,\\sigma) = \\frac{1}{2\\sigma}
         \\exp\\left(-\\frac{|m-y|}{\\sigma}\\right)

    - `'log-laplace'`: A log-Laplace distribution (i.e. log(m) is Laplace
      distributed):

      .. math::
         \\pi(m|y,\\sigma) = \\frac{1}{2\\sigma m}
         \\exp\\left(-\\frac{|\\log m - \\log y|}{\\sigma}\\right)

    - `'log10-laplace'`: A log10-Laplace distribution (i.e. log10(m) is
      Laplace distributed):

      .. math::
         \\pi(m|y,\\sigma) = \\frac{1}{2\\sigma m \\log(10)}
         \\exp\\left(-\\frac{|\\log_{10} m - \\log_{10} y|}{\\sigma}\\right)

    - `'binomial'`, `'lin-binomial'`: A (continuation of a discrete) binomial
      distribution, parameterized via the success probability
      :math:`p=\\sigma`:

      .. math::
         \\pi(m|y,\\sigma) = \\operatorname{Heaviside}(y-m) \\cdot
                \\frac{\\Gamma(y+1)}{\\Gamma(m+1) \\Gamma(y-m+1)}
                \\sigma^m (1-\\sigma)^{(y-m)}

    - `'negative-binomial'`, `'lin-negative-binomial'`: A (continuation of a
      discrete) negative binomial distribution, with `mean = y`,
      parameterized via success probability `p`:

      .. math::
         \\pi(m|y,\\sigma) = \\frac{\\Gamma(m+r)}{\\Gamma(m+1) \\Gamma(r)}
            (1-\\sigma)^m \\sigma^r

      where

      .. math::
         r = \\frac{1-\\sigma}{\\sigma} y

    The distributions above are for a single data point.
    For a collection :math:`D=\\{m_i\\}_i` of data points and corresponding
    simulations :math:`Y=\\{y_i\\}_i` and noise parameters
    :math:`\\Sigma=\\{\\sigma_i\\}_i`, AMICI assumes independence,
    i.e. the full distribution is

    .. math::
       \\pi(D|Y,\\Sigma) = \\prod_i\\pi(m_i|y_i,\\sigma_i)

    AMICI uses the logarithm :math:`\\log(\\pi(m|y,\\sigma))`.

    In addition to the above mentioned distributions, it is also possible to
    pass a function taking a symbol string and returning a log-distribution
    string with variables '{str_symbol}', 'm{str_symbol}', 'sigma{str_symbol}'
    for y, m, sigma, respectively.

    :param noise_distribution: An identifier specifying a noise model.
        Possible values are

        {`'normal'`, `'lin-normal'`, `'log-normal'`, `'log10-normal'`,
        `'laplace'`, `'lin-laplace'`, `'log-laplace'`, `'log10-laplace'`,
        `'binomial'`, `'lin-binomial'`, `'negative-binomial'`,
        `'lin-negative-binomial'`, `<Callable>`}

        For the meaning of the values see above.

    :return: A function that takes a strSymbol and then creates a cost
        function string (negative log-likelihood) from it, which can be
        sympified.
""" if isinstance(noise_distribution, Callable): return noise_distribution if noise_distribution in ['normal', 'lin-normal']: y_string = '0.5*log(2*pi*{sigma}**2) + 0.5*(({y} - {m}) / {sigma})**2' elif noise_distribution == 'log-normal': y_string = '0.5*log(2*pi*{sigma}**2*{m}**2) ' \ '+ 0.5*((log({y}) - log({m})) / {sigma})**2' elif noise_distribution == 'log10-normal': y_string = '0.5*log(2*pi*{sigma}**2*{m}**2*log(10)**2) ' \ '+ 0.5*((log({y}, 10) - log({m}, 10)) / {sigma})**2' elif noise_distribution in ['laplace', 'lin-laplace']: y_string = 'log(2*{sigma}) + Abs({y} - {m}) / {sigma}' elif noise_distribution == 'log-laplace': y_string = 'log(2*{sigma}*{m}) + Abs(log({y}) - log({m})) / {sigma}' elif noise_distribution == 'log10-laplace': y_string = 'log(2*{sigma}*{m}*log(10)) ' \ '+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}' elif noise_distribution in ['binomial', 'lin-binomial']: # Binomial noise model parameterized via success probability p y_string = '- log(Heaviside({y} - {m})) - loggamma({y}+1) ' \ '+ loggamma({m}+1) + loggamma({y}-{m}+1) ' \ '- {m} * log({sigma}) - ({y} - {m}) * log(1-{sigma})' elif noise_distribution in ['negative-binomial', 'lin-negative-binomial']: # Negative binomial noise model of the number of successes m # (data) before r=(1-sigma)/sigma * y failures occur, # with mean number of successes y (simulation), # parameterized via success probability p = sigma. r = '{y} * (1-{sigma}) / {sigma}' y_string = f'- loggamma({{m}}+{r}) + loggamma({{m}}+1) ' \ f'+ loggamma({r}) - {r} * log(1-{{sigma}}) ' \ f'- {{m}} * log({{sigma}})' else: raise ValueError( f"Cost identifier {noise_distribution} not recognized.") def nllh_y_string(str_symbol): y, m, sigma = _get_str_symbol_identifiers(str_symbol) return y_string.format(y=y, m=m, sigma=sigma) return nllh_y_string
d26ae31211ab5a9fae2b350391ab2a835ba02758
9,073
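# A hedged usage sketch for the function above: per its docstring, the
# returned callable substitutes '{s}', 'm{s}' and 'sigma{s}' for y, m and
# sigma in the negative log-likelihood string.
nllh_for = noise_distribution_to_cost_function('normal')
print(nllh_for('obs1'))
# -> 0.5*log(2*pi*sigmaobs1**2) + 0.5*((obs1 - mobs1) / sigmaobs1)**2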
from datetime import datetime def serializer(cls, o): """ Custom class level serializer. """ # You can provide a custom serialize/deserialize logic for certain types. if cls is datetime: return o.strftime('%d/%m/%y') # Raise SerdeSkip to tell serde to use the default serializer/deserializer. else: raise SerdeSkip()
6e9bfbb83ede2c2da412b70741d793c6e24e05ef
9,074
from optparse import OptionError, OptionParser


def parse_args():
    """ parse command-line arguments """
    usage = """Usage: bcfg2_svnlog.py [options] -r <revision> <repos>"""
    parser = OptionParser(usage=usage)
    parser.add_option("-v", "--verbose", help="Be verbose", action="count")
    parser.add_option("-c", "--config", help="Config file",
                      default="/etc/bcfg2_svnlog.conf")
    parser.add_option("-r", "--rev", help="Revision")
    parser.add_option("--stdout", help="Print log message to stdout",
                      action="store_true")

    try:
        (options, args) = parser.parse_args()
    except OptionError:
        parser.print_help()
        raise SystemExit(1)

    if not len(args):
        parser.print_help()
        raise SystemExit(1)

    get_logger(options.verbose)
    return (options, args.pop())
30ac6035e375b692a516903055b7916a601e98a5
9,075
from numpy import array, int32, sqrt, sum


# right_profile, left_profile, face_profile, prof_len, proximal_ratios and
# mass_ratios are module-level constants defined alongside kpt_names.
def compute_com(kpt_ids, pose_keypoints):
    """Computes center of mass from available points for each pose.
    Requires at least one arm (shoulder, elbow, wrist), neck and hips.

    Required keypoints to return result:
        at least one arm with hip, neck and [nose OR ear]

    :param kpt_ids: IDs of keypoints in pose_keypoints. Corresponds to kpt_names.
    :param pose_keypoints: keypoints for parts of a pose. All types are in kpt_names.
    :return COM/BOS tuple: tuple of main center of mass' x,y coordinates (ndarray),
        segment COMs (ndarray), BOS coordinates (list of list of int)
    """
    C_pts = []  # minor center of mass points
    BOS = [[-1, -1], [-1, -1]]  # base of support
    COM = array([-1, -1]).astype(int32)  # final center of mass

    # legs are 3.5 to 4 heads
    # 25 and 20: 20 front, 5 back
    # Find length from nose/ears to neck and multiply 0.5 for front foot, 0.14 for back foot.

    ## Heuristics
    no_right = False
    no_left = False
    for r_id in right_profile:
        if r_id not in kpt_ids:
            no_right = True
            break
    for l_id in left_profile:
        if l_id not in kpt_ids:
            no_left = True
            break
    face_id = -1
    for f_id in face_profile:
        if f_id in kpt_ids:
            face_id = f_id
            break

    if face_id == -1:
        return (COM, array(C_pts), BOS)
    elif no_right and no_left:
        return (COM, array(C_pts), BOS)

    ## Transformation
    """Two scenarios
    (1) Front/Back of body: do nothing
    (2) Side of body: copy point to side if needed
    """
    if not no_right and no_left:
        for indx in range(prof_len):
            r_id = right_profile[indx]
            l_id = left_profile[indx]
            if pose_keypoints[l_id, 0] == -1:
                pose_keypoints[l_id] = pose_keypoints[r_id]
    elif no_right and not no_left:
        for indx in range(prof_len):
            r_id = right_profile[indx]
            l_id = left_profile[indx]
            if pose_keypoints[r_id, 0] == -1:
                pose_keypoints[r_id] = pose_keypoints[l_id]

    ## Compute COM sections
    face_pt = pose_keypoints[face_id]
    neck_pt = pose_keypoints[1]
    head_vector = (neck_pt - face_pt)  # points down
    nose_neck_len = sqrt(sum(head_vector * head_vector))
    head_vector[0] = 0  # project to y-axis
    # head_vector[1] = head_vector[1] * 1.5

    r_sho_pt = pose_keypoints[2]
    l_sho_pt = pose_keypoints[5]
    upperRidge_pt = (r_sho_pt + l_sho_pt)/2
    r_hip_pt = pose_keypoints[8]
    l_hip_pt = pose_keypoints[11]
    lowerRidge_pt = (r_hip_pt + l_hip_pt)/2

    # Thorax COM
    thorax_vector = (lowerRidge_pt - upperRidge_pt) * proximal_ratios[0]
    C_pts.append((upperRidge_pt + thorax_vector).tolist())

    # Upper Arms COM
    r_elb_pt = pose_keypoints[3]
    l_elb_pt = pose_keypoints[6]
    r_uparm_vector = (r_sho_pt - r_elb_pt) * proximal_ratios[1]
    l_uparm_vector = (l_sho_pt - l_elb_pt) * proximal_ratios[1]
    C_pts.append((r_uparm_vector + r_elb_pt).tolist())
    C_pts.append((l_uparm_vector + l_elb_pt).tolist())

    # Forearms COM
    r_forarm_vector = (r_elb_pt - pose_keypoints[4]) * proximal_ratios[2]
    l_forarm_vector = (l_elb_pt - pose_keypoints[7]) * proximal_ratios[2]
    C_pts.append((r_forarm_vector + pose_keypoints[4]).tolist())
    C_pts.append((l_forarm_vector + pose_keypoints[7]).tolist())

    # Thigh COM and Leg COM (OR) Total Leg COM (if pts missing)
    # Right Side
    if pose_keypoints[9, 0] == -1:
        # missing leg estimation
        r_total_leg_com = (head_vector * proximal_ratios[6]) + r_hip_pt
        C_pts.append([0, 0])
        C_pts.append([0, 0])
        C_pts.append(r_total_leg_com.tolist())
        BOS[0] = ((head_vector * 3.5) + r_hip_pt).tolist()
    else:
        r_knee_pt = pose_keypoints[9]
        r_thigh_vector = (r_hip_pt - r_knee_pt) * proximal_ratios[3]
        C_pts.append((r_thigh_vector + r_knee_pt).tolist())
        if pose_keypoints[10, 0] == -1:
            # missing ankle estimation
            r_leg_com = (head_vector * proximal_ratios[5]) + r_knee_pt
            C_pts.append(r_leg_com.tolist())
            BOS[0] = ((head_vector * 1.75) + r_knee_pt).tolist()
        else:
            r_ankle_pt = pose_keypoints[10]
            r_leg_vector = (r_knee_pt - r_ankle_pt) * proximal_ratios[4]
            C_pts.append((r_leg_vector + r_ankle_pt).tolist())
            BOS[0] = r_ankle_pt.tolist()
        C_pts.append([0, 0])

    # Left Side
    if pose_keypoints[12, 0] == -1:
        # missing leg estimation
        l_total_leg_com = (head_vector * proximal_ratios[6]) + l_hip_pt
        C_pts.append([0, 0])
        C_pts.append([0, 0])
        C_pts.append(l_total_leg_com.tolist())
        BOS[1] = ((head_vector * 3.5) + l_hip_pt).tolist()
    else:
        l_knee_pt = pose_keypoints[12]
        l_thigh_vector = (l_hip_pt - l_knee_pt) * proximal_ratios[3]
        C_pts.append((l_thigh_vector + l_knee_pt).tolist())
        if pose_keypoints[13, 0] == -1:
            # missing ankle estimation
            l_leg_com = (head_vector * proximal_ratios[5]) + l_knee_pt
            C_pts.append(l_leg_com.tolist())
            BOS[1] = ((head_vector * 1.75) + l_knee_pt).tolist()
        else:
            l_ankle_pt = pose_keypoints[13]
            l_leg_vector = (l_knee_pt - l_ankle_pt) * proximal_ratios[4]
            C_pts.append((l_leg_vector + l_ankle_pt).tolist())
            BOS[1] = l_ankle_pt.tolist()
        C_pts.append([0, 0])

    ## Compute COM from C_pts, and BOS
    C_pts = array(C_pts, dtype=int32)
    COM = sum(C_pts * mass_ratios, axis=0).astype(int32)

    # was BOS[0][0] == BOS[1][0]
    if no_left ^ no_right:
        # sagittal spreading; greedy approach
        if no_left:  # facing towards right of image
            min1 = round(BOS[0][0] - (nose_neck_len * 0.14))  # constants 0.14 and 0.5 based on my estimates
            min2 = round(BOS[1][0] - (nose_neck_len * 0.14))  # of nose-neck length and foot length relative
            max1 = round(BOS[0][0] + (nose_neck_len * 0.5))   # to ankle point.
            max2 = round(BOS[1][0] + (nose_neck_len * 0.5))
        else:  # facing towards left of image
            min1 = round(BOS[0][0] - (nose_neck_len * 0.5))
            min2 = round(BOS[1][0] - (nose_neck_len * 0.5))
            max1 = round(BOS[0][0] + (nose_neck_len * 0.14))
            max2 = round(BOS[1][0] + (nose_neck_len * 0.14))

        BOS[0][0] = min1 if min1 < min2 else min2
        BOS[1][0] = max1 if max1 > max2 else max2

    return (COM, C_pts, BOS)
16e884ef76bdc21695349e6f0f9f9948426c5b8c
9,076
def _MinimumLineCount(text: str, min_line_count: int) -> str:
    """Private implementation of minimum number of lines.

    Args:
        text: The source to verify the line count of.
        min_line_count: The minimum number of lines the source must have.

    Returns:
        text: The unmodified input text.

    Raises:
        NoCodeException: If text has fewer than min_line_count lines.
    """
    if len(text.strip().split("\n")) < min_line_count:
        raise errors.NoCodeException
    return text
037400aed0503dabee61a8d5088ca2e4b3ab34a6
9,078
def RationalQuadratic1d( grid, corrlen, sigma, alpha, prior=None, mu_basis=None, mu_hyper=None, energy=0.99 ) -> Formula: """Rational quadratic kernel formula """ kernel_kwargs = { "corrlen": corrlen, "sigma": sigma, "alpha": alpha } _Formula = create_from_kernel1d(utils.rational_quadratic) return _Formula( grid=grid, prior=prior, mu_basis=mu_basis, mu_hyper=mu_hyper, energy=energy, **kernel_kwargs )
56d61ef851ac5c84336f7f6bda19885d85b42b26
9,079