Dataset columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (range 0 to 3.66M)
import numpy as np


def f_score_one_hot(labels, predictions, beta=1.0, average=None):
    """Compute the F score: (1 + beta^2) * precision * recall / (beta^2 * precision + recall).

    The labels must be one-hot; predictions are the prediction results.

    Args:
        labels: A np.array whose shape matches `predictions` and must be one-hot.
            Will be cast to `bool`.
        predictions: A floating point np.array of arbitrary shape.
        average: string, [None (default), 'micro', 'macro']
            This parameter is required for multiclass/multilabel targets.
            If ``None``, the scores for each class are returned. Otherwise, this
            determines the type of averaging performed on the data:
            ``'micro'``: Calculate metrics globally by counting the total true
                positives, false negatives and false positives.
            ``'macro'``: Calculate metrics for each label, and find their
                unweighted mean. This does not take label imbalance into account.

    Returns:
        values: A float score.

    References
    ----------
    [1] https://blog.csdn.net/sinat_28576553/article/details/80258619
    """
    if beta < 0:
        raise ValueError("beta should be >= 0 in the F-beta score")
    beta2 = beta ** 2
    # precision_score_one_hot and recall_score_one_hot add an epsilon,
    # so p and r will not be zero.
    if average is None or average == 'micro':
        p = precision_score_one_hot(labels, predictions, average=average)
        r = recall_score_one_hot(labels, predictions, average=average)
        return (1 + beta2) * p * r / (beta2 * p + r)
    elif average == 'macro':
        p = precision_score_one_hot(labels, predictions, average=None)
        r = recall_score_one_hot(labels, predictions, average=None)
        f = (1 + beta2) * p * r / (beta2 * p + r)
        return np.average(f)
    else:
        raise ValueError('Invalid average: %s.' % average)
091143244858dee1e001042931f625db30c58195
31,449
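As a sketch of the micro-averaged formula the function above implements (its `precision_score_one_hot` / `recall_score_one_hot` helpers are not shown in this row, so micro precision and recall are computed inline with numpy here):

import numpy as np

labels = np.array([[1, 0], [0, 1], [1, 0]], dtype=bool)
preds = np.array([[1, 0], [1, 0], [1, 0]], dtype=bool)

tp = np.sum(labels & preds)  # global true positives = 2
p = tp / np.sum(preds)       # micro precision = 2/3
r = tp / np.sum(labels)      # micro recall = 2/3
beta2 = 1.0
print((1 + beta2) * p * r / (beta2 * p + r))  # micro F1 ~ 0.667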
def articles():
    """Show a list of article titles"""
    # NOTE: iterating over `articles` here references this view function itself;
    # presumably a module-level sequence of articles is intended.
    the_titles = [[a[0], a[1]] for a in articles]
    return render_template('articles.html', titles=the_titles)
bb8f9af9cedb30f89fa950f60c7710ac840f026c
31,450
import json
from pathlib import Path
import uuid


def create_montage_for_background(montage_folder_path: str, im_b_path: str,
                                  f_path: str, only_face: bool) -> str:
    """
    Creates and saves the montage from a designed background.

    If a folder is provided for faces, it will create a file 'faces.json'
    inside the folder for faster loading if used repeatedly. If new images are
    introduced in the faces' folder, delete the json and the next time
    'create_montage_for_background' is called, it will be created automatically.

    :param montage_folder_path: folder to save the montage
    :param im_b_path: str with the background image path
    :param f_path: folder face path or json with the face's params. If a folder
        is provided, the 'faces.json' file will be created inside the faces' folder
    :param only_face: whether to crop the hair and chin of the face or not
    :return: str with montage path
    """
    json_f_path = get_or_create_params_json(f_path, is_background=False)
    f_faces = open(json_f_path, "r")
    json_faces: dict = json.load(f_faces)
    f_faces.close()

    im_montage = create_montage(im_b_path, json_faces, only_face)  # creates the montage

    montage_file_path = Path(f"{montage_folder_path}/montage_{uuid.uuid4().hex[:10]}.png")
    try:
        im_montage.save(montage_file_path)
    except IOError:
        logger.error("Montage created but error while saving montage")
        raise
    logger.info(f"Montage created and saved in '{montage_file_path}'")
    return str(montage_file_path)
b0c229d16e0ffdf2a8ea63cbc5785be918c09d46
31,451
def deserialize_question(question: QuestionDict) -> Question:
    """Convert a dict into a Question object."""
    return Question(
        title=question['title'],
        content=question.get('content'),
        choices=[
            Choice(title=title, goto=goto)
            for title, goto in question['choices'].items()
        ]
    )
c6f5dd962cdc7a0ef273d4397472de572f92c1f8
31,452
from datetime import date, datetime, timezone


def ensure_utc_datetime(value):
    """
    Given a datetime, date, or Wayback-style timestamp string, return an
    equivalent datetime in UTC.

    Parameters
    ----------
    value : str or datetime.datetime or datetime.date

    Returns
    -------
    datetime.datetime
    """
    if isinstance(value, str):
        return parse_timestamp(value)
    elif isinstance(value, datetime):
        if value.tzinfo:
            return value.astimezone(timezone.utc)
        else:
            return value.replace(tzinfo=timezone.utc)
    elif isinstance(value, date):
        return datetime(value.year, value.month, value.day, tzinfo=timezone.utc)
    else:
        raise TypeError('`value` must be a string, date, or datetime')
0d5c631d2736094f5a60c2eb4ca7c83fcb1e3e6a
31,453
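A minimal usage sketch for `ensure_utc_datetime` above (the string branch additionally requires the `parse_timestamp` helper, so only the datetime and date paths are exercised here):

from datetime import date, datetime

print(ensure_utc_datetime(datetime(2020, 1, 1, 12, 0)))  # 2020-01-01 12:00:00+00:00
print(ensure_utc_datetime(date(2020, 1, 1)))             # 2020-01-01 00:00:00+00:00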
def get_pod_status(pod_name: str) -> GetPodEntry:
    """Returns the current pod status for a given pod name"""
    oc_get_pods_args = ["get", "pods"]
    oc_get_pods_result = execute_oc_command(oc_get_pods_args, capture_output=True).stdout
    line = ""
    for line in oc_get_pods_result.splitlines():
        if pod_name in line:
            break
    return GetPodEntry.parse(line)
1353fb4f457a4818ffcfda188ca4d3db55ce5cc9
31,454
from math import cos, sin


def helix_evaluate(t, a, b):
    """Evaluates a helix at a parameter.

    Parameters
    ----------
    t: float
        Parameter
    a: float
        Constant
    b: float
        Constant

    Returns
    -------
    list
        The (x, y, z) coordinates.

    Notes
    -----
    An interpretation of the constants a and b: the radius of the helix is a,
    and the slope of the helix is b / a.

    References
    ----------
    .. [1] Wolfram MathWorld. *Helix*.
           Available at: http://mathworld.wolfram.com/Helix.html.
    """
    return [a * cos(t), a * sin(t), b * t]
2d62cae57dac72cd244d66df8de2d0a5d3b70c38
31,455
import scipy.signal


def get_pfb_window(num_taps, num_branches, window_fn='hamming'):
    """
    Get windowing function to multiply to time series data according to a
    finite impulse response (FIR) filter.

    Parameters
    ----------
    num_taps : int
        Number of PFB taps
    num_branches : int
        Number of PFB branches. Note that this results in `num_branches / 2`
        coarse channels.
    window_fn : str, optional
        Windowing function used for the PFB

    Returns
    -------
    window : array
        Array of PFB windowing coefficients
    """
    window = scipy.signal.firwin(num_taps * num_branches,
                                 cutoff=1.0 / num_branches,
                                 window=window_fn,
                                 scale=True)
    window *= num_taps * num_branches
    # `xp` is the module-level array backend (numpy or cupy).
    return xp.array(window)
1193a29ab754e2c8f30e1a58f34c9efcf58513af
31,456
from typing import Dict
from typing import OrderedDict


def retrieve_bluffs_by_id(panelist_id: int,
                          database_connection: mysql.connector.connect,
                          pre_validated_id: bool = False) -> Dict:
    """Returns an OrderedDict containing Bluff the Listener information for
    the requested panelist ID.

    Arguments:
        panelist_id (int)
        database_connection (mysql.connector.connect)
        pre_validated_id (bool): Flag whether or not the panelist ID has been
            validated
    """
    if not pre_validated_id:
        if not utility.validate_id(panelist_id, database_connection):
            return None

    try:
        cursor = database_connection.cursor()
        query = ("SELECT ( "
                 "SELECT COUNT(blm.chosenbluffpnlid) FROM ww_showbluffmap blm "
                 "JOIN ww_shows s ON s.showid = blm.showid "
                 "WHERE s.repeatshowid IS NULL AND blm.chosenbluffpnlid = %s "
                 ") AS chosen, ( "
                 "SELECT COUNT(blm.correctbluffpnlid) FROM ww_showbluffmap blm "
                 "JOIN ww_shows s ON s.showid = blm.showid "
                 "WHERE s.repeatshowid IS NULL AND blm.correctbluffpnlid = %s "
                 ") AS correct;")
        cursor.execute(query, (panelist_id, panelist_id,))
        result = cursor.fetchone()
        cursor.close()

        if result:
            bluffs = OrderedDict()
            bluffs["chosen"] = result[0]
            bluffs["correct"] = result[1]
            return bluffs

        return None
    except ProgrammingError as err:
        raise ProgrammingError("Unable to query the database") from err
    except DatabaseError as err:
        raise DatabaseError("Unexpected database error") from err
f3f5d3e86423db20aa4ffe22410cc491ec5e80ed
31,457
def extract_protein_from_record(record):
    """
    Grab the protein sequence as a string from a SwissProt record.

    :param record: A Bio.SwissProt.SeqRecord instance
    :return: The protein sequence as a string
    """
    return str(record.sequence)
a556bd4316f145bf23697d8582f66f7dcb589087
31,458
import cftime
import numpy as np
import pandas as pd
import xarray as xr


def _diff_coord(coord):
    """Returns the difference as a `xarray.DataArray`."""
    v0 = coord.values[0]
    calendar = getattr(v0, "calendar", None)
    if calendar:
        ref_units = "seconds since 1800-01-01 00:00:00"
        decoded_time = cftime.date2num(coord, ref_units, calendar)
        coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)
        return np.diff(coord)
    elif pd.api.types.is_datetime64_dtype(v0):
        return np.diff(coord).astype("timedelta64[s]").astype("f8")
    else:
        return np.diff(coord)
e430d7f22f0c4b9ac125768b5c69a045e44046a5
31,459
import _tkinter


def checkDependencies():
    """
    Sees which outside dependencies are missing.
    """
    missing = []
    try:
        del _tkinter
    except:
        missing.append("WARNING: _tkinter is necessary for NetworKit.\n"
                       "Please install _tkinter \n"
                       "Root privileges are necessary for this. \n"
                       "If you have these, the installation command should be: "
                       "sudo apt-get install python3-tk")
    return missing
05aad218f3df84ddb5656d0206d50ec32aa02dcb
31,460
from typing import Any, Dict


def get_user_groups_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Function for the sta-get-user-groups command. Get all the groups
    associated with a specific user.
    """
    response, output_data = client.user_groups_data(userName=args.get('userName'),
                                                    limit=args.get('limit'))
    if not response:
        return CommandResults(
            readable_output=NO_RESULT_MSG,
        )
    header_sequence = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
    return CommandResults(
        readable_output=tableToMarkdown(
            f"Groups associated with user - {args.get('userName')} : ",
            response,
            headers=header_sequence,
            headerTransform=pascalToSpace,
            removeNull=True),
        outputs_prefix='STA.USER',
        outputs_key_field=['id'],
        outputs=output_data
    )
b21b087e4e931e33111720bbc987b1bb6749fee8
31,461
def get_capital_flow(order_book_ids, start_date=None, end_date=None, frequency="1d", market="cn"):
    """Get capital inflow/outflow data.

    :param order_book_ids: stock code or list of stock codes, e.g. '000001.XSHE'
    :param start_date: start date
    :param end_date: end date
    :param frequency: defaults to daily bars. Use '1d' for daily bars, '1m' for
        minute bars, 'tick' for snapshots (Default value = "1d")
    :param market: (Default value = "cn")
    :returns: pandas.DataFrame or None
    """
    ensure_string_in(frequency, ("1d", "1m", "tick"), "frequency")
    if frequency == "tick":
        return get_capital_flow_tickbar(order_book_ids, start_date, end_date, TICKBAR_FIELDS, market)
    order_book_ids = ensure_order_book_ids(order_book_ids)
    start_date, end_date = ensure_date_range(start_date, end_date)
    if frequency == "1d":
        return get_capital_flow_daybar(order_book_ids, start_date, end_date, DAYBAR_FIELDS, 1, market)
    return get_capital_flow_minbar(order_book_ids, start_date, end_date, MINBAR_FIELDS, 1, market)
f7c3f94fd012672b75d960ef1c4d749959a7e6cc
31,462
def split_quoted(s):
    """Split a string with quotes, some possibly escaped, into a list of
    alternating quoted and unquoted segments.

    Raises a ValueError if there are unmatched quotes. Both the first and last
    entry are unquoted, but might be empty, and therefore the length of the
    resulting list must be an odd number.
    """
    result = []
    for part in s.split(QUOTE):
        if result and result[-1].endswith('\\'):
            result[-1] = result[-1] + QUOTE + part
        else:
            result.append(part)
    if not len(result) % 2:
        raise ValueError('Unmatched quote.')
    return result
0790e7b2fecfd6c2aa1ca04c8cb5f1faebb3722b
31,463
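A small usage sketch for `split_quoted` above, assuming the module-level `QUOTE` constant is the double-quote character:

QUOTE = '"'

print(split_quoted('say "hello" now'))    # ['say ', 'hello', ' now']
print(split_quoted('escaped \\" quote'))  # ['escaped \\" quote'] (escaped quote is kept inline)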
import torch


def calc_IOU(seg_omg1: torch.BoolTensor, seg_omg2: torch.BoolTensor, eps: float = 1.e-6) -> float:
    """
    Calculate intersection over union between 2 boolean segmentation masks.

    :param seg_omg1: first segmentation mask
    :param seg_omg2: second segmentation mask
    :param eps: eps for numerical stability
    :return: IOU
    """
    dim = [1, 2, 3] if len(seg_omg1.shape) == 4 else [1, 2]
    intersection = (seg_omg1 & seg_omg2).sum(dim=dim)
    union = (seg_omg1 | seg_omg2).sum(dim=dim)
    return (intersection.float() / (union.float() + eps)).mean().item()
6586b1f9995858be9ab7e40edd1c3433cd1cd6f4
31,464
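A quick sanity check for `calc_IOU` above, using two overlapping 4x4 masks with a leading batch dimension:

import torch

a = torch.zeros(1, 4, 4, dtype=torch.bool)
a[0, :2, :] = True   # rows 0-1
b = torch.zeros(1, 4, 4, dtype=torch.bool)
b[0, 1:3, :] = True  # rows 1-2

print(calc_IOU(a, b))  # intersection=4, union=12 -> ~0.3333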
def td_path_join(*argv):
    """Construct TD path from args."""
    assert len(argv) >= 2, "Requires at least 2 tdpath arguments"
    return "/".join([str(arg_) for arg_ in argv])
491f1d50767a50bfbd7d3a2e79745e0446f5204c
31,466
import torch


def calculate_segmentation_statistics(outputs: torch.Tensor, targets: torch.Tensor,
                                      class_dim: int = 1, threshold=None):
    """Compute segmentation statistics.

    Args:
        outputs: torch.Tensor.
        targets: torch.Tensor.
        threshold: threshold for binarization of predictions.
        class_dim: indicates class dimension (K).

    Returns:
        True positives, false positives and false negatives for the
        segmentation task.
    """
    num_dims = len(outputs.shape)
    assert num_dims > 2, "Found only two dimensions, shape should be [bs, C, ...]"  # noqa: S101
    assert outputs.shape == targets.shape, "shape mismatch"  # noqa: S101
    if threshold is not None:
        outputs = (outputs > threshold).float()
    dims = [dim for dim in range(num_dims) if dim != class_dim]
    true_positives = torch.sum(outputs * targets, dim=dims)
    false_positives = torch.sum(outputs * (1 - targets), dim=dims)
    false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
    return true_positives, false_positives, false_negatives
ccc017dd5c7197565e54c62cd83eb5cdc02d7d17
31,467
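As a sketch of how these per-class counts are typically consumed, a Dice score can be derived directly from them (the tensors here are hypothetical, not from the original source):

import torch

outputs = torch.rand(2, 3, 8, 8)                    # [bs, C, H, W] predictions
targets = (torch.rand(2, 3, 8, 8) > 0.5).float()    # binary ground truth

tp, fp, fn = calculate_segmentation_statistics(outputs, targets, class_dim=1, threshold=0.5)
dice = 2 * tp / (2 * tp + fp + fn + 1e-7)  # per-sample, per-class Dice
print(dice)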
import numpy as np


def sample_from_cov(mean_list, cov_list, Nsamples):
    """
    Sample from the multivariate Gaussian of Gaia astrometric data.

    Args:
        mean_list (list): A list of arrays of astrometric data.
            [ra, dec, plx, pmra, pmdec]
        cov_list (array): A list of all the uncertainties and covariances:
            [ra_err, dec_err, plx_err, pmra_err, pmdec_err, ra_dec_corr,
            ra_plx_corr, ra_pmra_corr, ra_pmdec_corr, dec_plx_corr,
            dec_pmra_corr, dec_pmdec_corr, plx_pmra_corr, plx_pmdec_corr,
            pmra_pmdec_corr]
        Nsamples (int): The number of samples.
    """
    Ndim = len(mean_list)       # 5 dimensions: ra, dec, plx, pmra, pmdec
    Nstars = len(mean_list[0])

    # Construct the mean and covariance matrices.
    mean = np.vstack(([i for i in mean_list]))
    cov = construct_cov(cov_list, Ndim)

    # Sample from the multivariate Gaussian.
    samples = np.zeros((Nsamples, Ndim, Nstars))
    for i in range(Nstars):
        samples[:, :, i] = np.random.multivariate_normal(
            mean[:, i], cov[:, :, i], Nsamples)
    return samples
353c08bfd8951610fdcf1511107888c1153d3eed
31,468
def get_finger_distal_angle(x, m):
    """Gets the finger angle th3 from a hybrid state"""
    return x[2]
f93b1931f3e4a9284ccac3731dfeea21526ea07c
31,469
def get_karma(**kwargs):
    """Get your current karma score"""
    user_id = kwargs.get("user_id").strip("<>@")
    session = db_session.create_session()
    karma_user = session.query(KarmaUser).get(user_id)
    try:
        if not karma_user:
            return "User not found"
        if karma_user.karma_points == 0:
            return "Sorry, you don't have any karma yet"
        return (
            f"Hey {karma_user.username}, your current karma is {karma_user.karma_points}"
        )
    finally:
        session.close()
29f2622e65c45e642285014bbfa6dc33abb0e326
31,470
import numpy as np
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from pandas.api.types import is_numeric_dtype, is_string_dtype


def plotly_shap_violin_plot(X, shap_values, col_name, color_col=None, points=False, interaction=False):
    """
    Returns a violin plot for categorical values. If points=True or color_col
    is not None, a scatterplot of points is plotted next to the violin plots.
    If color_col is given, the scatter is colored by color_col.
    """
    assert is_string_dtype(X[col_name]), \
        f'{col_name} is not categorical! Can only plot violin plots for categorical features!'

    x = X[col_name]
    shaps = shap_values[:, X.columns.get_loc(col_name)]
    n_cats = X[col_name].nunique()

    if points or color_col is not None:
        fig = make_subplots(rows=1, cols=2 * n_cats, column_widths=[3, 1] * n_cats, shared_yaxes=True)
        showscale = True
    else:
        fig = make_subplots(rows=1, cols=n_cats, shared_yaxes=True)

    fig.update_yaxes(range=[shaps.min() * 1.3, shaps.max() * 1.3])

    for i, cat in enumerate(X[col_name].unique()):
        col = 1 + i * 2 if points or color_col is not None else 1 + i
        fig.add_trace(go.Violin(
            x=x[x == cat],
            y=shaps[x == cat],
            name=cat,
            box_visible=True,
            meanline_visible=True,
            showlegend=False,
        ), row=1, col=col)
        if color_col is not None:
            if is_numeric_dtype(X[color_col]):
                fig.add_trace(go.Scatter(
                    x=np.random.randn(len(x[x == cat])),
                    y=shaps[x == cat],
                    name=color_col,
                    mode='markers',
                    showlegend=False,
                    hoverinfo="text",
                    hovertemplate="<i>shap</i>: %{y:.2f}<BR>" + f"<i>{color_col}" + ": %{marker.color}",
                    text=[f"shap: {shap}<>{color_col}: {col}"
                          for shap, col in zip(shaps[x == cat], X[color_col][x == cat])],
                    marker=dict(size=7,
                                opacity=0.6,
                                cmin=X[color_col].min(),
                                cmax=X[color_col].max(),
                                color=X[color_col][x == cat],
                                colorscale='Bluered',
                                showscale=showscale,
                                colorbar=dict(title=color_col)),
                ), row=1, col=col + 1)
            else:
                n_color_cats = X[color_col].nunique()
                colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A',
                          '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
                colors = colors * (1 + int(n_color_cats / len(colors)))
                colors = colors[:n_color_cats]
                for color_cat, color in zip(X[color_col].unique(), colors):
                    fig.add_trace(go.Scatter(
                        x=np.random.randn(len(x[(x == cat) & (X[color_col] == color_cat)])),
                        y=shaps[(x == cat) & (X[color_col] == color_cat)],
                        name=color_cat,
                        mode='markers',
                        showlegend=showscale,
                        hoverinfo="text",
                        hovertemplate="<i>shap</i>: %{y:.2f}<BR>" + f"<i>{color_col}: {color_cat}",
                        marker=dict(size=7, opacity=0.8, color=color)
                    ), row=1, col=col + 1)
            showscale = False
        elif points:
            fig.add_trace(go.Scatter(
                x=np.random.randn(len(x[x == cat])),
                y=shaps[x == cat],
                mode='markers',
                showlegend=False,
                hovertemplate="<i>shap</i>: %{y:.2f}",
                marker=dict(size=7, opacity=0.6, color='blue'),
            ), row=1, col=col + 1)

    # The title previously interpolated the literal string "None" when
    # interaction was False; use an empty string instead.
    title_kind = "interaction " if interaction else ""
    if points or color_col is not None:
        for i in range(n_cats):
            fig.update_xaxes(showgrid=False, zeroline=False, visible=False, row=1, col=2 + i * 2)
            fig.update_yaxes(showgrid=False, zeroline=False, row=1, col=2 + i * 2)
        if color_col is not None:
            fig.update_layout(title=f'Shap {title_kind}values for {col_name}<br>(colored by {color_col})',
                              hovermode='closest')
        else:
            fig.update_layout(title=f'Shap {title_kind}values for {col_name}', hovermode='closest')
    else:
        fig.update_layout(title=f'Shap {title_kind}values for {col_name}')
    return fig
03754df82272826965e73266075f3a0334620e93
31,472
def animation():
    """
    This function gives access to the animation tools factory - allowing you
    to access all the tools available.

    Note: This will not re-instance the factory on each call; the factory is
    instanced only on the first call and cached thereafter.

    :return: factories.Factory
    """
    # -- If we already have a cached factory return that
    global _anim_library
    if _anim_library:
        return _anim_library

    # -- Instance a new factory
    _anim_library = factories.Factory(
        abstract=AnimTool,
        plugin_identifier='identifier',
        versioning_identifier='version',
        envvar=constants.PLUGIN_ENVIRONMENT_VARIABLE,
        paths=constants.PLUGIN_LOCATIONS,
    )
    return _anim_library
682cfe96f5682296ae721519be03aeb98b918e20
31,473
def merge(pinyin_d_list):
    """
    :rtype: dict
    """
    final_d = {}
    for overwrite_d in pinyin_d_list:
        final_d.update(overwrite_d)
    return final_d
512f551620ccedae8fb53f0c60f7caf931aae249
31,474
def dprnn_tasnet(name_url_or_file=None, *args, **kwargs):
    """ Load (pretrained) DPRNNTasNet model.

    Args:
        name_url_or_file (str): Model name (we'll find the URL), model URL to
            download model, or path to model file. If None (default),
            DPRNNTasNet is instantiated but no pretrained weights are loaded.
        *args: Arguments to pass to DPRNNTasNet.
        **kwargs: Keyword arguments to pass to DPRNNTasNet.

    Returns:
        DPRNNTasNet instance (with or without pretrained weights).

    Examples:
        >>> from torch import hub
        >>> # Instantiate without pretrained weights
        >>> model = hub.load('mpariente/asteroid', 'dprnn_tasnet')
        >>> # Use pretrained weights
        >>> URL = "TOCOME"
        >>> model = hub.load('mpariente/asteroid', 'dprnn_tasnet', URL)
    """
    # No pretrained weights
    if name_url_or_file is None:
        return models.DPRNNTasNet(*args, **kwargs)
    return models.DPRNNTasNet.from_pretrained(name_url_or_file)
cf3190656d9c24730d9bab1554987d684ec33712
31,477
def utcnow():
    """Better version of utcnow() that returns utcnow with a correct TZ."""
    return timeutils.utcnow(True)
a23cc98eca8e291f6e9aff5c0e78494930476f78
31,478
import numpy as np


def build_profile(base_image, se_size=4, se_size_increment=2, num_openings_closings=4):
    """
    Build the extended morphological profiles for a given set of images.

    Parameters:
        base_image: 3d matrix; each 'channel' is considered for applying the
            morphological profile. It is the spectral information part of the EMP.
        se_size: int, initial size of the structuring element (or kernel).
            Structuring element used: disk
        se_size_increment: int, structuring element increment step
        num_openings_closings: int, number of openings and closings by
            reconstruction to perform.

    Returns:
        emp: 3d matrix with both spectral (from the base_image) and spatial
            information
    """
    base_image_rows, base_image_columns, base_image_channels = base_image.shape
    morphological_profile_size = (num_openings_closings * 2) + 1
    emp_size = morphological_profile_size * base_image_channels
    emp = np.zeros(shape=(base_image_rows, base_image_columns, emp_size))

    cont = 0
    for i in range(base_image_channels):
        # build MPs
        mp_temp = build_morphological_profiles(
            base_image[:, :, i], se_size, se_size_increment, num_openings_closings)
        aux = morphological_profile_size * (i + 1)
        # build the EMP
        cont_aux = 0
        for k in range(cont, aux):
            emp[:, :, k] = mp_temp[:, :, cont_aux]
            cont_aux += 1
        cont = morphological_profile_size * (i + 1)
    return emp
7f7cd0e1259cdd52cd4ce73c3d6eee9c9f87b474
31,479
def mujoco_env(env_id, nenvs=None, seed=None, summarize=True,
               normalize_obs=True, normalize_ret=True):
    """ Creates and wraps MuJoCo env. """
    assert is_mujoco_id(env_id)
    seed = get_seed(nenvs, seed)
    if nenvs is not None:
        env = ParallelEnvBatch([
            lambda s=s: mujoco_env(env_id, seed=s, summarize=False,
                                   normalize_obs=False, normalize_ret=False)
            for s in seed])
        return mujoco_wrap(env, summarize=summarize,
                           normalize_obs=normalize_obs,
                           normalize_ret=normalize_ret)
    env = gym.make(env_id)
    set_seed(env, seed)
    return mujoco_wrap(env, summarize=summarize,
                       normalize_obs=normalize_obs,
                       normalize_ret=normalize_ret)
5bad4500be5261f33a19e49612ce62e1db8c66dd
31,480
import torch


def polar2cart(r, theta):
    """
    Transform polar coordinates to Cartesian.

    Parameters
    ----------
    r, theta : floats or arrays
        Polar coordinates

    Returns
    -------
    [x, y] : floats or arrays
        Cartesian coordinates
    """
    return torch.stack((r * theta.cos(), r * theta.sin()), dim=-1).squeeze()
c13225a49d6435736bf326f70af5f6d4039091d8
31,481
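A quick usage sketch for `polar2cart` above:

import math
import torch

r = torch.tensor([1.0, 2.0])
theta = torch.tensor([0.0, math.pi / 2])
print(polar2cart(r, theta))  # ~[[1., 0.], [0., 2.]]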
def belongs_to(user, group_name):
    """
    Check if the user belongs to the given group.

    :param user:
    :param group_name:
    :return:
    """
    return user.groups.filter(name__iexact=group_name).exists()
e1b70b4771dfec45218078ca16335ddc3c6214e2
31,482
import torch


def sum_log_loss(logits, mask, reduction='sum'):
    """
    :param logits: reranking logits (B x C) or span loss (B x C x L)
    :param mask: reranking mask (B x C) or span mask (B x C x L)
    :return: sum of log p_positive_i over all candidates
    """
    num_pos = mask.sum(-1)                          # B x C
    gold_scores = logits.masked_fill(~(mask.bool()), 0)
    gold_scores_sum = gold_scores.sum(-1)           # B x C
    all_log_sum_exp = torch.logsumexp(logits, -1)   # B x C
    # gold_log_probs = gold_scores_sum - all_log_sum_exp * num_pos
    gold_log_probs = gold_scores_sum / num_pos - all_log_sum_exp
    loss = -gold_log_probs.sum()
    if reduction == 'mean':
        loss /= logits.size(0)
    return loss
88a312f74e7d4dce95d8dcadaeeaa1a136fceca6
31,483
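A minimal sketch exercising `sum_log_loss` above with a single example of three candidates, one of which is positive (hypothetical values):

import torch

logits = torch.tensor([[2.0, 0.5, 0.1]])
mask = torch.tensor([[1.0, 0.0, 0.0]])  # the first candidate is the gold one
print(sum_log_loss(logits, mask))       # logsumexp(logits) - 2.0 ~ 0.327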
from acor import acor

from .autocorrelation import ipce
from .autocorrelation import icce


def _get_iat_method(iatmethod):
    """Control routine for selecting the method used to calculate integrated
    autocorrelation times (iat).

    Parameters
    ----------
    iatmethod : string, optional
        Routine to use for calculating said iats. Accepts 'ipce', 'acor',
        and 'icce'.

    Returns
    -------
    iatroutine : function
        The function to be called to estimate the integrated autocorrelation
        time.
    """
    if iatmethod == 'acor':
        iatroutine = acor
    elif iatmethod == 'ipce':
        iatroutine = ipce
    elif iatmethod == 'icce':
        iatroutine = icce
    return iatroutine
a5bbe3a4f4bad486f9bab6ca4b367040ce516478
31,484
def run():
    """Main entry point."""
    return cli(obj={}, auto_envvar_prefix='IMPLANT')  # noqa
ae9e96478dbf081469052ff29d31873263060bff
31,485
import numpy as np


def qtl_test_interaction_GxG(pheno, snps1, snps2=None, K=None, covs=None, test="lrt"):
    """
    Epistasis test between two sets of SNPs.

    Args:
        pheno: [N x 1] np.array of 1 phenotype for N individuals
        snps1: [N x S1] np.array of S1 SNPs for N individuals
        snps2: [N x S2] np.array of S2 SNPs for N individuals
        K: [N x N] np.array of LMM-covariance/kinship coefficients (optional)
            If not provided, then linear regression analysis is performed
        covs: [N x D] np.array of D covariates for N individuals
        test: 'lrt' for likelihood ratio test (default) or 'f' for F-test

    Returns:
        pv: [S2 x S1] np.array of P values for epistasis tests between all
            SNPs in snps1 and snps2
    """
    # N must be known before building the default kinship matrix; the
    # original assigned it after the `K is None` check, a use-before-definition bug.
    N = snps1.shape[0]
    if K is None:
        K = np.eye(N)
    if snps2 is None:
        snps2 = snps1
    return qtl_test_interaction_GxE_1dof(
        snps=snps1, pheno=pheno, env=snps2, covs=covs, K=K, test=test
    )
77eebc7c1c673562b1b793e9e5513b9a50aa6f1b
31,486
import copy
import io

import numpy as np


def log_parser(log):
    """
    This takes the EA task log file generated by e-prime and converts it into
    a set of numpy-friendly arrays (with mixed numeric and text fields.)

    pic -- 'Picture' lines, which contain the participant's ratings.
    res -- 'Response' lines, which contain their responses (unclear)
    vid -- 'Video' lines, which demark the start and end of trials.
    """
    # substitute for GREP -- finds the 'eventtype' field.
    # required as this file has a different number of fields per line
    logname = copy.copy(log)
    log = open(log, "r").readlines()

    pic = filter(lambda s: 'Picture' in s, log)
    vid = filter(lambda s: 'Video' in s, log)

    # write out files from stringio blobs into numpy genfromtxt
    pic = np.genfromtxt(io.StringIO(''.join(pic)), delimiter='\t',
                        names=['subject', 'trial', 'eventtype', 'code', 'time',
                               'ttime', 'uncertainty1', 'duration',
                               'uncertainty2', 'reqtime', 'reqduration',
                               'stimtype', 'pairindex'],
                        dtype=['|S64', int, '|S64', '|S64', int, int, int,
                               int, int, int, int, '|S64', int])
    vid = np.genfromtxt(io.StringIO(''.join(vid)), delimiter='\t',
                        names=['subject', 'trial', 'eventtype', 'code', 'time',
                               'ttime', 'uncertainty1'],
                        dtype=['|S64', int, '|S64', '|S64', int, int, int])

    # ensure our inputs contain a 'MRI_start' string.
    if pic[0][3] != 'MRI_start':
        logger.error('log {} does not contain an MRI_start entry!'.format(logname))
        raise ValueError
    else:
        # this is the start of the fMRI run; all times are relative to this.
        mri_start = pic[0][7]

    return pic, vid, mri_start
7793cb1b53100961aca5011655211b0da47af856
31,487
from typing import List


def line_assign_z_to_vertexes(line_2d: ogr.Geometry,
                              dem: DEM,
                              allowed_input_types: List[int] = None) -> ogr.Geometry:
    """
    Assign Z dimension to vertices of line based on raster value of `dem`.
    The values from `dem` are interpolated using bilinear interpolation to
    provide a smooth surface.

    Parameters
    ----------
    line_2d : ogr.Geometry
        `ogr.Geometry` containing lines. Allowed types are checked against
        `allowed_input_types`.
    dem : DEM
        Raster data source in specific format `DEM` (`gdalhelpers` class).
    allowed_input_types : list of int, optional
        Allowed geometry types for `line_2d`. Default value is `None`, which
        means any type of line. The default is equal to
        `allowed_input_types=[ogr.wkbLineString, ogr.wkbLineString25D,
        ogr.wkbLineStringM, ogr.wkbLineStringZM]`.

    Returns
    -------
    ogr.Geometry
        `ogr.Geometry` with definition `ogr.wkbLineString25D`.
    """
    if allowed_input_types is None:
        allowed_input_types = [ogr.wkbLineString, ogr.wkbLineString25D,
                               ogr.wkbLineStringM, ogr.wkbLineStringZM]

    geometry_checks.check_variable_expected_geometry(line_2d, "line_2d", allowed_input_types)

    line_3d = ogr.Geometry(ogr.wkbLineString25D)
    for i in range(0, line_2d.GetPointCount()):
        pt = line_2d.GetPoint(i)
        z_value = dem.get_value_bilinear(pt[0], pt[1])
        if z_value != dem.get_nodata_value():
            line_3d.AddPoint(pt[0], pt[1], z_value)
    return line_3d
ae3e6c496cd10848e35830c1122a77589f322aad
31,488
def backoff_linear(n):
    """
    backoff_linear(n) -> float

    Linear backoff implementation. This returns n.

    See ReconnectingWebSocket for details.
    """
    return n
a3a3b3fc0c4a56943b1d603bf7634ec50404bfb3
31,489
import pkg_resources


def _doc():
    """
    :rtype: str
    """
    return pkg_resources.resource_string(
        'dcoscli', 'data/help/config.txt').decode('utf-8')
e83f8a70b9d6c9cff38f91b980cd3f9031d84fd7
31,490
def sk_algo(U, gates, n):
    """Solovay-Kitaev Algorithm."""
    if n == 0:
        return find_closest_u(gates, U)
    else:
        U_next = sk_algo(U, gates, n - 1)
        V, W = gc_decomp(U @ U_next.adjoint())
        V_next = sk_algo(V, gates, n - 1)
        W_next = sk_algo(W, gates, n - 1)
        return V_next @ W_next @ V_next.adjoint() @ W_next.adjoint() @ U_next
e8251d7a41899584f92c808af1d4fdee10757349
31,491
def get_movie_list():
    """
    Returns:
        A list of populated media.Movie objects
    """
    print("Generating movie list...")
    movie_list = []
    movie_list.append(media.Movie(
        title='Four Brothers',
        summary='Mark Wahlberg takes on a crime syndicate with his brothers.',
        trailer_youtube_url='https://www.youtube.com/watch?v=vZPi0K6UoP8',
        rating=5,
        imdb_id='tt0430105'))
    movie_list.append(media.Movie(
        'American Sniper',
        imdb_id='tt2179136',
        trailer_youtube_url='https://www.youtube.com/watch?v=5bP1f_1o-zo',
        rating=5))
    movie_list.append(media.Movie(
        imdb_id='tt0120657',
        trailer_youtube_url='https://www.youtube.com/watch?v=JYUBKcurY88',
        rating=4))
    movie_list.append(media.Movie(
        imdb_id='tt0416449',
        trailer_youtube_url='https://www.youtube.com/watch?v=UrIbxk7idYA',
        rating=5))
    movie_list.append(media.Movie(
        imdb_id='tt1790885',
        trailer_youtube_url='https://www.youtube.com/watch?v=k7R2uVZYebE',
        rating=5))
    movie_list.append(media.Movie(
        imdb_id='tt0119698',
        trailer_youtube_url='https://www.youtube.com/watch?v=4OiMOHRDs14',
        rating=5))
    print("Done!")
    return movie_list
e00f67b55a47bf13075a4b2065b94feec4138bcd
31,492
def check_dna_sequence(sequence):
    """Check if a given sequence contains only the allowed letters A, C, T, G."""
    return len(sequence) != 0 and all(base.upper() in ['A', 'C', 'T', 'G'] for base in sequence)
2f561c83773ddaaad2fff71a6b2e5d48c5a35f87
31,493
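A few quick checks for `check_dna_sequence` above:

assert check_dna_sequence("acgt")      # case-insensitive
assert check_dna_sequence("ACTG")
assert not check_dna_sequence("ACTX")  # disallowed letter
assert not check_dna_sequence("")      # empty sequences are rejected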
def test_inner_scalar_mod_args_length():
    """
    Feature: Check the length of input of inner scalar mod.
    Description: The length of input of inner scalar mod should not be less than 2.
    Expectation: The length of input of inner scalar mod should not be less than 2.
    """
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.param_a = Parameter(Tensor(5, ms.int32), name="param_a")
            self.mod = P.Mod()

        def construct(self, x):
            return x + self.param_a + self.mod(5)

    x = Tensor(2, dtype=ms.int32)
    net = Net()
    with pytest.raises(Exception, match="For 'S-Prim-Mod', the size of input should be 2"):
        ret = net(x)
        print("ret:", ret)
06bc7530106c5bf2f586e08ee2b941bd964228f1
31,494
import requests


def zip_list_files(url):
    """
    cd = central directory
    eocd = end of central directory
    refer to zip rfcs for further information :sob:
    -Erica
    """
    # get a blob covering the maximum size of an EOCD:
    # 22 bytes of fixed-size EOCD fields
    # plus the max comment length of 65535 bytes
    eocd_blob_range = "bytes=-65557"  # HTTP Range syntax for the last 65557 bytes
    eocd_blob_response = requests.get(url, headers={"Range": eocd_blob_range})
    eocd_blob = eocd_blob_response.content  # requests exposes the body as bytes directly

    """
    End of central directory record (EOCD)
    Offset  Bytes  Description[26]
    0       4      End of central directory signature = 0x06054b50
    4       2      Number of this disk
    6       2      Disk where central directory starts
    8       2      Number of central directory records on this disk
    10      2      Total number of central directory records
    12      4      Size of central directory (bytes)
    16      4      Offset of start of central directory, relative to start of archive
    20      2      Comment length (n)
    22      n      Comment
    """
    # search eocd_blob for the EOCD block: seek the magic bytes 0x06054b50
    def check_blob_magic_bytes(blob, magic):
        # Recursively search the blob for a string of bytes.
        # this is not optimized. I could tail-recursion this... -Erica
        original_magic = magic

        def _check_blob_magic_bytes(blob, magic, distance):
            for distance, value in enumerate(blob):
                if value == magic[:-1]:
                    if len(magic) == 0:
                        return distance + 1
                    sub_distance = _check_blob_magic_bytes(
                        blob[:-1], magic[:-1], distance + 1
                    )
                    if not sub_distance:
                        return _check_blob_magic_bytes(blob, original_magic, 0)
            return None

        return _check_blob_magic_bytes(blob, magic, 0)

    # bytes objects support reversed() directly; wrapping in iter() first breaks it
    eocd_block = check_blob_magic_bytes(reversed(eocd_blob), 0x06054B50)
    if not eocd_block:
        raise Exception("No zip central directory signature found.")

    # the central-directory offset is the 4-byte field at offset 16 of the EOCD
    cd_file_offset = eocd_block[16:20]
    cd_block_resp = requests.get(url, headers={"Range": "bytes=%i-" % (cd_file_offset,)})
    return cd_block_resp.content
694f6340145d509e7a18aa7b427b75f521c389df
31,495
import math

import numpy
import torch


def project_ball(tensor, epsilon=1, ord=2):
    """
    Compute the orthogonal projection of the input tensor (as vector) onto
    the L_ord epsilon-ball.

    **Assumes the first dimension to be batch dimension, which is preserved.**

    :param tensor: variable or tensor
    :type tensor: torch.autograd.Variable or torch.Tensor
    :param epsilon: radius of ball.
    :type epsilon: float
    :param ord: order of norm
    :type ord: int
    :return: projected vector
    :rtype: torch.autograd.Variable or torch.Tensor
    """
    assert isinstance(tensor, torch.Tensor) or isinstance(tensor, torch.autograd.Variable), \
        'given tensor should be torch.Tensor or torch.autograd.Variable'

    if ord == 0:
        assert epsilon >= 0
        size = list(tensor.shape)
        flattened_size = int(numpy.prod(size[1:]))
        tensor = tensor.view(-1, flattened_size)

        k = int(math.ceil(epsilon))
        k = min(k, tensor.size(1) - 1)
        assert k > 0

        for b in range(tensor.size(0)):
            _, indices = topk(tensor[b], k=k)
            complement_indices = numpy.delete(numpy.arange(tensor.size(1)),
                                              indices.cpu().numpy())
            tensor[b][complement_indices] = 0
        tensor = tensor.view(size)
    elif ord == 1:
        # ! Does not allow differentiation obviously!
        cuda = is_cuda(tensor)
        array = tensor.detach().cpu().numpy()
        array = cnumpy.project_ball(array, epsilon=epsilon, ord=ord)
        tensor = torch.from_numpy(array)
        if cuda:
            tensor = tensor.cuda()
    elif ord == 2:
        size = list(tensor.shape)
        flattened_size = int(numpy.prod(size[1:]))
        tensor = tensor.view(-1, flattened_size)

        clamped = torch.clamp(epsilon / torch.norm(tensor, 2, dim=1), max=1)
        clamped = clamped.view(-1, 1)

        tensor = tensor * clamped
        if len(size) == 4:
            tensor = tensor.view(-1, size[1], size[2], size[3])
        elif len(size) == 2:
            tensor = tensor.view(-1, size[1])
    elif ord == float('inf'):
        tensor = torch.clamp(tensor, min=-epsilon, max=epsilon)
    else:
        raise NotImplementedError()
    return tensor
188eda46ede2b6ac08bc6fc4cfa72efb56e2918e
31,496
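A quick sketch of the `ord=2` path of `project_ball` above, projecting a single vector onto the unit L2 ball:

import torch

x = torch.tensor([[3.0, 4.0]])             # norm 5, outside the unit ball
print(project_ball(x, epsilon=1, ord=2))   # ~[[0.6, 0.8]], rescaled onto the ball
print(project_ball(x, epsilon=10, ord=2))  # unchanged, already inside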
def load_model(filename):
    """
    Loads the specified Keras model from a file.

    Parameters
    ----------
    filename : string
        The name of the file to read from

    Returns
    -------
    Keras model
        The Keras model loaded from a file
    """
    return load_keras_model(__construct_path(filename))
89656f682f1e754a08c756f0db49fc3138171384
31,498
import datetime


def testjob(request):
    """
    handler for test job request

    Actual result from beanstalk instance:

    * testjob triggered at 2019-11-14 01:02:00.105119
    [headers]
    - Content-Type : application/json
    - User-Agent : aws-sqsd/2.4
    - X-Aws-Sqsd-Msgid : 6998edf8-3f19-4c69-92cf-7c919241b957
    - X-Aws-Sqsd-Receive-Count : 4
    - X-Aws-Sqsd-First-Received-At : 2019-11-14T00:47:00Z
    - X-Aws-Sqsd-Sent-At : 2019-11-14T00:47:00Z
    - X-Aws-Sqsd-Queue : awseb-e-n23e8zdd3w-stack-AWSEBWorkerQueue-1QZHOZ650P0J0
    - X-Aws-Sqsd-Path : /testjob
    - X-Aws-Sqsd-Sender-Id : AROA2XEFXCLXVWYXRGF4D:i-07f157f85fb97a241
    - X-Aws-Sqsd-Scheduled-At : 2019-11-14T00:47:00Z
    - X-Aws-Sqsd-Taskname : testjob
    - Connection : close
    - Host : localhost
    - Content-Length : 0
    [body]
    b''
    """
    # The code uses datetime.datetime.now(), so the whole module must be
    # imported, not just the datetime class.
    with open("/tmp/testjob.log", "a") as f:
        f.write("\n\n")
        f.write(f"* testjob triggered at {datetime.datetime.now()}\n")
        f.write("[headers]\n")
        for key, value in request.headers.items():
            f.write(f"- {key} : {value}\n")
        f.write("[body]\n")
        f.write(str(request.body))
    return HttpResponse(status=204)
c2a751d64e76434248029ec1805265e80ef30661
31,500
import datetime


def todatetime(mydate):
    """
    Convert the given thing to a datetime.datetime.

    This is intended mainly to be used with the mx.DateTime that psycopg
    sometimes returns, but could be extended in the future to take other types.
    """
    if isinstance(mydate, datetime.datetime):
        return mydate  # Already a datetime
    if not mydate:
        return mydate  # maybe it was None
    # this works for mx.DateTime without requiring us to explicitly
    # check for mx.DateTime (which is annoying if it may not even be installed)
    return datetime.datetime.fromtimestamp(mydate)
10ce9e46f539c9d12b406d65fb8fd71d75d98191
31,502
from datetime import datetime


def generate_datetime(time: str) -> datetime:
    """Generate a timestamp for today at the given time."""
    today: str = datetime.now().strftime("%Y%m%d")
    timestamp: str = f"{today} {time}"
    dt: datetime = parse_datetime(timestamp)
    return dt
f6fa6643c5f988a7e24cf807f987655803758479
31,503
import numpy as np


def get_rgb_scores(arr_2d=None, truth=None):
    """
    Returns an RGB image of pixelwise separation between ground truth and
    arr_2d (predicted image) with different color codes.

    Useful when you need to inspect a segmentation result against ground truth.

    :param arr_2d:
    :param truth:
    :return:
    """
    arr_rgb = np.zeros([arr_2d.shape[0], arr_2d.shape[1], 3], dtype=np.uint8)
    for i in range(0, arr_2d.shape[0]):
        for j in range(0, arr_2d.shape[1]):
            if arr_2d[i, j] == 255 and truth[i, j] == 255:
                arr_rgb[i, j, :] = 255
            if arr_2d[i, j] == 255 and truth[i, j] == 0:
                arr_rgb[i, j, 0] = 0
                arr_rgb[i, j, 1] = 255
                arr_rgb[i, j, 2] = 0
            if arr_2d[i, j] == 0 and truth[i, j] == 255:
                arr_rgb[i, j, 0] = 255
                arr_rgb[i, j, 1] = 0
                arr_rgb[i, j, 2] = 0
    return arr_rgb
7d5fff0ac76bf8326f9db8781221cfc7a098615d
31,504
import numpy as np


def calClassMemProb(param, expVars, classAv):
    """
    Function that calculates the class membership probabilities for each
    observation in the dataset.

    Parameters
    ----------
    param : 1D numpy array of size nExpVars.
        Contains parameter values of the class membership model.
    expVars : 2D numpy array of size (nExpVars x (nDms * nClasses)).
        Contains explanatory variables of the class membership model.
    classAv : sparse matrix of size ((nDms * nClasses) x nDms).
        The (i, j)th element equals 1 if the ith row in expVars corresponds to
        the jth decision-maker, and 0 otherwise.

    Returns
    -------
    p : 2D numpy array of size 1 x (nDms x nClasses).
        Identifies the class membership probabilities for each individual and
        each available latent class.
    """
    v = np.dot(param[None, :], expVars)    # v is 1 x (nDms * nClasses)
    ev = np.exp(v)                         # ev is 1 x (nDms * nClasses)
    ev[np.isinf(ev)] = 1e+20               # precaution when exp(v) is too large for machine
    ev[ev < 1e-200] = 1e-200               # precaution when exp(v) is too close to zero
    nev = ev * classAv                     # nev is 1 x (nDms * nClasses)
    nnev = classAv * np.transpose(nev)     # nnev is (nDms * nClasses) x 1
    p = np.divide(ev, np.transpose(nnev))  # p is 1 x (nDms * nClasses)
    p[np.isinf(p)] = 1e-200                # when the class is unavailable
    return p
a77b1c6f7ec3e8379df1b91c804d0253a20898c5
31,505
from typing import List

import numpy as np
from scipy.spatial import cKDTree


def detect_statistical_outliers(cloud_xyz: np.ndarray, k: int,
                                std_factor: float = 3.0) -> List[int]:
    """
    Determine the indexes of the points of cloud_xyz to filter.

    The removed points have mean distances to their k nearest neighbors that
    are greater than a distance threshold (dist_thresh). This threshold is
    computed from the mean (mean_distances) and standard deviation
    (stddev_distances) of all the points' mean distances to their k nearest
    neighbors:

        dist_thresh = mean_distances + std_factor * stddev_distances

    :param cloud_xyz: points kdTree
    :param k: number of neighbors
    :param std_factor: multiplication factor used to compute the distance
        threshold
    :return: list of indexes of the points to filter
    """
    # compute for each point all the distances to its k neighbors
    cloud_tree = cKDTree(cloud_xyz)
    neighbors_distances, _ = cloud_tree.query(cloud_xyz, k + 1)

    # Compute the mean of those distances for each point.
    # Mean is not used directly as each line contains the distance value
    # to the point itself.
    mean_neighbors_distances = np.sum(neighbors_distances, axis=1)
    mean_neighbors_distances /= k

    # compute mean and standard deviation of those mean distances
    # for the whole point cloud
    mean_distances = np.mean(mean_neighbors_distances)
    stddev_distances = np.std(mean_neighbors_distances)

    # compute the distance threshold and
    # apply it to determine which points will be removed
    dist_thresh = mean_distances + std_factor * stddev_distances
    points_to_remove = np.argwhere(mean_neighbors_distances > dist_thresh)

    # flatten points_to_remove
    detected_points = []
    for removed_point in points_to_remove:
        detected_points.extend(removed_point)
    return detected_points
2e48e207c831ceb8ee0f223565d2e3570eda6c4f
31,506
def collinear(cell1, cell2, column_test):
    """Determines whether the given cells are collinear along a dimension.

    Returns True if the given cells are in the same row (column_test=False)
    or in the same column (column_test=True).

    Args:
        cell1: The first geocell string.
        cell2: The second geocell string.
        column_test: A boolean, where False invokes a row collinearity test
            and True invokes a column collinearity test.

    Returns:
        A bool indicating whether or not the given cells are collinear in the
        given dimension.
    """
    for i in range(min(len(cell1), len(cell2))):
        x1, y1 = _subdiv_xy(cell1[i])
        x2, y2 = _subdiv_xy(cell2[i])
        # Check row collinearity (assure y's are always the same).
        if not column_test and y1 != y2:
            return False
        # Check column collinearity (assure x's are always the same).
        if column_test and x1 != x2:
            return False
    return True
f79b34c5d1c8e4eed446334b1967f5e75a679e8a
31,507
import numpy as np


def plasma_fractal(mapsize=512, wibbledecay=3):
    """Generate a heightmap using the diamond-square algorithm.

    Modification of the algorithm in
    https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py

    Args:
        mapsize: side length of the heightmap, must be a power of two.
        wibbledecay: integer, decay factor.

    Returns:
        numpy 2d array of side length 'mapsize', of floats normalized to [0, 1].
    """
    if mapsize & (mapsize - 1) != 0:
        raise ValueError('mapsize must be a power of two.')
    maparray = np.empty((mapsize, mapsize), dtype=np.float_)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100

    def wibbledmean(array):
        return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)

    def fillsquares():
        """For each square, calculate middle value as mean of points + wibble."""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)

    def filldiamonds():
        """For each diamond, calculate middle value as mean of points + wibble."""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)

    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay

    maparray -= maparray.min()
    return maparray / maparray.max()
96457a0b00b74d269d266512188dfb4fab8d752c
31,508
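A quick usage sketch for `plasma_fractal` above; note that `mapsize` must be a power of two:

hm = plasma_fractal(mapsize=256, wibbledecay=3)
print(hm.shape, float(hm.min()), float(hm.max()))  # (256, 256) 0.0 1.0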
import pwd
import grp
import time


def stat_to_longname(st, filename):
    """
    Some clients (FileZilla, I'm looking at you!) require the 'longname'
    field of SSH2_FXP_NAME to be 'alike' to the output of ls -l.
    So, let's build it!

    Encoding side: unicode sandwich.
    """
    try:
        n_link = str(st.st_nlink)
    except:
        # Some stats (e.g. SFTPAttributes of paramiko) don't have this
        n_link = str('1')
    longname = [
        filemode(st.st_mode).decode(),
        n_link,
        pwd.getpwuid(st.st_uid)[0],
        grp.getgrgid(st.st_gid)[0],
        str(st.st_size),
        time.strftime('%b %d %H:%M', time.gmtime(st.st_mtime)),
    ]
    # add needed padding
    longname = [
        field + ' ' * (_paddings[i] - len(field))
        for i, field in enumerate(longname)
    ]
    longname.append(filename.decode())  # append the filename
    # and return the string
    return ' '.join(longname).encode()
c0a4a58ec66f2af62cef9c3fa64c8332420bfe1c
31,509
def driver():
    """
    Make sure this driver returns the result.

    :return: result - Result of computation.
    """
    _n = int(input())
    arr = []
    for i in range(_n):
        arr.append(input())
    result = solve(_n, arr)
    print(result)
    return result
fcd11f88715a45805fa3c1629883fc5239a02a91
31,510
import numpy as np


def load_element_different(properties, data):
    """
    Load elements which include lists of different lengths based on
    the element's property-definitions.

    Parameters
    ------------
    properties : dict
        Property definitions encoded in a dict where the property name is the
        key and the property data type the value.
    data : array
        Data rows for this element.
    """
    element_data = {k: [] for k in properties.keys()}
    for row in data:
        start = 0
        for name, dt in properties.items():
            length = 1
            if '$LIST' in dt:
                dt = dt.split('($LIST,)')[-1]
                # the first entry in a list-property is the number of elements
                # in the list
                length = int(row[start])
                # skip the first entry (the length) when reading the data
                start += 1
            end = start + length
            element_data[name].append(row[start:end].astype(dt))
            # start the next property at the end of this one
            start = end
    # try converting to numpy arrays
    squeeze = {k: np.array(v).squeeze() for k, v in element_data.items()}
    return squeeze
a6fe0a28bb5c05ee0a82db845b778ddc80e1bb8c
31,511
def start_survey():
    """Clears the session and starts the survey."""
    # QUESTION: flask session is used to store temporary information; for
    # permanent data, use a database. So what's the difference between using
    # an empty list vs session? Is it just for non-sensitive data like whether
    # the user is logged in or not?
    # QUESTION: When using GET or POST methods, do we need to use redirect?
    session[RESPONSES_KEY] = []
    return redirect("/questions/0")
9a9cc9aba02f31af31143f4cc33e23c78ae61ec2
31,512
def page(token):
    """``page`` property validation."""
    if token.type == 'ident':
        return 'auto' if token.lower_value == 'auto' else token.value
5b120a8548d2dbcbdb080d1f804e2b693da1e5c4
31,513
def index():
    """
    Gets the weight data and displays it to the user.
    """
    # Create a base query
    weight_data_query = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc())
    # Get all the weight data.
    all_weight_data = weight_data_query.all()
    # Get the last 5 data points for a graph.
    limit_weight_data = weight_data_query.limit(5).all()
    # Get the chart data for the last 5 events.
    # Reverse the array so the newest is on the right.
    chart_data = [data.get_weight() for data in limit_weight_data][::-1]
    label_data = [data.get_date_str() for data in limit_weight_data][::-1]
    # Display the weight homepage.
    return render_template('weight_view_weight.html',
                           add_weight_form=AddWeightForm(),
                           weight_data=all_weight_data,
                           chart_data=chart_data,
                           label_data=label_data)
a812dd55c5d775bcff669feb4aa55b798b2042e8
31,515
def upload_binified_data(binified_data, error_handler, survey_id_dict):
    """ Takes in binified csv data and handles uploading/downloading+updating
    older data to/from S3 for each chunk.

    Returns a set of concatenations that have succeeded and can be removed.
    Returns the number of failed FTPs so that we don't retry them.
    Raises any errors on the passed in ErrorHandler."""
    failed_ftps = set([])
    ftps_to_retire = set([])
    upload_these = []
    for data_bin, (data_rows_deque, ftp_deque) in binified_data.iteritems():
        with error_handler:
            try:
                study_id, user_id, data_type, time_bin, original_header = data_bin
                # data_rows_deque may be a generator; here it is evaluated
                rows = list(data_rows_deque)
                updated_header = convert_unix_to_human_readable_timestamps(original_header, rows)
                chunk_path = construct_s3_chunk_path(study_id, user_id, data_type, time_bin)
                old_chunk_exists = ChunkRegistry.objects.filter(chunk_path=chunk_path).exists()
                if old_chunk_exists:
                    chunk = ChunkRegistry.objects.get(chunk_path=chunk_path)
                    try:
                        s3_file_data = s3_retrieve(chunk_path, study_id, raw_path=True)
                    except S3ResponseError as e:
                        # The following check is correct for boto version 2.38.0
                        if "The specified key does not exist." == e.message:
                            # This error can only occur if the processing gets actually
                            # interrupted and data files fail to upload after DB entries
                            # are created.
                            # Encountered this condition 11pm feb 7 2016, cause unknown,
                            # there was no python stacktrace. Best guess is mongo blew up.
                            # If this happened, delete the ChunkRegistry and push this
                            # file upload to the next cycle.
                            chunk.remove()
                            raise ChunkFailedToExist(
                                "chunk %s does not actually point to a file, deleting DB "
                                "entry, should run correctly on next index." % chunk_path)
                        raise  # Raise original error if not 404 s3 error
                    old_header, old_rows = csv_to_list(s3_file_data)
                    if old_header != updated_header:
                        # To handle the case where a file was on an hour boundary and
                        # placed in two separate chunks we need to raise an error in
                        # order to retire this file. If this happens AND ONE of the
                        # files DOES NOT have a header mismatch this may (will?) cause
                        # data duplication in the chunked file whenever the file
                        # processing occurs.
                        raise HeaderMismatchException('%s\nvs.\n%s\nin\n%s' %
                                                      (old_header, updated_header, chunk_path))
                    old_rows = [_ for _ in old_rows]
                    old_rows.extend(rows)
                    del rows
                    ensure_sorted_by_timestamp(old_rows)
                    if data_type == SURVEY_TIMINGS:
                        new_contents = construct_utf_safe_csv_string(updated_header, old_rows)
                    else:
                        new_contents = construct_csv_string(updated_header, old_rows)
                    del old_rows
                    upload_these.append((chunk, chunk_path, new_contents.encode("zip"), study_id))
                    del new_contents
                else:
                    ensure_sorted_by_timestamp(rows)
                    if data_type == SURVEY_TIMINGS:
                        new_contents = construct_utf_safe_csv_string(updated_header, rows)
                    else:
                        new_contents = construct_csv_string(updated_header, rows)
                    if data_type in SURVEY_DATA_FILES:
                        # We need to keep a mapping of files to survey ids;
                        # that is handled here.
                        survey_id_hash = study_id, user_id, data_type, original_header
                        survey_id = survey_id_dict[survey_id_hash]
                    else:
                        survey_id = None
                    chunk_params = {
                        "study_id": study_id,
                        "user_id": user_id,
                        "data_type": data_type,
                        "chunk_path": chunk_path,
                        "time_bin": time_bin,
                        "survey_id": survey_id
                    }
                    upload_these.append((chunk_params, chunk_path, new_contents.encode("zip"), study_id))
            except Exception as e:
                # Here we catch any exceptions that may have arisen, as well as the
                # ones that we raised ourselves (e.g. HeaderMismatchException).
                # Whichever FTP we were processing when the exception was raised gets
                # added to the set of failed FTPs.
                failed_ftps.update(ftp_deque)
                print(e)
                print("failed to update: study_id:%s, user_id:%s, data_type:%s, "
                      "time_bin:%s, header:%s " %
                      (study_id, user_id, data_type, time_bin, updated_header))
                raise
            else:
                # If no exception was raised, the FTP has completed processing.
                # Add it to the set of retireable (i.e. completed) FTPs.
                ftps_to_retire.update(ftp_deque)

    pool = ThreadPool(CONCURRENT_NETWORK_OPS)
    errors = pool.map(batch_upload, upload_these, chunksize=1)
    for err_ret in errors:
        if err_ret['exception']:
            print(err_ret['traceback'])
            raise err_ret['exception']
    pool.close()
    pool.terminate()
    # Return the FTPs to retire that are not in failed FTPs.
    # len(failed_ftps) will become the number of files to skip in the next iteration.
    return ftps_to_retire.difference(failed_ftps), len(failed_ftps)
8b4499f3e5a8539a0b0fb31b44a5fe06ce5fd16b
31,516
from System import Enum  # .NET interop (e.g. via pythonnet); Python's enum.Enum has no GetValues


def system_get_enum_values(enum):
    """Gets all values from a System.Enum instance.

    Parameters
    ----------
    enum: System.Enum
        An Enum instance.

    Returns
    -------
    list
        A list containing the values of the Enum instance
    """
    return list(Enum.GetValues(enum))
b440d5b5e3012a1708c88aea2a1bf1dc7fc02d18
31,517
def skip_leading_ws_with_indent(s, i, tab_width):
    """Skips leading whitespace and returns (i, indent):

    - i points after the whitespace
    - indent is the width of the whitespace, assuming tab_width wide tabs."""
    count = 0
    n = len(s)
    while i < n:
        ch = s[i]
        if ch == ' ':
            count += 1
            i += 1
        elif ch == '\t':
            count += (abs(tab_width) - (count % abs(tab_width)))
            i += 1
        else:
            break
    return i, count
e787a0a1c407902a2a946a21daf308ca94a794c6
31,518
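A quick worked example for `skip_leading_ws_with_indent` above, with a tab width of 4 (a tab advances the indent to the next multiple of 4):

i, indent = skip_leading_ws_with_indent('\t  x = 1', 0, 4)
print(i, indent)  # 3 6 -> the tab widens the indent to 4, then two spaces make 6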
import os.path as op

import sh


def get_minibam_bed(bamfile, bedfile, minibam=None):
    """
    samtools view -L could do the work, but it is NOT random access. Here we
    are processing multiple regions sequentially. See also:

    https://www.biostars.org/p/49306/
    """
    pf = op.basename(bedfile).split(".")[0]
    minibamfile = minibam or op.basename(bamfile).replace(".bam", ".{}.bam".format(pf))
    # derive the SAM name from the resolved name, not the (possibly None) argument
    minisamfile = minibamfile.replace(".bam", ".sam")
    baifile = minibamfile + ".bai"
    if op.exists(baifile):
        sh("rm {}".format(baifile))

    cmd = "samtools view -H {} > {}".format(bamfile, minisamfile)
    sh(cmd)

    cmd = "cat {}".format(bedfile)
    cmd += " | perl -lane 'print \"$F[0]:$F[1]-$F[2]\"'"
    cmd += " | xargs -n1 -t -I \{\}"
    cmd += " samtools view {}".format(bamfile)
    cmd += " \{\} >> " + minisamfile
    sh(cmd)

    cmd = "samtools view {} -b".format(minisamfile)
    cmd += " | samtools sort -"
    cmd += " -o {0}".format(minibamfile)
    sh(cmd)

    sh("samtools index {0}".format(minibamfile))
    return minibamfile
48142e8df2468332699459a6ff0a9c455d5ad32f
31,520
def create_app(config_object="tigerhacks_api.settings"):
    """Create application factory, as explained here:
    http://flask.pocoo.org/docs/patterns/appfactories/.

    :param config_object: The configuration object to use.
    """
    app = Flask(__name__.split(".")[0])
    logger.info("Flask app initialized")
    app.config.from_object(config_object)
    logger.info("Config loaded")
    app.dbconn = init_database_connection(app)
    logger.info("Database connection successful")
    register_extensions(app)
    register_blueprints(app)
    register_shellcontext(app)
    register_commands(app)
    configure_logger(app)
    logger.info("Extensions loaded")
    configure_api_key(app)
    configure_admin_key(app)
    logger.info("API keys configured")
    logger.info("Request logs will now take over.")
    cors = CORS(app)
    return app
7bd2af062b770b80454b1f1fc219411fdb174a41
31,521
def dest_in_spiral(data):
    """
    The map of the circuit consists of square cells. The first element in the
    center is marked as 1, and continuing in a clockwise spiral, the other
    elements are marked in ascending order ad infinitum. On the map, you can
    move (connect cells) vertically and horizontally.

    For example, the distance between cells 1 and 9 is two moves and the
    distance between 24 and 9 is one move. You must help Nikola find the
    distance between any two elements on the map.

    Input: A list of two marks of cells (integers).
    Output: The distance between the two elements. An integer.

    Approach: find the nearest square number that the larger of the two
    numbers is at most. If the nearest square root is odd, the position can
    move down sqrt(nearest_square) - 1 cells and then left the same number.
    Determine each mark's location with 1 as the origin, then take the
    Manhattan distance.
    """
    a, b = max(data), min(data)
    nearestSquare = lambda x: int(x ** 0.5) if (float(int(x ** 0.5)) == x ** 0.5) else 1 + int(x ** 0.5)
    NRA = nearestSquare(a)  # nearest root of a
    NSA = NRA ** 2          # nearest square of a
    NRB = nearestSquare(b)
    NSB = NRB ** 2
    stepsfromNSA = NSA - a
    if NRA % 2 != 0:
        if stepsfromNSA > (NRA - 1):
            aY = 0
            aX = stepsfromNSA - (NRA - 1)
        else:
            aX = 0
            aY = (NRA - 1) - stepsfromNSA
    else:
        if stepsfromNSA > (NRA - 1):
            aY = NRA - 1
            aX = (NRA - 1) - (stepsfromNSA - (NRA - 1))
        else:
            aX = NRA - 1
            aY = stepsfromNSA
    offset = (NRA - NRB) // 2  # integer division: offsets count whole rings
    if NRB % 2 == 0 and NRB % 2 != NRA % 2:
        offset += 1
    stepsfromNSB = NSB - b
    if NRB % 2 != 0:
        if stepsfromNSB > (NRB - 1):
            bY = 0
            bX = stepsfromNSB - (NRB - 1)
        else:
            bX = 0
            bY = (NRB - 1) - stepsfromNSB
    else:
        if stepsfromNSB > (NRB - 1):
            bY = NRB - 1
            bX = (NRB - 1) - (stepsfromNSB - (NRB - 1))
        else:
            bX = NRB - 1
            bY = stepsfromNSB
    bX, bY = bX + offset, bY + offset
    distance = (((aX - bX) ** 2) ** 0.5) + (((aY - bY) ** 2) ** 0.5)
    return distance
a84a00d111b80a3d9933d9c60565b7a31262f878
31,522
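The docstring's own examples, checked against the function above:

print(dest_in_spiral([1, 9]))   # 2.0
print(dest_in_spiral([9, 24]))  # 1.0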
from datetime import datetime


def get_current_time():
    """
    Returns the current time with respect to the time zone defined on the
    server (falling back to UTC when none is set).

    Returns
    -------
    : str
        time string of now()
    """
    srv = get_server()
    if srv.time_zone is None:
        time_zone = 'UTC'
    else:
        time_zone = srv.time_zone
    return utc_to_localtime(datetime.now(), time_zone)
3b8d547d68bbc0f7f7f21a8a5b375cb898e53d30
31,523
import asyncio

import aiohttp
import async_timeout


async def _update_google_domains(hass, session, domain, user, password, timeout):
    """Update Google Domains."""
    url = f"https://{user}:{password}@domains.google.com/nic/update"
    params = {"hostname": domain}
    try:
        async with async_timeout.timeout(timeout):
            resp = await session.get(url, params=params)
            body = await resp.text()
            if body.startswith("good") or body.startswith("nochg"):
                return True
            _LOGGER.warning("Updating Google Domains failed: %s => %s", domain, body)
    except aiohttp.ClientError:
        _LOGGER.warning("Can't connect to Google Domains API")
    except asyncio.TimeoutError:
        _LOGGER.warning("Timeout from Google Domains API for domain: %s", domain)
    return False
372137db20bdb1c410f84dfa55a48269c4f588bc
31,524
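# A minimal driving sketch, assuming plain asyncio outside Home Assistant;
# the hass argument is unused by the helper, and the credentials are placeholders.
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        ok = await _update_google_domains(None, session,
                                          "home.example.com", "user", "secret", 10)
        print("updated" if ok else "failed")

asyncio.run(main())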
import numpy as np

def smoothen_over_time(lane_lines):
    """
    Smooth the lane line inference over a window of frames and return the
    average lines (`Line` is the module's lane-line container).
    """
    avg_line_lt = np.zeros((len(lane_lines), 4))
    avg_line_rt = np.zeros((len(lane_lines), 4))

    for t in range(len(lane_lines)):
        avg_line_lt[t] += lane_lines[t][0].get_coords()
        avg_line_rt[t] += lane_lines[t][1].get_coords()

    return Line(*np.mean(avg_line_lt, axis=0)), Line(*np.mean(avg_line_rt, axis=0))
64c31747ed816acbaeebdd9dc4a9e2163c3d5274
31,525
from typing import List
from typing import Optional
import random

def select_random(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:
    """
    Return a random node, or None when the list is empty.
    """
    if not nodes:
        return None
    return random.choice(nodes)
7bb41abd7f135ea951dbad85e4dc7290d6191e44
31,526
def convert(from_path, ingestor, to_path, egestor, select_only_known_labels, filter_images_without_labels):
    """
    Converts between data formats, validating that the converted data matches
    `IMAGE_DETECTION_SCHEMA` along the way.

    :param from_path: '/path/to/read/from'
    :param ingestor: `Ingestor` to read in data
    :param to_path: '/path/to/write/to'
    :param egestor: `Egestor` to write out data
    :param select_only_known_labels: if True, drop detections whose labels
        are not among the egestor's expected labels
    :param filter_images_without_labels: if True, drop images left with no
        detections after label conversion
    :return: (success, message)
    """
    from_valid, from_msg = ingestor.validate(from_path)

    if not from_valid:
        return from_valid, from_msg

    image_detections = ingestor.ingest(from_path)
    validate_image_detections(image_detections)
    image_detections = convert_labels(
        image_detections=image_detections,
        expected_labels=egestor.expected_labels(),
        select_only_known_labels=select_only_known_labels,
        filter_images_without_labels=filter_images_without_labels)

    egestor.egest(image_detections=image_detections, root=to_path)
    return True, ''
0407768620b3c703fec0143d2ef1297ba566ed7f
31,527
import timeit
from functools import wraps

def timer(method):
    """
    Method decorator to capture and print total run time in seconds

    :param method: The method or function to time
    :return: A function
    """
    @wraps(method)
    def wrapped(*args, **kw):
        timer_start = timeit.default_timer()
        result = method(*args, **kw)
        timer_finish = timeit.default_timer()
        print('%r %2.2f s' % (method.__name__, timer_finish - timer_start))
        return result
    return wrapped
526a7b78510efb0329fba7da2f4c24a6d35c2266
31,528
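# Example: decorating a function prints its elapsed time after each call.
@timer
def slow_sum(n):
    return sum(range(n))

slow_sum(10_000_000)  # prints e.g. 'slow_sum' 0.25 s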
import pandas as pd

def macro_states(macro_df, style, roll_window):
    """
    Function to convert macro factors into binary states

    Args:
        macro_df (pd.DataFrame): contains macro factors data
        style (str): specify method used to classify. Accepted values: 'naive'
        roll_window (int): specify rolling window in months

    Returns:
        state_df (pd.DataFrame): macro factors classified to binary states.
            1 for up and -1 for down; 0 marks periods where the rolling
            median is not yet defined
    """
    if style != 'naive':
        raise ValueError(f"Unsupported style: {style}")

    # Classify on the basis of a rolling median
    roll_median = macro_df.rolling(roll_window).median()
    state_df = macro_df >= roll_median
    state_df = state_df[pd.notnull(roll_median)].dropna(how='all')
    state_df.replace(0, -1, inplace=True)
    state_df.fillna(0, inplace=True)

    return state_df
1d4862cfb43aeebd33e71bc67293cbd7b62eb7b5
31,529
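# A small demonstration on synthetic monthly data; the series name and the
# 12-month window are illustrative only.
import numpy as np
import pandas as pd

idx = pd.date_range("2015-01-31", periods=36, freq="M")
macro_df = pd.DataFrame({"cpi": np.random.randn(36).cumsum()}, index=idx)
states = macro_states(macro_df, style="naive", roll_window=12)
print(states.tail())  # binary up/down states as described in the docstring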
import numpy as np
import torch

def get_sparsity(lat):
    """Return percentage of nonzero slopes in lat.

    Args:
        lat (Lattice): instance of Lattice class
    """
    # Initialize operators
    placeholder_input = torch.tensor([[0., 0]])
    op = Operators(lat, placeholder_input)

    # convert z, L, H to np.float64 (simplex requires this)
    L_mat_sparse = op.L_mat_sparse.astype(np.float64)
    z = lat.flattened_C

    # # compute ||Lz||_1
    # htv_loss = np.linalg.norm(L_z, ord=1)
    # print('HTV: {:.2f}'.format(htv_loss))

    # compute ||Lz||_0 (zero up to the module-level tolerance SPARSITY_EPS)
    L_z = L_mat_sparse.dot(z.numpy())
    L_z_zero_idx = np.where(np.absolute(L_z) <= SPARSITY_EPS)[0]
    fraction_zero = 1.
    if L_z.shape[0] != 0:
        fraction_zero = L_z_zero_idx.shape[0] / L_z.shape[0]

    percentage_nonzero = (100. - fraction_zero * 100)

    return percentage_nonzero
703bd061b662a20b7ebce6111442bb6597fddaec
31,530
def XYZ_to_Kim2009( XYZ: ArrayLike, XYZ_w: ArrayLike, L_A: FloatingOrArrayLike, media: MediaParameters_Kim2009 = MEDIA_PARAMETERS_KIM2009["CRT Displays"], surround: InductionFactors_Kim2009 = VIEWING_CONDITIONS_KIM2009["Average"], discount_illuminant: Boolean = False, n_c: Floating = 0.57, ) -> CAM_Specification_Kim2009: """ Computes the *Kim, Weyrich and Kautz (2009)* colour appearance model correlates from given *CIE XYZ* tristimulus values. Parameters ---------- XYZ *CIE XYZ* tristimulus values of test sample / stimulus. XYZ_w *CIE XYZ* tristimulus values of reference white. L_A Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken to be 20% of the luminance of a white object in the scene). media Media parameters. surround Surround viewing conditions induction factors. discount_illuminant Truth value indicating if the illuminant should be discounted. n_c Cone response sigmoidal curve modulating factor :math:`n_c`. Returns ------- :class:`colour.CAM_Specification_Kim2009` *Kim, Weyrich and Kautz (2009)* colour appearance model specification. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``XYZ`` | [0, 100] | [0, 1] | +------------+-----------------------+---------------+ | ``XYZ_w`` | [0, 100] | [0, 1] | +------------+-----------------------+---------------+ +---------------------------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +=================================+=======================+===============+ | ``CAM_Specification_Kim2009.J`` | [0, 100] | [0, 1] | +---------------------------------+-----------------------+---------------+ | ``CAM_Specification_Kim2009.C`` | [0, 100] | [0, 1] | +---------------------------------+-----------------------+---------------+ | ``CAM_Specification_Kim2009.h`` | [0, 360] | [0, 1] | +---------------------------------+-----------------------+---------------+ | ``CAM_Specification_Kim2009.s`` | [0, 100] | [0, 1] | +---------------------------------+-----------------------+---------------+ | ``CAM_Specification_Kim2009.Q`` | [0, 100] | [0, 1] | +---------------------------------+-----------------------+---------------+ | ``CAM_Specification_Kim2009.M`` | [0, 100] | [0, 1] | +---------------------------------+-----------------------+---------------+ | ``CAM_Specification_Kim2009.H`` | [0, 400] | [0, 1] | +---------------------------------+-----------------------+---------------+ References ---------- :cite:`Kim2009` Examples -------- >>> XYZ = np.array([19.01, 20.00, 21.78]) >>> XYZ_w = np.array([95.05, 100.00, 108.88]) >>> L_A = 318.31 >>> media = MEDIA_PARAMETERS_KIM2009['CRT Displays'] >>> surround = VIEWING_CONDITIONS_KIM2009['Average'] >>> XYZ_to_Kim2009(XYZ, XYZ_w, L_A, media, surround) ... # doctest: +ELLIPSIS CAM_Specification_Kim2009(J=28.8619089..., C=0.5592455..., \ h=219.0480667..., s=9.3837797..., Q=52.7138883..., M=0.4641738..., \ H=278.0602824..., HC=None) """ XYZ = to_domain_100(XYZ) XYZ_w = to_domain_100(XYZ_w) _X_w, Y_w, _Z_w = tsplit(XYZ_w) L_A = as_float_array(L_A) # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform # sharpened *RGB* values. RGB = vector_dot(CAT_CAT02, XYZ) RGB_w = vector_dot(CAT_CAT02, XYZ_w) # Computing degree of adaptation :math:`D`. D = ( degree_of_adaptation(surround.F, L_A) if not discount_illuminant else ones(L_A.shape) ) # Computing full chromatic adaptation. 
XYZ_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D) XYZ_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D) # Converting to *Hunt-Pointer-Estevez* colourspace. LMS = RGB_to_rgb(XYZ_c) LMS_w = RGB_to_rgb(XYZ_wc) # Cones absolute response. LMS_n_c = spow(LMS, n_c) LMS_w_n_c = spow(LMS_w, n_c) L_A_n_c = spow(L_A, n_c) LMS_p = LMS_n_c / (LMS_n_c + L_A_n_c) LMS_wp = LMS_w_n_c / (LMS_w_n_c + L_A_n_c) # Achromatic signal :math:`A` and :math:`A_w`. v_A = np.array([40, 20, 1]) A = np.sum(v_A * LMS_p, axis=-1) / 61 A_w = np.sum(v_A * LMS_wp, axis=-1) / 61 # Perceived *Lightness* :math:`J_p`. a_j, b_j, o_j, n_j = 0.89, 0.24, 0.65, 3.65 A_A_w = A / A_w J_p = spow( (-(A_A_w - b_j) * spow(o_j, n_j)) / (A_A_w - b_j - a_j), 1 / n_j ) # Computing the media dependent *Lightness* :math:`J`. J = 100 * (media.E * (J_p - 1) + 1) # Computing the correlate of *brightness* :math:`Q`. n_q = 0.1308 Q = J * spow(Y_w, n_q) # Opponent signals :math:`a` and :math:`b`. a = (1 / 11) * np.sum(np.array([11, -12, 1]) * LMS_p, axis=-1) b = (1 / 9) * np.sum(np.array([1, 1, -2]) * LMS_p, axis=-1) # Computing the correlate of *chroma* :math:`C`. a_k, n_k = 456.5, 0.62 C = a_k * spow(np.sqrt(a ** 2 + b ** 2), n_k) # Computing the correlate of *colourfulness* :math:`M`. a_m, b_m = 0.11, 0.61 M = C * (a_m * np.log10(Y_w) + b_m) # Computing the correlate of *saturation* :math:`s`. s = 100 * np.sqrt(M / Q) # Computing the *hue* angle :math:`h`. h = np.degrees(np.arctan2(b, a)) % 360 # Computing hue :math:`h` quadrature :math:`H`. H = hue_quadrature(h) return CAM_Specification_Kim2009( as_float(from_range_100(J)), as_float(from_range_100(C)), as_float(from_range_degrees(h)), as_float(from_range_100(s)), as_float(from_range_100(Q)), as_float(from_range_100(M)), as_float(from_range_degrees(H, 400)), None, )
bf694c7a66052b3748f561018d253d2dfcdfc8df
31,531
from typing import Union
from pathlib import Path
from typing import Optional

def load_capsule(path: Union[str, Path],
                 source_path: Optional[Path] = None,
                 key: Optional[str] = None,
                 inference_mode: bool = True) -> BaseCapsule:
    """Load a capsule from the filesystem.

    :param path: The path to the capsule file
    :param source_path: The path to the capsule's source code, if it's
        available at runtime
    :param key: The AES key to decrypt the capsule with, or None if the
        capsule is not encrypted
    :param inference_mode: If True, the backends for this capsule will be
        started. If False, the capsule will never be able to run inference,
        but it will still have its various readable attributes.
    """
    path = Path(path)

    if source_path is None:
        # Set the default source path to a directory alongside the capsule file
        source_path = path.absolute().with_suffix("")

    return load_capsule_from_bytes(
        data=path.read_bytes(),
        source_path=source_path,
        key=key,
        inference_mode=inference_mode,
    )
f6810bdb82ab734e2bd424feee76f11da18cccf4
31,532
from math import radians, sqrt, sin, cos

def geodetic2ecef(lat, lon, alt):
    """Convert geodetic coordinates (degrees, metres) to ECEF.

    Assumes module-level ellipsoid constants: semi-major axis `a` and first
    eccentricity squared `esq`.
    """
    lat, lon = radians(lat), radians(lon)
    # Prime vertical radius denominator uses sin squared: sqrt(1 - e^2 sin^2(lat)).
    xi = sqrt(1 - esq * sin(lat) ** 2)
    x = (a / xi + alt) * cos(lat) * cos(lon)
    y = (a / xi + alt) * cos(lat) * sin(lon)
    z = (a / xi * (1 - esq) + alt) * sin(lat)
    return x, y, z
43654b16d89eeeee0aa411f40dc12d5c12637e80
31,533
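# Usage sketch with the WGS-84 constants the function assumes: semi-major
# axis `a` in metres and first eccentricity squared `esq`.
a = 6378137.0
esq = 6.69437999014e-3

x, y, z = geodetic2ecef(52.2297, 21.0122, 100.0)  # Warsaw at ~100 m altitude
print(x, y, z)  # ECEF coordinates in metres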
def processor_group_size(nprocs, number_of_tasks):
    """
    Find the number of groups to divide `nprocs` processors into to tackle
    `number_of_tasks` tasks.

    When `number_of_tasks` > `nprocs` the smallest integer multiple of
    `nprocs` that equals or exceeds `number_of_tasks` is returned. When
    `number_of_tasks` < `nprocs` the smallest divisor of `nprocs` that
    equals or exceeds `number_of_tasks` is returned.

    Parameters
    ----------
    nprocs : int
        The number of processors to divide into groups.

    number_of_tasks : int or float
        The number of tasks to perform, which can also be seen as the
        *desired* number of processor groups.  If a floating point value is
        given the next highest integer is used.

    Returns
    -------
    int
    """
    if number_of_tasks >= nprocs:
        return nprocs * int(_np.ceil(1. * number_of_tasks / nprocs))
    else:
        fctrs = sorted(_prime_factors(nprocs))
        i = 1
        if int(_np.ceil(number_of_tasks)) in fctrs:
            return int(_np.ceil(number_of_tasks))  # we got lucky
        while _np.prod(fctrs[0:i]) < number_of_tasks:
            i += 1
        return _np.prod(fctrs[0:i])
f6d9a760d79ff59c22b3a95cc56808ba142c4045
31,534
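# The two branches of processor_group_size, worked through:
processor_group_size(8, 20)  # 20 tasks > 8 procs -> 8 * ceil(20/8) = 24
processor_group_size(8, 3)   # 3 tasks < 8 procs -> prime factors [2, 2, 2],
                             # smallest running product >= 3 is 2*2 = 4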
def skin_base_url(skin, variables): """ Returns the skin_base_url associated to the skin. """ return variables \ .get('skins', {}) \ .get(skin, {}) \ .get('base_url', '')
80de82862a4a038328a6f997cc29e6bf1ed44eb8
31,535
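# The lookup walks skins -> <skin> -> base_url, falling back to '' at each step.
variables = {"skins": {"dark": {"base_url": "https://cdn.example.com/dark/"}}}
skin_base_url("dark", variables)     # 'https://cdn.example.com/dark/'
skin_base_url("missing", variables)  # ''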
import json

from netdumplings.exceptions import InvalidDumpling

def validate_dumpling(dumpling_json):
    """
    Validates a dumpling received from (or about to be sent to) the dumpling
    hub.

    Validation involves ensuring that it's valid JSON and that it includes a
    ``metadata.chef`` key.

    :param dumpling_json: The dumpling JSON.
    :raise: :class:`netdumplings.exceptions.InvalidDumpling` if the dumpling
        is invalid.
    :return: A dict created from the dumpling JSON.
    """
    try:
        dumpling = json.loads(dumpling_json)
    except json.JSONDecodeError as e:
        raise InvalidDumpling("Could not interpret dumpling JSON") from e

    try:
        dumpling['metadata']['chef']
    except (KeyError, TypeError) as e:
        raise InvalidDumpling("Could not determine chef name") from e

    return dumpling
7d6885a69fe40fa8531ae58c373a1b1161b1df49
31,538
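# Round trip for a well-formed dumpling; the chef name is a placeholder.
# A payload without metadata.chef raises InvalidDumpling instead.
import json

good = json.dumps({"metadata": {"chef": "ExampleChef"}, "payload": {}})
print(validate_dumpling(good)["metadata"]["chef"])  # ExampleChef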
import numpy as np

def _recurse_to_best_estimate(
        lower_bound, upper_bound, num_entities, sample_sizes
):
    """Recursively finds the best estimate of population size by identifying
    which half of [lower_bound, upper_bound] contains the best estimate.

    Parameters
    ----------
    lower_bound: int
        The lower bound of the interval to be tested; the value of the error
        function can always be assumed to be positive at this point.
    upper_bound: int
        The upper bound of the interval to be tested; the value of the error
        function can always be assumed to be negative at this point.
    num_entities: int
        The number of distinct entities observed.
    sample_sizes: list
        A list of integers indicating the size of each sample taken.

    Returns
    -------
    int
        The best estimate of population size.
    """
    # Base case - return the upper bound when the upper and lower bounds are
    # adjacent
    if upper_bound - lower_bound <= 1:
        return upper_bound

    # Otherwise calculate error at midpoint and recursively evaluate the
    # relevant half of the interval
    midpoint = int(np.ceil((lower_bound + upper_bound) / 2))
    error_at_midpoint = _calculate_error(midpoint, num_entities, sample_sizes)
    if error_at_midpoint > 0:
        return _recurse_to_best_estimate(
            midpoint, upper_bound, num_entities, sample_sizes
        )
    else:
        return _recurse_to_best_estimate(
            lower_bound, midpoint, num_entities, sample_sizes
        )
969b550da712682ae620bb7158ed623785ec14f5
31,540
import itertools as it

def betwix(iterable, start=None, stop=None, inc=False):
    """ Extract selected elements from an iterable. But unlike `islice`,
    extract based on the element's value instead of its position.

    Args:
        iterable (iter): The initial sequence
        start (str): The fragment to begin with (inclusive)
        stop (str): The fragment to finish at (exclusive)
        inc (bool): Make stop operate inclusively (useful if reading a file
            and the start and stop fragments are on the same line)

    Returns:
        Iter: the elements of `iterable` between `start` and `stop`

    Examples:
        >>> from io import StringIO
        >>>
        >>> list(betwix('ABCDEFG', stop='C')) == ['A', 'B']
        True
        >>> list(betwix('ABCDEFG', 'C', 'E')) == ['C', 'D']
        True
        >>> list(betwix('ABCDEFG', 'C')) == ['C', 'D', 'E', 'F', 'G']
        True
        >>> f = StringIO('alpha\\n<beta>\\ngamma\\n')
        >>> list(betwix(f, '<', '>', True)) == ['<beta>\\n']
        True
        >>> list(betwix('ABCDEFG', 'C', 'E', True)) == ['C', 'D', 'E']
        True
    """
    def inc_takewhile(predicate, _iter):
        for x in _iter:
            yield x

            if not predicate(x):
                break

    def get_pred(sentinel):
        return lambda x: sentinel not in x

    pred = get_pred(stop)
    first = it.dropwhile(get_pred(start), iterable) if start else iterable

    if stop and inc:
        last = inc_takewhile(pred, first)
    elif stop:
        last = it.takewhile(pred, first)
    else:
        last = first

    return last
e1079158429e7d25fee48222d5ac734c0456ecfe
31,541
import logging
import typing as tp

def map_configuration(config: dict) -> tp.List[MeterReaderNode]:  # noqa MC0001
    """
    Map a parsed configuration to a list of meter reader nodes.

    :param config: configuration dict with ``devices`` and ``middleware`` sections
    :return: list of meter reader nodes that registered successfully
    """
    # pylint: disable=too-many-locals, too-many-nested-blocks
    meter_reader_nodes = []
    if 'devices' in config and 'middleware' in config:
        try:
            if config.get('middleware').get('type') == 'volkszaehler':
                gateway = VolkszaehlerGateway(config.get('middleware').get('middleware_url'),
                                              config.get('middleware').get('interpolate', True))
            else:
                logging.error(f'Middleware "{config.get("middleware").get("type")}" not supported!')
                gateway = None
            if gateway:
                for device in config.get('devices').values():
                    meter_id = strip(str(device.pop('id')))
                    protocol = strip(device.pop('protocol'))
                    channels = device.pop('channels')
                    if protocol == 'SML':
                        reader = SmlReader(meter_id, **device)
                    elif protocol == 'PLAIN':
                        reader = PlainReader(meter_id, **device)
                    elif protocol == 'BME280':
                        reader = Bme280Reader(meter_id, **device)
                    else:
                        logging.error(f'Unsupported protocol {protocol}')
                        reader = None
                    if reader is None:
                        # Skip the device instead of calling .poll() on None.
                        continue
                    sample = reader.poll()
                    if sample is not None:
                        available_channels = {}
                        for variable in sample.channels:
                            obj_name = variable.get('objName', '')
                            for channel_name, channel in channels.items():
                                interval = humanfriendly_time_parser(channel.get('interval', '1h'))
                                uuid = channel.get('uuid')
                                factor = channel.get('factor', 1)
                                if strip(str(channel_name)) in strip(str(obj_name)):
                                    # Replacing config string with exact match
                                    available_channels[obj_name] = (uuid, interval, factor)
                        if available_channels:
                            meter_reader_node = MeterReaderNode(available_channels,
                                                                reader,
                                                                gateway)
                            # Perform first push to middleware
                            if meter_reader_node.poll_and_push(sample):
                                meter_reader_nodes.append(meter_reader_node)
                            else:
                                logging.error(f"Not registering node for meter id {reader.meter_id}.")
                        else:
                            logging.warning(f"Cannot register channels for meter {meter_id}.")
                    else:
                        logging.warning(f"Could not read meter id {meter_id} using protocol {protocol}.")
        except KeyError as err:
            logging.error(f"Error while processing configuration: {err}")
    else:
        logging.error("Config file is incomplete.")
    return meter_reader_nodes
0d9212850547f06583d71d8d9b7e2995bbf701d5
31,542
def places(client, query, location=None, radius=None, language=None, min_price=None, max_price=None, open_now=False, type=None, region=None, page_token=None): """ Places search. :param query: The text string on which to search, for example: "restaurant". :type query: string :param location: The latitude/longitude value for which you wish to obtain the closest, human-readable address. :type location: string, dict, list, or tuple :param radius: Distance in meters within which to bias results. :type radius: int :param language: The language in which to return results. :type language: string :param min_price: Restricts results to only those places with no less than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type min_price: int :param max_price: Restricts results to only those places with no greater than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type max_price: int :param open_now: Return only those places that are open for business at the time the query is sent. :type open_now: bool :param type: Restricts the results to places matching the specified type. The full list of supported types is available here: https://developers.google.com/places/supported_types :type type: string :param region: The region code, optional parameter. See more @ https://developers.google.com/places/web-service/search :type region: string :param page_token: Token from a previous search that when provided will returns the next page of results for the same search. :type page_token: string :rtype: result dict with the following keys: results: list of places html_attributions: set of attributions which must be displayed next_page_token: token for retrieving the next page of results """ return _places(client, "text", query=query, location=location, radius=radius, language=language, min_price=min_price, max_price=max_price, open_now=open_now, type=type, region=region, page_token=page_token)
50aea370006d5d016b7ecd943abc2deba382212d
31,543
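# Text-search sketch with the googlemaps client; the API key and the Sydney
# coordinates are placeholders.
import googlemaps

client = googlemaps.Client(key="YOUR_API_KEY")
result = places(client, "restaurant",
                location=(-33.8670522, 151.1957362), radius=1000, open_now=True)
for place in result["results"]:
    print(place["name"])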
import pandas as pd

def load_data(_file, pct_split):
    """Load test and train data into a DataFrame

    :return pd.DataFrame with ['test'/'train', features]"""
    # load train and test data
    data = pd.read_csv(_file)

    # split into train and test using pct_split
    # (assumes pct_split is the fraction of rows assigned to train)
    split_at = int(len(data) * pct_split)
    data_train = data.iloc[:split_at]
    data_test = data.iloc[split_at:]

    # concat and label
    data_out = pd.concat([data_train, data_test], keys=['train', 'test'])
    return data_out
1a02f83aba497bc58e54c262c3f42386938ee9bd
31,544
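# After loading, the first index level separates the two partitions;
# 'data.csv' and the 80/20 split are illustrative.
df = load_data("data.csv", 0.8)
train, test = df.loc["train"], df.loc["test"]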
import toolz

def sorted_items(d, key=None, reverse=False):
    """Given a dictionary `d` return items: (k1, v1), (k2, v2)... sorted in
    ascending order according to key.

    :param dict d: dictionary
    :param key: optional function remapping key
    :param bool reverse: If True return in descending order instead of default ascending

    """
    if d is None:
        return []

    key = toolz.first if key is None else toolz.comp(key, toolz.first)
    return sorted(d.items(), key=key, reverse=reverse)
4e4302eebe2955cdd5d5266a65eac3acf874474a
31,545
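# Sorting by key, with an optional remapping function applied to keys first.
d = {"b": 2, "A": 1, "c": 3}
sorted_items(d)                 # [('A', 1), ('b', 2), ('c', 3)]
sorted_items(d, key=str.lower)  # [('A', 1), ('b', 2), ('c', 3)]
sorted_items(d, reverse=True)   # [('c', 3), ('b', 2), ('A', 1)]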
def randint_population(shape, max_value, min_value=0):
    """Generate a random population made of Integers

    Args:
        shape (set of ints): shape of the population. It is of the form
            (num_chromosomes, chromosome_dim_1, ..., chromosome_dim_n)

        max_value (int): Maximum value taken by a given gene.

        min_value (int, optional): Min value a gene can take. Defaults to 0.

    Returns:
        Tensor: random population.
    """
    high = max_value + 1
    return B.randint(low=min_value, high=high, shape=shape, dtype=B.intx())
79cbc5ceba4ecb3927976c10c8990b167f208c0e
31,547
import numpy as np

def simplex_creation(
    mean_value: np.ndarray, sigma_variation: np.ndarray, rng: RandomNumberGenerator = None
) -> np.ndarray:
    """
    Creation of the start simplex.

    :param mean_value: centre of the simplex
    :param sigma_variation: per-parameter scale of the simplex offsets
    :param rng: optional random number generator
    :return: (ctrl_par_number + 1, ctrl_par_number) array of simplex vertices
    """
    ctrl_par_number = mean_value.shape[0]
    ##################
    # Scale matrix:
    # the first vertex carries an all-zero offset (the mean point itself);
    # the remaining vertices are randomly perturbed unit directions that are
    # orthonormalized and then rescaled by sigma_variation
    ##################
    # First row
    x0_scale = np.zeros((1, ctrl_par_number))
    # Simplex matrix ( without first row )
    simplex_matrix = np.diag(np.ones_like(sigma_variation))
    # Add random numbers to the first row
    if rng is None:
        random_array = np.random.rand(ctrl_par_number)
    else:
        random_array = rng.get_random_numbers(ctrl_par_number)
        random_array = random_array.reshape(
            ctrl_par_number,
        )
    simplex_matrix[0, :] += np.sqrt(3) * (random_array - 0.5) * 2
    # Orthogonalize set of vectors with gram_schmidt, and rescale with the normalization length
    simplex_matrix_orthonormal = gram_schmidt(simplex_matrix.T)
    # Rescale the vector with the sigma variation
    simplex_matrix_orthogonal_rescaled = simplex_matrix_orthonormal @ np.diag(
        sigma_variation
    )
    # Add the first row containing only zeros
    x_t_norm = np.append(x0_scale, simplex_matrix_orthogonal_rescaled, axis=0)
    # Offset matrix
    x_offset = np.outer(np.ones((1, ctrl_par_number + 1)), mean_value)
    # Start simplex matrix
    start_simplex = x_t_norm + x_offset
    return start_simplex
a25ac6b6f92acb5aaa1d50f6c9a5d8d5caa02639
31,548
def _scale_db(out, data, mask, vmins, vmaxs, scale=1.0, offset=0.0): # pylint: disable=too-many-arguments """ decibel data scaling. """ vmins = [0.1*v for v in vmins] vmaxs = [0.1*v for v in vmaxs] return _scale_log10(out, data, mask, vmins, vmaxs, scale, offset)
dab3125f7d8b03ff5141e9f97f470211416f430c
31,549
from collections import deque

def make_tree(anime):
    """
    Creates anime tree
    :param anime: Anime
    :return: AnimeTree
    """
    tree = AnimeTree(anime)

    # queue for BFS (holds tree nodes, since .anime is accessed on each entry)
    queue = deque()
    root = tree.root
    queue.appendleft(root)

    # set for keeping track of visited anime
    visited = {anime}

    # BFS downwards
    while len(queue) > 0:
        current = queue.pop()
        related = current.anime.related
        for relation in related:
            if relation.lower() in CHILDREN:
                for item in related[relation]:
                    child = Anime(jikan.anime(item['mal_id']))
                    node = tree.add_child(child=child, parent=current)
                    visited.add(node)
                    queue.appendleft(node)

    parent_id = 0
    # Search for parent upwards
    while parent_id is not None:
        related = root.anime.related
        parent_id = None
        for i in PARENT:
            if i in related:
                parent_id = related[i][0]['mal_id']
                break
        if parent_id is None:
            break
        parent = Anime(jikan.anime(parent_id))
        node = tree.add_parent(parent=parent, child=root)
        root = node
        visited.add(root)
        queue.appendleft(root)

    # BFS new root
    while len(queue) > 0:
        current = queue.pop()
        if current is None:
            continue
        related = current.anime.related
        for relation in related:
            if relation.lower() in CHILDREN:
                for item in related[relation]:
                    child = Anime(jikan.anime(item['mal_id']))
                    node = tree.add_child(child=child, parent=current)
                    if node in visited:
                        continue
                    visited.add(node)
                    queue.appendleft(node)

    return tree
d93257e32b024b48668e7c02e534a31e54b4665d
31,550
def draw_bboxes(images, # type: thelper.typedefs.InputType preds=None, # type: Optional[thelper.typedefs.AnyPredictionType] bboxes=None, # type: Optional[thelper.typedefs.AnyTargetType] color_map=None, # type: Optional[thelper.typedefs.ClassColorMap] redraw=None, # type: Optional[thelper.typedefs.DrawingType] block=False, # type: Optional[bool] min_confidence=0.5, # type: thelper.typedefs.Number class_map=None, # type: Optional[thelper.typedefs.ClassIdType, AnyStr] **kwargs # type: Any ): """Draws a set of bounding box prediction results on images. Args: images: images with first dimension as list index, and other dimensions are each image's content preds: predicted bounding boxes per image to be displayed, must match images count if provided bboxes: ground truth (targets) bounding boxes per image to be displayed, must match images count if provided color_map: mapping of class-id to color to be applied to drawn bounding boxes on the image redraw: existing figure and axes to reuse for drawing the new images and bounding boxes block: indicate whether to block execution until all figures have been closed or not min_confidence: ignore display of bounding boxes that have a confidence below this value, if available class_map: alternative class-id to class-name mapping to employ for display. This overrides the default class names retrieved from each bounding box's attributed task. Useful for displaying generic bounding boxes obtained from raw input values without a specific task. kwargs: other arguments to be passed down to further drawing functions or drawing settings (amongst other settings, box_thickness, font_thickness and font_scale can be provided) """ def get_class_name(_bbox): if isinstance(class_map, dict): return class_map[_bbox.class_id] elif bbox.task is not None: return _bbox.task.class_names[_bbox.class_id] else: raise RuntimeError("could not find class name from either class mapping or bbox task definition") image_list = [get_displayable_image(images[batch_idx, ...]) for batch_idx in range(images.shape[0])] if color_map is not None and isinstance(color_map, dict): assert len(color_map) <= 256, "too many indices for uint8 map" color_map_new = np.zeros((256, 3), dtype=np.uint8) for idx, val in color_map.items(): color_map_new[idx, ...] 
= val color_map = color_map_new.tolist() nb_imgs = len(image_list) grid_size_x, grid_size_y = nb_imgs, 1 # all images on one row, by default (add gt and preds as extra rows) box_thickness = thelper.utils.get_key_def("box_thickness", kwargs, default=2, delete=True) font_thickness = thelper.utils.get_key_def("font_thickness", kwargs, default=1, delete=True) font_scale = thelper.utils.get_key_def("font_scale", kwargs, default=0.4, delete=True) if preds is not None: assert len(image_list) == len(preds) for preds_list, image in zip(preds, image_list): for bbox_idx, bbox in enumerate(preds_list): assert isinstance(bbox, thelper.data.BoundingBox), "unrecognized bbox type" if bbox.confidence is not None and bbox.confidence < min_confidence: continue color = get_bgr_from_hsl(bbox_idx / len(preds_list) * 360, 1.0, 0.5) \ if color_map is None else color_map[bbox.class_id] conf = "" if thelper.utils.is_scalar(bbox.confidence): conf = f" ({bbox.confidence:.3f})" elif isinstance(bbox.confidence, (list, tuple, np.ndarray)): conf = f" ({bbox.confidence[bbox.class_id]:.3f})" draw_bbox(image, bbox.top_left, bbox.bottom_right, f"{get_class_name(bbox)} {conf}", color, box_thickness=box_thickness, font_thickness=font_thickness, font_scale=font_scale) if bboxes is not None: assert len(image_list) == len(bboxes), "mismatched bboxes list and image list sizes" clean_image_list = [get_displayable_image(images[batch_idx, ...]) for batch_idx in range(images.shape[0])] for bboxes_list, image in zip(bboxes, clean_image_list): for bbox_idx, bbox in enumerate(bboxes_list): assert isinstance(bbox, thelper.data.BoundingBox), "unrecognized bbox type" color = get_bgr_from_hsl(bbox_idx / len(bboxes_list) * 360, 1.0, 0.5) \ if color_map is None else color_map[bbox.class_id] draw_bbox(image, bbox.top_left, bbox.bottom_right, f"GT: {get_class_name(bbox)}", color, box_thickness=box_thickness, font_thickness=font_thickness, font_scale=font_scale) grid_size_y += 1 image_list += clean_image_list return draw_images(image_list, redraw=redraw, window_name="detections", block=block, grid_size_x=grid_size_x, grid_size_y=grid_size_y, **kwargs)
6e82ee3ad211166ad47c0aae048246052de2d21c
31,551
def html_table_from_dict(data, ordering): """ >>> ordering = ['administrators', 'key', 'leader', 'project'] >>> data = [ \ {'key': 'DEMO', 'project': 'Demonstration', 'leader': '[email protected]', 'administrators': ['[email protected]', '[email protected]']}, \ {'key': 'FOO', 'project': 'Foo', 'leader': '[email protected]', 'administrators': ['[email protected]', '[email protected]']}, \ {'key': 'BAR', 'project': 'Bar', 'leader': '[email protected]', 'administrators': ['[email protected]', '[email protected]']}] >>> html_table_from_dict(data, ordering) '<table><tbody>\\n<tr><th>Administrators</th><th>Key</th><th>Leader</th><th>Project</th></tr>\\n<tr><td><ul><li><a href="mailto:[email protected]">[email protected]</a></li><li><a href="mailto:[email protected]">[email protected]</a></li></ul></td><td>DEMO</td><td>[email protected]</td><td>Demonstration</td></tr>\\n<tr><td><ul><li><a href="mailto:[email protected]">[email protected]</a></li><li><a href="mailto:[email protected]">[email protected]</a></li></ul></td><td>FOO</td><td>[email protected]</td><td>Foo</td></tr>\\n<tr><td><ul><li><a href="mailto:[email protected]">[email protected]</a></li><li><a href="mailto:[email protected]">[email protected]</a></li></ul></td><td>BAR</td><td>[email protected]</td><td>Bar</td></tr>\\n</tbody></table>' >>> ordering = ['key', 'project', 'leader', 'administrators'] >>> html_table_from_dict(data, ordering) '<table><tbody>\\n<tr><th>Key</th><th>Project</th><th>Leader</th><th>Administrators</th></tr>\\n<tr><td>DEMO</td><td>Demonstration</td><td>[email protected]</td><td><ul><li><a href="mailto:[email protected]">[email protected]</a></li><li><a href="mailto:[email protected]">[email protected]</a></li></ul></td></tr>\\n<tr><td>FOO</td><td>Foo</td><td>[email protected]</td><td><ul><li><a href="mailto:[email protected]">[email protected]</a></li><li><a href="mailto:[email protected]">[email protected]</a></li></ul></td></tr>\\n<tr><td>BAR</td><td>Bar</td><td>[email protected]</td><td><ul><li><a href="mailto:[email protected]">[email protected]</a></li><li><a href="mailto:[email protected]">[email protected]</a></li></ul></td></tr>\\n</tbody></table>' """ html = '<table><tbody>' html += html_table_header_row(ordering) for row in data: html += html_row_with_ordered_headers(row, ordering) return html + '\n</tbody></table>'
f3a77977c3341adf08af17cd3d907e2f12d5a093
31,552
import random
def getRandomChests(numChests):
    """Return a list of [x, y] integer pairs that represent treasure chest
    locations."""
    chests = []
    while len(chests) < numChests:
        newChest = [random.randint(0, BOARD_WIDTH - 1), random.randint(0, BOARD_HEIGHT - 1)]
        # Make sure a chest is not already there:
        if newChest not in chests:
            chests.append(newChest)
    return chests
285b35379f8dc8c13b873ac77c1dcac59e26ccef
31,553
import random
def random_tolerance(value, tolerance):
    """Generate a value within a small tolerance.

    Credit: /u/LightShadow on Reddit.

    Example::

        >>> time.sleep(random_tolerance(1.0, 0.01))
        >>> a = random_tolerance(4.0, 0.25)
        >>> 3.0 <= a <= 5.0
        True
    """
    value = float(value)
    if tolerance == 0.0:
        return value
    return value + value * random.uniform(-tolerance, tolerance)
abe631db8a520de788540f8e0973537306872bde
31,554
def routes_stations(): """The counts of stations of routes.""" return jsonify( [ (n.removeprefix("_"), int(c)) for n, c in r.zrange( "Stats:Route.stations", 0, 14, desc=True, withscores=True ) ] )
2e0e865681c2e47da6da5f5cbd9dc5b130721233
31,555
import math
import numpy as np

def montage(packed_ims, axis):
    """display as an Image the contents of packed_ims in a square grid along
    an arbitrary axis"""
    if packed_ims.ndim == 2:
        return packed_ims

    # bring axis to the front
    packed_ims = np.rollaxis(packed_ims, axis)

    N = len(packed_ims)
    n_tile = math.ceil(math.sqrt(N))
    rows = []
    for i in range(n_tile):
        # >= guards the row whose first tile index would run past the end
        if i * n_tile >= N:
            continue
        im = packed_ims[i * n_tile]
        for j in range(1, n_tile):
            ind = i * n_tile + j
            if ind < N:
                im = utils.hstack(im, packed_ims[ind])
            else:
                im = utils.hstack(im, np.zeros_like(packed_ims[0]))
        rows.append(im)

    matrix = rows[0]
    for i in range(1, len(rows)):
        matrix = utils.vstack(matrix, rows[i])
    return matrix
27d2de01face567a1caa618fc2a025ec3adf2c8c
31,556
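# Nine 8x8 tiles become a 3x3 mosaic, assuming the module's utils.hstack /
# utils.vstack behave like their NumPy namesakes.
import numpy as np

tiles = np.random.rand(9, 8, 8)
mosaic = montage(tiles, axis=0)
print(mosaic.shape)  # (24, 24)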
import numpy as np

def blocks2image(Blocks, blocks_image):
    """
    Function to stitch the blocks back to the original image

    input:
        Blocks --> the list of blocks (2d numpies)
        blocks_image --> numpy 2d array with numbers corresponding to block number
    output:
        image --> stitched image
    """
    image = np.zeros(np.shape(blocks_image))
    # block labels run from 1 to max inclusive, so range() needs the +1
    for i in range(1, int(np.max(blocks_image)) + 1):
        ind = np.asarray(np.where(blocks_image == i))
        top = np.min(ind[0, :])
        bottom = np.max(ind[0, :])
        left = np.min(ind[1, :])
        right = np.max(ind[1, :])
        image[top:bottom + 1, left:right + 1] = Blocks[i - 1]
    return image
ef6f5af40946828af664fc698e0b2f64dbbe8a96
31,557
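# Round trip for two 2x2 blocks tiling a 2x4 image; note that block 2 would
# be skipped without the +1 on the loop's upper bound.
import numpy as np

blocks_image = np.array([[1, 1, 2, 2],
                         [1, 1, 2, 2]])
blocks = [np.ones((2, 2)), np.full((2, 2), 5.0)]
print(blocks2image(blocks, blocks_image))
# [[1. 1. 5. 5.]
#  [1. 1. 5. 5.]]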
def create_bucket(storage_client, bucket_name, parsed_args): """Creates the test bucket. Also sets up lots of different bucket settings to make sure they can be moved. Args: storage_client: The storage client object used to access GCS bucket_name: The name of the bucket to create parsed_args: the configargparser parsing of command line options Returns: The bucket object that has been created in GCS """ bucket = storage.Bucket(client=storage_client, name=bucket_name) # Requester pays bucket.requester_pays = False # CORS policies = bucket.cors policies.append({'origin': ['/foo']}) policies[0]['maxAgeSeconds'] = 3600 bucket.cors = policies # KMS Key - When a custom KMS key is set up, uncomment the line below to test it #bucket.default_kms_key_name = parsed_args.test_default_kms_key_name # Labels bucket.labels = {'colour': 'red', 'flavour': 'cherry'} # Object Lifecycle Rules bucket.lifecycle_rules = [{ "action": { "type": "Delete" }, "condition": { "age": 365 } }] # Location bucket.location = parsed_args.test_bucket_location # Storage Class bucket.storage_class = parsed_args.test_storage_class # File Versioning # Setting this to True means we can't delete a non-empty bucket with the CLI in one # bucket.delete command bucket.versioning_enabled = False # Access Logs bucket.enable_logging(parsed_args.test_logging_bucket, parsed_args.test_logging_prefix) bucket.create() # IAM Policies policy = bucket.get_iam_policy() # Uncomment the line below to view the existing IAM policies #print(json.dumps(policy.to_api_repr(), indent=4, sort_keys=True)) policy['roles/storage.admin'].add('user:' + parsed_args.test_email_for_iam) bucket.set_iam_policy(policy) # ACLs bucket.acl.user(parsed_args.test_email_for_iam).grant_read() bucket.acl.save() # Default Object ACL bucket.default_object_acl.user(parsed_args.test_email_for_iam).grant_read() bucket.default_object_acl.save() bucket.update() # Bucket Notification notification = storage.notification.BucketNotification( bucket, parsed_args.test_topic_name, custom_attributes={'myKey': 'myValue'}, event_types=['OBJECT_FINALIZE', 'OBJECT_DELETE'], payload_format='JSON_API_V1') notification.create() return bucket
df7ccc9979007ee7278770f94c27363936961286
31,559
from itertools import chain, combinations
from typing import Dict
from typing import List
from typing import Tuple

import networkx as nx
import numpy as np

def learn_parameters(df_path: str, pas: Dict[str, List[str]]) -> \
        Tuple[Dict[str, List[str]], nx.DiGraph, Dict[str, List[float]]]:
    """
    Gets the parameters.

    :param df_path: CSV file.
    :param pas: Parent-child relationships (structure).
    :return: Tuple; first item is dictionary of domains; second item is a
        graph; third item is dictionary of probabilities.
    """
    def vals_to_str():
        ddf = df.copy(deep=True)
        for col in ddf.columns:
            ddf[col] = ddf[col].astype(str)
        return ddf

    def get_filters(ch, parents, domains):
        pas = parents[ch]
        if len(pas) == 0:
            ch_domain = domains[ch]
            return [f'{ch}=="{v}"' for v in ch_domain]
        else:
            def is_valid(tups):
                n_tups = len(tups)
                u_tups = len(set([name for name, _ in tups]))
                if n_tups == u_tups:
                    return True
                return False

            vals = [[(pa, v) for v in domains[pa]] for pa in pas]
            vals = vals + [[(ch, v) for v in domains[ch]]]
            vals = chain(*vals)
            vals = combinations(vals, len(pas) + 1)
            vals = filter(is_valid, vals)
            vals = map(lambda tups: ' and '.join([f'`{t[0]}`=="{t[1]}"' for t in tups]), vals)
            vals = list(vals)
            return vals

    def get_total(filters, n):
        def divide(arr):
            a = np.array(arr)
            n = np.sum(a)
            if n == 0:
                p = 1 / len(arr)
                return [p for _ in range(len(arr))]
            r = a / n
            r = list(r)
            return r

        counts = [ddf.query(f).shape[0] for f in filters]
        counts = [counts[i:i + n] for i in range(0, len(counts), n)]
        counts = [divide(arr) for arr in counts]
        counts = list(chain(*counts))
        return counts

    df = expand_data(df_path, pas)
    g = get_graph(pas)
    ddf = vals_to_str()

    nodes = list(g.nodes())
    domains = {n: sorted(list(ddf[n].unique())) for n in nodes}
    parents = {ch: list(g.predecessors(ch)) for ch in nodes}
    p = {ch: get_total(get_filters(ch, parents, domains), len(domains[ch]))
         for ch in nodes}

    return domains, g, p
ea34c67e5bf6b09aadc34ee271415c74103711e3
31,560
import io def extract_urls_n_email(src, all_files, strings): """IPA URL and Email Extraction.""" try: logger.info('Starting IPA URL and Email Extraction') email_n_file = [] url_n_file = [] url_list = [] domains = {} all_files.append({'data': strings, 'name': 'IPA Strings Dump'}) for file in all_files: if isinstance(file, dict): relative_src_path = file['name'] dat = '\n'.join(file['data']) # Skip CodeResources and contents under Frameworks elif 'CodeResources' in file or '/Frameworks/' in file: continue elif file.endswith(('.nib', '.ttf', '.svg', '.woff2', '.png', '.dylib', '.mobileprovision', 'Assets.car')): continue else: dat = '' relative_src_path = file.replace(src, '') with io.open(file, mode='r', encoding='utf8', errors='ignore') as flip: dat = flip.read() # Extract URLs and Emails from Plists urls, urls_nf, emails_nf = url_n_email_extract( dat, relative_src_path) url_list.extend(urls) url_n_file.extend(urls_nf) email_n_file.extend(emails_nf) # Unique URLs urls_list = list(set(url_list)) # Domain Extraction and Malware Check logger.info('Performing Malware Check on extracted Domains') domains = MalwareDomainCheck().scan(urls_list) logger.info('Finished URL and Email Extraction') binary_recon = { 'urls_list': urls_list, 'urlnfile': url_n_file, 'domains': domains, 'emailnfile': email_n_file, } return binary_recon except Exception: logger.exception('IPA URL and Email Extraction')
edb0dd4f0fe24de914f99b87999efd9a24795381
31,561
def find_scan_info(filename, position='__P', scan='__S', date='____'):
    """
    Find the laser position and scan number by parsing the file name.

    Expects names of the form '...__P<position>__S<scan>____<date>...'.
    Returns (laser_position, scan_number) as strings, or (-1, -1) when the
    markers cannot be found.
    """
    try:
        file = filename.split(position, 2)
        file = file[1].split(scan, 2)
        laser_position = file[0]
        file = file[1].split(date, 2)
        scan_number = file[0]
    except IndexError:
        laser_position = -1
        scan_number = -1

    return laser_position, scan_number
f98afb440407ef7eac8ceda8e15327b5f5d32b35
31,562
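# Parsing a name of the expected '__P<pos>__S<scan>____<date>' shape;
# the file name is a made-up example.
pos, scan = find_scan_info("run__P12__S3____20200101.dat")
print(pos, scan)  # 12 3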