Columns: content (string, lengths 35–762k), sha1 (string, length 40), id (int64, 0–3.66M).
def delete_data(data, object_name, **kwargs):
    """ Delete data """
    data.delete()
    is_queryset = isinstance(data, QuerySet)
    return {
        "is_queryset": is_queryset,
        "data": data,
        "object_name": object_name,
    }
28405ae426e53fc3637a4b281775cba99e112a0a
3,659,100
def get_g(source):
    """ Read the Graph from a textfile """
    G = {}
    Grev = {}
    for i in range(1, N + 1):
        G[i] = []
        Grev[i] = []
    fin = open(source)
    for line in fin:
        v1 = int(line.split()[0])
        v2 = int(line.split()[1])
        G[v1].append(v2)
        Grev[v2].append(v1)
    fin.close()
    return G, Grev
f2771c28d6c86a0af035cc38cd5cdad2774b0dba
3,659,101
def _mercator(lat_long):
    """
    Calculate the 2D X and Y coordinates from a set of coordinates based on
    radius, latitude and longitude using the Mercator projection.

    :param lat_long: The coordinates of the points to be projected expressed
        as radius, latitude and longitude.
    :type lat_long: list[tuple]
    :return: The projected coordinates in the XY-plane.
    :rtype: ndarray
    """
    x = np.array([coord[0] * coord[2] for coord in lat_long])
    y = np.array([coord[0] * np.log(np.tan(np.pi / 4 + coord[1] / 2))
                  for coord in lat_long])
    return np.vstack((x, y)).T
cc1f4eb97f4c5a1505b88ab5aa8fa6992744dccf
3,659,102
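Here x = r * longitude and y = r * ln(tan(pi/4 + latitude/2)), with both angles in radians. A quick numerical sanity check of those formulas (a sketch assuming numpy; not part of the original snippet) is that the equator maps to y = 0:

import numpy as np

r, lat, lon = 1.0, 0.0, np.pi / 2
x = r * lon                                  # 1.5707...
y = r * np.log(np.tan(np.pi / 4 + lat / 2))  # 0.0 at the equator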
def subjectForm(request, experiment_id):
    """
    Generates the fourth page, the demographic/participant data form of an
    experiment.
    """
    experiment = get_object_or_404(Experiment, pk=experiment_id)
    form = SubjectDataForm(experiment=experiment)
    t = Template(experiment.demographic_data_page_tpl)
    c = RequestContext(request, {'subject_data_form': form,
                                 'experiment': experiment,
                                 'recaptcha_site_key': settings.GOOGLE_RECAPTCHA_SITE_KEY})
    return HttpResponse(t.render(c))
48273e891c87b30157c13c726376a9d3052eebe6
3,659,103
def sigma_0(x):
    """First rotational + shifting mixing function

    σ_256_0(x) = ROTR_7(x) ⊕ ROTR_18(x) ⊕ SHR_3(x)
    """
    return ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3)
9090dc6652944189765657ad9b3650f54b10e70a
3,659,104
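sigma_0 relies on 32-bit rotate-right (ROTR) and logical shift-right (SHR) helpers that are not included in this snippet. A minimal sketch of them, following the FIPS 180-4 definitions (the helper names and the 32-bit word size are assumptions, not taken from the source):

MASK32 = 0xFFFFFFFF

def ROTR(x, n, w=32):
    # rotate a w-bit word right by n positions
    return ((x >> n) | (x << (w - n))) & MASK32

def SHR(x, n):
    # logical shift right by n positions
    return x >> n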
from datetime import datetime


def edit_entry(edit_result):
    """Edit entry"""
    new_entry = edit_result.copy()
    edit_key = None
    edit_value = None
    date_keys = ["Date"]
    int_keys = ["Time Spent"]
    while edit_key not in edit_result:
        reset_screen("key", "Please type the key you want to edit.")
        for key, value in edit_result.items():
            print(f"{key}: {value}")
        edit_key = get_input(str)
        if edit_key not in edit_result:
            reset_screen(error=True, sub_title="Input is not a valid key.")
    if edit_key in date_keys:
        input_type = datetime
    elif edit_key in int_keys:
        input_type = int
    else:
        input_type = str
    while not edit_value:
        reset_screen("new value", ENTRY_QUESTIONS[edit_key])
        edit_value = get_input(input_type, newline=False)
    new_entry[edit_key] = edit_value
    entries = get_entries()
    entries[entries.index(edit_result)] = new_entry
    csvfile = open("entries.csv", "w")
    csvfile.close()
    for entry in entries:
        write_to_csv(entry)
    return new_entry
e63b9d94f192fdc2175457ebc1ce7f9562e1cf41
3,659,105
import sys import os def build_input_files(filename, base_path = 'input_files', out = sys.stdout): """ build_input_files(filename, base_path = 'input_files') takes a 'well-formated' input fileand outputs a directory structure with the properly formated input files created in them. """ calling_dir = os.getcwd() # I'm doing this because I need it later file_path, file_name = os.path.split(filename) with open(filename, 'r') as f: txt = f.read() ## First Parse the FDS file param_dict, IOoutput = FDSa_parser(txt, file_name, out) # param_dict, sweep_param_dict, prms_in_axis = calculate_params(param_dict, axes) for key_ in param_dict.keys(): txt = txt.replace(param_dict[key_][0], key_) formatted_trials, logfile, IOoutput = eval_parsed_FDS(param_dict, out) print("formatted_trials", formatted_trials[0]) ## Make input files and directories for i, value_set in enumerate(formatted_trials): print(i,value_set) tmp_txt = txt # make a directory case_name = 'case_'+int2base(i, 26) # FDS uses uppercase reseved keywords, and so will we value_set['TITLE'] = case_name input_directory_builder(case_name, base_path) # populate the input file print(tmp_txt.count(list(value_set.keys())[1])) print(value_set) with open('tmp_txt', 'w') as f: f.write(str(tmp_txt)) tmp_txt = tmp_txt.format(**value_set) ## The format command doesn't like : or . because it things its a float format # create the file name fname = os.path.join(calling_dir, base_path, case_name, case_name + '.fds') # write the input file to the directory with open(fname, 'w') as f: f.write(str(tmp_txt)) log_path_name = os.path.join(calling_dir, base_path, file_name[:-4] + '.log') # write the augmented fds log file with open(log_path_name, 'a') as f: f.write(logfile) return IOoutput
a40def5dfc8d52f905e8a82ddafb5f756771a3e7
3,659,106
def srpd(mvec, k, ra, Nmax, w, V): """ Calculate the Steered Response Power Density (SRPD) :param mvec: SHD coefficients for the TF bin to be analysed :param k: Wave number (2*pi*f/c) :param ra: Radius of the microphone array :param Nmax: Maximum SHD order to be used :param w: Diagonal eigenvalue matrix :param V: Reduced eigenvector matrix :return: SRPD for the given pixel """ assert np.size(mvec) == (Nmax + 1) ** 2 V = V[0:(Nmax + 1) ** 2, 0:(Nmax + 1) ** 2] w = w[0:(Nmax + 1) ** 2] kra = k * ra jn, jnp, yn, ynp = sph_jnyn(Nmax, kra) # jn, jnp, yn, ynp = spec.sph_jnyn(Nmax, kra) hn = jn - 1j * yn hnp = jnp - 1j * ynp bnkra = jn - (jnp / hnp) * hn b = [] for n in range(Nmax + 1): for count in range(-n, n + 1): b.append(1 / (4 * np.pi * (1j) ** n * bnkra[n])) b = np.array(b) p = b * mvec B0 = np.conj(np.matrix(np.conj(p)) * V).T B0s = np.diag(w) * np.multiply(B0, np.conj(B0)) srpval = B0s.sum() return srpval
0506c76812bfdff447f09e4dae8380635e894040
3,659,107
import torch


def batch_inverse(tensor):
    """
    Compute the matrix inverse of a batch of square matrices. This routine is
    used for removing rotational motion during the molecular dynamics
    simulation. Taken from https://stackoverflow.com/questions/46595157

    Args:
        tensor (torch.Tensor): Tensor of square matrices with the shape
            n_batch x dim1 x dim1

    Returns:
        torch.Tensor: Tensor of the inverted square matrices with the same
            shape as the input tensor.
    """
    eye = tensor.new_ones(tensor.size(-1), device=tensor.device).diag().expand_as(tensor)
    tensor_inv, _ = torch.gesv(eye, tensor)
    return tensor_inv
b8defb26561e38d5e16e2483f27287a334b2cd61
3,659,108
def create(**kwds):
    """ Add data. """
    status_code = 200
    message = "Successfully added data."
    articles = []
    for a in kwds.get("articles", []):
        a = Article.query.filter_by(id=a).first()
        if a:
            articles.append(a)
    cols = {"user_id": current_user.id, "name": kwds["name"]}
    model = Bookmark.query.filter_by(**cols).first()
    if model:
        for a in articles:
            exist = model.articles.filter_by(id=a.id).first()
            if not exist:
                model.articles.append(a)
        db_commit()
    else:
        cols["articles"] = articles
        model = Bookmark(**cols)
        db_add(model)
    return {"code": status_code, "message": message}
a8add828427b285700f23a041bd2c592346775f2
3,659,109
def _center_size_bbox_to_corners_bbox(centers, sizes):
    """Converts bbox center-size representation to corners representation.

    Args:
        centers: a tensor with shape [N, 2] representing bounding box centers
        sizes: a tensor with shape [N, 2] representing bounding box sizes

    Returns:
        corners: tensor with shape [N, 4] representing bounding boxes in
            corners representation
    """
    return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1)
885bbbe2760a464c6fd3bad0811e91a70610eb8c
3,659,110
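The conversion simply offsets each center by half the size in both directions. A quick worked check (a sketch assuming TensorFlow in eager mode, not part of the original snippet): a box centered at (2, 2) with size (4, 4) becomes the corner box (0, 0, 4, 4).

import tensorflow as tf

centers = tf.constant([[2.0, 2.0]])
sizes = tf.constant([[4.0, 4.0]])
corners = tf.concat([centers - 0.5 * sizes, centers + 0.5 * sizes], 1)
# -> [[0., 0., 4., 4.]]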
from datetime import datetime


def get_starting_month(number_of_months_to_get, include_actual_month=True,
                       actual_date=datetime.now()):
    """
    Get starting month based on parameters
    :param number_of_months_to_get: Numbers of months to get - e.g: 2
    :param include_actual_month: Include actual month? e.g.: True
    :param actual_date: Actual Date e.g: now()
    :return: Initial month & year e.g: (12, 2014)
    :raise Exception: if number_of_months_to_get less than 1
    """
    if number_of_months_to_get <= 0:
        raise Exception("Number of month's to get should be greater than 0")
    initial_year = actual_date.year
    if actual_date.month > number_of_months_to_get:
        initial_month = actual_date.month - number_of_months_to_get
    else:
        initial_month = actual_date.month - number_of_months_to_get
        if initial_month <= 0:
            initial_month += 12
            initial_year -= 1
    if include_actual_month:
        initial_month += 1
        if initial_month > 12:
            initial_month = 1
            initial_year += 1
    return initial_month, initial_year
c075ef074b644749ca72955598c098cf76845608
3,659,111
import token def cvt_raise_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base: """raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]""" # 0 1 2 3 2 3 4 5 #-# Raise(expr? exc, expr? cause) assert ctx.is_REF, [node] if len(node.children) == 1: return ast_cooked.RaiseStmt(items=[]) exc = cvt(node.children[1], ctx) if len(node.children) > 2: # TODO: test case if xcast(Leaf, node.children[2]).value == 'from': raise_from = cvt(node.children[3], ctx) exc2 = ast_cooked.OMITTED_NODE exc3 = ast_cooked.OMITTED_NODE else: raise_from = ast_cooked.OMITTED_NODE assert node.children[2].type == token.COMMA, [node] exc2 = cvt(node.children[3], ctx) # TODO: test case if len(node.children) > 4: assert node.children[4].type == token.COMMA, [node] exc3 = cvt(node.children[5], ctx) else: exc3 = ast_cooked.OMITTED_NODE else: raise_from = ast_cooked.OMITTED_NODE exc2 = ast_cooked.OMITTED_NODE exc3 = ast_cooked.OMITTED_NODE return ast_cooked.RaiseStmt(items=[exc, exc2, exc3, raise_from])
8e7809ff9317a285838f0c0a1a25a0b40634b88f
3,659,112
def user_in_user_groups(user_id, **options):
    """
    Get all user groups a user belongs to

    :param user_id: The id of user
    :type user_id: str
    :param options: Generic advanced options dict, see online documentation
    :type options: dict, optional
    :return: List of groups user is in
    :rtype: dict
    """
    uri = [USER_GROUPS_SUB_PATH, user_id]
    return _call_account_api("get", uri, {}, **options)
70b83b81ee4d03e7ab5fff68be710c02c01aaa0d
3,659,113
def read_book(title_path):
    """Read a book and return it as a string"""
    # encoding="utf8" causes a problem when running the code in Python 2.7.
    # However, it runs normally when using Python 3.5.
    with open(title_path, "r", encoding="utf8") as current_file:
        text = current_file.read()
        text = text.replace("\n", "").replace("\r", "")
        return text
e5273c6b0b71638b47ce5ee5beb33c715c914a1b
3,659,114
from datetime import datetime def eval_whole_scene_one_epoch(sess, ops, test_writer): """ ops: dict mapping from string to tf ops """ global EPOCH_CNT is_training = False test_idxs = np.arange(0, len(TEST_DATASET_WHOLE_SCENE)) num_batches = len(TEST_DATASET_WHOLE_SCENE) total_correct = 0 total_seen = 0 loss_sum = 0 total_seen_class = [0 for _ in range(NUM_CLASSES)] total_correct_class = [0 for _ in range(NUM_CLASSES)] total_correct_vox = 0 total_seen_vox = 0 total_seen_class_vox = [0 for _ in range(NUM_CLASSES)] total_correct_class_vox = [0 for _ in range(NUM_CLASSES)] log_string(str(datetime.now())) log_string('---- EPOCH %03d EVALUATION WHOLE SCENE----'%(EPOCH_CNT)) labelweights = np.zeros(21) labelweights_vox = np.zeros(21) is_continue_batch = False extra_batch_data = np.zeros((0,NUM_POINT,3)) extra_batch_label = np.zeros((0,NUM_POINT)) extra_batch_smpw = np.zeros((0,NUM_POINT)) for batch_idx in range(num_batches): if not is_continue_batch: batch_data, batch_label, batch_smpw = TEST_DATASET_WHOLE_SCENE[batch_idx] batch_data = np.concatenate((batch_data,extra_batch_data),axis=0) batch_label = np.concatenate((batch_label,extra_batch_label),axis=0) batch_smpw = np.concatenate((batch_smpw,extra_batch_smpw),axis=0) else: batch_data_tmp, batch_label_tmp, batch_smpw_tmp = TEST_DATASET_WHOLE_SCENE[batch_idx] batch_data = np.concatenate((batch_data,batch_data_tmp),axis=0) batch_label = np.concatenate((batch_label,batch_label_tmp),axis=0) batch_smpw = np.concatenate((batch_smpw,batch_smpw_tmp),axis=0) if batch_data.shape[0]<BATCH_SIZE: is_continue_batch = True continue elif batch_data.shape[0]==BATCH_SIZE: is_continue_batch = False extra_batch_data = np.zeros((0,NUM_POINT,3)) extra_batch_label = np.zeros((0,NUM_POINT)) extra_batch_smpw = np.zeros((0,NUM_POINT)) else: is_continue_batch = False extra_batch_data = batch_data[BATCH_SIZE:,:,:] extra_batch_label = batch_label[BATCH_SIZE:,:] extra_batch_smpw = batch_smpw[BATCH_SIZE:,:] batch_data = batch_data[:BATCH_SIZE,:,:] batch_label = batch_label[:BATCH_SIZE,:] batch_smpw = batch_smpw[:BATCH_SIZE,:] aug_data = batch_data feed_dict = {ops['pointclouds_pl']: aug_data, ops['labels_pl']: batch_label, ops['smpws_pl']: batch_smpw, ops['is_training_pl']: is_training} summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']], feed_dict=feed_dict) test_writer.add_summary(summary, step) pred_val = np.argmax(pred_val, 2) # BxN correct = np.sum((pred_val == batch_label) & (batch_label>0) & (batch_smpw>0)) # evaluate only on 20 categories but not unknown total_correct += correct total_seen += np.sum((batch_label>0) & (batch_smpw>0)) loss_sum += loss_val tmp,_ = np.histogram(batch_label,range(22)) labelweights += tmp for l in range(NUM_CLASSES): total_seen_class[l] += np.sum((batch_label==l) & (batch_smpw>0)) total_correct_class[l] += np.sum((pred_val==l) & (batch_label==l) & (batch_smpw>0)) for b in range(batch_label.shape[0]): _, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(aug_data[b,batch_smpw[b,:]>0,:], np.concatenate((np.expand_dims(batch_label[b,batch_smpw[b,:]>0],1),np.expand_dims(pred_val[b,batch_smpw[b,:]>0],1)),axis=1), res=0.02) total_correct_vox += np.sum((uvlabel[:,0]==uvlabel[:,1])&(uvlabel[:,0]>0)) total_seen_vox += np.sum(uvlabel[:,0]>0) tmp,_ = np.histogram(uvlabel[:,0],range(22)) labelweights_vox += tmp for l in range(NUM_CLASSES): total_seen_class_vox[l] += np.sum(uvlabel[:,0]==l) total_correct_class_vox[l] += np.sum((uvlabel[:,0]==l) & (uvlabel[:,1]==l)) log_string('eval whole 
scene mean loss: %f' % (loss_sum / float(num_batches))) log_string('eval whole scene point accuracy vox: %f'% (total_correct_vox / float(total_seen_vox))) log_string('eval whole scene point avg class acc vox: %f' % (np.mean(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6)))) log_string('eval whole scene point accuracy: %f'% (total_correct / float(total_seen))) log_string('eval whole scene point avg class acc: %f' % (np.mean(np.array(total_correct_class[1:])/(np.array(total_seen_class[1:],dtype=np.float)+1e-6)))) labelweights = labelweights[1:].astype(np.float32)/np.sum(labelweights[1:].astype(np.float32)) labelweights_vox = labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32)) caliweights = np.array([0.388,0.357,0.038,0.033,0.017,0.02,0.016,0.025,0.002,0.002,0.002,0.007,0.006,0.022,0.004,0.0004,0.003,0.002,0.024,0.029]) caliacc = np.average(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=np.float)+1e-6),weights=caliweights) log_string('eval whole scene point calibrated average acc vox: %f' % caliacc) per_class_str = 'vox based --------' for l in range(1,NUM_CLASSES): per_class_str += 'class %d weight: %f, acc: %f; ' % (l,labelweights_vox[l-1],total_correct_class_vox[l]/float(total_seen_class_vox[l])) log_string(per_class_str) EPOCH_CNT += 1 return caliacc
0c5fd39c8cb464a0b4883be15aa687882a20f94a
3,659,115
def _create_save_name(save_path: str, case_date: date, field_names: list,
                      fix: str = "") -> str:
    """Creates file name for saved images."""
    date_string = case_date.strftime("%Y%m%d")
    return f"{save_path}{date_string}_{'_'.join(field_names)}{fix}.png"
a731effa50ae291df31fcd4b282a924a057561dd
3,659,116
def list_favorite_queries():
    """List of all favorite queries.

    Returns (title, rows, headers, status)"""
    headers = ["Name", "Query"]
    rows = [(r, favoritequeries.get(r)) for r in favoritequeries.list()]
    if not rows:
        status = '\nNo favorite queries found.' + favoritequeries.usage
    else:
        status = ''
    return [('', rows, headers, status)]
e3b20d3d06a76d7f621fa830e2d22f0d3e6614ad
3,659,117
def random_portfolio_weights(weights_count) -> np.array:
    """ Random portfolio weights, of length weights_count. """
    weights = np.random.random((weights_count, 1))
    weights /= np.sum(weights)
    return weights.reshape(-1, 1)
47ba5ea84b24ede66fe4d1071fb82f721a550995
3,659,118
def matrix2list(mat):
    """Create list of lists from blender Matrix type."""
    return list(map(list, list(mat)))
9b4b598eb33e4d709e15fd826f23d06653659318
3,659,119
def convert_handle(handle):
    """
    Takes string handle such as 1: or 10:1 and creates a binary number
    accepted by the kernel Traffic Control.
    """
    if isinstance(handle, str):
        major, minor = handle.split(':')  # "major:minor"
        minor = minor if minor else '0'
        return int(major, 16) << 16 | int(minor, 16)
    return handle
ed4ef5107178bd809a421e0b66c621d9bdaceef1
3,659,120
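The handle is parsed as hexadecimal "major:minor" and packed with the major number in the upper 16 bits, matching how tc encodes handles. A small worked check of the function above:

convert_handle("1:")    # 0x1 << 16         -> 0x10000  (65536)
convert_handle("10:1")  # 0x10 << 16 | 0x1  -> 0x100001 (1048577)
convert_handle(65536)   # non-strings are returned unchanged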
def index(request):
    """Display start page"""
    return HttpResponseRedirect(reverse('admin:index'))
c237e46affb7217bbcfc1146d98f84fb1cc20cc6
3,659,121
import traceback from datetime import datetime async def check_data(user_input, hass, own_id=None): """Check validity of the provided date.""" ret = {} if(CONF_ICS_URL in user_input): try: cal_string = await async_load_data(hass, user_input[CONF_ICS_URL]) try: Calendar.from_ical(cal_string) except Exception: _LOGGER.error(traceback.format_exc()) ret["base"] = ERROR_ICS return ret except Exception: _LOGGER.error(traceback.format_exc()) ret["base"] = ERROR_URL return ret if(CONF_TIMEFORMAT in user_input): try: datetime.datetime.now(get_localzone()).strftime(user_input[CONF_TIMEFORMAT]) except Exception: _LOGGER.error(traceback.format_exc()) ret["base"] = ERROR_TIMEFORMAT return ret if(CONF_ID in user_input): if(user_input[CONF_ID] < 0): _LOGGER.error("ICS: ID below zero") ret["base"] = ERROR_SMALL_ID return ret if(CONF_LOOKAHEAD in user_input): if(user_input[CONF_LOOKAHEAD] < 1): _LOGGER.error("ICS: Lookahead < 1") ret["base"] = ERROR_SMALL_LOOKAHEAD return ret if(CONF_ID in user_input): if((own_id != user_input[CONF_ID]) and (hass is not None)): if(async_generate_entity_id(ENTITY_ID_FORMAT, "ics_" + str(user_input[CONF_ID]), hass=hass) != PLATFORM + ".ics_" + str(user_input[CONF_ID])): _LOGGER.error("ICS: ID not unique") ret["base"] = ERROR_ID_NOT_UNIQUE return ret if(CONF_N_SKIP in user_input): if(user_input[CONF_N_SKIP] < 0): _LOGGER.error("ICS: Skip below zero") ret["base"] = ERROR_NEGATIVE_SKIP return ret return ret
a0b9302cb1f69c98585edb0bae918675ceab32cf
3,659,122
import os import json import warnings def run( uri, entry_point="main", version=None, parameters=None, docker_args=None, experiment_name=None, experiment_id=None, backend="local", backend_config=None, use_conda=None, storage_dir=None, synchronous=True, run_id=None, run_name=None, env_manager=None, ): """ Run an MLflow project. The project can be local or stored at a Git URI. MLflow provides built-in support for running projects locally or remotely on a Databricks or Kubernetes cluster. You can also run projects against other targets by installing an appropriate third-party plugin. See `Community Plugins <../plugins.html#community-plugins>`_ for more information. For information on using this method in chained workflows, see `Building Multistep Workflows <../projects.html#building-multistep-workflows>`_. :raises: :py:class:`mlflow.exceptions.ExecutionException` If a run launched in blocking mode is unsuccessful. :param uri: URI of project to run. A local filesystem path or a Git repository URI (e.g. https://github.com/mlflow/mlflow-example) pointing to a project directory containing an MLproject file. :param entry_point: Entry point to run within the project. If no entry point with the specified name is found, runs the project file ``entry_point`` as a script, using "python" to run ``.py`` files and the default shell (specified by environment variable ``$SHELL``) to run ``.sh`` files. :param version: For Git-based projects, either a commit hash or a branch name. :param parameters: Parameters (dictionary) for the entry point command. :param docker_args: Arguments (dictionary) for the docker command. :param experiment_name: Name of experiment under which to launch the run. :param experiment_id: ID of experiment under which to launch the run. :param backend: Execution backend for the run: MLflow provides built-in support for "local", "databricks", and "kubernetes" (experimental) backends. If running against Databricks, will run against a Databricks workspace determined as follows: if a Databricks tracking URI of the form ``databricks://profile`` has been set (e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run against the workspace specified by <profile>. Otherwise, runs against the workspace specified by the default Databricks CLI profile. :param backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which will be passed as config to the backend. The exact content which should be provided is different for each execution backend and is documented at https://www.mlflow.org/docs/latest/projects.html. :param use_conda: This argument is deprecated. Use `env_manager='local'` instead. If True (the default), create a new Conda environment for the run and install project dependencies within that environment. Otherwise, run the project in the current environment without installing any project dependencies. :param storage_dir: Used only if ``backend`` is "local". MLflow downloads artifacts from distributed URIs passed to parameters of type ``path`` to subdirectories of ``storage_dir``. :param synchronous: Whether to block while waiting for a run to complete. Defaults to True. Note that if ``synchronous`` is False and ``backend`` is "local", this method will return, but the current process will block when exiting until the local run completes. If the current process is interrupted, any asynchronous runs launched via this method will be terminated. If ``synchronous`` is True and the run fails, the current process will error out as well. 
:param run_id: Note: this argument is used internally by the MLflow project APIs and should not be specified. If specified, the run ID will be used instead of creating a new run. :param run_name: The name to give the MLflow Run associated with the project execution. If ``None``, the MLflow Run name is left unset. :param env_manager: Specify an environment manager to create a new environment for the run and install project dependencies within that environment. The following values are suppported: - local: use the local environment - conda: use conda - virtualenv: use virtualenv (and pyenv for Python version management) If unspecified, default to conda. :return: :py:class:`mlflow.projects.SubmittedRun` exposing information (e.g. run ID) about the launched run. .. code-block:: python :caption: Example import mlflow project_uri = "https://github.com/mlflow/mlflow-example" params = {"alpha": 0.5, "l1_ratio": 0.01} # Run MLflow project and create a reproducible conda environment # on a local host mlflow.run(project_uri, parameters=params) .. code-block:: text :caption: Output ... ... Elasticnet model (alpha=0.500000, l1_ratio=0.010000): RMSE: 0.788347345611717 MAE: 0.6155576449938276 R2: 0.19729662005412607 ... mlflow.projects: === Run (ID '6a5109febe5e4a549461e149590d0a7c') succeeded === """ backend_config_dict = backend_config if backend_config is not None else {} if ( backend_config and type(backend_config) != dict and os.path.splitext(backend_config)[-1] == ".json" ): with open(backend_config, "r") as handle: try: backend_config_dict = json.load(handle) except ValueError: _logger.error( "Error when attempting to load and parse JSON cluster spec from file %s", backend_config, ) raise if use_conda is not None and env_manager is not None: raise MlflowException.invalid_parameter_value( "`use_conda` cannot be used with `env_manager`" ) elif use_conda is not None: warnings.warn( "`use_conda` is deprecated and will be removed in a future release. " "Use `env_manager=local` instead", FutureWarning, stacklevel=2, ) env_manager = _EnvManager.CONDA if use_conda else _EnvManager.LOCAL elif env_manager is not None: _EnvManager.validate(env_manager) if backend == "databricks": mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), backend_config) elif backend == "local" and run_id is not None: backend_config_dict[MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG] = run_id experiment_id = _resolve_experiment_id( experiment_name=experiment_name, experiment_id=experiment_id ) submitted_run_obj = _run( uri=uri, experiment_id=experiment_id, entry_point=entry_point, version=version, parameters=parameters, docker_args=docker_args, backend_name=backend, backend_config=backend_config_dict, env_manager=env_manager, storage_dir=storage_dir, synchronous=synchronous, run_name=run_name, ) if synchronous: _wait_for(submitted_run_obj) return submitted_run_obj
56c0c9f333e0b4861533c59db14793e5b3d9af1e
3,659,123
def general_search_v2(params, sed_mod, lnprior, Alambda, sed_obs, sed_obs_err=0.1, vpi_obs=None, vpi_obs_err=None, Lvpi=1.0, Lprior=1.0, cost_order=2, av_llim=-0.001, debug=False): """ when p = [teff, logg, [M/H], Av, DM], theta = [teff, logg, [M/H]], given a set of SED, find the best theta and estimate the corresponding Av and DM """ n_band = len(sed_obs) n_mod = sed_mod.shape[0] # cope with scalar sed_obs_err if isinstance(sed_obs_err, np.float): sed_obs_err = np.ones_like(sed_obs, np.float) * sed_obs_err # select good bands ind_good_band = np.isfinite(sed_obs) & (sed_obs_err > 0) n_good_band = np.sum(ind_good_band) if n_good_band < 4: # n_good_band = 3: unique solution # so n_good_band should be at least 4 return [np.ones((4,), ) * np.nan for i in range(3)] # use a subset of bands sed_mod_select = sed_mod[:, ind_good_band] # observed SED sed_obs_select = sed_obs[ind_good_band] sed_obs_err_select = sed_obs_err[ind_good_band] # extinction coefs Alambda_select = Alambda[ind_good_band] # WLS to guess Av and DM av_est, dm_est = guess_avdm_wls( sed_mod_select, sed_obs_select, sed_obs_err_select, Alambda_select) # cost(SED) res_sed = sed_mod_select + av_est.reshape(-1, 1) * Alambda_select \ + dm_est.reshape(-1, 1) - sed_obs_select lnprob_sed = -0.5 * np.nansum( np.abs(res_sed / sed_obs_err_select) ** cost_order, axis=1) # cost(VPI) if vpi_obs is not None and vpi_obs_err is not None and Lvpi > 0: vpi_mod = 10 ** (2 - 0.2 * dm_est) lnprob_vpi = -0.5 * ((vpi_mod - vpi_obs) / vpi_obs_err) ** 2. else: lnprob_vpi = np.zeros((n_mod,), np.float) lnprob_vpi = np.where(np.isfinite(lnprob_vpi), lnprob_vpi, 0) * Lvpi # lnprob = cost(SED) + cost(VPI) + prior if Lprior > 0: lnprob_prior = lnprior * Lprior # posterior probability lnpost = lnprob_sed + lnprob_vpi + lnprob_prior # eliminate neg Av lnpost[av_est < av_llim] = -np.inf lnpost -= np.nanmax(lnpost) # for debugging the code if debug: return dict(params=params, av_est=av_est, dm_est=dm_est, lnprob_sed=lnprob_sed, lnprob_vpi=lnprob_vpi, lnprior=lnprior) # normalization post = np.exp(lnpost) L0 = np.sum(post) # weighted mean # ind_mle = np.argmax(lnpost) # av_mle = av_est[ind_mle] # dm_mle = dm_est[ind_mle] # p_mle = params[ind_mle] L1_av = np.sum(av_est * post) L1_dm = np.sum(dm_est * post) L1_p = np.sum(params * post.reshape(-1, 1), axis=0) L2_av = np.sum(av_est ** 2 * post) L2_dm = np.sum(dm_est ** 2 * post) L2_p = np.sum(params ** 2 * post.reshape(-1, 1), axis=0) sigma_av = np.sqrt(L2_av / L0 - L1_av ** 2 / L0 ** 2) sigma_dm = np.sqrt(L2_dm / L0 - L1_dm ** 2 / L0 ** 2) sigma_p = np.sqrt(L2_p / L0 - L1_p ** 2 / L0 ** 2) # MLE model ind_mle = np.argmax(lnprob_sed + lnprob_vpi) av_mle = av_est[ind_mle] dm_mle = dm_est[ind_mle] p_mle = params[ind_mle] p_mle = np.hstack([p_mle, av_mle, dm_mle]) p_mean = np.hstack([L1_p/L0, L1_av/L0, L1_dm/L0]) p_err = np.hstack([sigma_p, sigma_av, sigma_dm]) rms_sed_mle = np.sqrt(np.nanmean(res_sed[ind_mle] ** 2.)) rms_sed_min = np.min(np.sqrt(np.nanmean(res_sed ** 2., axis=1))) return dict(p_mle=p_mle, p_mean=p_mean, p_err=p_err, rmsmle=rms_sed_mle, rmsmin=rms_sed_min, ind_mle=ind_mle, n_good=np.sum(ind_good_band))
9629d0ecdec38f4e55bf3becb219c5c348300988
3,659,124
import re def demangle_backtrace(backtrace): """ Returns a demangled backtrace. Args: * backtrace, a backtrace to demangle """ new_bt = [] frame_regex = re.compile(FRAME_PATTERN) lines = backtrace.splitlines() for line in lines: frame = frame_regex.match(line) if frame: func = frame.group(2) # A frame with missing symbols is a special case, so skip it if func == '???': new_bt.append(line) continue # FIXME: this logic will break once the crash probe starts sending # function argument values; make this more generic! if func[-2:] == '()': # The crash probe adds the () to the function name, but c++filt # cannot demangle a symbol with the () suffix func_name = func[:-2] else: # Assume already demangled, or this is from a kernel crash record new_bt.append(line) continue try: new_func = cxxfilt.demangle(func_name) except cxxfilt.InvalidName: new_bt.append(line) continue # c++filt adds a trailing newline to the output new_func = new_func.rstrip() # Restore () if this was not a mangled symbol if new_func == func_name: new_func = func_name + '()' repl_str = r'\1{}\3'.format(new_func) new_line = frame_regex.sub(repl_str, line) new_bt.append(new_line) else: new_bt.append(line) return '\n'.join(new_bt)
676b90c16223b24f539520306a7725434eb28363
3,659,125
import sys
import os


def resource_path(base_path, rel_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    # PyInstaller creates a temp folder and stores path in _MEIPASS
    return os.path.join(getattr(sys, '_MEIPASS', base_path), rel_path)
aae3961d92f433aef4b8b3b4a1a946e89282548c
3,659,126
def legendre(N, x):
    """
    Returns the value of Legendre Polynomial P_N(x) at position x[-1, 1].
    """
    P = np.zeros(2 * N)
    if N == 0:
        P[0] = 1
    elif N == 1:
        P[1] = x
    else:
        P[0] = 1
        P[1] = x
        for i in range(2, N + 1):
            P[i] = (1.0 / float(i)) * ((2 * i - 1) * x * P[i - 1] - (i - 1) * P[i - 2])
    return P[N]
0e02e19ef0a251aa4b30823d1598fc5fb8933288
3,659,127
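The loop is Bonnet's recurrence, i * P_i(x) = (2i - 1) * x * P_{i-1}(x) - (i - 1) * P_{i-2}(x), seeded with P_0 = 1 and P_1 = x. A short worked check (assuming numpy is available as np): P_2(x) = (3x^2 - 1) / 2, so

legendre(2, 0.5)        # -0.125
(3 * 0.5 ** 2 - 1) / 2  # -0.125, the closed form of P_2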
def skip_any_whitespace(doc, idx):
    """Iterate through characters in ``doc`` starting from index ``idx`` until
    a non-whitespace character is reached. This iteration will also attempt to
    ignore comments.

    Args:
        doc (str): The JSPEC document.
        idx (int): The starting index for the iterator.

    Returns:
        str: The first non-whitespace character, starting at index ``idx``
        int: The index of this character in ``doc``

    Raises:
        JSPECDecodeError: Raised if an unterminated comment is detected.
    """
    nextchar = doc[idx:idx + 1]
    if nextchar not in WHITESPACE_CHARACTERS:
        return nextchar, idx
    while True:
        idx = WHITESPACE_MATCH(doc, idx).end()
        if doc[idx:idx + 2] == '//':
            idx = COMMENT_MATCH(doc, idx).end()
            continue
        if doc[idx:idx + 2] != '/*':
            break
        m = MULTILINE_COMMENT_MATCH(doc, idx)
        if m is None:
            raise JSPECDecodeError("Unterminated comment", doc, idx)
        idx = m.end()
    nextchar = doc[idx:idx + 1]
    return nextchar, idx
18038bce945fb35222254a0fedf5d3936bb83308
3,659,128
def normalized_cross_correlation(f, g): """ Normalized cross-correlation of f and g. Normalize the subimage of f and the template g at each step before computing the weighted sum of the two. Hint: you should look up useful numpy functions online for calculating the mean and standard deviation. Args: f: numpy array of shape (Hf, Wf). g: numpy array of shape (Hg, Wg). Returns: out: numpy array of shape (Hf, Wf). """ Hf, Wf = f.shape Hg, Wg = g.shape if Hg%2 == 0: Hg = Hg-1 if Wg%2 == 0: Wg = Wg-1 g = g[:Hg,:Wg] g_mean = np.mean(g) g_std = np.std(g) filter_vector = g.reshape([1,Hg*Wg]) normalized_filter_vec = (g.reshape([1,Hg*Wg]) - g_mean)/g_std out = np.zeros((Hf, Wf)) ### YOUR CODE HERE pad_height,pad_width = int((Hg-1)/2),int((Wg-1)/2) im_padded = zero_pad(f, pad_height, pad_width) for i in range(Hf): for j in range(Wf): patch_vector = im_padded[i:i+Hg,j:j+Wg].reshape([Hg*Wg,1]) patch_mean = np.mean(patch_vector) patch_std = np.std(patch_vector) normalized_patch_vec = (patch_vector - patch_mean)/patch_std out[i,j] = np.dot(normalized_filter_vec,normalized_patch_vec) ### END YOUR CODE return out
fb6057d882b655a43a7d4a7d3c7ced00d32eeabf
3,659,129
import numpy def sphere_coordinates(sphere, inversion=False): """ Compute spherical coordinates (longitude, latitude) on a sphere. Parameters ---------- sphere: (AimsTimeSurface_3_VOID) a sphere mesh: vertices must be on a sphere with center 0. inversion: bool if True, the longitude coord is inverted (useful for right hemisphere) Return ------ (longitude, latitude): tuple, each element being a TimeTexture_FLOAT """ # a vector of vertices where each vertex is a 3D point # with coordinates in millimeters if isinstance(sphere, (aims.AimsTimeSurface_3_VOID, aims.AimsTimeSurface_2_VOID, aims.AimsTimeSurface_4_VOID)): vert = sphere.vertex() nvert = numpy.asarray(vert) else: nvert = numpy.asarray(sphere) ######################################################################### # A latitude texture # ######################################################################### radius = numpy.sqrt(numpy.square(nvert[:, 0]) + numpy.square(nvert[:, 1])) sphere_lat = numpy.arctan2(radius, nvert[:, 2]) sphere_lat = -sphere_lat * 180. / numpy.pi + 180. slat_tex = aims.TimeTexture(sphere_lat.astype(numpy.float32)) ######################################################################### # A longitude texture # ######################################################################### sphere_lon = numpy.arctan2(nvert[:, 1], nvert[:, 0]) sphere_lon *= 180. / numpy.pi sphere_lon += 180 print('inversion: ', inversion) if inversion == "True": print("there is an inversion", inversion) sphere_lon = 360 - sphere_lon slon_tex = aims.TimeTexture(sphere_lon.astype(numpy.float32)) return slon_tex, slat_tex
82f9e9c0e969904414761ed2ebe70d30194277e5
3,659,130
from typing import Dict


def example_parameter_sets() -> Dict[str, ExampleParameterSet]:
    """Lists the available example parameter sets.

    They can be downloaded with :py:func:`~download_example_parameter_sets`."""
    # TODO how to add a new model docs should be updated with this part
    examples = chain(
        _wflow.example_parameter_sets(),
        _pcrglobwb.example_parameter_sets(),
        _lisflood.example_parameter_sets(),
    )
    return {e.name: e for e in examples}
cd60157809ca2abae77bc4616c7c46db55580818
3,659,131
def get_height(img):
    """
    Returns the number of rows in the image
    """
    return len(img)
765babc9fbc1468ef5045fa925843934462a3d32
3,659,132
def wpt_ask_for_name_and_coords():
    """asks for name and coordinates of waypoint that should be created"""
    name = input("Enter the name of the waypoint: ")
    print("Enter the coordinates (format: X XX°XX.XXX, X XXX°XX.XXX)")
    coordstr = input(">> ")
    return name, coordstr
d38a728c5a6ecd1fde9500175ea5895ade8c6880
3,659,133
def car_following_with_adp(distance_2_tan, radian_at_tan, distance_integral, K, estimated_dis, rec):
    """
    Control with `distance_2_tan`, `radian_at_tan` and `distance_integral` with `K`
    trained from the ADP algorithm. While following the car in front of it with a
    simple P controller and `distance_2_car`.
    """
    state = np.array([distance_2_tan, radian_at_tan, distance_integral])
    MID_K = 1.5
    diff = estimated_dis - 70  # try to stay 70cm away from the previous car
    pwm_mid = 60
    if diff < -40:
        return 0, 0
    elif diff >= 60:
        pwm_mid = 60
    else:
        pwm_mid = np.clip(45.0 + MID_K * diff, 30, 60)
    print('distance:', estimated_dis, 'diff:', diff, 'mid:', pwm_mid)
    rec.append([estimated_dis, pwm_mid, distance_2_tan, radian_at_tan, distance_integral])
    differential_drive = np.clip(-np.matmul(K, state), -100.0, 100.0)
    pwm_l_new = np.clip(pwm_mid - differential_drive / 2, 0, 100)
    pwm_r_new = np.clip(pwm_mid + differential_drive / 2, 0, 100)
    return pwm_l_new, pwm_r_new
7a49b257e7361451deae10d37a8d8ec811f4890d
3,659,134
def construct_full_available(cards, suits):
    """
    Construct suit availability grid - a list of available suits for each rank slot
    in each player's deck. Returns grid and array giving the total number of
    available suits for each slot.
    """
    num_players, num_in_deck = cards.shape
    # will store the number of possible cards that can fill each deck slot
    num_available = np.ones(cards.shape) * np.nan
    # will store the suits that can fill each deck slot
    available = []
    for player in range(num_players):
        # holds sublists of available suits for this player for each rank
        avail_for_player = []
        for rank in np.arange(num_in_deck):  # iterate over card ranks
            # list suits available to this player at this rank (can be empty)
            a = get_available(cards, suits, player, rank)
            avail_for_player.append(a)
            num_available[player, rank] = len(a)
        available.append(avail_for_player)
    return num_available, available
0f4b2712a1346372d0782edfbc7c7b69a8e9e8e6
3,659,135
import yaml


def get_commands_blacklist() -> list:
    """
    Get commands from `features.yml` to blacklist, preventing them from being
    added to the bot

    :returns: list
    """
    log.info("Getting commands blacklist...")
    cmds = []
    if osp.isfile(features_path):
        with open(features_path, 'r') as file:
            data = yaml.full_load(file)
            if not "commands" in data:
                log.warn("Commands blacklist object not found in features.yml file")
                return list()  # Return empty list
            commands = data["commands"]
            if not commands or len(commands) == 0:
                log.debug("Empty blacklist commands data, returning...")
                return list()  # Return empty list
            for c in commands:
                c_name = c["command"]
                e_enabled = c["enabled"] if "enabled" in c else True
                if not e_enabled:
                    cmds.append(c_name)
                    log.debug(f"Command Found | Blacklist | {c_name}")
    log.info(f"Found *{len(cmds)}* commands to blacklist.")
    return cmds
6f76dbf354efff7b845a50ca8180a904b6d3a6d2
3,659,136
import io def fit_gaussians(estimated_hapcov, chromosomes=None, output_dir=None, cov_max=None, cov_min=None, level=0, cov_sample=None): """ Fits a 7-component Gaussian mixture model to the coverage distribution of the sample, using the appropriate attributes of the PloidyEstimation object. The center of the first Gaussian is initialized from a narrow region around the value of the estimated_hapcov attribute. The centers of the other Gaussians are initialized in a region around the value of estimated_hapcov multiplied by consecutive whole numbers. The parameters of the fitted model (center, sigma and weight) for all seven Gaussians are both saved to the GaussDistParams.pkl file (in output_dir, for later reuse) and set as the value of the distribution_dict attribute. :param cov_sample: a sample of the coverage distribution of the investigated sample, if None, it is loaded from the temporary files of the output_dir (default: None) (array-like) :param cov_min: the maximum value of the coverage for a position to be considered in the estimation (default: None) (int) :param output_dir: the path to the output directory of the PloidyEstimator object, where temporary files are located. If not None, distribution parameters are saved there as GaussDistParams.pkl. (default: None) (str) :param chromosomes: list of chromosomes for the sample (default: None) (array-like) :param estimated_hapcov: the estimated value for the haploid coverage, used as prior (float) :param level: the level of indentation used in verbose output (default: 0) (int) :returns: dictionary containing the fitted parameters of the 7 Gaussians """ def get_samples(coverage_distribution, estimated_haploid_cov, number_of_iterations, burn_period): K = 7 halfwidth_of_uniform = 0.2 __gc.collect() model = __pm.Model() with model: p = __pm.Dirichlet('p', a=__np.array([1., 1., 1., 1., 1., 1., 1.]), shape=K) c1 = __pm.Uniform('c1', (1 - halfwidth_of_uniform) * estimated_haploid_cov, (1 + halfwidth_of_uniform) * estimated_haploid_cov) means = __tt.stack([c1, c1 * 2, c1 * 3, c1 * 4, c1 * 5, c1 * 6, c1 * 7]) order_means_potential = __pm.Potential('order_means_potential', __tt.switch(means[1] - means[0] < 0, -__np.inf, 0) + __tt.switch(means[2] - means[1] < 0, -__np.inf, 0)) sds = __pm.Uniform('sds', lower=0, upper=estimated_haploid_cov / 2, shape=K) category = __pm.Categorical('category', p=p, shape=len(coverage_distribution)) points = __pm.Normal('obs', mu=means[category], sd=sds[category], observed=coverage_distribution) with model: step1 = __pm.Metropolis(vars=[p, sds, means]) step2 = __pm.ElemwiseCategorical(vars=[category], values=[0, 1, 2, 3, 4, 5, 6]) __logging.getLogger("pymc3").setLevel(__logging.WARNING) tr = __pm.sample(draw=number_of_iterations-burn_period, tune=burn_period, step=[step1, step2], progressbar=False, verbose=0, compute_convergence_checks=False) # trace = tr[burn_period:] # return trace return tr if cov_sample is None: cov_sample = io.get_coverage_distribution(chromosomes=chromosomes, output_dir=output_dir, cov_max=cov_max, cov_min=cov_min) iterations2 = 15000 burn_beginning2 = 10000 # logger = __logging.getLogger("pymc3") # logger.propagate = False trace2 = get_samples(coverage_distribution=cov_sample, estimated_haploid_cov=estimated_hapcov, number_of_iterations=iterations2, burn_period=burn_beginning2) std_trace = trace2.get_values('sds', chains=[0]) p_trace = trace2.get_values('p', chains=[0]) sigma = std_trace.mean(axis=0) p = p_trace.mean(axis=0) mu = __np.array([trace2.get_values('c1', chains=[0]).mean() * (i + 1) for 
i in range(7)]) prior_dict = {'mu': mu, 'sigma': sigma, 'p': p} del trace2 if output_dir: io.save_obj(prior_dict, output_dir + '/GaussDistParams') return prior_dict
660952140f6ea8685488a108e11ea1ca6f4e7fc5
3,659,137
from natsort import natsorted
from sklearn.cluster import DBSCAN


def remove_outliers(cords, eps: int = 1, min_samples: int = 2):
    """Remove outlying cells based on UMAP embeddings with DBScan (density based clustering)

    Call as: sub.obs["d_cluster"] = remove_outliers(sub.obsm["X_umap"], min_samples = 10)

    Args:
        cords: adata UMAP coordinates, typically adata.obsm["X_umap"]
        eps: Maximum distance between two clusters to still be considered neighbors
        min_samples: Minimum samples of a cluster

    Returns:
        Pandas DataFrame of clusters
    """
    clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(cords)
    cluster = clustering.labels_.astype("U")
    return pd.Categorical(cluster, categories=natsorted(np.unique(cluster)))
0b4c581158bc3c074b60ad5d29b333418a4f52ce
3,659,138
def sum_squares(n):
    """
    Returns: sum of squares from 1 to n-1

    Example: sum_squares(5) is 1+4+9+16 = 30

    Parameter n: The number of steps
    Precondition: n is an int > 0
    """
    # Accumulator
    total = 0
    for x in range(n):
        total = total + x * x
    return total
669a5aa03a9d9a9ffe74e48571250ffa38a7d319
3,659,139
def resolve_attribute(thing, name):
    """
    A replacement resolver function for looking up symbols as members of
    *thing*. This is effectively the same as ``thing.name``. The *thing*
    object can be a :py:func:`~collections.namedtuple`, a custom Python class
    or any other object. Each of the members of *thing* must be of a
    compatible data type.

    .. warning::
        This effectively exposes all members of *thing*. If any members are
        sensitive, then a custom resolver should be used that checks *name*
        against a whitelist of attributes that are allowed to be accessed.

    :param thing: The object on which the *name* attribute will be accessed.
    :param str name: The symbol name that is being resolved.
    :return: The value for the corresponding attribute *name*.
    """
    if not hasattr(thing, name):
        raise errors.SymbolResolutionError(name, thing=thing)
    return getattr(thing, name)
76f7b4548a177168d98bb5cdf4c022bfe8e0d36e
3,659,140
def moment_fluxes(indices, wts_left, wts_right, xi_left, xi_right):
    """
    Computes moment fluxes

    inputs:
    -------
    num_nodes: number of quadrature nodes, depends on inversion algorithm
    indices:   moment indices, size [ num_moments, num_internal_coords ]
    wts_left:  weights on the left side, size [ num_nodes ]
    wts_right: weights on the right side, size [ num_nodes ]
    xi_left:   abscissas on the left side, size [ num_internal_coords, num_nodes ]
    xi_right:  abscissas on the right side, size [ num_internal_coords, num_nodes ]
    """
    num_moments = len(indices)
    num_coords, num_nodes = xi_left.shape

    flux = np.zeros(num_moments)
    for i_moment in range(num_moments):
        for i_node in range(num_nodes):
            # compute local fluxes
            flux_left = local_flux(
                wts_left[i_node], xi_left[:, i_node], indices[i_moment, :]
            )
            flux_right = local_flux(
                wts_right[i_node], xi_right[:, i_node], indices[i_moment, :]
            )
            # limiter (?)
            flux_left = flux_left * max(xi_left[0, i_node], 0.0)
            flux_right = flux_right * min(xi_right[0, i_node], 0.0)
            # quadrature
            flux[i_moment] += flux_left + flux_right
    return flux
24ed54b56afe127963e6cc7f9d74448e8415edb0
3,659,141
import argparse
import sys


def get_args():
    """Get the command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Emulate wc (word count)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('file',
                        help='Input file(s)',
                        metavar='FILE',
                        nargs='*',
                        type=argparse.FileType('rt'),
                        default=[sys.stdin])
    return parser.parse_args()
8a891c5b3dac1f62db31455d596d7744335d8530
3,659,142
def froc_curve_per_side(df_gt, df_pred, thresholds, verbose, cases="all"): """ Compute FROC curve per side/breast. All lesions in a breast are considered TP if any lesion in that breast is detected. """ assert cases in ["all", "cancer", "benign"] if not cases == "all": df_exclude = df_gt[~(df_gt["Class"] == cases)] df_gt = df_gt[df_gt["Class"] == cases] df_pred = df_pred[~(df_pred["StudyUID"].isin(set(df_exclude["StudyUID"])))] df_gt["Side"] = df_gt["View"].astype(str).str[0] df_pred["Side"] = df_pred["View"].astype(str).str[0] total_volumes = len(df_pred.drop_duplicates(subset=["StudyUID", "View"])) total_tps = len(df_gt.drop_duplicates(subset=["PatientID", "Side"])) tpr = [] fps = [] if verbose: print("{} cases FROC:".format(cases.upper())) for th in sorted(thresholds, reverse=True): df_th = df_pred[df_pred["Score"] >= th] df_th_unique_tp = df_th.drop_duplicates(subset=["PatientID", "Side", "TP"]) num_tps_th = float(sum(df_th_unique_tp["TP"])) tpr_th = num_tps_th / total_tps num_fps_th = float(len(df_th[df_th["TP"] == 0])) fps_th = num_fps_th / total_volumes tpr.append(tpr_th) fps.append(fps_th) if verbose: print( "Sensitivity {0:.2f} at {1:.2f} FPs/volume (threshold: {2:.4f})".format( tpr_th * 100, fps_th, th ) ) return tpr, fps
6a113856a920f775be3ce652fa09d9d79fb9be00
3,659,143
import itertools def make_lists(*args, **kwargs): """ The make_lists function attaches auxiliary things to an input key_list of (normally) AD objects. Each key gets exactly one auxiliary thing from each other list -- these lists can be as long as the key_list, or have only one item in (in which case they don't have to be lists at all). Parameters ---------- args: lists of str/AD (or single str/AD) key_list and auxiliary things to be matched to each AD kwargs["force_ad"]: bool coerce strings into AD objects? Returns ------- tuple of lists the lists made from the keys and values """ log = logutils.get_logger(__name__) force_ad = kwargs.pop("force_ad", False) if kwargs: raise TypeError("make_lists() got unexpected keyword arguments " "{}".format(kwargs.keys())) ret_value = [arg if isinstance(arg, (list, tuple)) else [arg] for arg in args] # We allow only one value that can be assigned to multiple keys len_list = len(ret_value[0]) if len_list > 1: for i in range(1, len(ret_value)): if len(ret_value[i]) == 1: ret_value[i] *= len_list if force_ad: # We only want to open as many AD objects as there are unique entries, # so collapse all items in lists to a set and multiple keys with the # same value will be assigned references to the same open AD object ad_map_dict = {} for x in set(itertools.chain(*ret_value)): try: ad_map_dict.update({x: x if isinstance(x, astrodata.AstroData) or x is None else astrodata.open(x)}) except: ad_map_dict.update({x: None}) log.warning(f"Cannot open file {x}") ret_value = [[ad_map_dict[x] for x in List] for List in ret_value] return ret_value
5bdfd32ad317238e21f631655d01bf629722c959
3,659,144
def get_free_comment_url_ajax(content_object, parent=None, ajax_type='json'):
    """
    Given an object and an optional parent, this tag gets the URL to POST to for the
    creation of new ``FreeThreadedComment`` objects. It returns the latest created
    object in the AJAX form of the user's choosing (json or xml).
    """
    kwargs = get_contenttype_kwargs(content_object)
    kwargs.update({'ajax': ajax_type})
    if parent:
        if not isinstance(parent, FreeThreadedComment):
            raise template.TemplateSyntaxError, "get_free_comment_url_ajax requires its parent object to be of type FreeThreadedComment"
        kwargs.update({'parent_id': getattr(parent, 'pk', getattr(parent, 'id'))})
        return reverse('tc_free_comment_parent_ajax', kwargs=kwargs)
    else:
        return reverse('tc_free_comment_ajax', kwargs=kwargs)
7d22d2f2b0e012d462d0244d8154cd9ae00ee608
3,659,145
import gc


def getDefensivePacts(playerOrID, askingPlayerOrID):
    """
    Returns a list of CyPlayers who have a Defensive Pact with playerOrID.

    The askingPlayerOrID is used to limit the list to players they have met.
    """
    pacts = []
    askedPlayer, askedTeam = getPlayerAndTeam(playerOrID)
    askingPlayer, askingTeam = getPlayerAndTeam(askingPlayerOrID)
    for player in players(alive=True, barbarian=False, minor=False):
        if (askedPlayer.getTeam() != player.getTeam()
                and (askingTeam.isHasMet(player.getTeam()) or gc.getGame().isDebugMode())):
            if askedTeam.isDefensivePact(player.getTeam()):
                pacts.append(player)
    return pacts
246c67fa315f41ca2f417a880a970e52d68775c5
3,659,146
def cosine_score(vector1, vector2):
    """Calculate cosine score between two spectral vectors."""
    return np.dot(vector1, vector2) / np.sqrt(np.dot(np.dot(vector1, vector1),
                                                     np.dot(vector2, vector2)))
5b206abb179f1635eeda6267e8019901c480afad
3,659,147
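The expression is the standard cosine similarity, cos(theta) = (v1 . v2) / (||v1|| * ||v2||); the inner sqrt(dot(dot(v1, v1), dot(v2, v2))) is the product of the two squared norms under one square root. An equivalent, more direct form (a sketch, not taken from the source):

import numpy as np

def cosine_score_alt(vector1, vector2):
    # same value: dot product divided by the product of Euclidean norms
    return np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))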
def fixture_times() -> Problem[int]:
    """Generate a problem which tests a times function."""

    @test_case(4, 6)
    @test_case(-2, 16)
    @test_case(2, -3, aga_hidden=True, aga_output=-6)
    @problem()
    def times(x: int, y: int) -> int:
        """Compute x * y."""
        return x * y

    return times
a00286a5827ec0c4fe7cb390d0d420d11823eb15
3,659,148
def get_axis_bounds(ax=None):
    """Obtain bounds of axis in format compatible with ipyleaflet

    Returns:
        bounds np.array with lat and lon bounds.
        bounds.tolist() gives [[s, w],[n, e]]
    """
    if ax is None:
        ax = plt.gca()
    return np.array([ax.get_ylim(), ax.get_xlim()]).T
32bc97cf6596775dbfdffea655f5346a1fd21764
3,659,149
def get_pymatgen_structure(cell: tuple) -> Structure:
    """
    Get pymatgen structure from cell.

    Args:
        cell: Cell (lattice, scaled_positions, symbols).
    """
    return Structure(lattice=cell[0], coords=cell[1], species=cell[2])
c76e0e71da83737f079d36e56b4867e551affeff
3,659,150
def get_next_event(event_id: int):
    """Returns the next event from the selected one.

    This route may fail if the event is not repeated, or if the event is too
    far ahead in time (to avoid over-generation of events).
    """
    # TODO(funkysayu): Implement the user visibility limit.

    # Check if we already created the event.
    maybe_created = Event.query.filter_by(parent_id=event_id).one_or_none()
    if maybe_created is not None:
        return jsonify(maybe_created.to_dict())

    event = Event.query.filter_by(id=event_id).one_or_none()
    if event is None:
        return jsonify(error='Event %r not found' % event_id), 404

    try:
        next_event = event.create_next_event()
    except ValueError:
        return jsonify(
            error='Cannot create the next occurrence of a non-repeated event.'), 412

    # Ensure we have an event generation limit.
    if next_event.date - event.date > MAX_TIMEDELTA_EVENT_GENERATION:
        return jsonify(
            error='Event is over the maximum generation period',
            max_period=MAX_TIMEDELTA_EVENT_GENERATION), 400

    db.session.add(next_event)
    db.session.commit()
    return jsonify(next_event.to_dict())
7a9865352dda9dd44c82a92f7f807a64a5ed993d
3,659,151
def conditional_samples(x_3, x_prime_3, MC_method, M):
    """Generate mixed sample sets of interest distributed according to a
    conditional PDF.

    Parameters
    ----------
    x_3 : np.ndarray
        Array with shape (n_draws, 3).
    x_prime_3 : np.ndarray
        Array with shape (n_draws, 3).
    MC_method : string
        Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
        where "DLR" denotes the double loop reordering approach.
    M : int
        The number of conditional bins to generate if `MC_method` is "DLR".

    Returns
    -------
    x_mix : np.ndarray
        Mixed sample sets. Shape has the form (n_draws, 3, n_draws, 3).
    """
    n_draws, n_params = x_3.shape

    if MC_method == "Brute force":
        x_3_mix = np.zeros((n_draws, n_params, n_draws, n_params))
        for i in range(n_params):
            for j in range(n_draws):
                x_3_mix[j, i] = x_3
                x_3_mix[j, i, :, i] = x_prime_3[j, i]

    if MC_method == "DLR":
        conditional_bin = x_3[:M]
        x_3_mix = np.zeros((M, n_params, n_draws, n_params))
        # subdivide unconditional samples into M equally sized bins,
        # within each bin x_i being fixed.
        for i in range(n_params):
            for j in range(M):
                x_3_mix[j, i] = x_3
                x_3_mix[j, i, :, i] = conditional_bin[j, i]

    return x_3_mix
e80d238f27a65271115fd3de2f574bfc3bbdb432
3,659,152
def recombine(geno_matrix, chr_index, no_loci):  # , no_samples):
    """
    Recombine at randomly generated breakpoints.
    """
    recomb = {0: 0, 1: 2, 2: 1, 3: 3}  # '0|1' <-> '1|0'
    no_samples = geno_matrix.shape[0]
    # print(no_samples)
    masked, bp_list = designate_breakpoints(chr_index, no_loci, no_samples)
    # masked, bp_list = designate_breakpoints(chr_index, no_loci, no_samples)
    z = np.copy(geno_matrix)
    if np.asarray(bp_list).size > 0:
        # this would modify the original geno_matrix too! Work with copy!
        try:
            z[masked] = np.vectorize(recomb.get)(z[masked])
        except:
            return z
    return z
455ee154763b31e4d5baa9653caa9f9a118f248e
3,659,153
def update_record_files_async(object_version):
    """Get the bucket id and spawn a task to update record metadata."""
    # convert to string to be able to serialize it when sending to the task
    str_uuid = str(object_version.bucket_id)
    return update_record_files_by_bucket.delay(bucket_id=str_uuid)
ba0ed0af4e6a604801344aa459b6279c5a79dfae
3,659,154
import platform


def check_platform():
    """Return the platform system name as a string."""
    return platform.system()
73e813c55807e7d84517cb7ce51ce9db34e42c23
3,659,155
def get_field_keys(table):
    """
    Field keys for a selected table
    :param table:
    :return: list of dictionaries
    """
    cql = 'SHOW FIELD KEYS FROM \"{}\"'.format(table)
    response = db_man.influx_qry(cql).get_points()
    return [x for x in response]
ca7be2b79c1641d407fa52ea805e5d99bb2b5c42
3,659,156
def extract_text_from_spans(spans, join_with_space=True, remove_integer_superscripts=True): """ Convert a collection of page tokens/words/spans into a single text string. """ if join_with_space: join_char = " " else: join_char = "" spans_copy = spans[:] if remove_integer_superscripts: for span in spans: flags = span['flags'] if flags & 2**0: # superscript flag if is_int(span['text']): spans_copy.remove(span) else: span['superscript'] = True if len(spans_copy) == 0: return "" spans_copy.sort(key=lambda span: span['span_num']) spans_copy.sort(key=lambda span: span['line_num']) spans_copy.sort(key=lambda span: span['block_num']) # Force the span at the end of every line within a block to have exactly one space # unless the line ends with a space or ends with a non-space followed by a hyphen line_texts = [] line_span_texts = [spans_copy[0]['text']] for span1, span2 in zip(spans_copy[:-1], spans_copy[1:]): if not span1['block_num'] == span2['block_num'] or not span1['line_num'] == span2['line_num']: line_text = join_char.join(line_span_texts).strip() if (len(line_text) > 0 and not line_text[-1] == ' ' and not (len(line_text) > 1 and line_text[-1] == "-" and not line_text[-2] == ' ')): if not join_with_space: line_text += ' ' line_texts.append(line_text) line_span_texts = [span2['text']] else: line_span_texts.append(span2['text']) line_text = join_char.join(line_span_texts) line_texts.append(line_text) return join_char.join(line_texts).strip()
ccb45164f695bdbbc53eac9c4cf6596e67c24fd0
3,659,157
def cf_resource_pool(cli_ctx, *_):
    """ Client factory for resourcepools. """
    return cf_connectedvmware(cli_ctx).resource_pools
6cc838a7ad23786b5d86f945da98410506f7e758
3,659,158
import os import logging import sys import re def init_log(): """ Initialise the logging. """ level = script_args.log_level log_dir = os.path.abspath(script_args.log_dir) logger = logging.getLogger(__name__) log_format = ( '[%(asctime)s] [%(levelname)s] ' '[%(name)s] [%(funcName)s():%(lineno)s] ' '[PID:%(process)d] %(message)s') if not os.path.isdir(log_dir): logging.error('Logging directory \'%s\' does not exist', log_dir) sys.exit(os.EX_IOERR) dir_re = re.compile(u'/$') if not re.match(dir_re, log_dir): log_dir += "/" # Define the logging stream stream = open(log_dir + LOG_FILE, 'w+') log_levels = { 'unset': logging.NOTSET, 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL } log_level = log_levels[level] coloredlogs.install( level=log_level, fmt=log_format, datefmt='%d/%m/%Y %H:%M:%S', stream=stream) log('Logging to \'%s\' at level \'%s\'' % (log_dir + LOG_FILE, level)) return logger
ac3a172486980271878914481eaf7fbec3c80ecc
3,659,159
from typing import OrderedDict


def get_bcolz_col_names(cols):
    """Normalize column names to the naming rules of a bcolz table and return an OrderedDict."""
    trantab = str.maketrans(IN_TABLE, OUT_TABLE)  # build the translation table
    # col_names = OrderedDict(
    #     {col: get_acronym(col.translate(trantab)) for col in cols})
    col_names = OrderedDict()
    for col in cols:
        if col in (AD_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME):
            col_names[col] = col
        else:
            col_names[col] = regular_name(col, trantab)
    if len(col_names.values()) != len(set(col_names.values())):
        raise ValueError("Normalized column names contain duplicate values")
    return col_names
9f52cd5adba9ef5d45ff74ef9b35825b80e2c621
3,659,160
import torch.nn.functional as F


def classify_loss(logits, target, eps):
    """Cross-entropy classification loss, with label smoothing when eps > 0."""
    if eps > 0:
        loss = cross_entropy_with_smoothing(logits, target, eps, None)
    else:
        loss = F.cross_entropy(logits, target.view(-1))
    return loss
549d2c1cbd3275153960ffde6f029c231b9e5703
3,659,161
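A quick smoke test for the loss above (an illustrative sketch of my own, not part of the original source): it assumes PyTorch is installed and exercises only the eps=0 branch, since the smoothing branch needs the project's own cross_entropy_with_smoothing helper.

import torch

logits = torch.randn(8, 10)                  # batch of 8, 10 classes
target = torch.randint(0, 10, (8,))          # integer class labels
print(classify_loss(logits, target, eps=0))  # plain cross-entropy, a scalar tensor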
def flip(position, adjacent):
    """Find the furthest position on the grid up to which the player has captured enemy pieces."""
    interval = (adjacent[0] - position[0], adjacent[1] - position[1])
    if adjacent[0] < 0 or adjacent[0] > (8 * tile_size):
        return False
    elif adjacent[1] < 0 or adjacent[1] > (8 * tile_size):
        return False
    check_piece = (adjacent[0] + interval[0], adjacent[1] + interval[1])
    if check_piece in current_piece:
        flip_back(adjacent, (interval[0] * -1, interval[1] * -1))
    else:
        return flip(adjacent, check_piece)
f6691ae4fe078668220c68c1df4706a0f5825faf
3,659,162
def is_android_raw(raw): """ Returns a string that describes the type of file, for common Android specific formats """ val = None # We do not check for META-INF/MANIFEST.MF, # as you also want to analyze unsigned APKs... # AndroidManifest.xml should be in every APK. # classes.dex and resources.arsc are not required! # if raw[0:2] == b"PK" and b'META-INF/MANIFEST.MF' in raw: # TODO this check might be still invalid. A ZIP file with stored APK inside would match as well. # probably it would be better to rewrite this and add more sanity checks. if raw[0:2] == b"PK" and b'AndroidManifest.xml' in raw: val = "APK" elif raw[0:3] == b"dex": val = "DEX" elif raw[0:3] == b"dey": val = "DEY" elif raw[0:4] == b"\x03\x00\x08\x00" or raw[0:4] == b"\x00\x00\x08\x00": val = "AXML" elif raw[0:4] == b"\x02\x00\x0C\x00": val = "ARSC" return val
6bdf574b3c8c36ead45f6f9b84c19705c1597b08
3,659,163
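A few synthetic byte strings illustrating the magic-byte checks above (my own sketch, not test data from the original project):

assert is_android_raw(b"PK\x03\x04" + b"AndroidManifest.xml" + b"\x00" * 16) == "APK"
assert is_android_raw(b"dex\n035\x00" + b"\x00" * 64) == "DEX"
assert is_android_raw(b"\x03\x00\x08\x00" + b"\x00" * 16) == "AXML"
assert is_android_raw(b"\x02\x00\x0c\x00" + b"\x00" * 16) == "ARSC"
assert is_android_raw(b"\xca\xfe\xba\xbe") is None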
def zeros_from_spec(nested_spec, batch_size):
    """Create nested zero Tensors or Distributions.

    A zero tensor with shape[0]=`batch_size` is created for each TensorSpec,
    and a distribution with all of its parameters as zero Tensors is created
    for each DistributionSpec.

    Args:
        nested_spec (nested TensorSpec or DistributionSpec):
        batch_size (int): batch size added as the first dimension to the
            shapes in TensorSpec
    Returns:
        nested Tensor or Distribution
    """

    def _zero_tensor(spec):
        if batch_size is None:
            shape = spec.shape
        else:
            spec_shape = tf.convert_to_tensor(value=spec.shape, dtype=tf.int32)
            shape = tf.concat(([batch_size], spec_shape), axis=0)
        dtype = spec.dtype
        return tf.zeros(shape, dtype)

    param_spec = nest_utils.to_distribution_param_spec(nested_spec)
    params = tf.nest.map_structure(_zero_tensor, param_spec)
    return nest_utils.params_to_distributions(params, nested_spec)
8c89a930a6fd81d793c95166b90f4621312e69a9
3,659,164
def type_to_str(t): """Return str of variable type.""" if not hasattr(t, "broadcastable"): return str(t) s = broadcastable_to_str(t.broadcastable) if s == "": s = str(t.dtype) else: s = dtype_to_char(t.dtype) + s return s
a07982cbc6c8922c43620d23a3dcced24bafbef4
3,659,165
def save(self, fname="", ext="", slab="", **kwargs): """Saves all current database information. APDL Command: SAVE Parameters ---------- fname File name and directory path (248 characters maximum, including the characters needed for the directory path). An unspecified directory path defaults to the working directory; in this case, you can use all 248 characters for the file name. ext Filename extension (eight-character maximum). slab Mode for saving the database: ALL - Save the model data, solution data and post data (element tables, etc.). This value is the default. MODEL - Save the model data (solid model, finite element model, loadings, etc.) only. SOLU - Save the model data and the solution data (nodal and element results). Notes ----- Saves all current database information to a file (File.DB). In interactive mode, an existing File.DB is first written to a backup file (File.DBB). In batch mode, an existing File.DB is replaced by the current database information with no backup. The command should be issued periodically to ensure a current file backup in case of a system "crash" or a "line drop." It may also be issued before a "doubtful" command so that if the result is not what was intended the database may be easily restored to the previous state. A save may be time consuming for large models. Repeated use of this command overwrites the previous data on the file (but a backup file is first written during an interactive run). When issued from within POST1, the nodal boundary conditions in the database (which were read from the results file) will overwrite the nodal boundary conditions existing on the database file. Internal nodes may be created during solution (for example, via the mixed u-P formulation or generalized plane strain option for current- technology elements, the Lagrangian multiplier method for contact elements or the MPC184 elements, or the quadratic or cubic option of the BEAM188 and PIPE288 elements). It is sometimes necessary to save the internal nodes in the database for later operations, such as cutting boundary interpolations (CBDOF) for submodeling. To do so, issue the SAVE command after the first SOLVE command. In general, saving after solving is always a good practice. This command is valid in any processor. """ return self.run(f"SAVE,{fname},{ext},,{slab}", **kwargs)
ddc79dc0f54e32d6cd96e115ad9842c1689c17b1
3,659,166
def rollout( env, agent, max_path_length=np.inf, render=False, render_kwargs=None, fast_rgb=True ): """ The following value for the following keys will be a 2D array, with the first dimension corresponding to the time dimension. - observations - actions - rewards - next_observations - terminals The next two elements will be lists of dictionaries, with the index into the list being the index into the time - agent_infos - env_infos """ if render_kwargs is None: render_kwargs = {} observations = [] actions = [] rewards = [] terminals = [] agent_infos = [] env_infos = [] rgb_array = [] o = env.reset() agent.reset() next_o = None path_length = 0 if hasattr(env, 'sim') and 'fixed' in env.sim.model.camera_names: camera_name = 'fixed' else: camera_name = None if render: # import ipdb; ipdb.set_trace(context=10) if render_kwargs['mode'] == 'rgb_array': if not fast_rgb: rgb_array.append(env.sim.render(500, 500, camera_name=camera_name)) else: rgb_array.append(np.zeros((500, 500, 3), dtype=np.uint8)) else: env.render(**render_kwargs) # print("###############################") while path_length < max_path_length: a, agent_info = agent.get_action(o) # print(a) next_o, r, d, env_info = env.step(a) observations.append(o) rewards.append(r) terminals.append(d) actions.append(a) agent_infos.append(agent_info) env_infos.append(env_info) path_length += 1 if d: break o = next_o if render: if render_kwargs['mode'] == 'rgb_array': if path_length % 3 == 0 or not fast_rgb: rgb_array.append(env.sim.render(500, 500, camera_name=camera_name)) else: rgb_array.append(np.zeros((500, 500, 3), dtype=np.uint8)) else: env.render(**render_kwargs) actions = np.array(actions) if len(actions.shape) == 1: actions = np.expand_dims(actions, 1) observations = np.array(observations) if len(observations.shape) == 1: observations = np.expand_dims(observations, 1) next_o = np.array([next_o]) next_observations = np.vstack( ( observations[1:, :], np.expand_dims(next_o, 0) ) ) result = dict( observations=observations, actions=actions, rewards=np.array(rewards).reshape(-1, 1), next_observations=next_observations, terminals=np.array(terminals).reshape(-1, 1), agent_infos=agent_infos, env_infos=env_infos, ) if len(rgb_array) > 0 and rgb_array[0] is not None: result['rgb_array'] = np.array(rgb_array) return result
a90c712155648773e72d5226b0f2be4c7fe72b2a
3,659,167
import os

import scipy.ndimage


def _get_colors(data, verbose=False):
    """
    Get how often each color is used in data.

    Parameters
    ----------
    data : list of dict
        Each dict has a key 'path' pointing to an image file.
    verbose : bool, optional

    Returns
    -------
    color_count : dict
        Maps a grayscale value (0..255) to how often it was in `data`
    """
    color_count = {}
    for i in range(256):
        color_count[i] = 0
    for i, data_item in enumerate(data):
        if i % 1000 == 0 and i > 0 and verbose:
            print("%i of %i done" % (i, len(data)))
        fname = os.path.join(".", data_item["path"])
        img = scipy.ndimage.imread(fname, flatten=False, mode="L")
        for row in img:
            for pixel in row:
                color_count[pixel] += 1
    return color_count
7c515f3559a00410d6cc382778bc49efdfa387c9
3,659,168
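The per-pixel Python loop above is slow for large images; a vectorized alternative (my own sketch, not part of the original source) counts all 256 grayscale values at once with numpy.bincount. Note also that scipy.ndimage.imread was removed from newer SciPy releases, so a modern rewrite would load the image with a different library.

import numpy as np

img = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)  # stand-in for a loaded grayscale image
counts = np.bincount(img.ravel(), minlength=256)                # counts[v] == number of pixels with value v
color_count = dict(enumerate(counts.tolist()))                  # same result shape as the loop above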
def data_dir():
    """The data directory."""
    return DATA
f7696b434ebdab7ec1619f42bed124ba562de64d
3,659,169
def create_single_test(j):
    """Walk through the json cases and recursively write the test cases"""
    si = []
    for tnum, c in enumerate(j['cases']):
        if 'cases' in c:
            si.extend(create_single_test(c))
        else:
            si.extend(write_testcase(c, tnum))
    return si
4a37a95f59e90b5314ea225f58144fa112b9722e
3,659,170
def _token_text(token):
    """Helper to get the text of an ANTLR token w/o the <EOF>"""
    istream = token.getInputStream()
    if istream is None:
        return token.text
    n = istream.size
    if token.start >= n or token.stop >= n:
        return []
    return token.text
0821c44eea9dfc229034bebc45211f8e6336c552
3,659,171
def show_interface(enode, dev, shell=None): """ Show the configured parameters and stats of an interface. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str dev: Unix network device name. Ex 1, 2, 3.. :rtype: dict :return: A combined dictionary as returned by both :func:`topology_lib_ip.parser._parse_ip_addr_show` :func:`topology_lib_ip.parser._parse_ip_stats_link_show` """ assert dev cmd = 'ip addr list dev {ldev}'.format(ldev=dev) response = enode(cmd, shell=shell) first_half_dict = _parse_ip_addr_show(response) d = None if (first_half_dict): cmd = 'ip -s link list dev {ldev}'.format(ldev=dev) response = enode(cmd, shell=shell) second_half_dict = _parse_ip_stats_link_show(response) d = first_half_dict.copy() d.update(second_half_dict) return d
54ae542cf5df747ad45e016b8296a7ae5408635e
3,659,172
def get_params_for_category_api(category):
    """Method to get `GET` parameters for querying MediaWiki for category details.

    :param category: category name to be passed in params.
    :return: GET parameters `params`
    """
    params = CATEGORY_API_PARAMS.copy()
    params['cmtitle'] = 'Category:' + category
    return params
c97be0a2aae9b1d92e5a02d4376e0a186f669735
3,659,173
def get_dict_or_generate(dictionary, key, generator):
    """Get value from dict or generate one using a function on the key"""
    if key in dictionary:
        return dictionary[key]
    value = generator(key)
    dictionary[key] = value
    return value
e31cd2b6661cf45e5345ce57d1e628174e6fd732
3,659,174
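A short usage sketch of the memoization helper above (illustrative names only):

def make_slug(s):
    return s.lower().replace(" ", "-")

slug_cache = {}
print(get_dict_or_generate(slug_cache, "Hello World", make_slug))  # 'hello-world' (computed and stored)
print(get_dict_or_generate(slug_cache, "Hello World", make_slug))  # 'hello-world' (now served from the dict)
print(slug_cache)                                                  # {'Hello World': 'hello-world'}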
def createNotInConfSubGraph(graphSet, possibleSet):
    """
    Return a subgraph by removing all incoming edges to nodes in the possible set.
    """
    subGraph = {}
    for i in graphSet:
        subGraph[i] = graphSet[i] - possibleSet
    return subGraph
d3cbee9049416d7ff865306713e9a12f26717fae
3,659,175
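A small worked example of my own, using the dict-of-sets adjacency the function expects:

graphSet = {1: {2, 3}, 2: {3}, 3: {1}}
possibleSet = {3}

print(createNotInConfSubGraph(graphSet, possibleSet))
# {1: {2}, 2: set(), 3: {1}} -- every edge pointing into node 3 has been dropped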
import numpy as np


def _backprop_gradient_pure(dL, L):
    """
    Given the derivative of an objective fn with respect to the Cholesky factor L,
    compute the derivative with respect to the original matrix K, defined as

        K = L L^T

    where L was obtained by Cholesky decomposition.
    """
    dL_dK = np.tril(dL).copy()
    N = L.shape[0]
    for k in range(N - 1, -1, -1):
        for j in range(k + 1, N):
            for i in range(j, N):
                dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
                dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
        for j in range(k + 1, N):
            dL_dK[j, k] /= L[k, k]
            dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
        dL_dK[k, k] /= (2 * L[k, k])
    return dL_dK
28ab304a375e20f952da341024a09477221d54c5
3,659,176
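A numerical sanity check of the Cholesky backprop above (an illustrative sketch of my own, assuming NumPy): it compares the analytic gradient of f(K) = sum(chol(K)) against a central finite difference, perturbing one lower-triangular entry of K symmetrically.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
K = A @ A.T + 4 * np.eye(4)               # a well-conditioned SPD matrix
L = np.linalg.cholesky(K)

dL = np.ones_like(L)                      # gradient of f = sum(L) with respect to L
dK = _backprop_gradient_pure(dL, L)       # analytic gradient with respect to K (lower triangle)

i, j, eps = 2, 1, 1e-6
E = np.zeros_like(K)
E[i, j] = E[j, i] = eps                   # symmetric perturbation of one off-diagonal entry
fd = (np.linalg.cholesky(K + E).sum() - np.linalg.cholesky(K - E).sum()) / (2 * eps)
print(dK[i, j], fd)                       # the two values should agree to several decimals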
import random


def get_random_instance() -> random.Random:
    """
    Return the module-level Random instance used by the random module.
    """
    return random._inst
ee66055275153ce8c3eae67eade6e32e50fe1d79
3,659,177
import types def to(cond, inclusive = True): """ Stream elements until the one that fits some condition. Arguments: cond -- Either a function or some other object. In the first case, the function will be applied to each element; in the second case, the object will be compared (using ==) with each element. Keyword Arguments: inclusive -- Whether the element first matching the criteria is streamed (default True) See Also: :func:`dagpype.filt` :func:`dagpype.from_` :func:`dagpype.from_to` :func:`dagpype.skip` :func:`dagpype.nth` :func:`dagpype.slice_` :func:`dagpype.tail` Examples: >>> source([1, 2, 3, 4, 3, 2, 1]) | to(2) | to_list() [1, 2] >>> source([1, 2, 3, 4, 3, 2, 1]) | to(2, False) | to_list() [1] >>> source([1, 2, 3, 4, 3, 2, 1]) | to(lambda d: d % 3 == 0) | to_list() [1, 2, 3] """ @filters def _dagpype_internal_fn_act(target): try: if isinstance(cond, types.FunctionType): while True: e = (yield) if cond(e): break target.send(e) else: while True: e = (yield) if e == cond: break target.send(e) if inclusive: target.send(e) target.close() except GeneratorExit: target.close() return _dagpype_internal_fn_act
bc7b4fec4b868e12f4e075256ada80c05dfd2c4d
3,659,178
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False): """ Builds the logistic regression model by calling the function you've implemented previously Arguments: X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train) Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train) X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test) Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test) num_iterations -- hyperparameter representing the number of iterations to optimize the parameters learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize() print_cost -- Set to true to print the cost every 100 iterations Returns: d -- dictionary containing information about the model. """ # initialize parameters with zeros (≈ 1 line of code) w, b = initialize_with_zeros(X_train.shape[0]) print(w.shape) print(b) # Gradient descent (≈ 1 line of code) parameters, grads, costs = optimize(b=b,learning_rate=learning_rate,num_iterations=num_iterations,print_cost=print_cost,w=w,X=X_train,Y=Y_train) # Retrieve parameters w and b from dictionary "parameters" w = parameters["w"] b = parameters["b"] # Predict test/train set examples (≈ 2 lines of code) Y_prediction_test = predict(b=b,w=w,X=X_test) Y_prediction_train = predict(b=b,w=w,X=X_train) # Print train/test Errors print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) d = {"costs": costs, "Y_prediction_test": Y_prediction_test, "Y_prediction_train" : Y_prediction_train, "w" : w, "b" : b, "learning_rate" : learning_rate, "num_iterations": num_iterations} return d
073f474ada5d43811564180026cb9d4b2b052cf4
3,659,179
def mixlogistic_invcdf(y, *, logits, means, logscales, mix_dim,
                       tol=1e-8, max_bisection_iters=60, init_bounds_scale=100.):
    """
    Inverse cumulative distribution function of a mixture of logistics, via bisection.
    """
    if _FORCE_ACCURATE_INV_CDF:
        tol = min(tol, 1e-14)
        max_bisection_iters = max(max_bisection_iters, 200)
        init_bounds_scale = max(init_bounds_scale, 100.)
    return mixlogistic_invlogcdf(y.log(), logits=logits, means=means, logscales=logscales,
                                 mix_dim=mix_dim, tol=tol,
                                 max_bisection_iters=max_bisection_iters,
                                 init_bounds_scale=init_bounds_scale)
ef25170fbaaa5eae55b22b09d2d2fb66d20d03fe
3,659,180
import cgitb


def FormatException(exc_info):
    """Gets information from exception info tuple.

    Args:
        exc_info: exception info tuple (type, value, traceback)

    Returns:
        exception description in a list - wsgi application response format.
    """
    return [cgitb.handler(exc_info)]
733c2170a08f9880f8c191c1c6a52ee1ab455b7f
3,659,181
import cv2


def trackers_init(box, vid_path, image):
    """Initialize a single CSRT tracker and open the video capture."""
    tracker = cv2.TrackerCSRT_create()
    tracker.init(image, box)
    return tracker, cv2.VideoCapture(vid_path)
9b32501ad68dcc698fad2b734ff140be7a137903
3,659,182
from typing import Optional def get_image(id: Optional[int] = None, name: Optional[str] = None, slug: Optional[str] = None, source: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImageResult: """ Get information on an image for use in other resources (e.g. creating a Droplet based on snapshot). This data source provides all of the image properties as configured on your DigitalOcean account. This is useful if the image in question is not managed by the provider or you need to utilize any of the image's data. An error is triggered if zero or more than one result is returned by the query. ## Example Usage Get the data about a snapshot: ```python import pulumi import pulumi_digitalocean as digitalocean example1 = digitalocean.get_image(name="example-1.0.0") ``` Reuse the data about a snapshot to create a Droplet: ```python import pulumi import pulumi_digitalocean as digitalocean example_image = digitalocean.get_image(name="example-1.0.0") example_droplet = digitalocean.Droplet("exampleDroplet", image=example_image.id, region="nyc2", size="s-1vcpu-1gb") ``` Get the data about an official image: ```python import pulumi import pulumi_digitalocean as digitalocean example2 = digitalocean.get_image(slug="ubuntu-18-04-x64") ``` :param int id: The id of the image :param str name: The name of the image. :param str slug: The slug of the official image. :param str source: Restrict the search to one of the following categories of images: """ __args__ = dict() __args__['id'] = id __args__['name'] = name __args__['slug'] = slug __args__['source'] = source if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('digitalocean:index/getImage:getImage', __args__, opts=opts, typ=GetImageResult).value return AwaitableGetImageResult( created=__ret__.created, description=__ret__.description, distribution=__ret__.distribution, error_message=__ret__.error_message, id=__ret__.id, image=__ret__.image, min_disk_size=__ret__.min_disk_size, name=__ret__.name, private=__ret__.private, regions=__ret__.regions, size_gigabytes=__ret__.size_gigabytes, slug=__ret__.slug, source=__ret__.source, status=__ret__.status, tags=__ret__.tags, type=__ret__.type)
180e133173ddb6e99d1743326ec5dcacbc7d5901
3,659,183
def _infer_added_params(kw_params): """ Infer values for proplot's "added" parameters from stylesheets. """ kw_proplot = {} mpl_to_proplot = { 'font.size': ('tick.labelsize',), 'axes.titlesize': ( 'abc.size', 'suptitle.size', 'title.size', 'leftlabel.size', 'rightlabel.size', 'toplabel.size', 'bottomlabel.size', ), 'text.color': ( 'abc.color', 'suptitle.color', 'tick.labelcolor', 'title.color', 'leftlabel.color', 'rightlabel.color', 'toplabel.color', 'bottomlabel.color', ), } for key, params in mpl_to_proplot.items(): if key in kw_params: value = kw_params[key] for param in params: kw_proplot[param] = value return kw_proplot
fec171caef3562344ee86684edc944b0d08af3f3
3,659,184
import tables


def create_table_description(config: ConfigLoader):
    """Create the description for the pytables table used for dataloading."""
    n_sample_values = int(config.SAMPLING_RATE * config.SAMPLE_DURATION)

    table_description = {
        COLUMN_MOUSE_ID: tables.Int16Col(),
        COLUMN_LABEL: tables.StringCol(10)
    }
    for c in config.CHANNELS:
        table_description[c] = tables.Float32Col(shape=n_sample_values)

    return table_description
bd26332586a87e66e14427adb3b0c1ddfd809ce9
3,659,185
def get_target_rank_list(daos_object): """Get a list of target ranks from a DAOS object. Note: The DaosObj function called is not part of the public API Args: daos_object (DaosObj): the object from which to get the list of targets Raises: DaosTestError: if there is an error obtaining the target list from the object Returns: list: list of targets for the specified object """ try: daos_object.get_layout() return daos_object.tgt_rank_list except DaosApiError as error: raise DaosTestError( "Error obtaining target list for the object: {}".format(error))
9ce003a4e21ed0fbbf58b57989273939613fff95
3,659,186
from copy import copy


def find_global_best(particle_best=[]):
    """
    Search the per-particle bests and return the global best.

    :param particle_best: list of per-particle best solutions
    :return: the particle with the lowest total cost
    """
    best_found = None
    for particle in particle_best:
        if best_found is None:
            best_found = copy(particle)
        elif particle.total_cost < best_found.total_cost:
            best_found = copy(particle)
    print('\nBest found: ', best_found)
    return best_found
15a6b0f970e385fdc83fcffe19808c61d2a14d7f
3,659,187
def rename_to_monet_latlon(ds):
    """Rename common latitude/longitude coordinate names to 'latitude'/'longitude'.

    Parameters
    ----------
    ds : xarray.Dataset or xarray.DataArray
        Object whose coordinates should be renamed.

    Returns
    -------
    xarray.Dataset or xarray.DataArray
        The same object with coordinates renamed to 'latitude' and 'longitude'
        (returned unchanged if no known coordinate names are found).
    """
    if "lat" in ds.coords:
        return ds.rename({"lat": "latitude", "lon": "longitude"})
    elif "Latitude" in ds.coords:
        return ds.rename({"Latitude": "latitude", "Longitude": "longitude"})
    elif "Lat" in ds.coords:
        return ds.rename({"Lat": "latitude", "Lon": "longitude"})
    elif "grid_lat" in ds.coords:
        return ds.rename({"grid_lat": "latitude", "grid_lon": "longitude"})
    else:
        return ds
18647e3bbf82bae9d02db3e965c0ddfd51ddd6dd
3,659,188
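A toy check of the renaming helper above (my own sketch; assumes xarray is installed):

import xarray as xr

ds = xr.Dataset(coords={"lat": [10.0, 20.0], "lon": [100.0, 110.0]})
print(list(rename_to_monet_latlon(ds).coords))  # ['latitude', 'longitude']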
def payments_reset():
    """ Removes all payments from the database """
    Payment.remove_all()
    return make_response('', status.HTTP_204_NO_CONTENT)
c5132e8a1809a2b04ba4282d3f05aafbcf996209
3,659,189
def get_smallerI(x, i):
    """Return True if the length of string x is smaller than or equal to i."""
    return len(x) <= i
1588ef998f4914aa943a063546112766060a9cbf
3,659,190
import re def _ParseSourceContext(remote_url, source_revision): """Parses the URL into a source context blob, if the URL is a git or GCP repo. Args: remote_url: The remote URL to parse. source_revision: The current revision of the source directory. Returns: An ExtendedSourceContext suitable for JSON. """ # Assume it's a Git URL unless proven otherwise. context = None # Now try to interpret the input as a Cloud Repo URL, and change context # accordingly if it looks like one. Assume any seemingly malformed URL is # a valid Git URL, since the inputs to this function always come from Git. # # A cloud repo URL can take three forms: # 1: https://<hostname>/id/<repo_id> # 2: https://<hostname>/p/<project_id> # 3: https://<hostname>/p/<project_id>/r/<repo_name> # # There are two repo ID types. The first type is the direct repo ID, # <repo_id>, which uniquely identifies a repository. The second is the pair # (<project_id>, <repo_name>) which also uniquely identifies a repository. # # Case 2 is equivalent to case 3 with <repo_name> defaulting to "default". match = re.match(_CLOUD_REPO_PATTERN, remote_url) if match: # It looks like a GCP repo URL. Extract the repo ID blob from it. id_type = match.group('id_type') if id_type == 'id': raw_repo_id = match.group('project_or_repo_id') # A GCP URL with an ID can't have a repo specification. If it has # one, it's either malformed or it's a Git URL from some other service. if not match.group('repo_name'): context = { 'cloudRepo': { 'repoId': { 'uid': raw_repo_id }, 'revisionId': source_revision}} elif id_type == 'p': # Treat it as a project name plus an optional repo name. project_id = match.group('project_or_repo_id') repo_name = match.group('repo_name') or 'default' context = { 'cloudRepo': { 'repoId': { 'projectRepoId': { 'projectId': project_id, 'repoName': repo_name}}, 'revisionId': source_revision}} # else it doesn't look like a GCP URL if not context: context = {'git': {'url': remote_url, 'revisionId': source_revision}} return ExtendContextDict(context)
3bb14066280e616f103d3aa55710706c967df432
3,659,191
def decrypt_and_verify(message, sender_key, private_key): """ Decrypts and verifies a message using a sender's public key name Looks for the sender's public key in the public_keys/ directory. Looks for your private key as private_key/private.asc The ASN.1 specification for a FinCrypt message resides in asn1spec.py Raises exceptions if key files are not found, or are malformed. :param message: Message to decrypt (bytes) :param private_key: Decrypter's private key (file like object) :param sender_key: Sender's public key (file like object) :return: Tuple (decrypted message (bytes), whether the message was verified (boolean)) If message was unable to be decrypted, the tuple will be (None, False) """ try: decryption_key = read_private_key(private_key.read()) except Exception: raise FinCryptDecodingError('Private key file is malformed.') try: sender_key = read_public_key(sender_key.read()) except Exception: raise FinCryptDecodingError('Sender key file is malformed.') try: rsc = reedsolomon.RSCodec(8) message = bytes(rsc.decode(message)[0]) decoded, _ = decode_ber(message, asn1Spec=FinCryptMessage()) decoded = encode_native(decoded) except Exception: return None, False try: decrypted_message = decrypt_message(decryption_key['k'], decoded['key'], decoded['message']) except Exception: decrypted_message = None try: authenticated = authenticate_message(sender_key['kx'], sender_key['ky'], decrypted_message, decoded['signature']) except Exception: authenticated = False return decrypted_message, authenticated
9c3d43cc2ee01abd68416eaad4ea21fe066916a7
3,659,192
def find_best_margin(args):
    """Evaluate each candidate margin in MARGINS on the validation set and
    return the list of scores (the caller derives `best_margin / 0.1` from it)."""
    set_global_seeds(args['seed'])
    dataset = DataLoader(args['dataset'], args)
    X_train, X_test, X_val, y_train, y_test, y_val = dataset.prepare_train_test_val(args)

    results = []
    for margin in MARGINS:
        model = Perceptron(feature_dim=X_train.shape[-1], margin=margin)
        model.fit(X_train, y_train)
        results.append(model.score(X_val, y_val))
    return results
40f3a80c56546e0fc9ae42c70cfc633dc83ba111
3,659,193
import os
import json


def get_user_balances(userAddress):
    """
    Return the pool balances for a user.

    :param userAddress: Ethereum address of the user
    :return: JSON response with the user's pool balances
    """
    try:
        data = get_request_data(request) or {}
        from_block = data.get("fromBlock", int(os.getenv("BFACTORY_BLOCK", 0)))
        ocean = Ocean(ConfigProvider.get_config())
        result = ocean.pool.get_user_balances(userAddress, from_block)
        return Response(json.dumps(result), 200, content_type="application/json")
    except Exception as e:
        logger.error(f"pools/user/{userAddress}: {str(e)}")
        return f"Get pool user balances failed: {str(e)}", 500
acb2b6cf91723b0d9d15969d32a7ba58032b607f
3,659,194
def unfold(raw_log_line): """Take a raw syslog line and unfold all the multiple levels of newline-escaping that have been inflicted on it by various things. Things that got python-repr()-ized, have '\n' sequences in them. Syslog itself looks like it uses #012. """ lines = raw_log_line \ .replace('#012', '\n') \ .replace('\\n', '\n') \ .splitlines() return lines
9e23bdd82ac15086468a383a1ef98989aceee25e
3,659,195
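A tiny illustration of the unfolding above (my own synthetic log line): one newline is escaped the syslog way (#012) and one the repr() way (a literal backslash-n).

sample = "kernel: oops#012Traceback (most recent call last):\\nValueError: boom"
print(unfold(sample))
# ['kernel: oops', 'Traceback (most recent call last):', 'ValueError: boom']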
import subprocess def metis(hdf5_file_name, N_clusters_max): """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph passed by CSPA. Parameters ---------- hdf5_file_name : string or file handle N_clusters_max : int Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to which each sample has been assigned as a result of the CSPA heuristics for consensus clustering. Reference --------- G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for Partitioning Irregular Graphs" In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999. """ file_name = wgraph(hdf5_file_name) labels = sgraph(N_clusters_max, file_name) subprocess.call(['rm', file_name]) return labels
43b991921ccf62f958fc094dd5bafe9d969cad9c
3,659,196
def _mcs_single(mol, mols, n_atms): """Get per-molecule MCS distance vector.""" dists_k = [] n_atm = float(mol.GetNumAtoms()) n_incomp = 0 # Number of searches terminated before timeout for l in range(0, len(mols)): # Set timeout to halt exhaustive search, which could take minutes result = FindMCS([mol, mols[l]], completeRingsOnly=True, ringMatchesRingOnly=True, timeout=10) dists_k.append(1. - result.numAtoms / ((n_atm + n_atms[l]) / 2)) if result.canceled: n_incomp += 1 return np.array(dists_k), n_incomp
fd2adf4ee9e3811acd4acb144f3b7861ac4b64ff
3,659,197
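A usage sketch for the MCS distance helper above (my own example; it assumes RDKit is installed and that FindMCS is rdkit.Chem.rdFMCS.FindMCS, which the original module presumably imports alongside numpy):

from rdkit import Chem
from rdkit.Chem.rdFMCS import FindMCS  # the original module provides this (and numpy) at module scope

mols = [Chem.MolFromSmiles(s) for s in ["c1ccccc1O", "c1ccccc1N"]]  # phenol, aniline
n_atms = [float(m.GetNumAtoms()) for m in mols]

dists, n_incomplete = _mcs_single(mols[0], mols, n_atms)
print(dists)          # first entry 0.0 (identical molecule), second a small positive distance
print(n_incomplete)   # number of searches that hit the 10 s timeout (0 for molecules this small)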
def new_transaction():
    """
    Create a new transaction
    :return:
    """
    values = request.get_json()

    # Check that the required fields are in the POSTed data
    required = ['sender', 'recipient', 'amount']
    if not all(k in values for k in required):
        return 'Missing values', 400

    # Create a new transaction
    index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])

    response = {'message': f'The transaction will be added to Block {index}'}
    return jsonify(response), 201
06af06839e6afcaf4188cca724cebc7878455534
3,659,198
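A client-side sketch of calling this endpoint (my own example; it assumes the view is registered at /transactions/new on a local Flask dev server, which the snippet itself does not show):

import requests

payload = {"sender": "alice-address", "recipient": "bob-address", "amount": 5}
resp = requests.post("http://127.0.0.1:5000/transactions/new", json=payload)
print(resp.status_code, resp.json())  # 201 {'message': 'The transaction will be added to Block ...'}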
async def get_favicon():
    """Return favicon"""
    return FileResponse(path="assets/kentik_favicon.ico", media_type="image/x-icon")
8597f21ad240cd43f59703624d380e3b879a1a8a
3,659,199