content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import string from operator import itemgetter from random import shuffle, randint from numpy.random import choice def equationMaker(congruency=None, beat_type=None, structure=None, n=None, perms=None, catch = False): """ Function to create equation stimuli, like in Landy & Goldstone, e.g. "b + d * f + y" required inputs: congruency: 'congruent' or 'incongruent' beat_type : 'binary_beat' or 'ternary_beat' structure : '+*+' or '*+*' n : how many equations to generate outputs: a list of trial dictionaries of the length specified by n """ output_list = [] alphabet = list(string.ascii_lowercase) letters_to_remove = ['i', 'l', 'o'] # because of similarity with other symbols alphabet = [letter for letter in alphabet if letter not in letters_to_remove] # final letter list op = list(structure) # list of operators #op = [x if x != "*" else "times" for x in op] # use this line for experimenting with speech stims eq_per_perm = int(n / len(perms)) # number of equations per permutation #assert eq_per_perm.is_integer(), "length of perms must be evenly divisible into n" perms = perms * eq_per_perm shuffle(perms) for eq in range(n): l = list(choice(alphabet, size=5, replace=False)) equation = [l[0],op[0],l[1],op[1],l[2],op[2],l[3]] p = itemgetter(*perms[eq][0])(l) # creates permutation of letter ordering for this iteration probe = [p[0],op[0],p[1],op[1],p[2],op[2],p[3]] if catch: cat_idx = 2 * randint(0,3) # chooses one of the 4 letter indices probe[cat_idx] = l[4] # replace with other random letter not in stimulus trial_type = 'catch' else: trial_type = 'main' probe = ' '.join(probe) # add info on 'validity' and 'sensitivity' based on permutation used if perms[eq][1] <= 4: sensitivity = 'insensitive' else: sensitivity = 'sensitive' if structure == '+*+': if ( (perms[eq][1] <= 2) or (5 <= perms[eq][1] <= 6) ): validity = 'True' else: validity = 'False' elif structure == '*+*': if ( (perms[eq][1] <= 2) or (7 <= perms[eq][1] <= 8) ): validity = 'True' else: validity = 'False' elif structure == '+++': sensitivity = 'neutral' if catch: validity = 'False' else: validity = 'True' # assemble trial dictionary trial_dict = {'stim':equation, 'beat_type':beat_type, 'congruency':congruency, 'structure': structure, 'stim_number': eq + 1, 'probe': probe, 'validity': validity, 'sensitivity': sensitivity, 'trial_type':trial_type} output_list.append(trial_dict) return output_list
b2a81696055c77fa8803ab218e7b115a66a542aa
13,500
def get_deadline_delta(target_horizon): """Returns number of days between official contest submission deadline date and start date of target period (14 for week 3-4 target, as it's 14 days away, 28 for week 5-6 target, as it's 28 days away) Args: target_horizon: "34w" or "56w" indicating whether target period is weeks 3 & 4 or weeks 5 & 6 """ if target_horizon == "34w": deadline_delta = 14 elif target_horizon == "56w": deadline_delta = 28 else: raise ValueError("Unrecognized target_horizon "+target_horizon) return deadline_delta
df09b04fc2e7065056b724cfe5d8966c06240b79
13,501
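A minimal usage sketch for get_deadline_delta above (illustrative only; it simply assumes the function is in scope):

    # weeks 3-4 targets start 14 days after the deadline; weeks 5-6 targets start 28 days after
    assert get_deadline_delta("34w") == 14
    assert get_deadline_delta("56w") == 28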
def perform_tensorflow_model_inference(model_name, sample): """ Perform inference with a saved (already configured) model. Args: model_name (str): path of the saved Keras model to load. sample (dict): mapping of feature names to values for a single example. Returns: np.ndarray: model predictions for the sample. """ reloaded_model = tf.keras.models.load_model(model_name) input_dict = {name: tf.convert_to_tensor( [value]) for name, value in sample.items()} predictions = reloaded_model.predict(input_dict) print('Prediction: ', predictions) # prob = tf.nn.sigmoid(predictions[0]) return predictions
3c374dd76d4d40dbaffc7758694ff358ad1aefeb
13,502
def check_ntru(f, g, F, G): """Check that f * G - g * F = q mod (x ** n + 1).""" a = karamul(f, G) b = karamul(g, F) c = [a[i] - b[i] for i in range(len(f))] return ((c[0] == q) and all(coef == 0 for coef in c[1:]))
1c2ff2fbaadcdf80e5fd9ac49f39a301c9606ada
13,503
import json def edit_user(user_id): """ TODO: differentiate between PUT and PATCH -> PATCH partial update """ user = User.from_dict(request.get_json()) user.id = user_id session_id = request.headers.get('Authorization', None) session = auth.lookup(session_id) if session["user"].role != Role.admin: # We must check if the user is editing themselves if user_id != session["user"].id: raise UserException("Insufficient privileges", status_code=401) # Create basic query for user updating query = dict() # If the user updates their profile check for all fields to be updated if user.first_name and user.first_name != "": query["first_name"] = user.first_name if user.last_name and user.last_name != "": query["last_name"] = user.last_name if user.email and user.email != "": query["email"] = user.email if user.role != None and user.role >= 0: query["role"] = user.role if user.settings and user.settings != {}: query['settings'] = user.settings # In case of password change, verify that it is really him (revalidate their password) if user.password and user.password != "": query["password"] = auth.create_hash(user.password) if len(query.keys()) == 0: raise UserException("Nothing to update", status_code=400) # Update the user and return updated document res = db.update("users", "id", user.id, query) # Remove password hash from the response del res['password'] return json.dumps(res)
2459911c2c65bf4e3e5ddbfa347c16ec39c9fc2b
13,504
def Search_tau(A, y, S, args, normalize=True, min_delta=0): """ Complete parameter search for sparse regression method S. Input: A,y : from linear system Ax=y S : sparse regression method args : arguments for sparse regression method normalize : boolean. Normalize columns of A? min_delta : minimum change in tau Returns: X : list of all possible outputs of S(A,y,tau) Tau : list of values of tau corresponding to each x in X """ X = [] Tau =[] tau = 0 # Normalize if normalize: normA = np.linalg.norm(A,axis=0) A = A @ np.diag(normA**-1) for j in range(2**A.shape[1]): # Apply sparse regression x, delta_tau = S(A, y, tau, args) delta_tau = np.max([delta_tau, min_delta]) X.append(x) Tau.append(tau) # Break condition if np.max(np.abs(x)) == 0 or delta_tau == np.inf: break # Update tau tau = tau+delta_tau # Renormalize x if normalize: X = [np.diag(normA**-1) @ x for x in X] return X,Tau
30c74b0fed304df8851b9037e7091fb95be58554
13,505
def get_entry_accounts(entry: Directive) -> list[str]: """Accounts for an entry. Args: entry: An entry. Returns: A list with the entry's accounts ordered by priority: For transactions the posting accounts are listed in reverse order. """ if isinstance(entry, Transaction): return list(reversed([p.account for p in entry.postings])) if isinstance(entry, Custom): return [val.value for val in entry.values if val.dtype == ACCOUNT_TYPE] if isinstance(entry, Pad): return [entry.account, entry.source_account] account_ = getattr(entry, "account", None) if account_ is not None: return [account_] return []
dec8da3ced1956b4ae4dca08e2de812c66dcb412
13,506
def enter_fastboot(adb_serial, adb_path=None): """Enters fastboot mode by calling 'adb reboot bootloader' for the adb_serial provided. Args: adb_serial (str): Device serial number. adb_path (str): optional alternative path to adb executable Raises: RuntimeError: if adb_path is invalid or adb executable was not found by get_adb_path. Returns: str: Output from calling 'adb reboot' or None if call fails with non-zero return code. Note: If adb_path is not provided then path returned by get_adb_path will be used instead. If adb returns a non-zero return code then None will be returned. """ return _adb_command(("reboot", "bootloader"), adb_serial, adb_path=adb_path)
9f7a8dfe8d0ce47a172cf7d07feb1bd5d2e8b273
13,507
def thesaurus_manager_menu_header(context, request, view, manager): # pylint: disable=unused-argument """Thesaurus manager menu header""" return THESAURUS_MANAGER_LABEL
7c37e69d4a662e4a155ed6a63a473e2eb52fe28b
13,508
def create_compiled_keras_model(): """Create compiled keras model.""" model = models.create_keras_model() model.compile( loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=utils.get_optimizer_from_flags('client'), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) return model
252fe18678e302e216b7b05121dfedd3bb46a180
13,509
import os def train_sedinet_cat(SM, train_df, test_df, train_idx, test_idx, ID_MAP, vars, greyscale, name, mode, batch_size, valid_batch_size, res_folder): """ This function trains an implementation of SediNet """ ##================================ ## create training and testing file generators, set the weights path, ## plot the model, and create a callback list for model training train_gen = get_data_generator_1image(train_df, train_idx, True, ID_MAP, vars[0], batch_size, greyscale, DO_AUG) ##BATCH_SIZE valid_gen = get_data_generator_1image(test_df, test_idx, True, ID_MAP, vars[0], valid_batch_size, greyscale, False) ##VALID_BATCH_SIZE if SHALLOW is True: if DO_AUG is True: weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\ "_"+str(IM_WIDTH)+"_shallow_"+vars[0]+"_"+CAT_LOSS+"_aug.hdf5" else: weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\ "_"+str(IM_WIDTH)+"_shallow_"+vars[0]+"_"+CAT_LOSS+"_noaug.hdf5" else: if DO_AUG is True: weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\ "_"+str(IM_WIDTH)+"_"+vars[0]+"_"+CAT_LOSS+"_aug.hdf5" else: weights_path = name+"_"+mode+"_batch"+str(batch_size)+"_im"+str(IM_HEIGHT)+\ "_"+str(IM_WIDTH)+"_"+vars[0]+"_"+CAT_LOSS+"_noaug.hdf5" if os.path.exists(weights_path): SM.load_weights(weights_path) print("==========================================") print("Loading weights that already exist: %s" % (weights_path) ) print("Skipping model training") elif os.path.exists(res_folder+os.sep+weights_path): weights_path = res_folder+os.sep+weights_path SM.load_weights(weights_path) print("==========================================") print("Loading weights that already exist: %s" % (weights_path) ) print("Skipping model training") else: try: plot_model(SM, weights_path.replace('.hdf5', '_model.png'), show_shapes=True, show_layer_names=True) except: pass callbacks_list = [ ModelCheckpoint(weights_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only = True) ] print("=========================================") print("[INFORMATION] schematic of the model has been written out to: "+\ weights_path.replace('.hdf5', '_model.png')) print("[INFORMATION] weights will be written out to: "+weights_path) ##============================================== ## set checkpoint file and parameters that control early stopping, ## and reduction of learning rate if and when validation ## scores plateau upon successive epochs # reduceloss_plat = ReduceLROnPlateau(monitor='val_loss', factor=FACTOR, # patience=STOP_PATIENCE, verbose=1, mode='auto', min_delta=MIN_DELTA, # cooldown=STOP_PATIENCE, min_lr=MIN_LR) # # earlystop = EarlyStopping(monitor="val_loss", mode="min", patience=STOP_PATIENCE) model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only = True) #tqdm_callback = tfa.callbacks.TQDMProgressBar() # callbacks_list = [model_checkpoint, reduceloss_plat, earlystop] #, tqdm_callback] ##============================================== ## train the model # history = SM.fit(train_gen, # steps_per_epoch=len(train_idx)//batch_size, ##BATCH_SIZE # epochs=NUM_EPOCHS, # callbacks=callbacks_list, # validation_data=valid_gen, #use_multiprocessing=True, # validation_steps=len(test_idx)//valid_batch_size) #max_queue_size=10 ##VALID_BATCH_SIZE ## with non-adaptive exponentially decreasing learning rate exponential_decay_fn = exponential_decay(MAX_LR, NUM_EPOCHS) lr_scheduler = 
LearningRateScheduler(exponential_decay_fn) callbacks_list = [model_checkpoint, lr_scheduler] ## train the model history = SM.fit(train_gen, steps_per_epoch=len(train_idx)//batch_size, ##BATCH_SIZE epochs=NUM_EPOCHS, callbacks=callbacks_list, validation_data=valid_gen, #use_multiprocessing=True, validation_steps=len(test_idx)//valid_batch_size) #max_queue_size=10 ##VALID_BATCH_SIZE ###=================================================== ## Plot the loss and accuracy as a function of epoch plot_train_history_1var(history) # plt.savefig(vars+'_'+str(IM_HEIGHT)+'_batch'+str(batch_size)+'_history.png', ##BATCH_SIZE # dpi=300, bbox_inches='tight') plt.savefig(weights_path.replace('.hdf5','_history.png'),dpi=300, bbox_inches='tight') plt.close('all') # serialize model to JSON to use later to predict model_json = SM.to_json() with open(weights_path.replace('.hdf5','.json'), "w") as json_file: json_file.write(model_json) return SM, weights_path
830306ac4d3c6c8b43f763bc1b4cbb96ad8109f6
13,510
def psycopg2_string(): """ Generates a connection string for psycopg2 """ return 'dbname={db} user={user} password={password} host={host} port={port}'.format( db=settings.DATABASES['default']['NAME'], user=settings.DATABASES['default']['USER'], password=settings.DATABASES['default']['PASSWORD'], host=settings.DATABASES['default']['HOST'], port=settings.DATABASES['default']['PORT'], )
187fe1b576337613f791df657fd76ca8a4f783df
13,511
def get_phase_relation(protophase: np.ndarray, N: int = 0) -> np.ndarray: """ relation between protophase and phase Parameters ---------- protophase : np.ndarray N : int, optional number of fourier terms need to be used Returns ------- np.ndarray phase (protophase from 0 to 2pi) """ phase = np.linspace(0, np.pi * 2, 5000) + np.zeros(5000) * 1j new_phase = phase.copy() if N == 0: N = protophase.size for n in range(1, N + 1): Sn = fourier_coefficient(protophase, n) new_phase = new_phase + 2 * Sn * (np.exp(1j * n * phase) - 1) / (1j * n) return new_phase
828f200f55f8d17071a51244465311f8c99866f7
13,512
import re def handle_articlepeople(utils, mention): """ Handles #articlepeople functionality. Parameters ---------- utils : `Utils object` extends tweepy api wrapper mention : `Status object` a single mention Returns ------- None """ urls = re.findall(r'(https?://[^\s]+)', mention.text) if not urls or len(urls) != 1: utils.rundown.update_status( "@%s to use the #articlepeople service, your message should be in the following format: @ rundown_bot #articlepeople url" %mention.user.screen_name, mention.id) else: article = ArticleReader(url = urls[0]) people = article.get_people() if not people: utils.rundown.update_status( "@%s Hi! I didn't find any people in that article :(" %mention.user.screen_name, mention.id) else: people = ", ".join(people) utils.rundown.update_status( "@%s Hi! I found these people: %s" %( mention.user.screen_name, people), mention.id) return None
efdf2d7cda6124a163290aa7c3197a7462703749
13,513
from pathlib import Path def bak_del_cmd(filename:Path, bakfile_number:int, quietly=False): """ Deletes a bakfile by number """ console = Console() _bakfile = None bakfiles = db_handler.get_bakfile_entries(filename) if not bakfiles: console.print(f"No bakfiles found for {filename}") return False if not bakfile_number: try: _bakfile, bakfile_number = \ __do_select_bakfile(bakfiles, select_prompt=(("Delete which .bakfile?"), default_select_prompt[0]), return_index=True) bakfile_number += 1 except TypeError: return True confirm = input( f"Confirming: Delete bakfile #{bakfile_number} for {filename}? " f"(y/N) ").lower() == 'y' if not quietly else True if confirm: _bakfile = _bakfile or __get_bakfile_entry(filename, bakfile_number=bakfile_number, console=console) if not _bakfile: return False __remove_bakfiles([_bakfile]) return True
3ded066c23708a3fdcc2e38fb07706dc0e0cd628
13,514
def fetch_county_data(file_reference): """The name of this function is displayed to the user when there is a cache miss.""" path = file_reference.filename return (pd .read_csv(path) .assign(date = lambda d: pd.to_datetime(d.date)) )
d29459efc5a46901cce970c2ddf4e499094f1aea
13,515
def preston_sad(abund_vector, b=None, normalized = 'no'): """Compute a histogram of species abundances on a log2 scale""" if b is None: q = np.exp2(list(range(0, 25))) b = q[(q <= max(abund_vector)*2)] if normalized == 'no': hist_ab = np.histogram(abund_vector, bins = b) if normalized == 'yes': hist_ab_norm = np.histogram(abund_vector, bins = b) hist_ab_norm1 = hist_ab_norm[0]/(b[0:len(hist_ab_norm[0])]) hist_ab_norm2 = hist_ab_norm[1][0:len(hist_ab_norm[0])] hist_ab = (hist_ab_norm1, hist_ab_norm2) return hist_ab
97eec01c5d23ca7b48951d4c62c7066b77ffb467
13,516
def exp_rearrangement(): """Example demonstrating of Word-Blot for pairwise local similarity search on two randomly generated sequencees with motif sequences violating collinearity :math:`S=M_1M_2M_3, T=M'_1M'_1M'_3M'_2` where motif pairs :math:`(M_i, M'_i)_{i=1,2,3}` have lengths 200, 400, 600 and are related by match probabilities 0.95, 0.85, and 0.75, respectively. .. figure:: https://www.dropbox.com/s/nsvsf5gaui6t9ww/rearrangement.png?raw=1 :target: https://www.dropbox.com/s/nsvsf5gaui6t9ww/rearrangement.png?raw=1 :alt: lightbox Dynamic programming scores of the forward pass of Smith Waterman are shown in color code (*left*) with seeds (word length 6) grey intensity coded according to the local match probability assigned by Word-Blot (minimum similarity length 200). Similar segments reported by Word-Blot are shown as grey diagonal strips (*left*) and schematically (*right*) color coded by their Word-Blot estimated match probabilities (note agreement with true match probabilities). """ # NOTE we are running whole table DP later here; be careful with size K = 200 wordlen = 6 A = Alphabet('ACGT') WB_kw = {'g_max': .2, 'sensitivity': .9, 'alphabet': A, 'wordlen': wordlen, 'path': ':memory:', 'log_level': logging.INFO} # homologies Hs = [rand_seq(A, i) for i in [i * K for i in range(1, 4)]] ps = [.95, .85, .75] Ms = [] for p_match in ps: subst = gap = 1 - np.sqrt(p_match) print subst, gap Ms.append( MutationProcess(A, subst_probs=subst, ge_prob=gap, go_prob=gap) ) # connector junk def J(): return rand_seq(A, 2 * K) S = J() + Hs[0] + J() + Hs[1] + J() + Hs[2] + J() Hs = [M.mutate(hom)[0] for hom, M in zip(Hs, Ms)] T = J() + Hs[0] + J() + Hs[0] + Hs[2] + J() + Hs[1] + J() fig = plt.figure(figsize=(9, 6)) gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1]) ax_seeds = plt.subplot(gs[0]) ax_mapping = plt.subplot(gs[1]) WB = WordBlot(S, T, **WB_kw) p_min = .95 * min(ps) scored_seeds = WB.score_seeds(K) scored_seeds = [(WB.to_ij_coordinates(*rec['seed']), rec['p']) for rec in scored_seeds] plot_seeds(ax_seeds, [x[0] for x in scored_seeds]) cmap = plt.cm.get_cmap('plasma') sim_segments = list(WB.similar_segments(K_min=K, p_min=p_min)) min_p_obs = min(rec['p'] for rec in sim_segments) max_p_obs = max(rec['p'] for rec in sim_segments) for rec in sim_segments: print rec seg = rec['segment'] (i_start, i_end), (j_start, j_end) = WB.to_ij_coordinates_seg(seg) i_ctr, j_ctr = (i_start + i_end) / 2, (j_start + j_end) / 2 color = cmap((rec['p'] - min_p_obs) / (max_p_obs - min_p_obs))[:3] plot_similar_segment(ax_seeds, seg, lw=5, alpha=.1, c='k') ax_mapping.plot([1, 1], [i_start, i_end], lw=3, c=color, alpha=.7) ax_mapping.plot([2, 2], [j_start, j_end], lw=3, c=color, alpha=.7) ax_mapping.plot([1, 2], [i_ctr, j_ctr], marker='o', markersize=7, lw=2, c=color, alpha=.4) ax_mapping.set_xticks([1, 2]) ax_mapping.set_xticklabels(['S', 'T']) ax_mapping.set_xlim(0, 3) ax_mapping.set_ylim(0, None) ax_c = make_axes_locatable(ax_mapping).append_axes('right', size='4%', pad=0.05) norm = matplotlib.colors.Normalize(vmin=min_p_obs, vmax=max_p_obs) matplotlib.colorbar.ColorbarBase(ax_c, cmap=cmap, norm=norm, orientation='vertical') aligner_kw = { 'match_score': 1 / p_min - 1, 'mismatch_score': -1, 'ge_score': -1, 'go_score': 0, 'alnmode': STD_MODE, 'alntype': LOCAL, } print len(S), len(T) with Aligner(S, T, **aligner_kw) as aligner: aligner.solve() scores = np.array(aligner.table_scores()) min_score = min(scores.flatten()) max_score = max(scores.flatten()) ax_seeds.imshow(scores, cmap='plasma', alpha=.3) ax_c = 
make_axes_locatable(ax_seeds).append_axes('right', size='4%', pad=0.05) norm = matplotlib.colors.Normalize(vmin=min_score, vmax=max_score) matplotlib.colorbar.ColorbarBase(ax_c, cmap='plasma', norm=norm, orientation='vertical') adjust_pw_plot(ax_seeds, len(S), len(T)) ax_seeds.set_xlabel('T') ax_seeds.set_ylabel('S') fig.tight_layout() savefig(fig, 'rearrangement.png')
fdd7650d2ab0340bd11d150f7f6ad5e60ddd2d09
13,517
def package_install_site(name='', user=False, plat_specific=False): """pip-inspired, distutils-based method for fetching the default install location (site-packages path). Returns virtual environment or system site-packages, unless `user=True` in which case returns user-site (typ. under `~/.local/ on linux). If there's a distinction (on a particular system) between platform specific and pure python package locations, set `plat_specific=True` to retrieve the former. """ dist = Distribution({'name': name}) dist.parse_config_files() inst = dist.get_command_obj('install', create=True) # NOTE: specifying user=True will create user-site if user: inst.user = user inst.prefix = "" inst.finalize_options() # platform-specific site vs. purelib (platform-independent) site if plat_specific: loc = inst.install_platlib else: loc = inst.install_purelib # install_lib specified in setup.cfg has highest precedence if 'install_lib' in dist.get_option_dict('install'): loc = inst.install_lib return loc
31b477208954886f847bd33651464f386a4e6adf
13,518
def atlas_slice(atlas, slice_number): """ A function that pulls the data for a specific atlas slice. Parameters ---------- atlas: nrrd Atlas segmentation file that has a stack of slices. slice_number: int The slice number that corresponds to the fixed image for registration. Returns ------- sagittal: array Sagittal view being pulled from the atlas. coronal: array Coronal view being pulled from the atlas. horizontal: array Horizontal view being pulled from the atlas. """ epi_img_data2 = atlas.get_fdata() sagittal = epi_img_data2[140, :, :] coronal = epi_img_data2[:, slice_number, :] horizontal = epi_img_data2[:, :, 100] return sagittal, coronal, horizontal
bafe5d886568203792b0f6178302f3ca5d536e5b
13,519
def enviar_cambio_estado(request): """ Change the state of a technical note and notify the stib staff by email """ if request.method == "POST" or request.POST.get("nota_tecnica"): try: nota_tecnica = get_object_or_404(NotasTecnicas, pk=request.POST.get("nota_tecnica")) nota_tecnica.estado = request.POST.get("estado") nota_tecnica.save() # -- send an email notifying the state change subject = "Nota Técnica - Cambio de estado" ctx = { 'administracion': nota_tecnica.edificio.user.perfil.nombre_comercial, 'edificio': nota_tecnica.edificio, 'estado': NotasTecnicas.ESTADOS[ int(request.POST.get("estado"))-1 ][1], 'descripcion': nota_tecnica.descripcion, 'fecha': nota_tecnica.creado, 'comentario': request.POST.get("comentario") } body = render_to_string('emails/email_cambio_estado_nota_tecnica_notificaciones.html', ctx) _send_email(STIB_TO_EMAIL, subject, body) # -- / send an email notifying the state change messages.success(request, "Se ha cambiado el estado de la Nota Técnica.") except Exception: messages.error(request, "Error al cambiar el estado de la Nota Técnica.") return HttpResponseRedirect(reverse('notas-tecnicas:detail', args=[request.POST.get("nota_tecnica")])) else: messages.success(request, "Error.") return HttpResponseRedirect("/")
176a9a9d1bf7fd0ba1bec0c34526180581d33a8d
13,520
def post_auth_logout(): # noqa: E501 """Logout of the service TODO: # noqa: E501 :rtype: None """ return 'do some magic!'
3b24c61a301d08b1f2bbce76fe068f7adeb4a10b
13,521
from typing import Dict import aiohttp async def head(url: str) -> Dict: """Fetch headers returned by an HTTP HEAD request. :param str url: The URL to perform the HEAD request for. :rtype: dict :returns: dictionary of lowercase headers """ async with aiohttp.request("HEAD", url) as res: response_headers = res.headers return {k.lower(): v for k, v in response_headers.items()}
b4decbfb4e92863c07c5202e2c884c02e590943f
13,522
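A minimal usage sketch for the asynchronous head helper above (illustrative; assumes the coroutine is in scope and aiohttp is installed):

    import asyncio

    # keys are lowercased by head(), so lookups can use lowercase names
    headers = asyncio.run(head("https://example.com"))
    print(headers.get("content-type"))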
from scapy.all import sr, srp def send_recv_packet(packet, iface=None, retry=3, timeout=1, verbose=False): """Method sends packet and receives answer Args: packet (obj): packet iface (str): interface, used when Ether packet is included retry (int): number of retries timeout (int): timeout to receive answer verbose (bool): verbose mode Returns: tuple: answered packets, unanswered packets Raises: event: inet_before_sendrecv_packet event: inet_after_sendrecv_packet """ try: mh.demsg('htk_on_debug_info', mh._trn.msg('htk_inet_sending_recv_packet', iface, retry, timeout), mh.fromhere()) ev = event.Event( 'inet_before_sendrecv_packet', packet, iface, retry, timeout, verbose) if (mh.fire_event(ev) > 0): packet = ev.argv(0) iface = ev.argv(1) retry = ev.argv(2) timeout = ev.argv(3) verbose = ev.argv(4) if (ev.will_run_default()): if (iface is not None): ans, unans = srp( packet, iface=iface, retry=retry, timeout=timeout, verbose=verbose) else: ans, unans = sr( packet, retry=retry, timeout=timeout, verbose=verbose) mh.demsg('htk_on_debug_info', mh._trn.msg( 'htk_inet_packet_sent_recv'), mh.fromhere()) ev = event.Event('inet_after_sendrecv_packet') mh.fire_event(ev) return ans, unans except (Scapy_Exception, error) as ex: mh.demsg('htk_on_error', ex, mh.fromhere()) return None, None
ea8967096e376f91cc2ce5435d178f8b56429a86
13,523
def node_vectors(node_id): """Get the vectors of a node. You must specify the node id in the url. You can pass direction (incoming/outgoing/all) and failed (True/False/all). """ exp = Experiment(session) # get the parameters direction = request_parameter(parameter="direction", default="all") failed = request_parameter(parameter="failed", parameter_type="bool", default=False) for x in [direction, failed]: if type(x) == Response: return x # execute the request node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/node/vectors, node does not exist") try: vectors = node.vectors(direction=direction, failed=failed) exp.vector_get_request(node=node, vectors=vectors) session.commit() except Exception: return error_response(error_type="/node/vectors GET server error", status=403, participant=node.participant) # return the data return success_response(vectors=[v.__json__() for v in vectors])
c61d85e4f4ae975bdd015f6bd181d1ae78aa245d
13,524
def newFlatDict(store, selectKeys=None, labelPrefix=''): """ Takes a list of dictionaries and returns a dictionary of 1D lists. If a dictionary did not have that key or list element, then 'None' is put in its place Parameters ---------- store : list of dicts The dictionaries would be expected to have many of the same keys. Any dictionary keys containing lists in the input have been split into multiple numbered keys selectKeys : list of strings, optional The keys whose data will be included in the return dictionary. Default ``None``, which results in all keys being returned labelPrefix : string An identifier to be added to the beginning of each key string. Returns ------- newStore : dict The new dictionary with the keys from the keySet and the values as 1D lists with 'None' if the keys, value pair was not found in the store. Examples -------- >>> store = [{'list': [1, 2, 3, 4, 5, 6]}] >>> newFlatDict(store) {'list_[0]': [1], 'list_[1]': [2], 'list_[2]': [3], 'list_[3]': [4], 'list_[4]': [5], 'list_[5]': [6]} >>> store = [{'string': 'string'}] >>> newFlatDict(store) {'string': ["'string'"]} >>> store = [{'dict': {1: {3: "a"}, 2: "b"}}] >>> newFlatDict(store) {'dict_1_3': ["'a'"], 'dict_2': ["'b'"]} """ keySet = flatDictKeySet(store, selectKeys=selectKeys) newStore = {} if labelPrefix: labelPrefix += "_" for key, loc in keySet.items(): newKey = labelPrefix + str(key) if isinstance(loc, dict): subStore = [s[key] for s in store] keyStoreSet = newFlatDict(subStore, labelPrefix=newKey) newStore.update(keyStoreSet) elif isinstance(loc, (list, np.ndarray)): for locCo in loc: tempList = [] for s in store: rawVal = s.get(key, None) if rawVal is None: tempList.append(None) else: tempList.append(listSelection(rawVal, locCo)) newStore.setdefault(newKey + "_" + str(locCo), tempList) else: vals = [repr(s.get(key, None)) for s in store] newStore.setdefault(newKey, vals) return newStore
d44dec60de06779a8e965eb9e3771c66dd25e10b
13,525
from typing import Any from typing import List from typing import Union async def get_races( db: Any, token: str, raceplan_id: str ) -> List[Union[IndividualSprintRace, IntervalStartRace]]: """Get the races of the raceplan, raising if there are none.""" races = await RacesService.get_races_by_raceplan_id(db, raceplan_id) if len(races) == 0: raise NoRacesInRaceplanException( f"No races in raceplan {raceplan_id}. Cannot proceed." ) return races
393a38992be404e5a82517b13d24e85b42b57b30
13,526
def _reshape_vectors(v1, v2, axis, dim, same_shape=True): """ Reshape input vectors to two dimensions. """ # TODO v2 as DataArray with possibly different dimension order v1, axis, _, _, _, _, coords, *_ = _maybe_unpack_dataarray( v1, dim, axis, None, False ) v2, *_ = _maybe_unpack_dataarray(v2, None, axis, None) if v1.shape[axis] != 3 or v2.shape[axis] != 3: raise ValueError( f"Shape of v1 and v2 along axis {axis} must be 3, got " f"{v1.shape[axis]} for v1 and {v2.shape[axis]} for v2" ) if v1.ndim < 2: raise ValueError("v1 must have at least two dimensions") # flatten everything except spatial dimension v1 = np.swapaxes(v1, axis, -1).reshape(-1, 3) v2 = np.swapaxes(v2, axis, -1).reshape(-1, 3) if same_shape and v1.shape != v2.shape: raise ValueError("v1 and v2 must have the same shape") return v1, v2, coords is not None
90c1dbf66f12fbc0bfa6e2ddede7530dbcbdf52b
13,527
from typing import Any def yaml_load(data: str) -> Any: """Deserializes a yaml representation of known objects into those objects. Parameters ---------- data : str The serialized YAML blob. Returns ------- Any The deserialized Python objects. """ yaml = yaml_import(raise_error=True) return yaml.safe_load(data)
2e721698ef0bde3bd084127556d41503417ee516
13,528
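A minimal usage sketch for yaml_load above (illustrative; assumes PyYAML is installed so the snippet's yaml_import helper succeeds):

    # round-trip a small document through safe_load
    data = yaml_load("name: test\nvalues:\n  - 1\n  - 2\n")
    assert data == {"name": "test", "values": [1, 2]}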
def _get_current_branch(): """Retrieves the branch Git is currently in. Returns: (str): The name of the current Git branch. """ branch_name_line = _run_cmd(GIT_CMD_GET_STATUS).splitlines()[0] return branch_name_line.split(' ')[2]
1b0d93d6e69205981c06f4dc8a45cf21259f4ccd
13,529
from models.progressive_gan import ProgressiveGAN import torch.utils.model_zoo as model_zoo def PGAN(pretrained=False, *args, **kwargs): """ Progressive growing model pretrained (bool): load a pretrained model ? model_name (string): if pretrained, load one of the following models celebaHQ-256, celebaHQ-512, DTD, celeba, cifar10. Default is celebaHQ. """ if 'config' not in kwargs or kwargs['config'] is None: kwargs['config'] = {} model = ProgressiveGAN(useGPU=kwargs.get('useGPU', True), storeAVG=True, **kwargs['config']) checkpoint = {"celebAHQ-256": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaHQ_s6_i80000-6196db68.pth', "celebAHQ-512": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaHQ16_december_s7_i96000-9c72988c.pth', "DTD": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/testDTD_s5_i96000-04efa39f.pth', "celeba": "https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaCropped_s5_i83000-2b0acc76.pth"} if pretrained: if "model_name" in kwargs: if kwargs["model_name"] not in checkpoint.keys(): raise ValueError("model_name should be in " + str(checkpoint.keys())) else: print("Loading default model : celebaHQ-256") kwargs["model_name"] = "celebAHQ-256" state_dict = model_zoo.load_url(checkpoint[kwargs["model_name"]], map_location='cpu') model.load_state_dict(state_dict) return model
cb78031a6aeca887c2ed17d02419c2b551a4b1ba
13,530
import logging def compare_environment(team_env, master_env, jenkins_build_terms ): """ compare the versions replace compare_environment Return types 1 - Matches Master 2 - Does not match master. Master is ahead(red) 3 - branch is ahead (yellow) :param team_env: :param master_env: :param jenkins_build_terms: :return: """ result = 0 team_hash = team_env['version'].split('-')[-1] master_hash = master_env['version'].split('-')[-1] service_name = team_env['servicename'].replace('_','-') team_branch_name = team_env['version'].replace('_','-').split('-')[1:-1] master_branch_name = master_env['version'].replace('_','-').split('-')[1:-1] # replace signiant-installer-service dash to underscore # if there are more name changes in the future a seperate functions can be created if service_name == "signiant-installer-service": service_name = service_name.replace('-','_') if len(team_hash) == 7 and len(master_hash) == 7: if team_hash == master_hash: # if commit hash match result (green) result = 1 elif len(team_branch_name) > 0: # if a sub team branch exist and is currently deployed in the dev environment (yellow) result = 3 else: if team_env['build_date'] and master_env['build_date']: # if build dates are available for both sections if compare_bb_commit_parents(service_name, team_hash, master_hash): result = 1 else: # compare build time between two environment result = compare_build_time(team_env, master_env) else: # if build date does not exist for either or both team/master service (red) result = 2 elif (len(team_hash) == 7) ^ (len(master_hash) == 7): # if one is jenkin build number or other one is bitbucket hash (red) but not both result = 2 elif 'master' in master_env['version'] and 'master' in team_env['version']: # if hash len is not 7 for both master and team # that means jenkin build master on both prod and dev comparison environment (not bitbucket way) result = jenkins_compare_environment(team_env['version'], master_env['version'], jenkins_build_terms) else: # all other scenarios result = 2 logging.debug("Bitbucket comparing %s and %s result is %s" % (team_env['version'], master_env['version'], result)) return result
4c126d6119cfc4f506cffbcbc0324afa1694320e
13,531
def _runge_kutta_step(func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_TABLEAU, name=None): """Take an arbitrary Runge-Kutta step and estimate error. Args: func: Function to evaluate like `func(y, t)` to compute the time derivative of `y`. y0: Tensor initial value for the state. f0: Tensor initial value for the derivative, computed from `func(y0, t0)`. t0: float64 scalar Tensor giving the initial time. dt: float64 scalar Tensor giving the size of the desired time step. tableau: optional _ButcherTableau describing how to take the Runge-Kutta step. name: optional name for the operation. Returns: Tuple `(y1, f1, y1_error, k)` giving the estimated function value after the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`, estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for calculating these terms. """ with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope: y0 = ops.convert_to_tensor(y0, name='y0') f0 = ops.convert_to_tensor(f0, name='f0') t0 = ops.convert_to_tensor(t0, name='t0') dt = ops.convert_to_tensor(dt, name='dt') dt_cast = math_ops.cast(dt, y0.dtype) k = [f0] for alpha_i, beta_i in zip(tableau.alpha, tableau.beta): ti = t0 + alpha_i * dt yi = y0 + _scaled_dot_product(dt_cast, beta_i, k) k.append(func(yi, ti)) if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]): # This property (true for Dormand-Prince) lets us save a few FLOPs. yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k) y1 = array_ops.identity(yi, name='%s/y1' % scope) f1 = array_ops.identity(k[-1], name='%s/f1' % scope) y1_error = _scaled_dot_product( dt_cast, tableau.c_error, k, name='%s/y1_error' % scope) return (y1, f1, y1_error, k)
f106c6842a7f9faed6e37bcb4305adbd4bd83146
13,532
def _create_serialize(cls, serializers): """ Create a new serialize method with extra serializer functions. """ def serialize(self, value): for serializer in serializers: value = serializer(value) value = super(cls, self).serialize(value) return value serialize.__doc__ = serializers[0].__doc__ return serialize
522f6a14fe3e2bca70c141f14dc8b400be1ca680
13,533
def confusion_matrix(y_true, y_pred, labels=None): """Compute confusion matrix to evaluate the accuracy of a classification By definition a confusion matrix cm is such that cm[i, j] is equal to the number of observations known to be in group i but predicted to be in group j. Parameters ---------- y_true : array, shape = [n_samples] true targets y_pred : array, shape = [n_samples] estimated targets labels : array, shape = [n_classes] lists all labels occuring in the dataset. If none is given, those that appear at least once in y_true or y_pred are used. Returns ------- CM : array, shape = [n_classes, n_classes] confusion matrix References ---------- http://en.wikipedia.org/wiki/Confusion_matrix """ if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels, dtype=np.int) n_labels = labels.size label_to_ind = dict((y, x) for x, y in enumerate(labels)) # convert yt, yp into index y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) # intersect y_pred, y_true with labels, eliminate items not in labels ind = np.logical_and(y_pred < n_labels, y_true < n_labels) y_pred = y_pred[ind] y_true = y_true[ind] CM = np.asarray(coo_matrix((np.ones(y_true.shape[0]), (y_true, y_pred)), shape=(n_labels, n_labels), dtype=np.int).todense()) return CM
53d143a5388b23a61f927f4b8b4407cf8a051d3f
13,534
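A minimal usage sketch for the confusion_matrix implementation above (illustrative; assumes numpy, scipy's coo_matrix and unique_labels are available as in the snippet, and an older NumPy where np.int still exists):

    y_true = [0, 1, 1, 2, 2, 2]
    y_pred = [0, 0, 1, 2, 2, 1]
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1, 2])
    # cm[i, j] counts samples known to be in class i but predicted as class j
    # expected: [[1, 0, 0], [1, 1, 0], [0, 1, 2]]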
def dialect_selector(s): """Return a dialect given its name.""" s = s or 'ansi' lookup = { 'ansi': ansi_dialect } return lookup[s]
e9232e22e2ef0789d98a16c8e2f3fd7efa5a7981
13,535
import unittest def importlib_only(fxn): """Decorator to skip a test if using __builtins__.__import__.""" return unittest.skipIf(using___import__, "importlib-specific test")(fxn)
3cdc1ac5e0a2062b6822291973770459f6bf2318
13,536
def bf(x): """ returns the given bitfield value from within a register Parameters: x: a pandas DataFrame line - with a column named BF_NUMBER which holds the definition of given bit_field reg_val: integer Returns: -------- res: str the bit field value from within the register """ try: reg_val = int(x[REG_VALUE][2:],16) except: if isnan(x[REG_VALUE]): return nan else: raise if str(x[BF_NUMBER]).find("..")>0: #Example #BF=3..1 => min_bit =1 , max_bit = 3 #mask = 14 = 0xE #(1<<4) - (1<<1)= 16 - 2 =14 min_bit = int(x[BF_NUMBER].split("..")[1]) max_bit = int(x[BF_NUMBER].split("..")[0]) mask = (1<<(max_bit+1)) -(1<<(min_bit)) res= mask & reg_val res = res>>min_bit res = "{:04x}".format(res).upper() res = "0x"+res else: mask = (1<<int(x[BF_NUMBER])) res = mask & reg_val res = res >> int(x[BF_NUMBER]) res = "{:04x}".format(res).upper() res = "0x"+res return res
6167666cf7c6c5df8b121b2f418d29ff95df8898
13,537
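A small standalone sketch of the bit-field mask arithmetic that bf above relies on (illustrative, plain Python):

    # extract bits 3..1 of 0x0016 the same way bf() builds its mask
    reg_val, min_bit, max_bit = 0x0016, 1, 3
    mask = (1 << (max_bit + 1)) - (1 << min_bit)   # 0b1110 == 14
    field = (reg_val & mask) >> min_bit            # (0b10110 & 0b1110) >> 1 == 0b11 == 3
    assert field == 3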
from typing import Union def get_mean_brightness( frame: np.ndarray, mask: Union[ np.ndarray, None, ] = None, ) -> int: """Return the mean brightness of a frame. Load the frame, calculate a histogram, and iterate through the bins until half or more of the pixels have been counted. Args: `frame`: A video data frame. `mask`: An `np.ndarray` instance that represents a bit mask, or `None`. (See, *e.g.*, <https://docs.opencv.org/master/d1/db7/tutorial_py_histogram_begins.html>.) Returns: A integer representing the mean brightness of the frame. (Note that this is defined relative to the number of bins in the histogram.) """ try: grayscale_frame = cv.cvtColor( frame, cv.COLOR_RGB2GRAY, ) except Exception as error: print(f'Could not convert frame to grayscale. ({error})') return False num_pixels = frame.shape[0] * frame.shape[1] histogram = cv.calcHist( [grayscale_frame], [0], mask, [RANGE], [0, RANGE], ) pixel_count = 0 bin_index = 0 while pixel_count / num_pixels <= 0.5: pixel_count += histogram[bin_index] bin_index += 1 return bin_index
825afe97500f247aee4b1ccb045555fb21300cfe
13,538
def rearrange(s): """ Return all palindromic rearrangements of s. Args: s (str): input string Returns: list[str]: palindrome permutations of s, or [] if none are possible """ if not can_arrange_palindrome2(s): return [] m = {} for c in s: if c in m: m[c] += 1 else: m[c] = 1 middle = "" for k in m: if m[k] % 2 == 0: m[k] //= 2 else: middle = k if middle: del m[middle] res = rec_rearrange("", m) palindromes = [] for i in res: palindromes.append(i + middle + "".join(list(i)[::-1])) return palindromes
bb6e03d35cc3f786c52ce7535628e02b51abd3a0
13,539
def get_org_memberships(user_id: str): """Return a list of organizations and roles where the input user is a member""" query = ( model.Session.query(model.Group, model.Member.capacity) .join(model.Member, model.Member.group_id == model.Group.id) .join(model.User, model.User.id == model.Member.table_id) .filter( model.User.id == user_id, model.Member.state == "active", model.Group.is_organization == True, ) .order_by(model.Group.name) ) return query.all()
eaa5ba796798289185816719a176efb31d7f25e6
13,540
def standardize_concentration(df, columns, unit="nM"): """Make all concentrations match the given unit. For a given DataFrame and column, convert mM, uM, nM, and pM concentration values to the specified unit (default nM). Rename the column to include ({unit}). Parameters ---------- df : a pandas DataFrame columns : str or list column name(s) to be converted to the given unit unit : one of ["mM", "uM", "nM", "pM"], default "nM" Returns ------- A modified dataframe. Examples -------- >>> df.head() experiment [DNA] A 100 nM B 1 uM >>> standardize_concentration(df, columns="[DNA]", unit="nM").head() experiment [DNA] (nM) A 100.0 B 1000.0 """ conversions_dict = { "mM to mM": 1, "mM to uM": 1000, "mM to nM": 1000000, "mM to pM": 1000000000, "uM to mM": 1 / 1000, "uM to uM": 1, "uM to nM": 1000, "uM to pM": 1000000, "nM to mM": 1 / 1000000, "nM to uM": 1 / 1000, "nM to nM": 1, "nM to pM": 1000, "pM to mM": 1 / 1000000000, "pM to uM": 1 / 1000000, "pM to nM": 1 / 1000, "pM to pM": 1, } # don't modify in place df = df.copy().reset_index(drop=True) if type(columns) == str: columns = [columns] for column in columns: for i, row in df.iterrows(): # variables that didn't exist in all concatenated dfs will be represented as NaN if type(row[column]) is float: if np.isnan(row[column]): df.loc[i, column] = 0 continue else: raise RuntimeError( f"Something has gone wrong in row {i}, column {column}. " + f"Value is {row[column]}." ) molar_index = row[column].find("M") current_unit = row[column][molar_index - 1 : molar_index + 1] if current_unit not in ["mM", "uM", "nM", "pM"]: raise RuntimeError( f"Unit {current_unit} not recognized in row {i}, column {column}." ) value = float(row[column][: molar_index - 1]) df.loc[i, column] = value * conversions_dict[f"{current_unit} to {unit}"] df = df.rename(columns={column: f"{column} ({unit})" for column in columns}) return df
79f889640faf10e5b66989b0444a235cba872fd2
13,541
from visonic import alarm as visonicalarm def setup(hass, config): """ Setup the Visonic Alarm component.""" global HUB HUB = VisonicAlarmHub(config[DOMAIN], visonicalarm) if not HUB.connect(): return False HUB.update() # Load the supported platforms for component in ('sensor', 'alarm_control_panel'): discovery.load_platform(hass, component, DOMAIN, {}, config) return True
be11f167b393ed97d318f6f516c353ad1df39670
13,542
def moreparams(): """ Read list of json files or return one specific for specific time """ hour_back1 = request.args.get('hour_back1', default=1, type=int) hour_back2 = request.args.get('hour_back2', default=0, type=int) object_of_interest = request.args.get('object_of_interest', type=None) #print("object_of_interest: " + str(object_of_interest)[1:-1]) cam = request.args.get('cam', default=0, type=str) if hour_back1 != '': hour_back1 = int(hour_back1) else: hour_back1 = 0 # default value: 60 min back if hour_back2 != '': hour_back2 = int(hour_back2) else: hour_back2 = 1 # default value: 60 min back logger.debug("cam: {}, hour_back:{}, now_in_seconds:{}".format(cam, hour_back1, hour_back2)) params = gen_params(cam=cam, time1=hour_back1, time2=hour_back2 ,object_of_interest=object_of_interest) return Response(params, mimetype='text/plain')
0a1efd4d504ad0825a59b13ba6ce985c72b0f339
13,543
def generate_http_request_md_fenced_code_block( language=None, fence_string='```', **kwargs, ): """Wraps [``generate_http_request_code``](#generate_http_request_code) function result in a Markdown fenced code block. Args: fence_string (str): Code block fence string used wrapping the code. It does not perform any check about if the fenced string is a "valid" markdown code block fence string. **kwargs: All other optional arguments are passed to [``generate_http_request_code``](#generate_http_request_code) function. Examples: >>> generate_http_request_md_fenced_code_block(setup=False) "```python\\nreq = requests.get('http://localhost')\\n```" >>> generate_http_request_md_fenced_code_block(fence_string='~~~', ... setup=False) "~~~python\\nreq = requests.get('http://localhost')\\n~~~" Returns: str: Fenced code block with HTTP request code snippet inside. """ return '{fence_string}{language}\n{render}\n{fence_string}'.format( language=language if language else DEFAULT_LANGUAGE, render=generate_http_request_code(language=language, **kwargs), fence_string=fence_string, )
a34581e8c0d40542a625d222183adb601c60b408
13,544
def confident_hit_ratio(y_true, y_pred, cut_off=0.1): """ Return the hit ratio of true positives for confident molecules. Confident molecules are those whose predicted confidence value is higher than the cutoff. :param y_true: binary ground-truth labels (1 marks an actual hit) :param y_pred: predicted confidence values :param cut_off: confidence value above which a prediction is considered confident :return: fraction of actual hits that were predicted confidently """ actual_indexes = np.where(y_true==1)[0] confident_indexes = np.where(y_pred>cut_off)[0] confident_hit = np.intersect1d(actual_indexes, confident_indexes) ratio = 1.0 * len(confident_hit) / len(actual_indexes) return ratio
0a7dbe9f3d81b877c309fd1fffb2840ec71dbeee
13,545
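A minimal usage sketch for confident_hit_ratio above (illustrative; assumes numpy is imported as np and the function is in scope):

    import numpy as np

    y_true = np.array([1, 0, 1, 1, 0])
    y_pred = np.array([0.90, 0.20, 0.05, 0.60, 0.30])
    # actual hits are indexes {0, 2, 3}; confident predictions (> 0.1) are {0, 1, 3, 4}
    # their overlap {0, 3} gives a ratio of 2/3
    assert abs(confident_hit_ratio(y_true, y_pred, cut_off=0.1) - 2 / 3) < 1e-9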
import click def onion(ctx, port, onion_version, private_key, show_private_key, detach): """ Add a temporary onion-service to the Tor we connect to. This keeps an onion-service running as long as this command is running with an arbitrary list of forwarded ports. """ if len(port) == 0: raise click.UsageError( "You must use --port at least once" ) if private_key is not None: if onion_version == 3 and not private_key.startswith('ED25519-V3'): raise click.UsageError( "Private key type is not version 3" ) if onion_version == 2 and not private_key.startswith('RSA1024'): raise click.UsageError( "Private key type is not version 2" ) def _range_check(p): try: p = int(p) if p < 1 or p > 65535: raise click.UsageError( "{} invalid port".format(p) ) except ValueError: raise click.UsageError( "{} is not an int".format(p) ) validated_ports = [] for p in port: if ':' in p: remote, local = p.split(':', 1) _range_check(remote) # the local port can be an ip:port pair, or a unix:/ # socket so we'll let txtorcon take care validated_ports.append((int(remote), local)) else: _range_check(p) validated_ports.append(int(p)) try: onion_version = int(onion_version) if onion_version not in (2, 3): raise ValueError() except ValueError: raise click.UsageError( "--onion-version must be 2 or 3" ) cfg = ctx.obj return _run_command( carml_onion.run, cfg, list(validated_ports), onion_version, private_key, show_private_key, detach, )
7f36e967fc30877b504fda79699c7d3347a4f410
13,546
def determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon): """PB should be filtered if NMD, a truncation, or protein classification is not likely protein coding (intergenic, antisense, fusion,...) Args: row (pandas Series): protein classification row min_junc_after_stop_codon (int): mininum number of junctions after stop codon a protein can have. used in NMD determination Returns: int: 1 if should be filtered, 0 if should not be filtered """ # filter out pbs that are artifacts or noncoding pclass = str(row['protein_classification']) num_junc_after_stop_codon = int(row['num_junc_after_stop_codon']) pclass_base_to_keep = ['pFSM','pNIC'] pclass_base = str(row['protein_classification_base']) if pclass_base not in pclass_base_to_keep and num_junc_after_stop_codon > min_junc_after_stop_codon: return 1 elif 'trunc' in pclass: return 1 elif 'intergenic' in pclass: return 1 elif 'antisense' in pclass: return 1 elif 'fusion' in pclass: return 1 elif 'orphan' in pclass: return 1 elif 'genic' in pclass: return 1 return 0
29ab7ce53ac7569c4d8a29e8e8564eab33b3f545
13,547
import queue def calculate_shortest_path(draw_func, grid, start, end): """https://en.wikipedia.org/wiki/A*_search_algorithm""" count = 0 open_set = queue.PriorityQueue() open_set.put((0, count, start)) open_set_hash = {start} came_from = {} # g_score: Distance from start to current node g_score = {spot: float("inf") for row in grid for spot in row} # TODO: Using objects as dict keys, is it OK? g_score[start] = 0 # f_score: Sum of g_score and ESTIMATED distance from current to end node f_score = {spot: float("inf") for row in grid for spot in row} # TODO: Using objects as dict keys, is it OK? f_score[start] = start.distance(end) while not open_set.empty(): for event in pygame.event.get(): if event.type == pygame.QUIT: return None current = open_set.get()[2] open_set_hash.remove(current) if current == end: reconstruct_path(came_from, current, draw_func) start.celltype = CellType.START end.celltype = CellType.END return True for neighbor in current.neighbors: new_g_score = g_score[current] + 1 # All edges are weighted equally # If this path to neighbor is better than any previous one record it! if new_g_score < g_score[neighbor]: came_from[neighbor] = current g_score[neighbor] = new_g_score f_score[neighbor] = g_score[neighbor] + neighbor.distance(end) if neighbor not in open_set_hash: count += 1 open_set.put((f_score[neighbor], count, neighbor)) open_set_hash.add(neighbor) neighbor.celltype = CellType.OPEN draw_func() if current != start: current.celltype = CellType.CLOSED return False
52cbdc4a8e9114395d6349e58cba11bb2b6ab84f
13,548
from typing import MutableMapping import hashlib def _get_hashed_id(full_name: str, name_from_id: MutableMapping[int, str]) -> int: """Converts the string-typed name to int-typed ID.""" # Built-in hash function will not exceed the range of int64, which is the # type of id in metadata artifact proto. result = int(hashlib.sha256(full_name.encode('utf-8')).hexdigest(), 16) % _INT64_MAX name_from_id[result] = full_name return result
2eadfac0369d33ae29e4c054691180720995ef93
13,549
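A minimal usage sketch for _get_hashed_id above (illustrative; assumes _INT64_MAX is defined as in the snippet, e.g. 2**63 - 1):

    name_from_id = {}
    artifact_id = _get_hashed_id("my_pipeline.my_component.output", name_from_id)
    # the id is a deterministic sha256-based value reduced into the int64 range,
    # and the side-table lets the original name be recovered later
    assert name_from_id[artifact_id] == "my_pipeline.my_component.output"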
def find_adjustment(tdata : tuple, xdata : tuple, ydata : tuple, numstept=10,numstepx=10,tol=1e-6) -> tuple: """ Find best fit of data with temporal and spatial offset in range. Returns the tuple err, dt, dx. Finds a temporal and spatial offset to apply to the temporal and spatial locations of the lif data such that the corresponding elevation data has minimal absolute difference. find_adjustment takes a brute force approach, and will compare the difference in ydata at overlapping tdata and xdata locations for all offsets within plus or minus numstept and numstepx. By default 400 possible offsets are evaluated. tdata and xdata must be integer types in order to find the overlapping tdata and xdata locations. Raises a TypeError for some inputs. Raises a ValueError if there is no intersection in tdata & xdata, """ if not (isinstance(tdata,tuple) and len(tdata)==2): raise TypeError("tdata must be a tuple with length 2") elif not (tdata[0].dtype==int and tdata[1].dtype==int): raise TypeError(f"t in tdata must have dtype int but has dtypes " \ f"{tdata[0].dtype} and {tdata[1].dtype}") elif not (isinstance(xdata,tuple) and len(xdata)==2): raise TypeError("xdata must be a tuple with length 2") elif not (xdata[0].dtype==int and xdata[1].dtype==int): raise TypeError(f"x in xdata must have dtype int but has dtypes " \ f"{xdata[0].dtype} and {xdata[1].dtype}") elif not (isinstance(ydata,tuple) and len(ydata)==2): raise TypeError("ydata must be a tuple with length 2") # create all possibile pairs of offsets in the range if numstept == 0: dt = np.asarray([0],dtype=int) else: dt = np.arange(-numstept,numstept+1) if numstepx == 0: dx = np.asarray([0],dtype=int) else: dx = np.arange(-numstepx,numstepx+1) DT, DX = tuple(np.meshgrid(dt,dx)) pos = np.transpose(np.stack([DT.ravel(),DX.ravel()])) # for each possible offset in space and time, estimate the error err = np.empty(DT.ravel().shape) err[:] = np.nan # invalid by default for idx, p in enumerate(pos): dt, dx = p _, tidx0, tidx1 = np.intersect1d(tdata[0],tdata[1]+dt,return_indices=True) _, xidx0, xidx1 = np.intersect1d(xdata[0],xdata[1]+dx,return_indices=True) # it is possible that dt and dx will push them out of overlapping # skip in that case (err[idx] = np.nan by default) if not ( tidx0.size==0 or xidx0.size==0 or tidx1.size==0 or xidx1.size==0 ): yidx0 = tuple(np.meshgrid(tidx0,xidx0,indexing = 'ij')) yidx1 = tuple(np.meshgrid(tidx1,xidx1,indexing = 'ij')) #err[idx] = np.mean(np.abs(ydata[0][yidx0] - ydata[1][yidx1])) err[idx] = np.mean((ydata[0][yidx0] - ydata[1][yidx1])**2) # error out if there is no intersection of the data for any offset if np.isnan(err).all(): raise ValueError("xdata and tdata have no intersection") idx_min = np.nanargmin(err) dt, dx = pos[idx_min] return err[idx_min], dt, dx
4efe607c40606b1235a5f9d62c3002a673a47828
13,550
import yaml def get_params(): """Load ./config.yml into a dict and return it""" with open(HERE/'config.yml') as file: params = yaml.safe_load(file) return params
8e2e1b3ae47ff9a296aab7945562e3ea8ad43598
13,551
import argparse import logging def parse_args(args, repo_dirs): """ Extract the CLI arguments from argparse """ parser = argparse.ArgumentParser(description="Sweet branch creation tool") parser.add_argument( "--repo", help="Repository to create branch in", choices=repo_dirs, required=False, ) parser.add_argument( "--parent", help="Parent branch", default="dev", required=False, ) parser.add_argument("ticket", help="Ticket to build branch name from") parser.add_argument( "--version", action="version", version="jolly_brancher {ver}".format(ver=__version__), ) parser.add_argument( "-v", "--verbose", dest="loglevel", help="set loglevel to INFO", action="store_const", const=logging.INFO, ) parser.add_argument( "-vv", "--very-verbose", dest="loglevel", help="set loglevel to DEBUG", action="store_const", const=logging.DEBUG, ) return parser.parse_args(args)
b745e9aedc03857101c4faa4c9a126b8b19a093d
13,552
def get_textgrid(path_transcription): """Get data from TextGrid file""" data = textgriddf_reader(path_file=path_transcription) text_df = textgriddf_df(data, item_no=2) sentences = textgriddf_converter(text_df) return sentences
d3e037ff10488eb1eed777e008599769ddf9d81f
13,553
import http def accessible_required(f): """Decorator for an endpoint that requires a user have accessible or read permission in the given room. The function must take a `room` argument by name, as is typically used with flask endpoints with a `<Room:room>` argument.""" @wraps(f) def required_accessible_wrapper(*args, room, **kwargs): if not room.check_accessible(g.user): abort(http.NOT_FOUND) return f(*args, room=room, **kwargs) return required_accessible_wrapper
e4e13632963fb80377dcbdaa36e90c4c62dd9a1f
13,554
import warnings def make_erb_cos_filters_nx(signal_length, sr, n, low_lim, hi_lim, sample_factor, padding_size=None, full_filter=True, strict=True, **kwargs): """Create ERB cosine filters, oversampled by a factor provided by "sample_factor" Args: signal_length (int): Length of signal to be filtered with the generated filterbank. The signal length determines the length of the filters. sr (int): Sampling rate associated with the signal waveform. n (int): Number of filters (subbands) to be generated with standard sampling (i.e., using a sampling factor of 1). Note, the actual number of filters in the generated filterbank depends on the sampling factor, and will also include lowpass and highpass filters that allow for perfect reconstruction of the input signal (the exact number of lowpass and highpass filters is determined by the sampling factor). The number of filters in the generated filterbank is given below: +---------------+---------------+-+------------+---+---------------------+ | sample factor | n_out |=| bandpass |\ +| highpass + lowpass | +===============+===============+=+============+===+=====================+ | 1 | n+2 |=| n |\ +| 1 + 1 | +---------------+---------------+-+------------+---+---------------------+ | 2 | 2*n+1+4 |=| 2*n+1 |\ +| 2 + 2 | +---------------+---------------+-+------------+---+---------------------+ | 4 | 4*n+3+8 |=| 4*n+3 |\ +| 4 + 4 | +---------------+---------------+-+------------+---+---------------------+ | s | s*(n+1)-1+2*s |=| s*(n+1)-1 |\ +| s + s | +---------------+---------------+-+------------+---+---------------------+ low_lim (int): Lower limit of frequency range. Filters will not be defined below this limit. hi_lim (int): Upper limit of frequency range. Filters will not be defined above this limit. sample_factor (int): Positive integer that determines how densely ERB function will be sampled to create bandpass filters. 1 represents standard sampling; adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling; adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling; adjacent bandpass filters will overlap by 87.5%. padding_size (int, optional): If None (default), the signal will not be padded before filtering. Otherwise, the filters will be created assuming the waveform signal will be padded to length padding_size*signal_length. full_filter (bool, default=True): If True (default), the complete filter that is ready to apply to the signal is returned. If False, only the first half of the filter is returned (likely positive terms of FFT). strict (bool, default=True): If True (default), will throw an error if sample_factor is not a power of two. This facilitates comparison across sample_factors. Also, if True, will throw an error if provided hi_lim is greater than the Nyquist rate. Returns: tuple: A tuple containing the output: * **filts** (*array*)-- The filterbank consisting of filters have cosine-shaped frequency responses, with center frequencies equally spaced on an ERB scale from low_lim to hi_lim. * **center_freqs** (*array*) -- something * **freqs** (*array*) -- something Raises: ValueError: Various value errors for bad choices of sample_factor; see description for strict parameter. 
""" if not isinstance(sample_factor, int): raise ValueError('sample_factor must be an integer, not %s' % type(sample_factor)) if sample_factor <= 0: raise ValueError('sample_factor must be positive') if sample_factor != 1 and np.remainder(sample_factor, 2) != 0: msg = 'sample_factor odd, and will change ERB filter widths. Use even sample factors for comparison.' if strict: raise ValueError(msg) else: warnings.warn(msg, RuntimeWarning, stacklevel=2) if padding_size is not None and padding_size >= 1: signal_length += padding_size if np.remainder(signal_length, 2) == 0: # even length n_freqs = signal_length // 2 # .0 does not include DC, likely the sampling grid max_freq = sr / 2 # go all the way to nyquist else: # odd length n_freqs = (signal_length - 1) // 2 # .0 max_freq = sr * (signal_length - 1) / 2 / signal_length # just under nyquist # verify the high limit is allowed by the sampling rate if hi_lim > sr / 2: hi_lim = max_freq msg = 'input arg "hi_lim" exceeds nyquist limit for max frequency; ignore with "strict=False"' if strict: raise ValueError(msg) else: warnings.warn(msg, RuntimeWarning, stacklevel=2) # changing the sampling density without changing the filter locations # (and, thereby changing their widths) requires that a certain number of filters # be used. n_filters = sample_factor * (n + 1) - 1 n_lp_hp = 2 * sample_factor freqs = utils.matlab_arange(0, max_freq, n_freqs) filts = np.zeros((n_freqs + 1 , n_filters + n_lp_hp)) # ?? n_freqs+1 # cutoffs are evenly spaced on an erb scale -- interpolate linearly in erb space then convert back # get the actual spacing use to generate the sequence (in case numpy does something weird) center_freqs, erb_spacing = np.linspace(freq2erb(low_lim), freq2erb(hi_lim), n_filters + 2, retstep=True) # +2 for bin endpoints # we need to exclude the endpoints center_freqs = center_freqs[1:-1] freqs_erb = freq2erb(freqs) for i in range(n_filters): i_offset = i + sample_factor l = center_freqs[i] - sample_factor * erb_spacing h = center_freqs[i] + sample_factor * erb_spacing # the first sample_factor # of rows in filts will be lowpass filters filts[(freqs_erb > l) & (freqs_erb < h), i_offset] = make_cosine_filter(freqs_erb, l, h, convert_to_erb=False) # be sample_factor number of each for i in range(sample_factor): # account for the fact that the first sample_factor # of filts are lowpass i_offset = i + sample_factor lp_h_ind = max(np.where(freqs < erb2freq(center_freqs[i]))[0]) # lowpass filter goes up to peak of first cos filter lp_filt = np.sqrt(1 - np.power(filts[:lp_h_ind+1, i_offset], 2)) hp_l_ind = min(np.where(freqs > erb2freq(center_freqs[-1-i]))[0]) # highpass filter goes down to peak of last cos filter hp_filt = np.sqrt(1 - np.power(filts[hp_l_ind:, -1-i_offset], 2)) filts[:lp_h_ind+1, i] = lp_filt filts[hp_l_ind:, -1-i] = hp_filt # ensure that squared freq response adds to one filts = filts / np.sqrt(sample_factor) # get center freqs for lowpass and highpass filters cfs_low = np.copy(center_freqs[:sample_factor]) - sample_factor * erb_spacing cfs_hi = np.copy(center_freqs[-sample_factor:]) + sample_factor * erb_spacing center_freqs = erb2freq(np.concatenate((cfs_low, center_freqs, cfs_hi))) # rectify center_freqs[center_freqs < 0] = 1 # discard highpass and lowpass filters, if requested if kwargs.get('no_lowpass'): filts = filts[:, sample_factor:] if kwargs.get('no_highpass'): filts = filts[:, :-sample_factor] # make the full filter by adding negative components if full_filter: filts = make_full_filter_set(filts, signal_length) return 
filts, center_freqs, freqs
207a9d3be6b732c1d86a5ed5bde069d5ea760347
13,555
def button_ld_train_first_day(criteria, min_reversal_number):
    """
    This function creates a csv file for the LD Train test. Each row will be the first day the animal ran the test.
    At the end, the function will ask the user to save the newly created csv file in a directory.

    :param criteria: A widget that contains a string that represents the duration of the criteria as n days/n+1 days
    :param min_reversal_number: An entry widget that contains a value that represents the minimum required reversal number for an animal
    """

    # check that the inputs to the criteria widgets are valid
    if ld_train_criteria_min_rev_check(criteria, min_reversal_number) is not None:
        criteria_list, min_rev = ld_train_criteria_min_rev_check(criteria, min_reversal_number)
    else:
        mb.showerror('LD Train Criteria Error', 'button_ld_train_first_day() error: One of the three criteria is empty or invalid!')
        print('button_ld_train_first_day() error: One of the three criteria is empty or invalid!')
        return None

    if ld_criteria_list_check(criteria_list) is not None:
        criteria_value, criteria_max_days = ld_criteria_list_check(criteria_list)
    else:
        mb.showerror('LD Train Criteria Error', 'button_ld_train_first_day() error: The n/n+1 days criteria is empty or invalid!')
        print('button_ld_train_first_day() error: The n/n+1 days criteria is empty or invalid!')
        return None

    df = data_setup('LD Train')
    if df is not None:
        ld_train_delete_other_difficulties(df)
        get_ld_train_normal(df, criteria_value, criteria_max_days, min_rev)
        save_file_message(df)
    else:
        mb.showerror('LD Train Criteria Error', 'button_ld_train_first_day() error: One of the criteria is invalid or you hit the cancel button!')
        print('button_ld_train_first_day() error: One of the criteria is invalid or you hit the cancel button!')
        return None
9de68279f6ffb8253275a7a7051a1ed9b2df8f8e
13,556
def analyze_video(file, name, api): """ Call Scenescoop analyze with a video """ args = Namespace(video=file, name=name, input_data=None, api=True) scene_content = scenescoop(args) content = '' maxframes = 0 for description in scene_content: if(len(scene_content[description]) > maxframes): content = description maxframes = len(scene_content[description]) if(api): return jsonify(status="200", scene_content=scene_content, content=content, maxframes=maxframes) else: return content
92e176a5c951d038aa8477db7aec0705fba0152c
13,557
from osgeo import ogr import jsonschema import json import mimetypes import os def validategeojson(data_input, mode): """GeoJSON validation example >>> import StringIO >>> class FakeInput(object): ... json = open('point.geojson','w') ... json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''') ... json.close() ... file = 'point.geojson' >>> class fake_data_format(object): ... mimetype = 'application/geojson' >>> fake_input = FakeInput() >>> fake_input.data_format = fake_data_format() >>> validategeojson(fake_input, MODE.SIMPLE) True """ passed = False if mode >= MODE.NONE: passed = True if mode >= MODE.SIMPLE: _get_mimetypes() name = data_input.file (mtype, encoding) = mimetypes.guess_type(name, strict=False) passed = (mtype == data_input.data_format.mimetype == FORMATS['GEOJSON'][0]) if mode >= MODE.STRICT: data_source = ogr.Open(data_input.file) if data_source: passed = (data_source.GetDriver().GetName() == "GeoJSON") else: passed = False if mode >= MODE.VERYSTRICT: # this code comes from # https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py schema_home = os.path.join(_get_schemas_home(), "geojson") base_schema = os.path.join(schema_home, "geojson.json") geojson_base = json.load(open(base_schema)) cached_json = { "http://json-schema.org/geojson/crs.json": json.load(open(os.path.join(schema_home, "crs.json"))), "http://json-schema.org/geojson/bbox.json": json.load(open(os.path.join(schema_home, "bbox.json"))), "http://json-schema.org/geojson/geometry.json": json.load(open(os.path.join(schema_home, "geometry.json"))) } resolver = jsonschema.RefResolver( "http://json-schema.org/geojson/geojson.json", geojson_base, store=cached_json) validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver) try: validator.validate(json.loads(data_input.stream.read())) passed = True except jsonschema.ValidationError: passed = False return passed
41fc1283469d28b8f422564303b17d80407e7893
13,558
def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size): """ Build and train an encoder-decoder model on x and y :param input_shape: Tuple of input shape :param output_sequence_length: Length of output sequence :param english_vocab_size: Number of unique English words in the dataset :param french_vocab_size: Number of unique French words in the dataset :return: Keras model built, but not trained """ # OPTIONAL: Implement return None
47fa1893cc04b491292461db6c8a3418b464ba45
13,559
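The encdec_model stub above only documents the intended interface. Below is a minimal sketch of one way such an encoder-decoder model could be assembled in Keras; the layer sizes, the choice of GRU cells, and the use of RepeatVector are illustrative assumptions, not a reference solution, and input_shape is assumed to be (samples, sequence_length) of integer token ids.

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, GRU, RepeatVector, TimeDistributed, Dense

def encdec_model_sketch(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    # Encoder: embed the English tokens and compress the sequence into one state vector.
    inputs = Input(shape=input_shape[1:])            # (sequence_length,) token ids
    embedded = Embedding(english_vocab_size, 64)(inputs)
    encoded = GRU(128)(embedded)                     # final hidden state summarises the sentence
    # Decoder: repeat the state for each output step and predict one French token per step.
    repeated = RepeatVector(output_sequence_length)(encoded)
    decoded = GRU(128, return_sequences=True)(repeated)
    outputs = TimeDistributed(Dense(french_vocab_size, activation='softmax'))(decoded)
    model = Model(inputs, outputs)
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model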
def close_to_cron(crontab_time, time_struct):
    """For each field, return the value from the cron ranges (crontab_time) that is closest to,
    without exceeding, the corresponding field of the given time time_struct."""
    close_time = list(time_struct)  # copy so the caller's time_struct is not modified
    cindex = 0
    for val_struct in time_struct:
        offset_min = val_struct
        val_close = val_struct
        for val_cron in crontab_time[cindex]:
            offset_tmp = val_struct - val_cron
            if offset_tmp > 0 and offset_tmp < offset_min:
                val_close = val_cron  # remember the closest cron value found so far
                offset_min = offset_tmp
        close_time[cindex] = val_close
        cindex = cindex + 1
    return close_time
7ce04d9b4260e7ea1ed7c3e95e7c36928989024e
13,560
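A brief usage sketch for close_to_cron above; the field layout (minutes, then hours) and the sample values are illustrative assumptions, not taken from the original project.

# Hypothetical example: each entry of crontab_time lists the allowed values for one field,
# and time_struct holds the current value of each field in the same order.
crontab_time = [[0, 15, 30, 45], [0, 6, 12, 18]]   # allowed minutes, allowed hours (assumed layout)
time_struct = [37, 14]                             # current minute and hour

print(close_to_cron(crontab_time, time_struct))    # -> [30, 12], the nearest values not exceeding the input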
from nltk.corpus import stopwords


def remove_stop_words(words_list: list) -> list:
    """ Remove stop words from strings list """
    en_stop_words = set(stopwords.words('english'))
    return [w for w in words_list if str(w).lower() not in en_stop_words]
a6e3c117ea805bdfaffe80c17fc5e340a869d55d
13,561
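A small usage sketch for remove_stop_words above, assuming the NLTK stopwords corpus has been downloaded; the sample token list is illustrative.

import nltk

nltk.download('stopwords', quiet=True)  # one-time download of the stopword corpus

tokens = ['The', 'cat', 'sat', 'on', 'the', 'mat']
print(remove_stop_words(tokens))  # -> ['cat', 'sat', 'mat']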
import os def build_jerry_data(jerry_path): """ Build up a dictionary which contains the following items: - sources: list of JerryScript sources which should be built. - dirs: list of JerryScript dirs used. - cflags: CFLAGS for the build. """ jerry_sources = [] jerry_dirs = set() for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]: for file in find_sources(os.path.normpath(jerry_path), sub_dir): path = os.path.join('jerryscript', file) jerry_sources.append(path) jerry_dirs.add(os.path.split(path)[0]) jerry_cflags = [ '-DJERRY_GLOBAL_HEAP_SIZE=10', '-DJERRY_NDEBUG', '-DJERRY_DISABLE_HEAVY_DEBUG', '-DJERRY_BUILTIN_NUMBER=0', '-DJERRY_BUILTIN_STRING=0', '-DJERRY_BUILTIN_BOOLEAN=0', #'-DJERRY_BUILTIN_ERRORS=0', '-DJERRY_BUILTIN_ARRAY=0', '-DJERRY_BUILTIN_MATH=0', '-DJERRY_BUILTIN_JSON=0', '-DJERRY_BUILTIN_DATE=0', '-DJERRY_BUILTIN_REGEXP=0', '-DJERRY_BUILTIN_ANNEXB=0', '-DJERRY_ESNEXT=0', '-DJERRY_LCACHE=0', '-DJERRY_PROPERTY_HASHMAP=0', ] return { 'sources': jerry_sources, 'dirs': jerry_dirs, 'cflags': jerry_cflags, }
77a26d0f94bc82881e86e9f0fc40915a66bc3914
13,562
def min_geodesic_distance_rotmats_pairwise_tf(r1s, r2s): """Compute min geodesic distance for each R1 wrt R2.""" # These are the traces of R1^T R2 trace = tf.einsum('...aij,...bij->...ab', r1s, r2s) # closest rotation has max trace max_trace = tf.reduce_max(trace, axis=-1) return tf.acos(tf.clip_by_value((max_trace - 1.0) / 2.0, -1.0, 1.0))
a4da40aa9594c301b0366da0a26d73afce83e05f
13,563
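A quick sanity-check sketch for min_geodesic_distance_rotmats_pairwise_tf above, assuming TensorFlow 2.x eager execution and that TensorFlow is imported as tf in the module; the rotations used are just the identity and a 90-degree rotation about the Z axis.

import numpy as np
import tensorflow as tf

identity = np.eye(3, dtype=np.float32)
rot_z_90 = np.array([[0., -1., 0.],
                     [1.,  0., 0.],
                     [0.,  0., 1.]], dtype=np.float32)

r1s = tf.constant([identity])               # one query rotation
r2s = tf.constant([identity, rot_z_90])     # two candidate rotations
# The identity is its own nearest neighbour, so its minimum geodesic distance is 0.
print(min_geodesic_distance_rotmats_pairwise_tf(r1s, r2s).numpy())  # -> [0.]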
def project_to_2D(xyz):
    """Project a 3D point onto the X-Z plane (the Y component is dropped)."""
    return xyz[0], xyz[2]
c6cdb8bd6dce65f6ce39b14b9e56622832f35752
13,564
def Geom2dInt_Geom2dCurveTool_D2(*args): """ :param C: :type C: Adaptor2d_Curve2d & :param U: :type U: float :param P: :type P: gp_Pnt2d :param T: :type T: gp_Vec2d :param N: :type N: gp_Vec2d :rtype: void """ return _Geom2dInt.Geom2dInt_Geom2dCurveTool_D2(*args)
6ac157e171af9d4bab852a9677287e33bb1d90f2
13,565
def notfound(request): """ Common notfound return message """ msg = CustomError.NOT_FOUND_ERROR.format(request.url, request.method) log.error(msg) request.response.status = 404 return {'error': 'true', 'code': 404, 'message': msg}
b690d9b879db15e192e8ee50d4ea2b0847ba658b
13,566
def l2norm(a): """Return the l2 norm of a, flattened out. Implemented as a separate function (not a call to norm() for speed).""" return np.sqrt(np.sum(np.absolute(a)**2))
b5ce94bfc0f3472e60a4338c379bc4dfe490e623
13,567
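A quick check of l2norm above against NumPy's built-in norm, assuming NumPy is available as np in the module; the sample array is arbitrary.

import numpy as np

a = np.array([[3.0, 4.0], [0.0, 12.0]])
print(l2norm(a))                   # -> 13.0, i.e. sqrt(3**2 + 4**2 + 12**2)
print(np.linalg.norm(a.ravel()))   # same value computed by NumPy directly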
def create_container(request): """ Creates a container (empty object of type application/directory) """ storage_url = get_endpoint(request, 'adminURL') auth_token = get_token_id(request) http_conn = client.http_connection(storage_url, insecure=settings.SWIFT_INSECURE) form = CreateContainerForm(request.POST or None) if form.is_valid(): container = form.cleaned_data['containername'] try: client.put_container(storage_url, auth_token, container, http_conn=http_conn) messages.add_message(request, messages.SUCCESS, _("Container created.")) actionlog.log(request.user.username, "create", container) except client.ClientException as err: log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err)) messages.add_message(request, messages.ERROR, _('Access denied.')) return redirect(containerview) context = utils.update_default_context(request, { 'form': form, }) return render_to_response('create_container.html', context, context_instance=RequestContext(request))
15c25df933f7620cee71319f9f41e92e29880d1c
13,568
import os def check_all_data_present(file_path): """Checks the data exists in location file_path""" filenames = [ "t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte", "train-images-idx3-ubyte", "train-labels-idx1-ubyte", ] data_path = os.path.join(file_path, "data") return tu.check_data_exists(data_path, filenames)
b15759f7f6849829f853d49c1c2c4a117ce28613
13,569
def get_searchable_models(): """ Returns a list of all models in the Django project which implement ISearchable """ app = AppCache(); return filter(lambda klass: implements(klass, ISearchable), app.get_models())
ad7c56f17ec4e0fc77942fe1466b879bd45eb191
13,570
def create_updated_alert_from_slack_message(payload, time, alert_json): """ Create an updated raw alert (json) from an update request in Slack """ values = payload['view']['state']['values'] for value in values: for key in values[value]: if key == 'alert_id': continue if key == 'severity': if values[value][key].get('selected_option'): alert_json[key] = \ values[value][key]['selected_option']['text']['text'] if key == 'active': if values[value][key].get('selected_option'): alert_json[key] = \ values[value][key]['selected_option']['text']['text'] else: if values[value][key].get('value'): alert_json[key] = values[value][key]['value'] alert_json['datetime'] = time return alert_json
a685a0c0da472f055dc8860bdf09970a1ecc8aff
13,571
def enforce(*types): """ decorator function enforcing, and converting, argument data types """ def decorator(fn): def new_function(*args, **kwargs): # convert args into something mutable, list in this case newargs = [] for original_argument, type_to_convert in zip(args, types): newargs.append(type_to_convert(original_argument)) return fn(*newargs, **kwargs) return new_function return decorator
217ad3adccdaa9fc83ceaf5ef2c0905b8d54f1ed
13,572
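A short usage sketch for the enforce decorator above; the scale function and its arguments are made up for illustration.

@enforce(int, float)
def scale(count, factor):
    """Multiply an integer count by a float factor."""
    return count * factor

# Positional arguments are converted to the declared types before the call.
print(scale('3', '1.5'))   # -> 4.5, because '3' -> int and '1.5' -> float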
from typing import Type from typing import Any from typing import Sequence from enum import Enum from datetime import datetime def modify_repr(_cls: Type[Any]) -> None: """Improved dataclass repr function. Only show non-default non-internal values, and summarize containers. """ # let classes still create their own if _cls.__repr__ is not object.__repr__: return def new_repr(self: Any) -> str: name = self.__class__.__qualname__ lines = [] for f in sorted(fields(self), key=lambda f: f.name not in ("name", "id")): if f.name.endswith("_"): continue # https://github.com/python/mypy/issues/6910 if f.default_factory is not MISSING: # type: ignore default = f.default_factory() # type: ignore else: default = f.default current = getattr(self, f.name) if current != default: if isinstance(current, Sequence) and not isinstance(current, str): rep = f"[<{len(current)} {f.name.title()}>]" elif isinstance(current, Enum): rep = repr(current.value) elif isinstance(current, datetime): rep = f"datetime.fromisoformat({current.isoformat()!r})" else: rep = repr(current) lines.append(f"{f.name}={rep},") if len(lines) == 1: body = lines[-1].rstrip(",") elif lines: body = "\n" + indent("\n".join(lines), " ") + "\n" else: body = "" out = f"{name}({body})" return out setattr(_cls, "__repr__", new_repr)
ddc860bbe3c9d04723a3cc0b4cdcce960d0ecf71
13,573
def _is_binary(path):
    """Checks if the file at |path| is an ELF executable.

    This is done by inspecting its 4-byte magic number.
    """
    with open(path, 'rb') as f:
        file_tag = f.read(4)
    return file_tag == b'\x7fELF'
0c5bc0917f405604a6d36495b786c9fbc9268ad1
13,574
from typing import Optional
import typing


def update_item(*, table: str, hash_key: str, sort_key: Optional[str] = None, update_expression: Optional[str],
                expression_attribute_values: typing.Dict, return_values: str = 'ALL_NEW'):
    """
    Update an item in a DynamoDB table. Will determine the type of db this is being called on by the
    number of keys provided (omit sort_key to UPDATE an item in a table with only 1 primary key).

    NOTE: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html

    :param table: Name of the table in AWS.
    :param str hash_key: 1st primary key that can be used to fetch associated sort_keys and values.
    :param str sort_key: 2nd primary key, used with hash_key to fetch a specific value.
        Note: If not specified, the item is addressed by the hash_key alone.
    :param str update_expression: Expression used to update value, needs action to be performed and new value
    :param dict expression_attribute_values: attribute values to use from the expression
    :param str return_values: return values to get back from the dynamodb API, defaults to 'ALL_NEW'
        which provides all item attributes after the update.
    :return: the item attributes returned by DynamoDB (per return_values), formatted.
    """
    query = {'TableName': table, 'Key': _format_item(hash_key=hash_key, sort_key=sort_key, value=None)}
    if update_expression:
        query['UpdateExpression'] = update_expression
        query['ExpressionAttributeValues'] = expression_attribute_values
        query['ReturnValues'] = return_values
    resp = db.update_item(**query)
    return _format_ddb_response(resp.get('Attributes'))
a6efc6638708d1c5dfc79d89216cfa866e3a24fa
13,575
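A hypothetical call to update_item above; the table name, key values, and attribute names are invented for illustration, and the _format_item/_format_ddb_response helpers and the low-level db client are assumed to be configured elsewhere in the module.

# Increment a counter attribute on the item identified by the two primary keys.
updated = update_item(
    table='orders',
    hash_key='customer-123',
    sort_key='order-456',
    update_expression='SET order_count = order_count + :inc',
    expression_attribute_values={':inc': {'N': '1'}},
)
print(updated)  # attributes of the item after the update (ReturnValues='ALL_NEW')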
def get_configuration(configuration_path=DEFAULT_CONFIGURATION_FILE): """ Return a dict containing configuration values. :param str configuration_path: path to parse yaml from. :return: dict """ global _configuration if _configuration is None: LOGGER.debug('Loading configuration: %s', configuration_path) parser = YAML(typ='rt') try: with open(configuration_path) as configuration_file: _configuration = parser.load(configuration_file) except FileNotFoundError: raise RuntimeError(f'Cannot find configuration file {configuration_path}') except YAMLParserError as ype: raise RuntimeError(f'Cannot parse configuration file {configuration_path}, see {ype.problem_mark}') return _configuration
99c9e53242e34495d872cc9131961ab7878c641a
13,576
import re


def google_login_required(fn):
    """Return 403 unless the user is logged in from a @google.com domain."""
    def wrapper(self, *args, **kwargs):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        email_match = re.match('^(.*)@(.*)$', user.email())
        if email_match:
            _, domain = email_match.groups()
            if domain == 'google.com':
                return fn(self, *args, **kwargs)
        self.error(403)  # Unrecognized email or unauthorized domain.
        self.response.out.write('unauthorized email %s' % user.user_id())
    return wrapper
1e45f2ea026e772b6b4c9048dddf93b2fe3ec991
13,577
def init_res_fig(n_subplots, max_sess=None, modif=False): """ init_res_fig(n_subplots) Initializes a figure in which to plot summary results. Required args: - n_subplots (int): number of subplots Optional args: - max_sess (int): maximum number of sessions plotted default: None - modif (bool) : if True, plots are made in a modified (simplified way) default: False Returns: - fig (plt Fig): figure - ax (plt Axis): axis """ subplot_hei = 14 subplot_wid = 7.5 if max_sess is not None: subplot_wid *= max_sess/4.0 if modif: sess_plot_util.update_plt_linpla() figpar_init = sess_plot_util.fig_init_linpla(sharey=True)["init"] fig, ax = plot_util.init_fig(n_subplots, **figpar_init) else: fig, ax = plot_util.init_fig(n_subplots, 2, sharey=True, subplot_hei=subplot_hei, subplot_wid=subplot_wid) return fig, ax
3c12c18c16a371d10977d165875a2aa346c009bf
13,578
import json def change_personal_data_settings(request): """ Creates a question with summarized data to be changed :param request: POST request from "Change personal data settings" Dialogflow intent :return: JSON with summarized data to be changed """ language = request.data['queryResult']['languageCode'] response_spoken_pl = "Nie mogę zmienić żadnych ustawień, ponieważ nie posiadasz jeszcze konta. Jeśli chcesz " \ "założyć konto w best transport Polska, wybierz poniższą opcję Zarejestruj się" display_spoken_pl = "Nie mogę zmienić żadnych ustawień. Załóż konto przez wybranie poniższej opcji Zarejestruj się" response_spoken_en = "I can't change any settings, because you don't have an account yet. If you want to create a best" \ " transport Poland account, select the option \"Sign up\" below" display_spoken_en = "I can't change any settings. Create an account by selecting the option below \"Sign up\"" access_token = request.data['originalDetectIntentRequest']['payload']['user'] if 'accessToken' in access_token: access_token = access_token['accessToken'] else: access_token = None if access_token: account_exist = check_token(access_token, language, response_spoken_pl, display_spoken_pl, response_spoken_en, display_spoken_en) if account_exist == "token exist": with open('api/response.json') as json_file: response = json.load(json_file) part_to_modify = response['payload']['google']['richResponse'] parameters_from_request = request.data["queryResult"]["parameters"] if language == "pl": entities_pl = {'First_name': 'imię', 'Surname': "nazwisko", 'Email': 'email', 'telephone-number': 'numer telefonu', 'geo-city': 'miejsce zamieszkania', 'post-code': 'kod pocztowy','geo-country': 'kraj', 'tax_number': "numer płatnika"} response_pl = "Czy na pewno chcesz zmienić " for k,v in parameters_from_request.items(): if v != "" and k in entities_pl: response_pl += entities_pl[k] + " na " + v + ", " response_pl = response_pl[:-2] response_pl += "?" suggestions_pl = [{"title": "Tak"}, {"title": "Nie"}] part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = response_pl part_to_modify['items'][0]['simpleResponse']['displayText'] = response_pl part_to_modify['suggestions'] = suggestions_pl elif language == "en": entities_en = {'First_name': 'name', 'Surname': "surname", 'Email': 'email', 'telephone-number': 'phone number', 'geo-city': 'residence place', 'post-code': 'post code', 'geo-country': 'country', 'tax_number': "tax number"} response_en = "Are you sure you want to change " for k, v in parameters_from_request.items(): if v != "" and k in entities_en: response_en += entities_en[k] + " to " + v + ", " response_en = response_en[:-2] response_en += "?" suggestions_en = [{"title": "Yes"}, {"title": "No"}] part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = response_en part_to_modify['items'][0]['simpleResponse']['displayText'] = response_en part_to_modify['suggestions'] = suggestions_en response['payload']['google']['richResponse'] = part_to_modify return response else: return account_exist else: access_token = "There is no" account_exist = check_token(access_token, language, response_spoken_pl, display_spoken_pl, response_spoken_en, display_spoken_en) return account_exist
c18641ea4cc32e8d2703dfca90066b6736c5103a
13,579
def get_selected_cells(mesh, startpos, endpos): """ Return a list of cells contained in the startpos-endpos rectangle """ xstart, ystart = startpos xend, yend = endpos selected_cells = set() vertex_coords = mesh.coordinates() for cell in dolfin.cells(mesh): cell_vertices = cell.entities(0) for vid in cell_vertices: x, y = vertex_coords[vid] if xstart <= x <= xend and ystart <= y <= yend: selected_cells.add(cell.index()) break return selected_cells
c637bfa195aae4125e65553b1f4023cc3dae1f3a
13,580
def flip_axis(array, axis): """ Flip the given axis of an array. Note that the ordering follows the numpy convention and may be unintuitive; that is, the first axis flips the axis horizontally, and the second axis flips the axis vertically. :param array: The array to be flipped. :type array: `ndarray` :param axis: The axis to be flipped. :type axis: `int` :returns: The flipped array. :rtype: `ndarray` """ # Rearrange the array so that the axis of interest is first. array = np.asarray(array).swapaxes(axis, 0) # Reverse the elements along the first axis. array = array[::-1, ...] # Put the array back and return. return array.swapaxes(0, axis)
e2839125ddf3b22dea732857763fda636b748dda
13,581
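A small example of flip_axis above on a 2x3 array, assuming NumPy is available as np in the module; axis 0 reverses the rows and axis 1 reverses the columns, matching the NumPy convention noted in the docstring.

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])
print(flip_axis(a, 0))   # rows reversed:    [[4 5 6] [1 2 3]]
print(flip_axis(a, 1))   # columns reversed: [[3 2 1] [6 5 4]]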
def fizz_buzz_tree(input_tree):
    """
    Traverses a tree and performs fizz buzz on each element, augmenting the val.
    """
    input_tree.in_order_trav(lambda x: fizzbuzz(x))
    return input_tree
29b7380fb6215bf8ecf67fa85321445b8954abdc
13,582
def scan_to_headerword(serial_input, maximum_bytes=9999, header_magic=HeaderWord.MAGIC_MASK): """ Consume bytes until header magic is found in a word :param header_magic: :param maximum_bytes: :param serial_input: :rtype : MTS.Header.Header """ headerword = 0x0000 bytecount = 0 while headerword & header_magic != header_magic: # BlockingIOError # Read a single byte nextbyte = serial_input.read(1) if len(nextbyte) == 0: raise BufferError("Reached end of stream") bytecount += 1 # Take the low word and shift it high; Use OR to add this byte nextint = ord(nextbyte) # if DEBUG: print('0x{byte:02X} {byte:08b}'.format(byte=nextint)) headerword = ((headerword & 0x00FF) << 8) | nextint if 0 < maximum_bytes <= bytecount: raise BufferError("Failed to detect header word in serial stream") try: h = MTS.Header.Header(word=headerword) # if DEBUG: print("Found header word. 0x{:04X}".format(h.word)) return h except ValueError as e: print("Invalid header word 0x{:04X}".format(headerword)) raise e
5a59d934426559f0aa7d9357740b007c16dc8f90
13,583
def _c2_set_instrument_driver_parameters(reference_designator, data): """ Set one or more instrument driver parameters, return status. Accepts the following urlencoded parameters: resource: JSON-encoded dictionary of parameter:value pairs timeout: in milliseconds, default value is 60000 Sample: localhost:12572/instrument/api/reference_designator/resource [POST] The UI sends all READ_WRITE parameters in data; so data should never be empty. """ debug = False response_status = {} response_status['status_code'] = 200 response_status['message'] = "" response_status['range_errors'] = "" response_status['display_parameters'] = {} insufficient_data = 'Insufficient data, or bad data format.' valid_args = [ 'resource', 'timeout'] try: if not reference_designator: message = insufficient_data + ' (reference_designator is None or empty)' raise Exception(message) if not data: message = insufficient_data + ' (data is None or empty)' raise Exception(message) try: payload = convert(data) except Exception as err: message = 'Failed to process request data; %s' % str(err.message) raise Exception(message) if debug: print '\n debug --- Original payload: ', json.dumps(payload, indent=4, sort_keys=True) # Validate arguments required for uframe are provided. for arg in valid_args: if arg not in payload: raise Exception(insufficient_data) # Get instrument status. _status = get_instrument_status(reference_designator) if _status is None: message = 'Failed to retrieve instrument (%s) status.' % reference_designator raise Exception(message) # Verify payload['resource'] is not empty or None if payload['resource'] is None or not payload['resource']: message = 'The payload [resource] element is None or empty.' raise Exception(message) # Get dict of parameters and range values parameter_dict, key_dict_ranges = get_range_dictionary(payload['resource'], _status, reference_designator) # Scrub data and determine if range errors result, error_result = scrub_ui_request_data(payload['resource'], parameter_dict, key_dict_ranges) # If range error messages, return error dictionary if error_result: # Create dictionary with response data and return. result = {} response_status['message'] = 'Range Error(s)' response_status['range_errors'] = error_result response_status['status_code'] = 400 result['response'] = response_status if debug: print '\n debug ***** RANGE Error(s): %s' % json.dumps(result, indent=4, sort_keys=True) return result # If no errors and result is empty or None, raise exception elif result is None or not result: message = 'Unable to process resource payload (result is None or empty).' raise Exception(message) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Process parameter set request in uframe # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Update value of resource in payload. payload['resource'] = json.dumps(result) if 'CAMDS' in reference_designator: payload['timeout'] = 200000 # 200 millisecs if debug: print '\n debug --- payload: ', json.dumps(payload, indent=4, sort_keys=True) # Send request and payload to instrument/api and process result try: response = _uframe_post_instrument_driver_set(reference_designator, 'resource', payload) except Exception as err: message = str(err.message) raise Exception(message) if response.status_code != 200: message = '(%s) Failed to execute instrument driver set.' 
% str(response.status_code) raise Exception(message) if response.content: try: response_data = json.loads(response.content) except: message = 'Malformed data; not in valid json format. (C2 instrument driver set)' raise Exception(message) # Evaluate response content for error (review 'value' list in response_data) if response_data: status_code, status_type, status_message = _eval_POST_response_data(response_data, None) response_status['status_code'] = status_code response_status['message'] = status_message else: message = 'No response.content returned from _uframe_post_instrument_driver_set.' raise Exception(message) # Add response attribute information to result result['response'] = response_status # Get current over_all status, return in attribute 'status' of result try: status = _c2_get_instrument_driver_status(reference_designator) except Exception: status = {} result['status'] = status return result except Exception: raise
4a54b4b8e79c089657e5e63347bac4109d056834
13,584
def generate_key(keysize=KEY_SIZE):
    """Generate an RSA key pair

    Keyword Arguments:
        keysize {int} -- Key length in bits (default: {KEY_SIZE})

    Returns:
        bytes -- Secret key
        bytes -- Public key
    """
    key = RSA.generate(keysize)
    public_key = key.publickey().exportKey()
    secret_key = key.exportKey(passphrase=None)
    return secret_key, public_key
0a38221269b167c4ceefc95eb4cee3452f2aaffe
13,585
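A brief usage sketch for generate_key above, assuming the RSA class comes from PyCryptodome (Crypto.PublicKey.RSA) and that KEY_SIZE is defined elsewhere in the module; 2048 bits is passed explicitly here and the file names are illustrative.

secret_key, public_key = generate_key(keysize=2048)

# Both values are PEM-encoded bytes that can be written straight to disk.
with open('private.pem', 'wb') as fh:
    fh.write(secret_key)
with open('public.pem', 'wb') as fh:
    fh.write(public_key)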
import functools def get_api_endpoint(func): """Register a GET endpoint.""" @json_api.route(f"/{func.__name__}", methods=["GET"]) @functools.wraps(func) def _wrapper(*args, **kwargs): return jsonify({"success": True, "data": func(*args, **kwargs)}) return _wrapper
d49dc725e7538374910e3819f8eae647250747f7
13,586
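A usage sketch for get_api_endpoint above, assuming json_api is a Flask blueprint and jsonify is imported from Flask elsewhere in the module; the status function is illustrative.

@get_api_endpoint
def status():
    """Exposed as GET /status on the json_api blueprint."""
    return {'uptime_seconds': 42}

# A request to /status then returns: {"success": true, "data": {"uptime_seconds": 42}}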
import sys def scrape_radio_koeln(): """ Fetch the currently playing song for Radio Köln. :return: A Song, if scraping went without error. Return None otherwise. """ url = 'http://www.radiokoeln.de/' tag = get_tag(url, '//div[@id="playlist_title"]')[0] artist = tag.xpath('.//div/b/text()') title = tag.xpath('.//div/text()') tmp = title title = [] for item in tmp: s = item.strip() if s: title.append(s) if artist and title: artist = artist[0] title = title[-1] return Song(artist, title) # else sys.stderr.write("ERROR in radiokoeln: "+str(artist)+" "+str(title)+"\n") return None
34c9dc15345d7b4e9921e6fe0d134b68bf8f2b65
13,587
def get_mt4(alias=DEFAULT_MT4_NAME): """ Notes: return mt4 object which is initialized. Args: alias(string): mt4 object alias name. default value is DEFAULT_MT4_NAME Returns: mt4 object(metatrader.backtest.MT4): instantiated mt4 object """ global _mt4s if alias in _mt4s: return _mt4s[alias] else: raise RuntimeError('mt4[%s] is not initialized.' % alias)
ee228ced5790124768c8a41e70bf596181a55ca2
13,588
import warnings


def get_log_probability_function(model=None):
    """
    Builds a theano function from a PyMC3 model which takes a numpy array of shape
    ``(n_parameters)`` as an input and returns the total log probability of the model.
    This function takes the **transformed** random variables defined within the model
    context, which is a different behaviour from
    :func:`caustic.utils.get_log_likelihood_function`. The ordering of the parameters
    in the input array should match the ordering of the RVs in the model context.

    The purpose of this function is to be able to use external samplers with PyMC3
    models.

    Parameters
    ----------
    model : pymc3.Model
        PyMC3 model object.

    Returns
    -------
    ndarray
        Total log probability of the model.
    """
    model = pm.modelcontext(model)

    transform_suffixes = ("_interval__", "_log__", "_lowerbound__", "_upperbound__")
    if any(suffix in str(model.vars) for suffix in transform_suffixes):
        warnings.warn(
            """Your model contains transformed variables. Keep in mind
            that the compiled log probability function expects the
            transformed variables as an input.""",
        )

    f = theano.function(model.vars, [model.logpt])

    def log_prob(params):
        dct = model.bijection.rmap(params[::-1])
        args = (dct[k.name] for k in model.vars)
        results = f(*args)
        return tuple(results)[0]

    return log_prob
8e4332ce6943341b196d1628942f3360ce9f4e05
13,589
def get_health(check_celery=True): """ Gets the health of the all the external services. :return: dictionary with key: service name like etcd, celery, elasticsearch value: dictionary of health status :rtype: dict """ health_status = { 'etcd': _check_etcd(), 'store': _check_store() } if check_celery: health_status['celery'] = _check_celery() return health_status
95fec6ee762ab81a8e27ebe796b914be6d38c59d
13,590
def add_quotation_items(quotation_items_data):
    """
    Add a quotation-items record.

    :param quotation_items_data:
    :return: None / value of the new record's id
    :except:
    """
    return db_instance.add(QuotationItems, quotation_items_data)
09f60cc4e8182909acb34bb0b406336849bf8543
13,591
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
    """
    Loads the Cifar10 images in human readable format.

    Args:
        path: The path to the folder with the Cifar10 images.
        img_nums: A list with the numbers of the images we want to load.

    Returns:
        The images as a Mx3x32x32 numpy array.
    """
    return load_img(path, img_nums, (3, 32, 32))
991ff4cd7192c0ed4b1d6e2d566ed1f0ce446db5
13,592
import os def identity(target_ftrs, identity_ftrs, output_name=None, output_folder=None, cluster_tolerance="", problem_fields={}, full_out_path=""): """ perform identity analysis on target feature class with identity feature class """ try: output_location = IN_MEMORY out_ftrs = os.path.basename(str(identity_ftrs)) if output_folder: output_location = output_folder out_ftrs = arcpy.CreateUniqueName(out_ftrs, output_location) else: out_ftrs = os.path.join(output_location, out_ftrs) # add 'identity' in output feature class name if not present if out_ftrs.find('_identity') == -1: out_ftrs += '_identity' out_ftrs = check_name_length(out_ftrs) # identity operation to combine attributes cnt = int(arcpy.GetCount_management(identity_ftrs)[0]) if cnt > 0: arcpy.Identity_analysis(target_ftrs, identity_ftrs, out_ftrs, "NO_FID", cluster_tolerance) feature_name = check_name_length("sp" + os.path.basename(str(out_ftrs))) if output_name: feature_name = output_name # convert multiparts to single part, if any return out_ftrs return target_ftrs except Exception as e: arcpy.AddError(str(e))
1d0e9cc4e4d68cc8da8ddbefff18629ccc01266c
13,593
import typing def get_project_linked_to_object(object_id: int) -> typing.Optional[Project]: """ Return the project linked to a given object, or None. :param object_id: the ID of an existing object :return: the linked project or None :raise errors.ObjectDoesNotExistError: if no object with the given ID exists """ association = projects.ProjectObjectAssociation.query.filter_by( object_id=object_id ).first() if association is None: # make sure the object exists objects.get_object(object_id) return None return get_project(association.project_id)
189282be5acfb063678ca2c6765eeb1a7fa6b6c5
13,594
from typing import Union from pathlib import Path import os def processor(template: Union[str, Path] = None, format_name: str = None) -> Union[None, RecordProcessor]: """ Configures the record level processor for either the template or for the format_name Args: template: path to template or template as string format_name: one of the valid registered formatter names Returns: RecordProcessor if valid template of format_name provide, None otherwise Raises: SpecException when format_name is not registered or if both template and format specified Examples: >>> import datacraft >>> engine = datacraft.outputs.processor(template='/path/to/template.jinja') >>> engine = datacraft.outputs.processor(template='{{ Inline: {{ variable }}') >>> formatter = datacraft.outputs.processor(format_name='json') >>> formatter = datacraft.outputs.processor(format_name='my_custom_registered_format') """ if template and format_name: raise SpecException('Only one of template or format_name should be supplied') # so name doesn't shadow _processor = None if template: _log.debug('Using template: %s', template) if os.path.exists(template): _processor = template_engines.for_file(template) elif '{{' in template: # type: ignore _processor = template_engines.string(template) # type: ignore else: raise SpecException(f'Unable to determine how to handle template {template}, with type: {type(template)}') elif format_name: _log.debug('Using %s formatter for output', format_name) _processor = _for_format(format_name) return _processor
7cbe291f1a41e9d00e15c721ad73ffc69e0f49e0
13,595
def calc_distances_for_everyon_in_frame(everyone_in_frame, people_graph, too_far_distance, minimum_distance_change): """ :param everyone_in_frame: [PersonPath] :type everyone_in_frame: list :param people_graph: :type people_graph: Graph :param too_far_distance: :param minimum_distance_change: :return: :rtype: Graph """ points = [[person_path.world_frame_coordinates_list.current_frame_coord().x, person_path.world_frame_coordinates_list.current_frame_coord().y] for person_path in everyone_in_frame] # all points of everyone in this frame points = np.array(points) ids = [person_path.id_number for person_path in everyone_in_frame] for index, person_path in enumerate(everyone_in_frame): x, y = person_path.world_frame_coordinates_list.current_frame_coord_xy() point = np.array([x, y]) all_euclidean_distances = np.linalg.norm(points - point, axis=1) # calculate all euclidean distances closer = deque() further = deque() for i in range(len(ids)): id_number = ids[i] if id_number == person_path.id_number: # if it's the same id as the person_path's id continue distance = all_euclidean_distances[i] people_graph.add_edge(person_path.id_number, id_number, distance) if distance < too_far_distance: # if it's not too far event = distance_event(person_path.id_number, id_number, people_graph, minimum_distance_change) if event == DistanceEvent.CLOSER: closer.append(id_number) elif event == DistanceEvent.FURTHER: further.append(id_number) if closer: print('%3d is getting CLOSER to' % person_path.id_number, list(closer)) if further: print('%3d is getting FURTHER from' % person_path.id_number, list(further)) return people_graph
83dcb204b53d2ac784d1cc8bb0da61a114a41768
13,596
import torch def conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None, stride=1, padding=0, dilation=1, groups=1, mode=None): """Standard conv2d. Returns the input if weight=None.""" if weight is None: return input ind = None if mode is not None: if padding != 0: raise ValueError('Cannot input both padding and mode.') if mode == 'same': padding = (weight.shape[2] // 2, weight.shape[3] // 2) if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0: ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None), slice(-1) if weight.shape[3] % 2 == 0 else slice(None)) elif mode == 'valid': padding = (0, 0) elif mode == 'full': padding = (weight.shape[2] - 1, weight.shape[3] - 1) else: raise ValueError('Unknown mode for padding.') out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) if ind is None: return out return out[:, :, ind[0], ind[1]]
b43b975d96d273fa1ee1cfe4034ca1fd195b5019
13,597
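A quick shape check for the conv2d wrapper above, assuming PyTorch with torch.nn.functional imported as F inside the module; with mode='same' the spatial size is preserved even for an even-sized kernel, because the extra row and column produced by the symmetric padding are sliced off.

import torch

x = torch.randn(1, 3, 32, 32)    # batch of one RGB 32x32 image
w = torch.randn(8, 3, 4, 4)      # 8 filters with an even 4x4 kernel

out = conv2d(x, w, mode='same')
print(out.shape)                 # -> torch.Size([1, 8, 32, 32])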
import tqdm import torch def infer(model, loader_test): """ Returns the prediction of a model in a dataset. Parameters ---------- model: PyTorch model loader_test: PyTorch DataLoader. Returns ------- tuple y_true and y_pred """ model.eval() ys, ys_hat = [], [] for ids, masks, y_true in tqdm(loader_test): ids = ids.to(device) masks = masks.to(device) y_true = y_true.to(device) y_hat = model(ids, masks) loss = F.cross_entropy(y_hat, y_true) y_pred = torch.argmax(y_hat, dim=1) ys.extend(y_true.cpu().numpy().tolist() ) ys_hat.extend(y_pred.cpu().numpy().tolist()) return ys, ys_hat
956b17d8b3869eeff6d35019ac82cd3ca5d4092e
13,598
import keyword def validate_project(project_name): """ Check the defined project name against keywords, builtins and existing modules to avoid name clashing """ if not project_name_rx.search(project_name): return None if keyword.iskeyword(project_name): return None if project_name in dir(__builtins__): return None try: __import__(project_name) return None except ImportError: return project_name
569fdb1d6d37ce50b144facc6cba725a0575b2f6
13,599
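A short illustration of validate_project above, assuming project_name_rx is the module's identifier regex (something like ^[a-z_][a-z0-9_]*$, so 'my_app' matches); the example names exercise the keyword and import checks.

print(validate_project('my_app'))   # -> 'my_app' (valid: not a keyword, builtin, or importable module)
print(validate_project('class'))    # -> None (Python keyword)
print(validate_project('json'))     # -> None (already an importable module)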