Columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
def _get_instance_id(instance_list, identity):
    """ Return instance UUID by name or ID, if found. """
    for i in instance_list.items:
        if identity in (i.properties.name, i.id):
            return i.id
    return None
f466e10028e9b84f23bd4ace1f02ad8f792517ee
13,494
def isPageWatched(user, trunk):
    """Is the page being watched by the user?"""
    result = (models.Subscription.all().
              filter('user =', user).
              filter('trunk =', trunk).
              filter('method !=', models.Subscription.METH_MEH))
    return result.count(1) != 0
07335b32e11ef275a8c23281e295ed175b2b5854
13,496
def _parse_constants():
    """Read the code in St7API and parse out the constants."""

    def is_int(x):
        try:
            _ = int(x)
            return True
        except ValueError:
            return False

    with open(St7API.__file__) as f_st7api:
        current_comment = None
        seen_comments = set()
        f_stripped = (l.strip() for l in f_st7api)
        for l in f_stripped:
            is_comment_line = l.startswith("#")
            is_blank_line = not l
            is_constant_line = "=" in l and is_int(l.split("=")[1])

            if is_comment_line:
                if l in seen_comments:
                    raise ValueError(f"Duplicate comment {l}")
                seen_comments.add(l)  # record the comment so the duplicate check above can take effect

            if is_comment_line:
                current_comment = l
            elif is_blank_line:
                current_comment = None
            elif is_constant_line:
                if current_comment:
                    name, val = [x.strip() for x in l.split("=")]
                    yield current_comment, name, val
ca3c10b1eda6d46f86e21f318b960781b8875cc3
13,498
def verify_outcome(msg, prefix, lista):
    """
    Compare a message to a list of claims=value strings.

    :param prefix: prefix string
    :param lista: list of claims=value strings
    :return: True if the query part of the message matches the expected claims
    """
    assert msg.startswith(prefix)
    qsl = ["{}={}".format(k, v[0]) for k, v in parse_qs(msg[len(prefix):]).items()]
    return set(qsl) == set(lista)
dd24e16c3029c911b939af4a50f4c7c7a71c8722
13,499
import string
from operator import itemgetter
from random import randint, shuffle

# NOTE: `choice(..., size=..., replace=...)` matches numpy's API; assuming
# numpy.random.choice was intended here.
from numpy.random import choice


def equationMaker(congruency=None, beat_type=None, structure=None, n=None,
                  perms=None, catch=False):
    """
    Function to create equation stimuli, like in Landy & Goldstone,
    e.g. "b + d * f + y"

    required inputs:
    congruency: 'congruent' or 'incongruent'
    beat_type : 'binary_beat' or 'ternary_beat'
    structure : '+*+' or '*+*'
    n         : how many equations to generate

    outputs:
    a list of trial dictionaries of the length specified by n
    """
    output_list = []
    alphabet = list(string.ascii_lowercase)
    letters_to_remove = ['i', 'l', 'o']  # because of similarity with other symbols
    alphabet = [letter for letter in alphabet if letter not in letters_to_remove]  # final letter list
    op = list(structure)  # list of operands
    # op = [x if x != "*" else "times" for x in op]  # use this line for experimenting with speech stims

    eq_per_perm = int(n / len(perms))  # number of equations per permutation
    # assert eq_per_perm.is_integer(), "length of perms must be evenly divisible into n"
    perms = perms * eq_per_perm
    shuffle(perms)

    for eq in range(n):
        l = list(choice(alphabet, size=5, replace=False))
        equation = [l[0], op[0], l[1], op[1], l[2], op[2], l[3]]
        p = itemgetter(*perms[eq][0])(l)  # creates permutation of letter ordering for this iteration
        probe = [p[0], op[0], p[1], op[1], p[2], op[2], p[3]]
        if catch:
            cat_idx = 2 * randint(0, 3)  # chooses one of the 4 letter indices
            probe[cat_idx] = l[4]  # replace with other random letter not in stimulus
            trial_type = 'catch'
        else:
            trial_type = 'main'
        probe = ' '.join(probe)

        # add info on 'validity' and 'sensitivity' based on permutation used
        if perms[eq][1] <= 4:
            sensitivity = 'insensitive'
        else:
            sensitivity = 'sensitive'
        if structure == '+*+':
            if (perms[eq][1] <= 2) or (5 <= perms[eq][1] <= 6):
                validity = 'True'
            else:
                validity = 'False'
        elif structure == '*+*':
            if (perms[eq][1] <= 2) or (7 <= perms[eq][1] <= 8):
                validity = 'True'
            else:
                validity = 'False'
        elif structure == '+++':
            sensitivity = 'neutral'
            if catch:
                validity = 'False'
            else:
                validity = 'True'

        # assemble trial dictionary
        trial_dict = {'stim': equation,
                      'beat_type': beat_type,
                      'congruency': congruency,
                      'structure': structure,
                      'stim_number': eq + 1,
                      'probe': probe,
                      'validity': validity,
                      'sensitivity': sensitivity,
                      'trial_type': trial_type}
        output_list.append(trial_dict)
    return output_list
b2a81696055c77fa8803ab218e7b115a66a542aa
13,500
def get_deadline_delta(target_horizon):
    """Returns number of days between official contest submission deadline date
    and start date of target period (14 for week 3-4 target, as it's 14 days
    away, 28 for week 5-6 target, as it's 28 days away)

    Args:
        target_horizon: "34w" or "56w" indicating whether target period is
            weeks 3 & 4 or weeks 5 & 6
    """
    if target_horizon == "34w":
        deadline_delta = 14
    elif target_horizon == "56w":
        deadline_delta = 28
    else:
        raise ValueError("Unrecognized target_horizon " + target_horizon)
    return deadline_delta
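# A minimal usage sketch for get_deadline_delta above; the expected values
# follow directly from its docstring.
assert get_deadline_delta("34w") == 14
assert get_deadline_delta("56w") == 28
# get_deadline_delta("12w")  # would raise ValueError: Unrecognized target_horizon 12w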
df09b04fc2e7065056b724cfe5d8966c06240b79
13,501
import tensorflow as tf  # assumed import; the snippet uses `tf` directly


def perform_tensorflow_model_inference(model_name, sample):
    """
    Perform evaluations from model (must be configured)

    Args:
        model_name ([type]): [description]
        sample ([type]): [description]

    Returns:
        [type]: [description]
    """
    reloaded_model = tf.keras.models.load_model(model_name)
    input_dict = {name: tf.convert_to_tensor([value])
                  for name, value in sample.items()}
    predictions = reloaded_model.predict(input_dict)
    print('Prediction: ', predictions)
    # prob = tf.nn.sigmoid(predictions[0])
    return predictions
3c374dd76d4d40dbaffc7758694ff358ad1aefeb
13,502
def check_ntru(f, g, F, G):
    """Check that f * G - g * F = q mod (x ** n + 1)."""
    a = karamul(f, G)
    b = karamul(g, F)
    c = [a[i] - b[i] for i in range(len(f))]
    return (c[0] == q) and all(coef == 0 for coef in c[1:])
1c2ff2fbaadcdf80e5fd9ac49f39a301c9606ada
13,503
import numpy as np  # assumed import; the snippet relies on numpy throughout


def Search_tau(A, y, S, args, normalize=True, min_delta=0):
    """
    Complete parameter search for sparse regression method S.

    Input:
        A, y      : from linear system Ax=y
        S         : sparse regression method
        args      : arguments for sparse regression method
        normalize : boolean. Normalize columns of A?
        min_delta : minimum change in tau

    Returns:
        X   : list of all possible outputs of S(A,y,tau)
        Tau : list of values of tau corresponding to each x in X
    """
    X = []
    Tau = []
    tau = 0

    # Normalize
    if normalize:
        normA = np.linalg.norm(A, axis=0)
        A = A @ np.diag(normA**-1)

    for j in range(2**A.shape[1]):
        # Apply sparse regression
        x, delta_tau = S(A, y, tau, args)
        delta_tau = np.max([delta_tau, min_delta])
        X.append(x)
        Tau.append(tau)

        # Break condition
        if np.max(np.abs(x)) == 0 or delta_tau == np.inf:
            break

        # Update tau
        tau = tau + delta_tau

    # Renormalize x
    if normalize:
        X = [np.diag(normA**-1) @ x for x in X]

    return X, Tau
30c74b0fed304df8851b9037e7091fb95be58554
13,505
def get_entry_accounts(entry: Directive) -> list[str]:
    """Accounts for an entry.

    Args:
        entry: An entry.

    Returns:
        A list with the entry's accounts ordered by priority: For
        transactions the posting accounts are listed in reverse order.
    """
    if isinstance(entry, Transaction):
        return list(reversed([p.account for p in entry.postings]))
    if isinstance(entry, Custom):
        return [val.value for val in entry.values if val.dtype == ACCOUNT_TYPE]
    if isinstance(entry, Pad):
        return [entry.account, entry.source_account]
    account_ = getattr(entry, "account", None)
    if account_ is not None:
        return [account_]
    return []
dec8da3ced1956b4ae4dca08e2de812c66dcb412
13,506
def enter_fastboot(adb_serial, adb_path=None):
    """Enters fastboot mode by calling 'adb reboot bootloader' for the adb_serial provided.

    Args:
        adb_serial (str): Device serial number.
        adb_path (str): optional alternative path to adb executable

    Raises:
        RuntimeError: if adb_path is invalid or adb executable was not found
            by get_adb_path.

    Returns:
        str: Output from calling 'adb reboot' or None if call fails with
        non-zero return code.

    Note:
        If adb_path is not provided then path returned by get_adb_path will
        be used instead. If adb returns a non-zero return code then None
        will be returned.
    """
    return _adb_command(("reboot", "bootloader"), adb_serial, adb_path=adb_path)
9f7a8dfe8d0ce47a172cf7d07feb1bd5d2e8b273
13,507
def thesaurus_manager_menu_header(context, request, view, manager):  # pylint: disable=unused-argument
    """Thesaurus manager menu header"""
    return THESAURUS_MANAGER_LABEL
7c37e69d4a662e4a155ed6a63a473e2eb52fe28b
13,508
def create_compiled_keras_model():
    """Create compiled keras model."""
    model = models.create_keras_model()
    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        optimizer=utils.get_optimizer_from_flags('client'),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model
252fe18678e302e216b7b05121dfedd3bb46a180
13,509
def psycopg2_string():
    """
    Generates a connection string for psycopg2
    """
    return 'dbname={db} user={user} password={password} host={host} port={port}'.format(
        db=settings.DATABASES['default']['NAME'],
        user=settings.DATABASES['default']['USER'],
        password=settings.DATABASES['default']['PASSWORD'],
        host=settings.DATABASES['default']['HOST'],
        port=settings.DATABASES['default']['PORT'],
    )
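# A usage sketch for psycopg2_string above, assuming a configured Django
# settings module; psycopg2.connect accepts a DSN string of exactly this form.
import psycopg2

conn = psycopg2.connect(psycopg2_string())
with conn.cursor() as cur:
    cur.execute("SELECT 1")
    print(cur.fetchone())  # (1,)
conn.close()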
187fe1b576337613f791df657fd76ca8a4f783df
13,511
import numpy as np  # assumed import


def get_phase_relation(protophase: np.ndarray, N: int = 0) -> np.ndarray:
    """
    relation between protophase and phase

    Parameters
    ----------
    protophase : np.ndarray
    N : int, optional
        number of Fourier terms to be used

    Returns
    -------
    np.ndarray
        phase (protophase from 0 to 2pi)
    """
    phase = np.linspace(0, np.pi * 2, 5000) + np.zeros(5000) * 1j
    new_phase = phase.copy()
    if N == 0:
        N = protophase.size
    for n in range(1, N + 1):
        Sn = fourier_coefficient(protophase, n)
        new_phase = new_phase + 2 * Sn * (np.exp(1j * n * phase) - 1) / (1j * n)
    return new_phase
828f200f55f8d17071a51244465311f8c99866f7
13,512
import re


def handle_articlepeople(utils, mention):
    """
    Handles #articlepeople functionality.

    Parameters
    ----------
    utils : `Utils object`
        extends tweepy api wrapper
    mention : `Status object`
        a single mention

    Returns
    -------
    None
    """
    urls = re.findall(r'(https?://[^\s]+)', mention.text)
    if not urls or len(urls) != 1:
        utils.rundown.update_status(
            "@%s to use the #articlepeople service, your message should be "
            "in the following format: @ rundown_bot #articlepeople url"
            % mention.user.screen_name,
            mention.id)
    else:
        article = ArticleReader(url=urls[0])
        people = article.get_people()
        if not people:
            utils.rundown.update_status(
                "@%s Hi! I didn't find any people in that article :("
                % mention.user.screen_name,
                mention.id)
        else:
            people = ", ".join(people)
            utils.rundown.update_status(
                "@%s Hi! I found these people: %s"
                % (mention.user.screen_name, people),
                mention.id)
    return None
efdf2d7cda6124a163290aa7c3197a7462703749
13,513
from pathlib import Path


def bak_del_cmd(filename: Path, bakfile_number: int, quietly=False):
    """ Deletes a bakfile by number """
    console = Console()
    _bakfile = None
    bakfiles = db_handler.get_bakfile_entries(filename)
    if not bakfiles:
        console.print(f"No bakfiles found for {filename}")
        return False
    if not bakfile_number:
        try:
            _bakfile, bakfile_number = \
                __do_select_bakfile(bakfiles,
                                    select_prompt=(("Delete which .bakfile?"),
                                                   default_select_prompt[0]),
                                    return_index=True)
            bakfile_number += 1
        except TypeError:
            return True
    confirm = input(
        f"Confirming: Delete bakfile #{bakfile_number} for {filename}? "
        f"(y/N) ").lower() == 'y' if not quietly else True
    if confirm:
        _bakfile = _bakfile or __get_bakfile_entry(filename,
                                                   bakfile_number=bakfile_number,
                                                   console=console)
        if not _bakfile:
            return False
        __remove_bakfiles([_bakfile])
    return True
3ded066c23708a3fdcc2e38fb07706dc0e0cd628
13,514
def fetch_county_data(file_reference):
    """The name of this function is displayed to the user when there is a cache miss."""
    path = file_reference.filename
    return (pd
            .read_csv(path)
            .assign(date=lambda d: pd.to_datetime(d.date))
            )
d29459efc5a46901cce970c2ddf4e499094f1aea
13,515
def preston_sad(abund_vector, b=None, normalized='no'):
    """Plot histogram of species abundances on a log2 scale"""
    if b is None:
        q = np.exp2(list(range(0, 25)))
        b = q[(q <= max(abund_vector) * 2)]
    if normalized == 'no':
        hist_ab = np.histogram(abund_vector, bins=b)
    if normalized == 'yes':
        hist_ab_norm = np.histogram(abund_vector, bins=b)
        hist_ab_norm1 = hist_ab_norm[0] / (b[0:len(hist_ab_norm[0])])
        hist_ab_norm2 = hist_ab_norm[1][0:len(hist_ab_norm[0])]
        hist_ab = (hist_ab_norm1, hist_ab_norm2)
    return hist_ab
97eec01c5d23ca7b48951d4c62c7066b77ffb467
13,516
def exp_rearrangement():
    """Example demonstrating Word-Blot for pairwise local similarity search on
    two randomly generated sequences with motif sequences violating
    collinearity :math:`S=M_1M_2M_3, T=M'_1M'_1M'_3M'_2` where motif pairs
    :math:`(M_i, M'_i)_{i=1,2,3}` have lengths 200, 400, 600 and are related
    by match probabilities 0.95, 0.85, and 0.75, respectively.

    .. figure:: https://www.dropbox.com/s/nsvsf5gaui6t9ww/rearrangement.png?raw=1
       :target: https://www.dropbox.com/s/nsvsf5gaui6t9ww/rearrangement.png?raw=1
       :alt: lightbox

       Dynamic programming scores of the forward pass of Smith Waterman are
       shown in color code (*left*) with seeds (word length 6) grey intensity
       coded according to the local match probability assigned by Word-Blot
       (minimum similarity length 200). Similar segments reported by
       Word-Blot are shown as grey diagonal strips (*left*) and schematically
       (*right*) color coded by their Word-Blot estimated match probabilities
       (note agreement with true match probabilities).
    """
    # NOTE we are running whole table DP later here; be careful with size
    K = 200
    wordlen = 6
    A = Alphabet('ACGT')
    WB_kw = {'g_max': .2, 'sensitivity': .9, 'alphabet': A,
             'wordlen': wordlen, 'path': ':memory:', 'log_level': logging.INFO}

    # homologies
    Hs = [rand_seq(A, i) for i in [i * K for i in range(1, 4)]]
    ps = [.95, .85, .75]
    Ms = []
    for p_match in ps:
        subst = gap = 1 - np.sqrt(p_match)
        print(subst, gap)
        Ms.append(
            MutationProcess(A, subst_probs=subst, ge_prob=gap, go_prob=gap)
        )

    # connector junk
    def J(): return rand_seq(A, 2 * K)

    S = J() + Hs[0] + J() + Hs[1] + J() + Hs[2] + J()
    Hs = [M.mutate(hom)[0] for hom, M in zip(Hs, Ms)]
    T = J() + Hs[0] + J() + Hs[0] + Hs[2] + J() + Hs[1] + J()

    fig = plt.figure(figsize=(9, 6))
    gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
    ax_seeds = plt.subplot(gs[0])
    ax_mapping = plt.subplot(gs[1])

    WB = WordBlot(S, T, **WB_kw)
    p_min = .95 * min(ps)
    scored_seeds = WB.score_seeds(K)
    scored_seeds = [(WB.to_ij_coordinates(*rec['seed']), rec['p'])
                    for rec in scored_seeds]
    plot_seeds(ax_seeds, [x[0] for x in scored_seeds])

    cmap = plt.cm.get_cmap('plasma')
    sim_segments = list(WB.similar_segments(K_min=K, p_min=p_min))
    min_p_obs = min(rec['p'] for rec in sim_segments)
    max_p_obs = max(rec['p'] for rec in sim_segments)
    for rec in sim_segments:
        print(rec)
        seg = rec['segment']
        (i_start, i_end), (j_start, j_end) = WB.to_ij_coordinates_seg(seg)
        i_ctr, j_ctr = (i_start + i_end) / 2, (j_start + j_end) / 2
        color = cmap((rec['p'] - min_p_obs) / (max_p_obs - min_p_obs))[:3]
        plot_similar_segment(ax_seeds, seg, lw=5, alpha=.1, c='k')
        ax_mapping.plot([1, 1], [i_start, i_end], lw=3, c=color, alpha=.7)
        ax_mapping.plot([2, 2], [j_start, j_end], lw=3, c=color, alpha=.7)
        ax_mapping.plot([1, 2], [i_ctr, j_ctr], marker='o', markersize=7,
                        lw=2, c=color, alpha=.4)
    ax_mapping.set_xticks([1, 2])
    ax_mapping.set_xticklabels(['S', 'T'])
    ax_mapping.set_xlim(0, 3)
    ax_mapping.set_ylim(0, None)

    ax_c = make_axes_locatable(ax_mapping).append_axes('right', size='4%',
                                                       pad=0.05)
    norm = matplotlib.colors.Normalize(vmin=min_p_obs, vmax=max_p_obs)
    matplotlib.colorbar.ColorbarBase(ax_c, cmap=cmap, norm=norm,
                                     orientation='vertical')

    aligner_kw = {
        'match_score': 1 / p_min - 1,
        'mismatch_score': -1,
        'ge_score': -1,
        'go_score': 0,
        'alnmode': STD_MODE,
        'alntype': LOCAL,
    }
    print(len(S), len(T))
    with Aligner(S, T, **aligner_kw) as aligner:
        aligner.solve()
        scores = np.array(aligner.table_scores())
        min_score = min(scores.flatten())
        max_score = max(scores.flatten())
        ax_seeds.imshow(scores, cmap='plasma', alpha=.3)
        ax_c = make_axes_locatable(ax_seeds).append_axes('right', size='4%',
                                                         pad=0.05)
        norm = matplotlib.colors.Normalize(vmin=min_score, vmax=max_score)
        matplotlib.colorbar.ColorbarBase(ax_c, cmap='plasma', norm=norm,
                                         orientation='vertical')

    adjust_pw_plot(ax_seeds, len(S), len(T))
    ax_seeds.set_xlabel('T')
    ax_seeds.set_ylabel('S')
    fig.tight_layout()
    savefig(fig, 'rearrangement.png')
fdd7650d2ab0340bd11d150f7f6ad5e60ddd2d09
13,517
def package_install_site(name='', user=False, plat_specific=False):
    """pip-inspired, distutils-based method for fetching the
    default install location (site-packages path).

    Returns virtual environment or system site-packages, unless `user=True`
    in which case returns user-site (typ. under `~/.local/` on linux).

    If there's a distinction (on a particular system) between platform
    specific and pure python package locations, set `plat_specific=True`
    to retrieve the former.
    """
    dist = Distribution({'name': name})
    dist.parse_config_files()
    inst = dist.get_command_obj('install', create=True)
    # NOTE: specifying user=True will create user-site
    if user:
        inst.user = user
        inst.prefix = ""
    inst.finalize_options()

    # platform-specific site vs. purelib (platform-independent) site
    if plat_specific:
        loc = inst.install_platlib
    else:
        loc = inst.install_purelib

    # install_lib specified in setup.cfg has highest precedence
    if 'install_lib' in dist.get_option_dict('install'):
        loc = inst.install_lib

    return loc
31b477208954886f847bd33651464f386a4e6adf
13,518
def atlas_slice(atlas, slice_number):
    """
    A function that pulls the data for a specific atlas slice.

    Parameters
    ----------
    atlas: nrrd
        Atlas segmentation file that has a stack of slices.
    slice_number: int
        The number in the slice that corresponds to the fixed image
        for registration.

    Returns
    -------
    sagittal: array
        Sagittal view being pulled from the atlas.
    coronal: array
        Coronal view being pulled from the atlas.
    horizontal: array
        Horizontal view being pulled from the atlas.
    """
    epi_img_data2 = atlas.get_fdata()
    sagittal = epi_img_data2[140, :, :]
    coronal = epi_img_data2[:, slice_number, :]
    horizontal = epi_img_data2[:, :, 100]
    return sagittal, coronal, horizontal
bafe5d886568203792b0f6178302f3ca5d536e5b
13,519
def enviar_cambio_estado(request):
    """Change the state of a technical note and notify STIB staff by email."""
    if request.method == "POST" or request.POST.get("nota_tecnica"):
        try:
            nota_tecnica = get_object_or_404(NotasTecnicas,
                                             pk=request.POST.get("nota_tecnica"))
            nota_tecnica.estado = request.POST.get("estado")
            nota_tecnica.save()
            # -- send an email notifying the state change
            subject = "Nota Técnica - Cambio de estado"
            ctx = {
                'administracion': nota_tecnica.edificio.user.perfil.nombre_comercial,
                'edificio': nota_tecnica.edificio,
                'estado': NotasTecnicas.ESTADOS[int(request.POST.get("estado")) - 1][1],
                'descripcion': nota_tecnica.descripcion,
                'fecha': nota_tecnica.creado,
                'comentario': request.POST.get("comentario")
            }
            body = render_to_string(
                'emails/email_cambio_estado_nota_tecnica_notificaciones.html', ctx)
            _send_email(STIB_TO_EMAIL, subject, body)
            # -- / end of state-change email notification
            messages.success(request, "Se ha cambiado el estado de la Nota Técnica.")
        except Exception:  # bare `except:` in the original; narrowed to Exception
            messages.error(request, "Error al cambiar el estado de la Nota Técnica.")
        return HttpResponseRedirect(reverse('notas-tecnicas:detail',
                                            args=[request.POST.get("nota_tecnica")]))
    else:
        messages.success(request, "Error.")
        return HttpResponseRedirect("/")
176a9a9d1bf7fd0ba1bec0c34526180581d33a8d
13,520
from typing import Dict

import aiohttp


async def head(url: str) -> Dict:
    """Fetch headers returned by an http HEAD request.

    :param str url: The URL to perform the HEAD request for.

    :rtype: dict
    :returns: dictionary of lowercase headers
    """
    async with aiohttp.request("HEAD", url) as res:
        response_headers = res.headers
    return {k.lower(): v for k, v in response_headers.items()}
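# A usage sketch for the head() coroutine above; the URL is an illustrative
# placeholder, and network access is assumed.
import asyncio

async def main():
    headers = await head("https://example.com")
    print(headers.get("content-type"))

asyncio.run(main())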
b4decbfb4e92863c07c5202e2c884c02e590943f
13,522
def node_vectors(node_id):
    """Get the vectors of a node.

    You must specify the node id in the url.
    You can pass direction (incoming/outgoing/all) and failed
    (True/False/all).
    """
    exp = Experiment(session)

    # get the parameters
    direction = request_parameter(parameter="direction", default="all")
    failed = request_parameter(parameter="failed", parameter_type="bool",
                               default=False)
    for x in [direction, failed]:
        if type(x) == Response:
            return x

    # execute the request
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/vectors, node does not exist")

    try:
        vectors = node.vectors(direction=direction, failed=failed)
        exp.vector_get_request(node=node, vectors=vectors)
        session.commit()
    except Exception:
        return error_response(error_type="/node/vectors GET server error",
                              status=403,
                              participant=node.participant)

    # return the data
    return success_response(vectors=[v.__json__() for v in vectors])
c61d85e4f4ae975bdd015f6bd181d1ae78aa245d
13,524
def newFlatDict(store, selectKeys=None, labelPrefix=''):
    """
    Takes a list of dictionaries and returns a dictionary of 1D lists.

    If a dictionary did not have that key or list element, then 'None' is
    put in its place.

    Parameters
    ----------
    store : list of dicts
        The dictionaries would be expected to have many of the same keys.
        Any dictionary keys containing lists in the input have been split
        into multiple numbered keys
    selectKeys : list of strings, optional
        The keys whose data will be included in the return dictionary.
        Default ``None``, which results in all keys being returned
    labelPrefix : string
        An identifier to be added to the beginning of each key string.

    Returns
    -------
    newStore : dict
        The new dictionary with the keys from the keySet and the values as
        1D lists with 'None' if the keys, value pair was not found in the
        store.

    Examples
    --------
    >>> store = [{'list': [1, 2, 3, 4, 5, 6]}]
    >>> newFlatDict(store)
    {'list_[0]': [1], 'list_[1]': [2], 'list_[2]': [3], 'list_[3]': [4], 'list_[4]': [5], 'list_[5]': [6]}
    >>> store = [{'string': 'string'}]
    >>> newFlatDict(store)
    {'string': ["'string'"]}
    >>> store = [{'dict': {1: {3: "a"}, 2: "b"}}]
    >>> newFlatDict(store)
    {'dict_1_3': ["'a'"], 'dict_2': ["'b'"]}
    """
    keySet = flatDictKeySet(store, selectKeys=selectKeys)

    newStore = {}

    if labelPrefix:
        labelPrefix += "_"

    for key, loc in keySet.items():
        newKey = labelPrefix + str(key)

        if isinstance(loc, dict):
            subStore = [s[key] for s in store]
            keyStoreSet = newFlatDict(subStore, labelPrefix=newKey)
            newStore.update(keyStoreSet)
        elif isinstance(loc, (list, np.ndarray)):
            for locCo in loc:
                tempList = []
                for s in store:
                    rawVal = s.get(key, None)
                    if rawVal is None:
                        tempList.append(None)
                    else:
                        tempList.append(listSelection(rawVal, locCo))
                newStore.setdefault(newKey + "_" + str(locCo), tempList)
        else:
            vals = [repr(s.get(key, None)) for s in store]
            newStore.setdefault(newKey, vals)

    return newStore
d44dec60de06779a8e965eb9e3771c66dd25e10b
13,525
from typing import Any, List, Union


async def get_races(
    db: Any, token: str, raceplan_id: str
) -> List[Union[IndividualSprintRace, IntervalStartRace]]:
    """Check that the raceplan has races."""
    races = await RacesService.get_races_by_raceplan_id(db, raceplan_id)
    if len(races) == 0:
        raise NoRacesInRaceplanException(
            f"No races in raceplan {raceplan_id}. Cannot proceed."
        )
    return races
393a38992be404e5a82517b13d24e85b42b57b30
13,526
from typing import Any


def yaml_load(data: str) -> Any:
    """Deserializes a yaml representation of known objects into those objects.

    Parameters
    ----------
    data : str
        The serialized YAML blob.

    Returns
    -------
    Any
        The deserialized Python objects.
    """
    yaml = yaml_import(raise_error=True)
    return yaml.safe_load(data)
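# A usage sketch for yaml_load above, assuming yaml_import() resolves to an
# installed PyYAML module.
config = yaml_load("retries: 3\nhosts:\n  - a.example.com\n  - b.example.com")
print(config["retries"])   # 3
print(config["hosts"][0])  # a.example.com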
2e721698ef0bde3bd084127556d41503417ee516
13,528
def _get_current_branch():
    """Retrieves the branch Git is currently in.

    Returns:
        (str): The name of the current Git branch.
    """
    branch_name_line = _run_cmd(GIT_CMD_GET_STATUS).splitlines()[0]
    return branch_name_line.split(' ')[2]
1b0d93d6e69205981c06f4dc8a45cf21259f4ccd
13,529
import torch.utils.model_zoo as model_zoo  # assumed import; the snippet uses `model_zoo` directly


def PGAN(pretrained=False, *args, **kwargs):
    """
    Progressive growing model.

    pretrained (bool): load a pretrained model?
    model_name (string): if pretrained, load one of the following models:
        celebaHQ-256, celebaHQ-512, DTD, celeba, cifar10. Default is celebaHQ.
    """
    # imported inside the function: the flattened original placed this import
    # at module level, where the class alias would be shadowed by this very
    # function and the call below would recurse
    from models.progressive_gan import ProgressiveGAN as PGAN

    if 'config' not in kwargs or kwargs['config'] is None:
        kwargs['config'] = {}

    model = PGAN(useGPU=kwargs.get('useGPU', True),
                 storeAVG=True,
                 **kwargs['config'])

    checkpoint = {"celebAHQ-256": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaHQ_s6_i80000-6196db68.pth',
                  "celebAHQ-512": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaHQ16_december_s7_i96000-9c72988c.pth',
                  "DTD": 'https://dl.fbaipublicfiles.com/gan_zoo/PGAN/testDTD_s5_i96000-04efa39f.pth',
                  "celeba": "https://dl.fbaipublicfiles.com/gan_zoo/PGAN/celebaCropped_s5_i83000-2b0acc76.pth"}
    if pretrained:
        if "model_name" in kwargs:
            if kwargs["model_name"] not in checkpoint.keys():
                raise ValueError("model_name should be in "
                                 + str(checkpoint.keys()))
        else:
            print("Loading default model : celebaHQ-256")
            kwargs["model_name"] = "celebAHQ-256"
        state_dict = model_zoo.load_url(checkpoint[kwargs["model_name"]],
                                        map_location='cpu')
        model.load_state_dict(state_dict)
    return model
cb78031a6aeca887c2ed17d02419c2b551a4b1ba
13,530
def _runge_kutta_step(func, y0, f0, t0, dt, tableau=_DORMAND_PRINCE_TABLEAU,
                      name=None):
    """Take an arbitrary Runge-Kutta step and estimate error.

    Args:
        func: Function to evaluate like `func(y, t)` to compute the time
            derivative of `y`.
        y0: Tensor initial value for the state.
        f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
        t0: float64 scalar Tensor giving the initial time.
        dt: float64 scalar Tensor giving the size of the desired time step.
        tableau: optional _ButcherTableau describing how to take the
            Runge-Kutta step.
        name: optional name for the operation.

    Returns:
        Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
        the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at
        `t1`, estimated error at `t1`, and a list of Runge-Kutta coefficients
        `k` used for calculating these terms.
    """
    with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
        y0 = ops.convert_to_tensor(y0, name='y0')
        f0 = ops.convert_to_tensor(f0, name='f0')
        t0 = ops.convert_to_tensor(t0, name='t0')
        dt = ops.convert_to_tensor(dt, name='dt')
        dt_cast = math_ops.cast(dt, y0.dtype)

        k = [f0]
        for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
            ti = t0 + alpha_i * dt
            yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
            k.append(func(yi, ti))

        if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
            # This property (true for Dormand-Prince) lets us save a few FLOPs.
            yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)

        y1 = array_ops.identity(yi, name='%s/y1' % scope)
        f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
        y1_error = _scaled_dot_product(dt_cast, tableau.c_error, k,
                                       name='%s/y1_error' % scope)
        return (y1, f1, y1_error, k)
f106c6842a7f9faed6e37bcb4305adbd4bd83146
13,532
def _create_serialize(cls, serializers):
    """ Create a new serialize method with extra serializer functions. """
    def serialize(self, value):
        for serializer in serializers:
            value = serializer(value)
        value = super(cls, self).serialize(value)
        return value
    serialize.__doc__ = serializers[0].__doc__
    return serialize
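# A sketch of how a helper like _create_serialize is typically wired into a
# class hierarchy; Base, Field, and strip are hypothetical names used for
# illustration only.
class Base:
    def serialize(self, value):
        return value

def strip(value):
    """Strip surrounding whitespace."""
    return value.strip()

class Field(Base):
    pass

# chain the extra serializers in front of Base.serialize
Field.serialize = _create_serialize(Field, [strip, str.upper])
print(Field().serialize("  hello "))  # HELLO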
522f6a14fe3e2bca70c141f14dc8b400be1ca680
13,533
def confusion_matrix(y_true, y_pred, labels=None):
    """Compute confusion matrix to evaluate the accuracy of a classification

    By definition a confusion matrix cm is such that cm[i, j] is equal
    to the number of observations known to be in group i but predicted
    to be in group j.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        true targets
    y_pred : array, shape = [n_samples]
        estimated targets
    labels : array, shape = [n_classes]
        lists all labels occurring in the dataset. If none is given, those
        that appear at least once in y_true or y_pred are used.

    Returns
    -------
    CM : array, shape = [n_classes, n_classes]
        confusion matrix

    References
    ----------
    http://en.wikipedia.org/wiki/Confusion_matrix
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels, dtype=int)  # np.int was removed in NumPy 1.24; plain int is equivalent

    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))

    # convert yt, yp into index
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]

    CM = np.asarray(
        coo_matrix(
            (np.ones(y_true.shape[0]), (y_true, y_pred)),
            shape=(n_labels, n_labels), dtype=int
        ).todense()
    )

    return CM
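# A small check of the convention above (rows are true classes, columns are
# predictions); assumes unique_labels (sklearn.utils.multiclass) and
# coo_matrix (scipy.sparse) are in scope, as the snippet requires.
y_true = [0, 0, 1, 1, 2]
y_pred = [0, 1, 1, 1, 2]
print(confusion_matrix(y_true, y_pred))
# [[1 1 0]
#  [0 2 0]
#  [0 0 1]]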
53d143a5388b23a61f927f4b8b4407cf8a051d3f
13,534
def dialect_selector(s):
    """Return a dialect given its name."""
    s = s or 'ansi'
    lookup = {
        'ansi': ansi_dialect
    }
    return lookup[s]
e9232e22e2ef0789d98a16c8e2f3fd7efa5a7981
13,535
import unittest


def importlib_only(fxn):
    """Decorator to skip a test if using __builtins__.__import__."""
    return unittest.skipIf(using___import__, "importlib-specific test")(fxn)
3cdc1ac5e0a2062b6822291973770459f6bf2318
13,536
from numpy import isnan, nan  # assumed imports; the snippet uses `isnan` and `nan` directly


def bf(x):
    """
    returns the given bitfield value from within a register

    Parameters:
        x: a pandas DataFrame line - with a column named BF_NUMBER which
           holds the definition of the given bit_field
        reg_val: integer

    Returns
    -------
    res: str
        the bit field value from within the register
    """
    try:
        reg_val = int(x[REG_VALUE][2:], 16)
    except Exception:  # bare `except:` in the original; narrowed to Exception
        if isnan(x[REG_VALUE]):
            return nan
        else:
            raise
    if str(x[BF_NUMBER]).find("..") > 0:
        # Example:
        #   BF=3..1 => min_bit = 1, max_bit = 3
        #   mask = 14 = 0xE
        #   (1<<4) - (1<<1) = 16 - 2 = 14
        min_bit = int(x[BF_NUMBER].split("..")[1])
        max_bit = int(x[BF_NUMBER].split("..")[0])
        mask = (1 << (max_bit + 1)) - (1 << min_bit)
        res = mask & reg_val
        res = res >> min_bit
        res = "{:04x}".format(res).upper()
        res = "0x" + res
    else:
        mask = 1 << int(x[BF_NUMBER])
        res = mask & reg_val
        res = res >> int(x[BF_NUMBER])
        res = "{:04x}".format(res).upper()
        res = "0x" + res
    return res
6167666cf7c6c5df8b121b2f418d29ff95df8898
13,537
from typing import Union

import cv2 as cv  # assumed imports; the snippet uses `cv` and `np` directly
import numpy as np


def get_mean_brightness(
    frame: np.ndarray,
    mask: Union[np.ndarray, None] = None,
) -> int:
    """Return the mean brightness of a frame.

    Load the frame, calculate a histogram, and iterate through the
    bins until half or more of the pixels have been counted.

    Args:
        `frame`: A video data frame.
        `mask`: An `np.ndarray` instance that represents a bit mask, or
            `None`. (See, *e.g.*,
            <https://docs.opencv.org/master/d1/db7/tutorial_py_histogram_begins.html>.)

    Returns:
        An integer representing the mean brightness of the frame. (Note
        that this is defined relative to the number of bins in the
        histogram.)
    """
    try:
        grayscale_frame = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
    except Exception as error:
        print(f'Could not convert frame to grayscale. ({error})')
        return False
    num_pixels = frame.shape[0] * frame.shape[1]
    histogram = cv.calcHist([grayscale_frame], [0], mask, [RANGE], [0, RANGE])
    pixel_count = 0
    bin_index = 0
    while pixel_count / num_pixels <= 0.5:
        pixel_count += histogram[bin_index]
        bin_index += 1
    return bin_index
825afe97500f247aee4b1ccb045555fb21300cfe
13,538
def rearrange(s):
    """
    Args:
        s
    Returns:
        []
    """
    if not can_arrange_palindrome2(s):
        return []
    m = {}
    for c in s:
        if c in m:
            m[c] += 1
        else:
            m[c] = 1
    middle = ""
    for k in m:
        if m[k] % 2 == 0:
            m[k] //= 2  # integer division; `/=` would turn the counts into floats in Python 3
        else:
            middle = k
    if middle:
        del m[middle]
    res = rec_rearrange("", m)
    palindromes = []
    for i in res:
        palindromes.append(i + middle + "".join(list(i)[::-1]))
    return palindromes
bb6e03d35cc3f786c52ce7535628e02b51abd3a0
13,539
def get_org_memberships(user_id: str):
    """Return a list of organizations and roles where the input user is a member"""
    query = (
        model.Session.query(model.Group, model.Member.capacity)
        .join(model.Member, model.Member.group_id == model.Group.id)
        .join(model.User, model.User.id == model.Member.table_id)
        .filter(
            model.User.id == user_id,
            model.Member.state == "active",
            model.Group.is_organization == True,  # noqa: E712 -- SQLAlchemy needs the explicit comparison
        )
        .order_by(model.Group.name)
    )
    return query.all()
eaa5ba796798289185816719a176efb31d7f25e6
13,540
def standardize_concentration(df, columns, unit="nM"):
    """Make all concentrations match the given unit.

    For a given DataFrame and column, convert mM, uM, nM, and pM
    concentration values to the specified unit (default nM). Rename the
    column to include ({unit}).

    Parameters
    ----------
    df : a pandas DataFrame
    columns : str or list
        column name(s) to be converted to the given unit
    unit : one of ["mM", "uM", "nM", "pM"], default "nM"

    Returns
    -------
    A modified dataframe.

    Examples
    --------
    >>> df.head()
      experiment   [DNA]
    0          A  100 nM
    1          B    1 uM
    >>> standardize_concentration(df, columns="[DNA]", unit="nM").head()
      experiment  [DNA] (nM)
    0          A       100.0
    1          B      1000.0
    """
    conversions_dict = {
        "mM to mM": 1,
        "mM to uM": 1000,
        "mM to nM": 1000000,
        "mM to pM": 1000000000,
        "uM to mM": 1 / 1000,
        "uM to uM": 1,
        "uM to nM": 1000,
        "uM to pM": 1000000,
        "nM to mM": 1 / 1000000,
        "nM to uM": 1 / 1000,
        "nM to nM": 1,
        "nM to pM": 1000,
        "pM to mM": 1 / 1000000000,
        "pM to uM": 1 / 1000000,
        "pM to nM": 1 / 1000,
        "pM to pM": 1,
    }

    # don't modify in place
    df = df.copy().reset_index(drop=True)

    if type(columns) == str:
        columns = [columns]

    for column in columns:
        for i, row in df.iterrows():
            # variables that didn't exist in all concatenated dfs will be represented as NaN
            if type(row[column]) is float:
                if np.isnan(row[column]):
                    df.loc[i, column] = 0
                    continue
                else:
                    raise RuntimeError(
                        f"Something has gone wrong in row {i}, column {column}. "
                        + f"Value is {row[column]}."
                    )
            molar_index = row[column].find("M")
            current_unit = row[column][molar_index - 1: molar_index + 1]
            if current_unit not in ["mM", "uM", "nM", "pM"]:
                raise RuntimeError(
                    f"Unit {current_unit} not recognized in row {i}, column {column}."
                )
            value = float(row[column][: molar_index - 1])
            df.loc[i, column] = value * conversions_dict[f"{current_unit} to {unit}"]

    df = df.rename(columns={column: f"{column} ({unit})" for column in columns})
    return df
79f889640faf10e5b66989b0444a235cba872fd2
13,541
from visonic import alarm as visonicalarm


def setup(hass, config):
    """ Setup the Visonic Alarm component."""
    global HUB
    HUB = VisonicAlarmHub(config[DOMAIN], visonicalarm)
    if not HUB.connect():
        return False

    HUB.update()

    # Load the supported platforms
    for component in ('sensor', 'alarm_control_panel'):
        discovery.load_platform(hass, component, DOMAIN, {}, config)

    return True
be11f167b393ed97d318f6f516c353ad1df39670
13,542
def generate_http_request_md_fenced_code_block(
    language=None,
    fence_string='```',
    **kwargs,
):
    """Wraps [``generate_http_request_code``](#generate_http_request_code)
    function result in a Markdown fenced code block.

    Args:
        fence_string (str): Code block fence string used wrapping the code.
            It does not perform any check about if the fenced string is a
            "valid" markdown code block fence string.
        **kwargs: All other optional arguments are passed to
            [``generate_http_request_code``](#generate_http_request_code)
            function.

    Examples:
        >>> generate_http_request_md_fenced_code_block(setup=False)
        "```python\\nreq = requests.get('http://localhost')\\n```"

        >>> generate_http_request_md_fenced_code_block(fence_string='~~~',
        ...                                            setup=False)
        "~~~python\\nreq = requests.get('http://localhost')\\n~~~"

    Returns:
        str: Fenced code block with HTTP request code snippet inside.
    """
    return '{fence_string}{language}\n{render}\n{fence_string}'.format(
        language=language if language else DEFAULT_LANGUAGE,
        render=generate_http_request_code(language=language, **kwargs),
        fence_string=fence_string,
    )
a34581e8c0d40542a625d222183adb601c60b408
13,544
def confident_hit_ratio(y_true, y_pred, cut_off=0.1):
    """
    Return the hit ratio of the true positives for confident molecules.
    Confident molecules are defined as those whose confidence values are
    higher than the cutoff.

    :param y_true:
    :param y_pred:
    :param cut_off: confidence value above which a prediction is considered
        confident
    :return:
    """
    actual_indexes = np.where(y_true == 1)[0]
    confident_indexes = np.where(y_pred > cut_off)[0]
    confident_hit = np.intersect1d(actual_indexes, confident_indexes)
    ratio = 1.0 * len(confident_hit) / len(actual_indexes)
    return ratio
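# A worked example of the metric above: two of the three true positives are
# predicted with confidence above the cutoff, so the ratio is 2/3.
import numpy as np

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([0.9, 0.05, 0.02, 0.4, 0.2])
print(confident_hit_ratio(y_true, y_pred, cut_off=0.1))  # 0.666...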
0a7dbe9f3d81b877c309fd1fffb2840ec71dbeee
13,545
import click


def onion(ctx, port, onion_version, private_key, show_private_key, detach):
    """
    Add a temporary onion-service to the Tor we connect to.

    This keeps an onion-service running as long as this command is
    running with an arbitrary list of forwarded ports.
    """
    if len(port) == 0:
        raise click.UsageError(
            "You must use --port at least once"
        )
    if private_key is not None:
        if onion_version == 3 and not private_key.startswith('ED25519-V3'):
            raise click.UsageError(
                "Private key type is not version 3"
            )
        if onion_version == 2 and not private_key.startswith('RSA1024'):
            raise click.UsageError(
                "Private key type is not version 2"
            )

    def _range_check(p):
        try:
            p = int(p)
            if p < 1 or p > 65535:
                raise click.UsageError(
                    "{} invalid port".format(p)
                )
        except ValueError:
            raise click.UsageError(
                "{} is not an int".format(p)
            )

    validated_ports = []
    for p in port:
        if ':' in p:
            remote, local = p.split(':', 1)
            _range_check(remote)
            # the local port can be an ip:port pair, or a unix:/
            # socket so we'll let txtorcon take care
            validated_ports.append((int(remote), local))
        else:
            _range_check(p)
            validated_ports.append(int(p))

    try:
        onion_version = int(onion_version)
        if onion_version not in (2, 3):
            raise ValueError()
    except ValueError:
        raise click.UsageError(
            "--onion-version must be 2 or 3"
        )

    cfg = ctx.obj
    return _run_command(
        carml_onion.run,
        cfg,
        list(validated_ports),
        onion_version,
        private_key,
        show_private_key,
        detach,
    )
7f36e967fc30877b504fda79699c7d3347a4f410
13,546
def determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon):
    """PB should be filtered if NMD, a truncation, or protein classification
    is not likely protein coding (intergenic, antisense, fusion,...)

    Args:
        row (pandas Series): protein classification row
        min_junc_after_stop_codon (int): minimum number of junctions after
            stop codon a protein can have. used in NMD determination

    Returns:
        int: 1 if should be filtered, 0 if should not be filtered
    """
    # filter out pbs that are artifacts or noncoding
    pclass = str(row['protein_classification'])
    num_junc_after_stop_codon = int(row['num_junc_after_stop_codon'])
    pclass_base_to_keep = ['pFSM', 'pNIC']
    pclass_base = str(row['protein_classification_base'])
    if (pclass_base not in pclass_base_to_keep
            and num_junc_after_stop_codon > min_junc_after_stop_codon):
        return 1
    elif 'trunc' in pclass:
        return 1
    elif 'intergenic' in pclass:
        return 1
    elif 'antisense' in pclass:
        return 1
    elif 'fusion' in pclass:
        return 1
    elif 'orphan' in pclass:
        return 1
    elif 'genic' in pclass:
        return 1
    return 0
29ab7ce53ac7569c4d8a29e8e8564eab33b3f545
13,547
from typing import MutableMapping
import hashlib


def _get_hashed_id(full_name: str, name_from_id: MutableMapping[int, str]) -> int:
    """Converts the string-typed name to int-typed ID."""
    # Built-in hash function will not exceed the range of int64, which is the
    # type of id in metadata artifact proto.
    result = int(hashlib.sha256(full_name.encode('utf-8')).hexdigest(),
                 16) % _INT64_MAX
    name_from_id[result] = full_name
    return result
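# A quick sketch of the two-way mapping _get_hashed_id builds; _INT64_MAX is
# assumed to be 2**63 - 1, matching the int64 range mentioned in the comment.
_INT64_MAX = 2**63 - 1  # assumed constant

name_from_id = {}
artifact_id = _get_hashed_id("pipeline/my_artifact", name_from_id)
print(artifact_id)                # deterministic: same name, same ID
print(name_from_id[artifact_id])  # pipeline/my_artifact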
2eadfac0369d33ae29e4c054691180720995ef93
13,549
import numpy as np  # assumed import


def find_adjustment(tdata: tuple, xdata: tuple, ydata: tuple,
                    numstept=10, numstepx=10, tol=1e-6) -> tuple:
    """
    Find best fit of data with temporal and spatial offset in range.

    Returns the tuple err, dt, dx.

    Finds a temporal and spatial offset to apply to the temporal and spatial
    locations of the lif data such that the corresponding elevation data has
    minimal absolute difference. find_adjustment takes a brute force
    approach, and will compare the difference in ydata at overlapping tdata
    and xdata locations for all offsets within plus or minus numstept and
    numstepx. By default 400 possible offsets are evaluated. tdata and xdata
    must be integer types in order to find the overlapping tdata and xdata
    locations.

    Raises a TypeError for some inputs. Raises a ValueError if there is no
    intersection in tdata & xdata.
    """
    if not (isinstance(tdata, tuple) and len(tdata) == 2):
        raise TypeError("tdata must be a tuple with length 2")
    elif not (tdata[0].dtype == int and tdata[1].dtype == int):
        raise TypeError(f"t in tdata must have dtype int but has dtypes "
                        f"{tdata[0].dtype} and {tdata[1].dtype}")
    elif not (isinstance(xdata, tuple) and len(xdata) == 2):
        raise TypeError("xdata must be a tuple with length 2")
    elif not (xdata[0].dtype == int and xdata[1].dtype == int):
        raise TypeError(f"x in xdata must have dtype int but has dtypes "
                        f"{xdata[0].dtype} and {xdata[1].dtype}")
    elif not (isinstance(ydata, tuple) and len(ydata) == 2):
        raise TypeError("ydata must be a tuple with length 2")

    # create all possible pairs of offsets in the range
    if numstept == 0:
        dt = np.asarray([0], dtype=int)
    else:
        dt = np.arange(-numstept, numstept + 1)
    if numstepx == 0:
        dx = np.asarray([0], dtype=int)
    else:
        dx = np.arange(-numstepx, numstepx + 1)
    DT, DX = tuple(np.meshgrid(dt, dx))
    pos = np.transpose(np.stack([DT.ravel(), DX.ravel()]))

    # for each possible offset in space and time, estimate the error
    err = np.empty(DT.ravel().shape)
    err[:] = np.nan  # invalid by default
    for idx, p in enumerate(pos):
        dt, dx = p
        _, tidx0, tidx1 = np.intersect1d(tdata[0], tdata[1] + dt,
                                         return_indices=True)
        _, xidx0, xidx1 = np.intersect1d(xdata[0], xdata[1] + dx,
                                         return_indices=True)
        # it is possible that dt and dx will push them out of overlapping
        # skip in that case (err[idx] = np.nan by default)
        if not (tidx0.size == 0 or xidx0.size == 0
                or tidx1.size == 0 or xidx1.size == 0):
            yidx0 = tuple(np.meshgrid(tidx0, xidx0, indexing='ij'))
            yidx1 = tuple(np.meshgrid(tidx1, xidx1, indexing='ij'))
            # err[idx] = np.mean(np.abs(ydata[0][yidx0] - ydata[1][yidx1]))
            err[idx] = np.mean((ydata[0][yidx0] - ydata[1][yidx1])**2)

    # error out if there is no intersection of the data for any offset
    if np.isnan(err).all():
        raise ValueError("xdata and tdata have no intersection")

    idx_min = np.nanargmin(err)
    dt, dx = pos[idx_min]
    return err[idx_min], dt, dx
4efe607c40606b1235a5f9d62c3002a673a47828
13,550
import yaml


def get_params():
    """Loads ./config.yml in a dict and returns it"""
    with open(HERE / 'config.yml') as file:
        # yaml.load without an explicit Loader is deprecated; safe_load
        # avoids executing arbitrary tags
        params = yaml.safe_load(file)
    return params
8e2e1b3ae47ff9a296aab7945562e3ea8ad43598
13,551
def get_textgrid(path_transcription):
    """Get data from TextGrid file"""
    data = textgriddf_reader(path_file=path_transcription)
    text_df = textgriddf_df(data, item_no=2)
    sentences = textgriddf_converter(text_df)
    return sentences
d3e037ff10488eb1eed777e008599769ddf9d81f
13,553
import http
from functools import wraps  # assumed import; the decorator uses @wraps


def accessible_required(f):
    """Decorator for an endpoint that requires a user have accessible or read
    permission in the given room.  The function must take a `room` argument
    by name, as is typically used with flask endpoints with a `<Room:room>`
    argument."""

    @wraps(f)
    def required_accessible_wrapper(*args, room, **kwargs):
        if not room.check_accessible(g.user):
            abort(http.NOT_FOUND)
        return f(*args, room=room, **kwargs)

    return required_accessible_wrapper
e4e13632963fb80377dcbdaa36e90c4c62dd9a1f
13,554
import warnings def make_erb_cos_filters_nx(signal_length, sr, n, low_lim, hi_lim, sample_factor, padding_size=None, full_filter=True, strict=True, **kwargs): """Create ERB cosine filters, oversampled by a factor provided by "sample_factor" Args: signal_length (int): Length of signal to be filtered with the generated filterbank. The signal length determines the length of the filters. sr (int): Sampling rate associated with the signal waveform. n (int): Number of filters (subbands) to be generated with standard sampling (i.e., using a sampling factor of 1). Note, the actual number of filters in the generated filterbank depends on the sampling factor, and will also include lowpass and highpass filters that allow for perfect reconstruction of the input signal (the exact number of lowpass and highpass filters is determined by the sampling factor). The number of filters in the generated filterbank is given below: +---------------+---------------+-+------------+---+---------------------+ | sample factor | n_out |=| bandpass |\ +| highpass + lowpass | +===============+===============+=+============+===+=====================+ | 1 | n+2 |=| n |\ +| 1 + 1 | +---------------+---------------+-+------------+---+---------------------+ | 2 | 2*n+1+4 |=| 2*n+1 |\ +| 2 + 2 | +---------------+---------------+-+------------+---+---------------------+ | 4 | 4*n+3+8 |=| 4*n+3 |\ +| 4 + 4 | +---------------+---------------+-+------------+---+---------------------+ | s | s*(n+1)-1+2*s |=| s*(n+1)-1 |\ +| s + s | +---------------+---------------+-+------------+---+---------------------+ low_lim (int): Lower limit of frequency range. Filters will not be defined below this limit. hi_lim (int): Upper limit of frequency range. Filters will not be defined above this limit. sample_factor (int): Positive integer that determines how densely ERB function will be sampled to create bandpass filters. 1 represents standard sampling; adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling; adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling; adjacent bandpass filters will overlap by 87.5%. padding_size (int, optional): If None (default), the signal will not be padded before filtering. Otherwise, the filters will be created assuming the waveform signal will be padded to length padding_size*signal_length. full_filter (bool, default=True): If True (default), the complete filter that is ready to apply to the signal is returned. If False, only the first half of the filter is returned (likely positive terms of FFT). strict (bool, default=True): If True (default), will throw an error if sample_factor is not a power of two. This facilitates comparison across sample_factors. Also, if True, will throw an error if provided hi_lim is greater than the Nyquist rate. Returns: tuple: A tuple containing the output: * **filts** (*array*)-- The filterbank consisting of filters have cosine-shaped frequency responses, with center frequencies equally spaced on an ERB scale from low_lim to hi_lim. * **center_freqs** (*array*) -- something * **freqs** (*array*) -- something Raises: ValueError: Various value errors for bad choices of sample_factor; see description for strict parameter. 
""" if not isinstance(sample_factor, int): raise ValueError('sample_factor must be an integer, not %s' % type(sample_factor)) if sample_factor <= 0: raise ValueError('sample_factor must be positive') if sample_factor != 1 and np.remainder(sample_factor, 2) != 0: msg = 'sample_factor odd, and will change ERB filter widths. Use even sample factors for comparison.' if strict: raise ValueError(msg) else: warnings.warn(msg, RuntimeWarning, stacklevel=2) if padding_size is not None and padding_size >= 1: signal_length += padding_size if np.remainder(signal_length, 2) == 0: # even length n_freqs = signal_length // 2 # .0 does not include DC, likely the sampling grid max_freq = sr / 2 # go all the way to nyquist else: # odd length n_freqs = (signal_length - 1) // 2 # .0 max_freq = sr * (signal_length - 1) / 2 / signal_length # just under nyquist # verify the high limit is allowed by the sampling rate if hi_lim > sr / 2: hi_lim = max_freq msg = 'input arg "hi_lim" exceeds nyquist limit for max frequency; ignore with "strict=False"' if strict: raise ValueError(msg) else: warnings.warn(msg, RuntimeWarning, stacklevel=2) # changing the sampling density without changing the filter locations # (and, thereby changing their widths) requires that a certain number of filters # be used. n_filters = sample_factor * (n + 1) - 1 n_lp_hp = 2 * sample_factor freqs = utils.matlab_arange(0, max_freq, n_freqs) filts = np.zeros((n_freqs + 1 , n_filters + n_lp_hp)) # ?? n_freqs+1 # cutoffs are evenly spaced on an erb scale -- interpolate linearly in erb space then convert back # get the actual spacing use to generate the sequence (in case numpy does something weird) center_freqs, erb_spacing = np.linspace(freq2erb(low_lim), freq2erb(hi_lim), n_filters + 2, retstep=True) # +2 for bin endpoints # we need to exclude the endpoints center_freqs = center_freqs[1:-1] freqs_erb = freq2erb(freqs) for i in range(n_filters): i_offset = i + sample_factor l = center_freqs[i] - sample_factor * erb_spacing h = center_freqs[i] + sample_factor * erb_spacing # the first sample_factor # of rows in filts will be lowpass filters filts[(freqs_erb > l) & (freqs_erb < h), i_offset] = make_cosine_filter(freqs_erb, l, h, convert_to_erb=False) # be sample_factor number of each for i in range(sample_factor): # account for the fact that the first sample_factor # of filts are lowpass i_offset = i + sample_factor lp_h_ind = max(np.where(freqs < erb2freq(center_freqs[i]))[0]) # lowpass filter goes up to peak of first cos filter lp_filt = np.sqrt(1 - np.power(filts[:lp_h_ind+1, i_offset], 2)) hp_l_ind = min(np.where(freqs > erb2freq(center_freqs[-1-i]))[0]) # highpass filter goes down to peak of last cos filter hp_filt = np.sqrt(1 - np.power(filts[hp_l_ind:, -1-i_offset], 2)) filts[:lp_h_ind+1, i] = lp_filt filts[hp_l_ind:, -1-i] = hp_filt # ensure that squared freq response adds to one filts = filts / np.sqrt(sample_factor) # get center freqs for lowpass and highpass filters cfs_low = np.copy(center_freqs[:sample_factor]) - sample_factor * erb_spacing cfs_hi = np.copy(center_freqs[-sample_factor:]) + sample_factor * erb_spacing center_freqs = erb2freq(np.concatenate((cfs_low, center_freqs, cfs_hi))) # rectify center_freqs[center_freqs < 0] = 1 # discard highpass and lowpass filters, if requested if kwargs.get('no_lowpass'): filts = filts[:, sample_factor:] if kwargs.get('no_highpass'): filts = filts[:, :-sample_factor] # make the full filter by adding negative components if full_filter: filts = make_full_filter_set(filts, signal_length) return 
filts, center_freqs, freqs
207a9d3be6b732c1d86a5ed5bde069d5ea760347
13,555
def button_ld_train_first_day(criteria, min_reversal_number):
    """
    This function creates a csv file for the LD Train test. Each row will be
    the first day the animal ran the test. At the end, the function will ask
    the user to save the newly created csv file in a directory.

    :param criteria: A widget that contains a string that represents the
        duration of the criteria as n days/n+1 days
    :param min_reversal_number: An entry widget that contains a value that
        represents the minimum required reversal number for an animal
    """
    # check that the inputs to the criteria widgets are valid
    if ld_train_criteria_min_rev_check(criteria, min_reversal_number) is not None:
        criteria_list, min_rev = ld_train_criteria_min_rev_check(criteria, min_reversal_number)
    else:
        mb.showerror('LD Train Criteria Error',
                     'button_ld_train_first_day() error: One of the three criteria is empty or invalid!')
        print('button_ld_train_first_day() error: One of the three criteria is empty or invalid!')
        return None

    if ld_criteria_list_check(criteria_list) is not None:
        criteria_value, criteria_max_days = ld_criteria_list_check(criteria_list)
    else:
        mb.showerror('LD Train Criteria Error',
                     'button_ld_train_first_day() error: The n/n+1 days criteria is empty or invalid!')
        print('button_ld_train_first_day() error: The n/n+1 days criteria is empty or invalid!')
        return None

    df = data_setup('LD Train')
    if df is not None:
        ld_train_delete_other_difficulties(df)
        get_ld_train_normal(df, criteria_value, criteria_max_days, min_rev)
        save_file_message(df)
    else:
        mb.showerror('LD Train Criteria Error',
                     'button_ld_train_first_day() error: One of the criteria is invalid or you hit the cancel button!')
        print('button_ld_train_first_day() error: One of the criteria is invalid or you hit the cancel button!')
        return None
9de68279f6ffb8253275a7a7051a1ed9b2df8f8e
13,556
def analyze_video(file, name, api):
    """ Call Scenescoop analyze with a video """
    args = Namespace(video=file, name=name, input_data=None, api=True)
    scene_content = scenescoop(args)
    content = ''
    maxframes = 0
    for description in scene_content:
        if len(scene_content[description]) > maxframes:
            content = description
            maxframes = len(scene_content[description])
    if api:
        return jsonify(status="200", scene_content=scene_content,
                       content=content, maxframes=maxframes)
    else:
        return content
92e176a5c951d038aa8477db7aec0705fba0152c
13,557
def encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
    """
    Build and train an encoder-decoder model on x and y

    :param input_shape: Tuple of input shape
    :param output_sequence_length: Length of output sequence
    :param english_vocab_size: Number of unique English words in the dataset
    :param french_vocab_size: Number of unique French words in the dataset
    :return: Keras model built, but not trained
    """
    # OPTIONAL: Implement
    return None
47fa1893cc04b491292461db6c8a3418b464ba45
13,559
def close_to_cron(crontab_time, time_struct):
    """Return, for each field, the value in the given cron ranges
    (crontab_time) closest below the given time time_struct."""
    close_time = time_struct
    cindex = 0
    for val_struct in time_struct:
        offset_min = val_struct
        val_close = val_struct
        for val_cron in crontab_time[cindex]:
            offset_tmp = val_struct - val_cron
            if offset_tmp > 0 and offset_tmp < offset_min:
                # original assigned val_struct here, which made the search a
                # no-op; the cron value itself appears to be intended
                val_close = val_cron
                offset_min = offset_tmp
        close_time[cindex] = val_close
        cindex = cindex + 1
    return close_time
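# An illustrative call, assuming each element of crontab_time lists the
# allowed values for the corresponding field (e.g. minutes, then hours).
crontab_time = [[0, 15, 30, 45], [0, 6, 12, 18]]
time_struct = [20, 7]
print(close_to_cron(crontab_time, time_struct))  # [15, 6]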
7ce04d9b4260e7ea1ed7c3e95e7c36928989024e
13,560
from nltk.corpus import stopwords  # assumed import


def remove_stop_words(words_list: list) -> list:
    """ Remove stop words from strings list """
    en_stop_words = set(stopwords.words('english'))
    # note: the original compared against the unbound method `str(w).lower`,
    # which is never in the set; the call was clearly intended
    return [w for w in words_list if str(w).lower() not in en_stop_words]
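# A usage sketch; requires the NLTK stopwords corpus (e.g. after running
# nltk.download('stopwords')).
print(remove_stop_words(["The", "quick", "fox", "is", "on", "the", "run"]))
# ['quick', 'fox', 'run']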
a6e3c117ea805bdfaffe80c17fc5e340a869d55d
13,561
def min_geodesic_distance_rotmats_pairwise_tf(r1s, r2s):
    """Compute min geodesic distance for each R1 wrt R2."""
    # These are the traces of R1^T R2
    trace = tf.einsum('...aij,...bij->...ab', r1s, r2s)
    # closest rotation has max trace
    max_trace = tf.reduce_max(trace, axis=-1)
    return tf.acos(tf.clip_by_value((max_trace - 1.0) / 2.0, -1.0, 1.0))
a4da40aa9594c301b0366da0a26d73afce83e05f
13,563
def project_to_2D(xyz):
    """Projection to (0, X, Z) plane."""
    return xyz[0], xyz[2]
c6cdb8bd6dce65f6ce39b14b9e56622832f35752
13,564
def Geom2dInt_Geom2dCurveTool_D2(*args):
    """
    :param C:
    :type C: Adaptor2d_Curve2d &
    :param U:
    :type U: float
    :param P:
    :type P: gp_Pnt2d
    :param T:
    :type T: gp_Vec2d
    :param N:
    :type N: gp_Vec2d
    :rtype: void
    """
    return _Geom2dInt.Geom2dInt_Geom2dCurveTool_D2(*args)
6ac157e171af9d4bab852a9677287e33bb1d90f2
13,565
def notfound(request):
    """ Common notfound return message """
    msg = CustomError.NOT_FOUND_ERROR.format(request.url, request.method)
    log.error(msg)
    request.response.status = 404
    return {'error': 'true', 'code': 404, 'message': msg}
b690d9b879db15e192e8ee50d4ea2b0847ba658b
13,566
import numpy as np  # assumed import


def l2norm(a):
    """Return the l2 norm of a, flattened out.

    Implemented as a separate function (not a call to norm() for speed)."""
    return np.sqrt(np.sum(np.absolute(a)**2))
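# A quick numerical check of l2norm above; for a 3-4-5 triangle the norm is
# exact, and flattening means any array shape works.
import numpy as np

print(l2norm(np.array([3.0, 4.0])))        # 5.0
print(l2norm(np.array([[1, 2], [2, 4]])))  # 5.0, since 1 + 4 + 4 + 16 = 25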
b5ce94bfc0f3472e60a4338c379bc4dfe490e623
13,567
def create_container(request):
    """ Creates a container (empty object of type application/directory) """
    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    form = CreateContainerForm(request.POST or None)
    if form.is_valid():
        container = form.cleaned_data['containername']
        try:
            client.put_container(storage_url, auth_token,
                                 container, http_conn=http_conn)
            messages.add_message(request, messages.SUCCESS,
                                 _("Container created."))
            actionlog.log(request.user.username, "create", container)
        except client.ClientException as err:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
            messages.add_message(request, messages.ERROR, _('Access denied.'))
        return redirect(containerview)

    context = utils.update_default_context(request, {
        'form': form,
    })

    return render_to_response('create_container.html', context,
                              context_instance=RequestContext(request))
15c25df933f7620cee71319f9f41e92e29880d1c
13,568
def get_searchable_models():
    """ Returns a list of all models in the Django project which implement ISearchable """
    app = AppCache()
    # list comprehension so the docstring's promise of a list holds on Python 3,
    # where filter() returns an iterator
    return [klass for klass in app.get_models() if implements(klass, ISearchable)]
ad7c56f17ec4e0fc77942fe1466b879bd45eb191
13,570
def create_updated_alert_from_slack_message(payload, time, alert_json):
    """
    Create an updated raw alert (json) from an update request in Slack
    """
    values = payload['view']['state']['values']
    for value in values:
        for key in values[value]:
            if key == 'alert_id':
                continue
            if key == 'severity':
                if values[value][key].get('selected_option'):
                    alert_json[key] = \
                        values[value][key]['selected_option']['text']['text']
            if key == 'active':
                if values[value][key].get('selected_option'):
                    alert_json[key] = \
                        values[value][key]['selected_option']['text']['text']
            else:
                if values[value][key].get('value'):
                    alert_json[key] = values[value][key]['value']
    alert_json['datetime'] = time
    return alert_json
a685a0c0da472f055dc8860bdf09970a1ecc8aff
13,571
def enforce(*types):
    """ decorator function enforcing, and converting, argument data types """
    def decorator(fn):
        def new_function(*args, **kwargs):
            # convert args into something mutable, list in this case
            newargs = []
            for original_argument, type_to_convert in zip(args, types):
                newargs.append(type_to_convert(original_argument))
            return fn(*newargs, **kwargs)
        return new_function
    return decorator
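# A usage sketch of the enforce decorator above; each positional argument is
# coerced to the declared type before the call.
@enforce(int, float)
def scale(count, factor):
    return count * factor

print(scale("3", "1.5"))  # 4.5 -- "3" -> 3, "1.5" -> 1.5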
217ad3adccdaa9fc83ceaf5ef2c0905b8d54f1ed
13,572
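A usage sketch for the enforce decorator above; the function and arguments are made up:

@enforce(int, float)
def scaled(count, factor):
    return count * factor

print(scaled("3", "1.5"))  # "3" -> 3, "1.5" -> 1.5, so this prints 4.5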
from typing import Type
from typing import Any
from typing import Sequence
from dataclasses import fields, MISSING
from datetime import datetime
from enum import Enum
from textwrap import indent


def modify_repr(_cls: Type[Any]) -> None:
    """Improved dataclass repr function.

    Only show non-default non-internal values, and summarize containers.
    """
    # let classes still create their own
    if _cls.__repr__ is not object.__repr__:
        return

    def new_repr(self: Any) -> str:
        name = self.__class__.__qualname__
        lines = []
        for f in sorted(fields(self), key=lambda f: f.name not in ("name", "id")):
            if f.name.endswith("_"):
                continue
            # https://github.com/python/mypy/issues/6910
            if f.default_factory is not MISSING:  # type: ignore
                default = f.default_factory()  # type: ignore
            else:
                default = f.default
            current = getattr(self, f.name)
            if current != default:
                if isinstance(current, Sequence) and not isinstance(current, str):
                    rep = f"[<{len(current)} {f.name.title()}>]"
                elif isinstance(current, Enum):
                    rep = repr(current.value)
                elif isinstance(current, datetime):
                    rep = f"datetime.fromisoformat({current.isoformat()!r})"
                else:
                    rep = repr(current)
                lines.append(f"{f.name}={rep},")
        if len(lines) == 1:
            body = lines[-1].rstrip(",")
        elif lines:
            body = "\n" + indent("\n".join(lines), " ") + "\n"
        else:
            body = ""
        out = f"{name}({body})"
        return out

    setattr(_cls, "__repr__", new_repr)
ddc860bbe3c9d04723a3cc0b4cdcce960d0ecf71
13,573
def _is_binary(path):
    """Checks if the file at |path| is an ELF executable.

    This is done by inspecting its four-byte magic number.
    """
    with open(path, 'rb') as f:
        file_tag = f.read(4)
    # Compare against bytes, not str: read() on a binary file returns bytes.
    return file_tag == b'\x7fELF'
0c5bc0917f405604a6d36495b786c9fbc9268ad1
13,574
from typing import Optional
import typing


def update_item(*, table: str, hash_key: str, sort_key: Optional[str] = None,
                update_expression: Optional[str] = None,
                expression_attribute_values: typing.Dict,
                return_values: str = 'ALL_NEW'):
    """
    Update an item in a DynamoDB table.

    Will determine the type of db this is being called on by the number of keys
    provided (omit sort_key to update a table with only 1 primary key).

    NOTE: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html

    :param table: Name of the table in AWS.
    :param str hash_key: 1st primary key that can be used to fetch associated sort_keys and values.
    :param str sort_key: 2nd primary key, used with hash_key to update a specific value.
        Note: If not specified, the update targets the item keyed by hash_key alone.
    :param str update_expression: Expression used to update the value; needs the action to be
        performed and the new value.
    :param str expression_attribute_values: attribute values to use from the expression
    :param str return_values: return values to get back from the dynamodb API, defaults to
        'ALL_NEW' which provides all item attributes after the update.
    :return: the updated item attributes, formatted by _format_ddb_response
    """
    query = {'TableName': table,
             'Key': _format_item(hash_key=hash_key, sort_key=sort_key, value=None)}
    if update_expression:
        query['UpdateExpression'] = update_expression
        query['ExpressionAttributeValues'] = expression_attribute_values
        query['ReturnValues'] = return_values
    resp = db.update_item(**query)
    return _format_ddb_response(resp.get('Attributes'))
a6efc6638708d1c5dfc79d89216cfa866e3a24fa
13,575
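A hypothetical call to update_item above; the table name, keys, and attribute encoding are assumptions, and the module-level db client and _format_item helper must already be configured:

updated = update_item(
    table='users',  # assumed table name
    hash_key='user#123',
    sort_key='profile',
    update_expression='SET login_count = login_count + :inc',
    expression_attribute_values={':inc': {'N': '1'}},  # low-level client value format
)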
import re


def google_login_required(fn):
    """Return 403 unless the user is logged in from a @google.com domain."""
    def wrapper(self, *args, **kwargs):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        email_match = re.match('^(.*)@(.*)$', user.email())
        if email_match:
            _, domain = email_match.groups()
            if domain == 'google.com':
                return fn(self, *args, **kwargs)
        self.error(403)  # Unrecognized email or unauthorized domain.
        self.response.out.write('unauthorized email %s' % user.email())
    return wrapper
1e45f2ea026e772b6b4c9048dddf93b2fe3ec991
13,577
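A usage sketch for google_login_required above, assuming a webapp2-style App Engine handler; the handler class is hypothetical:

import webapp2

class AdminHandler(webapp2.RequestHandler):
    @google_login_required
    def get(self):
        # Only reached for logged-in @google.com users.
        self.response.out.write('hello, googler')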
def init_res_fig(n_subplots, max_sess=None, modif=False): """ init_res_fig(n_subplots) Initializes a figure in which to plot summary results. Required args: - n_subplots (int): number of subplots Optional args: - max_sess (int): maximum number of sessions plotted default: None - modif (bool) : if True, plots are made in a modified (simplified way) default: False Returns: - fig (plt Fig): figure - ax (plt Axis): axis """ subplot_hei = 14 subplot_wid = 7.5 if max_sess is not None: subplot_wid *= max_sess/4.0 if modif: sess_plot_util.update_plt_linpla() figpar_init = sess_plot_util.fig_init_linpla(sharey=True)["init"] fig, ax = plot_util.init_fig(n_subplots, **figpar_init) else: fig, ax = plot_util.init_fig(n_subplots, 2, sharey=True, subplot_hei=subplot_hei, subplot_wid=subplot_wid) return fig, ax
3c12c18c16a371d10977d165875a2aa346c009bf
13,578
import json


def change_personal_data_settings(request):
    """
    Creates a question with summarized data to be changed
    :param request: POST request from "Change personal data settings" Dialogflow intent
    :return: JSON with summarized data to be changed
    """
    language = request.data['queryResult']['languageCode']
    response_spoken_pl = "Nie mogę zmienić żadnych ustawień, ponieważ nie posiadasz jeszcze konta. Jeśli chcesz " \
                         "założyć konto w best transport Polska, wybierz poniższą opcję Zarejestruj się"
    display_spoken_pl = "Nie mogę zmienić żadnych ustawień. Załóż konto przez wybranie poniższej opcji Zarejestruj się"
    response_spoken_en = "I can't change any settings, because you don't have an account yet. If you want to create a best" \
                         " transport Poland account, select the option \"Sign up\" below"
    display_spoken_en = "I can't change any settings. Create an account by selecting the option below \"Sign up\""

    # The user payload is a dict; the access token may or may not be present in it.
    user_payload = request.data['originalDetectIntentRequest']['payload']['user']
    access_token = user_payload.get('accessToken')

    if access_token:
        account_exist = check_token(access_token, language, response_spoken_pl, display_spoken_pl,
                                    response_spoken_en, display_spoken_en)
        if account_exist == "token exist":
            with open('api/response.json') as json_file:
                response = json.load(json_file)
            part_to_modify = response['payload']['google']['richResponse']
            parameters_from_request = request.data["queryResult"]["parameters"]
            if language == "pl":
                entities_pl = {'First_name': 'imię', 'Surname': "nazwisko", 'Email': 'email',
                               'telephone-number': 'numer telefonu', 'geo-city': 'miejsce zamieszkania',
                               'post-code': 'kod pocztowy', 'geo-country': 'kraj', 'tax_number': "numer płatnika"}
                response_pl = "Czy na pewno chcesz zmienić "
                for k, v in parameters_from_request.items():
                    if v != "" and k in entities_pl:
                        response_pl += entities_pl[k] + " na " + v + ", "
                response_pl = response_pl[:-2]
                response_pl += "?"
                suggestions_pl = [{"title": "Tak"}, {"title": "Nie"}]
                part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = response_pl
                part_to_modify['items'][0]['simpleResponse']['displayText'] = response_pl
                part_to_modify['suggestions'] = suggestions_pl
            elif language == "en":
                entities_en = {'First_name': 'name', 'Surname': "surname", 'Email': 'email',
                               'telephone-number': 'phone number', 'geo-city': 'residence place',
                               'post-code': 'post code', 'geo-country': 'country', 'tax_number': "tax number"}
                response_en = "Are you sure you want to change "
                for k, v in parameters_from_request.items():
                    if v != "" and k in entities_en:
                        response_en += entities_en[k] + " to " + v + ", "
                response_en = response_en[:-2]
                response_en += "?"
                suggestions_en = [{"title": "Yes"}, {"title": "No"}]
                part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = response_en
                part_to_modify['items'][0]['simpleResponse']['displayText'] = response_en
                part_to_modify['suggestions'] = suggestions_en
            response['payload']['google']['richResponse'] = part_to_modify
            return response
        else:
            return account_exist
    else:
        # No token at all: pass a sentinel so check_token produces the "no account" response.
        account_exist = check_token("There is no", language, response_spoken_pl, display_spoken_pl,
                                    response_spoken_en, display_spoken_en)
        return account_exist
c18641ea4cc32e8d2703dfca90066b6736c5103a
13,579
import dolfin


def get_selected_cells(mesh, startpos, endpos):
    """
    Return the set of cells contained in the startpos-endpos rectangle
    """
    xstart, ystart = startpos
    xend, yend = endpos

    selected_cells = set()
    vertex_coords = mesh.coordinates()
    for cell in dolfin.cells(mesh):
        cell_vertices = cell.entities(0)
        for vid in cell_vertices:
            x, y = vertex_coords[vid]
            if xstart <= x <= xend and ystart <= y <= yend:
                selected_cells.add(cell.index())
                break

    return selected_cells
c637bfa195aae4125e65553b1f4023cc3dae1f3a
13,580
import numpy as np


def flip_axis(array, axis):
    """
    Flip the given axis of an array.

    Note that the ordering follows the numpy convention: flipping axis 0
    reverses the rows (a vertical flip when the array is viewed as an
    image), while flipping axis 1 reverses the columns (a horizontal flip).

    :param array:
        The array to be flipped.
    :type array: `ndarray`
    :param axis:
        The axis to be flipped.
    :type axis: `int`
    :returns:
        The flipped array.
    :rtype: `ndarray`
    """
    # Rearrange the array so that the axis of interest is first.
    array = np.asarray(array).swapaxes(axis, 0)
    # Reverse the elements along the first axis.
    array = array[::-1, ...]
    # Put the array back and return.
    return array.swapaxes(0, axis)
e2839125ddf3b22dea732857763fda636b748dda
13,581
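A quick check of flip_axis above with a small array:

import numpy as np

a = np.arange(6).reshape(2, 3)
print(flip_axis(a, 0))  # rows reversed:    [[3 4 5] [0 1 2]]
print(flip_axis(a, 1))  # columns reversed: [[2 1 0] [5 4 3]]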
def fizz_buzz_tree(input_tree):
    """
    Traverses a tree and performs fizzbuzz on each element, augmenting the val.
    """
    input_tree.in_order_trav(fizzbuzz)
    return input_tree
29b7380fb6215bf8ecf67fa85321445b8954abdc
13,582
from Crypto.PublicKey import RSA


def generate_key(keysize=KEY_SIZE):
    """Generate an RSA key pair.

    Keyword Arguments:
        keysize {int} -- Key size in bits (default: {KEY_SIZE})

    Returns:
        bytes -- Secret key
        bytes -- Public key
    """
    key = RSA.generate(keysize)
    public_key = key.publickey().exportKey()
    secret_key = key.exportKey(passphrase=None)
    return secret_key, public_key
0a38221269b167c4ceefc95eb4cee3452f2aaffe
13,585
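A usage sketch for generate_key above (assumes PyCryptodome or PyCrypto is installed; both keys come back as PEM bytes):

secret_key, public_key = generate_key(2048)
with open('private.pem', 'wb') as f:
    f.write(secret_key)
with open('public.pem', 'wb') as f:
    f.write(public_key)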
import functools def get_api_endpoint(func): """Register a GET endpoint.""" @json_api.route(f"/{func.__name__}", methods=["GET"]) @functools.wraps(func) def _wrapper(*args, **kwargs): return jsonify({"success": True, "data": func(*args, **kwargs)}) return _wrapper
d49dc725e7538374910e3819f8eae647250747f7
13,586
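A usage sketch for get_api_endpoint above; it assumes the module-level json_api blueprint exists, and the status function is made up:

@get_api_endpoint
def status():
    return {"ok": True}

# GET /status now responds with {"success": true, "data": {"ok": true}}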
def get_mt4(alias=DEFAULT_MT4_NAME): """ Notes: return mt4 object which is initialized. Args: alias(string): mt4 object alias name. default value is DEFAULT_MT4_NAME Returns: mt4 object(metatrader.backtest.MT4): instantiated mt4 object """ global _mt4s if alias in _mt4s: return _mt4s[alias] else: raise RuntimeError('mt4[%s] is not initialized.' % alias)
ee228ced5790124768c8a41e70bf596181a55ca2
13,588
import warnings

import pymc3 as pm
import theano


def get_log_probability_function(model=None):
    """
    Builds a theano function from a PyMC3 model which takes a numpy array
    of shape ``(n_parameters)`` as an input and returns the total log
    probability of the model. This function takes the **transformed**
    random variables defined within the model context, which is a
    different behaviour from
    :func:`caustic.utils.get_log_likelihood_function`. The ordering of the
    parameters in the input array should match the ordering of the RVs in
    the model context. The purpose of this function is to be able to use
    external samplers with PyMC3 models.

    Parameters
    ----------
    model : pymc3.Model
        PyMC3 model object.

    Returns
    -------
    callable
        Function mapping a parameter array to the total log probability
        of the model.
    """
    model = pm.modelcontext(model)

    # The original check `("_interval__" or ...) in str(model.vars)` only ever
    # tested the first string; test each transform suffix explicitly.
    transform_suffixes = ("_interval__", "_log__", "_lowerbound__", "_upperbound__")
    if any(suffix in str(model.vars) for suffix in transform_suffixes):
        warnings.warn(
            """Your model contains transformed variables. Keep in mind that
            the compiled log probability function expects the transformed
            variables as an input.""",
        )

    f = theano.function(model.vars, [model.logpt])

    def log_prob(params):
        dct = model.bijection.rmap(params[::-1])
        args = (dct[k.name] for k in model.vars)
        results = f(*args)
        return tuple(results)[0]

    return log_prob
8e4332ce6943341b196d1628942f3360ce9f4e05
13,589
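A usage sketch for get_log_probability_function above with a toy PyMC3 model; the model is hypothetical, and mu is untransformed, so transformed and untransformed coordinates coincide:

import numpy as np
import pymc3 as pm

with pm.Model() as model:
    mu = pm.Normal("mu", 0.0, 1.0)
    pm.Normal("obs", mu, 1.0, observed=np.array([0.1, -0.3, 0.2]))

log_prob = get_log_probability_function(model)
print(log_prob(np.array([0.0])))  # total log probability at mu = 0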
def get_health(check_celery=True): """ Gets the health of the all the external services. :return: dictionary with key: service name like etcd, celery, elasticsearch value: dictionary of health status :rtype: dict """ health_status = { 'etcd': _check_etcd(), 'store': _check_store() } if check_celery: health_status['celery'] = _check_celery() return health_status
95fec6ee762ab81a8e27ebe796b914be6d38c59d
13,590
def add_quotation_items(quotation_items_data):
    """
    Add a quotation-items record.
    :param quotation_items_data: data for the new QuotationItems row
    :return: None, or the id of the created record
    :except:
    """
    return db_instance.add(QuotationItems, quotation_items_data)
09f60cc4e8182909acb34bb0b406336849bf8543
13,591
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
    """
    Loads the Cifar10 images in human readable format.

    Args:
        path:
            The path to the folder with the CIFAR-10 images.
        img_nums:
            A list with the numbers of the images we want to load.

    Returns:
        The images as a Mx3x32x32 numpy array.
    """
    return load_img(path, img_nums, (3, 32, 32))
991ff4cd7192c0ed4b1d6e2d566ed1f0ce446db5
13,592
import typing def get_project_linked_to_object(object_id: int) -> typing.Optional[Project]: """ Return the project linked to a given object, or None. :param object_id: the ID of an existing object :return: the linked project or None :raise errors.ObjectDoesNotExistError: if no object with the given ID exists """ association = projects.ProjectObjectAssociation.query.filter_by( object_id=object_id ).first() if association is None: # make sure the object exists objects.get_object(object_id) return None return get_project(association.project_id)
189282be5acfb063678ca2c6765eeb1a7fa6b6c5
13,594
def calc_distances_for_everyon_in_frame(everyone_in_frame, people_graph, too_far_distance, minimum_distance_change): """ :param everyone_in_frame: [PersonPath] :type everyone_in_frame: list :param people_graph: :type people_graph: Graph :param too_far_distance: :param minimum_distance_change: :return: :rtype: Graph """ points = [[person_path.world_frame_coordinates_list.current_frame_coord().x, person_path.world_frame_coordinates_list.current_frame_coord().y] for person_path in everyone_in_frame] # all points of everyone in this frame points = np.array(points) ids = [person_path.id_number for person_path in everyone_in_frame] for index, person_path in enumerate(everyone_in_frame): x, y = person_path.world_frame_coordinates_list.current_frame_coord_xy() point = np.array([x, y]) all_euclidean_distances = np.linalg.norm(points - point, axis=1) # calculate all euclidean distances closer = deque() further = deque() for i in range(len(ids)): id_number = ids[i] if id_number == person_path.id_number: # if it's the same id as the person_path's id continue distance = all_euclidean_distances[i] people_graph.add_edge(person_path.id_number, id_number, distance) if distance < too_far_distance: # if it's not too far event = distance_event(person_path.id_number, id_number, people_graph, minimum_distance_change) if event == DistanceEvent.CLOSER: closer.append(id_number) elif event == DistanceEvent.FURTHER: further.append(id_number) if closer: print('%3d is getting CLOSER to' % person_path.id_number, list(closer)) if further: print('%3d is getting FURTHER from' % person_path.id_number, list(further)) return people_graph
83dcb204b53d2ac784d1cc8bb0da61a114a41768
13,596
import torch
import torch.nn.functional as F


def conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None,
           stride=1, padding=0, dilation=1, groups=1, mode=None):
    """Standard conv2d. Returns the input if weight=None."""
    if weight is None:
        return input

    ind = None
    if mode is not None:
        if padding != 0:
            raise ValueError('Cannot input both padding and mode.')
        if mode == 'same':
            padding = (weight.shape[2] // 2, weight.shape[3] // 2)
            if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0:
                ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None),
                       slice(-1) if weight.shape[3] % 2 == 0 else slice(None))
        elif mode == 'valid':
            padding = (0, 0)
        elif mode == 'full':
            padding = (weight.shape[2] - 1, weight.shape[3] - 1)
        else:
            raise ValueError('Unknown mode for padding.')

    out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding,
                   dilation=dilation, groups=groups)
    if ind is None:
        return out
    return out[:, :, ind[0], ind[1]]
b43b975d96d273fa1ee1cfe4034ca1fd195b5019
13,597
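A quick check of the 'same' mode in conv2d above: with an odd kernel the spatial size is preserved:

import torch

x = torch.randn(1, 3, 32, 32)
w = torch.randn(8, 3, 5, 5)
y = conv2d(x, w, mode='same')
print(y.shape)  # torch.Size([1, 8, 32, 32])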
import torch
from tqdm import tqdm


def infer(model, loader_test):
    """
    Return the true and predicted labels of a model over a dataset.

    Parameters
    ----------
    model: PyTorch model
    loader_test: PyTorch DataLoader.

    Returns
    -------
    tuple
        y_true and y_pred
    """
    model.eval()
    ys, ys_hat = [], []
    with torch.no_grad():  # no gradients are needed during inference
        for ids, masks, y_true in tqdm(loader_test):
            ids = ids.to(device)
            masks = masks.to(device)
            y_true = y_true.to(device)
            y_hat = model(ids, masks)
            y_pred = torch.argmax(y_hat, dim=1)
            ys.extend(y_true.cpu().numpy().tolist())
            ys_hat.extend(y_pred.cpu().numpy().tolist())
    return ys, ys_hat
956b17d8b3869eeff6d35019ac82cd3ca5d4092e
13,598
import keyword def validate_project(project_name): """ Check the defined project name against keywords, builtins and existing modules to avoid name clashing """ if not project_name_rx.search(project_name): return None if keyword.iskeyword(project_name): return None if project_name in dir(__builtins__): return None try: __import__(project_name) return None except ImportError: return project_name
569fdb1d6d37ce50b144facc6cba725a0575b2f6
13,599
def _print_available_filters(supported_filters):
    """Prints information on available filters and their thresholds."""
    widths = (20, 40, 20)
    data = [("Filter", "Description", "Threshold Values"),
            ("------", "-----------", "----------------")]
    # One row per filter: (name, description, threshold values).
    for f, (d, t, c) in supported_filters.items():
        data.append((f, d, t))
    print()
    for row in data:
        i = 1
        nextline = "\n"
        for col, width in zip(row, widths):
            print(col[:width] + " " * max(0, width - len(col)), end=" ")
            if not i == 2:
                i += 1
                continue
            # Wrap an over-long description onto continuation lines,
            # indented to line up under the description column.
            mycol = col[width:]
            while len(mycol) > 1:
                nextline += " " * 21
                nextline += mycol[:width]
                nextline += " " * (width - len(mycol))
                nextline += "\n"
                mycol = mycol[width:]
            i += 1
        print(nextline, end="")
    print()
    return 0
49a769a27d2a4a0beaba1021821c8d1e551f53eb
13,600
def _get_dates(i, *args, **kwargs):
    """ Get dates from arguments """
    try:
        start_date = kwargs['start_date']
    except KeyError:
        try:
            start_date = args[i]
        except IndexError:
            start_date = None

    try:
        end_date = kwargs['end_date']
    except KeyError:
        try:
            end_date = args[i + 1]
        except IndexError:
            end_date = None

    start_date, end_date = _sanitize_dates(start_date, end_date)
    return start_date, end_date
708bc0fcc5be80ef3b3008b9569bb14a01c4bace
13,601
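Usage sketches for _get_dates above; i is the index where the positional dates start, and _sanitize_dates comes from the surrounding module:

# Dates passed positionally after one leading argument (i = 1) ...
start, end = _get_dates(1, 'AAPL', '2020-01-01', '2020-06-30')
# ... or passed as keywords, in which case i is ignored.
start, end = _get_dates(0, start_date='2020-01-01', end_date='2020-06-30')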
def home_page(): """Shows home page""" html = """ <html> <body> <h1>Home Page</h1> <p>Welcome to my simple app!</p> <a href='/hello'>Go to hello page</a> </body> </html> """ return html
444833ab61803d1fe52676834e211ac79e770b4e
13,602
def GetPDFHexString(s, i, iend): """Convert and return pdf hex string starting at s[i], ending at s[iend-1].""" j = i + 1 v = [] c = '' jend = iend - 1 while j < jend: p = _re_pswhitespaceandcomments.match(s, j) if p: j = p.end() d = chr(ordat(s, j)) if c != '': v.append(FromHexPair(c, d)) c = '' else: c = d j += 1 if c != '': v.append(FromHexPair(c, '0')) return ((OSTRING, ''.join(v)), iend)
516d7c33bcd1b2237eb482e9722de4552ac79ce2
13,603
def get_edge_syslog_info(edge_id): """Get syslog information for specific edge id""" nsxv = get_nsxv_client() syslog_info = nsxv.get_edge_syslog(edge_id)[1] if not syslog_info['enabled']: return 'Disabled' output = "" if 'protocol' in syslog_info: output += syslog_info['protocol'] if 'serverAddresses' in syslog_info: for server_address in syslog_info['serverAddresses']['ipAddress']: output += "\n" + server_address return output
5c5ea79109b9a9053f95945a7902d9e6322a6ba6
13,604
from typing import List


def _get_rec_suffix(operations: List[str]) -> str:
    """ finished, checked,

    Parameters
    ----------
    operations: list of str,
        names of operations to perform (or that have been performed)

    Returns
    -------
    suffix: str,
        suffix of the filename of the preprocessed ecg signal
    """
    suffix = "-".join(sorted([item.lower() for item in operations]))
    return suffix
270a1b3749342d05819eafef3fa5175da393b1ad
13,605
def get_A_text(params, func_type=None): """ Get text associated with the fit of A(s) """ line1 = r'$A(s|r)$ is assumed to take the form:' line2 = (r'$A(s|r) = s^{-1}\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^a ' r'exp\bigg{(}{-\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^b}\bigg{)}$') a, b = params['a'], params['b'] line3 = r'where a = {:.4f} and b = {:.4f}'.format(a, b) text = '\n'.join([line1, line2, line3]) return text
ec68c49a7912dc5630e3c96a09d667ce52f89914
13,606
def transform_to_dict(closest_list: list) -> dict: """ Returns dict {(latitude, longitude): {film1, film2, ...}, ...} from closest_list [[film1, (latitude, longitude)], ...], where film1, film2 are titles of films, (latitude, longitude) is a coordinates of a place where those films were shoot. >>> transform_to_dict([["film1", (49, 24)]]) {(49, 24): {'film1'}} """ closest_dict = {} for film, coord in closest_list: if coord in closest_dict: closest_dict[coord].add(film) else: closest_dict[coord] = {film} return closest_dict
e7c6fae73792a828d85db03e794bfb69c7b1fe87
13,607
def get_numpy_val_from_form_input(input_name): """Get a NumPy-compatible numerical value from the request object""" return get_numpy_val(input_name, request.form[input_name])
fadfbf106c82088103674e5da5f526e08e2a05ac
13,609
import numpy
from lasagne.layers import set_all_param_values


def load_model(model, path):
    """Load the model parameters from a file and set them.

    Parameters
    ----------
    model : a :class:`Layer` instance
        The model with unset parameters.
    path : string
        The file with the model parameters.

    Returns
    -------
    a :class:`Layer` instance
        The given model with set parameters.
    """
    with numpy.load(path) as fobj:
        values = [fobj['arr_%d' % i] for i in range(len(fobj.files))]
    set_all_param_values(model, values)
    return model
dae27ffc78be7aa7476c645c4f021d4acaef5b44
13,610
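A round-trip sketch for load_model above, assuming a Lasagne network layer named network already exists:

import numpy
from lasagne.layers import get_all_param_values

numpy.savez('model.npz', *get_all_param_values(network))
network = load_model(network, 'model.npz')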
def node_id_at_cells(shape): """Node ID at each cell. Parameters ---------- shape : tuple of int Shape of grid of nodes. Returns ------- ndarray : ID of node associated with each cell. Examples -------- >>> from landlab.grid.structured_quad.cells import node_id_at_cells >>> node_id_at_cells((3, 4)) array([[5, 6]]) """ node_ids = nodes.node_ids(shape) return node_ids[1:-1, 1:-1].copy().reshape(shape_of_cells(shape))
f089f598cacc4d5ec6885477098dcca741358820
13,611
from string import Template


def search_media(search_queries, media, ignore_likes=True):
    """Return a list of media matching a query that searches for a match in
    the comments, likes, and tags in a list of media."""
    # Initialize update message
    update_message = print_update_message(len(media))
    update_message.send(None)

    # Initialize result data
    if type(search_queries) is not list:
        search_queries = [search_queries]
    matches = [[] for _ in range(len(search_queries))]

    # Iterate through media looking for matches to search_queries
    for idx0, medium in enumerate(media):
        results = search_medium(search_queries, medium, ignore_likes=ignore_likes)
        for idx1, result in enumerate(results):
            if result:
                matches[idx1].append(medium)

        # Send update message
        message = Template(
            'Found {} matches in {} media out of {}. {} api calls remaining'.format(
                repr([len(x) for x in matches]), idx0 + 1, len(media),
                _api.last_used_api.x_ratelimit_remaining)
        )
        update_message.send((idx0, message))

    return matches
23ed38496310cc86c4d3d7f8aff4e1d5c61f9d69
13,612
def get_produced_messages(func): """Returns a list of message fqn and channel pairs. Args: func (Function): function object Returns: list """ result = [] for msg, channel in func.produces: result.append((_build_msg_fqn(msg), channel)) return result
b63d9305f3af3e474beb1fb328881123d8f4ece6
13,614
from pathlib import Path
from typing import Sequence


def find_coverage_files(src_path: Path) -> Sequence:
    """
    Find the coverage files within the specified src_path.

    Parameters:
        src_path (Path): The path in which to look for the .coverage files.

    Returns:
        (Sequence) The .coverage files found under the specified folder.
    """
    # glob() returns a generator; materialize (and sort) it so the annotated
    # Sequence return type actually holds.
    return sorted(Path(src_path).glob("**/*.coverage"))
53fd9b2d2405ed6fe895718e22cc6b1ddb86f4df
13,615
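A usage sketch for find_coverage_files above (the build directory is hypothetical):

from pathlib import Path

for cov_file in find_coverage_files(Path('build')):
    print(cov_file)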