content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, values 0 to 3.66M
from typing import Tuple from typing import Any def concatenate_and_process_data( data_consent: pd.DataFrame, data_noconsent: pd.DataFrame, conversion_column: str = CONVERSION_COLUMN, drop_columns: Tuple[Any, ...] = DROP_COLUMNS, non_dummy_columns: Tuple[Any, ...] = NON_DUMMY_COLUMNS ) -> Tuple[pd.DataFrame, pd.DataFrame]: """Concatenates consent and no-consent data and preprocesses them. Args: data_consent: Dataframe of consent customers. data_noconsent: Dataframe of no-consent customers. conversion_column: Name of the conversion column in the data. drop_columns: Names of columns that should be dropped from the data. non_dummy_columns: Names of (categorical) columns that should be kept, but not dummy-coded. Raises: ValueError: if concatenating consent and no-consent data doesn't match the expected length. Returns: Processed dataframes for consent and no-consent customers. """ data_noconsent["consent"] = 0 data_consent["consent"] = 1 data_concat = pd.concat([data_noconsent, data_consent]) data_concat.reset_index(inplace=True, drop=True) if len(data_concat) != (len(data_noconsent) + len(data_consent)): raise ValueError( "Length of concatenated data does not match sum of individual dataframes." ) data_preprocessed = preprocess_data( data=data_concat, drop_columns=list(drop_columns), non_dummy_columns=list(non_dummy_columns), conversion_column=conversion_column) data_noconsent_processed = data_preprocessed[data_preprocessed["consent"] == 0] data_consent_processed = data_preprocessed[data_preprocessed["consent"] == 1] return data_consent_processed, data_noconsent_processed
57c84f0b406750b40161bb7f5ed19c5f2cd509e8
7,096
def plot(nRows=1, nCols=1, figSize=5): """ Generate a matplotlib plot and axis handle Parameters ----------------- nRows : An int, number of rows for subplotting nCols : An int, number of columns for subplotting figSize : Numeric or array (xFigSize, yFigSize). The size of each axis. """ if isinstance(figSize, (list, tuple)): xFigSize, yFigSize = figSize elif isinstance(figSize, (int, float)): xFigSize = yFigSize = figSize else: raise Exception('figSize type {} not recognised'.format(type(figSize))) fig, axs = plt.subplots(nRows, nCols, figsize=(nCols * xFigSize, nRows * yFigSize)) if nRows * nCols > 1: axs = axs.ravel() return fig, axs
a0ec25fa932933f717ef9a576d0f80d531865aad
7,097
def make_rate_data(grp, valuevars, query="none == 'All'", data=ob): """Filters, Groups, and Calculates Rates Params: grp [list]: A list detailing the names of the variables to group by. valuevars [list]: A list detailing the names of the quantitative variable summarise and calculate a rate for (as a function of population). query [string]: A query string used to subset the data prior to aggregation. data [pd.DataFrame]: The obesity dataset. Returns: [pd.DataFrame]: A pandas data frame containing the grouping variables and rates for the value variables (carrying the same column name). Cells where a rate could not be calculated due to missing information are return as np.NaN. """ grp_plus = grp + ["none"] ratedata = ( data.query(query) .loc[:, grp + ["pop"] + valuevars] .melt(id_vars=grp + ["pop"], var_name="variable", value_name="value") .dropna() .groupby(grp + ["variable"])[["pop", "value"]] .sum() .reset_index() .assign(rate=lambda x: x["value"] / x["pop"]) .drop(columns=["value", "pop"]) .pivot(index=grp, columns="variable", values="rate") .reset_index() ) return ratedata
8342d5b20f7020a97f283ce80b04b92b42476862
7,098
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch): """Checks if SEIR and SIR return same results if the code enforces * alpha = gamma * E = 0 * dI = dE """ x_sir, pars_sir = sir_data_wo_policy x_seir, pars_seir = seir_data pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand def mocked_seir_step(data, **pars): data["exposed"] = 0 new_data = SEIRModel.simulation_step(data, **pars) new_data["infected"] += new_data["exposed_new"] return new_data seir_model = SEIRModel() monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step) sir_model = SIRModel() predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir) predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir) assert_frame_equal( predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE], )
d70b841b23af6883a14bb1c97f31f3e24ae7fd4d
7,099
def login(client, username='', password=''): """ Log a specific user in. :param client: Flask client :param username: The username :type username: str :param password: The password :type password: str :return: Flask response """ user = dict(login=username, password=password) response = client.post(url_for('blog.login'), data=user, follow_redirects=True) return response
c0a9ac806fc0f1b55ebc76f5f50aa4b8e71436c4
7,100
def func(x, params): """The GNFW radial profile. Args: x (:obj:`np.ndarray`): Radial coordinate. params (:obj:`dict`): Dictionary with keys `alpha`, `beta`, `gamma`, `c500`, and `P0` that defines the GNFW profile shape. Returns: Profile (1d :obj:`np.ndarray`). """ G, A, B, c500, P0 = params['gamma'], params['alpha'], params['beta'], params['c500'], params['P0'] prof=np.zeros(x.shape) mask=np.greater(x, 0) prof[mask]=P0*((x[mask]*c500)**-G * (1+(x[mask]*c500)**A)**((G-B)/A)) #prof[x == 0]=np.inf return prof
a7510bdcc7e5938ece6d888620372f95d013a114
7,101
def _readFromSettings(self, key): """Loads the settings object associated with the program and returns the value at the key.""" COMPANY, APPNAME, _ = SELMAGUISettings.getInfo() COMPANY = COMPANY.split()[0] APPNAME = APPNAME.split()[0] settings = QtCore.QSettings(COMPANY, APPNAME) val = None try: val = settings.value(key) except: self._signalObject.errorMessageSignal.emit( "Wrong setting accessed.") return val #Return the right type if val == "true": return True if val == "false": return False return float(val)
a96b7b14789bb848fe288e419b1b9ff8c9b35db8
7,103
import logging def is_statu(search_data): """ Determine whether a parameter is given, and whether it means active ('正常') or disabled ('停用') :param search_data: :return: """ logging.info('is_statu') if search_data: if search_data == '正常': return '1' elif search_data == '停用': return '0' else: return search_data else: return ''
b9bcc643f2bb73fd692017cf5ff1dee23d528a8f
7,106
def get_mysql_exception(errno, msg, sqlstate=None): """Get the exception matching the MySQL error This function will return an exception based on the SQLState. The given message will be passed on in the returned exception. The exception returned can be customized using the mysql.connector.custom_error_exception() function. Returns an Exception """ try: return _CUSTOM_ERROR_EXCEPTIONS[errno]( msg=msg, errno=errno, sqlstate=sqlstate) except KeyError: # Error was not mapped to particular exception pass try: return _ERROR_EXCEPTIONS[errno]( msg=msg, errno=errno, sqlstate=sqlstate) except KeyError: # Error was not mapped to particular exception pass if not sqlstate: return DatabaseError(msg=msg, errno=errno) try: return _SQLSTATE_CLASS_EXCEPTION[sqlstate[0:2]]( msg=msg, errno=errno, sqlstate=sqlstate) except KeyError: # Return default InterfaceError return DatabaseError(msg=msg, errno=errno, sqlstate=sqlstate)
4ce4ae51a9a87b2a303aca4de5ac238fc6adf115
7,107
from typing import List def get_image_resize_transform_steps(config, dataset) -> List: """ Resizes the image to a slightly larger square. """ assert dataset.original_resolution is not None assert config.resize_scale is not None scaled_resolution = tuple( int(res * config.resize_scale) for res in dataset.original_resolution ) return [ transforms.Resize(scaled_resolution) ]
d3c1ddd5a072efc853cc7967b70f2e98011d31a4
7,109
def get_page_title(page_src, meta_data): """Returns the title of the page. The title in the meta data section will take precedence over the H1 markdown title if both are provided.""" return ( meta_data['title'] if 'title' in meta_data and isinstance(meta_data['title'], str) else get_markdown_title(page_src) )
e9fc19f9bc1d615c2ba8b4210f9be5212c282e53
7,110
def saveReplayBuffer(): """ Flush and save the contents of the Replay Buffer to disk. This is basically the same as triggering the "Save Replay Buffer" hotkey. Will return an `error` if the Replay Buffer is not active. """ return __createJSON("SaveReplayBuffer", {})
4be684acb7751ee6c78825a3e8c702db1b5d18f2
7,113
import base64 import json def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs): """ This is a helper function used by both 'logged_in_or_basicauth' and 'has_perm_or_basicauth' that does the nitty of determining if they are already logged in or if they have provided proper http-authorization and returning the view if all goes well, otherwise responding with a 401. """ if request.user is None or not request.user.is_authenticated() or not user_has_student(request.user) or ALWAYS_LOGIN: key = 'HTTP_AUTHORIZATION' if key not in request.META: key = 'REDIRECT_HTTP_AUTHORIZATION' if key not in request.META: key = 'HTTP_X_AUTHORIZATION' if key in request.META: auth = request.META[key].split() if len(auth) == 2: if auth[0].lower() == "basic": # Basic authentication - this is not an API client uname, passwd = base64.b64decode(auth[1]).split(':') user = authenticate(username=uname, password=passwd) permissions = APIClient.universal_permission_flag() elif auth[0].lower() == "bearer": # The client bears a FireRoad-issued token user, permissions, error = extract_token_info(request, auth[1]) if error is not None: return HttpResponse(json.dumps(error), status=401, content_type="application/json") user.backend = 'django.contrib.auth.backends.ModelBackend' else: raise PermissionDenied request.session['permissions'] = permissions if user is not None: if user.is_active: login(request, user) request.user = user return view(request, *args, **kwargs) raise PermissionDenied #return redirect('login') else: if 'permissions' not in request.session: print("Setting universal permission flag - this should only occur in dev or from FireRoad-internal login.") request.session['permissions'] = APIClient.universal_permission_flag() return view(request, *args, **kwargs)
e3bca2ba1f0bf2a82105e7a530bb0ce05f324898
7,114
def _subtract_background_one_line(data_line, e_off, e_lin, e_quad, width): """ Subtract background from spectra in a single line of the image Parameters ---------- data_line : ndarray spectra for one line of an image, size NxM, N-the number of pixels in the line, M - the number of points in one spectrum (typically 4096) e_off : float offset - coefficient for polynomial approximation of energy axis e_lin : float linear coefficient of polynomial approximation of energy axis e_quad : float quadratic coefficient of polynomial approximation of energy axis background_width : float parameter of snip algorithm for background estimation Returns ------- ndarray of the same shape as data_line. Contains spectra with subtracted background. """ data_line = np.copy(data_line) xx, _ = data_line.shape for n in range(xx): bg = snip_method(data_line[n, :], e_off=e_off, e_lin=e_lin, e_quad=e_quad, width=width) data_line[n, :] -= bg return data_line
10c9928b1e9d576e404ee82a028394381963472f
7,115
def clean_principals_output(sql_result, username, shell=False): """ Transform sql principals into readable one """ if not sql_result: if shell: return username return [username] if shell: return sql_result return sql_result.split(',')
313d04aef55c7fd689605a19d22c801123624a51
7,116
def matchesType(value, expected): """ Returns boolean for whether the given value matches the given type. Supports all basic JSON supported value types: primitive, integer/int, float, number/num, string/str, boolean/bool, dict/map, array/list, ... """ result = type(value) expected = expected.lower() if result is int: return expected in ("integer", "number", "int", "num", "primitive") elif result is float: return expected in ("float", "number", "num", "primitive") elif result is str: return expected in ("string", "str", "primitive") elif result is bool: return expected in ("boolean", "bool", "primitive") elif result is dict: return expected in ("dict", "map") elif result is list: return expected in ("array", "list") return False
24949f01a1bc3ae63a120d91549ae06ba52298a8
7,117
def csv_logging(record): """generate output in csv format""" csv_record = ('{ts},{si},{di},{sp},{dp},{t},"{p}",{h},{v},"{ha}",' '"{k}","{e}","{m}","{c}"') if 'hassh' in record: hasshType = 'client' kexAlgs = record['ckex'] encAlgs = record['ceacts'] macAlgs = record['cmacts'] cmpAlgs = record['ccacts'] hassh = record['hassh'] hasshAlgorithms = record['hasshAlgorithms'] identificationString = record['client'] elif 'hasshServer' in record: hasshType = 'server' kexAlgs = record['skex'] encAlgs = record['seastc'] macAlgs = record['smastc'] cmpAlgs = record['scastc'] hassh = record['hasshServer'] hasshAlgorithms = record['hasshServerAlgorithms'] identificationString = record['server'] csv_record = csv_record.format( ts=record['timestamp'], si=record['sourceIp'], di=record['destinationIp'], sp=record['sourcePort'], dp=record['destinationPort'], t=hasshType, p=identificationString, h=hassh, v=HASSH_VERSION, ha=hasshAlgorithms, k=kexAlgs, e=encAlgs, m=macAlgs, c=cmpAlgs) return csv_record
53fdbc8e634162199cec94d7cb1a7b737f08310f
7,118
import itertools from typing import Counter def get_top_words(keywords): """ Orders the topics from most common to least common for displaying. """ keywords = itertools.chain.from_iterable(map(str.split, keywords)) top_words = [word for word, _ in Counter(keywords).most_common()] return top_words
307a5a0e0e900e411097a84d19daf0ca7187c9bc
7,121
def obj_prop(*args, **kwargs): """ Build an object property wrapper. If no arguments (or a single ``None`` argument) are suppled, return a dummy property. If one argument is supplied, return :class:`AttrObjectProperty` for a property with a given name. Otherwise, return :class:`MethodObjectProperty` property. """ if len(args)==0: return empty_object_property() if len(args)==1: if args[0] is None: # empty property return empty_object_property() return AttrObjectProperty(args[0],**kwargs) elif len(args)<=3: return MethodObjectProperty(*args,**kwargs) else: raise ValueError("invalid number of arguments")
9b2e7e28c7b68cafdcd39a447a5dcb15c493e399
7,124
from typing import Iterable def _check_name(name: str, invars: Iterable[str]) -> str: """Check if count is valid""" if name is None: name = _n_name(invars) if name != "n": logger.warning( "Storing counts in `%s`, as `n` already present in input. " 'Use `name="new_name" to pick a new name.`', name ) elif not isinstance(name, str): raise ValueError("`name` must be a single string.") return name
1dd4fce937e9a48a64147b9c4a03f713e7f7c433
7,126
def get_documents(corpus_tag): """ Returns a list of documents with a particular corpus tag """ values = db.select(""" SELECT doc_id FROM document_tag WHERE tag=%(tag)s ORDER BY doc_id """, tag=corpus_tag) return [x.doc_id for x in values]
933dd00e76475fbd14e4cd8b3dff9e918d98ff46
7,127
def draw_with_indeces(settings): """ Drawing function that displays the input smiles string with all atom indeces """ m = Chem.MolFromSmiles(settings['SMILESSTRING']) dm = Draw.PrepareMolForDrawing(m) d2d = Draw.MolDraw2DSVG(350,350) opts = d2d.drawOptions() for i in range(m.GetNumAtoms()): opts.atomLabels[i] = m.GetAtomWithIdx(i).GetSymbol()+str(i) d2d.DrawMolecule(dm) d2d.FinishDrawing() return d2d.GetDrawingText()
b32b7031e97c264630e0cf6024c60b7eb87c6ff9
7,128
from app.extensions.celerybackend import models from app.extensions.logger.models import Log from app.modules.auth.models import User from app.utils import local def get_main_page_info(): """Get statistics for the main page :return info: Dict of statistics """ task_cnt = models.Tasks.objects(time_start__gte=local.localdate()).count() user_cnt = User.query.count() new_user_cnt = User.query.filter(User.created > local.localdate()).count() log_cnt = Log.objects( created__gte=local.localdate(), module__nin=["static", "admin", "unknown"] ).count() task_success_cnt = models.Tasks.objects( time_start__gte=local.localdate(), state="success" ).count() task_run_cnt = models.Tasks.objects( time_start__gte=local.localdate(), state="run" ).count() task_fail_cnt = models.Tasks.objects( time_start__gte=local.localdate(), state="fail" ).count() if task_success_cnt == 0: task_success = 0 else: task_success = int(task_success_cnt / task_cnt * 100) if task_run_cnt == 0: task_run = 0 else: task_run = int(task_run_cnt / task_cnt * 100) if task_fail_cnt == 0: task_fail = 0 else: task_fail = int(task_fail_cnt / task_cnt * 100) info = { "task": task_cnt, "user": user_cnt, "new_user": new_user_cnt, "log": log_cnt, } return info, task_success, task_run, task_fail
5a8c67dcafd0f822102f89195726cb7648b136fb
7,129
def get_tablenames(cur): """ Convenience: return the names of all tables in the SQLite database """ cur.execute("SELECT name FROM sqlite_master WHERE type='table'") tablename_list_ = cur.fetchall() tablename_list = [str(tablename[0]) for tablename in tablename_list_ ] return tablename_list
311335c38d9ea19396da3292513e3e1d7bd5caf0
7,130
import urllib def reverse_geocode(userCoords): """ Returns the city, state (or equivalent administrative region), and country that the specified point is in userCoords is a tuple: (latitude, longitude) """ lat, lng = userCoords latlng = "{0},{1}".format(lat, lng) data = urllib.parse.urlencode({"latlng" : latlng, "result_type" : "locality", "key" : API_KEY}) result = make_google_api_request(API_URL + data) if result["status"] == "OK": return result["results"][0]["formatted_address"] else: return "Status: " + result["status"]
b38d9585033c012ea6a90a14f2f321a538b42e86
7,131
def match_red_baselines(model, model_antpos, data, data_antpos, tol=1.0, verbose=True): """ Match unique model baseline keys to unique data baseline keys based on positional redundancy. Ideally, both model and data contain only unique baselines, in which case there is a one-to-one mapping. If model contains extra redundant baselines, these are not propagated to new_model. If data contains extra redundant baselines, the lowest ant1-ant2 pair is chosen as the baseline key to insert into model. Parameters: ----------- model : type=DataContainer, model dictionary holding complex visibilities must conform to DataContainer dictionary format. model_antpos : type=dictionary, dictionary holding antennas positions for model dictionary keys are antenna integers, values are ndarrays of position vectors in meters data : type=DataContainer, data dictionary holding complex visibilities. must conform to DataContainer dictionary format. data_antpos : type=dictionary, dictionary holding antennas positions for data dictionary same format as model_antpos tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters) Output: (data) ------- new_model : type=DataContainer, dictionary holding complex visibilities from model that had matching baselines to data """ # create baseline keys for model model_keys = list(model.keys()) model_bls = np.array(list(map(lambda k: Baseline(model_antpos[k[1]] - model_antpos[k[0]], tol=tol), model_keys))) # create baseline keys for data data_keys = list(data.keys()) data_bls = np.array(list(map(lambda k: Baseline(data_antpos[k[1]] - data_antpos[k[0]], tol=tol), data_keys))) # iterate over data baselines new_model = odict() for i, bl in enumerate(model_bls): # compre bl to all model_bls comparison = np.array(list(map(lambda mbl: bl == mbl, data_bls)), np.str) # get matches matches = np.where((comparison == 'True') | (comparison == 'conjugated'))[0] # check for matches if len(matches) == 0: echo("found zero matches in data for model {}".format(model_keys[i]), verbose=verbose) continue else: if len(matches) > 1: echo("found more than 1 match in data to model {}: {}".format(model_keys[i], list(map(lambda j: data_keys[j], matches))), verbose=verbose) # assign to new_data if comparison[matches[0]] == 'True': new_model[data_keys[matches[0]]] = model[model_keys[i]] elif comparison[matches[0]] == 'conjugated': new_model[data_keys[matches[0]]] = np.conj(model[model_keys[i]]) return DataContainer(new_model)
83c7d5cc371593ad694fa81e56be6e1034bd693f
7,132
def _choose_random_genes(individual): """ Selects two separate genes from individual. Args: individual (np.array): Genotype of individual. Returns: gene1, gene2 (tuple): Genes separated by at least another gene. """ gene1, gene2 = np.sort(np.random.choice(len(individual), size=(2, 1), replace=False).flatten()) while gene2 - gene1 < 2: gene1, gene2 = np.sort(np.random.choice(len(individual), size=(2, 1), replace=False).flatten()) return (gene1, gene2)
08555dd3b3f1a04bbd93290fb9c60c37acc3583b
7,133
import types def incomplete_sample_detection(device_name): """Introspect whether a device has 'incomplete sample detection', described here: www.ni.com/documentation/en/ni-daqmx/latest/devconsid/incompletesampledetection/ The result is determined empirically by outputting a pulse on one counter and measuring it on another, and seeing whether the first sample was discarded or not. This requires a non-simulated device with at least two counters. No external signal is actually generated by the device whilst this test is performed. Credit for this method goes to Kevin Price, who provided it here: forums.ni.com/t5/Multifunction-DAQ/_/td-p/3849429 This workaround will hopefully be deprecated if and when NI provides functionality to either inspect this feature's presence directly, or to disable it regardless of its presence. """ if is_simulated(device_name): msg = "Can only detect incomplete sample detection on non-simulated devices" raise ValueError(msg) if not supports_period_measurement(device_name): msg = "Device doesn't support period measurement" raise ValueError(msg) CI_chans = get_CI_chans(device_name) if len(CI_chans) < 2: msg = "Need at least two counters to detect incomplete sample detection" raise ValueError(msg) # The counter we will produce a test signal on: out_chan = CI_chans[0] # The counter we will measure it on: meas_chan = CI_chans[1] # Set up the output task: out_task = daqmx.Task() out_task.CreateCOPulseChanTime( out_chan, "", c.DAQmx_Val_Seconds, c.DAQmx_Val_Low, 0, 1e-3, 1e-3 ) # Prevent the signal being output on the physical terminal: out_task.SetCOPulseTerm("", "") # Force CO into idle state to prevent spurious edges when the task is started: out_task.TaskControl(c.DAQmx_Val_Task_Commit) # Set up the measurement task meas_task = daqmx.Task() meas_task.CreateCIPeriodChan( meas_chan, "", 1e-3, 1.0, c.DAQmx_Val_Seconds, c.DAQmx_Val_Rising, c.DAQmx_Val_LowFreq1Ctr, 10.0, 0, "", ) meas_task.CfgImplicitTiming(c.DAQmx_Val_ContSamps, 1) # Specify that we are measuring the internal output of the other counter: meas_task.SetCIPeriodTerm("", '/' + out_chan + 'InternalOutput') try: meas_task.StartTask() out_task.StartTask() out_task.WaitUntilTaskDone(10.0) # How many samples are in the read buffer of the measurement task? samps_avail = types.uInt32() meas_task.GetReadAvailSampPerChan(samps_avail) if samps_avail.value == 0: # The device discarded the first edge return True elif samps_avail.value == 1: # The device did not discard the first edge return False else: # Unexpected result msg = "Unexpected number of samples: %d" % samps_avail.value raise ValueError(msg) finally: out_task.ClearTask() meas_task.ClearTask()
52fac104ba408273c7876de0c37a62bc6548b7b6
7,134
def diag_numba(A, b): """ Fill matrix A with a diagonal represented by vector b. Parameters ---------- A : array Base matrix. b : array Diagonal vector to fill with. Returns ------- array Matrix A with diagonal filled. """ for i in range(b.shape[0]): A[i, i] = b[i] return A
7eb722eaea9e932c7e7d0f3c52b40d224c7152cc
7,135
def get_symminfo(newsymms: dict) -> str: """ Adds text about the symmetry generators used in order to add symmetry generated atoms. """ line = 'Symmetry transformations used to generate equivalent atoms:\n' nitems = len(newsymms) n = 0 for key, value in newsymms.items(): sep = ';' if n == nitems - 1: sep = '' n += 1 line += "#{}: {}{} ".format(key, value, sep) if newsymms: return line else: return ''
2b3fdeebac85ea3329839406e611ba051f45ddce
7,136
import random def get_random_sequences( self, n=10, length=200, chroms=None, max_n=0.1, outtype="list" # noqa ): """ Return random genomic sequences. Parameters ---------- n : int , optional Number of sequences to return. length : int , optional Length of sequences to return. chroms : list , optional Return sequences only from these chromosomes. max_n : float , optional Maximum fraction of Ns. outtype : string , optional return the output as list or string. Options: "list" or "string", default: "list". Returns ------- list coordinates as lists or strings: List with [chrom, start, end] genomic coordinates. String with "chrom:start-end" genomic coordinates (can be used as input for track2fasta). """ if not chroms: chroms = self.keys() # dict of chromosome sizes after subtracting the number of Ns sizes = dict( [(chrom, len(self[chrom]) - self.gaps.get(chrom, 0)) for chrom in chroms] ) # list of (tuples with) chromosomes and their size # (if that size is long enough for random sequence selection) lengths = [ (sizes[x], x) for x in chroms if sizes[x] / len(self[x]) > 0.1 and sizes[x] > 10 * length ] if len(lengths) == 0: raise Exception("No contigs of sufficient size were found.") # random list of chromosomes from lengths (can have duplicates) chroms = weighted_selection(lengths, n) coords = [] retries = 100 cutoff = length * max_n for chrom in chroms: for _ in range(retries): start = int(random() * (sizes[chrom] - length)) end = start + length count_n = self[chrom][start:end].seq.upper().count("N") if count_n <= cutoff: break else: raise Exception( f"Random subset ran {retries} times, " f"but could not find a sequence with less than {cutoff} N's in {chrom}.\n" "You can specify contigs using the CHROMS argument." ) # list output example ["chr1", 123, 456] coords.append([chrom, start, end]) if outtype != "list": # bed output example: "chr1:123-456" for i, region in enumerate(coords): coords[i] = [f"{region[0]}:{region[1]}-{region[2]}"] return coords
ac6c33de8d333b3998d5e1b4d20dd2780745f9db
7,137
def get_sampleentropy(data): """Sample entropy, using antropy.sample_entropy, in the ML and AP directions. """ x, y = np.array(data[4]), np.array(data[5]) sample_entropy_ML = ant.sample_entropy(x) sample_entropy_AP = ant.sample_entropy(y) return sample_entropy_ML, sample_entropy_AP
d69d86de426bf4c7c9110d71a1a3c386a1d042d8
7,138
def from_json(filename, columns=None, process_func=None): """Read data from a json file Args: filename: path to a json file columns (list, optional): list of columns to keep. All columns are kept by default process_func (function, optional): A callable object that you can pass to process you data in a specific way Returns: pandas.DataFrame: return a dataframe object """ df = pd.read_json(filename) return __process_data(df, columns, process_func)
489603ff61c0a5aaa5012770d7c7649141002027
7,139
def render_content(tab): """ This function displays tabs based on user selection of tab """ if tab == 'tab-2': return filter_based_recommendation.TAB2_LAYOUT return choice_based_recommendation.CHOICE_BASED_RECOMMENDATION_LAYOUT
085e17b401b52de4b2ff8b11765a798586832cf8
7,140
import requests def course(name, reviews = False): """ Get a course. Parameters ---------- name: string The name of the course. reviews: bool, optional Whether to also return reviews for the course, specifically reviews for professors that taught the course and have the course listed as the one being reviewed. Defaults to False. """ params = {"name" : name, "reviews": "true" if reviews else "false"} url = BASE_URL + "course?" + urlencode(params) return requests.get(url).json()
969c2a94ecf1bfad227279ba3475772a45939848
7,141
def task(weight=1): """ Used as a convenience decorator to be able to declare tasks for a TaskSet inline in the class. Example:: class ForumPage(TaskSet): @task(100) def read_thread(self): pass @task(7) def create_thread(self): pass """ def decorator_func(func): func.locust_task_weight = weight return func """ Check if task was used without parentheses (not called), like this:: @task def my_task() pass """ if callable(weight): func = weight weight = 1 return decorator_func(func) else: return decorator_func
9a5af6cb9dabe73a5c08c8938b438b74881f9f26
7,142
import datetime def generate_age(issue_time): """Generate an age parameter for MAC authentication draft 00.""" td = datetime.datetime.now() - issue_time age = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 return unicode_type(age)
3164ee1422b5eafd56dee0b0e73183fc64d14597
7,143
def _bqm_from_1sat(constraint): """create a bqm for a constraint with only one variable bqm will have exactly classical gap 2. """ configurations = constraint.configurations num_configurations = len(configurations) bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) if num_configurations == 1: val, = next(iter(configurations)) v, = constraint.variables bqm.add_variable(v, -1 if val > 0 else +1) else: bqm.add_variables_from((v, 0.0) for v in constraint.variables) return bqm.change_vartype(constraint.vartype)
149967077070e71eae66dc5521dfbae479645eda
7,144
import os import sys import traceback def _ssh(server): """ SSH into a Server """ remote_user = server.remote_user private_key = server.private_key if not private_key or not remote_user: if remote_user: return {"result": "Critical. Missing Private Key", "status": 3, } elif private_key: return {"result": "Critical. Missing Remote User", "status": 3, } else: return {"result": "Critical. Missing Remote User & Private Key", "status": 3, } # SSH in & run check try: import paramiko except ImportError: return {"result": "Critical. Paramiko required.", "status": 3, } keyfile = open(os.path.join(current.request.folder, "uploads", private_key), "r") mykey = paramiko.RSAKey.from_private_key(keyfile) ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: ssh.connect(hostname = server.host_ip, username = remote_user, pkey = mykey) except paramiko.ssh_exception.AuthenticationException: tb_parts = sys.exc_info() tb_text = "".join(traceback.format_exception(tb_parts[0], tb_parts[1], tb_parts[2])) return {"result": "Critical. Authentication Error\n\n%s" % tb_text, "status": 3, } except paramiko.ssh_exception.SSHException: tb_parts = sys.exc_info() tb_text = "".join(traceback.format_exception(tb_parts[0], tb_parts[1], tb_parts[2])) return {"result": "Critical. SSH Error\n\n%s" % tb_text, "status": 3, } return ssh
e54e72c8c4bff8deeac0e29a57393860954b299c
7,145
from typing import Dict from typing import List from typing import Any from typing import Tuple def _create_group_codes_and_info( states: pd.DataFrame, assort_bys: Dict[str, List[str]], contact_models: Dict[str, Dict[str, Any]], ) -> Tuple[pd.DataFrame, Dict[str, Dict[str, Any]]]: """Create group codes and additional information. Args: states (pd.DataFrame): The states. assort_bys (Dict[str, List[str]]): The assortative variables for each contact model. contact_models (Dict[str, Dict[str, Any]]): The contact models. Returns: A tuple containing: - states (pandas.DataFrame): The states. - group_codes_info (Dict[str, Dict[str, Any]]): A dictionary where keys are names of contact models and values are dictionaries containing the name and the original codes of the assortative variables. """ group_codes_names = _create_group_codes_names(contact_models, assort_bys) group_codes_info = {} for model_name, assort_by in assort_bys.items(): is_factorized = contact_models[model_name].get("is_factorized", False) group_code_name = group_codes_names[model_name] # Create the group code column if it is not available or if it exists - meaning # we are resuming a simulation - to recover the groups. if (group_code_name not in states.columns) or ( group_code_name in states.columns and not is_factorized ): states[group_code_name], groups = factorize_assortative_variables( states, assort_by ) elif group_code_name in states.columns and is_factorized: states[group_code_name] = states[group_code_name].astype(DTYPE_GROUP_CODE) unsorted_groups = states[group_code_name].unique() groups = np.sort(unsorted_groups[unsorted_groups != -1]) else: groups = states[group_code_name].cat.categories if is_factorized: groups = groups[groups != -1] group_codes_info[model_name] = {"name": group_code_name, "groups": groups} return states, group_codes_info
6acac718aa639e9584dbc5d7cb7d601731aa674e
7,146
def quiver_plotter(X, Y, Z, plot_range=None, mes_unit='', title='', x_label=r'$x$', y_label=r'$y$', show_plot=True, dark=False): """ Generates a plot of some vector fields. Parameters ---------- X : numpy.ndarray Matrix with values for the first axis on all the rows. Y : numpy.ndarray Matrix with values for the second axis on all the columns. Z : numpy.ndarray or list of numpy.ndarray Either a matrix with 3 dimension and the last two dimensions like the dimensions of X and Y or a list of two matricies with the same size as X and Y. plot_range : list of floats, optional List with the range for the plot. The defualt is None. mes_unit : str, optional Units of measure of the vectors shown. The default is ''. title : str, optional Title of the plot. The default is ''. x_label : str, optional The name on the first axis. The default is r'$x$'. y_label : str, optional Name on the second axis. The default is r'$y$'. show_plot : bool, optional Flag for printing the figure with plt.show(). The default is True. dark : bool, optional Flag for changing the graph color to a dark theme. The default is False. Raises ------ ValueError If the size of either X, Y or Z don't match. TypeError If the Z parameter is neither a list of numpy.ndarray or a numpy.ndarray Returns ------- fig : matplotlib.figure.Figure Figure with the plot. """ if isinstance(Z, list): if len(Z) != 2: raise ValueError("The argument z should be a list of two elements.") else: q_x = Z[0] q_y = Z[1] elif isinstance(Z, np.ndarray): if len(Z.shape) != 3 or Z.shape[0] < 2: raise ValueError( "The argument z should be a numpy array of dimension 3 with at least 2 values on the first axis.") else: q_x = Z[0, :] q_y = Z[1, :] else: raise TypeError( "The argument z should be a list of numpy.ndarray or an instance of numpy.ndarray.") range_reduction = True if plot_range == None: range_reduction = False elif not isinstance(plot_range, list): raise TypeError('The argument should be a list of floats.') elif len(plot_range) != 4: raise ValueError( 'The number of elements in plot_range should be 4, here it is {}'.format(len(plot_range))) if q_x.shape != X.shape or q_x.shape != Y.shape or q_y.shape != X.shape or q_y.shape != Y.shape: raise ValueError("The shape of X, Y and the two elements in Z must coincide.") if range_reduction: x_max = plot_range[1] x_min = plot_range[0] y_max = plot_range[3] y_min = plot_range[2] idx_x_min, idx_x_max = _crop_array_idxs(X[:, 0], x_min, x_max) idx_y_min, idx_y_max = _crop_array_idxs(Y[0, :], y_min, y_max) X = X[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] Y = Y[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] q_x = q_x[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] q_y = q_y[idx_x_min: idx_x_max + 1, idx_y_min: idx_y_max + 1] # plotting of the function fig = plt.figure(figsize=fig_size) ax = fig.gca() Q = ax.quiver(X, Y, q_x, q_y, pivot='tail') ax.quiverkey(Q, 0.9, 0.9, 1, '1' + mes_unit, labelpos='E', coordinates='figure') if range_reduction: ax.axis(plot_range) ax.set_xlabel(x_label) ax.set_ylabel(y_label) title = ax.set_title(title) if dark: _darkizer(fig, ax, title) if show_plot: plt.show() return fig
55a0c5768ce2827c851abf468030824ee9a1411e
7,147
def get_attr(item, name, default=None): """ similar to getattr and get but will test for class or dict :param item: :param name: :param default: :return: """ try: val = item[name] except (KeyError, TypeError): try: val = getattr(item, name) except AttributeError: val = default return val
0c68c7e54ef901e18a49d327188f29f72f54da01
7,148
def float2(val, min_repeat=6): """Increase number of decimal places of a repeating decimal. e.g. 34.111111 -> 34.1111111111111111""" repeat = 0 lc = "" for i in range(len(val)): c = val[i] if c == lc: repeat += 1 if repeat == min_repeat: return float(val[:i+1] + c * 10) else: lc = c repeat = 1 return float(val)
07fc521e877387242a1e6cf951a6d5cbdc925aaf
7,149
def load_array_meta(loader, filename, index): """ Load the meta-data data associated with an array from the specified index within a file. """ return loader(filename, index)
e53ed1d795edf2285b3eca333a7650a378c26b9a
7,150
def viterbi_value(theta: np.ndarray, operator: str = 'hardmax') \ -> float: """ Viterbi operator. :param theta: _numpy.ndarray, shape = (T, S, S), Holds the potentials of the linear chain CRF :param operator: str in {'hardmax', 'softmax', 'sparsemax'}, Smoothed max-operator :return: float, DTW value $Vit(\theta)$ """ return viterbi_grad(theta, operator)[0]
7b1c37143c05f400cc910e07c97e51d4d3788ca9
7,151
def pack32(n): """Convert a Python int to a packed signed long (4 bytes).""" return pack('<i', n)
0caebee4af80c4defb75ed8512cb2d5d13cd7ede
7,152
def run_rollout( policy, env, horizon, use_goals=False, render=False, video_writer=None, video_skip=5, terminate_on_success=False, ): """ Runs a rollout in an environment with the current network parameters. Args: policy (RolloutPolicy instance): policy to use for rollouts. env (EnvBase instance): environment to use for rollouts. horizon (int): maximum number of steps to roll the agent out for use_goals (bool): if True, agent is goal-conditioned, so provide goal observations from env render (bool): if True, render the rollout to the screen video_writer (imageio Writer instance): if not None, use video writer object to append frames at rate given by @video_skip video_skip (int): how often to write video frame terminate_on_success (bool): if True, terminate episode early as soon as a success is encountered Returns: results (dict): dictionary containing return, success rate, etc. """ assert isinstance(policy, RolloutPolicy) assert isinstance(env, EnvBase) policy.start_episode() ob_dict = env.reset() goal_dict = None if use_goals: # retrieve goal from the environment goal_dict = env.get_goal() results = {} video_count = 0 # video frame counter total_reward = 0. success = { k: False for k in env.is_success() } # success metrics try: for step_i in range(horizon): # get action from policy ac = policy(ob=ob_dict, goal=goal_dict) # play action ob_dict, r, done, _ = env.step(ac) # render to screen if render: env.render(mode="human") # compute reward total_reward += r cur_success_metrics = env.is_success() for k in success: success[k] = success[k] or cur_success_metrics[k] # visualization if video_writer is not None: if video_count % video_skip == 0: video_img = env.render(mode="rgb_array", height=512, width=512) video_writer.append_data(video_img) video_count += 1 # break if done if done or (terminate_on_success and success["task"]): break except env.rollout_exceptions as e: print("WARNING: got rollout exception {}".format(e)) results["Return"] = total_reward results["Horizon"] = step_i + 1 results["Success_Rate"] = float(success["task"]) # log additional success metrics for k in success: if k != "task": results["{}_Success_Rate".format(k)] = float(success[k]) return results
52324d72a80aea83b667faa08e6e95c561311ee5
7,153
from typing import Union from typing import Type from typing import List from typing import Optional from typing import Dict from typing import Any from typing import cast def create_test_client( route_handlers: Union[ Union[Type[Controller], BaseRouteHandler, Router, AnyCallable], List[Union[Type[Controller], BaseRouteHandler, Router, AnyCallable]], ], after_request: Optional[AfterRequestHandler] = None, allowed_hosts: Optional[List[str]] = None, backend: str = "asyncio", backend_options: Optional[Dict[str, Any]] = None, base_url: str = "http://testserver", before_request: Optional[BeforeRequestHandler] = None, cors_config: Optional[CORSConfig] = None, dependencies: Optional[Dict[str, Provide]] = None, exception_handlers: Optional[Dict[Union[int, Type[Exception]], ExceptionHandler]] = None, guards: Optional[List[Guard]] = None, middleware: Optional[List[Union[Middleware, Type[BaseHTTPMiddleware], Type[MiddlewareProtocol]]]] = None, on_shutdown: Optional[List[LifeCycleHandler]] = None, on_startup: Optional[List[LifeCycleHandler]] = None, openapi_config: Optional[OpenAPIConfig] = None, template_config: Optional[TemplateConfig] = None, plugins: Optional[List[PluginProtocol]] = None, raise_server_exceptions: bool = True, root_path: str = "", static_files_config: Optional[Union[StaticFilesConfig, List[StaticFilesConfig]]] = None, cache_config: CacheConfig = DEFAULT_CACHE_CONFIG, ) -> TestClient: """Create a TestClient""" return TestClient( app=Starlite( after_request=after_request, allowed_hosts=allowed_hosts, before_request=before_request, cors_config=cors_config, dependencies=dependencies, exception_handlers=exception_handlers, guards=guards, middleware=middleware, on_shutdown=on_shutdown, on_startup=on_startup, openapi_config=openapi_config, template_config=template_config, plugins=plugins, route_handlers=cast(Any, route_handlers if isinstance(route_handlers, list) else [route_handlers]), static_files_config=static_files_config, cache_config=cache_config, ), backend=backend, backend_options=backend_options, base_url=base_url, raise_server_exceptions=raise_server_exceptions, root_path=root_path, )
f110972c735e9ad81eca1f267651e97732d6e37c
7,154
def queue_tabnav(context): """Returns tuple of tab navigation for the queue pages. Each tuple contains three elements: (tab_code, page_url, tab_text) """ counts = context['queue_counts'] request = context['request'] listed = not context.get('unlisted') if listed: tabnav = [('nominated', 'queue_nominated', (ungettext('New Add-on ({0})', 'New Add-ons ({0})', counts['nominated']) .format(counts['nominated']))), ('pending', 'queue_pending', (ungettext('Update ({0})', 'Updates ({0})', counts['pending']) .format(counts['pending']))), ('moderated', 'queue_moderated', (ungettext('Moderated Review ({0})', 'Moderated Reviews ({0})', counts['moderated']) .format(counts['moderated'])))] if acl.action_allowed(request, amo.permissions.ADDONS_POST_REVIEW): tabnav.append( ('auto_approved', 'queue_auto_approved', (ungettext('Auto Approved Add-on ({0})', 'Auto Approved Add-ons ({0})', counts['auto_approved']) .format(counts['auto_approved']))), ) else: tabnav = [ ('all', 'unlisted_queue_all', ugettext('All Unlisted Add-ons')) ] return tabnav
6f3777ce46f09a6946ba66755a3ae27eda126da5
7,155
def _plot_feature_correlations(ax, correlation_matrix, cmap="coolwarm", annot=True, fmt=".2f", linewidths=.05): """ Creates a heatmap plot of the feature correlations Args: :ax: the axes object to add the plot to :correlation_matrix: the feature correlations :cmap: the color map :annot: whether to annotate the heatmap :fmt: how to format the annotations :linewidths: line width in the plot Returns: The heatmap """ hm = sns.heatmap(correlation_matrix, ax=ax, cmap=cmap, annot=annot, fmt=fmt, linewidths=linewidths) return hm
c7835c743552eec6beb3441bc324c2192d4db9d7
7,156
import tempfile from copy import copy def graphviz_visualization(activities_count, dfg, image_format="png", measure="frequency", max_no_of_edges_in_diagram=100000, start_activities=None, end_activities=None, soj_time=None, font_size="12", bgcolor="transparent", stat_locale: dict = None): """ Do GraphViz visualization of a DFG graph Parameters ----------- activities_count Count of attributes in the log (may include attributes that are not in the DFG graph) dfg DFG graph image_format GraphViz should be represented in this format measure Describes which measure is assigned to edges in directly follows graph (frequency/performance) max_no_of_edges_in_diagram Maximum number of edges in the diagram allowed for visualization start_activities Start activities of the log end_activities End activities of the log soj_time For each activity, the sojourn time in the log stat_locale Dict to locale the stat strings Returns ----------- viz Digraph object """ if start_activities is None: start_activities = {} if end_activities is None: end_activities = {} if stat_locale is None: stat_locale = {} filename = tempfile.NamedTemporaryFile(suffix='.gv') viz = Digraph("", filename=filename.name, engine='dot', graph_attr={'bgcolor': bgcolor}) # first, remove edges in diagram that exceeds the maximum number of edges in the diagram dfg_key_value_list = [] for edge in dfg: dfg_key_value_list.append([edge, dfg[edge]]) # more fine grained sorting to avoid that edges that are below the threshold are # undeterministically removed dfg_key_value_list = sorted(dfg_key_value_list, key=lambda x: (x[1], x[0][0], x[0][1]), reverse=True) dfg_key_value_list = dfg_key_value_list[0:min(len(dfg_key_value_list), max_no_of_edges_in_diagram)] dfg_allowed_keys = [x[0] for x in dfg_key_value_list] dfg_keys = list(dfg.keys()) for edge in dfg_keys: if edge not in dfg_allowed_keys: del dfg[edge] # calculate edges penwidth penwidth = assign_penwidth_edges(dfg) activities_in_dfg = set() activities_count_int = copy(activities_count) for edge in dfg: activities_in_dfg.add(edge[0]) activities_in_dfg.add(edge[1]) # assign attributes color activities_color = get_activities_color(activities_count_int) # represent nodes viz.attr('node', shape='box') if len(activities_in_dfg) == 0: activities_to_include = sorted(list(set(activities_count_int))) else: # take unique elements as a list not as a set (in this way, nodes are added in the same order to the graph) activities_to_include = sorted(list(set(activities_in_dfg))) activities_map = {} for act in activities_to_include: if "frequency" in measure and act in activities_count_int: viz.node(str(hash(act)), act + " (" + str(activities_count_int[act]) + ")", style='filled', fillcolor=activities_color[act], fontsize=font_size) activities_map[act] = str(hash(act)) else: stat_string = human_readable_stat(soj_time[act], stat_locale) viz.node(str(hash(act)), act + f" ({stat_string})", fontsize=font_size) activities_map[act] = str(hash(act)) # make edges addition always in the same order dfg_edges = sorted(list(dfg.keys())) # represent edges for edge in dfg_edges: if "frequency" in measure: label = str(dfg[edge]) else: label = human_readable_stat(dfg[edge], stat_locale) viz.edge(str(hash(edge[0])), str(hash(edge[1])), label=label, penwidth=str(penwidth[edge]), fontsize=font_size) start_activities_to_include = [act for act in start_activities if act in activities_map] end_activities_to_include = [act for act in end_activities if act in activities_map] if start_activities_to_include: viz.node("@@startnode", "<&#9679;>", shape='circle', fontsize="34") for act in start_activities_to_include: label = str(start_activities[act]) if isinstance(start_activities, dict) else "" viz.edge("@@startnode", activities_map[act], label=label, fontsize=font_size) if end_activities_to_include: # <&#9632;> viz.node("@@endnode", "<&#9632;>", shape='doublecircle', fontsize="32") for act in end_activities_to_include: label = str(end_activities[act]) if isinstance(end_activities, dict) else "" viz.edge(activities_map[act], "@@endnode", label=label, fontsize=font_size) viz.attr(overlap='false') viz.format = image_format return viz
f04bd9e0f076887072805f57d260310f309547fd
7,157
from scipy.interpolate import RegularGridInterpolator def sig_io_func(p, ca, sv): # The method input gives control over how the Nafion conductivity is # calculated. Options are 'lam' for laminar in which an interpolation is # done using data from [1], 'bulk' for treating the thin Nafion shells the # as a bulk-like material using NR results from [5], and 'mix' which uses a # weighted parallel mixutre of 'lam' and 'bulk' based on how much Pt vs C # exists at current conditions. This is because it is speculated that Pt # may have lamellae although C may not. 'sun' was also added to the # agglomerate model options which takes constant values used in [2]. # Inputs: Temperature [K], Nafion shell thickness [m], rel. humiditiy [%], # Pt coverage [%], p['eps/tau2_n'] [-] and p['p_eff_SAnaf'] [-], # and calculation method [-] """ Lamellae Method """ # Data below is taken from "Proton Transport in Supported Nafion Nanothin # Films by Electrochemical Impedence Spectroscopy" by Paul, MacCreery, and # Karan in their Supporting Information Document [1]. The data was given in # mS/cm and converted to S/m for the model calling this function. # indecies: temperature [C], Nafion shell thickness [nm], RH [%] sig_data = np.zeros([5,5,5]) temp_vals = np.array([25,30,40,50,60]) thick_vals = np.array([4,10,55,160,300]) RH_vals = np.array([20,40,60,80,95]) # v_w = np.zeros([p['Ny'],p['Nr']]) # for i in range(p['Ny']): # ih_n = ca.naf_b[i].species_index('H(Naf)') # ih2o_n = ca.naf_b[i].species_index('H2O(Naf)') # for j in range(p['Nr']): # ca.naf_b[i].Y = sv[ca.ptr['rho_naf_k'] +i*p['nxt_y'] +j*p['nxt_r']] # v_k = ca.naf_b[i].X*ca.naf_b[i].partial_molar_volumes # v_w[i,j] = v_k[ih2o_n] / sum(v_k) # v_w_a = np.sum(p['Vf_shl']*v_w,axis=1) # lamb_n = np.clip((v_w_a / (1 - v_w_a) *983/1980 *1100/18.02), 0., 22.) rho_naf_w = np.zeros([p['Ny'],p['Nr']]) for i in range(p['Ny']): ih2o_n = ca.naf_b[i].species_index('H2O(Naf)') for j in range(p['Nr']): ind = ca.ptr['rho_naf_k'] +i*p['nxt_y'] +j*p['nxt_r'] rho_naf_w[i,j] = sv[ind][ih2o_n] rho_naf_av = np.sum(p['Vf_shl']*rho_naf_w,axis=1) RH, RH_C = np.zeros(p['Ny']), np.zeros(p['Ny']) for i in range(p['Ny']): av = rho_naf_av[i] if av > 0: RH[i] = RH_eq_func(av,p,i)*100 RH_C[i] = RH_eq_func(av/2,p,i)*100 else: RH[i] = min(RH_vals) RH_C[i] = min(RH_vals) "Data for 25C as thickness[nm] for rows and RH[%] for columns" sig_data[0,:,:] = np.array([[0.0002,0.0206,0.4138,4.9101,21.888], # t 4nm [0.0002,0.0199,0.4073,5.1758,23.9213], # t 10nm [0.0002,0.0269,0.5448,5.3493,22.753], # t 55nm [0.3362,3.2505,8.3065,27.0725,54.0428], # t 160nm [1.5591,8.8389,19.6728,None,None]]) # t 300nm "Data for 30C as thickness[nm] for rows and RH[%] for columns" sig_data[1,:,:] = np.array([[0.0001,0.012,0.278,3.432,21.481], # t 4nm [0.0003,0.018,0.339,3.895,22.062], # t 10nm [0.0004,0.028,0.550,4.296,20.185], # t 55nm [0.0016,0.081,1.120,9.244,34.810], # t 160nm [0.0071,0.359,2.797,10.978,43.913]]) # t 300nm "Data for 40C as thickness[nm] for rows and RH[%] for columns" sig_data[2,:,:] = np.array([[0.0003,0.029,0.585,6.164,30.321], # t 4nm [0.0009,0.034,0.625,5.374,48.799], # t 10nm [0.0011,0.065,0.931,6.909,40.439], # t 55nm [0.0032,0.152,1.770,14.162,68.326], # t 160nm [0.0140,0.605,4.939,17.083,68.334]]) # t 300nm "Data for 50C as thickness[nm] for rows and RH[%] for columns" sig_data[3,:,:] = np.array([[0.001,0.062,1.087,8.335,37.686], # t 4nm [0.002,0.077,1.031,8.127,57.339], # t 10nm [0.002,0.121,1.603,9.149,48.934], # t 55nm [0.007,0.247,2.704,19.221,72.006], # t 160nm [0.031,1.076,7.185,20.981,83.923]]) # t 300nm "Data for 60C as thickness[nm] for rows and RH[%] for columns" sig_data[4,:,:] = np.array([[0.003,0.14,1.51,11.16,55.18], # t 4nm [0.003,0.17,1.72,13.67,62.39], # t 10nm [0.007,0.24,2.29,16.60,63.20], # t 55nm [0.015,0.45,4.31,26.63,93.33], # t 160nm [0.009,0.44,3.43,26.73,100.60]]) # t 300nm "Create interpolation function for relavent ranges" sig_io_int = RegularGridInterpolator((temp_vals,thick_vals,RH_vals),sig_data) "Call interpolation function for model specified paramaters" # Multiplication by 0.1 is unit conversion from mS/cm to S/m. Runner file # stores T and t_naf in [K] and [m] so are also converted inside the # interpolation function to the same units as original data [C] and [nm]. RH = np.clip(RH, min(RH_vals), max(RH_vals)) RH_C = np.clip(RH_C, min(RH_vals), max(RH_vals)) pts = np.zeros([p['Ny'],3]) for i in range(p['Ny']): pts[i,:] = [p['T']-273, p['t_naf'][i]*1e9, RH[i]] sig_io_lam = sig_io_int(pts) *0.1 """ Bulk Method """ # This method assumes that the thin shell of Nafion is treated the same as # the bulk material. Lambda is calculated using an empirical relationship. # Then the sig_io formula from [5] for a bulk membrane is used and scaled # by the scaling factor, also from [5]. # The loop below assumes RH is not RH_eq and instead is the actual local # gas-phase RH. if p['sig_method'] == 'lit': for i in range(p['Ny']): ih2o_g = ca.gas.species_index('H2O') rho_gas_k = sv[ca.ptr['rho_gas_k'] +i*p['nxt_y']] ca.gas.TDY = p['T'], sum(rho_gas_k), rho_gas_k RH[i] = ca.gas.X[ih2o_g]*ca.gas.P / 19946 *100 lamb_n = 0.3 + 10.8*(RH/100) - 16*(RH/100)**2 + 14.1*(RH/100)**3 sig_io_lit = (0.5139*lamb_n - 0.326)*np.exp(1268*(1/303 - 1/p['T'])) sig_io_bulk = sig_io_lit *0.672 """ Mix Method """ # Using a parallel resistor network to weight the conductivity through # lamellae and that through bulk-like material is performed with respect to # the amount of Pt and C areas respectively. sig_io_mix = 1 / (p['p_Pt']/100 /sig_io_lam +(1-p['p_Pt']/100) /sig_io_bulk) " Set conductivity depending on method " # Based on the method, return the appropriate conductivity. if p['sig_method'] == 'lam': sig_io = sig_io_lam elif p['sig_method'] == 'bulk': sig_io = sig_io_bulk elif p['sig_method'] == 'mix': sig_io = sig_io_mix elif p['sig_method'] == 'lit': sig_io = sig_io_lit # Output returns ionic conductivity [S/m] return sig_io
3a51f6899d9d8792378d0870fa56f15172e1d6cc
7,158
def srwl_opt_setup_cyl_fiber(_foc_plane, _delta_ext, _delta_core, _atten_len_ext, _atten_len_core, _diam_ext, _diam_core, _xc, _yc): """ Setup Transmission type Optical Element which simulates Cylindrical Fiber :param _foc_plane: plane of focusing: 1- horizontal (i.e. fiber is parallel to vertical axis), 2- vertical (i.e. fiber is parallel to horizontal axis) :param _delta_ext: refractive index decrement of extenal layer :param _delta_core: refractive index decrement of core :param _atten_len_ext: attenuation length [m] of external layer :param _atten_len_core: attenuation length [m] of core :param _diam_ext: diameter [m] of external layer :param _diam_core: diameter [m] of core :param _xc: horizontal coordinate of center [m] :param _yc: vertical coordinate of center [m] :return: transmission (SRWLOptT) type optical element which simulates Cylindrical Fiber """ def ray_path_in_cyl(_dx, _diam): r = 0.5*_diam pathInCyl = 0 if((_dx > -r) and (_dx < r)): pathInCyl = 2*sqrt(r*r - _dx*_dx) return pathInCyl ne = 1 nx = 101 ny = 1001 rx = 10e-03 ry = _diam_ext*1.2 if(_foc_plane == 1): #focusing plane is horizontal nx = 1001 ny = 101 rx = _diam_ext*1.2 ry = 10e-03 opT = SRWLOptT(nx, ny, rx, ry, None, 1, 1e+23, 1e+23, _xc, _yc) hx = rx/(nx - 1) hy = ry/(ny - 1) ofst = 0 pathInExt = 0 pathInCore = 0 if(_foc_plane == 2): #focusing plane is vertical y = -0.5*ry #cylinder is always centered on the grid, however grid can be shifted for iy in range(ny): pathInExt = 0; pathInCore = 0 if(_diam_core > 0): pathInCore = ray_path_in_cyl(y, _diam_core) pathInExt = ray_path_in_cyl(y, _diam_ext) - pathInCore argAtten = -0.5*pathInExt/_atten_len_ext if(_atten_len_core > 0): argAtten -= 0.5*pathInCore/_atten_len_core ampTr = exp(argAtten) #amplitude transmission optPathDif = -_delta_ext*pathInExt - _delta_core*pathInCore #optical path difference for ix in range(nx): opT.arTr[ofst] = ampTr #amplitude transmission opT.arTr[ofst + 1] = optPathDif #optical path difference ofst += 2 y += hy else: #focusing plane is horizontal perY = 2*nx x = -0.5*rx #cylinder is always centered on the grid, however grid can be shifted for ix in range(nx): pathInExt = 0; pathInCore = 0 if(_diam_core > 0): pathInCore = ray_path_in_cyl(x, _diam_core) pathInExt = ray_path_in_cyl(x, _diam_ext) - pathInCore argAtten = -0.5*pathInExt/_atten_len_ext if(_atten_len_core > 0): argAtten -= 0.5*pathInCore/_atten_len_core ampTr = exp(argAtten) #amplitude transmission optPathDif = -_delta_ext*pathInExt - _delta_core*pathInCore #optical path difference ix2 = ix*2 for iy in range(ny): ofst = iy*perY + ix2 opT.arTr[ofst] = ampTr #amplitude transmission opT.arTr[ofst + 1] = optPathDif #optical path difference x += hx return opT
45cd80cc3dee7e9ab61311e7bbc574722feadb49
7,159
from enum import Enum def __create_menu_elements() -> Enum: """Create Menu Elements. :return: Menu elements as an enum in the format KEY_WORD -> Vales(char, KeyWord) """ menu_keys = ["MAIN_MENU", "PROFILE", "CLEAN_TIME", "READINGS", "PRAYERS", "DAILY_REFLECTION", "JUST_FOR_TODAY", "LORDS_PRAYER", "SERENITY_PRAYER", "ST_JOSEPHS_PRAYER", "TENDER_AND_COMPASSIONATE_GOD", "THIRD_STEP_PRAYER", "SEVENTH_STEP_PRAYER", "ELEVENTH_STEP_PRAYER"] menu_values_chr = [chr(ch) for ch in range(len(menu_keys))] menu_values_str = ["MainMenu", "Profile", "CleanTime", "Readings", "Prayers", "DailyReflection", "JustForToday", "LordsPrayer", "SerenityPrayer", "StJosephsPrayer", "TenderAndCompassionateGod", "ThirdStepPrayer", "SeventhStepPrayer", "EleventhStepPrayer"] return Enum('MenuElements', {k: MenuElementValues(data=v1, name=v2) for k, v1, v2 in zip(menu_keys, menu_values_chr, menu_values_str)})
4407da506b681b124975827d94471e58089452a5
7,160
import math


def solve(coordinates):
    """
    Algorithm solver: finds the shortest distance between two points.
    :param coordinates: the coordinates
    :return: the shortest distance between the two points
    """
    n = len(coordinates)
    x_coordinates = [coordinate[0] for coordinate in coordinates]
    y_coordinates = [coordinate[1] for coordinate in coordinates]

    middle_point = (sum_of_list(x_coordinates) / n, sum_of_list(y_coordinates) / n)
    # print(middle_point)  # test

    distances = [distance(middle_point, point) for point in coordinates]
    # print(distances)  # test

    distance_difference = list()
    for i in range(n - 1):
        coordinate_info = {
            'indices': (i, i + 1),
            'difference': math.fabs(distances[i] - distances[i + 1])
        }
        distance_difference.append(coordinate_info)
    # print(distance_difference)  # test

    indices = get_indices(distance_difference)

    return distance(coordinates[indices[0]], coordinates[indices[1]])
fc6594e45537cf07bac870b6f932b00dd59d57bd
7,161
def get_cache_key(account, container=None, obj=None): """ Get the keys for both memcache and env['swift.infocache'] (cache_key) where info about accounts, containers, and objects is cached :param account: The name of the account :param container: The name of the container (or None if account) :param obj: The name of the object (or None if account or container) :returns: a string cache_key """ if obj: if not (account and container): raise ValueError('Object cache key requires account and container') cache_key = 'object/%s/%s/%s' % (account, container, obj) elif container: if not account: raise ValueError('Container cache key requires account') cache_key = 'container/%s/%s' % (account, container) else: cache_key = 'account/%s' % account # Use a unique environment cache key per account and one container. # This allows caching both account and container and ensures that when we # copy this env to form a new request, it won't accidentally reuse the # old container or account info return cache_key
d46270d33fcbaecc0bf1886965ac1b1771a3fc8d
7,162
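A quick illustration of the keys produced by get_cache_key above; the account, container and object names are made up.

# Hypothetical names, shown only to illustrate the key layout.
get_cache_key('AUTH_test')                        # 'account/AUTH_test'
get_cache_key('AUTH_test', 'photos')              # 'container/AUTH_test/photos'
get_cache_key('AUTH_test', 'photos', 'cat.jpg')   # 'object/AUTH_test/photos/cat.jpg'
get_cache_key('AUTH_test', obj='cat.jpg')         # ValueError: the container is required too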
def arctan(x): """Returns arctan(x)""" if type(x) in (float,_numpy._numpy.float64): x = _numpy._numpy.array([x]) a = abs(x) r = arctan_1px( a - 1. ) f = arctan_series( a ) eps = _numpy._numpy.finfo(1.).eps g = arctan_series( 1. / maximum( 0.125, a ) ) g = 0.5 * _numpy._numpy.pi - g j = ( a < 0.5 ) r[j] = f[j] j = ( a > 2. ) r[j] = g[j] j = ( x<0 ) r[j] = -r[j] if r.size==1: return r[0] return r
6bb5f45115abd34bc7ba7892fac28eb397a131f6
7,164
def uniform_dist(low, high): """Return a random variable uniformly distributed between `low` and `high`. """ return sp_uniform(low, high - low)
e4520ee4a5a44c33fe565788b4d576a35f4c3430
7,165
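A small usage sketch for uniform_dist; it assumes sp_uniform is scipy.stats.uniform, which the snippet does not show being imported.

from scipy.stats import uniform as sp_uniform  # assumed import

rv = uniform_dist(2.0, 5.0)            # frozen distribution on [2.0, 5.0)
print(rv.mean())                       # 3.5
print(rv.rvs(size=3, random_state=0))  # three reproducible draws from [2, 5)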
import re from typing import MutableMapping def flatten(dictionary, parent_key=False, separator='_'): """ Turn a nested dictionary into a flattened dictionary :param dictionary: The dictionary to flatten :param parent_key: The string to prepend to dictionary's keys :param separator: The string used to separate flattened keys :return: A flattened dictionary """ items = [] for key, value in list(dictionary.items()): if crumbs: print(('checking:',key)) new_key = (re.sub('[^A-Za-z0-9]+', '', str(parent_key)) + separator + re.sub('[^A-Za-z0-9]+', '', key) if parent_key else key).lower() if isinstance(value, MutableMapping): if crumbs: print((new_key,': dict found')) if not list(value.items()): if crumbs: print(('Adding key-value pair:',new_key,None)) items.append((new_key,None)) else: items.extend(list(flatten(value, new_key, separator).items())) elif isinstance(value, list): if crumbs: print((new_key,': list found')) if len(value): for k, v in enumerate(value): items.extend(list(flatten({str(k): v}, new_key).items())) else: if crumbs: print(('Adding key-value pair:',new_key,None)) items.append((new_key,None)) else: if crumbs: print(('Adding key-value pair:',new_key,value)) items.append((new_key, value)) return dict(items)
45131797a602c4fcb9f40f275d755c068b1baa83
7,166
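A usage sketch for flatten; the function reads a crumbs debug flag that is never defined in the snippet, so a module-level one is assumed here. Note that non-alphanumeric characters are stripped from parent keys, which shows up in the flattened list keys.

crumbs = False  # assumed debug flag; flatten() references it but does not define it

nested = {"a": {"b": 1, "c": [2, 3]}, "d": 4}
print(flatten(nested))
# {'a_b': 1, 'ac_0': 2, 'ac_1': 3, 'd': 4}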
def format_sec_to_hms(sec):
    """Format seconds to hours, minutes, seconds.

    Args:
        sec: float or int
            Number of seconds in a period of time

    Returns:
        str
            Period of time represented as a string on the form ``0h 00m 00s``.
    """
    rem_int, s_int = divmod(int(sec), 60)
    h_int, m_int = divmod(rem_int, 60)
    return "{}h {:02d}m {:02d}s".format(h_int, m_int, s_int)
aa2cc5d6584cdebf4d37292435ecd46bb6adc4a4
7,167
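Example outputs:

format_sec_to_hms(3725)   # '1h 02m 05s'
format_sec_to_hms(59.9)   # '0h 00m 59s' -- the input is truncated to whole seconds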
def one_hot_encode(data): """turns data into onehot encoding Args: data (np.array): (n_samples,) Returns: np.array: shape (n_samples, n_classes) """ n_classes = np.unique(data).shape[0] onehot = np.zeros((data.shape[0], n_classes)) for i, val in enumerate(data.astype(int)): onehot[i, val] = 1. return onehot
58602ffa7d5964bfbb4b8457f698aad800cb3298
7,168
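A worked example for one_hot_encode; the function infers the number of classes from the distinct values, so labels are assumed to be consecutive integers starting at 0.

import numpy as np

y = np.array([0, 2, 1, 2])
one_hot_encode(y)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.],
#        [0., 0., 1.]])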
def is_number(input_str): """Check if input_str is a string number Args: input_str (str): input string Returns: bool: True if input_str can be parse to a number (float) """ try: float(input_str) return True except ValueError: return False
d22fe852a15e3d926cffb36ea3d8a235592ea62a
7,169
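Quick checks of the parsing behaviour:

assert is_number("3.14") is True
assert is_number("1e-3") is True    # scientific notation parses as a float
assert is_number("abc") is False
assert is_number("") is False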
def impute_between(coordinate_a, coordinate_b, freq):
    """Linearly impute location points between two stationary fixes.

    Args:
        coordinate_a: (lat, lon, timestamp) tuple; the timestamp must be a datetime.
        coordinate_b: (lat, lon, timestamp) tuple; the timestamp must be a datetime.
        freq: pandas frequency string used to space the imputed points.

    Returns:
        A DataFrame with `lat`, `lon` and `ts` columns, exclusive of the two
        endpoints, or None if the pair is not suitable for imputation.
    """
    metrics = discrete_velocity(coordinate_a, coordinate_b)

    b, d, sec = metrics['binning'], metrics['displacement'], metrics['time_delta']
    if b != 'stationary' or d > 75 or sec > 60**2*12:
        return None

    a_lat, a_lon, a_ts = coordinate_a
    b_lat, b_lon, b_ts = coordinate_b

    if not (isinstance(a_ts, dt.datetime) and isinstance(b_ts, dt.datetime)):
        raise TypeError('third element of each coordinate tuple must be dt')

    fill_range = list(pd.date_range(a_ts, b_ts, freq=freq))

    # ensure the returned dataframe range is exclusive
    if fill_range[0] == a_ts:
        fill_range.remove(fill_range[0])

    if len(fill_range) == 0:
        return None

    if fill_range[-1] == b_ts:
        fill_range.remove(fill_range[-1])

    fill_lat = np.linspace(a_lat, b_lat, len(fill_range))
    fill_lon = np.linspace(a_lon, b_lon, len(fill_range))

    t = dict(lat=fill_lat, lon=fill_lon, ts=fill_range)
    return pd.DataFrame(t)
208729df0bd701302103a30e01e0cbdc5208f118
7,170
def seq(fr,to,by): """An analogous function to 'seq' in R Parameters: 1. fr: from 2. to: to 3. by: by (interval) """ if fr<to: return range(fr,to+abs(by),abs(by)) elif fr>to: if by>0: aseq = range(fr,to-by,-1*by) else: aseq = range(fr,to+by,by) else: aseq = [fr] if aseq[-1]>to: return aseq[:-1] else: return aseq
39b7878f81e93c137eed1e435e438b1645b09f9f
7,171
def _get_config_from_context(ctx): """ :param ctx: :return: :rtype: semi.config.configuration.Configuration """ return ctx.obj["config"]
c085f69fd87ad5f72c8453e6f01771d943b2c481
7,172
def _invert_options(matrix=None, sparse=None): """Returns |invert_options| (with default values) for a given |NumPy| matrix. See :func:`sparse_options` for documentation of all possible options for sparse matrices. Parameters ---------- matrix The matrix for which to return the options. sparse Instead of providing a matrix via the `matrix` argument, `sparse` can be set to `True` or `False` to requset the invert options for sparse or dense matrices. Returns ------- A tuple of all possible |invert_options|. """ global _dense_options, _dense_options_sid, _sparse_options, _sparse_options_sid assert (matrix is None) != (sparse is None) sparse = sparse if sparse is not None else issparse(matrix) if sparse: if not _sparse_options or _sparse_options_sid != defaults_sid(): _sparse_options = sparse_options() _sparse_options_sid = defaults_sid() return _sparse_options else: return _sparse_options else: if not _dense_options or _dense_options_sid != defaults_sid(): _dense_options = dense_options() _dense_options_sid = defaults_sid() return _dense_options else: return _dense_options
0625b86038c29dbf0e6db4e87b9e76de05bce426
7,173
def get_Carrot_scramble(n=70): """ Gets a Carrot-notation scramble of length `n` for a Megaminx. Defaults to csTimer's default length of 70. """ return _UTIL_SCRAMBLER.call("util_scramble.getMegaminxCarrotScramble", n).replace('\n','').replace(' ',' ').replace(' ',' ')
8155294b86f9d5cbe756f7476afb952446603d8c
7,175
def convex_env_train(Xs, Ys): """ Identify the convex envelope on the set of models from the train set. """ # Sort the list in either ascending or descending order of the # items values in Xs key_X_pairs = sorted(Xs.items(), key=lambda x: x[1], reverse=False) # this is a list of (key, val) pairs # Start the Pareto frontier with the first key value in the sorted list p_front = [key_X_pairs[0][0]] # Loop through the sorted list count = 0 for (key, X) in key_X_pairs: if Ys[key] <= Ys[p_front[-1]]: # Look for lower values of Y if count > 0: p_front.append(key) count = count + 1 return remove_interior(p_front, Xs, Ys)
e9a9dd4a56bddd01ae1e071003ea8412b075b9de
7,176
def randthresh(Y,K,p=np.inf,stop=False,verbose=False,varwind=False,knownull=True): """ Wrapper for random threshold functions (without connexity constraints) In: Y (n,) Observations K <int> Some positive integer (lower bound on the number of null hypotheses) p <float> lp norm stop <bool> Stop when minimum is attained (save computation time) verbose <bool> 'Chatty' mode varwind <bool> Varying window variant (vs. fixed window, with width K) knownull <bool> Known null distribution (observations assumed Exp(1) under H0) versus unknown (observations assumed Gaussian under H0) Out: A dictionary D containing the following fields: "C" (n-K) Lp norm of partial sums fluctuation about their conditional expectation "thresh" <float> Detection threshold "detect" (k,) Index of detected activations "v" <float> Estimated null variance (if knownull is False) Note: Random thresholding is performed only if null hypothesis of no activations is rejected at level 5% """ D = {} # Test presence of activity if knownull: X = Y else: v = np.square(Y).mean() X = np.clip(-np.log(1 - ST.chi2.cdf(Y**2, 1, 0, scale=v)), 0, 1 / tol) D["v"] = v T = test_stat(X,p=np.inf) if T <= 0.65: print "No activity detected at 5% level" D["detect"] = np.array([]) D["thresh"] = np.inf else: # Find optimal threshold if varwind: if knownull: C = randthresh_varwind_knownull(Y,K,p,stop,verbose) else: C, V = randthresh_varwind_gaussnull(Y,K,p,stop,one_sided=False,verbose=verbose) else: if knownull: C = randthresh_fixwind_knownull(Y,K,p,stop,verbose) else: C, V = randthresh_fixwind_gaussnull(Y,K,p,stop,one_sided=False,verbose=verbose) n = len(X) if stop: I = np.where(C > 0)[0] if len(I) > 0: ncoeffs = I[-1] else: ncoeffs = n - K else: I = np.where((C[2:] > C[1:-1]) * (C[1:-1] < C[:-2]))[0] if len(I) > 0: ncoeffs = I[np.argmin(C[1:-1][I])] + 1 else: ncoeffs = n - K thresh = np.sort(np.abs(Y))[-ncoeffs] # Detected activations detect = np.where(np.abs(Y) > thresh)[0] D["C"] = C[2:] D["thresh"] = thresh D["detect"] = detect if not knownull: D["v"] = V[2:] return D
d42a9c4ddd27c3ad462d6a447b779db700a58976
7,177
import gc def referrednested(func, recurse=True): #XXX: return dict of {__name__: obj} ? """get functions defined inside of func (e.g. inner functions in a closure) NOTE: results may differ if the function has been executed or not. If len(nestedcode(func)) > len(referrednested(func)), try calling func(). If possible, python builds code objects, but delays building functions until func() is called. """ if PY3: att1 = '__code__' att0 = '__func__' else: att1 = 'func_code' # functions att0 = 'im_func' # methods funcs = set() # get the code objects, and try to track down by referrence for co in nestedcode(func, recurse): # look for function objects that refer to the code object for obj in gc.get_referrers(co): # get methods _ = getattr(obj, att0, None) # ismethod if getattr(_, att1, None) is co: funcs.add(obj) # get functions elif getattr(obj, att1, None) is co: funcs.add(obj) # get frame objects elif getattr(obj, 'f_code', None) is co: funcs.add(obj) # get code objects elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj) # frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars # funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames # frameobjs are not found, however funcobjs are... # (see: test_mixins.quad ... and test_mixins.wtf) # after execution, code objects get compiled, and then may be found by gc return list(funcs)
357fde8030423690a5ae2f8ebcf42c7e86337d2a
7,178
from typing import Dict from typing import Any from typing import Tuple def format_organizations_output(response: Dict[str, Any], page_number: int, limit: int) -> Tuple[list, int]: """ Formatting list organizations command outputs. Args: response (Dict[str,Any): The response from the API call. limit (int): Maximum number of results to return. page_number(int): The Page number to retrieve. Returns: Tuple[list,int]: Formatted command output and total results. """ formatted_organizations = [] relevant_output_entities, total_page_number = format_list_commands_output(response, ['response', 'result', 'domains', 'domain'], page_number, limit) for organization in relevant_output_entities: formatted_organization = {} for key, value in organization.items(): if key.startswith('@'): formatted_organization[key[1:]] = value else: formatted_organization[key] = value formatted_organizations.append(formatted_organization) return formatted_organizations, total_page_number
6eee18e58fd8b6fdba50f995df060689bdb63ef2
7,179
def which_db_version(cursor): """ Return version of DB schema as string. Return '5', if iOS 5. Return '6', if iOS 6 or iOS 7. """ query = "select count(*) from sqlite_master where name = 'handle'" cursor.execute(query) count = cursor.fetchone()[0] if count == 1: db_version = '6' else: db_version = '5' return db_version
07b1dbcea3fb4bf65bba5c578257440d39b6784c
7,180
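A minimal check of which_db_version using a throwaway in-memory SQLite database; the real function targets an iOS message store, and the version detection only looks for a table named 'handle'.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE handle (id INTEGER PRIMARY KEY)")  # stand-in for the iOS 6+ schema
print(which_db_version(cur))  # '6'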
def gaussian_total_correlation(cov): """Computes the total correlation of a Gaussian with covariance matrix cov. We use that the total correlation is the KL divergence between the Gaussian and the product of its marginals. By design, the means of these two Gaussians are zero and the covariance matrix of the second Gaussian is equal to the covariance matrix of the first Gaussian with off-diagonal entries set to zero. Args: cov: Numpy array with covariance matrix. Returns: Scalar with total correlation. """ return 0.5 * (np.sum(np.log(np.diag(cov))) - np.linalg.slogdet(cov)[1])
93b52d075cba08c58067f7e2c6b76e8c5b06fa76
7,182
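Two small worked examples, evaluated directly from the formula above:

import numpy as np

gaussian_total_correlation(np.eye(3))
# 0.0 -- independent dimensions carry no total correlation

cov = np.array([[1.0, 0.5],
                [0.5, 1.0]])
gaussian_total_correlation(cov)
# ~0.1438, i.e. 0.5 * (0 - log(0.75))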
def S3list(s3bucket, fdate, instrm, network='OKLMA'): """ get list of files in a s3 bucket for a specific fdate and instrument (prefix) fdate: e.g. '2017-05-17' instrm: e.g. 'GLM' """ prefix = {'GLM': 'fieldcampaign/goesrplt/GLM/data/L2/' + fdate + '/OR_GLM-L2-LCFA_G16', 'LIS': 'fieldcampaign/goesrplt/ISS_LIS/data/' + fdate + '/ISS_LIS_SC_V1.0_', # 'FEGS': 'fieldcampaign/goesrplt/FEGS/data/goesr_plt_FEGS_' + fdate.replace('-', '') + '_Flash', 'CRS': 'fieldcampaign/goesrplt/CRS/data/GOESR_CRS_L1B_' + fdate.replace('-', ''), 'NAV': 'fieldcampaign/goesrplt/NAV_ER2/data/goesrplt_naver2_IWG1_' + fdate.replace('-', ''), 'LMA': 'fieldcampaign/goesrplt/LMA/' + network + '/data/' + fdate + '/goesr_plt_' + network + '_' + fdate.replace( '-', '')} print("S3list searching for ", prefix[instrm]) s3 = boto3.resource('s3') bucket = s3.Bucket(s3bucket) keys = [] for obj in bucket.objects.filter(Prefix=prefix[instrm]): keys.append(obj.key) return keys
afe77daf5b78545ae89a555064511c3be19947f0
7,183
def formatted_karma(user, activity): """ Performs a karma check for the user and returns a String that's already formatted exactly like the usual response of the bot. :param user: The user the karma check will be performed for. :return: A conveniently formatted karma check response. """ response = good_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[0]) if activity[3] > activity[0]/3: response = bad_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[4], activity[0], activity[3]) elif activity[1] < 2 and activity[2] < 5: response = new_karma_template.format(subreddit.display_name, user.name, activity[1], activity[2], activity[0]) return response
fa130f6bd64763200ed76a9284f9e83c686b7fb7
7,184
import collections def extras_features(*features): """ Decorator used to register extras provided features to a model """ def wrapper(model_class): # Initialize the model_features store if not already defined if "model_features" not in registry: registry["model_features"] = {f: collections.defaultdict(list) for f in EXTRAS_FEATURES} for feature in features: if feature in EXTRAS_FEATURES: app_label, model_name = model_class._meta.label_lower.split(".") registry["model_features"][feature][app_label].append(model_name) else: raise ValueError("{} is not a valid extras feature!".format(feature)) return model_class return wrapper
03ff8f6fe9d020b55f416468cceacf0f163ec102
7,185
def setFeedMoleFraction(H2COxRatio, CO2COxRatio): """ set inlet feed mole fraction """ # feed properties # H2/COx ratio # H2COxRatio = 2.0 # CO2/CO ratio # CO2COxRatio = 0.8 # mole fraction y0_H2O = 0.00001 y0_CH3OH = 0.00001 y0_DME = 0.00001 # total molar fraction tmf0 = 1 - (y0_H2O + y0_CH3OH + y0_DME) # COx COx = tmf0/(H2COxRatio + 1) # mole fraction y0_H2 = H2COxRatio*COx y0_CO2 = CO2COxRatio*COx y0_CO = COx - y0_CO2 # total mole fraction tmf = y0_H2 + y0_CO + y0_CO2 + y0_H2O + y0_CH3OH + y0_DME # CO2/CO2+CO ratio CO2CO2CORatio = y0_CO2/(y0_CO2+y0_CO) # res feedMoFri = np.array([y0_H2, y0_CO2, y0_H2O, y0_CO, y0_CH3OH, y0_DME], dtype=np.float32) # res return feedMoFri
82d368cd84a06a29663aee4c04a0505dba7536bb
7,186
def format(message, *args, **kwargs): """Shortcut for :class:`tossi.Formatter.format` of the default registry. """ return formatter.vformat(message, args, kwargs)
9d32c6a7497ffaa9b0da592f2c5ad828f22cf294
7,187
from django.http.request import QueryDict


def reverse_url(url_name, id, request):
    """
    Build an edit-tag URL that returns to the current page after editing.
    :param url_name: name of the URL pattern to reverse
    :param id: primary key passed to reverse() as the URL argument
    :param request: current request, used to capture the page to return to
    :return: the reversed URL with the current path appended as ?next=...
    """
    path = request.get_full_path()
    query_dict_obj = QueryDict(mutable=True)
    query_dict_obj['next'] = path
    encode_url = query_dict_obj.urlencode()
    prefix_path = reverse(url_name, args=(id,))
    full_path = prefix_path + '?' + encode_url
    return full_path
3453fed5717c2d3a335554e0b02965be8b3c04d0
7,188
from typing import Dict
from typing import List


def add_default_to_data(data: Dict[str, object],
                        schema: SchemaDictType) -> Dict[str, object]:
    """Adds the default values present in the schema to the data for any
    fields whose values are not provided
    """
    # add None as the default for any field that is not required and does not
    # have a default value
    non_default_values = [i for i in schema if all(
        j not in schema[i] for j in ["required", "default"])]
    for val in non_default_values:
        schema[val]["default"] = None
    defaults: List[str] = [i for i in schema if "default" in schema[i]]
    if not all(i in data for i in defaults):
        for i in defaults:
            if i not in data:
                data[i] = schema[i]["default"]
        return data
    else:
        return data
58b460eebb675457ed7832b4e211b72e2b018d03
7,189
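A sketch with a hypothetical schema; for illustration a plain nested dict stands in for SchemaDictType.

schema = {
    "name": {"required": True},
    "port": {"default": 8080},
    "debug": {},   # neither required nor given a default -> filled with None
}
add_default_to_data({"name": "svc"}, schema)
# {'name': 'svc', 'port': 8080, 'debug': None}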
import re def repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str: """Normalize repeating characters in `text`. Truncating their number of consecutive repetitions to `maxn`. Duplicates Textacy's `utils.normalize_repeating_chars`. Args: text (str): The text to normalize. chars: One or more characters whose consecutive repetitions are to be normalized, e.g. "." or "?!". maxn: Maximum number of consecutive repetitions of `chars` to which longer repetitions will be truncated. Returns: str """ return re.sub(r"({}){{{},}}".format(re.escape(chars), maxn + 1), chars * maxn, text)
9dc326947a900d3531dcd59bf51d5c3396a42fea
7,190
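Two illustrative calls to repeating_chars:

repeating_chars("no waaaay!!!!", chars="a", maxn=2)   # 'no waay!!!!'
repeating_chars("no waaaay!!!!", chars="!", maxn=1)   # 'no waaaay!'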
import io import csv def export_data_csv(): """ Build a CSV file with the Order data from the database :return: The CSV file in StringIO """ result = query_order.get_all_orders() output = io.StringIO() writer = csv.writer(output) line = ['Numéro de commande', 'Date', 'Montant total', 'Numéro client', 'Référence devis'] writer.writerow(line) for row in result: date = format_date_csv(str(row.orderDate)[:10]) line = [str(row.orderNumber), date, row.orderTotalAmount, str(row.clientNumber), str(row.quoteNumber)] writer.writerow(line) output.seek(0) return output
5839542a1ef366a63850d04909080b8bca8d4714
7,191
import re def findurls(s): """Use a regex to pull URLs from a message""" regex = r"(?i)\b(((https?|ftp|smtp):\/\/)?(www.)?[a-zA-Z0-9_.-]+\.[a-zA-Z0-9_.-]+(\/[a-zA-Z0-9#]+\/?)*\/*)" url = re.findall(regex,s) return [x[0] for x in url]
801947e893a23a4e440c8e5fc838d6aa89671e0c
7,192
def collide_rect(left, right): """collision detection between two sprites, using rects. pygame.sprite.collide_rect(left, right): return bool Tests for collision between two sprites. Uses the pygame.Rect colliderect function to calculate the collision. It is intended to be passed as a collided callback function to the *collide functions. Sprites must have "rect" attributes. New in pygame 1.8.0 """ return left.rect.colliderect(right.rect)
2111b4d6298cc435d61e12f301d5373cc07c54ff
7,193
from typing import Callable def get_minhash( doc: str, normalization_func: Callable, split_method: str, ngram_size: int, ngram_stride: int, num_minhashes: int, random_seed: int, ) -> LeanMinHash: """Returns a minhash fingerprint for the given document. Args: doc (str): The document to create the MinHash object for. normalization_func (Callable): The function to normalize the document with. split_method (str): The method to split the document into shingles. Can be 'word_ngram', 'paragraph', 'none' or None. ngram_size (int): The size of the ngrams to use. ngram_stride (int): The stride of the ngrams to use. num_minhashes (int): The number of minhashes to use. random_seed (int): The random seed to use. Returns: LeanMinHash: The minhash fingerprint for the given document. Raises: ValueError: If `split_method` is not 'word_ngram', 'paragraph', 'none' or None. """ # Extract shingles from the document, depending on the `split_method` shingles = get_shingles( doc, normalization_func=normalization_func, split_method=split_method, ngram_size=ngram_size, ngram_stride=ngram_stride, ) # Initialise the fingerprint minhash = MinHash(num_perm=num_minhashes, seed=random_seed) # Add all the shingles to the fingerprint minhash.update_batch([shingle.encode("utf-8") for shingle in shingles]) # Convert the fingerprint to a LeanMinHash fingerprint, to save memory # and increase performance minhash = LeanMinHash(minhash, seed=random_seed) # Return the fingerprint return minhash
7f9340885a8ec3b9eba85f627550ed9d8f2df6c1
7,195
import re


def tokenize(text):
    """
    Function: tokenize: This function splits text into words and returns the root form of the words
    Args:
      text(str): the message
    Return:
      lemm(list of str): a list of the root form of the message words
    """
    # Normalizing text (a-zA-Z0-9 matches all alphanumeric characters)
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())

    # Tokenizing text
    words = word_tokenize(text)

    # Removing stop words
    stop = stopwords.words("english")
    words = [t for t in words if t not in stop]

    # Lemmatization
    lemm = [WordNetLemmatizer().lemmatize(w) for w in words]

    return lemm
4ee5cf7bad56f565c211824b1a5838d732cbeab5
7,196
import pickle def displayRandomForest(): """Run displayRandomForest""" executionStartTime = int(time.time()) # status and message success = True message = "ok" plotUrl = '' dataUrl = '' # get model1, var1, pres1, model2, var2, pres2, start time, end time, lon1, lon2, lat1, lat2, nSample center = [] model = [] var = [] pres = [] nVarP = 1 nVar = int(request.args.get('nVar', '')) for i in range( nVar+nVarP ): m1 = request.args.get('model'+str(i+1), '').lower() temp1 = m1.split('_') center.append(temp1[0]) model.append(temp1[1]) var.append(request.args.get('var'+str(i+1), '')) pres.append(request.args.get('pres'+str(i+1), '')) startT = request.args.get('timeS', '') endT = request.args.get('timeE', '') lonS = request.args.get('lonS', '') lonE = request.args.get('lonE', '') latS = request.args.get('latS', '') latE = request.args.get('latE', '') frontend_url = request.args.get('fromPage', '') print 'frontend_url: ', frontend_url userId = request.args.get('userid', '') print 'from url, userId: ', userId if userId != None and userId != '': userId = int(userId) else: userId = 0 json1 = { 'nVar':nVar, 'center':center, 'model':model, 'varName':var, 'pres':pres, 'yearS':startT[:4], 'yearE':endT[:4], 'monthS':startT[4:], 'monthE':endT[4:], 'lon1S':lonS, 'lon1E':lonE, 'lat1S':latS, 'lat1E':latE, } # get where the input file and output file are current_dir = os.getcwd() print 'current_dir: ', current_dir try: seed_str = str(time.time()) tag = md5.new(seed_str).hexdigest() output_dir = current_dir + '/svc/static/randomForest/' + tag print 'output_dir: ', output_dir if not os.path.exists(output_dir): os.makedirs(output_dir) json1['outDir'] = output_dir pFile = '%s/p.pickle'%output_dir fid = open(pFile,'w') pickle.dump(json1, fid) fid.close() # chdir to where the app is os.chdir(current_dir+'/svc/src/randomForest') # instantiate the app. class c1 = call_randomForest.call_randomForest(pFile) # call the app. 
function (0 means the image created is scatter plot) ### (message, imgFileName) = c1.displayScatterPlot2V(0) (message, imgFileName, dataFileName) = c1.display() # chdir back os.chdir(current_dir) ind1 = message.find('No Data') if ind1>0: message1 = message[ind1:(ind1+200)] message1a = message1.split('\n') print message1a[0] print message1a[1] hostname, port = get_host_port2("host.cfg") ### userId = 2 if hostname == 'EC2': try: req = urllib2.Request('http://169.254.169.254/latest/meta-data/public-ipv4') response = urllib2.urlopen(req) hostname = response.read() except Exception, e: print 'e: ', e """ try: req2 = urllib2.Request(' http://169.254.169.254/latest/user-data') response2 = urllib2.urlopen(req2) userId = json.loads(response2.read())['username'] except Exception, e: print 'e: ', e userId = 2 """ """ if userIdDict.has_key(userId): userId = userIdDict[userId] else : userId = 'lei' """ print 'userId: ', userId print 'hostname: ', hostname print 'port: ', port purpose = request.args.get('purpose')#"Test .\'\"\\purpose" backend_url, plotUrl, dataUrl, failedImgUrl = assignUrl('randomForest', tag, imgFileName, dataFileName) # backend_url = 'http://' + hostname + ':' + port + '/svc/randomForest' # print 'backend_url: ', backend_url # print 'imgFileName: ', imgFileName # plotUrl = 'http://' + hostname + ':' + port + '/static/randomForest/' + tag + '/' + imgFileName # print 'plotUrl: ', plotUrl # dataUrl = 'http://' + hostname + ':' + port + '/static/randomForest/' + tag + '/' + dataFileName # print 'dataUrl: ', dataUrl # failedImgUrl = 'http://' + hostname + ':' + port + '/static/plottingFailed.png' # print 'failedImgUrl: ', failedImgUrl if imgFileName is '' or not os.path.exists(output_dir+'/'+imgFileName): print '****** Error: %s not exist' % imgFileName plotUrl = failedImgUrl if dataFileName is '' or not os.path.exists(output_dir+'/'+dataFileName): print '****** Error: %s not exist' % dataFileName dataUrl = failedImgUrl print 'message: ', message if len(message) == 0 or message.find('Error') >= 0 or message.find('error:') >= 0 or message.find('No Data') >= 0: success = False plotUrl = '' dataUrl = '' except ValueError, e: # chdir to current_dir in case the dir is changed to where the app is in the try block os.chdir(current_dir) print 'change dir back to: ', current_dir success = False message = str(e) except Exception, e: # chdir to current_dir in case the dir is changed to where the app is in the try block os.chdir(current_dir) print 'change dir back to: ', current_dir success = False ### message = str("Error caught in displayScatterPlot2V()") message = str(e) executionEndTime = int(time.time()) urlLink = request.query_string urlLink = urlLink.strip() + '&image=%s&data_url=%s' % (plotUrl, dataUrl) print 'urlLink: ', urlLink urlLink = urlLink.replace('&fromPage='+frontend_url, '') print 'urlLink: ', urlLink # json dictionary for provenance service request post_json = {'source': 'JPL', 'parameters':urlLink, 'frontend_url': frontend_url, 'backend_url': backend_url, 'userId': long(userId), 'executionStartTime':long(executionStartTime)*1000, 'executionEndTime':long(executionEndTime)*1000} post_json = json.dumps(post_json) if USE_CMU: try: print post_json print requests.post(CMU_PROVENANCE_URL, data=post_json, headers=HEADERS).text print requests.post(CMU_PROVENANCE_URL_2, data=post_json, headers=HEADERS).text ### print requests.post(VIRTUAL_EINSTEIN_URL, data=post_json, headers=HEADERS).text except: print 'Something went wrong with Wei\'s stuff' return jsonify({ 'success': success, 
'message': message, 'url': plotUrl, 'dataUrl': dataUrl })
a77cce60947d553a870f636fcfc8e3b282b69eea
7,197
def get_reports(request): """ Get a list of all :model:`reporting.Report` entries associated with an individual :model:`users.User` via :model:`rolodex.Project` and :model:`rolodex.ProjectAssignment`. """ active_reports = [] active_projects = ( ProjectAssignment.objects.select_related("project") .filter(Q(operator=request.user) & Q(project__complete=False)) .order_by("project__end_date") ) for active_project in active_projects: reports = Report.objects.filter( Q(project=active_project.project) & Q(complete=False) ) for report in reports: active_reports.append(report) return active_reports
dc622daf0303e6137a36962db45655de1c43deb2
7,198
import json def create_response(key, value): """Return generic AWS Lamba proxy response object format.""" return { "statusCode": 200, "headers": {"Content-Type": "application/json"}, "body": json.dumps({key: value}) }
9236a9e4504e6fbebe841b8cc6b6ad4602dae463
7,199
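For reference, the shape of the returned Lambda proxy object:

create_response("status", "ok")
# {'statusCode': 200,
#  'headers': {'Content-Type': 'application/json'},
#  'body': '{"status": "ok"}'}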
import io import torch def load_image_buffer_to_tensor(image_buf, device): """Maps image bytes buffer to tensor Args: image_buf (bytes buffer): The image bytes buffer device (object): The pytorch device object Returns: py_tensor tensor: Pytorch tensor """ image = Image.open(io.BytesIO(image_buf)) preprocess = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ), ]) input_tensor = preprocess(image) input_batch = input_tensor.unsqueeze(0) return input_batch.to(device, torch.float)
43740d2f9b7eec64f54111e85e0a54787afc8100
7,200
def alpha2tand(freq, a, b, n): """Convert Halpern's 'a' and 'b' from an absorption coefficient of the form `a*freq**b` to a (frequency-dependent) loss tangent. Parameters ---------- freq : numpy array or float The frequency (Hz) (or frequencies) at which to calculate the loss tangent. a : float Halpern's 'a' coefficient b : float Halpern's 'b' coefficient n : float The real part of the material's refractive index Returns ------- tand : numpy array The loss tangent of the material at the given frequency and Halpern coefficients. """ imagn = alpha2imagn(freq, a, b, n) # The complex index of refraction of a material is related to the # complex (relative) permittivity by the relation: # e_r = e' + i*e'' = n^2 = (n + i*k)^2 = n^2 - k^2 + i*2nk # By equating the real and imaginary parts we are left with: # e' = (n^2 - k^2); e'' = 2nk # With this information we can find the loss tangent, which is simply # the ratio of the real and imaginary parts of the relative # permittivity: # tand = (e''/e') ep = n**2 - imagn**2 epp = 2 * n * imagn tand = epp / ep return tand
2acf658e7d18a0e115ba557698cc4efd591ed26d
7,201
def convert_path_to_pixels(path): """ Purpose: --- This function should convert the obtained path (list of tuples) to pixels. Teams are free to choose the number of points and logic for this conversion. Input Arguments: --- `path` : [ list ] Path returned from task_4a.find_path() function. Returns: --- `pixel_path` : [ type can be decided by teams ] Example call: --- pixel_path = convert_path_to_pixels(path) """ ############## ADD YOUR CODE HERE ############## pixel_path = path tmp = 64 for i in range(len(pixel_path)): pixel_path[i][0] = path[i][0] * tmp * 2 + tmp pixel_path[i][1] = path[i][1] * tmp * 2 + tmp ################################################## print("Pixel path is : ", pixel_path) return pixel_path
a50557f252d43f9c3df1b3781c1203dd518d3797
7,202
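A hypothetical grid path for convert_path_to_pixels; because the function writes back into the elements it receives, the cells must be mutable (lists rather than tuples).

path = [[0, 0], [1, 2], [3, 1]]
convert_path_to_pixels(path)
# prints the pixel path and returns [[64, 64], [192, 320], [448, 192]]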
def uniform_prob(*args, prob=None, inside=None, pscale=1.): """ Uniform probability function for discrete and continuous vtypes. """ # Detect ptype, default to prob if no values, otherwise detect vtype assert len(args) >= 1, "Minimum of a single positional argument" pscale = eval_pscale(pscale) use_logs = iscomplex(pscale) if prob is None: prob = 0. if use_logs else 1. vals = args[0] if vals is None: return prob vtype = eval_vtype(vals) if callable(inside) else eval_vtype(inside) # Set inside function by vtype if not specified if not callable(inside): if vtype in VTYPES[float]: inside = lambda x: np.logical_and(x >= min(inside), x <= max(inside)) else: inside = lambda x: np.isin(x, inside) # If scalar, check within variable set p_zero = NEARLY_NEGATIVE_INF if use_logs else 0. if isscalar(vals): prob = prob if inside(vals) else p_zero # Otherwise treat as uniform within range else: p_true = prob prob = np.tile(p_zero, vals.shape) prob[inside(vals)] = p_true # This section below is there just to play nicely with conditionals if len(args) > 1: for arg in args[1:]: if use_logs: prob = prob + uniform_prob(arg, inside=inside, pscale=0.j) else: prob = prob * uniform_prob(arg, inside=inside) return prob
75cd547fc2845cb94f5733310be0d7761ba379fb
7,203
import glob


def obtenerListaArchivos(path: str):
    """
    Generate a list of the files located under the given path (glob pattern).
    """
    lista = glob.glob(path, recursive=True)
    return lista
3b9582dbf086a2af673cc75277041f32d001e215
7,204
def is_equal_to(amount: float) -> Predicate: """Says that a field is exactly equal to some constant amount.""" return is_nearly_equal_to(amount, tolerance=0, taper=0)
c2c9b795d7bb089834e8e11e980b9d794e69d97a
7,205
def load_yaml(fname): """Load a YAML file.""" yaml = YAML(typ="safe") # Compat with HASS yaml.allow_duplicate_keys = True # Stub HASS constructors HassSafeConstructor.name = fname yaml.Constructor = HassSafeConstructor with open(fname, encoding="utf-8") as conf_file: # If configuration file is empty YAML returns None # We convert that to an empty dict return yaml.load(conf_file) or {}
957a5d171568592da89cfa58a69c746ffcf67d33
7,207
def unmix_cvxopt(data, endmembers, gammaConst=0, P=None):
    """
    ******************************************************************
    unmix finds an accurate estimation of the proportions of each endmember

    Syntax: P2 = unmix(data, endmembers, gammaConst, P)

    This product is Copyright (c) 2013 University of Missouri and University
    of Florida
    All rights reserved.

    CVXOPT package is used here. Parameters H, F, L, K, Aeq, beq correspond to
    P, q, G, h, A, B, respectively. lb and ub are element-wise bound constraints
    which are added to matrix G and h respectively.

    Inputs:
    data            = DxN matrix of N data points of dimensionality D
    endmembers      = DxM matrix of M endmembers with D spectral bands
    gammaConst      = Gamma Constant for SPT term
    P               = NxM matrix of abundances corresponding to N input pixels and M endmembers

    Returns:
    P2              = NxM matrix of new abundances corresponding to N input pixels and M endmembers
    ******************************************************************
    """
    solvers.options['show_progress'] = False
    X = data
    M = endmembers.shape[1]  # number of endmembers # endmembers should be column vectors
    N = X.shape[1]  # number of pixels
    # Equation constraint Aeq*x = beq
    # All values must sum to 1 (X1+X2+...+XM = 1)
    Aeq = np.ones((1, M))
    beq = np.ones((1, 1))
    # Boundary Constraints ub >= x >= lb
    # All values must be greater than or equal to 0 (0 <= X1, 0 <= X2, ..., 0 <= XM)
    lb = 0
    ub = 1
    g_lb = np.eye(M) * -1
    g_ub = np.eye(M)

    # import pdb; pdb.set_trace()

    G = np.concatenate((g_lb, g_ub), axis=0)
    h_lb = np.ones((M, 1)) * lb
    h_ub = np.ones((M, 1)) * ub
    h = np.concatenate((h_lb, h_ub), axis=0)

    if P is None:
        P = np.ones((M, 1)) / M

    gammaVecs = np.divide(gammaConst, sum(P))

    H = 2 * (endmembers.T @ endmembers)
    cvxarr = np.zeros((N,M))
    for i in range(N):
        F = ((np.transpose(-2 * X[:, i]) @ endmembers) + gammaVecs).T
        cvxopt_ans = solvers.qp(P=matrix(H.astype(np.double)), q=matrix(F.astype(np.double)),
                                G=matrix(G.astype(np.double)), h=matrix(h.astype(np.double)),
                                A=matrix(Aeq.astype(np.double)), b=matrix(beq.astype(np.double)))
        cvxarr[i, :] = np.array(cvxopt_ans['x']).T

    cvxarr[cvxarr < 0] = 0
    return cvxarr
d529b412afde7a7eb35a02d5d039ec271285829f
7,208
import logging def _accumulate_reward( timestep: dm_env.TimeStep, episode_return: float) -> float: """Accumulates rewards collected over the course of an episode.""" if timestep.reward and timestep.reward != 0: logging.info('Reward: %s', timestep.reward) episode_return += timestep.reward if timestep.first(): episode_return = 0 elif timestep.last(): logging.info('Episode return: %s', episode_return) return episode_return
8f96e9a5bbeb4babfd43283b6da8a7984e53f02b
7,209
def unsafe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve all tags, even those known to be unsafe on untrusted input. """ return load(stream, UnsafeLoader)
ff74beb13746504508832cc9b658a8faf672d2ca
7,210
import pickle def load_tl_gan_model(): """ Load the linear model (matrix) which maps the feature space to the GAN's latent space. """ with open(FEATURE_DIRECTION_FILE, 'rb') as f: feature_direction_name = pickle.load(f) # Pick apart the feature_direction_name data structure. feature_direction = feature_direction_name['direction'] feature_names = feature_direction_name['name'] num_feature = feature_direction.shape[1] feature_lock_status = np.zeros(num_feature).astype('bool') # Rearrange feature directions using Shaobo's library function. feature_direction_disentangled = \ feature_axis.disentangle_feature_axis_by_idx( feature_direction, idx_base=np.flatnonzero(feature_lock_status)) return feature_direction_disentangled, feature_names
aeb0bd329e4c9f8c91ded7c80385c30e1fb69773
7,211