Columns: content (string, 35 to 762k chars) · sha1 (string, 40 chars) · id (int64, 0 to 3.66M)
from astropy import units as apu

def calculate_Hubble_flow_velocity_from_cMpc(cMpc, cosmology="Planck15"):
    """
    Calculates the Hubble flow recession velocity from comoving distance

    Parameters
    ----------
    cMpc : array-like, shape (N, )
        The distance in units of comoving megaparsecs.
        Must be 1D or scalar.

    cosmology : string or astropy.cosmology.core.FLRW
        The cosmology to assume whilst calculating distance.
        Default: Planck15.

    Returns
    -------
    velocity : array-like, shape (N, )
        The Hubble flow recession velocity.
    """
    cosmo = utils.get_cosmology_from_name(cosmology)
    H0 = cosmo.H0
    scale_factor = utils.calculate_scale_factor_from_cMpc(cMpc, cosmology=cosmology)
    proper_dist = cMpc * apu.Mpc / scale_factor
    velocity = proper_dist * H0
    return velocity
823d94faa682f3b5fb123ad00fe2a7d02eedd355
25,167
import itertools

def CollapseDictionary(mapping):
    """
    Takes a dictionary mapping prefixes to URIs and removes prefix mappings
    that begin with _ and there is already a map to their value

    >>> from rdflib import URIRef
    >>> a = {'ex': URIRef('http://example.com/')}
    >>> a['_1'] = a['ex']
    >>> len(a)
    2
    >>> a.values()
    [rdflib.term.URIRef(%(u)s'http://example.com/'), rdflib.term.URIRef(%(u)s'http://example.com/')]
    >>> CollapseDictionary(a)
    {'ex': rdflib.term.URIRef(%(u)s'http://example.com/')}
    >>> a
    {'ex': rdflib.term.URIRef(%(u)s'http://example.com/'), '_1': rdflib.term.URIRef(%(u)s'http://example.com/')}
    """
    def originalPrefixes(item):
        return item.find('_') + 1 == 1

    revDict = {}
    for k, v in list(mapping.items()):
        revDict.setdefault(v, set()).add(k)
    prefixes2Collapse = []
    for k, v in list(revDict.items()):
        origPrefixes = []
        dupePrefixes = []
        # group prefixes for a single URI by whether or not
        # they have a _ prefix
        for rt, items in itertools.groupby(v, originalPrefixes):
            if rt:
                dupePrefixes.extend(items)
            else:
                origPrefixes.extend(items)
        if origPrefixes and len(v) > 1 and len(dupePrefixes):
            # There are allocated prefixes for URIs that were originally
            # given a prefix
            assert len(origPrefixes) == 1
            prefixes2Collapse.extend(dupePrefixes)
    return dict([(k, v) for k, v in list(mapping.items()) if k not in prefixes2Collapse])
9f2befbd52b75b75aa15cadf9e68d5f9eebcae71
25,168
def do_pre_context(PreContextSmToBeReversedList, PreContextSmIdList, dial_db):
    """Pre-context detecting state machine (backward).
    ---------------------------------------------------------------------------
    Micro actions are: pre-context fullfilled_f

        DropOut     --> Begin of 'main' state machine.
        BLC         --> ReloadStateBackward
        EndOfStream --> 'error'

    Variables (potentially) required:

        pre_context_fulfilled_f[N] --> Array of flags for pre-context indication.

    RETURNS: [0] generated code text
             [1] reload state BACKWARD, to be generated later.
    """
    if not PreContextSmToBeReversedList:
        return [], None

    analyzer_txt, \
    analyzer = __do_state_machine(PreContextSmToBeReversedList,
                                  engine.BACKWARD_PRE_CONTEXT, dial_db,
                                  ReverseF=True)

    epilog_txt = _get_pre_context_epilog_definition(dial_db)

    txt = analyzer_txt
    txt.extend(epilog_txt)

    for sm_id in PreContextSmIdList:
        variable_db.require("pre_context_%i_fulfilled_f", Index=sm_id)

    return txt, analyzer
d211cf1aac7e103b6d1efe25bfde964578b81950
25,169
from datetime import datetime

async def check_user_cooldown(ctx: Context, config: Config, cooldown: dict):
    """Check if command is on cooldown."""
    command = ctx.command.qualified_name
    last = cooldown[command]["last"]
    rate = cooldown[command]["rate"]
    per = cooldown[command]["per"]
    uses = cooldown[command]["uses"]
    now = utc_timestamp(datetime.utcnow())

    if now >= last + per:
        cooldown[command] = {
            "last": utc_timestamp(datetime.utcnow()),
            "rate": rate,
            "per": per,
            "uses": 1
        }
        return True
    else:
        if uses < rate:
            cooldown[command] = {
                "last": last,
                "rate": rate,
                "per": per,
                "uses": uses + 1
            }
            return True
    return False
649b108def51c9029b17fa6e14eada141d7c5239
25,170
def round_robin(units, sets=None):
    """ Generates a schedule of "fair" pairings from a list of units """
    if len(units) % 2:
        units.append(None)
    count = len(units)
    sets = sets or (count - 1)
    half = count // 2  # integer division so range() below accepts it
    schedule = []
    for turn in range(sets):
        pairings = []
        for i in range(half):
            if units[i] is None or units[count-i-1] is None:
                continue
            pairings.append((units[i], units[count-i-1]))
        units.insert(1, units.pop())
        schedule.append(pairings)
    return schedule
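# A quick usage sketch for the scheduler above (hypothetical team names);
# every team meets every other team exactly once across the rounds.
teams = ["A", "B", "C", "D"]
schedule = round_robin(teams)
for rnd, pairings in enumerate(schedule, 1):
    print(f"Round {rnd}: {pairings}")  # e.g. Round 1: [('A', 'D'), ('B', 'C')]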
f736fe4ce1f0b407f55d4627a7ecc8396943cdd0
25,171
def filter_df(p_df: pd.DataFrame, col_name: str, value, keep: bool = True, period=None):
    """
    Filter a dataframe on a specific column value

    Parameters :
        p_df : pandas.DataFrame
            The original dataframe
        col_name : str
            The dataframe column name where the filter will be applied
        value : item or list
            The value used to filter the specified column
        keep : bool
            If it must keep or not the selected value

    Return :
        pandas.DataFrame
            The filtered initial dataframe
    """
    if type(value) == list:
        operator = 'not in' if keep == False else 'in'
    else:
        operator = "==" if keep == True else "!="
    df = p_df.query(f"{col_name} {operator} @value")
    return df
f866ac1df9c436dc65e6a3d1b7eeb02487bba100
25,172
import logging

def _VerifyOptions(options):
    """Verify the passed-in options.

    Args:
        options: The parsed options to verify.

    Returns:
        Boolean, True if verification passes, False otherwise.
    """
    if options.endpoints_service and not options.openapi_template:
        logging.error('Please specify openAPI template with --openapi_template '
                      'in deploying endpoints.')
        return False
    if options.openapi_template and not options.endpoints_service:
        logging.error('Please specify endpoints service with --endpoints_service '
                      'in deploying endpoints.')
        return False
    if (options.endpoints_service and
            options.project_id not in options.endpoints_service):
        logging.error('The project "%s" is not matched to the endpoints service '
                      '"%s".', options.project_id, options.endpoints_service)
        return False
    return True
872feb5ac314ed2ef28ddbfaeff1b5dafc5e9ed8
25,173
def force_delegate(func: _F) -> _F:
    """
    A decorator to allow delegation for the specified method even if
    cls.delegate = False
    """
    func._force_delegate = True  # type: ignore[attr-defined]
    return func
771159f2baafce044f480ce138596e4a07e89a97
25,174
import binascii def create_signature(key_dict, data): """ <Purpose> Return a signature dictionary of the form: {'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'sig': '...'}. The signing process will use the private key in key_dict['keyval']['private'] and 'data' to generate the signature. The following signature schemes are supported: 'RSASSA-PSS' RFC3447 - RSASSA-PSS http://www.ietf.org/rfc/rfc3447. 'ed25519' ed25519 - high-speed high security signatures http://ed25519.cr.yp.to/ Which signature to generate is determined by the key type of 'key_dict' and the available cryptography library specified in 'settings'. >>> ed25519_key = generate_ed25519_key() >>> data = 'The quick brown fox jumps over the lazy dog' >>> signature = create_signature(ed25519_key, data) >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature) True >>> len(signature['sig']) 128 >>> rsa_key = generate_rsa_key(2048) >>> signature = create_signature(rsa_key, data) >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature) True >>> ecdsa_key = generate_ecdsa_key() >>> signature = create_signature(ecdsa_key, data) >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature) True <Arguments> key_dict: A dictionary containing the keys. An example RSA key dict has the form: {'keytype': 'rsa', 'scheme': 'rsassa-pss-sha256', 'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...', 'private': '-----BEGIN RSA PRIVATE KEY----- ...'}} The public and private keys are strings in PEM format. data: Data to be signed. This should be a bytes object; data should be encoded/serialized before it is passed here. The same value can be be passed into securesystemslib.verify_signature() (along with the public key) to later verify the signature. <Exceptions> securesystemslib.exceptions.FormatError, if 'key_dict' is improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' specifies an unsupported key type or signing scheme. TypeError, if 'key_dict' contains an invalid keytype. <Side Effects> The cryptography library specified in 'settings' is called to perform the actual signing routine. <Returns> A signature dictionary conformant to 'securesystemslib_format.SIGNATURE_SCHEMA'. """ # Does 'key_dict' have the correct format? # This check will ensure 'key_dict' has the appropriate number of objects # and object types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if the check fails. # The key type of 'key_dict' must be either 'rsa' or 'ed25519'. securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict) # Signing the 'data' object requires a private key. Signing schemes that are # currently supported are: 'ed25519', 'ecdsa-sha2-nistp256', # 'ecdsa-sha2-nistp384' and rsa schemes defined in # `securesystemslib.keys.RSA_SIGNATURE_SCHEMES`. # RSASSA-PSS and RSA-PKCS1v15 keys and signatures can be generated and # verified by rsa_keys.py, and Ed25519 keys by PyNaCl and PyCA's # optimized, pure python implementation of Ed25519. 
signature = {} keytype = key_dict['keytype'] scheme = key_dict['scheme'] public = key_dict['keyval']['public'] private = key_dict['keyval']['private'] keyid = key_dict['keyid'] sig = None if keytype == 'rsa': if scheme in RSA_SIGNATURE_SCHEMES: private = private.replace('\r\n', '\n') sig, scheme = securesystemslib.rsa_keys.create_rsa_signature( private, data, scheme) else: raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported' ' RSA signature scheme specified: ' + repr(scheme)) elif keytype == 'ed25519': public = binascii.unhexlify(public.encode('utf-8')) private = binascii.unhexlify(private.encode('utf-8')) sig, scheme = securesystemslib.ed25519_keys.create_signature( public, private, data, scheme) # Continue to support keytypes of ecdsa-sha2-nistp256 and ecdsa-sha2-nistp384 # for backwards compatibility with older securesystemslib releases elif keytype in ['ecdsa', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384']: sig, scheme = securesystemslib.ecdsa_keys.create_signature( public, private, data, scheme) # 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key # types. This is a defensive check against an invalid key type. else: # pragma: no cover raise TypeError('Invalid key type.') # Build the signature dictionary to be returned. # The hexadecimal representation of 'sig' is stored in the signature. signature['keyid'] = keyid signature['sig'] = binascii.hexlify(sig).decode() return signature
1a1e37838679a6912c8dc3d482a8092b1e75056c
25,175
def parse_line(line):
    """ Parse a queue trace line into a dict """
    line = line.split()
    result = {}
    if len(line) < 12:
        return result
    result["event"] = line[0]
    result["time"] = float(line[1])
    result["from"] = int(line[2])
    result["to"] = int(line[3])
    result["type"] = line[4]
    result["size"] = int(line[5])
    result["flags"] = line[6]
    result["fid"] = int(line[7])
    result["src"] = line[8]
    result["dst"] = line[9]
    result["seqnum"] = int(line[10])
    result["pktid"] = int(line[11])
    return result
432e6a624626e89d27fe6d3d9ed7c4230d97c0a6
25,176
from typing import Dict

def gaussian_linear_combination(distributions_and_weights: Dict):
    """ Computes the PDF of the weighted average of two Gaussian variables. """
    assert isinstance(distributions_and_weights, dict)
    assert all(
        isinstance(dist, MultivariateNormal)
        for dist in distributions_and_weights.keys()
    )
    return MultivariateNormal(
        loc=sum(
            [
                dist.loc * weight
                for dist, weight in distributions_and_weights.items()
            ]
        ),
        covariance_matrix=sum(
            [
                dist.covariance_matrix * (weight ** 2)
                for dist, weight in distributions_and_weights.items()
            ]
        ),
    )
704a1f22392819075e3d9ba0c243c7364baab827
25,177
def check_pattern_startswith_slash(pattern):
    """
    Check that the pattern does not begin with a forward slash.
    """
    regex_pattern = pattern.regex.pattern
    if regex_pattern.startswith('/') or regex_pattern.startswith('^/'):
        warning = Warning(
            "Your URL pattern {} has a regex beginning with a '/'. "
            "Remove this slash as it is unnecessary.".format(describe_pattern(pattern)),
            id="urls.W002",
        )
        return [warning]
    else:
        return []
9015f1f8d17297ace5fcef2e2cf0fe2c6dd6e76c
25,178
import numpy as np

def ht(x):
    """ht(x)

    Evaluates the heaviside function

    Args:
        x: Domain points

    Returns:
        ht(x): Heaviside function evaluated over the domain x
    """
    g = np.ones_like(x)
    for i in range(np.size(x)):  # iterate over every element, including the last
        if x[i] < 0:
            g[i] = 0
        elif x[i] > 0:
            g[i] = 1
        elif x[i] == 0:
            g[i] = .5
    return g
b109a72a6fd57e088327cc1fa1d9d70950b1860a
25,180
def xoGkuXokhXpZ():
    """Package link to class."""
    pkg = Package("pkg")
    return pkg.circles.simple_class.Foo
500832ece1987a726812350faf72130de65f37a0
25,181
def create_all_pts_within_observation_window(observation_window_hours) -> str:
    """
    create a view of all patients within observation window
    return the view name
    """
    view_name = f"default.all_pts_{observation_window_hours}_hours"
    query = f"""
    CREATE OR REPLACE VIEW {view_name} AS
    WITH admits AS (
        SELECT
            admits.subject_id,
            admits.hadm_id,
            admits.admittime,
            admits.admittime + interval %(time_window_hours)s hour index_date,
            CASE WHEN admits.deathtime <= (admits.admittime + interval %(time_window_hours)s hour)
                 THEN 1 ELSE 0 END AS death_during_obs_win,
            CASE WHEN admits.dischtime <= (admits.admittime + interval %(time_window_hours)s hour)
                 THEN 1 ELSE 0 END AS disch_during_obs_win
        FROM mimiciii.admissions admits
    )
    SELECT
        admits.subject_id,
        admits.hadm_id,
        admits.index_date,
        admits.admittime
    FROM admits
    WHERE admits.death_during_obs_win != 1
      and admits.disch_during_obs_win != 1
    order by random()
    --limit 1000
    """
    params = {
        'time_window_hours': str(observation_window_hours)
    }
    cursor.execute(query, params)
    return view_name
f711ac343815b9adc3b07e833ae8ee31cd07a125
25,182
def get_signature(data, raw_labels):
    """ Should return a 4 x z* matrix, where z* is the number of classes in
    the labels matrix. """
    labels = raw_labels.reset_index()
    pca = decomposition.PCA(n_components=2)
    lle = manifold.LocallyLinearEmbedding(n_components=2)
    X_pca = pd.DataFrame(pca.fit_transform(data))
    X_lle = pd.DataFrame(lle.fit_transform(data))
    class_no = np.shape(labels[0].unique())[0]
    S = np.zeros([4, class_no])
    for a in labels[0].unique():
        this_pca = X_pca.loc[labels.loc[labels[0] == a].index]
        this_lle = X_lle.loc[labels.loc[labels[0] == a].index]
        S[0, a] = this_pca[0].mean()
        S[1, a] = this_pca[1].mean()
        S[2, a] = this_lle[0].mean()
        S[3, a] = this_lle[1].mean()
    return S
eefd7f5e682ad25bb31989d118747691f4cc64f0
25,183
def get_xyz_where(Z, Cond):
    """
    Z and Cond are MxN matrices. Z are data and Cond is a boolean matrix
    where some condition is satisfied. Return value is x,y,z where x and y
    are the indices into Z and z are the values of Z at those indices.
    x,y,z are 1D arrays
    """
    X, Y = np.indices(Z.shape)
    return X[Cond], Y[Cond], Z[Cond]
b1e1b2144e44f292dc6e2c5e917cb7511bdbf288
25,184
def retrieve_seq_length(data):
    """compute the length of a sequence. 0 are masked.

    Args:
        data: input sequence

    Returns:
        a `int`, length of the sequence
    """
    with tf.name_scope('GetLength'):
        used = tf.sign(tf.reduce_max(tf.abs(data), axis=2))
        length = tf.reduce_sum(used, axis=1)
        length = tf.cast(length, tf.int32)
    return length
ba6cb7ac9e9cc63311a6194e55b30ffa02fb3bc7
25,185
def get_census_params(variable_ids, county_level=False):
    """Gets census url params to make an API call.

    variable_ids: The ids of the variables to request. Automatically includes NAME.
    county_level: Whether to request at the county level, or the state level."""
    keys = variable_ids.copy()
    keys.append("NAME")
    params = {"get": ",".join(keys)}
    params["for"] = "county:*" if county_level else "state:*"
    return params
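# A short usage sketch for the helper above (the variable id is a hypothetical
# Census ACS code, used only for illustration).
params = get_census_params(["B01001_001E"], county_level=True)
print(params)  # -> {'get': 'B01001_001E,NAME', 'for': 'county:*'}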
b24204c8e9ef82575b54151bdc0ac98de0fb7fc0
25,186
def lookupName(n, names):
    """Check if name is in list of names

    Parameters
    ----------
    n : str
        Name to check
    names : list
        List of names to check in

    Returns
    -------
    bool
        Flag denoting if name has been found in list (True) or not (False)
    """
    if n in names:
        return True
    else:
        return False
0fbb97e252f5daf9de52a946c206fa74395b01c6
25,187
def calculate_appointments(new_set, old_set):
    """
    Calculate different appointment types.
    Used for making useful distinctions in the email message.
    new_set will be the fresh set of all available appointments at a given interval
    old_set will be the previous appointments variable getting passed in.

    Ex1: Addition of HONEOYE
    new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
    old_set = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON'}
    returns ->-> new_appointments = {'HONEOYE'}
                 all_appointments = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON', HONEOYE}

    Ex2: No Changes
    new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
    old_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
    returns ->-> new_appointments = set() (empty set)
                 all_appointments = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
    """
    # set of All appointments minus set of Old appointments yields the set of New appointments
    new_appointments = new_set.difference(old_set)
    # New intersect Old yields the appointments present in both sets (the overlap in a Venn diagram)
    old_appointments = new_set.intersection(old_set)
    return new_appointments, old_appointments  # Return new sets
b54735293ba910e2b310e55e263e2611863d088a
25,188
def transaksi_hari_ini():
    """ used in: app_kasir/statistik.html """
    return Transaksi.objects.filter(
        tanggal_transaksi__year=timezone.now().year,
        tanggal_transaksi__month=timezone.now().month,
        tanggal_transaksi__day=timezone.now().day
    ).count()
a04e835be4cc495b09e1d7ae93ed141315168a81
25,190
def extractWindows(signal, window_size=10, return_window_indices=False):
    """ Reshape a signal into a series of non-overlapping windows.

    Parameters
    ----------
    signal : numpy array, shape (num_samples,)
    window_size : int, optional
    return_window_indices : bool, optional

    Returns
    -------
    windows : numpy array, shape (num_windows, window_size)
    window_indices : numpy array of int, shape (num_windows, window_size)
    """
    tail_len = signal.shape[0] % window_size
    pad_arr = m.np.full(window_size - tail_len, m.np.nan)
    signal_padded = m.np.concatenate((signal, pad_arr))
    windows = signal_padded.reshape((-1, window_size))

    if not return_window_indices:
        return windows

    indices = m.np.arange(signal_padded.shape[0])
    window_indices = indices.reshape((-1, window_size))
    return windows, window_indices
2d9b319325dc1be9a92766c093db12c2e1f24123
25,191
def add(left: int, right: int):
    """ add up two numbers. """
    print(left + right)
    return 0
75d7bd10cfdfb38211f6faf838b5e200e8593693
25,192
import random

def rand_x_digit_num(x):
    """Return an X digit number as a zero-padded string."""
    return '{0:0{x}d}'.format(random.randint(0, 10**x-1), x=x)
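# A quick check of the helper above; the value is random, so only the format
# is asserted here.
pin = rand_x_digit_num(4)
assert isinstance(pin, str) and len(pin) == 4 and pin.isdigit()
print(pin)  # e.g. "0273"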
b46864143ca6186ebeede6c687a85d1b585e70db
25,194
def gen_workflow_steps(step_list):
    """Generates a table of steps for a workflow

    Assumes step_list is a list of dictionaries with 'task_id' and 'state'
    """
    steps = format_utils.table_factory(field_names=['Steps', 'State'])

    if step_list:
        for step in step_list:
            steps.add_row([step.get('task_id'), step.get('state')])
    else:
        steps.add_row(['None', ''])

    return format_utils.table_get_string(steps)
d01dc1937dc17e3d8b30390ccd1ea460391a7492
25,195
from typing import Union
from typing import Any

def sround(x: Union[np.ndarray, float, list, tuple], digits: int = 1) -> Any:
    """ 'smart' round to largest `digits` + 1

    Args
        x       (float, list, tuple, ndarray)
        digits  (int [1]) number of digits beyond highest

    Examples
        >>> sround(0.0212343, 2)  # result 0.0212
    """
    if isinstance(x, (float, np.float64, np.float32)):
        safelog10 = lambda x: 0.0 if not x else np.log10(np.abs(x))
        _sround = lambda x, d=1: np.round(x, max((-np.floor(safelog10(x)).astype(int) + digits), 0))
        return _sround(x, digits)

    _as_tuple = False
    if isinstance(x, tuple):
        x = list(x)
        _as_tuple = True
    elif isinstance(x, (list, np.ndarray)):
        safelog10 = np.log10(np.abs(x))
        safelog10[np.abs(safelog10) == np.inf] = 0
        digits = np.maximum(-np.floor(safelog10).astype(int) + digits, 0)
        for i in range(len(x)):
            x[i] = np.round(x[i], digits[i])
    if _as_tuple:
        x = tuple(x)
    return x
a695546c46d4bbd41b481d7b58d879bcd4d53247
25,197
def element_png_display(element, max_frames):
    """
    Used to render elements to PNG if requested in the display formats.
    """
    if 'png' not in Store.display_formats:
        return None
    info = process_object(element)
    if info:
        IPython.display.display(IPython.display.HTML(info))
        return

    backend = Store.current_backend
    if type(element) not in Store.registry[backend]:
        return None
    renderer = Store.renderers[backend]

    # Current renderer does not support PNG
    if 'png' not in renderer.params('fig').objects:
        return None

    data, info = renderer(element, fmt='png')
    return data
273d19194c467d5596f99626bbe01e53005bee17
25,198
def rsa_obj(key_n, key_e, key_d=None, key_p=None, key_q=None):
    """
    Wrapper for the RSAObj constructor

    The main reason for its existence is to compute the prime factors if the
    private exponent d is being set. In testing, the construct method threw
    exceptions because it wasn't able to compute the prime factors. The
    recover_prime_factors function seems to work better.
    """
    if key_n != None and key_e != None and key_d == None \
            and key_p == None and key_q == None:
        key = RSA.construct((key_n, key_e))
    elif key_n != None and key_e != None and key_d != None \
            and key_p == None and key_q == None:
        key_p, key_q = recover_prime_factors(key_n, key_e, key_d)
        key = RSA.construct((key_n, key_e, key_d, int(key_p), int(key_q)))
    elif key_n != None and key_e != None and key_d != None \
            and key_p != None and key_q == None:
        key = RSA.construct((key_n, key_e, key_d, key_p, key_n // key_p))
    elif key_n != None and key_e != None and key_d != None \
            and key_p != None and key_q != None:
        key = RSA.construct((key_n, key_e, key_d, key_p, key_q))
    return key
1ad6d0b4c6f96170b2452b87ea049f63df350b8f
25,200
from operator import ge import logging def create_surface_and_gap(surf_data, radius_mode=False, prev_medium=None, wvl=550.0, **kwargs): """ create a surface and gap where surf_data is a list that contains: [curvature, thickness, refractive_index, v-number] """ s = surface.Surface() if radius_mode: if surf_data[0] != 0.0: s.profile.cv = 1.0/surf_data[0] else: s.profile.cv = 0.0 else: s.profile.cv = surf_data[0] if len(surf_data) > 2: if isanumber(surf_data[2]): # assume all args are numeric if len(surf_data) < 3: if surf_data[2] == 1.0: mat = m.Air() else: mat = m.Medium(surf_data[2]) else: mat = m.Glass(surf_data[2], surf_data[3], '') else: # string args if surf_data[2].upper() == 'REFL': s.interact_mode = 'reflect' mat = prev_medium else: num_args = len(surf_data[2:]) if num_args == 2: name, cat = surf_data[2], surf_data[3] else: name, cat = surf_data[2].split(',') try: mat = gfact.create_glass(name, cat) except ge.GlassNotFoundError as gerr: logging.info('%s glass data type %s not found', gerr.catalog, gerr.name) logging.info('Replacing material with air.') mat = m.Air() else: # only curvature and thickness entered, set material to air mat = m.Air() thi = surf_data[1] g = gap.Gap(thi, mat) rndx = mat.rindex(wvl) tfrm = np.identity(3), np.array([0., 0., thi]) return s, g, rndx, tfrm
187cad25433db2f64aeb482bc5313c97b31a834e
25,201
def deprecate_build(id): """Mark a build as deprecated. **Authorization** User must be authenticated and have ``deprecate_build`` permissions. **Example request** .. code-block:: http DELETE /builds/1 HTTP/1.1 Accept: */* Accept-Encoding: gzip, deflate Authorization: Basic ZXlKcFlYUWlPakUwTlRZM056SXpORGdzSW1WNGNDSTZNVFEx... Connection: keep-alive Content-Length: 0 Host: localhost:5000 User-Agent: HTTPie/0.9.3 **Example response** .. code-block:: http HTTP/1.0 200 OK Content-Length: 2 Content-Type: application/json Date: Tue, 01 Mar 2016 17:21:29 GMT Server: Werkzeug/0.11.3 Python/3.5.0 {} :reqheader Authorization: Include the token in a username field with a blank password; ``<token>:``. :param id: ID of the build. :statuscode 200: No error. :statuscode 404: Build not found. """ build = Build.query.get_or_404(id) build.deprecate_build() db.session.commit() return jsonify({}), 200
3002a8c46e4aa27a03d8b4fdb16fa94d4a7a8698
25,202
def subtractNums(x, y):
    """
    subtract two numbers and return result
    """
    return y - x
2b16636e74a2d1a15e79e4669699c96adcd3833b
25,204
import traceback

def getTraceback(error=None):
    """Get formatted exception"""
    try:
        return traceback.format_exc(10)
    except Exception as err:
        return str(error)
62da3e5b13860c2ecefa1da202aa63531c4fbc19
25,205
from datetime import datetime def confirm_email(token): """ Verify email confirmation token and activate the user account.""" # Verify token user_manager = current_app.user_manager db_adapter = user_manager.db_adapter is_valid, has_expired, object_id = user_manager.verify_token( token, user_manager.confirm_email_expiration) if has_expired: flash(_('Seu token de confirmacao expirou.'), 'error') return redirect(url_for('user.login')) if not is_valid: flash(_('Token de confirmacao invalido.'), 'error') return redirect(url_for('user.login')) """ Confirm email by setting User.confirmed_at=utcnow() or UserEmail.confirmed_at=utcnow()""" user = None if db_adapter.UserEmailClass: user_email = user_manager.get_user_email_by_id(object_id) if user_email: user_email.confirmed_at = datetime.utcnow() user = user_email.user else: user_email = None user = user_manager.get_user_by_id(object_id) if user: user.confirmed_at = datetime.utcnow() if user: user.set_active(True) db_adapter.commit() else: # pragma: no cover flash(_('Token de confirmacao invalido.'), 'error') return redirect(url_for('user.login')) # Send email_confirmed signal signals.user_confirmed_email.send( current_app._get_current_object(), user=user) # Prepare one-time system message flash(_('Seu email foi confirmado.'), 'success') # Auto-login after confirm or redirect to login page safe_next = _get_safe_next_param( 'next', user_manager.after_confirm_endpoint) if user_manager.auto_login_after_confirm: return _do_login_user(user, safe_next) # auto-login else: return redirect( url_for('user.login')+'?next='+quote(safe_next) ) # redirect to login page
83015e9fc74b88eeb57b1ef39decd37b7adf662d
25,207
def _GetInstanceField(instance, field):
    """Get the value of a field of an instance.

    @type instance: string
    @param instance: Instance name
    @type field: string
    @param field: Name of the field
    @rtype: string
    """
    return _GetInstanceFields(instance, [field])[0]
fdf8eebf1dbd9cb443da21530058c6c7b30d8204
25,209
def infectious_rate_tweets(t, p0=0.001, r0=0.424, phi0=0.125, taum=2., t0=0,
                           tm=24, bounds=None):
    """
    Alternative form of infectious rate from paper. Supports bounds for r0 and taum.
    Bounds should be passed as an array in the form of
    [(lower r0, lower taum), (upper r0, upper taum)]. Converted to hours.

    :param t: point to evaluate function at (in hours)
    :param p0: base rate
    :param r0: amplitude
    :param phi0: shift (in days)
    :param taum: decay/freshness (in days)
    :param t0: start time of observation (in hours)
    :param tm: cyclic property (after what time a full circle passed, in hours)
    :param bounds: bounds for r0 and taum
    :return: intensity for point t
    """
    if bounds is not None:
        if not (bounds[0][0] < r0 < bounds[1][0]):
            r0 = max(bounds[0][0], bounds[1][0] * sigmoid(taum / bounds[1][0]))
        if not (bounds[0][1] < taum < bounds[1][1]):
            taum = max(bounds[0][1], bounds[1][1] * sigmoid(taum / bounds[1][1]))

    return p0 * (1. - r0 * sin((48 / tm) * pi * ((t + t0) / 24 + phi0))) * exp(-t / (24 * taum))
939ddde24301badaf1c43731027d40167b5ab414
25,210
def fetch_user(username):
    """
    This method 'fetch_user' fetches an instance of a user, if any
    """
    return User.objects.get(username=username)
9861fc648c40312dea62450bd152d511867fcfe5
25,211
def get_script_name(key_combo, key):
    """ (e.g. ctrl-shift, a -> CtrlShiftA; a -> A) """
    if key_combo != 'key':
        return get_capitalized_key_combo_pattern(key_combo) + key.capitalize()
    return key.capitalize()
a550c4b3852bf7ee3c30c4cecd497ae48a4d4a9d
25,212
def layer(name, features): """Make a vector_tile.Tile.Layer from GeoJSON features.""" pbl = vector_tile_pb2.tile.layer() pbl.name = name pbl.version = 1 pb_keys = [] pb_vals = [] pb_features = [] for j, f in enumerate( chain.from_iterable(singles(ob) for ob in features)): pbf = vector_tile_pb2.tile.feature() pbf.id = j # Pack up the feature geometry. g = f.get('geometry') if g: gtype = g['type'] coords = g['coordinates'] if gtype == 'Point': geometry = [(1<<3)+1] + [ (n << 1) ^ (n >> 31) for n in imap(int, coords)] elif gtype == 'LineString': num = len(coords) geometry = [0]*(4 + 2*(num-1)) geometry[0] = (1<<3)+1 geometry[1:3] = ( (n << 1) ^ (n >> 31) for n in imap(int, coords[0])) geometry[3] = ((num-1)<<3)+2 for i, (prev, pair) in enumerate(pairwise(coords), 1): prev = map(int, prev) pair = map(int, pair) geometry[2*i+2:2*i+4] = ( (n << 1) ^ (n >> 31) for n in ( pair[0]-prev[0], pair[1]-prev[1])) pbf.geometry.extend(geometry) elif gtype == 'Polygon': rings = [] for ring in coords: num = len(ring) geometry = [0]*(5 + 2*(num-1)) geometry[0] = (1<<3)+1 geometry[1:3] = ( (n << 1) ^ (n >> 31) for n in imap(int, ring[0])) geometry[3] = ((num-1)<<3)+2 for i, (prev, pair) in enumerate(pairwise(ring), 1): prev = map(int, prev) pair = map(int, pair) geometry[2*i+2:2*i+4] = ( (n << 1) ^ (n >> 31) for n in ( pair[0]-prev[0], pair[1]-prev[1])) geometry[-1] = (1<<3)+7 pbf.geometry.extend(geometry) pbf.type = geom_type_map[gtype] # Pack up feature properties. props = f.get('properties', {}) tags = [0]*(2*len(props)) for i, (k, v) in enumerate(props.items()): if k not in pb_keys: pb_keys.append(k) if v not in pb_vals: pb_vals.append(v) tags[i*2:i*2+2] = pb_keys.index(k), pb_vals.index(v) pbf.tags.extend(tags) pb_features.append(pbf) # Finish up the layer. pbl.keys.extend(map(str, pb_keys)) pbl.values.extend(map(value, ifilter(None, pb_vals))) return pbl
a08e4dea809a938e1a451d34673f2845dd353a21
25,213
def serialize(formula, threshold=None):
    """Provides a string representing the formula.

    :param formula: The target formula
    :type formula: FNode
    :param threshold: Specify the threshold
    :type threshold: Integer
    :returns: A string representing the formula
    :rtype: string
    """
    return get_env().serializer.serialize(formula, threshold=threshold)
872b4d5e135a6b1b9c5964ca353edccd0a6d8a40
25,214
import collections def find_single_network_cost(region, option, costs, global_parameters, country_parameters, core_lut): """ Calculates the annual total cost using capex and opex. Parameters ---------- region : dict The region being assessed and all associated parameters. option : dict Contains the scenario and strategy. The strategy string controls the strategy variants being tested in the model and is defined based on the type of technology generation, core and backhaul, and the strategy for infrastructure sharing, the number of networks in each geotype, spectrum and taxation. costs : dict All equipment costs. global_parameters : dict All global model parameters. country_parameters : dict All country specific parameters. core_lut : dict Contains the number of existing and required, core and regional assets. Returns ------- region : dict Contains all regional data. """ strategy = option['strategy'] generation = strategy.split('_')[0] core = strategy.split('_')[1] backhaul = strategy.split('_')[2] new_mno_sites = region['new_mno_sites'] upgraded_mno_sites = region['upgraded_mno_sites'] all_sites = new_mno_sites + upgraded_mno_sites new_backhaul = region['backhaul_new'] regional_cost = [] regional_asset_cost = [] for i in range(1, int(all_sites) + 1): if i <= upgraded_mno_sites and generation == '4G': cost_structure = upgrade_to_4g(region, strategy, costs, global_parameters, core_lut, country_parameters) backhaul_quant = backhaul_quantity(i, new_backhaul) total_cost, cost_by_asset = calc_costs(region, cost_structure, backhaul, backhaul_quant, global_parameters, country_parameters) regional_cost.append(total_cost) regional_asset_cost.append(cost_by_asset) if i <= upgraded_mno_sites and generation == '5G' and core == 'nsa': cost_structure = upgrade_to_5g_nsa(region, strategy, costs, global_parameters, core_lut, country_parameters) backhaul_quant = backhaul_quantity(i, new_backhaul) total_cost, cost_by_asset = calc_costs(region, cost_structure, backhaul, backhaul_quant, global_parameters, country_parameters) regional_cost.append(total_cost) regional_asset_cost.append(cost_by_asset) if i <= upgraded_mno_sites and generation == '5G' and core == 'sa': cost_structure = upgrade_to_5g_sa(region, strategy, costs, global_parameters, core_lut, country_parameters) backhaul_quant = backhaul_quantity(i, new_backhaul) total_cost, cost_by_asset = calc_costs(region, cost_structure, backhaul, backhaul_quant, global_parameters, country_parameters) regional_cost.append(total_cost) regional_asset_cost.append(cost_by_asset) if i > upgraded_mno_sites and generation == '4G': cost_structure = greenfield_4g(region, strategy, costs, global_parameters, core_lut, country_parameters) backhaul_quant = backhaul_quantity(i, new_backhaul) total_cost, cost_by_asset = calc_costs(region, cost_structure, backhaul, backhaul_quant, global_parameters, country_parameters) regional_cost.append(total_cost) regional_asset_cost.append(cost_by_asset) if i > upgraded_mno_sites and generation == '5G' and core == 'nsa': cost_structure = greenfield_5g_nsa(region, strategy, costs, global_parameters, core_lut, country_parameters) backhaul_quant = backhaul_quantity(i, new_backhaul) total_cost, cost_by_asset = calc_costs(region, cost_structure, backhaul, backhaul_quant, global_parameters, country_parameters) regional_cost.append(total_cost) regional_asset_cost.append(cost_by_asset) if i > upgraded_mno_sites and generation == '5G' and core == 'sa': cost_structure = greenfield_5g_sa(region, strategy, costs, global_parameters, core_lut, 
country_parameters) backhaul_quant = backhaul_quantity(i, new_backhaul) total_cost, cost_by_asset = calc_costs(region, cost_structure, backhaul, backhaul_quant, global_parameters, country_parameters) regional_cost.append(total_cost) regional_asset_cost.append(cost_by_asset) counter = collections.Counter() for d in regional_asset_cost: counter.update(d) counter_dict = dict(counter) network_cost = 0 for k, v in counter_dict.items(): region[k] = v network_cost += v region['network_cost'] = network_cost return region
9602d13a85ee5d4273d6bb82d28a525f42714890
25,215
def get_headings(bulletin):
    """Function to get the headings from a text file.

    Takes a single argument: the index of the bulletin file in the
    `bulletins` list."""
    # reading text files
    with open("../input/cityofla/CityofLA/Job Bulletins/"+bulletins[bulletin]) as f:
        data = f.read().replace('\t', '').split('\n')
        data = [head for head in data if head.isupper()]
    return data
4b84928f8f4f13692c1236277aba6c6d4eb6c5ba
25,216
def phi_text_page_parse(pageAnalyse_str, phi_main_url):
    """
    params: pageAnalyse_str, str.
    return: phi_page_dict, dict.
    It takes the preceding functions and maps their information to a dictionary.
    """
    #
    phi_page_dict = {}
    phi_page_dict["phi_text_id_no"] = phi_text_id(pageAnalyse_str, domain_url=phi_main_url)[0]
    phi_page_dict["phi_text_region"] = phi_lemma_region(pageAnalyse_str)
    phi_page_dict["phi_text_url"] = phi_text_id(pageAnalyse_str, domain_url=phi_main_url)[1]
    phi_page_dict["phi_text_info"] = phi_lemma_text_info(pageAnalyse_str)
    phi_page_dict["phi_text"] = phi_lemma_text(pageAnalyse_str)
    #
    return phi_page_dict
e11a8b14726c2a65fb721f7a1f8c2f4148d18fea
25,217
def fdfilt_lagr(tau, Lf, fs):
    """
    Parameters
    ----------
    tau : delay / s
    Lf : length of the filter / sample
    fs : sampling rate / Hz

    Returns
    -------
    h : (Lf) nonzero filter coefficients
    ni : time index of the first element of h
    n0 : time index of the center of h
    """
    d = tau * fs
    if Lf % 2 == 0:
        n0 = np.ceil(d)
        Lh = int(Lf/2)
        idx = np.arange(n0-Lh, n0+Lh).astype(int)
    elif Lf % 2 == 1:
        n0 = np.round(d)
        Lh = int(np.floor(Lf/2))
        idx = np.arange(n0-Lh, n0+Lh+1).astype(int)
    else:
        print('Invalid value of Lf. Must be an integer')
        return
    return lagr_poly_barycentric2(idx, d), idx[0], n0
3a80d3682eb255b7190cc5cd6ddf44a9abbd58bc
25,218
from tqdm import tqdm

def hit_n_run(A_mat, b_vec, n_samples=200, hr_timeout=ALG_TIMEOUT_MULT):
    """
    Hit and Run Sampler:
        1. Sample current point x
        2. Generate a random direction r
        3. Define gamma_i = ( b - a_i'x ) / ( r'a_i )
        4. Calculate max(gamma < 0) gamma_i and min(gamma > 0) gamma_i
        5. Sample uniformly from [min_gamma, max_gamma]
    """
    m, n = A_mat.shape
    curr_pt = hit_n_run_init(A_mat, b_vec)
    pts = [curr_pt]
    pts_len = 1

    bar = tqdm(total=n_samples)
    for _ in range(n_samples * hr_timeout):
        direction = np.random.randn(n)
        direction = direction / np.linalg.norm(direction)

        # calculate gamma
        numer = b_vec - np.dot(A_mat, curr_pt)
        denom = np.dot(A_mat, direction)
        gamma = [n / d for n, d in zip(numer, denom)]
        gamma.append(0)
        gamma = np.array(gamma)

        if (gamma > 0).all():
            gamma_min = 0
        else:
            gamma_min = max(gamma[gamma < 0])
        if (gamma < 0).all():
            gamma_max = 0
        else:
            gamma_max = min(gamma[gamma > 0])

        magnitude = np.random.uniform(low=gamma_min, high=gamma_max)
        curr_pt = curr_pt + magnitude * direction

        if is_feasible(A_mat, b_vec, curr_pt):
            pts.append(curr_pt)
            bar.update(1)
            pts_len += 1
            if pts_len >= n_samples:
                break
        else:
            pass
    bar.close()

    if len(pts) < min(0.4 * n_samples, 500):
        raise Exception(
            'Sampled {} points instead of {}'.format(len(pts), 0.4 * n_samples)
        )
    return pts
e867c66ec97b9bbb91ad373b93fb9772e2d519cb
25,219
def construct_simulation_hydra_paths(base_config_path: str) -> HydraConfigPaths:
    """
    Specifies relative paths to simulation configs to pass to hydra to declutter tutorial.
    :param base_config_path: Base config path.
    :return: Hydra config paths.
    """
    common_dir = "file://" + join(base_config_path, 'config', 'common')
    config_name = 'default_simulation'
    config_path = join(base_config_path, 'config', 'simulation')
    experiment_dir = "file://" + join(base_config_path, 'experiments')
    return HydraConfigPaths(common_dir, config_name, config_path, experiment_dir)
3353f910f9de708bbf0e5d46dc64b1d833230043
25,220
def delete_from_limits_by_id(id, connection, cursor):
    """
    Delete row with a certain ID from limits table
    :param id: ID to delete
    :param connection: connection instance
    :param cursor: cursor instance
    :return:
    """
    check_for_existence = get_limit_by_id(id, cursor)
    if check_for_existence.get('failure') is None:
        delete_query = '''Delete from limits where id = {}'''
        cursor.execute(delete_query.format(id))
        connection.commit()
        print(f'Record with id={id} deleted')
        return {'status': 'success', 'message': f'Record with id={id} deleted'}
    else:
        print(f'Failed to delete, ID={id} does not exist')
        return {'failure': f'Failed to delete, ID={id} does not exist'}
7e035550c2d9d22be1af48434d0e36cd6424ecb7
25,221
def article_search(request):
    """Article search using a search vector over the title and body fields,
    with weighting coefficients 1 and 0.4 respectively. The search terms are
    stemmed, and the proximity of the searched words to one another is taken
    into account."""
    query = ''
    results = []
    if 'query' in request.GET:
        results, query = search_results(request)
    return render(request,
                  'articles/post/search.html',
                  {'query': query,
                   'results': results})
423fd3cc4be6cdcef8fd4ab47bc7aa70f52e32bf
25,222
def get_campaign_data(api, campaign_id):
    """Return campaign metadata for the given campaign ID."""
    campaign = dict()

    # Pulls the campaign data as dict from GoPhish.
    rawCampaign: dict = api.campaigns.get(campaign_id).as_dict()

    campaign["id"] = rawCampaign["name"]
    campaign["start_time"] = rawCampaign["launch_date"]
    campaign["end_time"] = rawCampaign["completed_date"]
    campaign["url"] = rawCampaign["url"]
    campaign["subject"] = rawCampaign["template"]["subject"]

    # Get the template ID from the GoPhish template name.
    campaign["template"] = (
        api.templates.get(rawCampaign["template"]["id"]).as_dict()["name"].split("-")[2]
    )

    campaign["clicks"] = get_click_data(api, campaign_id)

    # Get the e-mail send status from GoPhish.
    campaign["status"] = get_email_status(api, campaign_id)

    return campaign
6dc344079e73245ef280d770df3b07f62543d856
25,223
def create_small_map(sharing_model):
    """
    Create small map and 2 BS

    :returns: tuple (map, bs_list)
    """
    map = Map(width=150, height=100)
    bs1 = Basestation('A', Point(50, 50), get_sharing_for_bs(sharing_model, 0))
    bs2 = Basestation('B', Point(100, 50), get_sharing_for_bs(sharing_model, 1))
    bs_list = [bs1, bs2]
    return map, bs_list
ecc56eb95f7d2d8188d7caa7353f5bc95793f46e
25,224
def save_dataz(file_name, obj, **kwargs):
    """Save compressed structured data to files.

    The arguments will be passed to ``numpy.savez()``."""
    return np.savez(file_name, obj, **kwargs)
2ea4ecff522409d79fbecd779710a27a9026dbe4
25,225
def step(x):
    """Heaviside step function."""
    step = np.ones_like(x, dtype='float')
    step[x < 0] = 0
    step[x == 0] = 0.5
    return step
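# A quick usage sketch for the vectorised Heaviside above.
import numpy as np

x = np.array([-2.0, 0.0, 3.0])
print(step(x))  # -> [0.  0.5 1. ]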
2e11c87668b04acef33b7c7499ad10373b33ed76
25,226
def createRaviartThomas0VectorSpace(context, grid, segment=None, putDofsOnBoundaries=False, requireEdgeOnSegment=True, requireElementOnSegment=False): """ Create and return a space of lowest order Raviart-Thomas vector functions with normal components continuous on boundaries between elements. *Parameters:* - context (Context) A Context object that will determine the type used to represent the values of the basis functions of the newly constructed space. - grid (Grid) Grid on which the functions from the newly constructed space will be defined. - segment (GridSegment) (Optional) Segment of the grid on which the space should be defined. If set to None (default), the whole grid will be used. - putDofsOnBoundaries (bool) (Optional) If set to False (default), degrees of freedom will not be placed on edges lying on boundaries of the grid. This is usually the desired behaviour for simulations of open perfectly conducting surfaces (sheets). If set to True, degrees of freedom will be placed on all edges belonging to the chosen segment of the grid. *Returns* a newly constructed Space_BasisFunctionType object, with BasisFunctionType determined automatically from the context argument and equal to either float32, float64, complex64 or complex128. """ name = 'raviartThomas0VectorSpace' dofMode = 0 if requireEdgeOnSegment: dofMode |= 1 if requireElementOnSegment: dofMode |= 2 return _constructObjectTemplatedOnBasis( core, name, context.basisFunctionType(), grid, segment, putDofsOnBoundaries, dofMode)
100242b50f9aac6e55e6e02bb02c07f3b85c4195
25,227
def edus_toks2ids(edu_toks_list, word2ids):
    """
    Convert the argument sentences used for training cbos into id sequences,
    and convert the argument pairs of cdtb argument relations into the
    corresponding list of tuple ids, which is returned.
    """
    tok_list_ids = []
    for line in edu_toks_list:
        line_ids = get_line_ids(toks=line, word2ids=word2ids)
        tok_list_ids.append(line_ids)
    # data storage
    return tok_list_ids
b3b87bfb0ae90c78cff3b02e04f316d917834e2b
25,228
def pd_log_with_neg(ser: pd.Series) -> pd.Series:
    """log transform series with negative values by adding a constant"""
    # shift by (-min + 1) so the smallest value maps to 1 before taking the log
    return np.log(ser - ser.min() + 1)
cf67df4173df27c7b97d320f04cd607c0ee8b866
25,229
def filter_X_dilutions(df, concentration):
    """Select only one dilution ('high', 'low', or some number)."""
    assert concentration in ['high', 'low'] or type(concentration) is int
    df = df.sort_index(level=['CID', 'Dilution'])
    df = df.fillna(999)  # Pandas doesn't select correctly on NaNs
    if concentration == 'low':
        df = df.groupby(level=['CID']).first()
    elif concentration == 'high':
        df = df.groupby(level=['CID']).last()
    else:
        df = df.loc[[x for x in df.index if x[1] == concentration]]
        df = df.groupby(level=['CID']).last()
    df = df.replace(999, float('NaN'))  # Undo the fillna line above.
    return df
b886c87c1c5b96e6efc951ef197d3a0fb13707c1
25,230
def update_params(base_param: dict, additional: dict):
    """overwrite base parameter dictionary

    Parameters
    ----------
    base_param : dict
        base param dictionary
    additional : dict
        additional param dictionary

    Returns
    -------
    dict
        updated parameter dictionary
    """
    for key in additional:
        base_param[key] = additional[key]
    return base_param
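# A short usage sketch for the merge helper above (hypothetical config keys).
base = {"lr": 0.1, "epochs": 10}
override = {"lr": 0.01, "batch_size": 32}
merged = update_params(base, override)
print(merged)  # -> {'lr': 0.01, 'epochs': 10, 'batch_size': 32}
# Note that `base` is modified in place and also returned.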
e73581cb0b8d264343ead56da52c6dc12fe49dd7
25,231
import torch def lanczos_generalized( operator, metric_operator=None, metric_inv_operator=None, num_eigenthings=10, which="LM", max_steps=20, tol=1e-6, num_lanczos_vectors=None, init_vec=None, use_gpu=False, ): """ Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm to find the top k eigenvalues/eigenvectors. Parameters ------------- operator: power_iter.Operator linear operator to solve. num_eigenthings : int number of eigenvalue/eigenvector pairs to compute which : str ['LM', SM', 'LA', SA'] L,S = largest, smallest. M, A = in magnitude, algebriac SM = smallest in magnitude. LA = largest algebraic. max_steps : int maximum number of arnoldi updates tol : float relative accuracy of eigenvalues / stopping criterion num_lanczos_vectors : int number of lanczos vectors to compute. if None, > 2*num_eigenthings init_vec: [torch.Tensor, torch.cuda.Tensor] if None, use random tensor. this is the init vec for arnoldi updates. use_gpu: bool if true, use cuda tensors. Returns ---------------- eigenvalues : np.ndarray array containing `num_eigenthings` eigenvalues of the operator eigenvectors : np.ndarray array containing `num_eigenthings` eigenvectors of the operator """ if isinstance(operator.size, int): size = operator.size else: size = operator.size[0] shape = (size, size) if num_lanczos_vectors is None: num_lanczos_vectors = min(2 * num_eigenthings, size - 1) if num_lanczos_vectors < 2 * num_eigenthings: warn( "[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings" ) def _scipy_apply(x): x = torch.from_numpy(x) if use_gpu: x = x.cuda() return operator.apply(x.float()).cpu().numpy() scipy_op = ScipyLinearOperator(shape, _scipy_apply) if isinstance(metric_operator, np.ndarray) or \ isinstance(metric_operator, ScipyLinearOperator): metric_op = metric_operator else: def _scipy_apply_metric(x): x = torch.from_numpy(x) if use_gpu: x = x.cuda() return metric_operator.apply(x.float()).cpu().numpy() metric_op = ScipyLinearOperator(shape, _scipy_apply_metric) if isinstance(metric_inv_operator, np.ndarray) or \ isinstance(metric_inv_operator, ScipyLinearOperator): metric_inv_op = metric_inv_operator else: def _scipy_apply_metric_inv(x): x = torch.from_numpy(x) if use_gpu: x = x.cuda() return metric_inv_operator.apply(x.float()).cpu().numpy() metric_inv_op = ScipyLinearOperator(shape, _scipy_apply_metric_inv) if init_vec is None: init_vec = np.random.rand(size) elif isinstance(init_vec, torch.Tensor): init_vec = init_vec.cpu().numpy() eigenvals, eigenvecs = eigsh( A=scipy_op, k=num_eigenthings, M=metric_op, Minv=metric_inv_op, which=which, maxiter=max_steps, tol=tol, ncv=num_lanczos_vectors, return_eigenvectors=True, ) return eigenvals, eigenvecs.T
2a3c236817524f2656f9b1631801293b1acf5278
25,232
import urllib.request
import json

def get_articles_news(name):
    """
    Function that gets the json response to our url request
    """
    get_news_url = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey=988fb23113204cfcb2cf79eb7ad99b76'.format(name)

    with urllib.request.urlopen(get_news_url) as url:
        get_news_data = url.read()
        get_news_response = json.loads(get_news_data)

        news_results = None

        if get_news_response['articles']:
            news_results_list = get_news_response['articles']
            news_results = process_articles(news_results_list)

    return news_results
3394112b15903671ec5522c128e9035a404f2650
25,233
def coordConv(fromP, fromV, fromSys, fromDate, toSys, toDate, obsData=None, refCo=None): """Converts a position from one coordinate system to another. Inputs: - fromP(3) cartesian position (au) - fromV(3) cartesian velocity (au/year); ignored if fromSys is Geocentric, Topocentric or Observed - fromSys coordinate system from which to convert; any of the entries in the table below; use opscore.RO.CoordSys constants. - fromDate date* - toSys coordinate system to which to convert (see fromSys) - toDate date* - obsData an opscore.RO.Astro.Cnv.ObserverData object; required if fromSys or toSys is Topocentric or Observed; ignored otherwise. - refCo(2) refraction coefficients; required if fromSys or toSys is Observed; ignored otherwise. Returns: - toP(3) converted cartesian position (au) - toV(3) converted cartesian velocity (au/year) *the units of date depend on the associated coordinate system: coord sys def date date ICRS 2000.0 Julian epoch of observation FK5 2000.0 Julian epoch of equinox and observation FK4 1950.0 Besselian epoch of equinox and observation Galactic now Julian epoch of observation Geocentric now UT1 (MJD) Topocentric now UT1 (MJD) Observed now UT1 (MJD) **Setting fromV all zero means the object is fixed. This slighly affects conversion to or from FK4, which has fictitious proper motion. Error Conditions: - If obsData or refCo are absent and are required, raises ValueError. Details: The conversion is performed in two stages: - fromP/fromSys/fromDate -> ICRS - ICRS -> toP/toSys/toDate Each of these two stages is performed using the following graph: FK5 ------\ FK4 ------ ICRS --- Geocentric -*- Topocentric -**- Observed Galactic--/ * obsData required ** refCo required """ return _TheCnvObj.coordConv(fromP, fromV, fromSys, fromDate, toSys, toDate, obsData, refCo)
29ef79ff896806171a9819fc5ad8bf071bd48969
25,235
def sample_recipe(user, **params):
    """create recipe"""
    defaults = {
        'title': 'paneer tikka',
        'time_minute': 10,
        'price': 5.00
    }
    defaults.update(**params)
    return Recipe.objects.create(user=user, **defaults)
50b53622c68e6385c20296206759bc54f24afa3c
25,236
def briconToScaleOffset(brightness, contrast, drange):
    """Used by the :func:`briconToDisplayRange` and the :func:`applyBricon`
    functions.

    Calculates a scale and offset which can be used to transform a display
    range of the given size so that the given brightness/contrast settings
    are applied.

    :arg brightness: Brightness, between 0.0 and 1.0.
    :arg contrast:   Contrast, between 0.0 and 1.0.
    :arg drange:     Data range.
    """

    # The brightness is applied as a linear offset,
    # with 0.5 equivalent to an offset of 0.0.
    offset = (brightness * 2 - 1) * drange

    # If the contrast lies between 0.0 and 0.5, it is
    # applied to the colour as a linear scaling factor.
    if contrast <= 0.5:
        scale = contrast * 2

    # If the contrast lies between 0.5 and 1, it
    # is applied as an exponential scaling factor,
    # so lower values (closer to 0.5) have less of
    # an effect than higher values (closer to 1.0).
    else:
        scale = 20 * contrast ** 4 - 0.25

    return scale, offset
b75ce49f4e79f7fef34a855f2897cfa6b4bd7cc7
25,237
def HostNameRequestHeader(payload_size):
    """
    Construct a ``MessageHeader`` for a HostNameRequest command.

    Sends local host name to virtual circuit peer. This name will affect
    access rights. Sent over TCP.

    Parameters
    ----------
    payload_size : integer
        Length of host name string.
    """
    struct_args = (21, payload_size, 0, 0, 0, 0)
    # If payload_size or data_count cannot fit into a 16-bit integer, use the
    # extended header.
    return (ExtendedMessageHeader(*struct_args)
            if any((payload_size > 0xffff, ))
            else MessageHeader(*struct_args))
2371dba58d974408be28390462b5e7eb943edd88
25,238
def cinema_trip(persons, day, premium_seating, treat):
    """
    The total cost of going to the cinema

    Parameters:
    ----------
    persons: int
        number of people who need a ticket

    day: int
        day of the week to book (1 = Monday, 7 = Sunday)

    premium_seating: bool
        boolean True/False if premium seats are required

    treat: str
        string value representing a choice of refreshment

    Returns:
    -------
    float
    """
    # fill in your code here
    return tickets(persons, day, premium_seating) + refreshment(treat)
8a2c4418124251ae16dddee6c1a134e3b883b1b8
25,240
import pathlib

def check_path(path: pathlib.Path) -> bool:
    """Check that the path exists and is a regular file."""
    return path.exists() and path.is_file()
2279dde6912ae6f6eb51d90ed5e71e0b3892fea9
25,241
def omega2kwave(omega, depth, grav=9.81): """ Solve the linear dispersion relation close to machine precision:: omega**2 = kwave * grav * tanh(kwave*depth) Parameters ---------- omega : float Wave oscillation frequency [rad/s] depth : float Constant water depth. [m] (<0 indicates infinite depth) grav : float, optional Acceleration of gravity [m/s^2] Returns ------- float Wave number (kwave) [1/m] Raises ------ None """ if depth < 0.0: return omega**2 / grav # Solve equivalent equation system: c == y * tanh(y), kwave = y / depth c = depth * omega**2 / grav # High accuracy fix point schemes if c > 2.5: def f(y0): # y == c/tanh(y) # tanh(y) = 1 - eps, Near y=y0 the RHS is almost c. # Solve y== c / tanh(y0) return c / np.tanh(y0) else: def f(y0): # y*(k0 + k1*(y-y0)) == c*(k0 + k1*(y-y0))/tanh(y0) # tanh(y0) = k0 + k1*(y-y0) + ... # Near y=y0 the RHS is almost c. # Solve y*(k0 + k1*(y-y0)) == c for y k0 = np.tanh(y0) k1 = 1.0 - k0**2 b = k0 - k1 * y0 return 0.5 * (-b + np.sqrt(b**2 + 4.0 * c * k1)) / k1 # Fist initial guess (MIT lecture notes... 4 digits accuracy) if c > 2.4: # Deeper water... y = c * (1.0 + 3.0 * np.exp(-2 * c) - 12.0 * np.exp(-4 * c)) # using fixed point iteration: y <= c + y - y * tanh(y) else: # Shallower water... y = np.sqrt(c) * (1.0 + 0.169 * c + 0.031 * c ** 2) # using fixed point iteration: y <= sqrt(c * y / tanh(y)) y_prev = -1.0 while abs(y - y_prev) > 100 * np.finfo(y).eps: y_prev = y y = f(y) kwave = y / depth return kwave
c448780d0edc3eb59ea79b4025182501875bb82f
25,242
def is_true(a: Bool) -> bool:
    """Returns whether the provided bool can be simplified to true.

    :param a:
    :return:
    """
    return z3.is_true(a.raw)
e579a9793700132f38526d5cb737f3540d550821
25,243
from typing import Counter

def traverse_caves_recursive(cave: str, cave_system: dict, current_path: list[str]):
    """Recursively traverse through all paths in the cave."""
    if cave != "START":
        # build the current path traversed
        current_path = current_path[:]
        current_path.append(cave)

    if cave == "END":
        return current_path

    previous_cave_counts = Counter(current_path)
    small_caves_previously_visited = [
        cave
        for cave in previous_cave_counts.keys()
        if cave.islower() and previous_cave_counts[cave] > 0
    ]
    potential_next_caves = [
        cave_
        for cave_ in cave_system[cave]
        if cave_ not in small_caves_previously_visited
    ]
    if len(potential_next_caves) > 0:
        return [
            traverse_caves_recursive(next_cave, cave_system, current_path)
            for next_cave in potential_next_caves
        ]
e78680ce8e1c3e7675d8fed980c4c706c87c1758
25,244
def count(A, target):
    """invoke recursive function to return number of times target appears in A."""
    def rcount(lo, hi, target):
        """Use recursion to count occurrences of target in A[lo:hi+1]."""
        if lo == hi:
            return 1 if A[lo] == target else 0

        mid = (lo+hi)//2
        left = rcount(lo, mid, target)
        right = rcount(mid+1, hi, target)
        return left + right

    return rcount(0, len(A)-1, target)
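# A quick usage sketch of the divide-and-conquer counter above.
values = [3, 7, 3, 3, 9, 1]
print(count(values, 3))  # -> 3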
79d9be64d332a11993f65f3c0deba8b4de39ebda
25,246
def asset_get_current_log(asset_id):
    """ Get the most recent, non-cancelled log entry for an asset. """
    db = current.db
    s3db = current.s3db
    table = s3db.asset_log
    query = (table.asset_id == asset_id) & \
            (table.cancel == False) & \
            (table.deleted == False)
    # Get the log with the maximum time
    asset_log = db(query).select(table.id,
                                 table.status,
                                 table.datetime,
                                 table.cond,
                                 table.person_id,
                                 table.site_id,
                                 #table.location_id,
                                 orderby=~table.datetime,
                                 limitby=(0, 1)).first()
    if asset_log:
        return Storage(datetime=asset_log.datetime,
                       person_id=asset_log.person_id,
                       cond=int(asset_log.cond or 0),
                       status=int(asset_log.status or 0),
                       site_id=asset_log.site_id,
                       #location_id = asset_log.location_id
                       )
    else:
        return Storage()
38bbdaade290e0f60a2dd7faa628b2c72dd48a8c
25,247
def _filter_to_k_shot(dataset, num_classes, k):
    """Filters k-shot subset from a dataset."""
    # !!! IMPORTANT: the dataset should *not* be shuffled. !!!
    # Make sure that `shuffle_buffer_size=1` in the call to
    # `dloader.get_tf_data`.

    # Indices of included examples in the k-shot balanced dataset.
    keep_example = []
    # Keep track of the number of examples per class included in
    # `keep_example`.
    class_counts = np.zeros([num_classes], dtype=np.int32)
    for _, label in dataset.as_numpy_iterator():
        # If there are less than `k` examples of class `label` in
        # `example_indices`, keep this example and update the class counts.
        keep = class_counts[label] < k
        keep_example.append(keep)
        if keep:
            class_counts[label] += 1
        # When there are `k` examples for each class included in
        # `keep_example`, stop searching.
        if (class_counts == k).all():
            break
    dataset = tf.data.Dataset.zip((
        tf.data.Dataset.from_tensor_slices(keep_example),
        dataset
    )).filter(lambda keep, _: keep).map(lambda _, example: example).cache()
    return dataset
d61f064dbbdc00b68fffc580baf7e658610e44eb
25,248
def _create_tf_example(entry):
    """ Creates a tf.train.Example to be saved in the TFRecord file.

    Args:
        entry: string containing the path to a image and its label.

    Return:
        tf_example: tf.train.Example containing the info stored in feature
    """
    image_path, label = _get_image_and_label_from_entry(entry)

    # Convert the jpeg image to raw image.
    image = Image.open(image_path)
    image_np = np.array(image)
    image_raw = image_np.tostring()

    # Data which is going to be stored in the TFRecord file
    feature = {
        'image': tfrecord_utils.bytes_feature(image_raw),
        'image/height': tfrecord_utils.int64_feature(image_np.shape[0]),
        'image/width': tfrecord_utils.int64_feature(image_np.shape[1]),
        'label': tfrecord_utils.int64_feature(label),
    }

    tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
    return tf_example
c50c2ff02eccc286319db6263841472d7c2b9fe3
25,249
def _gen_parameters_section(names, parameters, allowed_periods=None):
    """Generate the "parameters" section of the indicator docstring.

    Parameters
    ----------
    names : Sequence[str]
        Names of the input parameters, in order. Usually `Ind._parameters`.
    parameters : Mapping[str, Any]
        Parameters dictionary. Usually `Ind.parameters`; as this is missing `ds`,
        it is added explicitly.
    allowed_periods : sequence of str, optional
        If given, frequency parameters are documented as restricted to periods
        equivalent to one of these.
    """
    section = "Parameters\n----------\n"
    for name in names:
        if name == "ds":
            descstr = "Input dataset."
            defstr = "Default: None."
            unitstr = ""
            annotstr = "Dataset, optional"
        else:
            param = parameters[name]
            descstr = param["description"]
            if param["kind"] == InputKind.FREQ_STR and allowed_periods is not None:
                descstr += (
                    f" Restricted to frequencies equivalent to one of {allowed_periods}"
                )
            if param["kind"] == InputKind.VARIABLE:
                defstr = f"Default : `ds.{param['default']}`. "
            elif param["kind"] == InputKind.OPTIONAL_VARIABLE:
                defstr = ""
            else:
                defstr = f"Default : {param['default']}. "
            if "choices" in param:
                annotstr = str(param["choices"])
            else:
                annotstr = KIND_ANNOTATION[param["kind"]]
            if param.get("units", False):
                unitstr = f"[Required units : {param['units']}]"
            else:
                unitstr = ""

        section += f"{name} : {annotstr}\n    {descstr}\n    {defstr}{unitstr}\n"
    return section
9bc249ca67dcc1c0f7ff8538b8078bfcd5c231a1
25,250
def bins(df):
    """Bins the scores into 10-point intervals for building charts.

    Parameters
    ----------
    df : pandas.DataFrame
        Input DataFrame.

    Returns
    -------
    pandas.DataFrame
        Output DataFrame with counts, percentages and cumulative totals per bin.
    """
    df_bins = pd.DataFrame(df['ALUNO'].rename('Contagem').groupby(pd.cut(
        df['Pontuação final'].rename('Intervalos'), np.arange(0, 101, 10),
        right=False)).count())
    df_bins['Contagem /%'] = round(100 * df_bins['Contagem'] /
                                   df_bins['Contagem'].sum(), 2)
    df_bins['Contagem cumulativa'] = df_bins['Contagem'].cumsum()
    df_bins['Contagem /% cumulativa'] = df_bins['Contagem /%'].cumsum()

    return df_bins
7c4570866fcb5795dc9052222479e23574fbf64b
25,252
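A toy run, assuming bins() above is in scope and that pandas/numpy are imported as pd/np as the function body expects; the student names and scores are invented:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "ALUNO": ["A1", "A2", "A3", "A4", "A5"],
    "Pontuação final": [12.5, 35.0, 47.5, 88.0, 91.0],
})
print(bins(df))  # one row per 10-point interval with counts and cumulative stats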
from datetime import datetime

from pymongo import MongoClient


def insert_video(ID):
    """
    The function gets a valid YouTube ID, checks for its existence in the
    database and, if not found, calls the YouTube API and inserts the result
    into the MongoDB database.
    """
    client = MongoClient('localhost:27017')
    db = client['PreCog']
    collection = db['YoutubeRaw']
    check_collection = db['YoutubeProcessed']
    check = check_collection.find_one({"ID": ID})
    if check is None:
        video = youtube_search(ID)
        if video is not None:
            result = collection.insert_one(video)
            print(result.inserted_id, datetime.now())
            return True
    else:
        print("Already in DataBase")
        return False
9c4f453db72739973384ea1890614463855fcca2
25,254
def get_neighbor_v6_by_search(search=None): """Return a list of NeighborV6's by dict.""" try: objects = NeighborV6.objects.filter() search_dict = search if search else dict() object_map = build_query_to_datatable_v3(objects, search_dict) except FieldError as e: raise api_rest_exceptions.ValidationAPIException(str(e)) except Exception as e: raise api_rest_exceptions.NetworkAPIException(str(e)) else: return object_map
3ed22479c140b7f71cd02d03be6c0fc82b0e81ca
25,255
import functools


def validate_customer(fn):
    """ Validates that a customer supplies between 1 and 5 credit cards and that each is 16 digits long """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # Validate credit card count
        cc_list = kwargs.get("credit_cards")
        trimmed_cc = [remove_non_numeric(cc.replace(' ', '')) for cc in cc_list]
        num_credit_cards = len(trimmed_cc)
        if num_credit_cards < 1 or num_credit_cards > 5:
            return "Number of credit cards must be between 1 and 5, inclusive", 400

        # Validate credit card composition
        for cc in trimmed_cc:
            if len(cc) != 16 or not cc.isdigit():
                return f"Credit card {cc} must be 16 digits long", 400

        # If passed, continue with registration
        kwargs["credit_cards"] = trimmed_cc
        return fn(*args, **kwargs)

    return wrapped
ecd5b63ed5f1ae8ecf94a27ba3f353f371dabe39
25,256
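The decorator only inspects the credit_cards keyword argument, so it can be exercised outside a web framework. A sketch assuming validate_customer and the module's remove_non_numeric helper are both in scope; the register endpoint below is hypothetical:

@validate_customer
def register(name, credit_cards=None):
    # Hypothetical endpoint body; just echoes what the decorator passed through.
    return f"registered {name} with {len(credit_cards)} card(s)", 200

print(register("alice", credit_cards=["4111 1111 1111 1111"]))  # -> success tuple
print(register("bob", credit_cards=[]))           # -> error tuple, too few cards
print(register("carol", credit_cards=["1234"]))   # -> error tuple, not 16 digits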
from json import dumps
from typing import Any
from typing import Sequence
from typing import Mapping


def format_field(value: Any) -> str:
    """
    Function that formats a single field for output on a table or CSV output, in
    order to deal with nested arrays or objects in the JSON outputs of the API.

    :param value: the value to format
    :return: a string that is fit for console output
    """
    if isinstance(value, Sequence) and not isinstance(value, (str, bytes)):
        if all(isinstance(x, (str, bytes, int, float)) for x in value):
            return ", ".join([str(x) for x in value])
        return dumps(value)
    if isinstance(value, Mapping):
        return dumps(value)
    return value
5eef5c433924807b195c574c568d0e0a0a433eb7
25,257
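A quick illustration of the different branches, assuming format_field() above is in scope:

print(format_field(["a", "b", 3]))          # -> "a, b, 3"
print(format_field([{"k": 1}, {"k": 2}]))   # -> JSON-encoded list
print(format_field({"nested": [1, 2]}))     # -> JSON-encoded object
print(format_field("plain string"))         # -> returned unchanged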
def count_honeypot_events(): """ Get total number of honeypot events Returns: JSON/Dict number of honeypot events """ date = fix_date( get_value_from_request("date") ) if date: try: return jsonify( { "count_honeypot_events_by_date": connector.honeypot_events.count_documents( { "date": { "$gte": date[0], "$lte": date[1] } } ), "date": date } ), 200 except Exception as _: return flask_null_array_response() else: try: return jsonify( { "count_honeypot_events": connector.honeypot_events.estimated_document_count() } ), 200 except Exception as _: return flask_null_array_response()
22a0fd932098ec9e846daaa097c01a9f62763cc6
25,258
def k_i_grid(gridsize, boxsize):
    """k_i_grid(gridsize, boxsize)"""
    halfgrid = gridsize // 2
    boxsize = egp.utils.boxsize_tuple(boxsize)
    dk = 2 * np.pi / boxsize
    kmax = gridsize * dk
    _ = np.newaxis
    k1, k2, k3 = dk[:, _, _, _] * np.mgrid[0:gridsize, 0:gridsize, 0:halfgrid + 1]
    k1 -= kmax[0] * (k1 > dk[0] * (halfgrid - 1))  # shift the second half to negative k values
    k2 -= kmax[1] * (k2 > dk[1] * (halfgrid - 1))
    return np.array((k1, k2, k3))
29ffb72367672b8dd3f2e0a37923af565fb26306
25,259
def is_bool_type(typ): """ Check if the given type is a bool. """ if hasattr(typ, '__supertype__'): typ = typ.__supertype__ return isinstance(typ, type) and issubclass(typ, bool)
3d8dfae184be330c8cbd7c0e7382311fef31ede5
25,260
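A behaviour check, including the typing.NewType unwrapping path via __supertype__, assuming is_bool_type() above is in scope:

from typing import NewType

Flag = NewType("Flag", bool)

print(is_bool_type(bool))   # True
print(is_bool_type(Flag))   # True  (resolved through __supertype__)
print(is_bool_type(int))    # False (bool subclasses int, not the other way round)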
from math import log


def gaussians_entropy(covars, ns=nt.NumpyLinalg):
    """
    Calculates entropy of an array of Gaussian distributions
    :param covars: [N*D*D] covariance matrices
    :return: total entropy
    """
    N = covars.shape[0]
    D = covars.shape[-1]
    return 0.5 * N * D * (1 + log(2*ns.pi)) + 0.5 * ns.sum(ns.det(covars))
fe858d891cb4243b0aaf8b73fc7f38e542c5130d
25,261
def genBetaModel(matshape, cen, betaparam):
    """
    Generate beta model with given parameters

    inputs
    ======
    matshape: tuple or list
        Shape of the matrix
    cen: tuple or list
        Location of the center pixel
    betaparam: dict
        Parameters of the beta function
        {
            "A": float,
            "r0": float,
            "theta": float,
            "beta": float,
            "majaxis": float,
            "minaxis": float,
        }

    output
    ======
    matbeta: np.ndarray
        The matrix with modelled beta values
    """
    if len(betaparam) != 6:
        print("betaparam must contain exactly the 6 expected parameters.")
        return None
    # Init matbeta
    matbeta = np.zeros(matshape)
    # load parameters
    A = betaparam['A']
    r0 = betaparam['r0']
    theta = betaparam['theta']
    beta = betaparam['beta']
    majaxis = betaparam['majaxis']
    minaxis = betaparam['minaxis']
    ecc = majaxis / minaxis  # eccentricity
    # Generate meshgrids
    X = np.linspace(1, matshape[0], matshape[0])
    Y = np.linspace(1, matshape[1], matshape[1])
    # anti-clockwise rotation matrix
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    # Calc
    for j, x in enumerate(X):
        for i, y in enumerate(Y):
            x_r = x - cen[0]
            y_r = y - cen[1]
            r = np.matmul(rot, np.array([x_r, y_r]))
            r = r[0]**2 + r[1]**2 * ecc**2
            matbeta[i, j] = A * (1 + r/r0**2)**(-np.abs(beta))

    return matbeta
cabe4ff2d217bf918af291ad1fe875a66de2ca2a
25,262
def folders_to_create(search_path, dirs, base_path=""): """ Recursively traverse through folder paths looking for the longest existing subpath. Return the dir info of the longest subpath and the directories that need to be created. """ # Allow user to pass in a string, but use a list in the recursion if isinstance(search_path, list): parts = search_path else: parts = search_path.strip("/").split("/") # shared drives don't start with a / if base_path == "" and not search_path.startswith("/"): base_path = parts.pop(0) parent = [dr for dr in dirs if dr.get("path", "") == base_path] if len(parent) == 0: parent = {"id": "root"} else: parent = parent.pop() # Stop if we ran out of parts to create if len(parts) == 0: return parent, [] base_path += "/" + parts[0] dirs = [dr for dr in dirs if dr.get("path", "").startswith(base_path)] # If there's base_path matches, then keep looking for a longer path if len(dirs) > 0: return folders_to_create(parts[1:], dirs, base_path) else: return parent, parts
91750afa8a4756a09b71cc397a5991b106fd8909
25,263
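A small worked example, assuming folders_to_create() above is in scope; the directory entries below are hypothetical but follow the {"path": ..., "id": ...} shape the function reads:

dirs = [
    {"path": "/projects", "id": "f1"},
    {"path": "/projects/reports", "id": "f2"},
]

parent, missing = folders_to_create("/projects/reports/2024/q1", dirs)
print(parent["id"])  # "f2" -- the longest existing subpath
print(missing)       # ["2024", "q1"] -- folders that still need to be created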
def profile_line(image, src, dst, linewidth=1, order=1, mode='constant', cval=0.0): """Return the intensity profile of an image measured along a scan line. Parameters ---------- image : numeric array, shape (M, N[, C]) The image, either grayscale (2D array) or multichannel (3D array, where the final axis contains the channel information). src : 2-tuple of numeric scalar (float or int) The start point of the scan line. dst : 2-tuple of numeric scalar (float or int) The end point of the scan line. The destination point is *included* in the profile, in contrast to standard numpy indexing. linewidth : int, optional Width of the scan, perpendicular to the line order : int in {0, 1, 2, 3, 4, 5}, optional The order of the spline interpolation to compute image values at non-integer coordinates. 0 means nearest-neighbor interpolation. mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional How to compute any values falling outside of the image. cval : float, optional If `mode` is 'constant', what constant value to use outside the image. Returns ------- return_value : array The intensity profile along the scan line. The length of the profile is the ceil of the computed length of the scan line. Examples -------- >>> x = np.array([[1, 1, 1, 2, 2, 2]]) >>> img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)]) >>> img array([[0, 0, 0, 0, 0, 0], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 0, 0]]) >>> profile_line(img, (2, 1), (2, 4)) array([ 1., 1., 2., 2.]) >>> profile_line(img, (1, 0), (1, 6), cval=4) array([ 1., 1., 1., 2., 2., 2., 4.]) The destination point is included in the profile, in contrast to standard numpy indexing. For example: >>> profile_line(img, (1, 0), (1, 6)) # The final point is out of bounds array([ 1., 1., 1., 2., 2., 2., 0.]) >>> profile_line(img, (1, 0), (1, 5)) # This accesses the full first row array([ 1., 1., 1., 2., 2., 2.]) """ perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth) if image.ndim == 3: pixels = [ndi.map_coordinates(image[..., i], perp_lines, order=order, mode=mode, cval=cval) for i in range(image.shape[2])] pixels = np.transpose(np.asarray(pixels), (1, 2, 0)) else: pixels = ndi.map_coordinates(image, perp_lines, order=order, mode=mode, cval=cval) intensities = pixels.mean(axis=1) return intensities
29a020f77448c394d96be3fbd0b382b4657d401e
25,264
def set_out(pin, state):
    """
    Set simple digital (high/low) output
    :param pin: pin number or logical name
    :param state: output state: 1/0 = True/False
    :return: verdict
    """
    __digital_out_init(pin).value(state)
    return {'pin': pin, 'state': state}
c76fb07d52fce9e0c66a6b63b5518b6575589807
25,265
import requests def get_session(): """Define a re-usable Session object""" session = requests.Session() session.auth = auth session.verify = False return session
3539e4a7433ffb58aa726c7fef87af080b011f64
25,266
def create_test_action(context, **kw):
    """Create and return a test action object.

    Create an action in the DB and return an Action object with appropriate
    attributes.
    """
    action = get_test_action(context, **kw)
    action.create()
    return action
8093d8f9b73ad0871ee422e2ba977f34907a3ae1
25,268
def parse(header_array, is_paper=False):
    """ Decides which version of the headers to use."""

    if not is_paper:
        version = clean_entry(header_array[2])
        if old_eheaders_re.match(version):
            headers_list = old_eheaders
        elif new_eheaders_re.match(version):
            headers_list = new_eheaders
        else:
            raise UnknownHeaderError("Couldn't find parser for electronic version %s" % (version))
    else:
        version = clean_entry(header_array[1])
        if paper_headers_v1_re.match(version):
            headers_list = paper_headers_v1
        elif paper_headers_v2_2_re.match(version):
            headers_list = paper_headers_v2_2
        elif paper_headers_v2_6_re.match(version):
            headers_list = paper_headers_v2_6
        else:
            raise UnknownHeaderError("Couldn't find parser for paper version %s" % (version))

    headers = {}
    for i in range(0, len(headers_list)):
        this_arg = ""
        # It's acceptable for header rows to leave off trailing delimiters, so
        # enter missing trailing args as blanks.
        try:
            this_arg = clean_entry(header_array[i])
        except IndexError:
            # Header row ended early; leave this field blank.
            pass

        headers[headers_list[i]] = this_arg

    return headers
91f692b20300f96fac5d67e40950f7af17552ecb
25,269
import json def lookup_plex_media(hass, content_type, content_id): """Look up Plex media for other integrations using media_player.play_media service payloads.""" content = json.loads(content_id) if isinstance(content, int): content = {"plex_key": content} content_type = DOMAIN plex_server_name = content.pop("plex_server", None) plex_server = get_plex_server(hass, plex_server_name) if playqueue_id := content.pop("playqueue_id", None): try: playqueue = plex_server.get_playqueue(playqueue_id) except NotFound as err: raise HomeAssistantError( f"PlayQueue '{playqueue_id}' could not be found" ) from err else: shuffle = content.pop("shuffle", 0) media = plex_server.lookup_media(content_type, **content) if media is None: raise HomeAssistantError( f"Plex media not found using payload: '{content_id}'" ) playqueue = plex_server.create_playqueue(media, shuffle=shuffle) return (playqueue, plex_server)
060b5cded7bbb8d149a7bf2b94d5a5352114d671
25,271
def dsym_test(func): """Decorate the item as a dsym test.""" if isinstance(func, type) and issubclass(func, unittest2.TestCase): raise Exception("@dsym_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): try: if lldb.dont_do_dsym_test: self.skipTest("dsym tests") except AttributeError: pass return func(self, *args, **kwargs) # Mark this function as such to separate them from the regular tests. wrapper.__dsym_test__ = True return wrapper
d31c5bd87311b2582b9668fb25c1d4648663d1b8
25,273
from datetime import datetime def count_meetings(signups=None, left: datetime=None, right: datetime=None) -> int: """ Returns the number of meetings the user has been to, between two date ranges. Left bound is chosen as an arbitrary date guaranteed to be after any 8th periods from the past year, but before any from the current year. Right bound is chosen to be today. """ # can't use default arguments initialized at function definition # in a long-running app right would be the date of last reset not "today" if signups is None: signups = get_signups() if left is None: left = summer(get_year() - 1) if right is None: right = datetime.today() return len(list( filter(lambda signup: left < ion_date(signup["block"]["date"]) < right, signups)))
8cb5290111db947b3daa01248452cfd36c80ec46
25,274
def fake_surroundings(len_poem, size_surroundings=5):
    """
    Return a list of randomly drawn indices
    :param len_poem: number of verses in the poem
    :param size_surroundings: maximum distance from the reference verse (default 5)
    :return: list
    """
    # lower bounds
    lower_bounds_w_neg = np.array([row - size_surroundings for row in range(len_poem)])
    lower_bounds_2d = np.stack([np.zeros(len_poem), lower_bounds_w_neg])
    # take the max of 0 and (rank - surroundings)
    lower_bounds = np.max(lower_bounds_2d, axis=0)

    # upper bounds
    higher_bounds_w_neg = np.array([row + size_surroundings for row in range(len_poem)])
    higher_bounds_2d = np.stack([np.repeat(len_poem, len_poem), higher_bounds_w_neg])
    # take the min of the poem length and (rank + surroundings)
    higher_bounds = np.min(higher_bounds_2d, axis=0)

    # random draw
    fake_within_poem = np.random.randint(low=lower_bounds, high=higher_bounds).tolist()

    return fake_within_poem
91bbdefedd3ed2aa4d63db1ef7281129adb20036
25,275
import numpy def interpolate_missing(sparse_list): """Use linear interpolation to estimate values for missing samples.""" dense_list = list(sparse_list) x_vals, y_vals, x_blanks = [], [], [] for x, y in enumerate(sparse_list): if y is not None: x_vals.append(x) y_vals.append(y) else: x_blanks.append(x) if x_blanks: interpolants = numpy.interp(x_blanks, x_vals, y_vals) for x, y in zip(x_blanks, interpolants): dense_list[x] = y return dense_list
a2983a08f00b4de2921c93cc14d3518bc8bd393d
25,276
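An example with a couple of gaps, assuming interpolate_missing() above is in scope:

samples = [0.0, None, 2.0, None, None, 5.0]
print(interpolate_missing(samples))
# -> [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]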
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError

from modAL.utils.data import modALinput


def classifier_uncertainty(classifier: BaseEstimator, X: modALinput, **predict_proba_kwargs) -> np.ndarray:
    """
    Classification uncertainty of the classifier for the provided samples.

    Args:
        classifier: The classifier for which the uncertainty is to be measured.
        X: The samples for which the uncertainty of classification is to be measured.
        **predict_proba_kwargs: Keyword arguments to be passed for the :meth:`predict_proba` of the classifier.

    Returns:
        Classifier uncertainty, which is 1 - P(prediction is correct).
    """
    # calculate uncertainty for each point provided
    try:
        classwise_uncertainty = classifier.predict_proba(X, **predict_proba_kwargs)
    except NotFittedError:
        return np.ones(shape=(X.shape[0], ))

    # for each point, select the maximum uncertainty
    uncertainty = 1 - np.max(classwise_uncertainty, axis=1)
    return uncertainty
419da65825fff7de53ab30f4be31f2be0cf4bbbd
25,278
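A toy run with a scikit-learn classifier, assuming classifier_uncertainty() above is in scope; the data and the choice of LogisticRegression are arbitrary:

import numpy as np
from sklearn.linear_model import LogisticRegression

X_train = np.array([[0.0], [0.2], [0.8], [1.0]])
y_train = np.array([0, 0, 1, 1])
X_pool = np.array([[0.1], [0.5], [0.9]])

clf = LogisticRegression()
print(classifier_uncertainty(clf, X_pool))   # all 1.0 -- unfitted classifier branch

clf.fit(X_train, y_train)
print(classifier_uncertainty(clf, X_pool))   # highest uncertainty near the 0.5 boundary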
def test_preserve_scalars(): """ test the preserve_scalars decorator """ class Test(): @misc.preserve_scalars def meth(self, arr): return arr + 1 t = Test() assert t.meth(1) == 2 np.testing.assert_equal(t.meth(np.ones(2)), np.full(2, 2))
ae48d49e5dd6781a304f75abd9b43e67faa09ee1
25,280
def from_string(zma_str, one_indexed=True, angstrom=True, degree=True): """ read a z-matrix from a string """ syms, key_mat, name_mat, val_dct = ar.zmatrix.read(zma_str) val_mat = tuple(tuple(val_dct[name] if name is not None else None for name in name_mat_row) for name_mat_row in name_mat) zma = automol.create.zmat.from_data( syms, key_mat, val_mat, name_mat, one_indexed=one_indexed, angstrom=angstrom, degree=degree) return zma
cf3817268cab7e79bf924f9d52cb70d01188ea48
25,281
def get_jmp_addr(bb): """ @param bb List of PseudoInstructions of one basic block @return Address of jump instruction in this basic block """ for inst in bb: if inst.inst_type == 'jmp_T': return inst.addr return None
13e69032bc7d6ed5413b5efbb42729e11661eab1
25,283
import sqlite3


def open_db_conn(db_file=r'/home/openwpm/Desktop/crawl-data.sqlite'):
    """ Open a connection to the sqlite database """
    try:
        conn = sqlite3.connect(db_file)
        return conn
    except Exception as e:
        print(e)
        return None
28338f3ff3679c83a1e9040aa9b6e4f5026e4606
25,285
def totals_per_time_frame(data_points, time_frame):
    """For a set of data points from a single CSV file, calculate the percentage of
    retransmissions in each time frame

    Args:
        data_points (List[List[int,int,float]]): A list of data points. Each data point
            consists of 0: 1 if is a transmission, 1: 1 if is a retransmission, 2: time in seconds
        time_frame (float): increment of time in seconds in which new data points are calculated

    Returns:
        List[List[float,float]]: A list of data points containing the percent retransmissions,
            and the time in seconds
    """
    time_frame_min = 0
    time_frame_max = time_frame
    percent_retransmissions_list = []
    transmissions_in_frame = 0
    retransmissions_in_frame = 0
    index = 0

    while time_frame_max < data_points[-1][2] and index < len(data_points):
        if data_points[index][2] >= time_frame_min and data_points[index][2] < time_frame_max:
            transmissions_in_frame += data_points[index][0] + data_points[index][1]
            retransmissions_in_frame += data_points[index][1]
            index += 1
        else:
            if transmissions_in_frame > 0:
                percent_retransmissions = 100*retransmissions_in_frame/transmissions_in_frame
            else:
                percent_retransmissions = 0
            percent_retransmissions_list.append([percent_retransmissions, time_frame_min])
            time_frame_min = time_frame_max
            time_frame_max += time_frame
            transmissions_in_frame = 0
            retransmissions_in_frame = 0

    return percent_retransmissions_list
9e71ac2fe7deabd36d7df8ae099575b191260c5d
25,286
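A tiny worked example, assuming totals_per_time_frame() above is in scope; the packet list is invented:

# Each data point is [is_transmission, is_retransmission, time_in_seconds].
points = [
    [1, 0, 0.1], [1, 0, 0.4], [0, 1, 0.9],   # 1 retransmission out of 3 packets
    [1, 0, 1.2], [1, 0, 1.8],                # no retransmissions in the next frame
    [0, 1, 2.5],                             # final point only triggers the flush of the second frame
]
print(totals_per_time_frame(points, time_frame=1.0))
# -> [[33.33..., 0], [0, 1.0]]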