content : string (lengths 35 to 762k)
sha1 : string (length 40)
id : int64 (0 to 3.66M)
def read(channel):
    """This function returns the state of a specified GPIO pin."""
    GPIO.setup(channel, GPIO.IN)
    return GPIO.input(channel)
89ac5add868935617ad7d4032fd38e91ce704622
33,868
def sortmerna_indexdb(input_fp, output_fp, params="", HALT_EXEC=False):
    """Build a SortMeRNA index database from input_fp by calling indexdb_rna."""
    cmd = "indexdb_rna --ref %s,%s -v %s" % (input_fp, output_fp, params)
    return call_cmd(cmd, HALT_EXEC)
25c5321fc533a89b8524fe26ddcd1328d260a881
33,869
import copy

import numpy as np


def _extract_original_opp_board(finished_board):
    """
    Removes all shots that have been fired on the board, reverting sunken ships to normal.
    Notably, coordinates that have hits but no sunk ship will be set to empty, as the
    position of the original ship is unclear.

    The motivation for this function is to make past games against opponents replayable.
    However, as the opponent's positions are never revealed unless hit, instances where
    not all ships are sunk will be incomplete.
    :param finished_board:
    :return:
    """
    opp_board = copy.deepcopy(finished_board)  # copy the board.

    for (y, x), val in np.ndenumerate(opp_board):
        if val in ['M', 'H']:  # Modify all hits and misses to blanks.
            opp_board[y][x] = ''
        # Only consider sunken ships. Half-finished ships (marked with 'H') are too ambiguous.
        elif len(val) == 2 and val[0] == 'S':
            opp_board[y][x] = val[1]

    return opp_board
60a722909415ffa5bced6e6b4fb640a036dcecaa
33,870
def pool():
    """Fixture that returns a Pool object."""
    return MagicMock()
d9f661fb8ec67bdf1694d7d1d62603e8313fd479
33,871
def extended_gcd(a, b):
    """
    ----- THIS FUNCTION WAS TAKEN FROM THE INTERNET -----
    Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
    """
    # r = gcd(a,b)  i = multiplicative inverse of a mod b
    #      or       j = multiplicative inverse of b mod a
    # Negative return values for i or j are made positive mod b or a respectively
    # Iterative version is faster and uses much less stack space
    x = 0
    y = 1
    lx = 1
    ly = 0
    oa = a  # Remember original a/b to remove
    ob = b  # negative values from return results
    while b != 0:
        q = a // b
        (a, b) = (b, a % b)
        (x, lx) = ((lx - (q * x)), x)
        (y, ly) = ((ly - (q * y)), y)
    if lx < 0:
        lx += ob  # If negative, wrap modulo original b
    if ly < 0:
        ly += oa  # If negative, wrap modulo original a
    return a, lx, ly
19cb82dcce75c60e4672ecb6de73719452952f2f
33,872
def get_interp_BmV_from_Teff(teff):
    """
    Given an effective temperature (or an array of them), get interpolated B-V color.
    """
    mamadf = load_basetable()
    mamadf = mamadf[3:-6]  # finite, monotonic BmV

    mamarstar, mamamstar, mamateff, mamaBmV = (
        nparr(mamadf['R_Rsun'])[::-1],
        nparr(mamadf['Msun'])[::-1],
        nparr(mamadf['Teff'])[::-1],
        nparr(mamadf['B-V'])[::-1].astype(float)
    )

    # include "isbad" catch because EVEN ONCE SORTED, you can have multivalued
    # BmVs. so remove anything where diff not strictly greater than...
    isbad = np.insert(np.diff(mamaBmV) == 0, False, 0)

    fn_teff_to_BmV = interp1d(mamateff[~isbad], mamaBmV[~isbad],
                              kind='quadratic', bounds_error=False,
                              fill_value='extrapolate')

    return fn_teff_to_BmV(teff)
ff7d2bef3daa68addcb0e3317a793d86a2ad0e57
33,873
import types
import importlib


async def _load_mfa_module(hass: HomeAssistant, module_name: str) -> types.ModuleType:
    """Load an mfa auth module."""
    module_path = f"homeassistant.auth.mfa_modules.{module_name}"

    try:
        module = importlib.import_module(module_path)
    except ImportError as err:
        _LOGGER.error("Unable to load mfa module %s: %s", module_name, err)
        raise HomeAssistantError(
            f"Unable to load mfa module {module_name}: {err}"
        ) from err

    if hass.config.skip_pip or not hasattr(module, "REQUIREMENTS"):
        return module

    processed = hass.data.get(DATA_REQS)
    if processed and module_name in processed:
        return module

    processed = hass.data[DATA_REQS] = set()
    # https://github.com/python/mypy/issues/1424
    await requirements.async_process_requirements(
        hass, module_path, module.REQUIREMENTS  # type: ignore
    )
    processed.add(module_name)
    return module
115520c3c2f2cf00ae238d189817728f6129130d
33,876
def model_metrics(key, codes="", nb=None, with_doc=False):
    """
    param1: dictionary : AutoAI steps data
    param2: string : Code syntax
    param3: boolean : Whether to include documentation/meta description for the following section

    return: string : Code syntax

    The function adds code syntax related to the model evaluation/performance metrics
    based on problem type, either classification or regression.
    """
    if with_doc:
        if nb is not None:
            nb['cells'].append(nbf.v4.new_markdown_cell(IpynbComments.procedure['metrics']))
        else:
            codes = codes + PyComments.procedure['metrics']
    if nb is not None and codes == "":
        nb['cells'].append(nbf.v4.new_code_cell(SourceCode.metric[key]))
        return nb
    else:
        return codes + SourceCode.metric[key]
3893004b29edc53cdb780b3d6234b732365095a5
33,877
def so3_rotate_with_normal(batch_data):
    """ Randomly rotate the point clouds to augment the dataset;
        rotation is per shape, about all three axes.
        Input:
          BxNx6 array, original batch of point clouds (xyz + normals)
        Return:
          BxNx6 array, rotated batch of point clouds (xyz + normals)
    """
    rotated_data = np.zeros((batch_data.shape[0], batch_data.shape[1], 3), dtype=np.float32)
    rotated_normal = np.zeros((batch_data.shape[0], batch_data.shape[1], 3), dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle_A = np.random.uniform() * 2 * np.pi
        rotation_angle_B = np.random.uniform() * 2 * np.pi
        rotation_angle_C = np.random.uniform() * 2 * np.pi
        cosval_A = np.cos(rotation_angle_A)
        sinval_A = np.sin(rotation_angle_A)
        cosval_B = np.cos(rotation_angle_B)
        sinval_B = np.sin(rotation_angle_B)
        cosval_C = np.cos(rotation_angle_C)
        sinval_C = np.sin(rotation_angle_C)
        rotation_matrix = np.array(
            [[cosval_B*cosval_C, -cosval_B*sinval_C, sinval_B],
             [sinval_A*sinval_B*cosval_C + cosval_A*sinval_C,
              -sinval_A*sinval_B*sinval_C + cosval_A*cosval_C,
              -sinval_A*cosval_B],
             [-cosval_A*sinval_B*cosval_C + sinval_A*sinval_C,
              cosval_A*sinval_B*sinval_C + sinval_A*cosval_C,
              cosval_A*cosval_B]])
        shape_pc = batch_data[k, :, :3]
        shape_nm = batch_data[k, :, 3:]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
        rotated_normal[k, ...] = np.dot(shape_nm.reshape((-1, 3)), rotation_matrix)
    rotated_data = np.concatenate((rotated_data, rotated_normal), axis=-1)
    return rotated_data
90114e4110f0b9e54cc71a77339574ecfa98a194
33,878
def populate_from_file(filename, gap=0.1, pool_intensity=4, pool_capacity=None,
                       eps_diff=1e-7, verbose=False):
    """ Runs populate on a model file.

    :param filename: the model file.
    :param gap: MIP gap to use for the populate phase (default is 10%)
    :param pool_intensity: the value for the parameter mip.pool.intensity (default is 4)
    :param pool_capacity: the pool capacity (if any)
    :param eps_diff: precision to use for testing variable difference
    :param verbose: optional flag to print results.

    :return: the solution pool as returned by `docplex.mp.Model.populate()`
    """
    m = ModelReader.read(filename)
    assert m
    return populate_from_model(m, gap, pool_intensity, pool_capacity, eps_diff, verbose)
f5e2ab7b5558a9f72593395055652b8e6d11eb94
33,879
def dihedral(p):
    """Praxeolitic formula
    1 sqrt, 1 cross product"""
    p0 = p[0]
    p1 = p[1]
    p2 = p[2]
    p3 = p[3]

    b0 = -1.0*(p1 - p0)
    b1 = p2 - p1
    b2 = p3 - p2

    # normalize b1 so that it does not influence magnitude of vector
    # rejections that come next
    b1 /= np.linalg.norm(b1)

    # vector rejections
    # v = projection of b0 onto plane perpendicular to b1
    #   = b0 minus component that aligns with b1
    # w = projection of b2 onto plane perpendicular to b1
    #   = b2 minus component that aligns with b1
    v = b0 - np.dot(b0, b1)*b1
    w = b2 - np.dot(b2, b1)*b1

    # angle between v and w in a plane is the torsion angle
    # v and w may not be normalized but that's fine since tan is y/x
    x = np.dot(v, w)
    y = np.dot(np.cross(b1, v), w)
    return np.degrees(np.arctan2(y, x))
d6c1e2e0f1f4eb0fd10d34040371e1ba7dda8514
33,880
def mangle_varname(s):
    """Append underscores to ensure that `s` is not a reserved Python keyword."""
    while s in _PYTHON_RESERVED_KEYWORDS:
        s += "_"
    return s
e679a0ff33c7df91c73f38c826c8f1a23dc185d9
33,881
def standard_prediction_error(x_data, y_data):
    """Return function to calculate standard prediction error.

    The standard prediction error of a linear regression is the error when
    predicting a new value which is not in the original data.

    Parameters
    ----------
    x_data : numpy.array
        x coordinates of the points.
    y_data : numpy.array
        y coordinates of the points.

    Returns
    -------
    callable
        Standard prediction error function for new x values.
    """
    _check_input_arrays(x_data, y_data)
    reg = stats.linregress(x_data, y_data)
    y_estim = reg.slope * x_data + reg.intercept
    n_data = x_data.shape[0]
    see = np.sqrt(np.sum(np.square(y_data - y_estim)) / (n_data - 2))
    x_mean = np.mean(x_data)
    ssx = np.sum(np.square(x_data - x_mean))

    def spe(x_new):
        """Return standard prediction error."""
        return see * np.sqrt(1.0 + 1.0 / n_data + (x_new - x_mean)**2 / ssx)

    return np.vectorize(spe)
e11497c9c4385a07cdabab2b8ad72273f237fdfb
33,882
import copy


def update_dict(original, new):
    """
    Update nested dictionary (dictionary possibly containing dictionaries)

    If a field is present in new and original, take the value from new.
    Fields present in new but not in original are not inserted.

    :param original: source dictionary
    :type original: dict
    :param new: dictionary to take new values from
    :type new: dict
    :return: updated dictionary
    :rtype: dict
    """
    updated = copy.deepcopy(original)
    for key, value in original.items():
        if key in new.keys():
            if isinstance(value, dict):
                updated[key] = update_dict(value, new[key])
            else:
                updated[key] = new[key]
    return updated
1608d28321d294943f4c955e42939b054966751f
33,883
from typing import Tuple
from pathlib import Path

import yaml


def parse() -> Tuple[Response, int]:
    """
    Parse the inputs of a workflow.
    """
    req_data = yaml.safe_load(request.get_data().decode("utf-8"))
    wf_location = req_data.get("wf_location", None)
    wf_content = req_data.get("wf_content", None)
    if wf_location is None and wf_content is None:
        return jsonify({"message": "Missing arguments"}), 400
    if wf_location is not None:
        inputs = wf_location_to_inputs(wf_location.strip())
    elif wf_content is not None:
        wf_obj = load_document_by_string(wf_content, uri=Path.cwd().as_uri())  # noqa: E501
        inputs = Inputs(wf_obj)
    res = jsonify(inputs.as_dict())
    res.headers["Access-Control-Allow-Origin"] = "*"
    return res, 200
a45694c7498b36acc08353a379db665ad79480cd
33,884
def try_which():
    """
    Locate hmmsearch on path, if possible
    :return:
    """
    try:
        return str(local["which"]["hmmsearch"]()).rstrip("\r\n")
    except ProcessExecutionError:
        return "None"
5360db37bf6f31ee560fefda8897688dcef5d739
33,885
def getCountryClubCreditMultiplier(countryClubId):
    """
    Returns the skill credit multiplier for a particular country club.
    countryClubId is the country-club-interior zone defined in ToontownGlobals.py.
    """
    return {
        BossbotCountryClubIntA: 2.,
        BossbotCountryClubIntB: 2.5,
        BossbotCountryClubIntC: 3.,
    }.get(countryClubId, 1.)
c5ac7f3a9198beeeabd1390843449cf0dc594aef
33,886
def get_world_rank() -> int:
    """Get the world rank of this worker.

    .. code-block:: python

        import time
        from ray.air import session

        def train_loop_per_worker():
            for iter in range(100):
                time.sleep(1)
                if session.get_world_rank() == 0:
                    print("Worker 0")

        train_dataset = ray.data.from_items(
            [{"x": x, "y": x + 1} for x in range(32)])
        trainer = TensorflowTrainer(train_loop_per_worker,
            scaling_config={"num_workers": 1},
            datasets={"train": train_dataset})
        trainer.fit()
    """
    session = _get_session()
    if not isinstance(session, _TrainSessionImpl):
        raise RuntimeError(
            "`get_world_rank` can only be called for TrainSession! "
            "Make sure you only use that in `train_loop_per_worker` function"
            "that is passed into `DataParallelTrainer`."
        )
    return session.world_rank
ab62942a269249f13c40d060fd597b62c473732f
33,887
def solve():
    """
    Replace this with a nice docstring that describes what this function is supposed to do.
    :return: The answer required.
    """
    return -1
f054515e7bb23bb84ecfb1847410fa111ec431c6
33,888
def read_data(files, poscount, locidx):
    """Builds input data from a files list."""
    locs = []    # One element per file; each is a list of location indexes.
    vals = []    # One element per file; each is a parallel list of values.
    labels = []  # One element per file: true for '.s', false for '.f'.
    for fname in files:
        flocs = np.zeros(poscount, np.uint64)
        fvals = np.zeros((poscount, 1), np.float64)
        with open(fname) as f:
            for (p, (v, l)) in enumerate(logparse(f)):
                idx = locidx.get_index(l)
                if idx:
                    flocs[p] = idx
                    fvals[p] = v
        locs.append(flocs)
        vals.append(fvals)
        labels.append(fname.endswith('.s'))
    return np.array(locs), np.array(vals), np.array(labels)
6e64b8a445134b78498aefa9b8a87893ae16b35f
33,889
def deterministic_dynamics(init_cond, dt, num_steps, init_time=0.0):
    """
    Uses naive Euler's method: x(t+dt) = x_k + F(x_k, t_k) * dt
    """
    # prep arrays
    states = np.zeros((num_steps, STATE_DIM))
    times = np.zeros(num_steps)
    # fill init cond
    states[0, :] = init_cond
    times[0] = init_time
    for step in range(1, num_steps):
        states[step, :] = states[step-1, :] + deterministic_term(states, step-1) * dt
        times[step] = times[step-1] + dt
    return states, times
8d9ec7bc99dfb51b03d9c5844de233d59fc8945c
33,890
def get_scope_and_reuse_disc(network_id, layer_id, num_separate_layers):
    """Return the scope and reuse flag.

    Args:
        network_id: an integer as the network index.
        layer_id: an integer as the index of the layer.
        num_separate_layers: an integer as how many layers are independent.
    Return:
        scope: a string as the scope.
        reuse: a boolean as the reuse flag.
    """
    if network_id == 1:
        if layer_id < num_separate_layers:
            scope = 'd1_encoder_{}'.format(layer_id)
        else:
            scope = 'd_shared_encoder_{}'.format(layer_id)
        reuse = False
    elif network_id == 2:
        if layer_id < num_separate_layers:
            scope = 'd2_encoder_{}'.format(layer_id)
            reuse = False
        else:
            scope = 'd_shared_encoder_{}'.format(layer_id)
            reuse = True
    return scope, reuse
8a52cdf4af4a6d545c172565f8c2151176572076
33,891
import logging


def groups_list(access_token):
    """List FlexVM Groups"""
    logging.info("--> List FlexVM Groups...")
    uri = FLEXVM_API_BASE_URI + "groups/list"
    headers = COMMON_HEADERS.copy()
    headers["Authorization"] = f"Bearer {access_token}"
    results = requests_post(uri, "", headers)
    return results
7ec8a362e88349b28279f12d32319b9b1d8b5a45
33,892
def format_route(raw_route):
    """Cleans and formats route list into valid REXX input.

    Arguments:
        raw_route {list} -- user input for route

    Returns:
        {bool} -- Flag indicating valid route.
        {str} -- Error message.
        {list} -- List of validated routes.
    """
    raw_route_list = raw_route
    if isinstance(raw_route, str):
        raw_route_list = list(raw_route)
    if raw_route_list:
        delimiter = ","
        route_list = []
        for r in raw_route_list:
            if r.isalnum():
                route_list.append(r.strip())
            else:
                is_valid = False
                error_msg = em.INVALID_ROUTE_MSG
                return is_valid, error_msg, None
        is_valid = True
        return is_valid, None, delimiter.join(route_list)
    return True, None, None
c23708f8384d2947ad3a161bbfd33f189446f424
33,895
def gamma_lnpdf(x, shape, rate):
    """ shape/rate formulation on wikipedia """
    coef = shape * np.log(rate) - gammaln(shape)
    dterm = (shape - 1.) * np.log(x) - rate * x
    return coef + dterm
692ca281ea51f2f01ecddfb59eb16fcf6379f2c2
33,896
from functools import reduce


def get_total_variation(variable_img, shape, smoothing=1.5):
    """Compute total variation regularization loss term given a variable image (x) and its shape.

    Args:
        variable_img: 4D tensor representing the variable image
        shape: list representing the variable image shape
        smoothing: smoothing parameter for penalizing large variations

    Returns:
        variation: float tensor representing the total variation for a given image
    """
    with tf.name_scope('get_total_variation'):
        # Get the dimensions of the variable image
        height = shape[1]
        width = shape[2]
        size = reduce(lambda a, b: a * b, shape) ** 2

        # Disjoin the variable image and evaluate the total variation
        x_cropped = variable_img[:, :height - 1, :width - 1, :]
        left_term = tf.square(variable_img[:, 1:, :width - 1, :] - x_cropped)
        right_term = tf.square(variable_img[:, :height - 1, 1:, :] - x_cropped)
        smoothed_terms = tf.pow(left_term + right_term, smoothing / 2.)
        variation = tf.reduce_sum(smoothed_terms) / size

    return variation
83e5135bf9cba692a5fa40dae8a037f0a1749370
33,897
def dict_filter(d, keep):
    """
    Remove all keys from dict except those specified in list 'keep'.
    Recurses over values to remove keys from nested dictionaries

    :param d: Dictionary from which to select key,value pairs
    :type d: dict
    :param keep: Keys to select
    :type keep: list
    :returns: dictionary with key,value pairs selected from d where key is in the keep list
    """
    assert type(keep) is list
    if isinstance(d, dict):
        # recursively call for nested dicts
        return {
            key: dict_filter(value, keep)
            for key, value in d.items()
            if key in keep
        }
    return d
20d4e5b86558be95d3b5cb31525407cbf98be3c1
33,898
def tf_depthwise_conv2d(input, w):
    """Two-dimensional depthwise convolution using TF.

    Params same as in depthwise_conv2d.
    """
    input_4d = tf.reshape(tf.constant(input, dtype=tf.float32),
                          [1, input.shape[0], input.shape[1], input.shape[2]])
    # Set channel_multiplier dimension to 1
    kernel_4d = tf.reshape(tf.constant(w, dtype=tf.float32),
                           [w.shape[0], w.shape[1], w.shape[2], 1])
    output = tf.nn.depthwise_conv2d(input_4d, kernel_4d,
                                    strides=[1, 1, 1, 1], padding='SAME')
    with tf.Session() as sess:
        ans = sess.run(output)
    # Remove the degenerate batch dimension, since we use batch 1.
    return ans.reshape(input.shape)
c8b78da33463271d3c42a401f68d09c961953972
33,899
def parse(puzzle_input):
    """Parse input"""
    return [tuple(line.split()) for line in puzzle_input.split('\n')]
42cb62348c5a6c9893480e71db7b60a6053ea4d0
33,900
def set_new_pw_extra_security_phone(email_code: str, password: str, phone_code: str) -> FluxData:
    """
    View that receives an emailed reset password code, an SMS'ed reset password code,
    and a password, and sets the password as credential for the user, with extra security.

    Preconditions required for the call to succeed:
    * A PasswordResetEmailAndPhoneState object in the password_reset_state_db keyed by the received codes.
    * A flag in said state object indicating that the emailed code has already been verified.

    As side effects, this view will:
    * Compare the received password with the hash in the session to mark it accordingly (as suggested or as custom);
    * Revoke all password credentials the user had;

    This operation may fail due to:
    * The codes do not correspond to a valid state in the db;
    * Any of the codes have expired;
    * No valid user corresponds to the eppn stored in the state;
    * Communication problems with the VCCS backend;
    * Synchronization problems with the central user db.
    """
    try:
        context = get_context(email_code=email_code)
    except StateException as e:
        return error_response(message=e.msg)

    if not isinstance(context.state, ResetPasswordEmailAndPhoneState):
        raise TypeError(f'State is not ResetPasswordEmailAndPhoneState ({type(context.state)})')

    if phone_code == context.state.phone_code.code:
        if not verify_phone_number(context.state):
            current_app.logger.info(f'Could not verify phone code for user {context.user}')
            return error_response(message=ResetPwMsg.phone_invalid)

        current_app.logger.info(f'Phone code verified for user {context.user}')
        current_app.stats.count(name='extra_security_phone_success')
    else:
        current_app.logger.info(f'Could not verify phone code for user {context.user}')
        return error_response(message=ResetPwMsg.unknown_phone_code)

    return reset_user_password(user=context.user, state=context.state, password=password)
cc695d439d4c0bb6e7ebc6dff9a1049d948a3dd0
33,901
from getpass import getpass


def get_driver_and_zones(driver_name, account_name):
    """ Get the DNS driver, authenticate, and get some zones. """
    secret_site = "libcloud/" + driver_name
    cls = get_driver(driver_name)
    pw = get_password(secret_site, account_name)
    if not pw:
        pw = getpass("Password:")
    while True:
        try:
            dns = cls(account_name, pw)
            zones = dns.list_zones()
        except InvalidCredsError:
            pw = getpass("Password:")
        else:
            set_password(secret_site, account_name, pw)
            return dns, zones
9217ff7082dbcd79154e278d97d47fecbaa574e9
33,902
def normalize(seed_url, link):
    """Normalize this URL by removing hash and adding domain
    """
    link, _ = urldefrag(link)
    return urljoin(seed_url, link)
7e4d5bfbef2cb92869718d0d21acd86a5529aa1b
33,903
def multi_lab_segmentation_dilate_1_above_selected_label(arr_segm, selected_label=-1,
                                                         labels_to_dilate=(), verbose=2):
    """
    The order of the labels to dilate counts.
    :param arr_segm:
    :param selected_label:
    :param labels_to_dilate: if None all labels are dilated, in ascending order (algorithm is NOT order invariant).
    :param verbose:
    :return:
    """
    answer = np.copy(arr_segm)
    if labels_to_dilate == ():
        labels_to_dilate = sorted(list(set(arr_segm.flat) - {selected_label}))

    num_labels_dilated = 0
    for l in labels_to_dilate:
        if verbose > 1:
            print('Dilating label {} over hole-label {}'.format(l, selected_label))
        selected_labels_mask = np.zeros_like(answer, dtype=bool)
        selected_labels_mask[answer == selected_label] = 1
        bin_label_l = np.zeros_like(answer, dtype=bool)
        bin_label_l[answer == l] = 1
        dilated_bin_label_l = ndimage.morphology.binary_dilation(bin_label_l)
        dilation_l_over_selected_label = dilated_bin_label_l * selected_labels_mask
        answer[dilation_l_over_selected_label > 0] = l
        num_labels_dilated += 1

    if verbose > 0:
        print('Number of labels_dilated: {}\n'.format(num_labels_dilated))

    return answer
fbc4f0a93cd9d80ef1f1cae23e79612881bcf5da
33,904
from datetime import datetime


def POST(request):
    """Add a new Topology to the specified project and return it"""
    request.check_required_parameters(path={'projectId': 'string'},
                                      body={'topology': {'name': 'string'}})

    project = Project.from_id(request.params_path['projectId'])
    project.check_exists()
    project.check_user_access(request.google_id, True)

    topology = Topology({
        'projectId': project.get_id(),
        'name': request.params_body['topology']['name'],
        'rooms': request.params_body['topology']['rooms'],
    })
    topology.insert()

    project.obj['topologyIds'].append(topology.get_id())
    project.set_property('datetimeLastEdited', Database.datetime_to_string(datetime.now()))
    project.update()

    return Response(200, 'Successfully inserted topology.', topology.obj)
9ee233a708b2093ee482bb3e80f25c70f8eed4ae
33,906
import ray


def deconvolve_channel(channel):
    """Deconvolve a single channel."""
    y_pad = jax.device_put(ray.get(y_pad_list)[channel])
    psf = jax.device_put(ray.get(psf_list)[channel])
    mask = jax.device_put(ray.get(mask_store))
    M = linop.Diagonal(mask)
    C0 = linop.CircularConvolve(
        h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5  # forward operator
    )
    C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True)  # gradient operator
    C2 = linop.Identity(mask.shape)  # identity operator
    g0 = loss.SquaredL2Loss(y=y_pad, A=M)  # loss function (forward model)
    g1 = λ * functional.L21Norm()  # TV penalty (when applied to gradient)
    g2 = functional.NonNegativeIndicator()  # non-negativity constraint
    if channel == 0:
        print("Displaying solver status for channel 0")
        display = True
    else:
        display = False
    solver = ADMM(
        f=None,
        g_list=[g0, g1, g2],
        C_list=[C0, C1, C2],
        rho_list=[ρ0, ρ1, ρ2],
        maxiter=maxiter,
        itstat_options={"display": display, "period": 10, "overwrite": False},
        x0=y_pad,
        subproblem_solver=CircularConvolveSolver(),
    )
    x_pad = solver.solve()
    x = x_pad[: yshape[0], : yshape[1], : yshape[2]]
    return (x, solver.itstat_object.history(transpose=True))
fc861fb6df2caa6a9dddce14d380c519f6bb84c1
33,907
def sell():
    """Sell shares of stock"""
    if request.method == "POST":
        symbol = request.form.get("symbol").upper()
        shares = request.form.get("shares")
        stock = lookup(symbol)
        if (stock == None) or (symbol == ''):
            return apology("Stock was not found.")
        elif not shares.isdigit():
            return apology("The number of shares must be an integer.")
        elif int(shares) < 0:
            return apology("Shares value must be a positive integer.")
        else:
            rows = db.execute("""
                SELECT Symbol, SUM(Shares) AS Shares
                FROM transactions
                WHERE user_id=:user_id
                GROUP BY Symbol
                HAVING Shares > 0
            """, user_id=session["user_id"])
            for row in rows:
                if row["Symbol"] == symbol:
                    if int(shares) > row["Shares"]:
                        return apology("Shares entered are greater than what you actually have.")
            rows = db.execute("SELECT cash FROM users WHERE id=:id;", id=session["user_id"])
            cash = rows[0]["cash"]
            updated_cash = cash + (int(shares) * stock["price"])
            if updated_cash < 0:
                return apology("Insufficient balance.")
            db.execute("UPDATE users SET cash=:updated WHERE id=:id;",
                       updated=updated_cash, id=session["user_id"])
            db.execute("INSERT INTO transactions (user_id, symbol, shares, price) VALUES (:user_id, :symbol, :shares, :price)",
                       user_id=session["user_id"],
                       symbol=stock["symbol"],
                       shares=-1 * int(shares),
                       price=stock["price"])
            flash("Sold!")
            return redirect("/")
    else:
        rows = db.execute("""
            SELECT Symbol
            FROM transactions
            WHERE user_id=:user_id
            GROUP BY Symbol
            HAVING SUM(Shares) > 0
        """, user_id=session["user_id"])
        return render_template("sell.html", symbols=[row["Symbol"] for row in rows])
3328fba46393455be0baecffb05fdbf4d1a5c770
33,909
def _compute_third(first, second):
    """ Compute a third coordinate given the other two """
    return -first - second
57ea03c71f13f3847d4008516ec8f0f5c02424af
33,910
def decipher(criptotext):
    """
    Deciphers the message, recovering the plaintext, provided it was
    encrypted with XOR.

    Parameter:
        criptotext -- the message to decipher.
    """
    messagedecrip = ""
    for elem in criptotext:
        code = ord(elem) ^ 1
        messagedecrip += chr(code)
    return messagedecrip
c90fc56fda9e65690a0a03ea7f33008883feb3f4
33,911
def mag(initial, final):
    """ calculate magnification for a value """
    return float(initial) / float(final)
ab996ee84ff588ce41086927b4da1a74e164278a
33,912
def fetch_ids(product: str, use_usgs_ftp: bool = False) -> [str]:
    """Returns all ids for the given product."""
    if use_usgs_ftp:
        return _fetch_ids_from_usgs_ftp(product)
    else:
        return _fetch_ids_from_aws(product)
10af69c7fe77255ff955c2f97b6056b41cd51d59
33,913
def to_be_implemented(request):
    """
    A notice letting the user know that this particular feature hasn't been
    implemented yet.
    """
    pagevars = {
        "page_title": "To Be Implemented...",
    }

    return render(request, 'tbi.html', pagevars)
4ee786b35589a94c0ccb8fe20bae368a594b44f1
33,914
def ganache_second_account(smart_contracts_dir: str):
    """
    Returns the second ganache account.
    Useful for doing transfers so you can transfer to an ethereum address that
    doesn't have anything to do with paying gas fees.
    """
    return ganache_accounts(smart_contracts_dir)["accounts"][1].lower()
8845f0445c37f48f782dd72ce4be6a31d17502d6
33,916
def _parse_list_of_lists(string, delimiter_elements=',', delimiter_lists=':',
                         delimiter_pipelines=';', dtype=float):
    """
    Parses a string that contains single or multiple lists.

    Args:
        delimiter_elements <str>: delimiter between inner elements of a list.
        delimiter_lists <str>: delimiter between lists.
        delimiter_pipelines <str>: delimiter between different pipelines.

    Returns:
        new_list <list>: parsed list of configuration parameters.
    """
    new_list = []
    for sub_list in string.strip().replace(' ', '').split(delimiter_pipelines):
        if delimiter_lists in sub_list:
            new_list.append([_parse_list(item, dtype=dtype, delimiter=delimiter_elements)
                             for item in sub_list.split(delimiter_lists)])
        else:
            new_list.append(_parse_list(sub_list, dtype=dtype, delimiter=delimiter_elements))
    return new_list
d0d14efba74863ec95245255ca10db48fcfb7a01
33,917
from typing import List


def get_available_dictionaries() -> List[str]:
    """
    Return a list of all available dictionaries

    Returns
    -------
    List[str]
        Saved dictionaries
    """
    return get_available_models("dictionary")
37c2b882fc443593a45329a2ddd44c517979c342
33,918
import requests


def get_unscoped_token(os_auth_url, access_token, username, tenant_name):
    """ Get an unscoped token from an access token """
    url = get_keystone_url(
        os_auth_url,
        '/v3/OS-FEDERATION/identity_providers/%s/protocols/%s/auth' % (username, tenant_name))
    response = requests.post(url,
                             headers={'Authorization': 'Bearer %s' % access_token})
    if 'X-Subject-Token' in response.headers:
        return response.headers['X-Subject-Token']
    return None
252990f59f4bc254337dc0f7e13583b7a383d315
33,919
import numpy


def _pfa_check_stdeskew(PFA, Grid):
    """
    Parameters
    ----------
    PFA : sarpy.io.complex.sicd_elements.PFA.PFAType
    Grid : sarpy.io.complex.sicd_elements.Grid.GridType

    Returns
    -------
    bool
    """
    if PFA.STDeskew is None or not PFA.STDeskew.Applied:
        return True

    cond = True
    if Grid.TimeCOAPoly is not None:
        timecoa_poly = Grid.TimeCOAPoly.get_array(dtype='float64')
        if timecoa_poly.shape == (1, 1) or numpy.all(timecoa_poly.flatten()[1:] < 1e-6):
            PFA.log_validity_error(
                'PFA.STDeskew.Applied is True, and the Grid.TimeCOAPoly is essentially constant.')
            cond = False

    # the Row DeltaKCOAPoly and STDSPhasePoly should be essentially identical
    if Grid.Row is not None and Grid.Row.DeltaKCOAPoly is not None and \
            PFA.STDeskew.STDSPhasePoly is not None:
        stds_phase_poly = PFA.STDeskew.STDSPhasePoly.get_array(dtype='float64')
        delta_kcoa = Grid.Row.DeltaKCOAPoly.get_array(dtype='float64')
        rows = max(stds_phase_poly.shape[0], delta_kcoa.shape[0])
        cols = max(stds_phase_poly.shape[1], delta_kcoa.shape[1])
        exp_stds_phase_poly = numpy.zeros((rows, cols), dtype='float64')
        exp_delta_kcoa = numpy.zeros((rows, cols), dtype='float64')
        exp_stds_phase_poly[:stds_phase_poly.shape[0], :stds_phase_poly.shape[1]] = stds_phase_poly
        exp_delta_kcoa[:delta_kcoa.shape[0], :delta_kcoa.shape[1]] = delta_kcoa

        if numpy.max(numpy.abs(exp_delta_kcoa - exp_stds_phase_poly)) > 1e-6:
            PFA.log_validity_warning(
                'PFA.STDeskew.Applied is True,\n'
                'and the Grid.Row.DeltaKCOAPoly ({}) and PFA.STDeskew.STDSPhasePoly ({})\n'
                'are not in good agreement.'.format(delta_kcoa, stds_phase_poly))
            cond = False
    return cond
987c492e1210114bf8eb129f60711f280b116a75
33,920
import json


def get_peers_for_info_hash_s3(
        info_hash,
        limit=50):
    """
    Get current peers, S3.
    """
    remote_object = s3.Object(BUCKET_NAME, info_hash + '/peers.json').get()
    content = remote_object['Body'].read().decode('utf-8')
    torrent_info = json.loads(content)
    return torrent_info['peers']
d1e0ee2112e399d76bdf31774abbf32dbca31526
33,922
def RGB_to_Lab(RGB, colourspace):
    """
    Converts given *RGB* value from given colourspace to *CIE Lab* colourspace.

    Parameters
    ----------
    RGB : array_like
        *RGB* value.
    colourspace : RGB_Colourspace
        *RGB* colourspace.

    Returns
    -------
    ndarray
        *CIE Lab* colourspace value.
    """
    return XYZ_to_Lab(
        RGB_to_XYZ(np.array(RGB),
                   colourspace.whitepoint,
                   ILLUMINANTS.get('CIE 1931 2 Degree Standard Observer').get('E'),
                   colourspace.to_XYZ,
                   'Bradford',
                   colourspace.cctf_decoding),
        colourspace.whitepoint)
6942134980f1b0e6ca37276c1d2becec23ba3a2f
33,923
def ltl2ba(formula):
    """Convert LTL formula to Buchi Automaton using ltl2ba.

    @type formula: `str(formula)` must be admissible ltl2ba input

    @return: Buchi automaton whose edges are annotated
        with Boolean formulas as `str`
    @rtype: [`Automaton`]
    """
    ltl2ba_out = ltl2baint.call_ltl2ba(str(formula))
    symbols, g, initial, accepting = parser.parse(ltl2ba_out)
    ba = Automaton('Buchi', alphabet=symbols)
    ba.add_nodes_from(g)
    ba.add_edges_from(g.edges(data=True))
    ba.initial_nodes = initial
    ba.accepting_sets = accepting
    logger.info('Resulting automaton:\n\n{ba}\n'.format(ba=ba))
    return ba
289178f071675cf62546b4403dc8751540c5633f
33,924
def count_org_active_days(odf_day):
    """Return count of active days in org history"""
    odf_not_null = org_active_days(odf_day)
    return len(odf_not_null)
13733482dd0a4dcad5cf7a7eb28add145a08dc2f
33,925
import asyncio
import functools


async def update_zigbee_firmware(host: str, custom: bool):
    """Update zigbee firmware for both ZHA and zigbee2mqtt modes"""
    sh = TelnetShell()
    try:
        if not await sh.connect(host) or not await sh.run_zigbee_flash():
            return False
    except:
        pass
    finally:
        await sh.close()

    await asyncio.sleep(0.5)

    args = (
        [
            host,
            [8115, 8038],
            NCP_URL % "mgl03_ncp_6_7_10_b38400_sw.gbl",
            "v6.7.10",
            8038,
        ]
        if custom
        else [
            host,
            [8115, 8038],
            NCP_URL % "ncp-uart-sw_mgl03_6_6_2_stock.gbl",
            "v6.6.2",
            8115,
        ]
    )

    for _ in range(3):
        if await utils.run_blocking(functools.partial(flash_zigbee_firmware, *args)):
            return True
    return False
00fe85632f55fe4a853ab55f9c2333263b3f6f86
33,926
def underdog(df):
    """
    Filter the dataframe of game data on underdog wins
    (games where the team with lower odds won).

    Returns: tuple (string reason, pd dataframe)
    """
    reason = 'underdog'
    filt = df.loc[df['winningOdds'] < 0.46]
    filt = filt.sort_values(['runDiff', 'winningScore'], ascending=[False, False])
    return (reason, filt)
11e8f54d5deb1d61b2feaac163c6c896839a9af8
33,927
def plot_sino_coverage(theta, h, v, dwell=None, bins=[16, 8, 4],
                       probe_grid=[[1]], probe_size=(0, 0)):
    """Plots projections of minimum coverage in the sinogram space."""
    # Wrap theta into [0, pi)
    theta = theta % (np.pi)
    # Set default dwell value
    if dwell is None:
        dwell = np.ones(theta.shape)
    # Make sure probe_grid is array
    probe_grid = np.asarray(probe_grid)
    # Create one ray for each pixel in the probe grid
    dh, dv = np.meshgrid(
        np.linspace(0, probe_size[0], probe_grid.shape[0], endpoint=False)
        + probe_size[0] / probe_grid.shape[0] / 2,
        np.linspace(0, probe_size[1], probe_grid.shape[1], endpoint=False)
        + probe_size[1] / probe_grid.shape[1] / 2,
    )
    dh = dh.flatten()
    dv = dv.flatten()
    probe_grid = probe_grid.flatten()
    H = np.zeros(bins)
    for i in range(probe_grid.size):
        if probe_grid[i] > 0:
            # Compute histogram
            sample = np.stack([theta, h + dh[i], v + dv[i]], axis=1)
            dH, edges = np.histogramdd(sample, bins=bins,
                                       range=[[0, np.pi], [-.5, .5], [-.5, .5]],
                                       weights=dwell * probe_grid[i])
            H += dH
    ideal_bin_count = np.sum(dwell) * np.sum(probe_grid) / np.prod(bins)
    H /= ideal_bin_count
    # Plot
    ax1a = plt.subplot(1, 3, 2)
    plt.imshow(np.min(H, axis=0).T, vmin=0, vmax=2, origin="lower", cmap=plt.cm.RdBu)
    ax1a.axis('equal')
    plt.xticks(np.array([0, bins[1]/2, bins[1]]) - 0.5, [-.5, 0, .5])
    plt.yticks(np.array([0, bins[2]/2, bins[2]]) - 0.5, [-.5, 0, .5])
    plt.xlabel("h")
    plt.ylabel("v")
    ax1b = plt.subplot(1, 3, 3)
    plt.imshow(np.min(H, axis=1).T, vmin=0, vmax=2, origin="lower", cmap=plt.cm.RdBu)
    ax1b.axis('equal')
    plt.xlabel('theta')
    plt.ylabel("v")
    plt.xticks(np.array([0, bins[0]]) - 0.5, [0, r'$\pi$'])
    plt.yticks(np.array([0, bins[2]/2, bins[2]]) - 0.5, [-.5, 0, .5])
    ax1c = plt.subplot(1, 3, 1)
    plt.imshow(np.min(H, axis=2), vmin=0, vmax=2, origin="lower", cmap=plt.cm.RdBu)
    ax1c.axis('equal')
    plt.ylabel('theta')
    plt.xlabel("h")
    plt.yticks(np.array([0, bins[0]]) - 0.5, [0, r'$\pi$'])
    plt.xticks(np.array([0, bins[1]/2, bins[1]]) - 0.5, [-.5, 0, .5])
    return H
0179fe192343dcb4ad25297b12981fa2049911f7
33,928
def divisors(n):
    """Returns all the divisors of n"""
    if n == 1:
        return 1
    factors = list(distinct_factors(n))
    length = int(log(n, min(factors))) + 1
    comb = [item for item in product(list(range(length + 1)), repeat=len(factors))]
    result = []
    for e in comb:
        tmp = []
        for p, c in zip(factors, e):
            tmp.append(int(pow(p, c)))
        m = mult(tmp)
        if m <= n // 2 and n % m == 0:
            result.append(m)
    result = list(set(result))
    result.append(n)
    result.sort()
    return result
ba6739d65e04c354fc8b52b592ba10e5502f407d
33,929
import itertools


def compute_features_levels(features, base_level=0):
    """Adapted from dnafeaturesviewer, see
    https://github.com/Edinburgh-Genome-Foundry/DnaFeaturesViewer
    Author: Zulko

    Compute the vertical levels on which the features should be displayed
    in order to avoid collisions.

    `features` must be a list of `dna_features_viewer.GraphicFeature`.

    The method used is basically a graph coloring:
    - The nodes of the graph are features and they will be colored with a level
    - Two nodes are neighbors iff their features's locations overlap
    - Levels are attributed to nodes iteratively starting with the nodes
      corresponding to the largest features.
    - A node receives the lowest level (starting at 0) that is not already
      the level of one of its neighbors.
    """
    edges = [
        (f1, f2)
        for f1, f2 in itertools.combinations(features, 2)
        if f1.overlaps_with(f2)
    ]
    graph = Graph(features, edges)
    levels = {n: None for n in graph.nodes}

    def collision(base_level, node):
        """Return True if the node placed at base_level collides with
        its neighbors in the graph."""
        for neighbor in graph.neighbors[node]:
            level = levels[neighbor]
            if level is None:
                continue
            # nlines ????
            # if 'nlines' in neighbor.data:
            #     top = numpy.ceil(level + 0.5 * neighbor.data['nlines'])
            #     if level <= base_level < top:
            #         return True
            #     top = numpy.ceil(base_level + 0.5 * node.data['nlines'])
            #     if base_level <= level < top:
            #         return True
            # else:
            if level == base_level:
                return True
        return False

    for node in sorted(graph.nodes, key=lambda f: -f.length):
        while collision(base_level, node):
            base_level += 1
        levels[node] = base_level
    return levels
93db63a854c2cf237a46d971286bb07feec4c3c1
33,930
def normalize_answer(s):
    """Lower text and remove extra whitespace."""

    def remove_articles(text):
        return re_art.sub(' ', text)

    def remove_punc(text):
        return re_punc.sub(' ', text)  # convert punctuation to spaces

    def white_space_fix(text):
        return ' '.join(text.split())

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
9c0c378ae6da3a81c88cb06795ae8f2b2c6eb656
33,931
def bq_db_dtype_to_dtype(db_dtype: str) -> StructuredDtype:
    """
    Given a db_dtype as returned by BigQuery, parse this to an instance-dtype.

    Note: We don't yet support Structs with unnamed fields (e.g. 'STRUCT<INT64>' is not supported).

    :param db_dtype: BigQuery db-dtype, e.g. 'STRING', or 'STRUCT<column_name INT64>', etc
    :return: Instance dtype, e.g. 'string', or {'column_name': 'int64'}
    """
    bq_db_dtype_to_series = get_all_db_dtype_to_series()[DBDialect.BIGQUERY]
    scalar_mapping = {db_dtype: series.dtype for db_dtype, series in bq_db_dtype_to_series.items()}
    tokens = _tokenize(db_dtype)
    pos, result = _tokens_to_dtype(tokens=tokens, pos=0, scalar_mapping=scalar_mapping)
    if pos != len(tokens) - 1:
        raise ValueError(f'Unexpected tokens after last parsed tokens. '
                         f'Last parsed tokens position: {pos}, total tokens: {len(tokens)}')
    return result
5207c3eabaf424580f481d4433c8bf7716af59db
33,932
def getPixelSize(lat, latsize, lonsize):
    """
    Get the pixel size (in m) based on latitude and pixel size in degrees
    """
    # Set up parameters for ellipse
    # Semi-major and semi-minor for WGS-84 ellipse
    ellipse = [6378137.0, 6356752.314245]
    radlat = np.deg2rad(lat)
    Rsq = (ellipse[0]*np.cos(radlat))**2 + (ellipse[1]*np.sin(radlat))**2
    Mlat = (ellipse[0]*ellipse[1])**2 / (Rsq**1.5)
    Nlon = ellipse[0]**2 / np.sqrt(Rsq)
    xsize = np.pi/180 * np.cos(radlat) * Nlon * lonsize
    ysize = np.pi/180 * Mlat * latsize
    return xsize, ysize
3df8e6d17f377493e469fb01e6657eec464ca632
33,933
def success_response(message, code=200):
    """Returns a JSON response containing `success_message` with a given success code"""
    return jsonify({'success_message': message}), code
64d3e46872fc17abb0825e8345bfc9929f97e74d
33,934
def sensorsuite(w, q, Parameters):
    """ Measurements by onboard sensors """
    w_actual = w
    q_actual = q
    Q_actual = utils.quat_to_rot(q_actual)
    bias = np.array([0.5, -0.1, 0.2])  # np.zeros(3)

    # Measurement of reference directions by Sun Sensor
    rNSunSensor = Parameters['Sensors']['SunSensor']['rNSunSensor']
    COV_suns = Parameters['Sensors']['SunSensor']['COV_suns']
    # rotation from inertial to body!!!!
    rBSunSensor = Q_actual.T @ rNSunSensor + np.linalg.cholesky(COV_suns) @ np.random.randn(3)
    rBSunSensor = rBSunSensor / np.linalg.norm(rBSunSensor)

    # Measurement of reference directions by Magnetometer
    rNMagnetometer = Parameters['Sensors']['Magnetometer']['rNMagnetometer']
    COV_magn = Parameters['Sensors']['Magnetometer']['COV_magn']
    # rotation from inertial to body!!!!
    rBMagnetometer = Q_actual.T @ rNMagnetometer + np.linalg.cholesky(COV_magn) @ np.random.randn(3)
    rBMagnetometer = rBMagnetometer / np.linalg.norm(rBMagnetometer)

    # Measurement of attitude by Startracker
    COV_star = Parameters['Sensors']['Startracker']['COV_star']
    phi_noise = np.linalg.cholesky(COV_star) @ np.random.randn(3)  # noise introduced via phi parameterization
    quat_noise = utils.phi_to_quat(phi_noise)
    quat_noise = quat_noise / np.linalg.norm(quat_noise)
    qStarTracker = utils.qmult(q_actual, quat_noise)

    # Measurement by Gyroscope
    COV_rnd = Parameters['Sensors']['Gyroscope']['COV_rnd']
    COV_arw = Parameters['Sensors']['Gyroscope']['COV_arw']
    bias = bias + np.linalg.cholesky(COV_arw) @ np.random.randn(3)
    wGyro = w_actual + bias + np.linalg.cholesky(COV_rnd) @ np.random.randn(3)

    return rBSunSensor, rBMagnetometer, qStarTracker, wGyro, bias
cc78afe736ed61faed03e33a0354ece496ea82bb
33,935
import requests
import time


def get_task_response(taskurl):
    """Check a task url to get its status.
    Will return SUCCESS or FAILED, or timeout after 10 minutes (600s)
    if the task is still pending.

    Parameters
    ----------
    taskurl: str
        URL to ping

    Returns
    -------
    status: str
        SUCCESS or FAILURE
    """
    print('pinging task to check status...')
    requests.request("GET", taskurl)
    complete = False
    while not complete:
        task_response = requests.request("GET", taskurl)
        if 'upload' in taskurl:
            status = task_response.json()['upload_task_status']
        if 'update' in taskurl:
            status = task_response.json()['update_task_status']
        if status == "SUCCESS":
            complete = True
        if status == "FAILURE":
            complete = True
        time.sleep(5)
    return status, task_response.json()
470bad355fe0ce48081112e1b877524c6ba438d4
33,936
import tokenize


def align(gt, noise, gap_char=GAP_CHAR):
    """Align two text segments via sequence alignment algorithm

    **NOTE**: this algorithm is O(N^2) and is NOT efficient for longer text.
    Please refer to `genalog.text.anchor` for faster alignment on longer strings.

    Arguments:
        gt (str) : ground true text (should not contain GAP_CHAR)
        noise (str) : str with ocr noise (should not contain GAP_CHAR)
        gap_char (char, optional) : gap char used in alignment algorithm (default: GAP_CHAR)

    Returns:
        tuple(str, str) : a tuple of aligned ground truth and noise

    Invariants:
        The returned aligned strings will satisfy the following invariants:
            1. ``len(aligned_gt) == len(aligned_noise)``
            2. ``number of tokens in gt == number of tokens in aligned_gt``

    Example:
        ::

            gt:         "New York is big"       (num_tokens = 4)
            aligned_gt: "N@ew @@York @is big@@" (num_tokens = 4)
    """
    if not gt and not noise:  # Both inputs are empty string
        return "", ""
    elif not gt:  # Either is empty
        return gap_char * len(noise), noise
    elif not noise:
        return gt, gap_char * len(gt)
    else:
        num_gt_tokens = len(tokenize(gt))
        alignments = _align_seg(gt, noise, gap_char=gap_char)
        try:
            aligned_gt, aligned_noise, _, _, _ = _select_alignment_candidates(
                alignments, num_gt_tokens
            )
        except ValueError as e:
            raise ValueError(
                f"Error with input strings '{gt}' and '{noise}': \n{str(e)}"
            )
        return aligned_gt, aligned_noise
7634349d49163e19235a49b1937e4fa26b3c26bd
33,937
def get_similarity_order(labels, label_means, rank_proximity):
    """
    This function takes a dictionary of numeric data and returns the i-th closest data point.

    Parameters
    ----------
    base_statistics : dict {label: (mean, covariance)}
        each label is summarized by a mean and a covariance in the feature dimensions.
    rank_proximity : int
        for each label we order the means and select the rank_proximity-th
        closest mean and covariance

    Returns
    -------
    perturbation: dict {label: (mean_rank_proximity, cov_rank_proximity)}
        now each label is associated with another mean and covariance
    """
    rank = []
    for label in labels:
        dist = []
        for relative_key in labels:
            # dist holds every distance from key to all other relative keys
            dist.append(np.linalg.norm(label_means[label] - label_means[relative_key]))
        relative_dist_from_key = np.array(dist).argsort()
        rank.append(relative_dist_from_key[rank_proximity])
    # import pdb; pdb.set_trace()
    rank = np.array(rank)
    assert rank.size == labels.size, "rank should have the same number of items as labels"
    return rank
183024f39fa714e1e427d3795203c257c24bfddf
33,938
import json


def api_v1_votes_put():
    """Records user vote."""
    # extract and validate post data
    json_string = flask.request.form.get('vote', None)
    if not json_string:
        abort_user_error('Missing required parameter "vote".')
    vote = json.loads(json_string)
    post_uid = vote.get('uid')
    if not post_uid:
        abort_user_error('Missing required parameter "vote.uid".')
    value = vote.get('value')
    if not value:
        abort_user_error('Missing required parameter "vote.value".')

    def action(user, unused_roles):
        votes = dao.Votes()
        member_uid = get_uid_for(user)

        # record vote
        post, vote = votes.insert_vote(member_uid, post_uid, value)
        result = dao.posts_query_to_list(member_uid, [post], fill_votes=False,
                                         client=votes.client)[0]

        # update my_vote_value directly; it may not get picked up
        # due to indexed query being out of date
        result['my_vote_value'] = vote.value
        return result

    return with_user(action)
e8cc7b9239d43af9a9f86b67ab7c34f27c81e197
33,939
import collections


def interpolate(vertices, target_vertices, target_triangles):
    """ Interpolate missing data.

    Parameters
    ----------
    vertices: array (n_samples, n_dim)
        points of data set.
    target_vertices: array (n_query, n_dim)
        points to find interpolated texture for.
    target_triangles: array (n_query, 3)
        the mesh geometry definition.

    Returns
    -------
    interp_textures: array (n_query, n_feats)
        the interpolated textures.
    """
    interp_textures = collections.OrderedDict()
    graph = vertex_adjacency_graph(target_vertices, target_triangles)
    common_vertices = downsample(target_vertices, vertices)
    missing_vertices = set(range(len(target_vertices))) - set(common_vertices)
    for node in sorted(graph.nodes):
        if node in common_vertices:
            interp_textures[node] = [node] * 2
        else:
            node_neighs = [idx for idx in graph.neighbors(node)
                           if idx in common_vertices]
            node_weights = np.linalg.norm(
                target_vertices[node_neighs] - target_vertices[node], axis=1)
            interp_textures[node] = node_neighs
    return interp_textures
d1e3c1cf396f719ac8f68ada2cf4d672fc80f136
33,940
def parse_phot_table(table, rows):
    """ Retrieve filter information from the photometric file

    Parameters
    ----------
    table
        photometric table read from the LSST light curve file
    rows : slice
        range of rows for this SN

    Returns
    -------
    dict
        dictionary of filter data for the light curve
    """
    fitstable = table[rows]
    data = {}
    for filt in LSST_FILTERS:
        data[filt] = defaultdict(list)
    for row in fitstable:
        filt = row['BAND'].strip()  # was FLT
        if filt not in LSST_FILTERS:
            continue
        data[filt]['mjd'].append(row['MJD'])
        data[filt]['fluxcal'].append(row['FLUXCAL'])
        data[filt]['fluxcalerr'].append(row['FLUXCALERR'])
        data[filt]['photflag'].append(row['PHOTFLAG'])
    return data
8029a981a8670dc475ee2e29b4fa410e0255ad6d
33,941
def XOR(a: bool, b: bool) -> bool:
    """XOR logical gate

    Args:
        a (bool): First input signal
        b (bool): Second input signal

    Returns:
        bool: Output signal
    """
    return OR(AND(NOT(a), b), AND(a, NOT(b)))
4b9bfed5454008970e3f3c50b2a16b5224082103
33,942
def create_list_from_dict(mydict):
    """
    Converts entities dictionary to flat list.

    Args:
        mydict (dict): Input entities dictionary

    Returns:
        list
    """
    outputs = []
    for k, v in mydict.items():
        if len(v) > 0:
            for i in v:
                outputs.append(i)
    return outputs
50fba98b7590bd7d243464cf45be24c4405f2cef
33,943
from typing import Iterable
from typing import Any

import numpy


def recode(
    _x: Iterable,
    *args: Any,
    _default: Any = None,
    _missing: Any = None,
    **kwargs: Any,
) -> Iterable[Any]:
    """Recode a vector, replacing elements in it

    Args:
        x: A vector to modify
        *args: and
        **kwargs: replacements
        _default: If supplied, all values not otherwise matched will be given this value.
            If not supplied and if the replacements are the same type as the original
            values in series, unmatched values are not changed. If not supplied and if
            the replacements are not compatible, unmatched values are replaced with NA.
        _missing: If supplied, any missing values in .x will be replaced by this value.

    Returns:
        The vector with values replaced
    """
    if is_scalar(_x):
        _x = [_x]

    if not isinstance(_x, numpy.ndarray):
        _x_obj = numpy.array(_x, dtype=object)  # Keep NAs
        _x = numpy.array(_x)
        if numpy.issubdtype(_x.dtype, numpy.str_):
            na_len = len(NA_character_)
            if (_x.dtype.itemsize >> 2) < na_len:  # length not enough
                _x = _x.astype(f"<U{na_len}")
            _x[is_null(_x_obj)] = NA_character_
        elif numpy.issubdtype(_x.dtype, numpy.integer):
            _x[is_null(_x_obj)] = NA_integer_

    if numpy.issubdtype(_x.dtype, numpy.number) or numpy.issubdtype(
        Array(_x[is_not_null(_x)].tolist()).dtype, numpy.number
    ):
        return _recode_numeric(
            _x, *args, _default=_default, _missing=_missing, **kwargs
        )

    return _recode_character(
        _x, *args, _default=_default, _missing=_missing, **kwargs
    )
b06b1467b55ad8c6c510088c7cd65777f384c415
33,944
from math import ceil
from typing import List
from typing import Dict
from typing import Any


def get_details_for_all_categories(categories: List[str]) -> List[Dict[str, Any]]:
    """Get all api details for categories

    :param categories: List of all categories returned from server
    :returns: List of api details
    """
    api_details = []
    for category in categories:
        total_count, data = get_details_for_category(category, 1)
        api_details.extend(data)
        start_page = 2
        end_page = ceil(total_count / 10)
        for page in range(start_page, end_page + 1):
            _, data = get_details_for_category(category, page)
            api_details.extend(data)
    return api_details
f712d2aa0843b33fdeec7f8d46f2f3e00b3caf17
33,945
def update_one(collection_name, _id, **kwargs):
    """ Update document in mongo """
    collection = getattr(database, collection_name)
    return collection.update_one({'_id': ObjectId(_id)}, {'$set': kwargs})
1ecd6b8113caa65179da6141b596d7f8f7093eef
33,948
def AAPIEnterVehicle(idveh, idsection):
    """Execute command once a vehicle enters the Aimsun instance."""
    global entered_vehicles
    entered_vehicles.append(idveh)
    return 0
bfc93be891b104f1a8a3dd0de01265eaba04645c
33,949
import time
import socket
import pickle

import torch


def get_predictionnet(args, model, unlabeled_dataloader):
    """Get predictions using a server client setup with POW on the server side."""
    initialized = False
    HOST = '127.0.0.1'  # The server's hostname or IP address
    PORT = 65432        # The port used by the server
    timequery = 0
    start1 = time.time()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = (HOST, PORT)
    sock.connect(server_address)
    end1 = time.time()
    timequery += end1 - start1
    try:
        with torch.no_grad():
            for data, _ in unlabeled_dataloader:
                start1 = time.time()
                datastr = pickle.dumps(data)
                sock.sendall(datastr)
                time.sleep(0.1)
                msg = "done"  # renamed from `str` to avoid shadowing the builtin
                sock.sendall(msg.encode())

                ### POW Challenge
                challenge = sock.recv(4096)
                challenge = pickle.loads(challenge)
                pos = challenge.find(":")
                pos2 = challenge[pos+1:].find(":")
                bits = challenge[pos+1:pos+pos2+1]
                bits = int(bits)
                xtype = 'bin'
                stamp = mint_iteractive(challenge=challenge, bits=bits, xtype=xtype)
                datastamp = pickle.dumps(stamp)
                sock.sendall(datastamp)
                #####

                output = sock.recv(4096)
                output = pickle.loads(output)
                if not initialized:
                    result = output
                    initialized = True
                else:
                    result = torch.cat((result, output), 0)
                end1 = time.time()
                timequery += end1 - start1
        start1 = time.time()
        time.sleep(0.1)
        msg = "doneiter"
        sock.sendall(msg.encode())
        end1 = time.time()
        timequery += end1 - start1
    finally:
        sock.close()
    return result, timequery
a95c6dc14eb3ccbe804b60f1cbed720c93f6a1f1
33,950
async def get_history_care_plan(
    api_key: str, doctor: Doctor = Depends(get_current_doctor)
):
    """Get all care plans for a client"""
    service = TestService()
    return service.get_history_care_plan(api_key, doctor)
9d7e1c56d93214b22f5018e739e1894ed03cf56b
33,951
def bode(sys_list, w=None, x_lim=None, y_lim=None, dB=True, Hz=False, deg=True, log_x=True):
    """
    Returns the Bode plot of the continuous or discrete-time systems `sys_list`.

    Parameters
    ----------
    sys_list : system or list of systems
        A single system or a list of systems to analyse
    w : numpy vector (optional)
        The base angular frequency vector (in rad/s)
    x_lim : list (optional)
        A list of two elements that defines the min and max value for the x axis
    y_lim : list (optional)
        A list of two elements that defines the min and max value for the y axis
    dB : boolean (optional)
        Use a logarithmic scale for the magnitude plot
    Hz : boolean (optional)
        Use frequency in Hz for the x axis
    deg : boolean (optional)
        Use angle in degree for the phase plot.

    Returns
    -------
    fig : plotly figure
        A plotly figure

    Example
    -------
    .. code ::

        import control as ctl
        from control_plotly import bode

        sys1 = ctl.tf([1], [2, 1, 1])
        sys2 = ctl.tf([1], [1, 0.5, 1])

        w = np.logspace(-1, 1, 100)
        bode([sys1, sys2], w=w)

    .. image:: img/bode.png
        :alt: alternate text
        :align: center
    """
    fig = Bode_Figure(dB=dB, Hz=Hz, deg=deg, log_x=log_x)
    fig = generic_frequency_fig(fig, sys_list, w=w)
    fig = generic_layout(fig, x_lim=x_lim, y_lim=y_lim)
    return fig.show()
4a11e8f7595ec6efeaac2a9a812f2620a5b5f737
33,952
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=["white", "black"],
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.

    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A list or array of two color specifications. The first is used for
        values below a threshold, the second for those above. Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied. If None (the default) uses the middle of the colormap as
        separation. Optional.
    **kwargs
        All other arguments are forwarded to each call to `text` used to
        create the text labels.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()

    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.

    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)

    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)

    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)

    return texts
6b2f49ecc91ca4af9e7b93cfa05a2fc8348b362f
33,953
def _extract_body(payload, embedded_newlines):
    """ Extract HTTP headers and body """
    headers_str, body = payload.split('\r\n\r\n', 1)
    headers = {}
    for line in headers_str.splitlines():
        line = line.rstrip()
        if line.find(':') > -1:
            key, value = line.split(':', 1)
            headers[key] = value
        else:
            if embedded_newlines:
                # SOAP input
                headers[key] += '\r\n' + line
            else:
                # SOAP output
                headers[key] += ' ' + line
    return headers, body
7ac74fe4454fd00e1ec718dc1c5bb1fe93e1dc37
33,954
def connect(db_url, *, external=False):
    """Connect to the database using an environment variable.
    """
    logger.info("Connecting to SQL database %r", db_url)
    kwargs = {}
    if db_url.startswith('sqlite:'):
        kwargs['connect_args'] = {'check_same_thread': False}
    engine = create_engine(db_url, **kwargs)
    # logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

    if db_url.startswith('sqlite:'):
        sqlalchemy.event.listen(
            engine,
            "connect",
            set_sqlite_pragma,
        )

        # https://www.sqlite.org/security.html#untrusted_sqlite_database_files
        if external:
            logger.info("Database is not trusted")

            @sqlalchemy.event.listens_for(engine, "connect")
            def secure(dbapi_connection, connection_record):
                cursor = dbapi_connection.cursor()
                cursor.execute("PRAGMA trusted_schema=OFF")
                cursor.close()

            conn = engine.connect()
            conn.execute('PRAGMA quick_check')
            conn.execute('PRAGMA cell_size_check=ON')
            conn.execute('PRAGMA mmap_size=0')
            conn.close()

    alembic_cfg = alembic.config.Config()
    alembic_cfg.set_main_option('script_location', 'taguette:migrations')
    alembic_cfg.set_main_option('sqlalchemy.url', db_url)

    with engine.connect() as conn:
        if not engine.dialect.has_table(conn, Project.__tablename__):
            logger.warning("The tables don't seem to exist; creating")
            Base.metadata.create_all(bind=engine)

            # Mark this as the most recent Alembic version
            alembic.command.stamp(alembic_cfg, "head")

            # Set SQLite's "application ID"
            if db_url.startswith('sqlite:'):
                conn.execute("PRAGMA application_id=0x54677474;")  # 'Tgtt'
        else:
            # Perform Alembic migrations if needed
            _auto_upgrade_db(db_url, conn, alembic_cfg, external)

    # Record to Prometheus
    conn = engine.connect()
    revision = MigrationContext.configure(conn).get_current_revision()
    PROM_DATABASE_VERSION.labels(revision).set(1)

    DBSession = sessionmaker(bind=engine)
    return DBSession
b62c5a22eeaf63e4989dbde4e685341f37e2a7a1
33,955
from typing import List


def containsDuplicate(nums: List[int]) -> bool:
    """
    Time: O(n)
    Space: O(n)
    """
    visited = set()
    for n in nums:
        if n in visited:
            return True
        else:
            visited.add(n)
    return False
673544bcd10d31d185b65cb7c4b4330a0a7199a4
33,956
def getROC(detector=0, methods=['BDT']):
    """Get the ROC curve for a detector given a set of methods tested

    Keyword arguments:
    detector -- detector used (default 0)
    methods -- list of methods used (default ['BDT'])
    """
    # retrieve root tree as dataframe
    df = read_root('resultsDet{0}.root'.format(detector), 'resultsTree')
    print(df)
    # array for results
    res = np.ndarray([len(methods), 2], dtype=np.ndarray)
    # loop over methods and get roc curves
    for i, m in enumerate(methods):
        res[i, 0], res[i, 1], _ = metrics.roc_curve(df['eventclass'], df['p{}'.format(m)])
    return res
18e86eedfefd0fd079210a59a16f4537f311efbf
33,958
import tensorflow as tf


def classify_dtypes_using_TF2_in_test(data_sample, idcols, verbose=0):
    """
    If you send in a batch of a tf.data.Dataset, you will get back all the
    features classified by type such as cats, ints, floats and nlps.
    This is all done using TF2.
    """
    # nested_dictionary and find_preds are helpers assumed to be provided by the surrounding module
    print_features = False
    nlps = []
    nlp_char_limit = 30
    all_ints = []
    floats = []
    cats = []
    bools = []
    int_vocab = 0
    feats_max_min = nested_dictionary()
    preds = find_preds(data_sample)
    #### Take(1) always displays only one batch only if num_epochs is set to 1 or a number. Otherwise No print! ########
    #### If you execute the below code without take, then it will go into an infinite loop if num_epochs was set to None.
    if data_sample.element_spec[preds[0]].shape[0] is None or data_sample.element_spec[preds[0]].shape[0]:
        for feature_batch in data_sample.take(1):
            if verbose >= 1:
                print("sample batch keys: %s" % list(feature_batch.keys())[:4])
            if len(feature_batch.keys()) <= 30:
                print_features = True
            if verbose >= 1:
                print("features and their max, min, datatypes in one batch")
            for key, value in feature_batch.items():
                feats_max_min[key]["dtype"] = data_sample.element_spec[key].dtype
                if feats_max_min[key]['dtype'] in [tf.float16, tf.float32, tf.float64]:
                    ## no need to find vocab of floating point variables!
                    floats.append(key)
                elif feats_max_min[key]['dtype'] in [tf.int16, tf.int32, tf.int64]:
                    ### if it is an integer var, it is worth finding their vocab!
                    all_ints.append(key)
                    int_vocab = tf.unique(value)[0].numpy().tolist()
                    feats_max_min[key]['size_of_vocab'] = len(int_vocab)
                elif feats_max_min[key]['dtype'] in [tf.bool]:
                    ### boolean vars have a tiny vocab, so find it as well
                    bools.append(key)
                    int_vocab = tf.unique(value)[0].numpy().tolist()
                    feats_max_min[key]['size_of_vocab'] = len(int_vocab)
                elif feats_max_min[key]['dtype'] in [tf.string]:
                    if tf.reduce_mean(tf.strings.length(feature_batch[key])).numpy() >= nlp_char_limit:
                        print('%s is detected and will be treated as an NLP variable' % key)
                        nlps.append(key)
                    else:
                        cats.append(key)
    if not print_features:
        print('Number of variables in dataset is too numerous to print...skipping print')
    ints = [x for x in all_ints if feats_max_min[x]['size_of_vocab'] > 30 and x not in idcols]
    int_cats = [x for x in all_ints if feats_max_min[x]['size_of_vocab'] <= 30 and x not in idcols]
    return cats, int_cats, ints, floats, nlps, bools
da6c9a82def789f5bb22c806b3cde294cd8d3631
33,959
def cs2coords(start, qstart, length, strand, cs, offset=1, splice_donor=['gt', 'at'], splice_acceptor=['ag', 'ac']): """ # From minimap2 manual this is the cs flag definitions Op Regex Description = [ACGTN]+ Identical sequence (long form) : [0-9]+ Identical sequence length * [acgtn][acgtn] Substitution: ref to query + [acgtn]+ Insertion to the reference - [acgtn]+ Deletion from the reference ~ [acgtn]{2}[0-9]+[acgtn]{2} Intron length and splice signal """ cs = cs.replace('cs:Z:', '') ProperSplice = True exons = [int(start)] position = int(start) query = [int(qstart)] querypos = 0 num_exons = 1 gaps = 0 mismatches = 0 indels = [] if strand == '+': sp_donor = splice_donor sp_acceptor = splice_acceptor sort_orientation = False elif strand == '-': # rev comp and swap donor/acceptor sp_donor = [mp.revcomp(x).lower() for x in splice_acceptor] sp_acceptor = [mp.revcomp(x).lower() for x in splice_donor] sort_orientation = True for s, value in cs2tuples(cs): if s == ':': position += int(value) querypos += int(value) indels.append(0) elif s == '-': gaps += 1 position += len(value) querypos += len(value) indels.append(-len(value)) elif s == '+': gaps += 1 position += len(value) querypos += len(value) indels.append(len(value)) elif s == '~': if value.startswith(tuple(sp_donor)) and value.endswith(tuple(sp_acceptor)): ProperSplice = True else: ProperSplice = False num_exons += 1 exons.append(position+indels[-1]) query.append(querypos) query.append(querypos+1) intronLen = int(value[2:-2]) position += intronLen exons.append(position) indels.append(0) elif s == '*': mismatches += len(value)/2 # add last Position exons.append(position) query.append(int(length)) # convert exon list into list of exon tuples exontmp = list(zip(exons[0::2], exons[1::2])) queryList = list(zip(query[0::2], query[1::2])) exonList = [] for x in sorted(exontmp, key=lambda tup: tup[0], reverse=sort_orientation): exonList.append((x[0]+offset, x[1])) return exonList, queryList, mismatches, gaps, ProperSplice
fa9b283ff58e494914e13aca552ca0bf71e8853a
33,960
def get_learner_goals_from_model(learner_goals_model): """Returns the learner goals domain object given the learner goals model loaded from the datastore. Args: learner_goals_model: LearnerGoalsModel. The learner goals model from the datastore. Returns: LearnerGoals. The learner goals domain object corresponding to the given model. """ return user_domain.LearnerGoals( learner_goals_model.id, learner_goals_model.topic_ids_to_learn, learner_goals_model.topic_ids_to_master)
9216cc711f24ffbb0554a9dad4af8571c51429a3
33,962
def prepare_metadata_for_build_wheel( metadata_directory, config_settings, _allow_fallback): """Invoke optional prepare_metadata_for_build_wheel Implements a fallback by building a wheel if the hook isn't defined, unless _allow_fallback is False in which case HookMissing is raised. """ backend = _build_backend() try: hook = backend.prepare_metadata_for_build_wheel except AttributeError: if not _allow_fallback: raise HookMissing() return _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings) else: return hook(metadata_directory, config_settings)
24826b1f18a83c97ec516dec2e67110d920725a9
33,963
from datetime import datetime def get_modtime(ftp, filename): """ Get the modtime of a file. :rtype : datetime """ resp = ftp.sendcmd('MDTM ' + filename) if resp[:3] == '213': s = resp[3:].strip() mod_time = datetime.strptime(s,'%Y%m%d%H%M%S') return mod_time return datetime.min
2a69d0448c093319392afafcfff96dc04ec225d0
33,964
def rotor_between_objects_root(X1, X2): """ Lasenby and Hadfield AGACSE2018 For any two conformal objects X1 and X2 this returns a rotor that takes X1 to X2 Uses the square root of rotors for efficiency and numerical stability """ X21 = (X2 * X1) X12 = (X1 * X2) gamma = (X1 * X1).value[0] if gamma > 0: C = 1 + gamma*(X2 * X1) if abs(C.value[0]) < 1E-6: R = (I5eo * X21)(2).normal() return (R * rotor_between_objects_root(X1, -X2)).normal() return pos_twiddle_root(C)[0].normal() else: C = 1 - X21 if abs(C.value[0]) < 1E-6: R = (I5eo * X21)(2) R = (R * biv3dmask)(2).normal() R2 = rotor_between_objects_root(apply_rotor(X1, R), X2).normal() return (R2 * R).normal() else: return C.normal()
9b4818aa149b5b8ea1cda3791f88c0650fb7a0bf
33,965
from typing import Tuple

import jax.numpy as jnp

# Assumed alias: a (lower, upper) bound pair for the step size.
LimitsType = Tuple[float, float]


def propose_time_step(
    dt: float, scaled_error: float, error_order: int, limits: LimitsType
):
    """
    Propose an updated dt based on the scheme suggested in Numerical Recipes, 3rd ed.
    """
    SAFETY_FACTOR = 0.95
    err_exponent = -1.0 / (1 + error_order)
    return jnp.clip(
        dt * SAFETY_FACTOR * scaled_error ** err_exponent,
        limits[0],
        limits[1],
    )
fa587b612433605ad002cb89a9ba5a46caa90679
33,966
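A quick numeric check of the record above, assuming propose_time_step and its imports are in scope; the error values are made up.

dt = 0.1
scaled_error = 2.0        # error was twice the tolerance -> shrink the step
new_dt = propose_time_step(dt, scaled_error, error_order=4, limits=(1e-6, 1.0))
print(new_dt)             # roughly 0.1 * 0.95 * 2**(-1/5) ~= 0.083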
import numpy as np


def cross_column(columns, hash_bucket_size=1e4):
    """
    generate cross column feature from `columns` with hash bucket.
    :param columns: columns to use to generate cross column, type must be ndarray
    :param hash_bucket_size: hash bucket size used to bucketize the cross column into a fixed range
    :return: cross column, represented as a ndarray
    """
    assert columns.shape[0] > 0 and columns.shape[1] > 0
    _crossed_column = np.zeros((columns.shape[0], 1))
    for i in range(columns.shape[0]):
        # hash the row's values joined by "_" and fold the result into [0, hash_bucket_size)
        _crossed_column[i, 0] = (hash("_".join(map(str, columns[i, :]))) % hash_bucket_size
                                 + hash_bucket_size) % hash_bucket_size
    return _crossed_column
c4dfbf9083686c083e753ec90f9dbb00b06d515c
33,967
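A small sketch exercising the record above, assuming cross_column is in scope; note that Python's built-in hash() is salted per process for strings, so the exact bucket ids vary between runs.

import numpy as np

columns = np.array([["US", "mobile"],
                    ["US", "desktop"],
                    ["DE", "mobile"]])
crossed = cross_column(columns, 100)   # bucket size of 100
print(crossed.shape)   # (3, 1)
print(crossed)         # one bucket id in [0, 100) per row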
def conform_json_response(api, json_response):
    """Get the right data from the json response.
       Expects a list, either like [[],...], or like [{},..]"""
    if api == 'cryptowatch':
        return list(json_response['result'].values())[0]
    elif api == 'coincap':
        return json_response['data']
    elif api in {'poloniex', 'hitbtc', 'bitfinex', 'coinbase'}:
        return json_response
    else:
        raise Exception('API not supported', api, 'Response was ', json_response)
a9a2ec51edc13843d0b8b7ce5458bb44f4efd242
33,968
import re

from lxml import etree


def parse_content(content):
    """
    Parse the movie detail page HTML and extract its metadata.
    :param content: raw HTML of the page
    :return: dict with director, screenwriter, actors, type, release date, runtime, region, language and IMDb link
    """
    movie = {}
    html = etree.HTML(content)
    try:
        info = html.xpath("//div[@id='info']")[0]
        movie['director'] = info.xpath("./span[1]/span[2]/a/text()")[0]
        movie['screenwriter'] = info.xpath("./span[2]/span[2]/a/text()")[0]
        movie['actors'] = '/'.join(info.xpath("./span[3]/span[2]/a/text()"))
        movie['type'] = '/'.join(info.xpath("./span[@property='v:genre']/"
                                            "text()"))
        movie['initialReleaseDate'] = '/'.\
            join(info.xpath(".//span[@property='v:initialReleaseDate']/text()"))
        movie['runtime'] = \
            info.xpath(".//span[@property='v:runtime']/text()")[0]

        def str_strip(s):
            return s.strip()

        def re_parse(key, regex):
            ret = re.search(regex, content)
            movie[key] = str_strip(ret[1]) if ret else ''

        # the regexes keep the original Chinese page labels, since they must match the source HTML
        re_parse('region', r'<span class="pl">制片国家/地区:</span>(.*?)<br/>')
        re_parse('language', r'<span class="pl">语言:</span>(.*?)<br/>')
        re_parse('imdb', r'<span class="pl">IMDb链接:</span> <a href="(.*?)" '
                         r'target="_blank" rel="nofollow">')
    except Exception as e:
        print('Failed to parse page: %s' % e)
    return movie
3e1bab821268abe99e088f331e317ddf74bb36e8
33,971
def model_save(self, commit=True): """ Creates and returns model instance according to self.clean_data. This method is created for any form_for_model Form. """ if self.errors: raise ValueError("The %s could not be created because the data didn't validate." % self._model._meta.object_name) return save_instance(self, self._model(), commit)
47192e57a91961fcd08ea7091ab8a3a7448e2c97
33,972
def count_items(item_list: list) -> tuple:
    """
    List the distinct item types in item_list and how many times each one occurs.
    :param item_list: list of items
    :return: a tuple containing the set of item types and a list with the count of elements of each type
    """
    item_types = set(item_list)
    count_items_of_each_types = [item_list.count(t) for t in item_types]
    return item_types, count_items_of_each_types
07144327c72fb1c54a1adbd5cadbf8b78324a0df
33,973
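A quick usage sketch for the record above, assuming count_items is in scope.

types, counts = count_items(["apple", "pear", "apple", "plum", "apple"])
print(types)    # e.g. {'apple', 'pear', 'plum'} -- set order is arbitrary
print(counts)   # counts aligned with the iteration order of the set above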
def find_adjacent_citations(adfix, uuid_ctd_mid_map, backwards=False): """ Given text after or before a citation, find all directly adjacent citations. """ if backwards: perimeter = adfix[-50:] else: perimeter = adfix[:50] match = CITE_PATT.search(perimeter) if not match: return [] uuid = match.group(1) if uuid not in uuid_ctd_mid_map: return [] id_tuple = uuid_ctd_mid_map[uuid] margin = perimeter.index(match.group(0)) if backwards: adfix = adfix[:-(50-margin)] else: adfix = adfix[45+margin:] moar = find_adjacent_citations(adfix, uuid_ctd_mid_map, backwards=backwards) return [id_tuple] + moar
3630c524a55c09c3b44e6aec3f5706e6a6253c07
33,974
def defineTrendingObjects(subsystem):
    """ Defines trending histograms and the histograms from which they should be extracted.

    Args:
        subsystem (str): The current subsystem by three letter, all capital name (ex. ``EMC``).
    """
    functionName = "define{}TrendingObjects".format(subsystem)
    findFunction = getattr(currentModule, functionName, None)
    trending = {}
    if findFunction is not None:
        trending = findFunction(trending)
    else:
        logger.info("Could not find histogram trending function for subsystem {0}".format(subsystem))
    return trending
f0bd25476c7b9e2db245e4fd07fa03cc521d6c3c
33,975
def staff_beers():
    """ Return the first three beers from the database. """
    return Beer.query.limit(3)
f4f0d8dbb1b2a0550889ee7d00674e7c939f9eef
33,976
import numpy as np


def gen_curves(gen_data, file_path="", plot=True):
    """
    Generates all parameters/values needed to produce an IV surface.
    No actual computations done except to compute relevant values for the drift.
    """
    # DEFAULT_TAU, construct_params and compute_m are assumed to be defined in the surrounding module
    num_gen_days = len(gen_data)
    tau = DEFAULT_TAU
    pis, mus, sigs, As, lams = construct_params(gen_data.detach(), 3, 8, calc_mu=True, inc_pi=False)
    m_arr = []
    mean_arr = []
    for i in range(num_gen_days):
        m_list = []
        mean_list = []
        for j in range(len(tau)):
            if len(mean_list) == 0:
                last_mean = None
            else:
                last_mean = mean_list[-1]
            m, mean = compute_m(tau[:j+1], pis[i], As[i, :j+1, :, :], mus[i, :j+1, :],
                                sigs[i, :j+1, :], last_mean)
            m_list.append(m)
            mean_list.append(mean)
        m_arr.append(m_list)
        mean_arr.append(mean_list)
    return tau, pis, mus, As, sigs, lams, np.array(m_arr), np.array(mean_arr)
07f509588bb1c6e21de14189b34c979306e2cc1b
33,977
import math


def mm2phi(mm):
    """Convert a grain size in mm to the phi scale.

    Args:
        mm <float64>: A float value for conversion"""
    return -math.log2(mm)
b40a9125be92dcdcc2be5b888c971f36f17c1c38
33,978
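A short check of the record above, assuming mm2phi is in scope; the values follow the Krumbein phi convention (phi = -log2 of the diameter in mm).

print(mm2phi(1.0))     # 0.0  -- 1 mm sits at phi = 0
print(mm2phi(0.25))    # 2.0  -- fine/medium sand boundary
print(mm2phi(2.0))     # -1.0 -- very coarse sand / granule boundary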
def IV_action(arr, iv=None, action="extract"):
    """Extract or store IV at the end of the arr."""
    if action == "store" and iv is not None:
        arr.append(iv)
    elif action == "extract" and iv is None:
        iv = arr.pop()
        return iv
    else:
        return "Error: No action assigned."
0844cbb8eb3fb07ff49fb63d035fc7d4b7201700
33,979
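A minimal sketch of the record above, assuming IV_action is in scope; the "ciphertext" list and IV bytes are placeholders, not real crypto output.

blocks = [b"block-1", b"block-2"]
IV_action(blocks, iv=b"0123456789abcdef", action="store")   # IV appended at the end
print(blocks[-1])                                            # b'0123456789abcdef'
recovered_iv = IV_action(blocks, action="extract")           # IV popped back off
print(recovered_iv == b"0123456789abcdef")                   # True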
from shutil import get_terminal_size  # assumed source; the original import is not shown


def get_tty_width():
    """
    :return: Terminal width as a string
    """
    return str(get_terminal_size()[0])
2c09599d13417e0243af142b7c79da2cf9802825
33,980
from datetime import timedelta


def set_train_val_test_sequence(df, dt_dset, rcpower_dset, test_cut_off, val_cut_off, f=None):
    """ Split a time series DataFrame into train, validation and test sequences.

    :param df: DataFrame object
    :param dt_dset: date/time column name, i.e. "Date Time"
    :param rcpower_dset: actual characteristic column name, i.e. "Imbalance"
    :param test_cut_off: time delta value in minutes, or None. Note that timedelta() does not
           accept a string, so the value is passed as timedelta(minutes=value). The last sampled
           values before this cutoff form the test sequence.
    :param val_cut_off: time delta value in minutes, or None. The last sampled values before the
           test sequence form the validation sequence.
    :param f: log file handler
    :return: (df_train, df_val, df_test, datePredict, actvalPredict)
    """
    if test_cut_off is None or test_cut_off == "":
        test_cutoff_date = df[dt_dset].max()
        df_test = None
    else:
        test_cutoff_date = df[dt_dset].max() - timedelta(minutes=test_cut_off)
        df_test = df[df[dt_dset] > test_cutoff_date]

    if val_cut_off is None or val_cut_off == "":
        df_val = None
        # no validation set: everything up to the test cutoff is training data
        df_train = df[df[dt_dset] <= test_cutoff_date]
    else:
        val_cutoff_date = test_cutoff_date - timedelta(minutes=val_cut_off)
        df_val = df[(df[dt_dset] > val_cutoff_date) & (df[dt_dset] <= test_cutoff_date)]
        df_train = df[df[dt_dset] <= val_cutoff_date]

    print('Train dates: {} to {}'.format(df_train[dt_dset].min(), df_train[dt_dset].max()))

    f.write("\nTrain dataset\n")
    f.write('Train dates: {} to {}\n\n'.format(df_train[dt_dset].min(), df_train[dt_dset].max()))
    for i in range(len(df_train)):
        f.write('{} {}\n'.format(df_train[dt_dset][i], df_train[rcpower_dset][i]))

    if df_val is not None:
        print('Validation dates: {} to {}'.format(df_val[dt_dset].min(), df_val[dt_dset].max()))

        f.write("\nValidation dataset\n")
        f.write('Validation dates: {} to {}\n\n'.format(df_val[dt_dset].min(), df_val[dt_dset].max()))
        for i in range(len(df_train), len(df_train) + len(df_val)):
            f.write('{} {}\n'.format(df_val[dt_dset][i], df_val[rcpower_dset][i]))

    if df_test is not None:
        print('Test dates: {} to {}'.format(df_test[dt_dset].min(), df_test[dt_dset].max()))

        f.write("\nTest dataset\n")
        f.write('Test dates: {} to {}\n\n'.format(df_test[dt_dset].min(), df_test[dt_dset].max()))

        start = len(df_train) if df_val is None else len(df_train) + len(df_val)
        stop = start + len(df_test)
        for i in range(start, stop):
            f.write('{} {}\n'.format(df_test[dt_dset][i], df_test[rcpower_dset][i]))

        datePredict = df_test[dt_dset].values[0]
        actvalPredict = df_test[rcpower_dset].values[0]
    else:
        # no test sequence was requested, so there is nothing to predict against
        datePredict = None
        actvalPredict = None

    return df_train, df_val, df_test, datePredict, actvalPredict
88145303e494578116e74d713f0e2333a1bd7798
33,981
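A runnable sketch exercising the record above, assuming the function and its imports are in scope; the column names, minute resolution and log target are illustrative only.

import sys
import pandas as pd

df = pd.DataFrame({
    "Date Time": pd.date_range("2021-01-01", periods=60, freq="min"),
    "Imbalance": range(60),
})
# last 10 minutes -> test, previous 10 minutes -> validation, rest -> train
df_train, df_val, df_test, date_pred, val_pred = set_train_val_test_sequence(
    df, "Date Time", "Imbalance", test_cut_off=10, val_cut_off=10, f=sys.stdout)
print(len(df_train), len(df_val), len(df_test))   # 40 10 10
print(date_pred, val_pred)                        # first test timestamp and its value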