columns: content (string, 35–762k chars) · sha1 (string, 40 chars) · id (int64, 0–3.66M)
from typing import Any, List, Optional, Tuple


def walk_extension(
    state: State,
    trie_prefix: Bytes,
    node_key: Bytes,
    extension_node: ExtensionNode,
    dirty_list: List[Tuple[Bytes, Node]],
    cursor: Any,
) -> Optional[InternalNode]:
    """
    Consume the last element of `dirty_list` and update the `ExtensionNode` at
    `node_key`, potentially turning it into `ExtensionNode` -> `BranchNode` ->
    `ExtensionNode`.

    This function returns the new value of the visited node, but does not
    write it to the database.
    """
    key, value = dirty_list[-1]
    if key[len(node_key):].startswith(extension_node.key_segment):
        target_node = walk(
            state,
            trie_prefix,
            node_key + extension_node.key_segment,
            dirty_list,
            cursor,
        )
        return make_extension_node(
            state,
            trie_prefix,
            node_key,
            node_key + extension_node.key_segment,
            target_node,
            cursor,
        )
    prefix_length = common_prefix_length(
        extension_node.key_segment, key[len(node_key):]
    )
    prefix = extension_node.key_segment[:prefix_length]
    if prefix_length != len(extension_node.key_segment) - 1:
        new_extension_node = ExtensionNode(
            extension_node.key_segment[prefix_length + 1:],
            extension_node.subnode,
        )
        write_internal_node(
            cursor,
            trie_prefix,
            node_key + extension_node.key_segment[:prefix_length + 1],
            new_extension_node,
        )
        encoded_new_extension_node = encode_internal_node(new_extension_node)
    else:
        encoded_new_extension_node = extension_node.subnode
    node = split_branch(
        state,
        trie_prefix,
        node_key + prefix,
        extension_node.key_segment[prefix_length],
        encoded_new_extension_node,
        dirty_list,
        cursor,
    )
    if prefix_length != 0:
        return make_extension_node(
            state, trie_prefix, node_key, node_key + prefix, node, cursor
        )
    return node
bcc31ae61729db82d84e02168926845b7b42da44
26,451
def bottleneck_block_v2(inputs, filters, strides, training,
                        projection_shortcut, data_format):
    """3-layer bottleneck residual block with batch normalization and relu
    before each convolution layer.

    :param inputs: input images
    :param filters: number of filters
    :param strides: strides of the convolutions
    :param training: flag indicating whether we are training or not
    :param projection_shortcut: a function if a projection is necessary on
        the shortcut, None otherwise
    :param data_format: format of the data, either channels_first or
        channels_last
    :return: output of one 3-layer bottleneck residual block
    """
    shortcut = inputs
    if projection_shortcut is not None:
        shortcut = projection_shortcut(inputs)
    inputs = batch_norm_relu(inputs, training, data_format)
    inputs = padded_conv2d(inputs=inputs, kernel_size=1, filters=filters,
                           strides=1, data_format=data_format)
    inputs = batch_norm_relu(inputs, training, data_format)
    inputs = padded_conv2d(inputs=inputs, kernel_size=3, filters=filters,
                           strides=strides, data_format=data_format)
    inputs = batch_norm_relu(inputs, training, data_format)
    inputs = padded_conv2d(inputs=inputs, kernel_size=1, filters=4 * filters,
                           strides=1, data_format=data_format)
    return inputs + shortcut
8330e68d1411c643ffcee6916ba95ef77b7cc5ee
26,453
def preprocess_lines(lines, otherAutorizedSymbols, sentencesSeparator=None):
    """Complete my dataset."""
    if sentencesSeparator:
        result = []
        for line in lines:
            e = line.split(sentencesSeparator)
            if e[0] != "__Error__" and e[1] != "__Error__":
                lignes_i = sent_tokenize(e[0])
                lignes_j = sent_tokenize(e[1])
                l = len(lignes_i)
                if l == len(lignes_j):
                    for k in range(l):
                        result.append(EPE(lignes_i[k]) + sentencesSeparator
                                      + EPE(lignes_j[k]) + '\n')
                else:
                    lignes_i = EPE(e[0])
                    lignes_j = EPE(e[1])
                    result.append(lignes_i + sentencesSeparator + lignes_j + '\n')
        for line in result.copy():
            e = line.split(sentencesSeparator)
            lignes_i = help_preprocess_lines(e[0], otherAutorizedSymbols).split(',')
            lignes_j = help_preprocess_lines(e[1], otherAutorizedSymbols).split(',')
            l = len(lignes_i)
            if l == len(lignes_j):
                for k in range(l):
                    result.append(EPE(lignes_i[k]) + sentencesSeparator
                                  + EPE(lignes_j[k]) + '\n')
        return result
    else:
        return lines
04bf9f90bf06f07803ca8dd6199728d33a73a6de
26,455
def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
    """
    Check if ``validator`` is slashable.
    """
    return ((not validator.slashed)
            and (validator.activation_epoch <= epoch < validator.withdrawable_epoch))
9ea82379e270f668d2dde3cb5b10a59f29f2e6e6
26,456
def check_rate_limit() -> None:
    """
    Check whether or not a user has exceeded the rate limits specified in the
    config. Rate limits per API key or session and per user are recorded.
    The redis database is used to keep track of caching, by incrementing
    "rate limit" cache keys on each request and setting a timeout on them.
    The rate limit can be adjusted in the configuration file.

    :raises APIException: If the rate limit has been exceeded
    """
    if not flask.g.user:
        return check_rate_limit_unauthenticated()

    user_cache_key = f'rate_limit_user_{flask.g.user.id}'
    key_cache_key = f'rate_limit_api_key_{flask.g.api_key.hash}'

    auth_specific_requests = cache.inc(
        key_cache_key, timeout=app.config['RATE_LIMIT_AUTH_SPECIFIC'][1]
    )
    if auth_specific_requests > app.config['RATE_LIMIT_AUTH_SPECIFIC'][0]:
        time_left = cache.ttl(key_cache_key)
        raise APIException(
            f'Client rate limit exceeded. {time_left} seconds until lock expires.'
        )

    user_specific_requests = cache.inc(
        user_cache_key, timeout=app.config['RATE_LIMIT_PER_USER'][1]
    )
    if user_specific_requests > app.config['RATE_LIMIT_PER_USER'][0]:
        time_left = cache.ttl(user_cache_key)
        raise APIException(
            f'User rate limit exceeded. {time_left} seconds until lock expires.'
        )
ca49c4b2c4287bca3d664b20abebd9b7df53a0fb
26,457
def update_db(mode):
    """Mode can be 'add', 'move', 'delete'."""
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # did and rse are the first 2 args
            did, rse = args[0], args[1]
            update_db = kwargs.get('update_db', False)
            if not update_db or db is None:
                return func(*args, **kwargs)
            func(did, rse, **kwargs)
            number, dtype, h = parse_did(did)
            if mode == 'add':
                data = build_data_dict(did, rse, status='transferring')
                db.update_data(number, data)
            elif mode == 'delete':
                data = db.get_data(number, type=dtype, did=did, location=rse)
                if len(data) == 0:
                    pass
                else:
                    for d in data:
                        db.delete_data(number, d)
            elif mode == 'move':
                from_rse = kwargs.pop('from_rse')
                if not from_rse:
                    raise ValueError(
                        f"'from_rse' must be passed when calling {func.__name__}")
                from_data = db.get_data(number, type=dtype, did=did,
                                        location=from_rse)
                if len(from_data) == 0:
                    to_data = build_data_dict(did, rse, 'transferring')
                else:
                    to_data = from_data[0].copy()
                    to_data['location'] = rse
                    to_data['status'] = 'transferring'
                db.update_data(number, to_data)
                db.delete_data(number, from_data)
        return wrapped
    return decorator
cc91757030c9d398bd17ad73521e23d835879560
26,458
def right_size2(a1, a2):
    """
    Check that a1 and a2 have equal shapes.
    a1 and a2 are NumPy arrays.
    """
    if not (hasattr(a1, 'shape') and hasattr(a2, 'shape')):
        # look up the argument names in the calling code so the
        # message is meaningful
        a1_name, where = get_argname_in_call('right_size2', 1)
        a2_name, where = get_argname_in_call('right_size2', 2)
        raise TypeError('%s is %s and %s is %s - both must be NumPy arrays'
                        % (a1_name, type(a1), a2_name, type(a2)))
    if a1.shape != a2.shape:
        a1_name, where = get_argname_in_call('right_size2', 1)
        a2_name, where = get_argname_in_call('right_size2', 2)
        raise ValueError(
            '%s\n%s has size %s, which is not compatible with size %s of %s'
            % (where, a1_name, a1.shape, a2.shape, a2_name))
    else:
        return True
66aa2097ff67a2ef44c49118dbdbba1539f1e3ba
26,459
def spherDist(stla, stlo, evla, evlo):
    """Spherical distance in degrees."""
    return SphericalCoords.distance(stla, stlo, evla, evlo)
e2ae206c63712dbf263d6d3af28066f279571e20
26,461
def get_example(matrix, start_row, input_timesteps=INPUT_TIMESTEPS,
                output_timesteps=OUTPUT_TIMESTEPS):
    """Returns a pair of input, output ndarrays.

    Input starts at start_row and has the given input length. Output starts
    at the next timestep and has the given output length.
    """
    # Make sure there are enough time steps remaining.
    if len(matrix) < start_row + input_timesteps + output_timesteps:
        raise Exception('Not enough rows to get example.')
    input_ex = matrix[start_row:start_row + input_timesteps]
    output_ex = matrix[start_row + input_timesteps:
                       start_row + input_timesteps + output_timesteps]
    return (input_ex, output_ex)
aa396588a32492c29e1dbb4a0e8f96016974b805
26,462
def full_mul_modifier(optree):
    """Extend the precision of the arguments of a multiplication to get the
    full result of the multiplication."""
    op0 = optree.get_input(0)
    op1 = optree.get_input(1)
    optree_type = optree.get_precision()
    assert (is_std_integer_format(op0.get_precision())
            and is_std_integer_format(op1.get_precision())
            and is_std_integer_format(optree_type))
    op0_conv = Conversion(op0, precision=optree_type) \
        if optree_type != op0.get_precision() else op0
    # the fallback must be op1 (not op0) when no conversion is needed
    op1_conv = Conversion(op1, precision=optree_type) \
        if optree_type != op1.get_precision() else op1
    return Multiplication(op0_conv, op1_conv, precision=optree_type)
6c191c274d9f130619e2831b50de65a270b41748
26,463
def find_used_modules(modules, text):
    """
    Given a list of modules, return the set of all those imported in text.
    """
    used = set()
    for line in text.splitlines():
        for mod in modules:
            if 'import' in line and mod in line:
                used.add(mod)
    return used
0b1b2b31f60a565d7ba30a9b21800ba7ec265d0c
26,464
def string2mol2(filename, string):
    """
    Writes a molecule to a filename.mol2 file; the input is a string of
    Mol2 blocks.
    """
    block = string
    # '.mol2' is five characters long, so compare the last five characters
    if filename[-5:] != '.mol2':
        filename += '.mol2'
    with open(filename, 'w') as file:
        file.write(block)
    return None
51043e7f4edde36682713455dc33c643f89db397
26,465
def getTimeFormat():
    """Return the time format used in the bot."""
    return timeFormat
cd13ab983cd91dca4fc3ae3414c3724b5019f248
26,466
from typing import Optional, Sequence

import numpy
import numpoly


def concatenate(
    arrays: Sequence[PolyLike],
    axis: int = 0,
    out: Optional[ndpoly] = None,
) -> ndpoly:
    """
    Join a sequence of arrays along an existing axis.

    Args:
        arrays:
            The arrays must have the same shape, except in the dimension
            corresponding to `axis` (the first, by default).
        axis:
            The axis along which the arrays will be joined. If axis is None,
            arrays are flattened before use. Default is 0.
        out:
            If provided, the destination to place the result. The shape must
            be correct, matching that of what concatenate would have returned
            if no out argument were specified.

    Returns:
        The concatenated array.

    Examples:
        >>> const = numpy.array([[1, 2], [3, 4]])
        >>> poly = numpoly.variable(2).reshape(1, 2)
        >>> numpoly.concatenate((const, poly), axis=0)
        polynomial([[1, 2], [3, 4], [q0, q1]])
        >>> numpoly.concatenate((const, poly.T), axis=1)
        polynomial([[1, 2, q0], [3, 4, q1]])
        >>> numpoly.concatenate((const, poly), axis=None)
        polynomial([1, 2, 3, 4, q0, q1])

    """
    arrays = numpoly.align_exponents(*arrays)
    if out is None:
        coefficients = [
            numpy.concatenate(
                [array.values[key] for array in arrays], axis=axis)
            for key in arrays[0].keys
        ]
        out = numpoly.polynomial_from_attributes(
            exponents=arrays[0].exponents,
            coefficients=coefficients,
            names=arrays[0].names,
            dtype=coefficients[0].dtype,
        )
    else:
        for key in out.keys:
            if key in arrays[0].keys:
                numpy.concatenate(
                    [array.values[key] for array in arrays],
                    out=out.values[key], axis=axis)
    return out
df6c897b25279dca5187e6cafe5c1b9d22b8a994
26,467
import json

import requests

# Module-level constant assumed by this snippet: the SEC publishes the
# ticker-to-CIK mapping as JSON at this endpoint.
URL = "https://www.sec.gov/files/company_tickers.json"


def get_cik_map(key="ticker"):
    """Get a dictionary of tickers to CIK numbers.

    Args:
        key (str): Should be either "ticker" or "title". Choosing "ticker"
            will give a dict with tickers as keys. Choosing "title" will use
            the company name as keys.

    Returns:
        Dictionary with either ticker or company name as keys, depending on
        the ``key`` argument, and the corresponding CIK as values.

    .. versionadded:: 0.1.6
    """
    if key not in ("ticker", "title"):
        raise ValueError(
            "key must be 'ticker' or 'title'. Was given {key}.".format(key=key))
    response = requests.get(URL)
    json_response = json.loads(response.text)
    return {v[key]: str(v["cik_str"]) for v in json_response.values()}
6a9cf67bb63bfd057ee936e1a5d5be33d8655abe
26,468
from importlib import import_module

# `settings` is assumed to come from the project configuration
# (e.g. django.conf.settings).


def _data_layers():
    """Index all configured data layers by their "shorthand".

    This doesn't have any error checking -- it'll explode if configured
    improperly.
    """
    layers = {}
    for class_path in settings.DATA_LAYERS:
        module, class_name = class_path.rsplit('.', 1)
        klass = getattr(import_module(module), class_name)
        layers[klass.shorthand] = klass
    return layers
34b0843c76086b41bf119987283fa4373bc07190
26,469
import hashlib


def sha1base64(file_name):
    """Calculate the SHA1 checksum in Base64 for a file."""
    return _compute_base64_file_hash(file_name, hashlib.sha1)
aaf2daca1676c822259bec8a519ab5eae7618b17
26,470
def readfsa(fh):
    """Reads a file and returns a list of Fsa objects."""
    raw = list()
    seqs = list()
    for line in fh:
        if line.startswith(">") and len(raw) > 0:
            seqs.append(Fsa("".join(raw)))
            raw.clear()
        raw.append(line)
    if len(raw) > 0:
        seqs.append(Fsa("".join(raw)))
    return seqs
089cd4b7addcf99baf9394b59d44702995eff417
26,471
def with_timeout(name):
    """
    Method decorator, wraps a method with :py:func:`asyncio.wait_for`. The
    `timeout` value is taken from the attribute named by the `name` decorator
    argument, or from "timeout".

    :param name: name of timeout attribute
    :type name: :py:class:`str`

    :raises asyncio.TimeoutError: if the coroutine does not finish within
        the timeout

    Wait for `self.timeout`
    ::

        >>> def __init__(self, ...):
        ...     self.timeout = 1
        ...
        ... @with_timeout
        ... async def foo(self, ...):
        ...     pass

    Wait for a custom timeout
    ::

        >>> def __init__(self, ...):
        ...     self.foo_timeout = 1
        ...
        ... @with_timeout("foo_timeout")
        ... async def foo(self, ...):
        ...     pass

    """
    if isinstance(name, str):
        return _with_timeout(name)
    else:
        return _with_timeout("timeout")(name)
7591a4ed176fad60510dfc7aafbb6df2b44672a4
26,472
import json

# `request`, `jsonify`, `pd` (pandas) and the trained `model` are assumed
# to be provided by the enclosing Flask application module.


def results():
    """
    Predict on a test dataset passed as the body of the HTTP request.

    :return: prediction labels array
    """
    data = request.get_json(force=True)
    data = pd.DataFrame(json.loads(data))
    prediction = model.predict(data)
    output = list(map(int, prediction))
    return jsonify(output)
caae7f04884532de7a4fef9e5a6d285c982d2187
26,473
from datetime import datetime


def tradedate_2_dtime(td):
    """Convert a trade date as formatted by yfinance to a datetime object."""
    td_str = str(int(td))
    y, m, d = int(td_str[:4]), int(td_str[4:6]), int(td_str[6:])
    return datetime(y, m, d)
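# Illustrative usage sketch (assumes the function above is in scope):
# yfinance-style trade dates are plain YYYYMMDD integers.
assert tradedate_2_dtime(20210315) == datetime(2021, 3, 15)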
29db7ed41a5cac48af1e7612e1cd2b59ab843a1f
26,475
def frmchg(frame1, frame2, et):
    """frmchg(SpiceInt frame1, SpiceInt frame2, SpiceDouble et)"""
    return _cspyce0.frmchg(frame1, frame2, et)
db05ebf45f0d265e8f75b26fbd7c1234d9a8b4cb
26,476
def map_popularity_score_keys(popularity):
    """
    Maps popularity score keys to be more meaningful.

    :param popularity: popularity scores of the analysis
    :return: Mapped dictionary of the scores
    """
    return {config.popularity_terms[key]: value
            for key, value in popularity.items()}
84c265e7ec6e881df878f74d5d9b9eda9d223bf3
26,477
from typing import Callable, Optional


def _get_utf16_setting() -> Callable[[Optional[bool]], bool]:
    """Closure for holding the utf16 decoding setting."""
    _utf16 = False

    def _utf16_enabled(utf16: Optional[bool] = None) -> bool:
        nonlocal _utf16
        if utf16 is not None:
            _utf16 = utf16
        return _utf16

    return _utf16_enabled
1f0caeab03047cc847d34266c1ed53eabdf01a10
26,478
def uproot_ntuples_to_ntuple_dict(uproot_ntuples, properties_by_track_type,
                                  keep_invalid_vals=False):
    """Takes in a collection of uproot ntuples and a dictionary from track
    types to the desired properties to be included; returns an ntuple
    dictionary formed by selecting properties from the ntuples and then
    concatenating them all together. Cuts any invalid values, like inf or
    nan, by default.

    Args:
        uproot_ntuples: an iterable of uproot ntuples.
        properties_by_track_type: a dictionary from track types
            (trk, matchtrk, etc.) to properties to be selected
            (eta, pt, chi2).
        keep_invalid_vals: if True, don't cut tracks with inf or nan as one
            of their values.

    Returns:
        An ntuple dict.
    """
    return ndops.add_ntuple_dicts([
        uproot_ntuple_to_ntuple_dict(
            uproot_ntuple, properties_by_track_type, keep_invalid_vals)
        for uproot_ntuple in uproot_ntuples
    ])
e4f391fc0c63e73ff320e973f624e43841a3613f
26,479
from datetime import datetime, timedelta


def get_container_sas_token(block_blob_client, container_name, blob_permissions):
    """
    Obtains a shared access signature granting the specified permissions to
    the container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param BlobPermissions blob_permissions:
    :rtype: str
    :return: A SAS token granting the specified permissions to the container.
    """
    # Obtain the SAS token for the container, setting the expiry time and
    # permissions. In this case, no start time is specified, so the shared
    # access signature becomes valid immediately.
    container_sas_token = \
        block_blob_client.generate_container_shared_access_signature(
            container_name,
            permission=blob_permissions,
            expiry=datetime.utcnow() + timedelta(days=10))

    return container_sas_token
f17623e721e84a0953565854f2e2eedfb4f8afe6
26,480
def to_one_hot(y):
    """Transform multi-class labels to binary labels.

    The output of to_one_hot is sometimes referred to by some authors as the
    1-of-K coding scheme.

    Parameters
    ----------
    y : numpy array or sparse matrix of shape (n_samples,) or
        (n_samples, n_classes)
        Target values. The 2-d matrix should only contain 0 and 1,
        representing multilabel classification. Sparse matrix can be CSR,
        CSC, COO, DOK, or LIL.

    Returns
    -------
    Y : numpy array or CSR matrix of shape [n_samples, n_classes]
        Shape will be [n_samples, 1] for binary problems.

    classes_ : class vector extracted from y.
    """
    lb = LabelBinarizer()
    lb.fit(y)
    Y = lb.transform(y)
    return (Y.base, lb.classes_)
134f4bce729c98439bdca2cd586f95d0cc9178c7
26,483
from random import randrange


def random():
    """Return a pseudo-random number in [0, 1) with 1/10000 resolution."""
    return randrange(10000) / 10000
12ab43d5e5c8a9a993f8053363e56c2acf8b0ceb
26,484
from typing import Union


def parameter_string_to_value(
    parameter_string: str,
    passthrough_estimate: bool = False,
) -> Union[float, int, str]:
    """Cast a parameter value from string to numeric.

    Args:
        parameter_string: The parameter value, as a string.
        passthrough_estimate: Whether to return `ESTIMATE` as `ESTIMATE`.
            If `False`, raises an exception if
            `parameter_string == ESTIMATE`.

    Returns:
        The parameter value, as a numeric type.
    """
    if parameter_string == ESTIMATE:
        if passthrough_estimate:
            return parameter_string
        raise ValueError('Please handle estimated parameters differently.')
    float_value = float(parameter_string)
    int_value = int(float_value)
    if int_value == float_value:
        return int_value
    return float_value
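# Hedged usage sketch (assumes the module-level ESTIMATE sentinel is
# defined): "2.0" collapses to the int 2, while "2.5" stays a float.
assert parameter_string_to_value("2.0") == 2
assert parameter_string_to_value("2.5") == 2.5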
3271acf50f5171703e16bce183d246d149d5e053
26,485
def rfsize(spatial_filter, dx, dy=None, sigma=2.):
    """
    Computes the lengths of the major and minor axes of an ellipse fit
    to an STA or linear filter.

    Parameters
    ----------
    spatial_filter : array_like
        The spatial receptive field to which the ellipse should be fit.

    dx : float
        The spatial sampling along the x-dimension.

    dy : float
        The spatial sampling along the y-dimension. If None, uses the same
        value as dx. (Default: None)

    sigma : float, optional
        Determines the size of the ellipse contour, in units of standard
        deviation of the fitted gaussian. E.g., 2.0 means a 2 SD ellipse.

    Returns
    -------
    xsize, ysize : float
        The x- and y-sizes of the ellipse fitted to the receptive field,
        at the given sigma.
    """
    if dy is None:
        dy = dx

    # get ellipse parameters
    widths = get_ellipse(spatial_filter, sigma=sigma)[1]

    # return the scaled widths
    return widths[0] * dx, widths[1] * dy
a2e184dd597c840392c05d0955dba826aa528a06
26,487
import json


def get_config_from_json(json_file):
    """
    Get the config from a json file.

    :param json_file:
    :return: config (namespace) and config (dictionary)
    """
    # parse the configurations from the config json file provided
    with open(json_file, 'r') as config_file:
        config_dict = json.load(config_file)

    # convert the dictionary to a namespace using the bunch lib
    config = Bunch(config_dict)
    config = default_values(config)

    return config, config_dict
17aec6d1d0413836f647b222681e32af1a298fbc
26,488
from pathlib import Path
from tempfile import TemporaryDirectory


def create_polarimetric_layers(import_file, out_dir, burst_prefix, config_dict):
    """Pipeline for dual-polarimetric decomposition.

    :param import_file:
    :param out_dir:
    :param burst_prefix:
    :param config_dict:
    :return:
    """
    # temp dir for intermediate files
    with TemporaryDirectory(prefix=f"{config_dict['temp_dir']}/") as temp:
        temp = Path(temp)

        # ---------------------------------------------------------------
        # 1 Polarimetric Decomposition
        # create namespace for temporary decomposed product
        out_haa = temp / f"{burst_prefix}_h"

        # create namespace for decompose log
        haa_log = out_dir / f"{burst_prefix}_haa.err_log"

        # run polarimetric decomposition
        try:
            slc.ha_alpha(import_file, out_haa, haa_log, config_dict)
        except (GPTRuntimeError, NotValidFileError) as error:
            logger.info(error)
            return None, error

        # ---------------------------------------------------------------
        # 2 Geocoding
        # create namespace for temporary geocoded product
        out_htc = temp / f"{burst_prefix}_pol"

        # create namespace for geocoding log
        haa_tc_log = out_dir / f"{burst_prefix}_haa_tc.err_log"

        # run geocoding
        try:
            common.terrain_correction(
                out_haa.with_suffix(".dim"), out_htc, haa_tc_log, config_dict
            )
        except (GPTRuntimeError, NotValidFileError) as error:
            logger.info(error)
            return None, error

        # set nans to 0 (issue from SNAP for polarimetric layers)
        for infile in list(out_htc.with_suffix(".data").glob("*.img")):
            with rasterio.open(str(infile), "r") as src:
                meta = src.meta.copy()
                array = src.read()
                array[np.isnan(array)] = 0
            with rasterio.open(str(infile), "w", **meta) as dest:
                dest.write(array)

        # ---------------------------------------------------------------
        # 3 Create an outline
        ras.image_bounds(out_htc.with_suffix(".data"))

        # move to final destination
        ard = config_dict["processing"]["single_ARD"]
        h.move_dimap(out_htc, out_dir / f"{burst_prefix}_pol", ard["to_tif"])

        # write out check file for tracking that it is processed
        with (out_dir / ".pol.processed").open("w+") as file:
            file.write("passed all tests \n")

        dim_file = out_dir / f"{burst_prefix}_pol.dim"
        return str(dim_file), None
ddfd3d9b12aefcf5f60b254ad299c59d4caca837
26,489
import math

from rdflib import Literal  # rdflib's RDF literal, not typing.Literal


def Builtin_FLOOR(expr, ctx):
    """
    http://www.w3.org/TR/sparql11-query/#func-floor
    """
    l_ = expr.arg
    return Literal(int(math.floor(numeric(l_))), datatype=l_.datatype)
495d7e2133028030e1766d0a04eb3d20f800b918
26,491
import requests


def get(target: str) -> tuple:
    """Fetches a document via HTTP/HTTPS.

    Returns a tuple containing a boolean indicating the result of the
    request, the URL we attempted to contact and, if successful, the
    response content in bytes and text format. Otherwise, returns a tuple
    containing a boolean indicating the result of the request, the URL we
    attempted to contact and the HTTP status code or exception error output
    from the request.

    :param target:
    :return: tuple
    """
    if not target.startswith('http://') and not target.startswith('https://'):
        target = 'http://{0}'.format(target)

    try:
        request = requests.get(url=target, timeout=3, verify=False)
    except Exception as e:
        return False, target, str(e)

    try:
        request.raise_for_status()
    except requests.exceptions.HTTPError as e:
        return False, target, str(e)

    if request.ok:
        return True, request.url, request.content, request.text

    return False, request.url, request.status_code
1d9d650d77776419318cbd204b722d8abdff94c5
26,492
def DOM2ET(domelem):
    """Converts a DOM node object of type element to an ElementTree Element.

    domelem: DOM node object of type element
             (domelem.nodeType == domelem.ELEMENT_NODE)

    returns an 'equivalent' ElementTree Element
    """
    # make some local variables for fast processing
    tyCDATA = domelem.CDATA_SECTION_NODE
    tyTEXT = domelem.TEXT_NODE
    tyPI = domelem.PROCESSING_INSTRUCTION_NODE
    tyCOMMENT = domelem.COMMENT_NODE
    tyELEMENT = domelem.ELEMENT_NODE
    # calculate the attributes of the domelem
    attribs = domelem.attributes
    attrs = dict((x.name, x.value)
                 for x in (attribs.item(i) for i in range(attribs.length)))
    # build the ET Element
    etelem = ET.Element(domelem.tagName, attrs)
    last = None  # to differentiate between 'text' and 'tail'
    for node in domelem.childNodes:
        nodeType = node.nodeType
        if (tyTEXT == nodeType or tyCDATA == nodeType) and node.data:
            data = node.data
            if last is None:
                etelem.text = etelem.text + data if etelem.text else data
            else:
                last.tail = last.tail + data if last.tail else data
        elif tyELEMENT == nodeType:
            last = DOM2ET(node)
            etelem.append(last)
        elif tyCOMMENT == nodeType:
            last = ET.Comment(node.data)
            etelem.append(last)
        elif tyPI == nodeType:
            last = ET.ProcessingInstruction(node.target, node.data)
            etelem.append(last)
    return etelem
b8288a2704995ec4fbe4dc1bc2805cd7658beb35
26,493
import torch


def f_score(pr, gt, beta=1, eps=1e-7, threshold=.5):
    """Dice score (also referred to as F1-score)."""
    if threshold is not None:
        pr = (pr > threshold).float()

    tp = torch.sum(gt * pr)
    fp = torch.sum(pr) - tp
    fn = torch.sum(gt) - tp

    score = ((1 + beta ** 2) * tp + eps) \
        / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps)

    return score
2c54fd24cd04ac2b41a9d5ca4bf8a7afc5e88640
26,494
def makeSatelliteDir(metainfo):
    """Make the directory name for the 'satellite' level."""
    satDir = "Sentinel-" + metainfo.satId[1]
    return satDir
dfbb43f235bc027f25fc9b624097e8f2e0dee4f9
26,495
def resolve_tagged_field(field):
    """
    Fields tagged with `swagger_field` should respect user definitions.
    """
    field_type = getattr(field, SWAGGER_TYPE)
    field_format = getattr(field, SWAGGER_FORMAT, None)
    if isinstance(field_type, list):
        # Ideally we'd use oneOf here, but OpenAPI 2.0 uses the 0.4-draft
        # jsonschema which doesn't include oneOf. Upgrading to OpenAPI 3.0
        # ought to suffice.
        return dict()
    elif field_format:
        return dict(
            type=field_type,
            format=field_format,
        )
    else:
        return dict(
            type=field_type,
        )
90f59615395350dbd0f2ff3ff5573f28e926dada
26,497
def grid_id_from_string(grid_id_str):
    """Convert a grid ID string into a tuple of ints.

    Parameters
    ----------
    grid_id_str : str
        The string grid ID representation

    Returns
    -------
    ret : tuple of ints
        A 4-length tuple representation of the dihedral id
    """
    return tuple(int(i) for i in grid_id_str.split(','))
cec058302aae701c1aa28fcb4c4a9d762efa724e
26,498
import numpy as np


def adj_to_edge_index(adj):
    """
    Convert an adjacency matrix to an edge index.

    :param adj: Original adjacency matrix
    :return: Edge index representation of the graphs
    """
    converted = []
    for d in adj:
        edge_index = np.argwhere(d > 0.).T
        converted.append(edge_index)
    return converted
e2c047a6c60bfb3ea109e8686a810749a726265f
26,499
def determine_all_layers_for_elev(std_atmos, layers, values, elevation):
    """Determine all of the layers to use for the elevation

    Args:
        std_atmos [StdAtmosInfo]: The standard atmosphere
        layers [<str>]: All the pressure layers
        values [<LayerInfo>]: All the interpolated layers information
        elevation <float>: The current elevation

    Returns:
        [PressureLayerInfo]: List of pressure layer information
    """
    s_layers = determine_base_layers_for_elev(layers=layers, values=values,
                                              elevation=elevation)

    # Determine maximum height of base layers and where the
    # standard atmosphere is greater than this
    first_index = None
    layer_index = 0
    for layer in std_atmos:
        # Check against the top base pressure layer
        if layer.hgt > s_layers[-1].hgt:
            first_index = layer_index
            break
        layer_index += 1

    second_index = first_index + 2

    '''
    If there are more than 2 layers above the highest NARR layer,
    then we need to interpolate a value between the highest NARR layer
    and the 2nd standard atmosphere layer above the NARR layers to
    create a smooth transition between the NARR layers and the standard
    upper atmosphere
    '''

    # Add an interpolated std layer
    if len(std_atmos[first_index:]) >= 3:
        # Setup for linear interpolation between the layers
        hgt = (std_atmos[second_index].hgt + s_layers[-1].hgt) / 2.0

        std_interp_factor = \
            determine_interp_factor(hgt, s_layers[-1].hgt,
                                    std_atmos[second_index].hgt)

        pressure = (s_layers[-1].pressure
                    + ((std_atmos[second_index].pressure
                        - s_layers[-1].pressure) * std_interp_factor))

        temp = (s_layers[-1].temp
                + ((std_atmos[second_index].temp
                    - s_layers[-1].temp) * std_interp_factor))

        rel_hum = (s_layers[-1].rh
                   + ((std_atmos[second_index].rh
                       - s_layers[-1].rh) * std_interp_factor))

        s_layers.append(PressureLayerInfo(hgt=hgt,
                                          pressure=pressure,
                                          temp=temp,
                                          rh=rel_hum))

    # Add the remaining standard atmosphere layers
    for layer in std_atmos[second_index:]:
        s_layers.append(PressureLayerInfo(hgt=layer.hgt,
                                          pressure=layer.pressure,
                                          temp=layer.temp,
                                          rh=layer.rh))

    return s_layers
c3e23670437efa7caa03f1027b2a59d0bd62361a
26,500
from typing import List


def remove_redundant(
    text: str,
    list_redundant_words: List[str] = S_GRAM_REDUNDANT,
) -> str:
    """Remove phrases that appear frequently and that cannot be used to
    infer skills.

    Parameters
    ----------
    text : str
        The text to clean.
    list_redundant_words : List[str], optional
        The list of phrases to remove, by default S_GRAM_REDUNDANT

    Returns
    -------
    str
        Returns the text after removing all redundant words provided in
        `list_redundant_words`.

    Examples
    --------
    >>> from SkillNer.cleaner import remove_redundant
    >>> text = "you have professional experience building React apps, you are familiar with version control using git and GitHub"
    >>> print(remove_redundant(text))
    building React apps, familiar with version control using git and GitHub
    """
    for phrase in list_redundant_words:
        text = text.replace(phrase, "")
    # use .strip() to remove extra space at the beginning/end of the text
    return text.strip()
07a3dfca84acb57b0786e17879d2ac17693c8eba
26,501
import asyncio
from unittest.mock import MagicMock


def position_controller_mock():
    """Position controller mock."""
    mock = MagicMock(spec=PositionController)
    future = asyncio.Future()
    future.set_result(None)
    mock.update_odometry = MagicMock(return_value=future)
    return mock
71e7c4a5894eb56ab99aaec03e9e569291e12e6c
26,502
def forest_overview():
    """
    The forest overview URL handler.

    :return: The forest_overview.html page
    """
    all_sensors = mysql_manager.get_all_sensors()
    for sensor_module in all_sensors:
        sensor_module['latest_measurements'] = \
            mysql_manager.get_latest_measurements_from_sensor(sensor_module['id'])
    return render_template('forest_overview.html',
                           forests=mysql_manager.get_all_forests(),
                           sensors=all_sensors)
05ffdd24a08ab6553ac175bd2dfc690bfbf1876e
26,504
def task_dev():
    """Run the main task for the project."""
    return {
        'actions': ["docker run --volume %s/:/app %s"
                    % (CONFIG["project_root"], IMAGE)]
    }
7a8f3dab41076d56717312c01a6144dfd7fac43b
26,505
import struct
from datetime import timedelta


def header_parse(header, ts_resolution):
    """Parse the header of a TOB3 frame.

    Parameters
    ----------
    header : string of binary encoded data with a length of 12 bytes
    ts_resolution : frame time resolution; multiplier for the sub-second
        part of the frame timestamp to achieve microsecond resolution.
        From header[1][5].

    Returns
    -------
    frame_ts : datetime object
    frame_rec : integer

    Notes
    -----
    Assumes a TOB3 header, which is 12 bytes.
    """
    # seconds & sub-seconds: "two four byte integers (least significant
    # byte first)"
    # seconds since the CSI epoch (1-Jan-1990)
    # sub-seconds is the number of "Frame Time Resolution" values into
    # the current second.
    # beginning record number: "four byte UNSIGNED integer stored least
    # significant byte first"
    # ...assume all are unsigned, and unpack all of them at once.
    # <<<===== Assume TOB3 format, i.e. 12-byte header, not the 8-byte TOB2
    header_tuple = struct.unpack("<3L", header)
    # timedelta(days, seconds, microseconds)
    ts = CSI_EPOCH + timedelta(
        0, header_tuple[0], header_tuple[1] * ts_resolution
    )
    return ts, header_tuple[2]
7615064961e44167a3efbca888522fc9433e4598
26,506
def shuff_par_str(shuffle=True, str_type="file"):
    """
    shuff_par_str()

    Returns a string from the shuffle parameter to print or for a filename.

    Optional args:
        - shuffle (bool): default: True
        - str_type (str): "print" for a printable string, "file" for a
                          string usable in a filename, "labels" for a label.
                          default: "file"

    Returns:
        - shuff_str (str): shuffle parameter string
    """
    if shuffle:
        if str_type == "print":
            shuff_str = ", shuffled"
        elif str_type == "file":
            shuff_str = "_shuffled"
        elif str_type == "labels":
            shuff_str = " (shuffled labels)"
        else:
            gen_util.accepted_values_error(
                "str_type", str_type, ["labels", "print", "file"])
    else:
        shuff_str = ""
    return shuff_str
956fd86c4ce458d73ebfd425e8b9430dbd027705
26,507
import numpy as np


def fft_imaginary(x: np.ndarray) -> np.ndarray:
    """
    Imaginary values of the FFT transform.

    :param x: a numeric sequence
    :return: a numeric sequence
    """
    x_fft = np.fft.fft(x)
    xt = np.imag(x_fft)
    return xt
6c9a60a160475f687fba8f624b8e06e4f63d5125
26,508
def prodtype_to_platform(prods):
    """
    Converts one or more prodtypes into a string with one or more
    <platform> elements.
    """
    if isinstance(prods, str):
        return name_to_platform(prodtype_to_name(prods))
    return "\n".join(map(prodtype_to_platform, prods))
f23d14001ac9afdf3215b5ecbbc23759708c27fd
26,509
import pickle


def train_config_setting(config, dataset):
    """Configure parameters for the training process.

    :param config: type dict: config parameter
    :param dataset: type str: dataset name
    :return: config: type dict: config parameter
    """
    # Load max shape & channels of images and labels.
    if config['read_body_identification']:
        filename_max_shape = (config['dir_dataset_info']
                              + '/max_shape_' + dataset + '_bi.pickle')
    else:
        filename_max_shape = (config['dir_dataset_info']
                              + '/max_shape_' + dataset + '.pickle')
    with open(filename_max_shape, 'rb') as fp:
        config['max_shape'] = pickle.load(fp)
    print("max shape image: ", config['max_shape']['image'])
    print("max shape label: ", config['max_shape']['label'])

    # Get the number of input and output channels.
    # config['channel_img_num']: channel count of the model input,
    # config['channel_label_num']: channel count of the model output.
    config['channel_img_num'] = config['max_shape']['image'][-1]
    config['channel_label_num'] = config['max_shape']['label'][-1]

    if config['input_channel'][dataset] is not None:
        config['channel_img_num'] = len(config['input_channel'][dataset])

    if not config['read_body_identification']:
        if config['output_channel'][dataset] is not None:
            config['channel_label_num'] = len(config['output_channel'][dataset])

        # output channel +1 if the model outputs a background channel
        # (if the stored labels have no background channel).
        # Some pretrained models have already added a background output.
        if config['model_add_background_output']:
            config['channel_label_num'] += 1

    print('channel_img,', config['channel_img_num'],
          'channel_label,', config['channel_label_num'])
    return config
ecb9fde6cf19220f4503c42345fdfaf690f2783a
26,510
def parse_bool(value):
    """Parses a boolean value from a string."""
    return _parse_value(value, BooleanField())
0291ca0a68abe2f2a0e9b92f6c931e3d5a06a69a
26,511
import pandas as pd  # assumed import; the snippet relies on pandas' read_csv


def load_dataset_X_y(dirname, opt):
    """Load training data.

    :param dirname: str, directory to load from
    :param opt: str, output data format, "pd" (pandas) or "np" (numpy)
    :return: data_X: training data
    :return: data_y: target data
    """
    # X
    input_filename = dirname + '/data_X.csv'
    df_X = pd.read_csv(input_filename, encoding='shift-jis')
    # y
    input_filename = dirname + '/data_y.csv'
    df_y = pd.read_csv(input_filename, encoding='shift-jis')
    if opt == 'pd':
        return df_X, df_y
    elif opt == 'np':
        return df_X.values, df_y.values
602008cb3c03ec64861646a9f8a1a8647b01d59a
26,512
def connect_to_es(host, port, use_auth=False):
    """
    Return a client that's connected to an Elasticsearch cluster.

    Unless running from an authorized IP, set use_auth to True so that
    credentials are based on role.
    """
    if use_auth:
        http_auth = _aws_auth()
    else:
        http_auth = None
    es = elasticsearch.Elasticsearch(
        hosts=[{"host": host, "port": port}],
        use_ssl=True,
        verify_certs=True,
        connection_class=elasticsearch.connection.RequestsHttpConnection,
        http_auth=http_auth,
        send_get_body_as="POST",
    )
    return es
db29aef8d08c46c375c68e4da6100164321495a5
26,513
from sqlalchemy import Integer, cast, select


def task_5():
    """Task 5."""
    s = select([
        student.c.name,
        student.c.surname,
        cast((student.c.stipend * 100), Integer),
    ])
    print(str(s))
    rp = connection.execute(s)
    return rp.fetchall()
3932bb3da4a443154805e708a58924d07f1d5221
26,514
def sentinelCloudScore(img):
    """
    Compute a custom cloud likelihood score for Sentinel-2 imagery.

    Parameters:
        img (ee.Image): Sentinel-2 image

    Returns:
        ee.Image: original image with added ['cloudScore'] band
    """
    im = sentinel2toa(img)

    # Compute several indicators of cloudiness and take the minimum of them.
    score = ee.Image(1)

    # Clouds are reasonably bright in the blue and cirrus bands.
    score = score.min(rescale(im, 'img.B2', [0.1, 0.5]))
    score = score.min(rescale(im, 'img.B1', [0.1, 0.3]))
    score = score.min(rescale(im, 'img.B1 + img.B10', [0.15, 0.2]))

    # Clouds are reasonably bright in all visible bands.
    score = score.min(rescale(im, 'img.B4 + img.B3 + img.B2', [0.2, 0.8]))

    # Clouds are moist.
    ndmi = im.normalizedDifference(['B8', 'B11'])
    score = score.min(rescale(ndmi, 'img', [-0.1, 0.1]))

    # However, clouds are not snow.
    ndsi = im.normalizedDifference(['B3', 'B11'])
    score = score.min(rescale(ndsi, 'img', [0.8, 0.6]))

    score = score.multiply(100).byte()
    return img.addBands(score.rename(['cloudScore']))
672fca54fb0e43d9cae51de95149a44b0ed731bc
26,515
import re


def safe_filename(name: str, file_ending: str = ".json") -> str:
    """Return a safe version of name + file_type."""
    filename = re.sub(r"\s+", "_", name)
    filename = re.sub(r"\W+", "-", filename)
    return filename.lower().strip() + file_ending
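# Illustrative usage sketch: whitespace becomes "_", remaining non-word
# characters become "-", and the result is lower-cased before the file
# ending is appended.
assert safe_filename("My Report 2023!") == "my_report_2023-.json"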
98a887788046124354676a60b1cf7d990dbbc02f
26,516
def group():
    """RESTful CRUD controller"""
    if auth.is_logged_in() or auth.basic():
        pass
    else:
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next": URL(c="msg", f="group")}))

    module = "pr"
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    # Hide unnecessary fields
    table.description.readable = table.description.writable = False

    # Do not show system groups
    response.s3.filter = (table.system == False)

    return s3_rest_controller(module, resourcename,
                              rheader=s3db.pr_rheader)
df532b93901b7c709ac21c2da4fffe1e06159f0c
26,517
def _tracking_cost(time: int = 0, state: np.ndarray = None) -> float:
    """Tracking cost function.

    The goal is to minimize the distance of the x/y position of the vehicle
    to the 'state' of the target trajectory at each time step.

    Args:
        time : Time of the simulation. Used for time-dependent cost
            functions.
        state : State of the system.

    Returns:
        cost : Real-valued cost.
    """
    dist = state[:, :2] - np.array([target_trajectory[time]])
    result = np.linalg.norm(dist, ord=2, axis=1)
    result = np.power(result, 2)
    return result
cd4f39c78687d3975e2e4d1bc0e3b8d8f8fc4b2c
26,518
def fixture_repo(repo_owner: str, repo_name: str) -> Repository:
    """Return a GitHub repository."""
    return Repository(repo_owner, repo_name)
4a2cae78bfcb0158ae8751305c7a187b368f5d5d
26,519
def get_clean(source_file):
    """Generate a clean data frame from the source file."""
    print('Reading from source...')
    df = pd.read_csv(source_file)
    print('Cleaning up source csv...')
    # Drop columns with too much missing data
    df.drop(['Meter Id', 'Marked Time', 'VIN'], axis=1, inplace=True)
    print('Import complete')
    # Drop rows missing "Make"
    return df[df['Make'].notna()]
f565dcd75faf3464b540f42cc76aafdc59f0c2d8
26,520
def populate_db():
    """Populate the db with the common pop/rock songs file."""
    songs = dm.parse_file('static/common_songs.txt')
    freq_matrix = dm.get_frequency_matrix(songs)
    model, clusters = clustering.clusterize(freq_matrix)
    clustering.save(model, MODEL_FILENAME)
    clusters_for_db = {}
    try:
        for song, cluster_id in zip(songs, clusters):
            if cluster_id in clusters_for_db:
                cluster = clusters_for_db[cluster_id]
            else:
                cluster = Cluster(name=str(cluster_id))
                clusters_for_db[cluster_id] = cluster
                db.session.add(cluster)
            template = SongTemplate(numerals=",".join(song), cluster=cluster)
            db.session.add(template)
        db.session.commit()
    except Exception as e:
        print(e)
        db.session.rollback()
        return "DB Population Failed"
    else:
        return "DB Populated with {} song(s).".format(len(songs))
87ff8fdebd398e0cff006f40ad9a69f658991eda
26,521
def magenta_on_red(string, *funcs, **additional):
    """Text color - magenta on background color - red (see sgr_combiner())."""
    return sgr_combiner(string, ansi.MAGENTA, *funcs,
                        attributes=(ansi.BG_RED,))
3b6062a6ae326766a8d44d34e2c3a4e47c232430
26,522
def create_entry(entry: Entry) -> int:
    """Create an entry in the database and return an int of its ID."""
    # Delegate to the database-layer helper; calling this function itself
    # would recurse forever. The `db.create_entry` name is an assumption
    # about the underlying storage module.
    return db.create_entry(entry.title, entry.text)
6aa2832a9bf7b81460e792c96b06a599cc512e7b
26,524
def process_document_bytes(
    project_id: str,
    location: str,
    processor_id: str,
    file_content: bytes,
    mime_type: str = DEFAULT_MIME_TYPE,
) -> documentai.Document:
    """
    Processes a document using the Document AI API.
    Takes in bytes from file reading, instead of a file path.
    """
    # The full resource name of the processor, e.g.:
    # projects/project-id/locations/location/processor/processor-id
    # You must create new processors in the Cloud Console first
    resource_name = documentai_client.processor_path(project_id, location,
                                                     processor_id)

    # Load binary data into a Document AI RawDocument object
    raw_document = documentai.RawDocument(content=file_content,
                                          mime_type=mime_type)

    # Configure the process request
    request = documentai.ProcessRequest(name=resource_name,
                                        raw_document=raw_document)

    # Use the Document AI client to process the sample form
    result = documentai_client.process_document(request=request)

    return result.document
92ae70d0e3754b76f7cc8b6b0370d53e9031b319
26,525
import numpy as np


def merge_imgs(imgs, cols=6, rows=6, is_h=True):
    """Merge a sequence of images into one large mosaic.

    :param imgs: sequence of images
    :param cols: number of columns
    :param rows: number of rows
    :param is_h: if True, fill the grid row by row, otherwise column by column
    :return: the merged image
    """
    if not imgs:
        raise Exception('[Exception] merge_imgs received no images!')
    img_shape = imgs[0].shape
    h, w, c = img_shape
    large_imgs = np.ones((rows * h, cols * w, c)) * 255  # white canvas
    large_imgs = large_imgs.astype(np.uint8)
    if is_h:
        for j in range(rows):
            for i in range(cols):
                idx = j * cols + i
                if idx > len(imgs) - 1:  # fewer frames than cells: stop
                    break
                large_imgs[(j * h):(j * h + h), (i * w):(i * w + w)] = imgs[idx]
    else:
        for i in range(cols):
            for j in range(rows):
                # column-major index (i * cols + j only works when rows == cols)
                idx = i * rows + j
                if idx > len(imgs) - 1:  # fewer frames than cells: stop
                    break
                large_imgs[(j * h):(j * h + h), (i * w):(i * w + w)] = imgs[idx]
    return large_imgs
c591c450e54aea76ea237263b3169ef4af306a96
26,526
def text_editor():
    """Solution to exercise R-2.3.

    Describe a component from a text-editor GUI and the methods that it
    encapsulates.

    ----------------------------------------------------------------------
    Solution:
    ----------------------------------------------------------------------
    The spellchecker is a common component in a text editor. Its methods
    might include:

    1. run_spellcheck()
    2. parse_text()
    3. lookup_word()
    4. underline_misspelled_word()
    """
    return True
39fd3f41cbc28d333dd5d39fc8d1967164bd7bc4
26,528
def generate_imm5(value):
    """Returns the 5-bit two's complement representation of the number."""
    if value < 0:
        # the sign bit needs to be bit number 5.
        return 0x1 << 4 | (0b1111 & value)
    else:
        return value
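# Quick sanity check (illustrative, not part of the original snippet):
# -3 in 5-bit two's complement is 0b11101 = 29; positive values pass through.
assert generate_imm5(-3) == 0b11101
assert generate_imm5(5) == 5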
72be8225d364ec9328e1bb3a6e0c94e8d8b95fb0
26,529
def CalculateChiv6p(mol):
    """
    #################################################################
    Calculation of valence molecular connectivity chi index for
    path order 6

    ---->Chiv6

    Usage:

        result=CalculateChiv6p(mol)

        Input: mol is a molecule object.

        Output: result is a numeric value
    #################################################################
    """
    return _CalculateChivnp(mol, NumPath=6)
332a5fab80beaa115366ed3a5967a7c433aa8981
26,530
import numpy as np


def _cart2sph(x, y, z):
    """A function that should operate equally well on floats and arrays,
    and involves trigonometry...a good test function for the types of
    functions in geospacepy-lite."""
    r = np.sqrt(x**2 + y**2 + z**2)
    th = np.arctan2(y, x)
    # polar angle: arctan2 of the cylindrical radius (not its square) over z
    ph = np.arctan2(np.sqrt(x**2 + y**2), z)
    return r, th, ph
fb0184c315c9b4206b4ffc5b019264162b9641d4
26,531
from typing import Optional


def get_existing_key_pair(ec2: EC2Client, keypair_name: str) -> Optional[KeyPairInfo]:
    """Get existing keypair."""
    resp = ec2.describe_key_pairs()
    keypair = next(
        (kp for kp in resp.get("KeyPairs", {}) if kp.get("KeyName") == keypair_name),
        None,
    )
    if keypair:
        LOGGER.info(
            KEYPAIR_LOG_MESSAGE,
            keypair.get("KeyName"),
            keypair.get("KeyFingerprint"),
            "exists",
        )
        return {
            "status": "exists",
            "key_name": keypair.get("KeyName", ""),
            "fingerprint": keypair.get("KeyFingerprint", ""),
        }
    LOGGER.info('keypair "%s" not found', keypair_name)
    return None
d066431f784e108e0b27a7c73f833b716cf6e879
26,532
import jinja2


def render_template(template: str, context: dict, trim_blocks=True,
                    lstrip_blocks=True, **env_kwargs):
    """One-time-use jinja environment + template rendering helper."""
    env = jinja2.Environment(
        loader=jinja2.DictLoader({'template': template}),
        trim_blocks=trim_blocks,
        lstrip_blocks=lstrip_blocks,
        **env_kwargs,
    )
    env.filters.update(get_jinja_filters())
    return env.get_template('template').render(context)
26ec504d694682d96fce0240145549f0f62c0695
26,533
def _func_to_user(uid, func):
    """
    Internal function for performing actions against a user.

    Gets the user from the database, calls the provided function, and then
    updates the user back in the database.

    Args:
        uid (string) -- The user's key
        func (function) -- activity to perform on the user record

    Returns:
        Bool -- True if the user is saved back to the database
    """
    user = FireBase.get_user(uid)
    if not user:
        _, user = create_new_user(uid)
    user = func(user)
    return FireBase.put_user(uid, user)
f3f9af52410a15f61492adbb88cb9706de2e7042
26,534
import torch


def breg_sim_divergence(K, p, q, symmetric=False):
    # NOTE: if you make changes in this function, make them in the *_stable
    # function below as well.
    """
    Compute the similarity-sensitive Bregman divergence between a pair of
    (batches of) distribution(s) p and q over an alphabet of n elements.

    Inputs:
        p [batch_size x n tensor] : Probability distributions over n elements
        q [batch_size x n tensor] : Probability distributions over n elements
        K [n x n tensor or callable] : Positive semi-definite similarity
            matrix or function
        symmetric [boolean]: Use symmetrized Bregman divergence.

    Output:
        div [batch_size x 1 tensor] i-th entry is the divergence between the
        i-th row of p and the i-th row of q
    """
    if symmetric:
        r = (p + q) / 2.

    if callable(K):
        pK = K(p)
        qK = K(q)
        if symmetric:
            rK = K(r)
    else:
        pK = p @ K
        qK = q @ K
        if symmetric:
            rK = r @ K

    if symmetric:
        rat1 = (pK, rK)
        rat2 = (qK, rK)
    else:
        rat1 = (pK, qK)

    if callable(K):  # we're dealing with an image
        sum_dims = (-2, -1)
    else:
        sum_dims = -1

    if symmetric:
        t1 = (p * (torch.log(rat1[0]) - torch.log(rat1[1]))).sum(sum_dims)
        t2 = (r * (rat1[0] / rat1[1])).sum(sum_dims)
        t3 = (q * (torch.log(rat2[0]) - torch.log(rat2[1]))).sum(sum_dims)
        t4 = (r * (rat2[0] / rat2[1])).sum(sum_dims)
        return (2 + t1 - t2 + t3 - t4) / 2.
    else:
        t1 = (p * (torch.log(rat1[0]) - torch.log(rat1[1]))).sum(sum_dims)
        t2 = (q * (rat1[0] / rat1[1])).sum(sum_dims)
        return 1 + t1 - t2
f668b4af511c1689ec66a34f386c756b7b0fdb0e
26,535
def manage_pages(request):
    """Dispatches to the first page or to the form to add a page (if there
    is no page yet).
    """
    try:
        page = Page.objects.all()[0]
        url = reverse("lfs_manage_page", kwargs={"id": page.id})
    except IndexError:
        url = reverse("lfs_add_page")
    return HttpResponseRedirect(url)
5b50821c516cd450dde5dba9f83c27687e63c8ef
26,536
def get_network_info():
    """
    Sends a NETWORK_INTERFACES_INFO command to the server.

    Returns a dict with:
    - boolean status
    - list of str ifs (network interfaces detected by RaSCSI)
    """
    command = proto.PbCommand()
    command.operation = proto.PbOperation.NETWORK_INTERFACES_INFO

    data = send_pb_command(command.SerializeToString())
    result = proto.PbResult()
    result.ParseFromString(data)
    ifs = result.network_interfaces_info.name
    return {"status": result.status, "ifs": ifs}
0d0517eeee5260faa2b12de365f08b8d962eb0c8
26,537
def jp_clean_year(string, format):
    """Parse the date and return the year."""
    return getYearFromISODate(iso8601date(string, format))
33b1a9121485abaff93c4ec979c3c3f63d7d1252
26,540
def _epsilon(e_nr, atomic_number_z):
    """For the Lindhard factor."""
    return 11.5 * e_nr * (atomic_number_z ** (-7 / 3))
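# Illustrative usage sketch: the dimensionless Lindhard epsilon for a 10 keV
# nuclear recoil on germanium (Z = 32); the value depends only on E_nr and Z.
eps_ge = _epsilon(10.0, 32)  # roughly 0.035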
8c6115b77ce3fb4956e5596c400c347e68382502
26,541
def compute_heatmap(cnn_model, image, pred_index, last_conv_layer):
    """
    Construct our gradient model by supplying (1) the inputs to our
    pre-trained model, (2) the output of the (presumably) final 4D layer in
    the network, and (3) the output of the softmax activations from the
    model.
    """
    gradModel = Model(
        inputs=[cnn_model.inputs],
        outputs=[cnn_model.get_layer(last_conv_layer).output,
                 cnn_model.output]
    )

    # record operations for automatic differentiation
    with tf.GradientTape() as tape:
        # cast the image tensor to a float-32 data type, pass the image
        # through the gradient model, and grab the loss associated with the
        # specific class index
        inputs = tf.cast(image, tf.float32)
        last_conv_layer_output, preds = gradModel(inputs)
        # class_channel = preds[:, pred_index]
        class_channel = preds

    # use automatic differentiation to compute the gradients
    grads = tape.gradient(class_channel, last_conv_layer_output)

    # This is a vector where each entry is the mean intensity of the
    # gradient over a specific feature map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # We multiply each channel in the feature map array by "how important
    # this channel is" with regard to the top predicted class, then sum all
    # the channels to obtain the heatmap class activation
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, we also normalize the heatmap between 0 & 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    heatmap = heatmap.numpy()
    return heatmap
2ee24d643de319588307913eb5f15edbb4a8e386
26,542
import git


def did_git_push_succeed(push_info: git.remote.PushInfo) -> bool:
    """Check whether a git push succeeded.

    A git push succeeded if it was not "rejected" or "remote rejected",
    and if there was not a "remote failure" or an "error".

    Args:
        push_info: push info
    """
    return push_info.flags & GIT_PUSH_FAILURE == 0
ff9ea6856767cda79ed6a6a82b5cadacb1318370
26,543
import six


def find_only(element, tag):
    """Return the only subelement with the given tag(s)."""
    if isinstance(tag, six.string_types):
        tag = [tag]
    found = []
    for t in tag:
        found.extend(element.findall(t))
    assert len(found) == 1, 'expected one <%s>, got %d' % (tag, len(found))
    return found[0]
fd4ec56ba3e175945072caec27d0438569d01ef9
26,544
async def mongoengine_invalid_document_exception_handler(request, exc):
    """
    Error handler for InvalidDocumentError.

    Logs the InvalidDocumentError detected and returns the appropriate
    message and details of the error.
    """
    logger.exception(exc)
    return JSONResponse(
        Response(success=False, error_code=422, message=str(exc)).dict()
    )
cb9722a6619dfcdaeebcc55a02ae091e54b26207
26,545
def gen_2Dsersic(size, parameters, normalize=False, show2Dsersic=False,
                 savefits=False, verbose=True):
    """
    Generating a 2D sersic with specified parameters using astropy's generator

    --- INPUT ---
    size          The dimensions of the array to return. Expects [ysize, xsize].
                  The 2D gauss will be positioned in the center of the array
    parameters    List of the sersic parameters. Expects
                  [amplitude, effective radius, Sersic index, ellipticity, rotation angle]
                  The amplitude is the central surface brightness within the
                  effective radius (Ftot/2 is within r_eff).
                  The rotation angle should be in degrees, counterclockwise
                  from the positive x-axis.
    normalize     Normalize the profile so sum(profile img) = 1.
    show2Dsersic  Save plot of generated 2D Sersic
    savefits      Save generated profile to fits file
    verbose       Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    size       = [30,40]
    size       = [31,41]
    parameters = [1,6.7,1.7,1.0-0.67,17.76-90]
    sersic2D   = tu.gen_2Dsersic(size,parameters,show2Dsersic=True,savefits=True)

    size       = [30,30]
    size       = [31,31]
    parameters = [1,5,1.7,0.5,45]
    sersic2D   = tu.gen_2Dsersic(size,parameters,show2Dsersic=True,savefits=True)
    """
    x, y = np.meshgrid(np.arange(size[1]), np.arange(size[0]))

    if float(size[0] / 2.) - float(int(size[0] / 2.)) == 0.0:
        ypos = np.asarray(size[0]) / 2.0 - 0.5
    else:
        ypos = np.floor(np.asarray(size[0]) / 2.0)

    if float(size[1] / 2.) - float(int(size[1] / 2.)) == 0.0:
        xpos = np.asarray(size[1]) / 2.0 - 0.5
    else:
        xpos = np.floor(np.asarray(size[1]) / 2.0)

    model = Sersic2D(amplitude=parameters[0], r_eff=parameters[1],
                     n=parameters[2], ellip=parameters[3],
                     theta=parameters[4] * np.pi / 180.,
                     x_0=xpos, y_0=ypos)
    sersic2D = model(x, y)

    if normalize:
        sersic2D = sersic2D / np.sum(sersic2D)

    if show2Dsersic:
        plt.clf()
        savename = './Generated2Dsersic.pdf'
        if verbose:
            print(' - Displaying resulting image of 2D sersic in ' + savename)
        centerdot = sersic2D * 0.0
        center = [int(sersic2D.shape[0] / 2.), int(sersic2D.shape[1] / 2.)]
        # centerdot[center[1],center[0]] = 2.0*np.max(sersic2D)
        print(' - Center of Sersic (pixelized - marked in plot): ' + str(center))
        plt.imshow(sersic2D, interpolation=None, origin='lower')
        plt.colorbar()
        plt.title('Generated 2D Sersic')
        plt.savefig(savename)
        plt.clf()

    if savefits:
        fitsname = './Generated2Dsersic.fits'
        hduimg = afits.PrimaryHDU(sersic2D)
        hdus = [hduimg]
        hdulist = afits.HDUList(hdus)              # turn header into hdulist
        hdulist.writeto(fitsname, overwrite=True)  # write fits file
        if verbose:
            print(' - Saved image of shifted profile to ' + fitsname)

    return sersic2D
8202ef9d79cc7ccb42899a135645244ecd4fc541
26,546
import torch


def dqn(agent, env, brain_name, n_episodes=2500, max_t=1000, eps_start=1.0,
        eps_end=0.01, eps_decay=0.999, train=True):
    """Deep Q-Learning.

    Params
    ======
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting value of epsilon, for epsilon-greedy
            action selection
        eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for
            decreasing epsilon
    """
    # list containing scores from each episode
    scores = []
    # keep only the last 100 scores
    scores_window = deque(maxlen=100)
    eps = eps_start if train else 0.0
    for i_episode in range(1, n_episodes + 1):
        # reset the environment
        env_info = env.reset(train_mode=train)[brain_name]
        # get the current state
        state = env_info.vector_observations[0]
        score = 0
        for _ in range(max_t):
            # select an action
            action = agent.act(state, eps)
            # send the action to the environment
            env_info = env.step(action)[brain_name]
            # get the next state
            next_state = env_info.vector_observations[0]
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            if train:
                agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        # save the most recent score
        scores_window.append(score)
        scores.append(score)
        # decrease epsilon
        eps = max(eps_end, eps_decay * eps)
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(
            i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(
                i_episode, np.mean(scores_window)))
        if train and np.mean(scores_window) >= 13.0:
            print('\nEnv solved in {:d} episodes!\tAverage Score: {:.2f}'
                  .format(i_episode - 100, np.mean(scores_window)))
            torch.save(agent.qnetwork_local.state_dict(),
                       'checkpoint_dqn.pth')
            break
    return scores
a1c1715451c8613866871c5e51e423d3a67e6928
26,547
import datetime


def get_activity_stats_subprocess(
    data_full: list[ActivitiesUsers], data_cp: list[ActivitiesUsers]
) -> DestinyActivityOutputModel:
    """Run in an anyio subprocess on another thread since this might be slow."""
    result = DestinyActivityOutputModel(
        full_completions=0,
        cp_completions=0,
        kills=0,
        precision_kills=0,
        deaths=0,
        assists=0,
        time_spend=datetime.timedelta(seconds=0),
        fastest=None,
        fastest_instance_id=None,
        average=None,
    )

    # save some stats for each activity. needed because a user can
    # participate with multiple characters in an activity
    # key: instance_id
    activities_time_played: dict[int, datetime.timedelta] = {}
    activities_total: list[int] = []
    activities_completed: list[int] = []

    # loop through all results
    for activity_stats in data_cp + data_full:
        result.kills += activity_stats.kills
        result.precision_kills += activity_stats.precision_kills
        result.deaths += activity_stats.deaths
        result.assists += activity_stats.assists
        result.time_spend += datetime.timedelta(seconds=activity_stats.time_played_seconds)

        # register all activity completions
        if activity_stats.activity_instance_id not in activities_total:
            if bool(activity_stats.completed):
                activities_total.append(activity_stats.activity_instance_id)

    for activity_stats in data_full:
        # register the full activity completions (with all chars)
        if activity_stats.activity_instance_id not in activities_completed:
            if bool(activity_stats.completed):
                activities_completed.append(activity_stats.activity_instance_id)

        # register the activity duration (once, same for all chars)
        if activity_stats.activity_instance_id not in activities_time_played:
            activities_time_played[activity_stats.activity_instance_id] = datetime.timedelta(seconds=0)
            activities_time_played[activity_stats.activity_instance_id] += datetime.timedelta(
                seconds=activity_stats.activity_duration_seconds
            )

    result.full_completions = len(activities_completed)
    result.cp_completions = len(activities_total) - result.full_completions

    # make sure the fastest / average activity was completed
    activities_time_played = {
        activity_id: time_played
        for activity_id, time_played in activities_time_played.items()
        if activity_id in activities_completed
    }

    # only do that if they actually played an activity though
    if activities_time_played:
        result.fastest_instance_id = min(activities_time_played, key=activities_time_played.get)
        result.fastest = activities_time_played[result.fastest_instance_id]
        result.average = sum(
            activities_time_played.values(), datetime.timedelta(seconds=0)
        ) / len(activities_time_played)

    return result
c7ef7f93e0aeefe659676bac98944fbf88eaabe7
26,548
def ma_cache_nb(close: tp.Array2d, windows: tp.List[int], ewms: tp.List[bool], adjust: bool) -> tp.Dict[int, tp.Array2d]: """Caching function for `vectorbt.indicators.basic.MA`.""" cache_dict = dict() for i in range(len(windows)): h = hash((windows[i], ewms[i])) if h not in cache_dict: cache_dict[h] = ma_nb(close, windows[i], ewms[i], adjust=adjust) return cache_dict
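A plain-Python sketch of the caching idea above, with no vectorbt or Numba assumed: each (window, ewm) pair is hashed so duplicated parameter combinations are computed only once.

windows, ewms = [10, 20, 10], [False, False, False]
cache = {}
for w, e in zip(windows, ewms):
    h = hash((w, e))
    if h not in cache:
        cache[h] = (w, e)  # stands in for the ma_nb(...) computation
print(len(cache))  # 2: the repeated (10, False) pair triggers only one computation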
ae2547ba2c300386cc9d7a9262c643a305ca987f
26,549
import sys
import subprocess as subp


def get_AllVolumes(controller, secondary=None):
    """Run the SMcli command 'show allVolumes' on the controller and return
    the output to the calling function. The primary controller (a) is
    mandatory, but the secondary controller (b) is optional."""
    # Check which controller is reachable.
    if not ping_node(controller):
        # The ping to the primary controller failed. Try the secondary,
        # if it was specified.
        if secondary:
            if ping_node(secondary):
                # Good, the secondary controller is accessible. We can continue.
                controller = secondary
            else:
                # Bad, the secondary controller is not reachable either.
                print("Both controllers: " + controller + " and " + secondary + " are unreachable. Aborting...")
                sys.exit(STATE_UNKNOWN)
        else:
            print("Primary controller unreachable and no secondary was given. Aborting...")
            sys.exit(STATE_UNKNOWN)
    # OK. If we are here, we have one reachable controller to query.
    command = "SMcli " + controller + " -c 'show allVolumes;'"
    pOut = subp.Popen(command, shell=True, stdout=subp.PIPE, universal_newlines=True)
    text = pOut.communicate()[0]
    text = text.strip().split('\n')
    return text
32125adec4811d1813e3e804c05c647e1de1e011
26,550
def fetch_slot_freq_num(timestamp, slot, freq_nums):
    """Find the GLONASS frequency number in freq_nums and return it.

    Parameters
    ----------
    timestamp : datetime.datetime
    slot : int
        GLONASS satellite number
    freq_nums : dict
        { slot_1: { datetime_1: freq-num, ... } }

    Returns
    -------
    freq_num : int

    Raises
    ------
    FetchSlotFreqNumError
        if the frequency number for the slot cannot be found.
    """
    freq_num = None

    try:
        slot_freq_nums = freq_nums[slot]
    except KeyError:
        msg = "Can't find slot {} in the freq_nums dict.".format(slot)
        raise FetchSlotFreqNumError(msg)

    dates_times = sorted(slot_freq_nums.keys())
    for ts in dates_times:
        if timestamp >= ts and timestamp.date() == ts.date():
            freq_num = slot_freq_nums[ts]

    if freq_num is not None:
        return freq_num

    timestamp_date = timestamp.date()
    first_date = dates_times[0].date()

    if timestamp_date == first_date:
        freq_num = slot_freq_nums[dates_times[0]]
        return freq_num
    else:
        msg = "Can't find GLONASS frequency number for {}.".format(slot)
        raise FetchSlotFreqNumError(msg)
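A usage sketch with a hypothetical freq_nums dict matching the docstring's shape:

import datetime

freq_nums = {1: {datetime.datetime(2020, 1, 1, 0, 0): -7,
                 datetime.datetime(2020, 1, 1, 12, 0): 3}}
ts = datetime.datetime(2020, 1, 1, 14, 30)
print(fetch_slot_freq_num(ts, 1, freq_nums))  # 3: the latest same-date entry not after ts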
835a71def86478cbc7327b3873c203ad6936276d
26,551
def _apply_function(x, fname, **kwargs):
    """Apply the `fname` activation function to x element-wise.

    # Arguments
        x: Functional object.
        fname: name of the activation function to apply.

    # Returns
        A new functional object.
    """
    validate_functional(x)
    fun = get_activation(fname)
    lmbd = []
    for i in range(len(x.outputs)):
        # `t` is the layer's input tensor; it deliberately avoids shadowing `x`.
        lmbd.append(
            Lambda(
                lambda t: fun(t, **kwargs),
                name=graph_unique_name("{}".format(fname))
            )
        )

    Functional = x.get_class()
    res = Functional(
        inputs=x.inputs.copy(),
        outputs=_apply_operation(lmbd, x),
        layers=lmbd
    )
    return res
a968dcea7ac95f154c605ba34737c13198b62d77
26,552
def get_url(city):
    """ Gets the full URL for querying the weather of the given city.
        You need to obtain your API key from OpenWeather, then assign
        your key to my_api_key below.
    """
    my_api_key = 'fda7542e1133fa0b1b312db624464cf5'
    unit = 'metric'  # To get temperature in Celsius
    weather_query = 'http://api.openweathermap.org/data/2.5/weather?q='
    full_query = weather_query + city + '&units=' + unit + '&APPID=' + my_api_key
    # This full_query results in something like
    # 'http://api.openweathermap.org/data/2.5/weather?q=Nairobi&units=metric&APPID=YOUR-KEY-HERE'
    return full_query
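A usage sketch, assuming the third-party requests package is installed; the ['main']['temp'] field is where the OpenWeather current-weather endpoint reports temperature:

import requests

response = requests.get(get_url('Nairobi'))
data = response.json()
print(data['main']['temp'])  # current temperature in Celsius, per the metric unit above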
9454a9ad4a2baacb7988216c486c497a0253056c
26,554
def login() -> ApiResponse: """Login a member""" member_json = request.get_json() email = member_json["email"] password = member_json["password"] member = MemberModel.find_by_email(email) if member and member.verify_password(password) and member.is_active: identity = member_schema.dump(member) access_token = create_access_token(identity=identity, fresh=True) _refresh_token = create_refresh_token(identity=identity) add_token_to_database([access_token, _refresh_token], member.id) return ( jsonify( { "access_token": access_token, "refresh_token": _refresh_token, "member": identity, }, ), 200, ) abort(401, description=MEMBER_401)
73122e8a52a420ed20c7f11eb63be1317a818c1d
26,555
from typing import Sequence
from typing import Union
from typing import Iterator


def rhythmic_diminution(seq: Sequence, factor: Union[int, float]) -> Iterator[Event]:
    """Return a new stream of events in which all the durations of the
    source sequence have been divided by the given factor.
    """
    # Event is the module's event container (pitches, duration)
    return (Event(e.pitches, e.duration/factor) for e in seq.events)
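A minimal sketch using stand-ins for the Event and sequence types the function expects; the real classes live elsewhere in the module, so a namedtuple and a SimpleNamespace are assumed here:

from collections import namedtuple
from types import SimpleNamespace

Event = namedtuple('Event', ['pitches', 'duration'])  # stand-in for the real Event class
seq = SimpleNamespace(events=[Event([60], 1.0), Event([64], 0.5)])
print([e.duration for e in rhythmic_diminution(seq, 2)])  # [0.5, 0.25]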
a2ce2982f487e228594ecf992b8d5ed01799f9e2
26,556
def get_line_offset(): """Return number of characters cursor is offset from margin. """ user_pos = emacs.point() emacs.beginning_of_line() line_start = emacs.point() emacs.goto_char(user_pos) return user_pos - line_start
6c2144699eec32f7f22991e8f12c4304024ca93f
26,557
def test_scheduler_task(scheduler: Scheduler) -> None: """ scheduler_task decorator should allow custom evaluation. """ @scheduler_task("task1", "redun") def task1( scheduler: Scheduler, parent_job: Job, sexpr: SchedulerExpression, x: int ) -> Promise: return scheduler.evaluate(x, parent_job=parent_job).then(lambda x2: x2 + 1) expr = task1(1) assert isinstance(task1, SchedulerTask) assert isinstance(expr, SchedulerExpression) assert expr.task_name == "redun.task1" assert expr.args == (1,) assert scheduler.run(task1(1)) == 2 assert scheduler.run(task1(task1(1))) == 3 # SchedulerTasks should be first-class values. assert isinstance(task1, Value) # SchedulerTasks should support partial application. expr2 = task1.partial() assert isinstance(expr2, PartialTask) assert isinstance(expr2(1), SchedulerExpression) assert scheduler.run(task1.partial()(1)) == 2
4b129d25a719019323905888649d8cec0e40513e
26,558
def verse(day):
    """Produce the verse for the given day"""

    ordinal = [
        'first',
        'second',
        'third',
        'fourth',
        'fifth',
        'sixth',
        'seventh',
        'eighth',
        'ninth',
        'tenth',
        'eleventh',
        'twelfth',
    ]

    gifts = [
        'A partridge in a pear tree.',
        'Two turtle doves,',
        'Three French hens,',
        'Four calling birds,',
        'Five gold rings,',
        'Six geese a laying,',
        'Seven swans a swimming,',
        'Eight maids a milking,',
        'Nine ladies dancing,',
        'Ten lords a leaping,',
        'Eleven pipers piping,',
        'Twelve drummers drumming,',
    ]

    # First part of the verse is a constant
    day_verse = [
        f'On the {ordinal[day - 1]} day of Christmas,',
        'My true love gave to me,'
    ]

    # extend takes in a list as an arg, expands it,
    # and adds each element to the new list
    # day is used to slice gifts, which is then reversed
    # to count down
    # gifts[:3] would return 'A partridge in a pear tree.',
    # 'Two turtle doves,', 'Three French hens,'; this slice
    # is then reversed and added to day_verse
    day_verse.extend(reversed(gifts[:day]))

    # if there are multiple days, the verse needs to end with
    # 'And a partridge...' not just 'A'
    if day > 1:
        day_verse[-1] = 'And ' + day_verse[-1].lower()

    return '\n'.join(day_verse)

    # My first attempt below: using dicts adds readability,
    # as does the if block further down, but lists are the
    # better fit here since slicing removes the need for
    # the for loop

    # ordinal = {
    #     1 : 'first',
    #     2 : 'second',
    #     3 : 'third',
    #     4 : 'fourth',
    #     5 : 'fifth',
    #     6 : 'sixth',
    #     7 : 'seventh',
    #     8 : 'eighth',
    #     9 : 'ninth',
    #     10 : 'tenth',
    #     11 : 'eleventh',
    #     12 : 'twelfth',
    # }

    # gifts = {
    #     1 : 'partridge in a pear tree.',
    #     2 : 'Two turtle doves,',
    #     3 : 'Three French hens,',
    #     4 : 'Four calling birds,',
    #     5 : 'Five gold rings,',
    #     6 : 'Six geese a laying,',
    #     7 : 'Seven swans a swimming,',
    #     8 : 'Eight maids a milking,',
    #     9 : 'Nine ladies dancing,',
    #     10 : 'Ten lords a leaping,',
    #     11 : 'Eleven pipers piping,',
    #     12 : 'Twelve drummers drumming,',
    # }

    # day_verse = [
    #     f'On the {ordinal[day]} day of Christmas,',
    #     f'My true love gave to me,'
    # ]

    # for n in range(day, 0, -1):
    #     if day > 1 and n > 1:
    #         day_verse.append(f'{gifts[n]}')
    #     elif day > 1 and n == 1:
    #         day_verse.append(f'And a {gifts[n]}')
    #     else:
    #         day_verse.append(f'A {gifts[n]}')

    # return '\n'.join(day_verse)
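Usage sketch:

print(verse(2))
# On the second day of Christmas,
# My true love gave to me,
# Two turtle doves,
# And a partridge in a pear tree.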
027cedf0b1c2108e77e99610b298e1019629c880
26,559
import uuid


async def invite_accept_handler(
    invite_id: uuid.UUID = Form(...),
    current_user: models.User = Depends(get_current_user),
    db_session=Depends(yield_db_session_from_env),
) -> data.GroupUserResponse:
    """
    Accept an invite request. If the user is verified, adds the user to the
    desired group and marks the invitation as accepted.

    - **invite_id** (uuid): Invite ID to group
    """
    try:
        invite = actions.get_invite(db_session, invite_id)
        if not invite.active:
            raise HTTPException(status_code=400, detail="Invite is not active")
    except actions.InviteNotFound:
        raise HTTPException(
            status_code=404, detail="Invitation with provided id not found"
        )

    try:
        group = actions.get_group(db_session, group_id=invite.group_id)
        free_space = actions.get_user_limit(db_session, group, 1)
        if not free_space:
            raise HTTPException(
                status_code=403, detail="Space for users has been exhausted"
            )
    except actions.GroupNotFound:
        raise HTTPException(status_code=404, detail="No group with that id")

    if invite.invited_email is not None:
        if current_user.email != invite.invited_email:
            raise HTTPException(
                status_code=400, detail="You are not allowed to use this invite link"
            )
        else:
            user = actions.get_user(session=db_session, email=invite.invited_email)
            if not user.verified:
                raise HTTPException(status_code=400, detail="User is not verified")

    actions.update_invite(db_session, invite.id, active=False)

    try:
        group_user_response = actions.set_user_in_group(
            session=db_session,
            group_id=invite.group_id,
            current_user_type=data.Role.owner,
            current_user_autogenerated=False,
            user_type=invite.user_type,
            username=current_user.username,
            email=current_user.email,
        )
    except actions.UserInvalidParameters:
        raise HTTPException(status_code=400, detail="Invalid user email")
    except actions.UserNotFound:
        raise HTTPException(status_code=404, detail="No user with that email")
    except actions.NoPermissions:
        raise HTTPException(
            status_code=403,
            detail="You have no permission to change roles in group",
        )

    return group_user_response
4faf22d3f3f9c59d040961b25b1f1660a002f9bd
26,561
def read_current_labels(project_id, label_history=None):
    """Combine label history with prior labels.

    Combines the label info in the dataset with the label history
    stored in the project file.
    """

    # read the asreview data
    as_data = read_data(project_id)

    # use label history from the project file if none is given
    if label_history is None:
        label_history = read_label_history(project_id)

    # get the labels in the imported dataset
    labels = as_data.labels

    # make a list of NA labels if None
    if labels is None:
        labels = np.full(len(as_data), LABEL_NA, dtype=int)

    # update labels with label history
    label_idx = [idx for idx, incl in label_history]
    label_incl = [incl for idx, incl in label_history]

    # create a pandas series such that the index can be used
    labels_s = pd.Series(labels, index=as_data.df.index)
    labels_s.loc[label_idx] = label_incl

    return np.array(labels_s, dtype=int)
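A small sketch of the index-aligned .loc update idiom used above, with made-up record ids and labels, and -1 standing in for LABEL_NA:

import numpy as np
import pandas as pd

labels_s = pd.Series(np.full(5, -1), index=[10, 11, 12, 13, 14])
labels_s.loc[[11, 13]] = [1, 0]     # ids resolve through the index, not by position
print(labels_s.tolist())            # [-1, 1, -1, 0, -1]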
5676be04acc8dd2ab5490ec1223585707d65d834
26,562
import collections


def get_mindiff(d1, d2, d3):
    """
    This function determines which of the difference lists d1, d2, and d3
    (corresponding to checkboxes 3, 5, and 7) holds the minimum difference
    for each element, and counts the number of repetitions.
    """
    min_diff = []
    for i, _ in enumerate(d1):
        diffs_list = [d1[i], d2[i], d3[i]]
        md = min(diffs_list)
        if md == d1[i]:
            m_diff = 3
        elif md == d2[i]:
            m_diff = 5
        elif md == d3[i]:
            m_diff = 7
        min_diff.append(m_diff)
    counter = collections.Counter(min_diff)
    return min_diff, counter
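A usage sketch with made-up difference lists:

d1, d2, d3 = [0.1, 0.5], [0.2, 0.3], [0.4, 0.6]
min_diff, counter = get_mindiff(d1, d2, d3)
print(min_diff)  # [3, 5]: checkbox 3 wins the first slot, checkbox 5 the second
print(counter)   # Counter({3: 1, 5: 1})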
d2bf03b31712a15dbee45dbd44f3f937a3fd4a1a
26,563
def mode(lyst):
    """Returns the mode of a list of numbers."""
    # Obtain the set of unique numbers and their
    # frequencies, saving these associations in
    # a dictionary
    theDictionary = {}
    for number in lyst:
        freq = theDictionary.get(number, None)
        if freq is None:
            # number entered for the first time
            theDictionary[number] = 1
        else:
            # number already seen, increment its freq
            theDictionary[number] = freq + 1
    # Find the mode by obtaining the maximum freq
    # in the dictionary and determining its key
    if len(theDictionary) == 0:
        return 0
    else:
        theMaximum = max(theDictionary.values())
        for key in theDictionary:
            if theDictionary[key] == theMaximum:
                return key
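Usage sketch:

print(mode([1, 2, 2, 3, 2]))  # 2
print(mode([]))               # 0, the function's fallback for an empty list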
bccf7955741ad4258dea7686559b9cb0bf934ab4
26,564
def project_permissions(project_id):
    """View and modify a project's permissions.
    """
    # does the user have access to the project?
    project = helpers.get_object_or_exception(Project, Project.id == project_id, exceptions.ProjectNotFoundException)
    if project not in [proj for proj in current_user.projects if not proj.deleted]:
        return app.login_manager.unauthorized()

    form = forms.ProjectPermissionsForm(prefix="permissions")

    ownerships = ProjectsUsers.query.filter_by(project=project).all()

    form.selection.choices = []
    for ownership in ownerships:
        form.selection.add_choice(ownership.id, ownership)

    if request.method == "POST" and form.validate():
        selected_rels = request.form.getlist("permissions-selection")
        ownerships = [ProjectsUsers.query.get(id) for id in selected_rels]
        if request.form["action"] == form.DELETE:
            for ownership in ownerships:
                form.selection.delete_choice(ownership.id, ownership)
                ownership.delete()

        if request.form["action"] == form.UPDATE:
            role = int(request.form["permissions-update_permissions"])
            for ownership in ownerships:
                ownership.role = role
                ownership.save(False)
            db.session.commit()

        if request.form["action"] == form.CREATE:
            email = request.form["permissions-new_collaborator"]
            role = int(request.form["permissions-create_permissions"])

            user = User.query.filter(User.email == email).one()
            rel = user.add_project(project=project, role=role)
            form.selection.add_choice(rel.id, rel)

    return render_template("project_permissions.html", project=project, form=form)
45fda8e95394e8271ee03e9cc65bdbe03dc18f8b
26,566
def fetch_community_crime_data(dpath=None):
    """Downloads community crime data. This function removes missing values,
    extracts features, and returns numpy arrays

    Parameters
    ----------
    dpath: str | None
        specifies path to which the data files should be downloaded. default: None

    Returns
    -------
    X: numpy array (n_samples x n_features)
    y: numpy array (n_samples,)
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError('The pandas module is required for reading the '
                          'community crime dataset')

    dpath = get_data_home(data_home=dpath)
    file_name = 'communities.csv'
    fname = op.join(dpath, file_name)
    if not (op.isdir(dpath) and op.exists(fname)):
        base_url = (
            "http://archive.ics.uci.edu/ml/machine-learning-databases"
        )
        url = base_url + "/" + "communities/communities.data"
        urlretrieve(url, fname, _reporthook)

    # Read in the file
    df = pd.read_csv(fname, header=None)

    # Remove missing values
    df.replace('?', np.nan, inplace=True)
    df.dropna(inplace=True, axis=1)
    df.dropna(inplace=True, axis=0)
    df.reset_index(inplace=True, drop=True)

    # Extract predictors and target from data frame
    X = np.array(df[df.keys()[range(3, 102)]])
    y = np.array(df[127])
    return X, y
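A toy sketch of the '?'-to-NaN cleanup step performed above:

import numpy as np
import pandas as pd

df = pd.DataFrame([['1', '?'], ['2', '3']])
df.replace('?', np.nan, inplace=True)
df.dropna(axis=1, inplace=True)  # drops the column that contained the '?'
print(df.shape)                  # (2, 1)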
c85e74c647d70497479fabb43d78887bb55751f0
26,567