content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def json_serialize(item):
    """
    A function similar to L{dumps}.
    """
    def helper(unknown):
        if isinstance(unknown, PlatedElement):
            return unknown._asJSON()
        else:
            raise TypeError("{input} not JSON serializable"
                            .format(input=unknown))
    return dumps(item, default=helper)
00ea0aa060eaf2335148402770eba173592e1a65
25,764
def get_sop_instance_uid(dicom):
    """
    Return the SOP Instance UID.

    Args:
        dicom: pydicom Dataset
    """
    return dicom.SOPInstanceUID
18991a81be2e143aecaf59bdf52e51ab2f9a621b
25,765
def remove_filter():
    """
    Removes a filter from the process

    Returns
    -------------
    dictio
        Success, or not
    """
    # reads the session
    session = request.args.get('session', type=str)
    # reads the requested process name
    process = request.args.get('process', default='receipt', type=str)
    if check_session_validity(session):
        user = get_user_from_session(session)
        if lh.check_user_log_visibility(user, process):
            # reads the specific filter to remove
            filter = request.json['filter']
            # reads all the filters
            all_filters = request.json['all_filters']
            new_handler = lh.get_handler_for_process_and_session(process, session).remove_filter(filter, all_filters)
            lh.set_handler_for_process_and_session(process, session, new_handler)
            return jsonify({"status": "OK"})
    return jsonify({"status": "FAIL"})
887c985694fd11e5adeff23e001d9b94f7879ff5
25,766
def flag_data(vis_windows, flag_windows):
    """ Returns flag_windows untouched """
    def _flag_data(vis_windows, flag_windows):
        return flag_windows

    return da.blockwise(_flag_data, _WINDOW_SCHEMA,
                        vis_windows, _WINDOW_SCHEMA,
                        flag_windows, _WINDOW_SCHEMA,
                        dtype=vis_windows.dtype)
55f3df8fa6ca30de2cd6c9bac5f55eb4fff0eb36
25,767
def preprocess_static_feature( static_feature_dict, imputation_strategy="median", standardize=False ): """Preprocessing for a dictionary of static features. Args: static_feature_dict: Dictionary of float values. imputation_strategy: "median" or "mean" or "most_frequent" imputation. standardize: If true, apply MinMax scaling. Returns: Preprocessing static features array and the fitted scaler. """ if imputation_strategy not in ["mean", "median", "most_frequent", "constant"]: raise ValueError( "Given imputation strategy {} is not supported. " "Use value from ['mean', 'median', 'most_frequent', 'constant'] " "as an imputation strategy.".format(imputation_strategy)) location_keys = list(static_feature_dict.keys()) static_values = np.empty([len(location_keys), 1]) for idx, location_key in enumerate(location_keys): if static_feature_dict[location_key] is not None: static_values[idx] = static_feature_dict[location_key] else: static_values[idx] = np.nan if imputation_strategy: # Check whether all elements are NaNs. if np.all(np.isnan(static_values)): static_values = np.zeros_like(static_values) else: imputer = SimpleImputer( missing_values=np.nan, strategy=imputation_strategy) static_values = imputer.fit_transform(static_values) scaler = None if standardize: # Check if there are at least two elements with different values to avoid # issues. if np.unique(static_values).size < 2: static_values = np.zeros_like(static_values) else: scaler = preprocessing.MinMaxScaler() static_values = scaler.fit_transform(static_values) static_feature_dict = {} for ind, location_key in enumerate(location_keys): static_feature_dict[location_key] = static_values[ind, 0] return static_feature_dict, scaler
a2bfbfc21afcd1ab03f86f82e72fff480746542e
25,768
from typing import List def from_emso(platform_code: str, parameters: List[str]=[], start_time: str='', end_time: str='', depth_min: float=None, depth_max: float=None, user: str='', password: str='', size: int=10, token: str='' ) -> WaterFrame: """ Get a WaterFrame with the data of the EMSO API (api.emso.eu). Parameters ---------- user: str Login for the EMSO ERIC API. password: str Password for the EMSO ERIC API. token: str Token for the EMSO ERIC API. platform_code: str Data filtered by platform_code parameters: List[str] List of parameters to get data start_time: str First date of the measurement end_time: str Last date of the measurement depth_min: float Minimum depth of the measurement depth_max: float Maximum depth of the measurement size: int Number of values Returns ------- wf: WaterFrame object """ # Login to EMSO emso = EMSO(user=user, password=password, token=token) # Get metadata metadata_list = emso.get_metadata(platform_codes=[platform_code]) # Get data complete_data = DataFrame() for parameter in parameters: data_list = emso.get_data(platform_codes=[platform_code], parameters=[parameter], start_time=start_time, end_time=end_time, depth_min=depth_min, depth_max=depth_max, size=size, sort='asc') if data_list: data = pd.DataFrame(data_list) data = data.rename( columns = { 'time': 'TIME', 'time_qc': 'TIME_QC', 'value': parameter, 'value_qc': f'{parameter}_QC', 'depth': 'DEPTH', 'depth_qc': 'DEPTH_QC' }) del data['parameter'] del data['metadata_id'] del data['platform_code'] del data['institution'] del data['area'] del data['long_name'] del data['units'] del data['location'] del data['location_qc'] data['TIME'] = pd.to_datetime(data['TIME']) data.set_index(['DEPTH', 'TIME'], inplace=True) data = data.astype(float) complete_data = complete_data.append(data) wf = WaterFrame() if metadata_list: wf.metadata = metadata_list[0] wf.data = complete_data.copy() for parameter in wf.parameters: find = False for metadata_parameter in wf.metadata['parameters']: try: acronym = metadata_parameter.split(' - ')[0].strip() long_name = metadata_parameter.split(' - ')[1].strip().split('[')[0].strip() units = metadata_parameter.split(' - ')[1].strip().split('[')[1][:-1] if parameter == acronym: wf.vocabulary[acronym] = {'long_name': long_name, 'units': units} find = True break except: pass if not find: wf.vocabulary[parameter] = {'long_name': '', 'units': ''} return wf
d8ac4520a3364e92d5e52768e12211989cdc876b
25,770
def _get_all_pivots(Ao, number_of_subsamples):
    """
    A dummy case where we return all the subsamples.
    """
    return np.arange(1, len(number_of_subsamples))
886a32ac34f0eb3acafa15917258e87b68b7323c
25,771
def clang_find_var(tu, name, ts, namespace=None, filename=None, onlyin=None):
    """Find the node for a given var."""
    assert isinstance(name, basestring)
    kinds = (CursorKind.ENUM_DECL,)
    decls = clang_find_decls(tu, name, kinds=kinds, onlyin=onlyin,
                             namespace=namespace)
    decls = list(set(c.get_definition() or c for c in decls))  # Use definitions if available
    if len(decls) == 1:
        return decls[0]
    # Nothing found, time to complain
    where = clang_where(namespace, filename)
    if not decls:
        raise ValueError("var '{0}' could not be found{1}".format(name, where))
    else:
        raise ValueError("var '{0}' found more than once ({1} times){2}".format(
            name, len(decls), where))
cacdf6426353f99ed66f051c7d118706dc76f134
25,772
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
    """Returns whether a test should be run on an instance's internal IP.

    Based on the command line flag --ip_addresses. Internal IP addresses are
    used when:

    * --ip_addresses=BOTH or --ip_addresses=INTERNAL
    * --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
      internal IP.

    Args:
      sending_vm: VirtualMachine. The client.
      receiving_vm: VirtualMachine. The server.

    Returns:
      Whether a test should be run on an instance's internal IP.
    """
    return (FLAGS.ip_addresses in (IpAddressSubset.BOTH,
                                   IpAddressSubset.INTERNAL) or
            (FLAGS.ip_addresses == IpAddressSubset.REACHABLE and
             sending_vm.IsReachable(receiving_vm)))
961fc7343d6c3712d62fd1897f5897c49f6ec66d
25,773
def shn_get_crud_string(tablename, name):
    """ Get the CRUD strings for a table """
    crud_strings = s3.crud_strings.get(tablename, s3.crud_strings)
    not_found = s3.crud_strings.get(name, None)
    return crud_strings.get(name, not_found)
106aba2b964b43d0afec45fa09cb68798bcc6a11
25,774
def default_invalid_token_callback(error_string):
    """
    By default, if an invalid token attempts to access a protected endpoint,
    we return the error string for why it is not valid with a 422 status code

    :param error_string: String indicating why the token is invalid
    """
    return jsonify({config.error_msg_key: error_string}), 422
70674a70a7b35838b154d2405e991c2653d02d8a
25,775
def bf_generate_dataplane(snapshot=None, extra_args=None):
    # type: (Optional[str], Optional[Dict[str, Any]]) -> str
    """Generates the data plane for the supplied snapshot.

    If no snapshot argument is given, uses the last snapshot initialized."""
    return bf_session.generate_dataplane(snapshot=snapshot,
                                         extra_args=extra_args)
4134208bfef878e38f0428f8e8d830a7cc7b9377
25,776
def k_correction_pl(redshift, a_nu):
    """Calculate the k-correction for a power law spectrum with spectral
    index (per frequency) a_nu.

    :param redshift: Cosmological redshift of the source
    :type redshift: float
    :param a_nu: Power law index (per frequency)
    :type a_nu: float
    :return: K-correction
    :rtype: float
    """
    # Hogg 1999, eq. 27
    return -2.5 * (1. + a_nu) * np.log10(1. + redshift) * units.mag
e8fb84f3b98f49d4f1bb904eee61916f4a4bc3de
25,777
def make_points_image(pts, mask, radius=5): """ Create label image from physical space points Creates spherical points in the coordinate space of the target image based on the n-dimensional matrix of points that the user supplies. The image defines the dimensionality of the data so if the input image is 3D then the input points should be 2D or 3D. ANTsR function: `makePointsImage` Arguments --------- pts : numpy.ndarray input powers points mask : ANTsImage mask defining target space radius : integer radius for the points Returns ------- ANTsImage Example ------- >>> import ants >>> import pandas as pd >>> mni = ants.image_read(ants.get_data('mni')).get_mask() >>> powers_pts = pd.read_csv(ants.get_data('powers_mni_itk')) >>> powers_labels = ants.make_points_image(powers_pts.iloc[:,:3].values, mni, radius=3) """ powers_lblimg = mask * 0 npts = len(pts) dim = mask.dimension if pts.shape[1] != dim: raise ValueError('points dimensionality should match that of images') for r in range(npts): pt = pts[r,:] idx = tio.transform_physical_point_to_index(mask, pt.tolist() ).astype(int) in_image = (np.prod(idx <= mask.shape)==1) and (len(np.where(idx<0)[0])==0) if ( in_image == True ): if (dim == 3): powers_lblimg[idx[0],idx[1],idx[2]] = r + 1 elif (dim == 2): powers_lblimg[idx[0],idx[1]] = r + 1 return utils.morphology( powers_lblimg, 'dilate', radius, 'grayscale' )
dfb94ca6a80e315571c53c1c4b5377f35eec771c
25,778
def map_replacements(): """ create a map of what resources are replaced by others. This is a tree. """ isreplacedby = {} # isreplacedby[x] is the number of things that are replaced by x replaces = {} # replaces[x] are the number of things that x replaces. for r in BaseResource.objects.all(): if r.metadata and r.metadata.relations: for s in r.metadata.relations.filter(type='isReplacedBy'): uri = s.value if uri.startswith('http://www.hydroshare.org/resource/'): rid = uri[-32:] # r.short_id "is replaced by" rid if r.short_id in isreplacedby: isreplacedby[r.short_id].append(rid) else: isreplacedby[r.short_id] = [rid] # rid "replaces" r.short_id: # replaces[rid] is the things rid replaces. if rid in replaces: replaces[rid].append(r.short_id) else: replaces[rid] = [r.short_id] return isreplacedby, replaces
faf4e63bd902325d9c0563e60db0bb1348ed3e38
25,779
def order_basemaps(key, out):
    """check the API key and then order the basemaps to update the select list"""

    # checking the key validity
    validate_key(key, out)

    out.add_msg(cm.planet.mosaic.load)

    # authenticate to planet
    planet.client = api.ClientV1(api_key=planet.key)

    # get the basemap names
    # to use when Planet decides to update its API; until then I manually retrieve the mosaics
    # mosaics = planet.client.get_mosaics().get()['mosaics']
    url = planet.client._url("basemaps/v1/mosaics")
    mosaics = (
        planet.client._get(url, api.models.Mosaics, params={"_page_size": 1000})
        .get_body()
        .get()["mosaics"]
    )

    # filter the mosaics in 3 groups
    bianual, monthly, other, res = [], [], [], []
    for m in mosaics:
        name = m["name"]
        type_, short = mosaic_name(name)
        if type_ == "ANALYTIC_MONTHLY":
            monthly.append({"text": short, "value": name})
        elif type_ == "ANALYTIC_BIANUAL":
            bianual.append({"text": short, "value": name})
        elif type_ == "OTHER":
            other.append({"text": short, "value": name})

    # fill the results with the found mosaics
    if len(bianual):
        res += [{"header": "NICFI bianual"}] + bianual
    if len(monthly):
        res += [{"header": "NICFI monthly"}] + monthly
    if len(other):
        res += [{"header": "other"}] + other

    out.add_msg(cm.planet.mosaic.complete, "success")

    print(mosaics)

    return res
491296f3231e093817119cd4ed72f2c90b2e02d8
25,780
def compute_iou(box1, box2, yxyx=False): """Calculates the intersection of union between box1 and box2. Args: box1: a `Tensor` whose shape is [..., 4] and represents the coordinates of boxes in x_center, y_center, width, height. box2: a `Tensor` whose shape is [..., 4] and represents the coordinates of boxes in x_center, y_center, width, height. yxyx: `bool`, whether or not box1, and box2 are in yxyx format. Returns: iou: a `Tensor` whose shape is [...] and value represents the intersection over union. Raises: ValueError: If the last dimension of either box1 or box2 is not 4. """ # Get box corners with tf.name_scope('iou'): if not yxyx: box1 = xcycwh_to_yxyx(box1) box2 = xcycwh_to_yxyx(box2) b1mi, b1ma = tf.split(box1, 2, axis=-1) b2mi, b2ma = tf.split(box2, 2, axis=-1) intersect_mins = tf.math.maximum(b1mi, b2mi) intersect_maxes = tf.math.minimum(b1ma, b2ma) intersect_wh = tf.math.maximum(intersect_maxes - intersect_mins, tf.zeros_like(intersect_mins)) intersection = tf.reduce_prod( intersect_wh, axis=-1) # intersect_wh[..., 0] * intersect_wh[..., 1] box1_area = tf.math.abs(tf.reduce_prod(b1ma - b1mi, axis=-1)) box2_area = tf.math.abs(tf.reduce_prod(b2ma - b2mi, axis=-1)) union = box1_area + box2_area - intersection iou = intersection / (union + 1e-7) iou = tf.clip_by_value(iou, clip_value_min=0.0, clip_value_max=1.0) return iou
47c9f9d6a10cc35984640c177ff49d637cf9a656
25,782
from typing import Union
import warnings


def ensure_pyspark_df(spark_session: SparkSession, df: Union[pandasDF, sparkDF]):
    """Method for checking dataframe type for each onData() call from a RunBuilder."""
    if not isinstance(df, sparkDF):
        warnings.warn(
            "WARNING: You passed in a Pandas DF, so we will be using our experimental utility to "
            "convert it to a PySpark DF."
        )
        df = PandasConverter.pandasDF_to_pysparkDF(spark_session, df)
    return df
0662665f93791640fed18d2615563e71d0abe1c3
25,783
import datetime


def roundTime(dt=None, roundTo=60):
    """
    Round a datetime object to any time lapse in seconds

    dt : datetime.datetime object, default now.
    roundTo : Closest number of seconds to round to, default 1 minute.

    Author: Thierry Husson 2012 - Use it as you want but don't blame me.

    Example: roundTo=30*60 - 30 minutes
    """
    if dt is None:
        dt = datetime.datetime.now()
    seconds = (dt.replace(tzinfo=None) - dt.min).seconds
    rounding = (seconds + roundTo / 2) // roundTo * roundTo
    return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)
0c949a0c69e2a9db38cff6e83b299d022b095ad3
25,784
def sample_recipe(user, **params):
    """Create and return a sample recipe"""
    recipe_defaults = {
        'title': 'simple ricepi shot',
        'time_minutes': 10,
        'price': 5.0
    }
    recipe_defaults.update(params)

    return Recipe.objects.create(user=user, **recipe_defaults)
8142b277f193ad76d3bb6d287963c6699001c636
25,785
def read_nitf_offsets(filename): """Read NITF fields relevant to parsing SICD SICD (versions 0.3 and above) is stored in a NITF container. NITF is a complicated format that involves lots of fields and configurations possibilities. Fortunately, SICD only really uses a small, specific portion of the NITF format. This function extracts only the few parts of the NITF metadata necessary for reading a SICD NITF file. """ # We have to open as binary, since there is some binary data in the file. # Python doesn't seem to let us read just part of the file as utf-8. with open(filename, mode='rb') as fid: # Read NITF file header if fid.read(9).decode('ascii') != "NITF02.10": # Check format raise(IOError('SICD files must be NITF version 2.1')) fid.seek(354) # Offset to first field of interest hl = np.uint32(fid.read(6)) # File header length numi = np.uint32(fid.read(3)) # Number of image segments img_segment_subhdr_lengths = np.zeros(numi, 'uint64') img_segment_data_lengths = np.zeros(numi, 'uint64') nitf = {} # Offset to image segment data from beginning of file (in bytes) nitf['img_segment_offsets'] = np.zeros(numi, 'uint64') # Number of rows in each image segment (in case data is spread across # multiple image segments) nitf['img_segment_rows'] = np.zeros(numi, 'uint32') # Number of columns in each image segment (in case data is spread # across multiple image segments) nitf['img_segment_columns'] = np.zeros(numi, 'uint32') for i in range(numi): img_segment_subhdr_lengths[i] = np.uint64(fid.read(6)) nitf['img_segment_offsets'][i] = ( hl + np.sum(img_segment_subhdr_lengths) + np.sum(img_segment_data_lengths)) img_segment_data_lengths[i] = np.uint64(fid.read(10)) segment_length = np.uint64(fid.read(3)) if segment_length > 0: raise(IOError('SICD does not allow for graphics segments.')) segment_length = np.uint64(fid.read(3)) if segment_length > 0: raise(IOError('SICD does not allow for reserved extension segments.')) numt = np.uint64(fid.read(3)) text_segment_subhdr_lengths = np.zeros(numt, 'uint64') text_segment_data_lengths = np.zeros(numt, 'uint64') for i in range(numt): text_segment_subhdr_lengths[i] = np.uint64(fid.read(4)) text_segment_data_lengths[i] = np.uint64(fid.read(5)) numdes = np.uint32(fid.read(3)) # Number of data extension segments des_subhdr_lengths = np.zeros(numdes, 'uint64') des_data_lengths = np.zeros(numdes, 'uint64') for i in range(numdes): # Length of data extension segment subheader des_subhdr_lengths[i] = np.uint32(fid.read(4)) # Length of data extension segment data des_data_lengths[i] = np.uint32(fid.read(9)) nitf['des_lengths'] = des_data_lengths nitf['des_offsets'] = ( hl + np.sum(img_segment_subhdr_lengths) + np.sum(img_segment_data_lengths) + np.sum(text_segment_subhdr_lengths) + np.sum(text_segment_data_lengths) + np.cumsum(des_subhdr_lengths) + # Doesn't work on older version of NumPy due to an unsafe cast # np.cumsum(np.insert(des_data_lengths[:-1], 0, 0)) # This should work in all versions of numpy: np.cumsum(np.hstack((np.uint64(0), des_data_lengths[:-1])))) # Get number of rows for each image segment from image segment headers next_img_subhdr_offset = hl for i in range(numi): fid.seek(next_img_subhdr_offset) # Jump to ith image segment fid.seek(333, 1) # Jump to number of rows field nitf['img_segment_rows'][i] = np.uint32(fid.read(8)) nitf['img_segment_columns'][i] = np.uint32(fid.read(8)) next_img_subhdr_offset = ( next_img_subhdr_offset + img_segment_subhdr_lengths[i] + img_segment_data_lengths[i]) return nitf
e2e8bb3ee6b32cf8964b1c13b4e8584cc7fd9917
25,786
from datetime import datetime


def dttime(ts):
    """
    Convert a unix timestamp to a datetime object.

    :param ts: unix timestamp
    :type ts: float
    :returns: datetime.datetime object
    :rtype: datetime.datetime
    """
    return datetime.fromtimestamp(ts)
7a3691df61f7fad641445b7b84ea88ae39f6dbb9
25,787
import copy def split_import(sc, node, alias_to_remove): """Split an import node by moving the given imported alias into a new import. Arguments: sc: (scope.Scope) Scope computed on whole tree of the code being modified. node: (ast.Import|ast.ImportFrom) An import node to split. alias_to_remove: (ast.alias) The import alias node to remove. This must be a child of the given `node` argument. Raises: errors.InvalidAstError: if `node` is not appropriately contained in the tree represented by the scope `sc`. """ parent = sc.parent(node) parent_list = None for a in ('body', 'orelse', 'finalbody'): if hasattr(parent, a) and node in getattr(parent, a): parent_list = getattr(parent, a) break else: raise errors.InvalidAstError('Unable to find list containing import %r on ' 'parent node %r' % (node, parent)) idx = parent_list.index(node) new_import = copy.deepcopy(node) new_import.names = [alias_to_remove] node.names.remove(alias_to_remove) parent_list.insert(idx + 1, new_import) return new_import
e83f2af8af108f3512e539ab50a8e264ccceb24f
25,788
def load_trace(path):
    """Load the trace located in path.

    Args:
        path (string): Path to the LTTng trace folder.

    Returns:
        babeltrace.TraceCollection: a collection of one trace.
    """
    trace_collection = bt.TraceCollection()
    trace_collection.add_trace(path, 'ctf')
    return trace_collection
eed21b6d3ac62104e9661c26cd6d996768562e89
25,789
def get_repo_paths(config_file_path):
    """
    Get a list of repository paths.

    Arguments:
        config_file_path (str): Path to the config file.

    Raises:
        (ConfigFileError): Raised if there was an error opening, reading or
            parsing through the config file.

    Returns:
        (list<str>): A list of repository paths.
    """
    config = read_yml(config_file_path)
    if config is None:
        raise ConfigFileError()
    return list(config['repos'].keys())
2b79d55d83a40689206213097c1ddb8ed3535e69
25,790
async def get_bosswins_rank(conn: asyncpg.Connection, user_id: int) -> int:
    """Returns the rank in bosswins for the player given"""
    psql = """
        WITH ranks AS (
            SELECT ROW_NUMBER() OVER (ORDER BY bosswins DESC) AS rank,
                   user_id, user_name, bosswins
            FROM players
        )
        SELECT rank, user_id, user_name, bosswins
        FROM ranks
        WHERE user_id = $1
        LIMIT 1;
    """
    return await conn.fetchval(psql, user_id, timeout=0.2)
f87746fd3e77b6e41922bc9efbe2e9a2fead8b09
25,791
def detect_objects_yolo(imgs, tensors): """This function makes use of multiprocessing to make predictions on batch. Parameters ---------- imgs : list-like of images tensors : dict Contains tensors needed for making predictions. Returns ------- boxes: tuple Tuple of length `n_images` containing list of boxes for each image. scores: tuple classes: tuple Note that this object already converts label index to label (e.g from 1 to "phone"). """ yolo_net = tensors["yolo_net"] boxes_data = pool.map(lambda img: return_predict(yolo_net, img), imgs) boxes, scores, classes = list(zip(*boxes_data)) return np.array(boxes), np.array(scores), np.array(classes)
41a511e2fea6cb5abe449f995245ba3a76ae38f2
25,792
def append_id(endpoint, _id):
    """ append '_id' to endpoint if provided """
    if _id is not None:
        return '/'.join([endpoint.rstrip('/'), _id])
    return endpoint
60586a70bc8b9c9b10c1d54f6810c4528c5c0dec
25,793
import time import statistics def records() -> dict: """ Displays TJ's all time bests. """ records = cube.load_file("records") times, people = records["records"], records["people"] refresh = False if "wca_token" in flask.session and "ion_token" in flask.session: me = cube.api_call("wca", "me")["me"] year = cube.api_call("ion", "profile")["graduation_year"] if [me["url"], me["name"], year] not in people: records["people"].append([me["url"], me["name"], year]) # New person added refresh = True cube.dump_file(records, "records") if refresh or time.time() - records["time"] > cube.CONFIG["time"]: cube.update_records() (sing_rank, avg_rank), kinch_rank = cube.get_ranks() cube.dump_file({"sor_single": sing_rank, "sor_average": avg_rank, "kinch": kinch_rank}, "wca/ranks") return {"times": times, "events": cube.EVENTS, "icons": cube.ICONS, "DNF": statistics.DNF, "ranks": cube.RANKS}
c711634dcd7e06de481fc169d538b03735396cf0
25,794
import re def get_info_media(title: str, ydl_opts=None, search_engine=None, result_count=1): """ :param title: :param ydl_opts: :param search_engine: :param result_count: :return: """ if ydl_opts is None: ydl_opts = { # 'format': 'best[ext!=wav]/best', 'quiet': False, 'ignoreerrors': True, 'noplaylist': True, } if re.match(regex_link, title): url = title else: url = None with youtube_dl.YoutubeDL(ydl_opts) as ydl: ydl.cache.remove() if url: try: info = ydl.extract_info(url, download=False) except DownloadError: print('DownloadError') return False else: if search_engine: info = ydl.extract_info(f"{search_engine}{result_count}:{title}", download=False) else: try: info = ydl.extract_info(f"ytsearch{result_count}:{title}", download=False) except DownloadError: try: info = ydl.extract_info(f"scsearch{result_count}:{title}", download=False) except DownloadError: print('DownloadError') return False if not info: return if info['entries']: return info['entries'] else: return info
03f8be75da183a818961e4fa2c08ac554dac5841
25,795
def export_post(request, style, format=-1): """ :param request: :param style: :param format: :return: """ try: payload = request.get_json(force=True) # post data in json except: payload = dict(request.form) # post data in form encoding if not payload: return {'error': 'no information received'}, 400 if 'bibcode' not in payload: return {'error': 'no bibcode found in payload (parameter name is `bibcode`)'}, 400 if 'sort' in payload: sort = read_value_list_or_not(payload, 'sort') else: sort = 'date desc, bibcode desc' bibcodes = payload['bibcode'] if format == -1: current_app.logger.info('received request with bibcodes={bibcodes} to export in {style} style using sort order={sort}'. format(bibcodes=','.join(bibcodes), style=style, sort=sort)) else: current_app.logger.info('received request with bibcodes={bibcodes} to export in {style} style with output format {format} using sort order={sort}'. format(bibcodes=','.join(bibcodes), style=style, format=format, sort=sort)) # if in the test mode, return test solr data if current_app.config['EXPORT_SERVICE_TEST_BIBCODE_GET'] == bibcodes: return solrdata.data, 200 return get_solr_data(bibcodes=bibcodes, fields=default_solr_fields(), sort=sort, encode_style=adsFormatter().native_encoding(format)), 200
7d26bffd25c7557453906ba9438b893bc957223f
25,796
def unpack_literal_map_to_sdk_object(literal_map, type_map=None):
    """
    :param flytekit.models.literals.LiteralMap literal_map:
    :param dict[Text, flytekit.common.types.base_sdk_types.FlyteSdkType] type_map:
        Type map directing unpacking.
    :rtype: dict[Text, T]
    """
    type_map = type_map or {}
    return {k: get_sdk_value_from_literal(v, sdk_type=type_map.get(k, None))
            for k, v in literal_map.literals.items()}
1c5de0c99d7e43c0012bdc0ce97c549e0888f4be
25,797
import requests


def get_data(user: str, num_last: int) -> int:
    """Fetch the follower count, print the change since the last value and
    return the new value; retry up to 3 times and return False if every
    attempt fails."""
    # error=None
    global proxies
    for i in range(3):
        try:
            num_this = requests.get(
                'https://cdn.syndication.twimg.com/widgets/followbutton/info.json?screen_names=' + user,
                proxies=proxies, timeout=(10, 30)).json()[0]['followers_count']
        # except requests.exceptions.ConnectTimeout:
        #     error=requests.exceptions.ConnectTimeout
        # except requests.exceptions.ProxyError:
        #     error=requests.exceptions.ProxyError
        # except requests.exceptions.ConnectionError:
        #     error=requests.exceptions.ConnectionError
        except:
            # error=sys.exc_info()[0]
            continue
        else:
            print(num_this if num_last == 0
                  else " " if num_this == num_last
                  else ((PLUS if num_this > num_last else MINU)
                        + str(abs(num_this - num_last)).rjust(6 - LENGTH_OF_SIGN)),
                  end=" | ")
            return num_this
    print(" error ", end=" | ")
    return False
c72a8b4272e7432cfa81ace7344dba469082bcdd
25,798
def _init_basemap(border_colour): """Initializes basemap. :param border_colour: Colour (in any format accepted by matplotlib) of political borders. :return: narr_row_limits: length-2 numpy array of (min, max) NARR rows to plot. :return: narr_column_limits: length-2 numpy array of (min, max) NARR columns to plot. :return: axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. :return: basemap_object: Instance of `mpl_toolkits.basemap.Basemap`. """ (narr_row_limits, narr_column_limits ) = nwp_plotting.latlng_limits_to_rowcol_limits( min_latitude_deg=MIN_LATITUDE_DEG, max_latitude_deg=MAX_LATITUDE_DEG, min_longitude_deg=MIN_LONGITUDE_DEG, max_longitude_deg=MAX_LONGITUDE_DEG, model_name=nwp_model_utils.NARR_MODEL_NAME) _, axes_object, basemap_object = nwp_plotting.init_basemap( model_name=nwp_model_utils.NARR_MODEL_NAME, first_row_in_full_grid=narr_row_limits[0], last_row_in_full_grid=narr_row_limits[1], first_column_in_full_grid=narr_column_limits[0], last_column_in_full_grid=narr_column_limits[1]) plotting_utils.plot_coastlines( basemap_object=basemap_object, axes_object=axes_object, line_colour=border_colour) plotting_utils.plot_countries( basemap_object=basemap_object, axes_object=axes_object, line_colour=border_colour) plotting_utils.plot_states_and_provinces( basemap_object=basemap_object, axes_object=axes_object, line_colour=border_colour) plotting_utils.plot_parallels( basemap_object=basemap_object, axes_object=axes_object, bottom_left_lat_deg=-90., upper_right_lat_deg=90., parallel_spacing_deg=PARALLEL_SPACING_DEG) plotting_utils.plot_meridians( basemap_object=basemap_object, axes_object=axes_object, bottom_left_lng_deg=0., upper_right_lng_deg=360., meridian_spacing_deg=MERIDIAN_SPACING_DEG) return narr_row_limits, narr_column_limits, axes_object, basemap_object
aeec84f7973972abd93bc344fc7f2028d216c4b5
25,800
def makeFigure(): """Get a list of the axis objects and create a figure""" ax, f = getSetup((9, 12), (5, 2)) cellTarget = "Treg" epitopesDF = pd.DataFrame(columns={"Classifier", "Epitope"}) posCorrs1, negCorrs = CITE_RIDGE(ax[4], cellTarget) for x in posCorrs1: epitopesDF = epitopesDF.append(pd.DataFrame({"Classifier": 'CITE_RIDGE', "Epitope": [x]})) possCorrs2 = distMetricScatt(ax[6:8], cellTarget, 10, weight=False) for x in possCorrs2: epitopesDF = epitopesDF.append(pd.DataFrame({"Classifier": 'distMetricF', "Epitope": [x]})) possCorrs3 = distMetricScatt(ax[8:10], cellTarget, 10, weight=True) for x in possCorrs3: epitopesDF = epitopesDF.append(pd.DataFrame({"Classifier": 'distMetricT', "Epitope": [x]})) print(epitopesDF) #do for Cite_SVM #put these three in data frame, get abundance and affinity data #use minSelect function #Feed into bispec binding model #optimize using minSelect return f
e0c4cf34630171e489f195d14a3d29f8d0fa10e1
25,801
def get_data_frame(binary_tables, all_inputs): """ Gets a data frame that needs QM reduction and further logic. Also removes the all_inputs from the DataFrame. :param binary_tables: contains a tables with True and False outputs. :param all_inputs: columns :return: Pandas DataFrame. """ columns = all_inputs + [KEYWORDS[OUTPUT]] df = from_dict_to_data_frame(binary_tables, columns) for an_input in all_inputs: df = df.sort([an_input], ascending=[1]) #import time #start = time.time() best_df = get_dataframe_duplicates(df, an_input) #print('get_dataframe_duplicates for {}: {}'.format(an_input, time.time() - start)) # only takes unique values. variables = set(helper.get_variables(best_df, an_input)) #start = time.time() df = add_empty_columns(df, variables) #print('add_empty_column for {}: {}'.format(an_input, time.time() - start)) #start = time.time() df = add_boolean_table(df, variables, an_input) #print('add_boolean_table for {}: {}'.format(an_input, time.time() - start)) # before dropping all_inputs columns, will record their range. input_ranges = {} for the_input in all_inputs: input_ranges[the_input] = [min(list(df[the_input])), max(list(df[the_input]))] df.drop(all_inputs, inplace=True, axis=1) df.drop_duplicates(keep='first', inplace=True) return df, input_ranges
e40a914f19242ee82cd0c3e0f706f97f6c71e9fa
25,802
def compute_shift_delay_samples(params_delays,vector_seconds_ref,freq_sample,seconds_frame,pair_st_so,data_type=0,\ front_time=None,cache_rates=[],cache_delays=[]): """ Compute number of samples to shift signal (always positive since reference station is closest to source). Parameters ---------- params_delays delay model ini file. vector_seconds_ref list of floats with seconds for delay information (start time polynomials). freq_sample sampling frequency [Hz]. seconds_frame seconds corresponding to the frame to be processed. station_id corresponds to id number in stations ini file. source_id [default 0], see limitations. pair_st_so data_type 0 for real, 1 for complex. front_time frontier time, that is, time corresponding to the start of the integration period (takes priority over the seconds of the frame) cache_rates temporary information on delays to avoid reprocessing of the input files (see lib_ini_files.get_rates_delays()). cache_delays list with [seconds_fr_nearest,pair_st_so,delay] from previous computation. Returns ------- shift_int number of sample components to offset (integer delay). delay total delay (=freq_sample*(shift_int+fractional_sample_delay)). fractional_sample_delay error_out 0 if sucess, -1 if error (e.g. accumulation period not found in ini file) cache_rates updated cache_rates (input). Notes ----- | | **Limitations:** | | Currently assuming single source (source_id always zero | | | **TO DO:** | | Simplify code, no need for params_delays nor find_nearest(). """ #print("ft: "+str(front_time)) seconds_fr_nearest=get_seconds_fr_front(front_time,vector_seconds_ref,seconds_frame) #seconds_fr_nearest=front_time if front_time is None: seconds_fr_nearest=find_nearest_seconds(vector_seconds_ref,seconds_frame) if seconds_fr_nearest>=-1: #rel_epoch=DELAY_MODEL_REL_MARKER+str(seconds_fr_nearest) #found_delay=1 try: #delay = float(get_param_serial(params_delays,pair_st_so,rel_epoch)) [delay,cache_delays] = get_delay_cache(seconds_fr_nearest,pair_st_so,params_delays,cache_delays) except ValueError: #found_delay=0 print("zM\tWarning: could not get delay for pair "+pair_st_so+", "+str(seconds_fr_nearest)+", skipping frame") seconds_fr_nearest=-2 if seconds_fr_nearest>=-1: [shift_int,fractional_sample_delay]=get_delay_shift_frac(delay,freq_sample,data_type) error_out=0 else: shift_int=-1 delay=-1 fractional_sample_delay=-1 error_out=1 return([shift_int,delay,fractional_sample_delay,error_out,cache_delays])
5cf35bd1b2187d107f8fd5809dd7db9822d5c110
25,803
from typing import Dict from typing import Tuple def classification_metrics_function( logits: jnp.ndarray, batch: base_model.Batch, target_is_onehot: bool = False, metrics: base_model.MetricNormalizerFnDict = _CLASSIFICATION_METRICS, ) -> Dict[str, Tuple[float, int]]: """Calculates metrics for the classification task. Currently we assume each metric_fn has the API: ```metric_fn(logits, targets, weights)``` and returns an array of shape [batch_size]. We also assume that to compute the aggregate metric, one should sum across all batches, then divide by the total samples seen. In this way we currently only support metrics of the 1/N sum f(inputs, targets). Note, the caller is responsible for dividing by the normalizer when computing the mean of each metric. Args: logits: Output of model in shape [batch, length, num_classes]. batch: Batch of data that has 'label' and optionally 'batch_mask'. target_is_onehot: If the target is a one-hot vector. metrics: The classification metrics to evaluate. The key is the name of the metric, and the value is the metrics function. Returns: A dict of metrics, in which keys are metrics name and values are tuples of (metric, normalizer). """ if target_is_onehot: one_hot_targets = batch['label'] else: one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1]) weights = batch.get('batch_mask') # batch_mask might not be defined # This psum is required to correctly evaluate with multihost. Only host 0 # will report the metrics, so we must aggregate across all hosts. The psum # will map an array of shape [n_global_devices, batch_size] -> [batch_size] # by summing across the devices dimension. The outer sum then sums across the # batch dim. The result is then we have summed across all samples in the # sharded batch. evaluated_metrics = {} for key, val in metrics.items(): evaluated_metrics[key] = model_utils.psum_metric_normalizer( (val[0](logits, one_hot_targets, weights), val[1](logits, one_hot_targets, weights))) return evaluated_metrics
defcde9e70822721a866a5af62c472386251b0e8
25,804
def get_host_finding_vulnerabilities_hr(vulnerabilities): """ Prepare human readable json for "risksense-get-host-finding-detail" command. Including vulnerabilities details. :param vulnerabilities: vulnerabilities details from response. :return: list of dict """ vulnerabilities_list = [{ 'Name': vulnerability.get('cve', ''), 'V2/Score': vulnerability.get('baseScore', ''), 'Threat Count': vulnerability.get('threatCount', ''), 'Attack Vector': vulnerability.get('attackVector', ''), 'Access Complexity': vulnerability.get('accessComplexity', ''), 'Authentication': vulnerability.get('authentication', '') } for vulnerability in vulnerabilities] # To present human readable horizontally if len(vulnerabilities) == 1: vulnerabilities_list.append({}) return vulnerabilities_list
8f0689441f2fef41bbd5da91c802dfb8baa2b979
25,806
from typing import Optional from typing import List import copy def train_on_file_dataset( train_dataset_path: str, valid_dataset_path: Optional[str], feature_ids: List[str], label_id: str, weight_id: Optional[str], model_id: str, learner: str, task: Optional[TaskType] = Task.CLASSIFICATION, generic_hparms: Optional[hyperparameter_pb2.GenericHyperParameters] = None, ranking_group: Optional[str] = None, uplift_treatment: Optional[str] = None, training_config: Optional[abstract_learner_pb2.TrainingConfig] = None, deployment_config: Optional[abstract_learner_pb2.DeploymentConfig] = None, guide: Optional[data_spec_pb2.DataSpecificationGuide] = None, model_dir: Optional[str] = None, keep_model_in_resource: Optional[bool] = True, working_cache_path: Optional[str] = None, distribution_config: Optional[DistributionConfiguration] = None, try_resume_training: Optional[bool] = False) -> tf.Operation: """Trains a model on dataset stored on file. The input arguments and overall logic of this OP is similar to the ":train" CLI or the "learner->Train()" method of Yggdrasil Decision Forests (in fact, this OP simply calls "learner->Train()"). Similarly as the `train` method, the implementation the learning algorithm should be added as a dependency to the binary. Similarly, the implementation the dataset format should be added as a dependency to the binary. In the case of distributed training, `train_on_file_dataset` should only be called by the `chief` process, and `deployment_config` should contain the address of the workers. Args: train_dataset_path: Path to the training dataset. valid_dataset_path: Path to the validation dataset. feature_ids: Ids/names of the input features. label_id: Id/name of the label feature. weight_id: Id/name of the weight feature. model_id: Id of the model. learner: Key of the learner. task: Task to solve. generic_hparms: Hyper-parameter of the learner. ranking_group: Id of the ranking group feature. Only for ranking. uplift_treatment: Id of the uplift treatment group feature. Only for uplift. training_config: Training configuration. deployment_config: Deployment configuration (e.g. where to train the model). guide: Dataset specification guide. model_dir: If specified, export the trained model into this directory. keep_model_in_resource: If true, keep the model as a training model resource. working_cache_path: Path to the working cache directory. If set, and if the training is distributed, all the workers should have write access to this cache. distribution_config: Socket addresses of the workers for distributed training. try_resume_training: Try to resume the training from the "working_cache_path" directory. The the "working_cache_path" does not contains any checkpoint, start the training from the start. Returns: The OP that trigger the training. """ if generic_hparms is None: generic_hparms = hyperparameter_pb2.GenericHyperParameters() if training_config is None: training_config = abstract_learner_pb2.TrainingConfig() else: training_config = copy.deepcopy(training_config) if deployment_config is None: deployment_config = abstract_learner_pb2.DeploymentConfig() else: deployment_config = copy.deepcopy(deployment_config) if guide is None: guide = data_spec_pb2.DataSpecificationGuide() if ranking_group is not None: training_config.ranking_group = ranking_group if uplift_treatment is not None: training_config.uplift_treatment = uplift_treatment # Set the method argument into the proto configs. 
if learner: training_config.learner = learner training_config.task = task training_config.label = label_id if weight_id is not None: training_config.weight_definition.attribute = weight_id training_config.weight_definition.numerical.SetInParent() for feature_id in feature_ids: training_config.features.append(normalize_inputs_regexp(feature_id)) if working_cache_path is not None: deployment_config.cache_path = working_cache_path if try_resume_training: if working_cache_path is None: raise ValueError("Cannot train a model with `try_resume_training=True` " "without a working cache directory.") deployment_config.try_resume_training = True if distribution_config is not None: deployment_config.try_resume_training = True deployment_config.distribute.implementation_key = "TF_DIST" if distribution_config.workers_addresses is not None: dst_addresses = deployment_config.distribute.Extensions[ tf_distribution_pb2.tf_distribution].addresses dst_addresses.addresses[:] = distribution_config.workers_addresses else: # Assume the worker paths are provided through the env. deployment_config.distribute.Extensions[ tf_distribution_pb2.tf_distribution].environment_variable.SetInParent( ) return training_op.SimpleMLModelTrainerOnFile( train_dataset_path=train_dataset_path, valid_dataset_path=valid_dataset_path if valid_dataset_path else "", model_id=model_id if keep_model_in_resource else "", model_dir=model_dir or "", hparams=generic_hparms.SerializeToString(), training_config=training_config.SerializeToString(), deployment_config=deployment_config.SerializeToString(), guide=guide.SerializeToString())
16e692adc9e72d06678680e13ef50636e6f17450
25,807
def db_fixture():
    """Get app context for tests

    :return:
    """
    return db
4c780071e5a092870a676685aede295365de6ad9
25,808
def login_required(route_function):
    """
    This function looks rather convoluted,
    but it is the usual pattern for implementing a decorator.
    """
    def f(request):
        u = current_user(request)
        if u is None:
            log('user not logged in')
            return redirect('/login')
        else:
            return route_function(request)
    return f
44cff97ad32257e4dc4cfe5d7e3b79ea643986ef
25,809
def cuTypeConverter(cuType):
    """ Converts calendar user types to OD type names """
    return "recordType", CalendarDirectoryRecordMixin.fromCUType(cuType)
a4afcfe912fc1d853ee841ed215c099c352ace0c
25,810
def add_viz_sphere( sim: habitat_sim.Simulator, radius: float, pos: mn.Vector3 ) -> habitat_sim.physics.ManagedRigidObject: """ Add a visualization-only sphere to the world at a global position. Returns the new object. """ obj_attr_mgr = sim.get_object_template_manager() sphere_template = obj_attr_mgr.get_template_by_handle( obj_attr_mgr.get_template_handles("icosphereWireframe")[0] ) sphere_template.scale = mn.Vector3(radius) obj_attr_mgr.register_template(sphere_template, "viz_sphere") new_object = sim.get_rigid_object_manager().add_object_by_template_handle( "viz_sphere" ) new_object.motion_type = habitat_sim.physics.MotionType.KINEMATIC new_object.collidable = False new_object.translation = pos return new_object
cc8f47c8b32ad2f4bf7c0e159d19dac048546aea
25,811
import torch


def get_normalize_layer(dataset: str) -> torch.nn.Module:
    """Return the dataset's normalization layer"""
    if dataset == "imagenet":
        return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
    elif dataset == "cifar10":
        return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
52d5d0a744e0e10db1f54d83dfb9b7a8b779c49d
25,812
def summer_69(arr):
    """
    Return the sum of the numbers in the array, except ignore sections of
    numbers starting with a 6 and extending to the next 9 (every 6 will be
    followed by at least one 9). Return 0 for no numbers.

    :param arr: list of integers
    :return: int
    """
    get_result = 0
    add = True
    for num in arr:
        while add:
            if num != 6:
                get_result += num
                break
            else:
                add = False
        while not add:
            if num != 9:
                break
            else:
                add = True
                break
    return get_result
d155a739afe131025b654002bebb51b25325bd1e
25,813
def get_notebook_title(nb_json, default=None):
    """Determine a suitable title for the notebook.

    This will return the text of the first header cell.
    If that does not exist, it will return the default.
    """
    cells = nb_json['cells']
    for cell in cells:
        if cell['cell_type'] == 'heading':
            return cell['source']
    return default
4a20fe9890371ab107d0194e791c6faf9901d714
25,814
import torch def extract_sequence(sent, annotations, sources, label_indices): """ Convert the annotations of a spacy document into an array of observations of shape (nb_sources, nb_bio_labels) """ sequence = torch.zeros([len(sent), len(sources), len(label_indices)], dtype=torch.float) for i, source in enumerate(sources): sequence[:, i, 0] = 1.0 assert source in annotations, logger.error(f"source name {source} is not included in the data") for (start, end), vals in annotations[source].items(): for label, conf in vals: if start >= len(sent): logger.warning("Encountered incorrect annotation boundary") continue elif end > len(sent): logger.warning("Encountered incorrect annotation boundary") end = len(sent) sequence[start:end, i, 0] = 0.0 sequence[start, i, label_indices["B-%s" % label]] = conf if end - start > 1: sequence[start + 1: end, i, label_indices["I-%s" % label]] = conf return sequence
1d988fe82b19d583438b8bf1ceb00671de566fca
25,816
def is_valid_password_1(password):
    """
    >>> is_valid_password_1("111111")
    True
    >>> is_valid_password_1("223450")
    False
    >>> is_valid_password_1("123789")
    False
    """
    has_double = any(password[c] == password[c + 1] for c in range(len(password) - 1))
    is_ascending = all(password[c] <= password[c + 1] for c in range(len(password) - 1))
    return has_double and is_ascending
8544e15a7d50c025073a3ac51b9f5b8809341d2e
25,817
def mean(x, axis=None, keepdims=False): """Mean of a tensor, alongside the specified axis. Parameters ---------- x: A tensor or variable. axis: A list of integer. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is True, the reduced dimensions are retained with length 1. Returns ------- A tensor with the mean of elements of x. """ axis = _normalize_axis(axis, get_ndim(x)) if x.dtype.base_dtype == tf.bool: x = tf.cast(x, tf.float32) return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)
9f8d1b98a5f1dd37a91493fb822437885e04468e
25,818
def frame_pass_valid_sample_criteria(frame, image_type):
    """Returns whether a frame matches type criteria"""
    return frame_image_type_match(frame, image_type)
cf5b51dfe63e7667a14b41c9793a66aa065663e8
25,819
def embed(tokenizer, text):
    """
    Embeds a text sequence using BERT tokenizer

    :param text: text to be embedded
    :return: embedded sequence (text -> tokens -> ids)
    """
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
453d411d9c460dfc28cb54c7a6a807290905bed3
25,820
def exponent_fmt(x, pos):
    """
    The two args are the value and tick position.
    """
    return '{0:.0f}'.format(10 ** x)
46e2104e966ec452fb510a411b1907090d55daf3
25,821
def _unpack(arr, extent, order='C'):
    """
    This is a helper method that handles the initial unpacking of a data array.
    ParaView and VTK use Fortran packing, so this converts data saved in C
    packing to Fortran packing.
    """
    n1, n2, n3 = extent[0], extent[1], extent[2]
    if order == 'C':
        arr = np.reshape(arr, (n1, n2, n3))
        arr = np.swapaxes(arr, 0, 2)
        extent = np.shape(arr)
    elif order == 'F':
        # effectively doing nothing
        # arr = np.reshape(arr, (n3, n2, n1))
        return arr.flatten(), extent
    return arr.flatten(), extent
2d7054da8ffc5773bfd151973bf3b06c84c2e735
25,822
import torch


def unzip(list):
    """unzip the tensor tuple list

    Args:
        list: contains tuples of segmented tensors
    """
    T, loss = zip(*list)
    T = torch.cat(T)
    mean_loss = torch.cat(loss).mean()
    return T, mean_loss
5ed656aa8221c7bc5bd8a43b80fe0efd07d4df24
25,823
def ergs_to_lsun(luminosity):
    """
    From luminosity in erg/s to Lsun
    """
    lum = u.Quantity(luminosity, u.erg / u.s)
    return lum.to(u.L_sun)
fa7e572f5509b0408520e15433141e6da88daae1
25,824
def decode(code, P): """ Decode an RNS representation array into decimal number :param P: list of moduli in order from bigger to smaller [pn, .., p2, p1, p0] >>> decode(code=[5, 3, 1], P=[7,6,5]) 201 """ lcms = np.fromiter(accumulate(P[::-1], np.lcm), int)[::-1] n = code[-1] % P[-1] for i in range(1, len(P)): bottom_p = lcms[-i] per_diff = bottom_p % P[-i - 1] # rev current_next = n % P[-i - 1] wanted_next = code[-i - 1] % P[-i - 1] if wanted_next < current_next: wanted_next = wanted_next + P[-i - 1] distance = wanted_next - current_next distance = distance % P[-i - 1] if distance > 0: bottomp_scroll_count = solve(a=per_diff, m=P[-i - 1], k=distance, allow_zero=True) n = n + bottomp_scroll_count * bottom_p return n
422128ef0d0da62b404b6e8c0b927221505ead17
25,825
def is_collection(obj):
    """
    Check if an object is iterable.

    :return: Result of check.
    :rtype: bool
    """
    return hasattr(obj, '__iter__') and not isinstance(obj, str)
70fa0262ea7bf91a202aade2a1151d467001071e
25,826
import hashlib


def file_md5(fpath):
    """Return the MD5 digest for the given file"""
    with open(fpath, 'rb') as f:
        m = hashlib.md5()
        while True:
            s = f.read(4096)
            if not s:
                break
            m.update(s)
        return m.hexdigest()
40b355b9a628d286bf86b5199fd7e2a8bea354de
25,827
def move(column, player):
    """Apply player move to the given column"""
    index = _index_of(column, None)
    if index < 0:
        print('Entire column is occupied')
        return False
    column[index] = player
    return True
9c728c4c764154390478e27408f5bc25acaacf1d
25,830
def calculate_costs(points, centric_point):
    """
    Returns the accumulated costs of all points in `points` from the
    centric_point
    """
    if len(points) == 1:
        return points[0].hyp()
    _part = (points - centric_point) ** 2
    _fin = []
    for point in _part:
        _fin.append(point.hyp())
    return (np.array(_fin)).sum()
c35e00dabb3e85d5136afc3f5696a73aad607470
25,831
def formatUs(time):
    """Format human readable time (input in us)."""
    if time < 1000:
        return f"{time:.2f} us"

    time = time / 1000
    if time < 1000:
        return f"{time:.2f} ms"

    time = time / 1000
    return f"{time:.2f} s"
7546db60e3977e07dbbbad0a3ab767865840c2e3
25,832
from typing import Dict from typing import List from typing import Union def parse_annotations(ann_filepath: str) -> Dict[int, List[Label]]: """Parse annotation file into List of Scalabel Label type per frame.""" outputs = defaultdict(list) for line in load_file_as_list(ann_filepath): gt = line.strip().split(",") class_id = gt[7] if class_id not in NAME_MAPPING: continue class_name = NAME_MAPPING[class_id] if class_name in IGNORE: continue frame_id, ins_id = (int(x) for x in gt[:2]) box2d = bbox_to_box2d([float(x) for x in gt[2:6]]) attrs: Dict[str, Union[bool, float, str]] = dict( visibility=float(gt[8]) ) ann = Label( category=class_name, id=ins_id, box2d=box2d, attributes=attrs, ) outputs[frame_id].append(ann) return outputs
1ef42147fa4cb44b1ebd37f861444e502d0ea9b9
25,833
from typing import Dict from typing import List from typing import Optional from typing import Union from typing import Any def apply(lang1: Dict[List[str], float], lang2: Dict[List[str], float], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> float: """ Calculates the EMD distance between the two stochastic languages Parameters ------------- lang1 First language lang2 Second language parameters Parameters of the algorithm, including: - Parameters.STRING_DISTANCE: function that accepts two strings and returns a distance Returns --------------- emd_dist EMD distance """ if parameters is None: parameters = {} distance_function = exec_utils.get_param_value(Parameters.STRING_DISTANCE, parameters, normalized_levensthein) enc1, enc2 = encode_two_languages(lang1, lang2, parameters=parameters) # transform everything into a numpy array first_histogram = np.array([x[1] for x in enc1]) second_histogram = np.array([x[1] for x in enc2]) # including a distance matrix that includes the distance between # the traces distance_matrix = [] for x in enc1: distance_matrix.append([]) for y in enc2: # calculates the (normalized) distance between the strings dist = distance_function(x[0], y[0]) distance_matrix[-1].append(float(dist)) distance_matrix = np.array(distance_matrix) ret = emd(first_histogram, second_histogram, distance_matrix) return ret
c87d171018a6eddef6572a0bd3639952499fca44
25,835
import collections def reInpainting(image, ground_truth, teethColor): """ if pixel has pink color (marked for teeth) and not in range of teeth => fill by teethColor """ isTeeth, isNotTeeth = 0, 0 threshold = calculateThreshhold(image, teethColor) # print(f"Threshold: {threshold}") for i in range(0, image.shape[0]): for j in range(0, image.shape[1]): pixel = image[i][j] pink = [255, 0, 255] if collections.Counter(pixel) == collections.Counter(pink): if isTeethColor(ground_truth[i][j], teethColor, threshold): isTeeth = isTeeth + 1 else: # 229,224,212 _________ 200,160,75 ground_truth[i][j] = [teethColor[2], teethColor[1], teethColor[0]] isNotTeeth = isNotTeeth + 1 # print(f"isTeeth: {isTeeth}, isNotTeeth: {isNotTeeth}") return ground_truth
c5f8a71c9c1bbf6e3b4c03b477901b9669d9f72c
25,836
def data_for_cylinder_along_z(center_x, center_y, radius, height_z):
    """
    Method for creating a grid for cylinder drawing. The cylinder will be
    created along the Z axis.

    :param center_x: Euclidean 3 dimensional center of drawing on X axis
    :param center_y: Euclidean 3 dimensional center of drawing on Y axis
    :param radius: cylinder radius
    :param height_z: cylinder height
    :return: Three lists with grid coordinates for z, y, x sequentially
    """
    z = np.linspace(0, height_z, 50)
    theta = np.linspace(0, 2 * np.pi, 50)
    theta_grid, z_grid = np.meshgrid(theta, z)
    x_grid = radius * np.cos(theta_grid) + center_x
    y_grid = radius * np.sin(theta_grid) + center_y
    return z_grid, y_grid, x_grid
2582860582564e7b8a4e9ba6e89d0740d44fa069
25,837
def _configure_learning_rate(num_samples_per_epoch, global_step):
    """Configures the learning rate.

    Args:
      num_samples_per_epoch: The number of samples in each epoch of training.
      global_step: The global_step tensor.

    Returns:
      A `Tensor` representing the learning rate.

    Raises:
      ValueError: if
    """
    decay_steps = int(num_samples_per_epoch / (FLAGS.batch_size * FLAGS.num_clones) *
                      FLAGS.num_epochs_per_decay)

    return tf.train.exponential_decay(FLAGS.learning_rate,
                                      global_step,
                                      decay_steps,
                                      FLAGS.learning_rate_decay_factor,
                                      staircase=True,
                                      name='exponential_decay_learning_rate')
c1395b7521b6a55e8a77c50b47dca920f8c27dc0
25,838
def cpm(adata: ad.AnnData) -> ad.AnnData:
    """Normalize data to counts per million."""
    _cpm(adata)
    return adata
ec0a2a0ed61965e8c78ebf59fab569f2a4954790
25,839
def argon2_key(encryption_password, salt):
    """
    Generates an encryption key from a password using the Argon2id KDF.
    """
    return argon2.low_level.hash_secret_raw(encryption_password.encode('utf-8'),
                                            salt,
                                            time_cost=RFC_9106_LOW_MEMORY.time_cost,
                                            memory_cost=RFC_9106_LOW_MEMORY.memory_cost,
                                            parallelism=RFC_9106_LOW_MEMORY.parallelism,
                                            hash_len=32,
                                            type=argon2.low_level.Type.ID)
eaf5a0f3ca0ee12e22b0ddb9594dcd1734ef91e8
25,840
def print_policy_analysis(policies, game, verbose=False): """Function printing policy diversity within game's known policies. Warning : only works with deterministic policies. Args: policies: List of list of policies (One list per game player) game: OpenSpiel game object. verbose: Whether to print policy diversity information. (True : print) Returns: List of list of unique policies (One list per player) """ states_dict = get_all_states.get_all_states(game, np.infty, False, False) unique_policies = [] for player in range(len(policies)): cur_policies = policies[player] cur_set = set() for pol in cur_policies: cur_str = "" for state_str in states_dict: if states_dict[state_str].current_player() == player: pol_action_dict = pol(states_dict[state_str]) max_prob = max(list(pol_action_dict.values())) max_prob_actions = [ a for a in pol_action_dict if pol_action_dict[a] == max_prob ] cur_str += "__" + state_str for a in max_prob_actions: cur_str += "-" + str(a) cur_set.add(cur_str) unique_policies.append(cur_set) if verbose: print("\n=====================================\nPolicy Diversity :") for player, cur_set in enumerate(unique_policies): print("Player {} : {} unique policies.".format(player, len(cur_set))) print("") return unique_policies
51379d78dc3dd924da41dc00e8d6236d72b68f3c
25,841
def model_fn(is_training=True, **params): """ Create base model with MobileNetV2 + Dense layer (n class). Wrap up with CustomModel process. Args: is_training (bool): if it is going to be trained or not params: keyword arguments (parameters dictionary) """ baseModel = MobileNetV2( include_top=False, weights='imagenet', input_shape=(224, 224, 3), pooling="avg") fc = tf.keras.layers.Dense( params['n_class'], activation="softmax", name="softmax_layer")(baseModel.output) model = CustomModel(inputs=baseModel.input, outputs=fc) # If it is not training mode if not is_training: model.trainable = False return model
2a14d803c5d521f453ce30a641d8736364e64ac0
25,842
def roundToElement(dateTime, unit): """ Returns a copy of dateTime rounded to given unit :param datetime.datetime: date time object :param DtUnit unit: unit :return: datetime.datetime """ year = dateTime.year month = dateTime.month day = dateTime.day hour = dateTime.hour minute = dateTime.minute second = dateTime.second microsecond = dateTime.microsecond if unit.value < DtUnit.YEARS.value: pass # Never round years if unit.value < DtUnit.MONTHS.value: month = 1 if unit.value < DtUnit.DAYS.value: day = 1 if unit.value < DtUnit.HOURS.value: hour = 0 if unit.value < DtUnit.MINUTES.value: minute = 0 if unit.value < DtUnit.SECONDS.value: second = 0 if unit.value < DtUnit.MICRO_SECONDS.value: microsecond = 0 result = dt.datetime(year, month, day, hour, minute, second, microsecond, tzinfo=dateTime.tzinfo) return result
226f532e9e729d155d14135e4025015e8b00b2e0
25,844
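A hypothetical usage sketch for roundToElement: DtUnit is not defined in this snippet, so the stand-in Enum below only assumes the ordering the function relies on (YEARS < MONTHS < ... < MICRO_SECONDS); `dt` is the datetime module alias the function already uses.

import datetime as dt
from enum import Enum

class DtUnit(Enum):
    # Stand-in with the ordering roundToElement depends on (assumption).
    YEARS = 0
    MONTHS = 1
    DAYS = 2
    HOURS = 3
    MINUTES = 4
    SECONDS = 5
    MICRO_SECONDS = 6

stamp = dt.datetime(2021, 7, 14, 13, 45, 30, 123456)
print(roundToElement(stamp, DtUnit.HOURS))  # 2021-07-14 13:00:00
print(roundToElement(stamp, DtUnit.DAYS))   # 2021-07-14 00:00:00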
def tuple_factory(colnames, rows):
    """
    Returns each row as a tuple

    Example::

        >>> from cassandra.query import tuple_factory
        >>> session = cluster.connect('mykeyspace')
        >>> session.row_factory = tuple_factory
        >>> rows = session.execute("SELECT name, age FROM users LIMIT 1")
        >>> print rows[0]
        ('Bob', 42)

    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    return rows
5526647a414b397ac9d71c35173718c01385a03b
25,845
def is_error(splunk_record_key):
    """Return True if the given string is an error key.

    :param splunk_record_key: The string to check
    :type splunk_record_key: str
    :rtype: bool
    """
    return splunk_record_key == 'error'
26371ec9c5941fbf07a84c6904ea739b02eb97ba
25,846
def parse_network_info(net_bond, response_json):
    """ Build the network info for the given bond from the API response.

    Returns (out_dict, ip_list) unless net_bond is 'Bond10G', in which case
    only out_dict is returned.
    """
    out_dict = {}
    ip_list = []
    node_count = 0
    # Build individual node information
    for node_result in response_json['result']['nodes']:
        for node in response_json['result']['nodes']:
            if node['nodeID'] == node_result['nodeID']:
                node_id = str(node_result['nodeID'])
                n_id = "Node ID " + node_id
                net_result = node['result']['network'][net_bond]
                bond_addr = net_result['address']
                bond_mask = net_result['netmask']
                bond_gateway = net_result['gateway']
                bond_mode = net_result['bond-mode']
                bond_mtu = net_result['mtu']
                bond_speed = net_result['linkSpeed']
                name_servers = net_result['dns-nameservers']
                search_domains = net_result['dns-search']
                out_dict['------' + n_id + ' ------'] = \
                    '--------------------------'
                out_dict[n_id + ' Bond name'] = net_bond
                out_dict[n_id + ' Address'] = bond_addr
                out_dict[n_id + ' Netmask'] = bond_mask
                out_dict[n_id + ' Gateway'] = bond_gateway
                out_dict[n_id + ' Bond mode'] = bond_mode
                out_dict[n_id + ' MTU'] = bond_mtu
                out_dict[n_id + ' Link speed'] = bond_speed
                if net_bond == 'Bond1G':
                    out_dict[n_id + ' DNS servers'] = name_servers
                    out_dict[n_id + ' DNS search'] = search_domains
                ip_list.append(bond_addr)
                node_count = node_count + 1
    if net_bond != 'Bond10G':
        return out_dict, ip_list
    else:
        return out_dict
2c83aa72d6ee0195a42339546d1fded84f85680f
25,847
async def create_or_update(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 1.0.0

    Create or update a network security group.

    :param name: The name of the network security group to create.

    :param resource_group: The resource group name assigned to the
        network security group.

    CLI Example:

    .. code-block:: bash

        azurerm.network.network_security_group.create_or_update testnsg testgroup

    """
    if "location" not in kwargs:
        rg_props = await hub.exec.azurerm.resource.group.get(
            ctx, resource_group, **kwargs
        )

        if "error" in rg_props:
            log.error("Unable to determine location from resource group specified.")
            return {
                "error": "Unable to determine location from resource group specified."
            }
        kwargs["location"] = rg_props["location"]

    netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)

    try:
        secgroupmodel = await hub.exec.azurerm.utils.create_object_model(
            "network", "NetworkSecurityGroup", **kwargs
        )
    except TypeError as exc:
        result = {
            "error": "The object model could not be built. ({0})".format(str(exc))
        }
        return result

    try:
        secgroup = netconn.network_security_groups.create_or_update(
            resource_group_name=resource_group,
            network_security_group_name=name,
            parameters=secgroupmodel,
        )
        secgroup.wait()
        secgroup_result = secgroup.result()
        result = secgroup_result.as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
        result = {"error": str(exc)}
    except SerializationError as exc:
        result = {
            "error": "The object model could not be parsed. ({0})".format(str(exc))
        }

    return result
251ee69d6077d2fd4ffda8c9da53b8ae84c9a696
25,848
def download_media_suite(req, domain, app_id):
    """
    See Application.create_media_suite
    """
    return HttpResponse(
        req.app.create_media_suite()
    )
fe0f5e0b5598b2368fd756a7f6bee89035813317
25,849
def non_numeric(string: str) -> str:
    """ Removes all numbers from the string """
    return ''.join(letter for letter in string if not letter.isdigit())
fe16297c4cf1b144fb583986a5c01ea02920787e
25,850
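A quick self-contained check of non_numeric; it relies on nothing beyond the function itself.

assert non_numeric("r2d2") == "rd"
assert non_numeric("2021-07-14") == "--"
assert non_numeric("no digits here") == "no digits here"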
import re

import numpy as np


def prepare_xs(path, numbergroup=1):
    """Prepare the needed representation of cross-section data

    Parameters:
    -----------
    path : str
        filename of the cross-section data
    numbergroup : int
        number of neutron energy multigroups

    Returns:
    --------
    energies : numpy.ndarray
        energy discretization by multigroups
    xslib : dict
        key : MT number, value : cross-section values (numpy.ndarray)
    """
    def skip(ofile, number):
        for i in range(number):
            line = next(ofile)

    energies = np.zeros(numbergroup + 1)
    xslib = {}
    xs = []
    mtnum = ''
    with open(path, 'r') as f:
        for line in f:
            res = re.search(r"MT=\w*\d+", line)
            if res:
                mtnum = re.search(r"\d+", line).group()
                skip(f, 5)
                xs = np.zeros(numbergroup)
                while (len(line.rstrip()) > 1):
                    dump = line.rstrip().split()
                    num = 0
                    en = 0.0
                    x = 0.0
                    for i, d in enumerate(dump):
                        if (i % 3 == 0):
                            num = int(d.rstrip())
                        if (i % 3 == 1):
                            en = float(d.rstrip())
                            if (num < numbergroup + 2):
                                if (energies[num - 1] == 0.0):
                                    energies[num - 1] = en
                        if (i % 3 == 2):
                            x = float(d.rstrip())
                            if (num < numbergroup + 1):
                                xs[num - 1] = x
                    line = next(f)
                if (sum(xs) > 0):
                    xslib[mtnum] = xs
    return energies, xslib
5f6ffd4e7954984d43ebc00c108d268797831256
25,851
def shiftField(field, dz):
    """Shifts the z-coordinate of the field by dz"""
    for f in field:
        if f.ID == 'Polar Data':
            f.set_RPhiZ(f.r, f.phi, f.z + dz)
        elif f.ID == 'Cartesian Data':
            f.set_XYZ(f.x, f.y, f.z + dz)
    return field
c3c592356dc21688049a94291d075879a12012ee
25,852
def pair_equality(dataframe, column_1, column_2, new_feature_name):
    """
    Adds a new binary feature to an existing dataframe which, for every row, is 1
    if and only if that row has equal values in two given columns.

    :param dataframe: Dataframe to add feature to
    :param column_1: Name of first existing column
    :param column_2: Name of second existing column
    :param new_feature_name: Name of the new column to add
    :return: Modified version of given dataframe
    """
    dataframe[new_feature_name] = dataframe.apply(
        lambda row: get_pair_equality(row, column_1, column_2), axis=1)
    return dataframe
d82a02c49399351aa62b712664bb9500390ebf81
25,853
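pair_equality calls a get_pair_equality helper that is not shown in this snippet; the sketch below assumes a plausible per-row implementation purely for illustration, then exercises the feature builder on a small pandas frame.

import pandas as pd

def get_pair_equality(row, column_1, column_2):
    # Assumed behaviour: 1 if the two columns match in this row, else 0.
    return int(row[column_1] == row[column_2])

df = pd.DataFrame({"a": [1, 2, 3], "b": [1, 0, 3]})
df = pair_equality(df, "a", "b", "a_equals_b")
print(df["a_equals_b"].tolist())  # [1, 0, 1]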
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of the 2 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, kernel_size,
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = LeakyReLU()(x)

    x = Conv2D(filters2, kernel_size,
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    x = layers.add([x, input_tensor])
    x = LeakyReLU()(x)
    return x
3d33a0bec933697eae642199fe7b24e90e45e15b
25,855
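A shape-check sketch for identity_block, assuming the surrounding module imports Conv2D, BatchNormalization, LeakyReLU, layers and K from standalone Keras (those imports are not shown; tf.keras would work the same way with matching Input/Model imports).

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(56, 56, 64))
# The shortcut is an identity, so the last filter count must equal the input depth.
outputs = identity_block(inputs, kernel_size=3, filters=[64, 64], stage=2, block='a')
model = Model(inputs, outputs)
print(model.output_shape)  # (None, 56, 56, 64)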
def cmp_id(cls1, cls2, idx1, idx2):
    """Compare particle IDs between two clusters and return the number of
    particles they share.

    Parameters
    ----------
    cls1, cls2: Cluster object
    idx1, idx2: Indices of detected particles in the clusters.

    Output
    ------
    The number of particles common to both clusters.
    """
    partId1 = cls1.gas_id[idx1]
    partId2 = cls2.gas_id[idx2]
    sameId = np.intersect1d(partId1, partId2)
    return len(sameId)
b3ebf4a3c98da18a84545caff446e0d1732208a0
25,858
def get_clinical_cup():
    """
    Returns tuple with clinical cup description
    """
    return ("8", "2", "M", "01", 25)
fac133ea74fbe30b50e551fdd7cdce349cc02a3a
25,859
def to_utm_bbox(bbox: BBox) -> BBox:
    """Transform bbox into UTM CRS

    :param bbox: bounding box
    :return: bounding box in UTM CRS
    """
    if CRS.is_utm(bbox.crs):
        return bbox

    lng, lat = bbox.middle
    utm_crs = get_utm_crs(lng, lat, source_crs=bbox.crs)

    return bbox.transform(utm_crs)
80e67ce402ba1551a5282f93fc629be78255e96a
25,860
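A usage sketch for to_utm_bbox, assuming BBox, CRS and get_utm_crs come from the sentinelhub package (consistent with the names used above).

from sentinelhub import BBox, CRS

wgs84_bbox = BBox(bbox=[14.00, 45.95, 14.06, 46.00], crs=CRS.WGS84)
utm_bbox = to_utm_bbox(wgs84_bbox)
print(utm_bbox.crs)  # a UTM CRS; UTM zone 33N (EPSG:32633) for this longitude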
def define_actions(action):
    """
    Define the list of actions we are using.

    Args
        action: String with the passed action. Could be "all"
    Returns
        actions: List of strings of actions
    Raises
        ValueError if the action is not a recognized action name
    """
    actions = ["walking", "wiping", "lifting", "co-existing", "co-operating", "noise",
               "p1_1", "p1_2", "p2_1", "p5", "p7"]

    if action in actions:
        return [action]

    if action == "all":
        return actions

    if action == "all_srnn":
        return ["walking", "eating", "smoking", "discussion"]

    raise ValueError("Unrecognized action: %s" % action)
45bfbd20971a04f566feeed0de509b21be83963b
25,861
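A small usage sketch for define_actions, relying only on the behaviour visible above.

print(define_actions("walking"))   # ['walking']
print(len(define_actions("all")))  # 11 actions
try:
    define_actions("running")
except ValueError as err:
    print(err)                     # Unrecognized action: running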
def gen_order_history_sequence(uid, history_grouped, has_history_flag):
    """ Sequence built from the user's order history """
    # order records for the past 311 days
    sequence = ['0'] * 311
    if has_history_flag == 0:
        return sequence

    df = history_grouped[uid]
    for i in df['days_from_now']:
        sequence[i] = str(df[df['days_from_now'] == i].shape[0])
    return sequence
9f9e93549ea4c35971f87957b74e44e258d79d49
25,862
def _get_plugin_type_ids():
    """Get the ID of each of Pulp's plugins.

    Each Pulp plugin adds one (or more?) content unit type to Pulp. Each of
    these content unit types is identified by a certain unique identifier. For
    example, the `Python type`_ has an ID of ``python_package``.

    :returns: A set of plugin IDs. For example: ``{'ostree',
        'python_package'}``.

    .. _Python type:
        http://pulp-python.readthedocs.org/en/latest/reference/python-type.html
    """
    client = api.Client(config.get_config(), api.json_handler)
    plugin_types = client.get(PLUGIN_TYPES_PATH)
    return {plugin_type['id'] for plugin_type in plugin_types}
0c31a239980a2427f1fb115d890eea9d92edf396
25,863
def scale_48vcurrent(value, reverse=False, pcb_version=0):
    """
    Given a raw register value and the PCB version number, find out what scale and offset are needed,
    convert the raw value to Amps (if reverse=False), or convert a value in Amps to raw (if reverse=True).

    For now, raw values are hundredths of an Amp, positive only.

    :param value: raw register contents as a value from 0-65535, or current in Amps
    :param reverse: Boolean, True to perform physical->raw conversion instead of raw->physical
    :param pcb_version: integer PCB version number, 0-65535
    :return: current in Amps (or the raw register value if reverse=True)
    """
    if reverse:
        return int(value * 100) & 0xFFFF
    else:
        return value / 100.0
562a9354f1648203ba9854f2404e00365e12f67f
25,864
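A round-trip sanity check for scale_48vcurrent; raw register counts are hundredths of an Amp, so no external setup is needed.

print(scale_48vcurrent(1250))                 # 12.5 (Amps)
print(scale_48vcurrent(12.5, reverse=True))   # 1250 (raw register value)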
import pickle


def get_graph(graph_name):
    """Return graph, input can be string with the file name (reuse previously
    created graph), or a variable containing the graph itself"""
    # open file if it's a string, or just pass the graph variable through
    if '.p' not in graph_name:
        graph_name = add_extension(graph_name)
    if isinstance(graph_name, str):
        graph_path = get_whole_path(graph_name, 'graphs')
        # get graph info: returns file object, mode read binary
        infile = open(graph_path, 'rb')
        G = pickle.load(infile)
        infile.close()
    else:
        G = graph_name
    return G
0efe5cb90b6f8bf1fc59853704cf7e07038fb8fb
25,865
def get_transfer_encodings():
    """Return a list of supported content-transfer-encoding values."""
    return transfer_decoding_wrappers.keys()
c42dfd886b1080e6a49fe4dabc2616967855a7f0
25,867
def is_name_valid(name: str, rules: list) -> bool:
    """ Determine whether a name corresponds to a named rule. """
    for rule in rules:
        if rule.name == name:
            return True
    return False
41e9f88d86a078ca6386f1d0d6b7123233c819b9
25,868
def fig2data(fig, imsize):
    """Render a Matplotlib figure into a numpy RGB image array.

    :param fig: Matplotlib figure
    :param imsize: (width, height) of the rendered figure in pixels
    :return: numpy array of shape (height, width, 3) with dtype uint8
    """
    canvas = FigureCanvas(fig)
    ax = fig.gca()

    # ax.text(0.0, 0.0, "Test", fontsize=45)
    # ax.axis("off")

    canvas.draw()
    image = np.fromstring(canvas.tostring_rgb(), dtype="uint8")

    width, height = imsize
    image = image.reshape((int(height), int(width), -1))
    return image
5a8b7bf34d6aa3849f20b5ca140c572c5cad0e57
25,869
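A usage sketch for fig2data, assuming FigureCanvas in that module is the Agg canvas (matplotlib.backends.backend_agg.FigureCanvasAgg) and a Matplotlib version that still provides tostring_rgb; figsize * dpi must match imsize for the reshape to succeed.

import matplotlib
matplotlib.use("Agg")  # headless backend
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(4, 3), dpi=100)  # renders to 400 x 300 pixels
plt.plot([0, 1, 2], [0, 1, 4])
image = fig2data(fig, imsize=(400, 300))
print(image.shape, image.dtype)            # (300, 400, 3) uint8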
def draw_agent_trail(img, trail_data, rgb, vision):
    """
    draw agent trail on the device with given color.

    Args:
        img : cv2 read image of device.
        trail_data : list of (x, y) points making up the agent's trail
        rgb : (r,g,b) tuple of rgb color
        vision : if True, draw a thinner (5 px) trail line; otherwise use a 12 px line

    Returns:
        img : updated image with agent trail drawn.
    """
    for j in range(len(trail_data)):
        if j > 0:
            if vision:
                cv2.line(img, trail_data[j], trail_data[j - 1], rgb, 5)
            else:
                cv2.line(img, trail_data[j], trail_data[j - 1], rgb, 12)
    return img
9524cb10cbe1ed7dceb8714c7ace443be64e8767
25,870
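A usage sketch for draw_agent_trail with OpenCV; note that cv2 interprets the colour tuple in BGR order, so (0, 0, 255) below draws in red.

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # blank frame
trail = [(100, 100), (200, 150), (300, 300)]     # (x, y) points
frame = draw_agent_trail(frame, trail, (0, 0, 255), vision=True)
cv2.imwrite("trail.png", frame)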
def get_total_received_items(scorecard):
    """ Gets the total number of received shipments in the period (based on Purchase Receipts)"""
    supplier = frappe.get_doc('Supplier', scorecard.supplier)

    # Look up all Purchase Receipt Items posted between our dates
    data = frappe.db.sql("""
            SELECT
                SUM(pr_item.received_qty)
            FROM
                `tabPurchase Receipt Item` pr_item,
                `tabPurchase Receipt` pr
            WHERE
                pr.supplier = %(supplier)s
                AND pr.posting_date BETWEEN %(start_date)s AND %(end_date)s
                AND pr_item.docstatus = 1
                AND pr_item.parent = pr.name""",
            {"supplier": supplier.name,
             "start_date": scorecard.start_date,
             "end_date": scorecard.end_date},
            as_dict=0)[0][0]

    if not data:
        data = 0
    return data
856e5b42a1b572a6fa7150b789eb8754a045677d
25,871
def generate_default_filters(dispatcher, *args, **kwargs):
    """
    Prepare filters

    :param dispatcher: dispatcher instance (needed to resolve state filters)
    :param args: custom filter objects appended as-is
    :param kwargs: named default filters and their values
    :return: list of filter objects
    """
    filters_list = []

    for name, filter_data in kwargs.items():
        if filter_data is None:
            # skip filters that are not set
            # Note that state by default is not None,
            # check dispatcher.storage for more information
            continue
        if name == DefaultFilters.REQUEST_TYPE:
            filters_list.append(RequestTypeFilter(filter_data))
        elif name == DefaultFilters.COMMANDS:
            if isinstance(filter_data, str):
                filters_list.append(CommandsFilter([filter_data]))
            else:
                filters_list.append(CommandsFilter(filter_data))
        elif name == DefaultFilters.STARTS_WITH:
            if isinstance(filter_data, str):
                filters_list.append(StartsWithFilter([filter_data]))
            else:
                filters_list.append(StartsWithFilter(filter_data))
        elif name == DefaultFilters.CONTAINS:
            if isinstance(filter_data, str):
                filters_list.append(ContainsFilter([filter_data]))
            else:
                filters_list.append(ContainsFilter(filter_data))
        elif name == DefaultFilters.STATE:
            if isinstance(filter_data, (list, set, tuple, frozenset)):
                filters_list.append(StatesListFilter(dispatcher, filter_data))
            else:
                filters_list.append(StateFilter(dispatcher, filter_data))
        elif name == DefaultFilters.FUNC:
            filters_list.append(filter_data)
        elif name == DefaultFilters.REGEXP:
            filters_list.append(RegexpFilter(filter_data))
        elif isinstance(filter_data, Filter):
            filters_list.append(filter_data)
        else:
            log.warning('Unexpected filter with name %r of type `%r` (%s)',
                        name, type(filter_data), filter_data)

    filters_list += list(args)  # Some custom filters

    return filters_list
e307b9933280bfc91ef25ac306586c3cc6cf8c94
25,872
def linear_map(x, init_mat_params=None, init_b=None, mat_func=get_LU_map,
               trainable_A=True, trainable_b=True, irange=1e-10,
               name='linear_map'):
    """Return the linearly transformed, y^t = x^t * mat_func(mat_params) + b^t,
    log determinant of Jacobian and inverse map.

    Args:
        x: N x d real tensor of covariates to be linearly transformed.
        init_mat_params: tensor of parameters for linear map returned by
            mat_func(init_mat_params, b) (see get_LU_map above).
        init_b: d length tensor of biases.
        mat_func: function that returns matrix, log determinant, and inverse
            for linear mapping (see get_LU_map).
        trainable_A: boolean indicating whether to train matrix for linear
            map.
        trainable_b: boolean indicating whether to train bias for linear map.
        irange: half-width of the uniform range used to initialize variables
            (None to use the default initializer).
        name: variable scope.
    Returns:
        z: N x d linearly transformed covariates.
        logdet: scalar, the log determinant of the Jacobian for transformation.
        invmap: function that computes the inverse transformation.
    """
    if irange is not None:
        initializer = tf.random_uniform_initializer(-irange, irange)
    else:
        initializer = None
    with tf.variable_scope(name, initializer=initializer):
        d = int(x.get_shape()[-1])
        if init_mat_params is None:
            # mat_params = tf.get_variable(
            #     'mat_params', dtype=tf.float32,
            #     shape=(d, d), trainable=trainable_A)
            mat_params = tf.get_variable(
                'mat_params', dtype=tf.float32,
                initializer=tf.eye(d, dtype=tf.float32), trainable=trainable_A)
        else:
            mat_params = tf.get_variable('mat_params', dtype=tf.float32,
                                         initializer=init_mat_params,
                                         trainable=trainable_A)
        if init_b is None:
            # b = tf.get_variable('b', dtype=tf.float32, shape=(d,),
            #                     trainable=trainable_b)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=tf.zeros((d, ), tf.float32),
                                trainable=trainable_b)
        else:
            b = tf.get_variable('b', dtype=tf.float32, initializer=init_b,
                                trainable=trainable_b)
        A, logdet, invmap = mat_func(mat_params, b)
        z = tf.matmul(x, A) + tf.expand_dims(b, 0)
        return z, logdet, invmap
1286fc8087288f94b1ef63388fc6c8636d061b2f
25,873
def isolated_70():
    """
    Real Name: b'Isolated 70'
    Original Eqn: b'INTEG ( isolation rate symptomatic 70+isolation rate asymptomatic 70-isolated recovery rate 70\\\\ -isolated critical case rate 70, init Isolated 70)'
    Units: b'person'
    Limits: (None, None)
    Type: component

    b''
    """
    return _integ_isolated_70()
b1185a6a03759830f7cfeaefae34389699e62c48
25,874
def db_retry(using=None, tries=None, delay=None, max_delay=None, backoff=1, jitter=0,
             logger=logging_logger):
    """Returns a retry decorator.

    :param using: database alias from settings.DATABASES.
    :param tries: the maximum number of attempts. -1 means infinite.
                  None - get from current connection.
                  default: DATABASES[using].get('MAX_RETRIES', 1).
    :param delay: initial delay between attempts.
                  None - get from current connection.
                  default: DATABASES[using].get('RETRY_DELAY_SECONDS', 0).
    :param max_delay: the maximum value of delay. default: None (no limit).
    :param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
    :param jitter: extra seconds added to delay between attempts. default: 0.
                   fixed if a number, random if a range tuple (min, max)
    :param logger: logger.warning(fmt, error, delay) will be called on failed attempts.
                   default: retry.logging_logger. if None, logging is disabled.
    :returns: a retry decorator.
    """
    if tries is None or delay is None:
        connection = get_connection(using=using)
        if tries is None:
            tries = connection.settings_dict.get("MAX_RETRIES", 1)
        if delay is None:
            # RETRY_DELAY_SECONDS might be None, hence the trailing "or 0"
            delay = connection.settings_dict.get("RETRY_DELAY_SECONDS", 0) or 0

    def wrap(f):
        def wrapped_f(*fargs, **fkwargs):
            args = fargs if fargs else list()
            kwargs = fkwargs if fkwargs else dict()
            return __retry_internal(
                partial(f, *args, **kwargs), tries, delay, max_delay, backoff, jitter, logger
            )

        return wrapped_f

    return wrap
5727bb89f55a8cc68cea2a35ea256b79b6b852da
25,875
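A usage sketch for the db_retry decorator, assuming a Django-style settings module; which exceptions actually trigger a retry is decided by the __retry_internal helper that is not shown here, and Order is a hypothetical model used only for illustration.

@db_retry(using="default", tries=3, delay=0.5, backoff=2)
def fetch_pending_orders():
    # Failed attempts are retried up to 3 times, waiting 0.5s and then 1.0s
    # between attempts (the delay is doubled by the backoff multiplier).
    return list(Order.objects.filter(status="pending"))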
from math import factorial
from typing import Collection

import numpy as np


def A000142(start: int = 0, limit: int = 20) -> Collection[int]:
    """Factorial numbers: n! = 1*2*3*4*...*n (order of symmetric group S_n,
    number of permutations of n letters).
    """
    sequence = []
    colors = []
    x = []
    for i in range(start, start + limit):
        sequence.append(factorial(i))
        colors.append(np.random.rand())
        x.append(i)
    return sequence
c0c709529bb7926369912ea195aec5fba17f7887
25,876
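A quick check of the first few terms returned by A000142; numpy is only needed for the colors list the function builds but never returns.

print(A000142(start=0, limit=6))  # [1, 1, 2, 6, 24, 120]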