content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import tempfile import gzip def load_training_data(): """Loads the Fashion-MNIST dataset. Returns: Tuple of Numpy arrays: `(x_train, y_train)`. License: The copyright for Fashion-MNIST is held by Zalando SE. Fashion-MNIST is licensed under the [MIT license]( https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE). """ download_directory = tempfile.mkdtemp() base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" files = [ "train-labels-idx1-ubyte.gz", "train-images-idx3-ubyte.gz", ] paths = [] for fname in files: paths.append(get_file(fname, origin=base + fname, cache_subdir=download_directory)) with gzip.open(paths[0], "rb") as lbpath: y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[1], "rb") as imgpath: x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) return x_train, y_train
be2a5d84c8ef0cd1aa62564a3fd3882af49344ca
15,400
def set_difference(tree, context, attribs): """A meta-feature that will produce the set difference of two boolean features (will have keys set to 1 only for those features that occur in the first set but not in the second). @rtype: dict @return: dictionary with keys that occur with the first feature but not the second, and values equal to 1 """ ret = {} for key, val in context['feats'][attribs[0]].items(): if key not in context['feats'][attribs[1]]: ret[key] = val return ret
7887f619e601624843c6507e7b93442020ecf1ea
15,401
def create_STATES(us_states_location): """ Create shapely files of states. Args: us_states_location (str): Directory location of states shapefiles. Returns: States data as cartopy feature for plotting. """ proj = ccrs.LambertConformal(central_latitude = 25, central_longitude = 265, standard_parallels = (25, 25)) reader = shpreader.Reader( f'{us_states_location}/ne_50m_admin_1_states_provinces_lines.shp') states = list(reader.geometries()) STATES = cfeature.ShapelyFeature(states, ccrs.PlateCarree()) return STATES
fe2b48f465ee7e63bb4dfa91e2c9917eeeab082f
15,402
def get_name_by_url(url): """Returns the name of a stock from the instrument url. Should be located at ``https://api.robinhood.com/instruments/<id>`` where <id> is the id of the stock. :param url: The url of the stock as a string. :type url: str :returns: Returns the simple name of the stock. If the simple name does not exist then returns the full name. """ data = helper.request_get(url) if not data: return(None) # If stock doesn't have a simple name attribute then get the full name. filter = helper.filter(data, info = 'simple_name') if not filter or filter == "": filter = helper.filter(data, info = 'name') return(filter)
c90e453bb1576d8c93a3388ab2cfe0d9f63d550c
15,403
def recursively_replace(original, replacements, include_original_keys=False): """Clones an iterable and recursively replaces specific values.""" # If this function would be called recursively, the parameters 'replacements' and 'include_original_keys' would have to be # passed each time. Therefore, a helper function with a reduced parameter list is used for the recursion, which nevertheless # can access the said parameters. def _recursion_helper(obj): #Determine if the object should be replaced. If it is not hashable, the search will throw a TypeError. try: if obj in replacements: return replacements[obj] except TypeError: pass # An iterable is recursively processed depending on its class. if hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): if isinstance(obj, dict): contents = {} for key, val in obj.items(): new_key = _recursion_helper(key) if include_original_keys else key new_val = _recursion_helper(val) contents[new_key] = new_val else: contents = [] for element in obj: new_element = _recursion_helper(element) contents.append(new_element) # Use the same class as the original. return obj.__class__(contents) # If it is not replaced and it is not an iterable, return it. return obj return _recursion_helper(original)
aee393b09c74eb6cb1417d017d7004ac69bb3543
15,404
from typing import get_origin from typing import get_args def destructure(hint: t.Any) -> t.Tuple[t.Any, t.Tuple[t.Any, ...]]: """Return type hint origin and args.""" return get_origin(hint), get_args(hint)
451d1fd5a3277f882b9645dcdc78b2accc4d56a2
15,405
def f_x_pbe(x, kappa=0.804, mu=0.2195149727645171): """Evaluates PBE exchange enhancement factor. 10.1103/PhysRevLett.77.3865 Eq. 14. F_X(x) = 1 + kappa ( 1 - 1 / (1 + mu s^2 / kappa) ) kappa, mu = 0.804, 0.2195149727645171 (PBE values) s = c x, c = 1 / (2 (3pi^2)^(1/3) ) Args: x: Float numpy array with shape (num_grids,), the reduced density gradient. kappa: Float, parameter. mu: Float, parameter. Returns: Float numpy array with shape (num_grids,), the PBE exchange enhancement factor. """ c = 1 / (2 * (3 * jnp.pi ** 2) ** (1 / 3)) s = c * x f_x = 1 + kappa - kappa / (1 + mu * s ** 2 / kappa) return f_x
9933a379b659b38082aa91d4498a399a43b2e20c
15,406
def index(): """ if no browser and no platform: it's a CLI request. """ if g.client['browser'] is None or g.client['platform'] is None: string = "hello from API {} -- in CLI Mode" msg = {'message': string.format(versions[0]), 'status': 'OK', 'mode': 200} r = Response(j.output(msg)) r.headers['Content-type'] = 'application/json; charset=utf-8' return r, 200 """ ELSE: it's obviously on a web browser """ string = "<h1>hello from API v1 | {} | {} | {} | {}</h1>" return string.format(g.client['browser'], g.client['platform'], g.client['version'], g.client['language'])
d497ce0cf12bbe914ab3147080c05a4441e9d39b
15,407
import sys def prompt_yes_no(question, default=None): """Asks a yes/no question and returns either True or False.""" prompt = (default is True and 'Y/n') or (default is False and 'y/N') or 'y/n' valid = {'yes': True, 'ye': True, 'y': True, 'no': False, 'n': False} while True: choice = input(question + prompt + ': ').lower() if not choice and default is not None: return default if choice in valid: return valid[choice] else: sys.stdout.write("Invalid response\n")
1cd9c6c19d8bca536b41baec901ba77baa1153c6
15,408
def attention_resnet20(**kwargs): """Constructs a ResNet-20 model. """ model = CifarAttentionResNet(CifarAttentionBasicBlock, 3, **kwargs) return model
e44061a9ad42ceea26aa263a5169ffec62857f90
15,409
import re def get_basename(name): """ [pm/cmds] Get the base name from an object name """ fullpath = get_fullpath(name) return re.sub(r"^.*\|", "", fullpath)
a18cd5ac563dd37c53bdf6b0c1ea6a55efa7a221
15,410
from typing import Optional def get_live_token(resource_uri: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLiveTokenResult: """ The response to a live token query. :param str resource_uri: The identifier of the resource. """ __args__ = dict() __args__['resourceUri'] = resource_uri if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:insights/v20200602preview:getLiveToken', __args__, opts=opts, typ=GetLiveTokenResult).value return AwaitableGetLiveTokenResult( live_token=__ret__.live_token)
b82d799ff261994643c807f4d1b947ba591d6a14
15,411
def load_subspace_vectors(embd, subspace_words): """Loads all word vectors for the particular subspace in the list of words as a matrix Arguments embd : Dictionary of word-to-embedding for all words subspace_words : List of words representing a particular subspace Returns subspace_embd_mat : Matrix of word vectors stored row-wise """ subspace_embd_mat = [] ind = 0 for word in subspace_words: if word in embd: subspace_embd_mat.append(embd[word]) ind = ind+1 return subspace_embd_mat
5eb1db8be8801cf6b1fe294a6f2c93570e9a9fe1
15,412
from datetime import datetime def _safe_filename(filename): """ Generates a safe filename that is unlikely to collide with existing objects in Google Cloud Storage. ``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext`` """ filename = secure_filename(filename) date = datetime.utcnow().strftime("%Y-%m-%d-%H%M%S") basename, extension = filename.rsplit('.', 1) return "{0}-{1}.{2}".format(basename, date, extension)
63e55ccbf29505868efe702cd7c25cdfb0e6ad2f
15,413
from typing import List from typing import Tuple def get_raw(contents: List[str]) -> Tuple[sections.Raw, List[str]]: """Parse the \\*RAW section""" raw_dict, rest = get_section(contents, "raw") remarks = raw_dict[REMARKS] if REMARKS in raw_dict else "" raw_info = sections.Raw( remarks=remarks, raw=raw_dict, ) return raw_info, rest
005af62533c129d39b7af1524b00a48a9113adde
15,414
import itertools def transfers_from_stops( stops, stop_times, transfer_type=2, trips=False, links_from_stop_times_kwargs={'max_shortcut': False, 'stop_id': 'stop_id'}, euclidean_kwargs={'latitude': 'stop_lat', 'longitude': 'stop_lon'}, seek_traffic_redundant_paths=True, seek_transfer_redundant_paths=True, max_distance=800, euclidean_speed=5 * 1000 / 3600 / 1.4, geometry=False, gtfs_only=False ): """ Builds a relevant footpath table from the stop_times and stops tables of a transitfeed. The trips table may be used to spot the "dominated" footpaths that offer no new connection (compared to the pool of stops), for example: * line A stops at station i and station k; * line B stops at station j and station k; * no other line stops at a or b; * the footpath F goes from i to j; * In our understanding : F is dominated by the station k :param stops: DataFrame consistent with the GTFS table "trips" :param stop_times: DataFrame consistent with the GTFS table "trips" :param transfer_type: how to fill the 'transfer_type' column of the feed :param trips: DataFrame consistent with the GTFS table "trips" :param links_from_stop_times_kwargs: kwargs to pass to transitlinks.links_from_stop_times, called on stop_times :param euclidean_kwargs: kwargs to pass to skims.euclidean (the name of the latitude and longitude column) :param seek_traffic_redundant_paths: if True, only the footpaths that do not belong to the transit links are kept. the transit links are built from the stop times using transitlinks.links_from_stop_times. The maximum number of transit links to concatenate in order to look for redundancies may be passed in the kwargs ('max_shortcut'). For example, if max_shortcut = 5: the footpath that can be avoided be taking a five stations ride will be tagged as "dominated". :param seek_transfer_redundant_paths: if True, the "trips" table is used to look for the dominated footpaths :param max_distance: maximum distance of the footpaths (meters as the crows fly) :param euclidean_speed: speed as the crows fly on the footpaths. 
:param geometry: If True, a geometry column (shapely.geometry.linestring.Linestring object) is added to the table :return: footpaths data with optional "dominated" tag """ stop_id = links_from_stop_times_kwargs['stop_id'] origin = stop_id + '_origin' destination = stop_id + '_destination' euclidean = skims.euclidean(stops.set_index(stop_id), **euclidean_kwargs) euclidean.reset_index(drop=True, inplace=True) euclidean['tuple'] = pd.Series(list(zip(list(euclidean['origin']), list(euclidean['destination'])))) short_enough = euclidean[euclidean['euclidean_distance'] < max_distance] short_enough = short_enough[short_enough['origin'] != short_enough['destination']] footpath_tuples = {tuple(path) for path in short_enough[['origin', 'destination']].values.tolist()} paths = euclidean[euclidean['tuple'].isin(footpath_tuples)] paths['dominated'] = False _stop_times = stop_times if stop_id in stops.columns and stop_id not in stop_times.columns: _stop_times = pd.merge( stop_times, stops[['id', stop_id]], left_on='stop_id', right_on='id', suffixes=['', '_merged']) if seek_traffic_redundant_paths: links = feed_links.link_from_stop_times(_stop_times, **links_from_stop_times_kwargs).reset_index() in_links_tuples = {tuple(path) for path in links[[origin, destination]].values.tolist()} paths['trafic_dominated'] = paths['tuple'].isin(in_links_tuples) paths['dominated'] = paths['dominated'] | paths['trafic_dominated'] stop_routes = {} stop_set = set(_stop_times[stop_id]) # if two routes are connected by several footpaths we only keep the shortest one # if routes a and b are connected to route c, d and e by several footpaths : # we keep only the shortest one that does the job. if trips is not False: grouped = pd.merge(_stop_times, trips, left_on='trip_id', right_on='id').groupby(stop_id)['route_id'] stop_routes = grouped.aggregate(lambda x: frozenset(x)).to_dict() def get_routes(row): return tuple((stop_routes[row['origin']], stop_routes[row['destination']])) paths = paths[(paths['origin'].isin(stop_set) & paths['destination'].isin(stop_set))] paths['trips'] = paths.apply(get_routes, axis=1) paths = paths.sort('euclidean_distance').groupby(['trips', 'dominated'], as_index=False).first() paths['min_transfer_time'] = paths['euclidean_distance'] / euclidean_speed paths = paths[paths['origin'] != paths['destination']] if seek_transfer_redundant_paths: paths['frozen'] = paths['trips'].apply(lambda a: frozenset(a[0]).union(frozenset(a[1]))) max_length = max([len(f) for f in list(paths['frozen'])]) to_beat = [] for length in range(max_length + 1): for stop in stop_routes.values(): for c in list(itertools.combinations(stop, length)): to_beat.append(frozenset(c)) to_beat = set(to_beat) paths['transfer_dominated'] = paths['frozen'].apply(lambda f: f in to_beat) paths['dominated'] = paths['dominated'] | paths['transfer_dominated'] if geometry and not gtfs_only: paths['geometry'] = paths.apply(linestring_geometry, axis=1) paths['from_stop_id'] = paths['origin'] paths['to_stop_id'] = paths['destination'] paths['transfer_type'] = transfer_type if gtfs_only: paths = paths[~paths['dominated']] paths = paths[['from_stop_id', 'to_stop_id', 'transfer_type', 'min_transfer_time']] return paths
9e9456440b3dc6cbdd367f9ea99f85559e3343cd
15,415
from pypy.module.cpyext.tupleobject import PyTuple_GetItem from pypy.module.cpyext.listobject import PyList_GetItem def PySequence_ITEM(space, w_obj, i): """Return the ith element of o or NULL on failure. Macro form of PySequence_GetItem() but without checking that PySequence_Check(o)() is true and without adjustment for negative indices. This function used an int type for i. This might require changes in your code for properly supporting 64-bit systems.""" # XXX we should call Py*_GET_ITEM() instead of Py*_GetItem() # from here, but we cannot because we are also called from # PySequence_GetItem() py_obj = as_pyobj(space, w_obj) if isinstance(w_obj, tupleobject.W_TupleObject): py_res = PyTuple_GetItem(space, py_obj, i) incref(space, py_res) keepalive_until_here(w_obj) return py_res if isinstance(w_obj, W_ListObject): py_res = PyList_GetItem(space, py_obj, i) incref(space, py_res) keepalive_until_here(w_obj) return py_res as_sequence = py_obj.c_ob_type.c_tp_as_sequence if as_sequence and as_sequence.c_sq_item: ret = generic_cpy_call(space, as_sequence.c_sq_item, w_obj, i) return make_ref(space, ret) w_ret = space.getitem(w_obj, space.newint(i)) return make_ref(space, w_ret)
8a3bb364d6d2e96681bb89b170b8517e09eb719c
15,416
import codecs import binascii def decode_hex(data): """Decodes a hex encoded string into raw bytes.""" try: return codecs.decode(data, 'hex_codec') except binascii.Error: raise TypeError()
115e89d6f80a6fc535f44d92f610a6312edf6daf
15,417
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols): """Crop a bounding box using the provided coordinates of bottom-left and top-right corners in pixels and the required height and width of the crop. """ bbox = denormalize_bbox(bbox, rows, cols) x_min, y_min, x_max, y_max = bbox x1, y1, x2, y2 = crop_coords cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1] return normalize_bbox(cropped_bbox, crop_height, crop_width)
2cd53c51f6a80034630a53a43678d22f1073e7f4
15,418
def computeDateGranularity(ldf): """ Given an ldf, inspects temporal column and finds out the granularity of dates. Example ---------- ['2018-01-01', '2019-01-02', '2018-01-03'] -> "day" ['2018-01-01', '2019-02-01', '2018-03-01'] -> "month" ['2018-01-01', '2019-01-01', '2020-01-01'] -> "year" Parameters ---------- ldf : lux.luxDataFrame.LuxDataFrame LuxDataFrame with a temporal field Returns ------- field: str A str specifying the granularity of dates for the inspected temporal column """ dateFields = ["day", "month", "year"] if ldf.dataType["temporal"]: dateColumn = ldf[ldf.dataType["temporal"][0]] # assumes only one temporal column, may need to change this function to receive multiple temporal columns in the future dateIndex = pd.DatetimeIndex(dateColumn) for field in dateFields: if hasattr(dateIndex,field) and len(getattr(dateIndex, field).unique()) != 1 : #can be changed to sum(getattr(dateIndex, field)) != 0 return field
e998a144b22c6e65599f1f6b5cc2ec893310e3cc
15,419
from typing import Optional from typing import Tuple import sqlite3 def next_pending_location(user_id: int, current_coords: Optional[Tuple[int, int]] = None) -> Optional[Tuple[int, int]]: """ Retrieves the next pending stone's coordinates. If current_coords is not specified (or is not pending), retrieves the longest-pending stone's coordinates. The order for determining which stone is "next" is defined by how long stones have been pending -- successive applications of this function will retrieve successively younger pending stones. If there is no younger pending stone, the coordinates of the oldest pending stone are returned. If there are no pending stones at all, None is returned. """ with sqlite3.connect(db_file) as db: cur = db.cursor() current_stone_pending_since = 0 # Will always be older than any stone. if current_coords is not None: current_stone = get_stone(*current_coords) if current_stone is not None and current_stone["player"] == user_id and current_stone["status"] == "Pending": # The current stone belongs to the player and is pending. current_stone_pending_since = current_stone["last_status_change_time"] query = """SELECT x, y FROM stones WHERE player = ? AND status = 'Pending' AND last_status_change_time > ? ORDER BY last_status_change_time ASC;""" cur.execute(query, [user_id, current_stone_pending_since]) next_pending_coords = cur.fetchone() # A younger pending stone exists. if next_pending_coords is not None: return next_pending_coords # Otherwise, a younger pending stone does not exist. # Retrieve the oldest stone. cur.execute(query, [user_id, 0]) next_pending_coords = cur.fetchone() # Return either the coords of the oldest pending stone, or None if no such stone exists. return next_pending_coords
fb26531819c19532d7dbf8152963245f07af8e7c
15,420
import re import keyword def get_valid_identifier(prop, replacement_character='', allow_unicode=False): """Given a string property, generate a valid Python identifier Parameters ---------- replacement_character: string, default '' The character to replace invalid characters with. allow_unicode: boolean, default False If True, then allow Python 3-style unicode identifiers. Examples -------- >>> get_valid_identifier('my-var') 'myvar' >>> get_valid_identifier('if') 'if_' >>> get_valid_identifier('$schema', '_') '_schema' >>> get_valid_identifier('$*#$') '_' """ # First substitute-out all non-valid characters. flags = re.UNICODE if allow_unicode else re.ASCII valid = re.sub('\W', replacement_character, prop, flags=flags) # If nothing is left, use just an underscore if not valid: valid = '_' # first character must be a non-digit. Prefix with an underscore # if needed if re.match('^[\d\W]', valid): valid = '_' + valid # if the result is a reserved keyword, then add an underscore at the end if keyword.iskeyword(valid): valid += '_' return valid
a3eeb389b73540aba2041e877c2ff151e272ffdd
15,421
import torch as T def temporal_padding(x, paddings=(1, 0), pad_value=0): """Pad the middle dimension of a 3D tensor with `padding[0]` values left and `padding[1]` values right. Modified from keras.backend.temporal_padding https://github.com/fchollet/keras/blob/3bf913d/keras/backend/theano_backend.py#L590 """ if not isinstance(paddings, (tuple, list, np.ndarray)): paddings = (paddings, paddings) output = T.zeros(x.size(0), x.size(1) + sum(paddings), x.size(2)).to(dev) output[:, :paddings[0], :] = pad_value output[:, paddings[1]:, :] = pad_value output[:, paddings[0]: paddings[0]+x.size(1), :] = x return output
8ccc828ac68cd98da4e7ec5f8253ae5385317d48
15,422
import sys import os def get_args_and_hdf5_file(cfg): """ Assembles the command line arguments for training and the filename for the hdf5-file with the results :return: args, filename """ common_parameters = [ "--train:mode", "world", "--train:samples", "256**3", "--train:batchsize", "64*64*128", "--train:sampler_importance", "0.01", '--rebuild_dataset', '51', "--val:copy_and_split", "--outputmode", "density:direct", "--lossmode", "density", "--activation", BEST_ACTIVATION, "-l1", "1", "--lr_step", "100", "-i", "500", "--logdir", 'volnet/results/eval_TimeVolumetricFeatures/log', "--modeldir", 'volnet/results/eval_TimeVolumetricFeatures/model', "--hdf5dir", 'volnet/results/eval_TimeVolumetricFeatures/hdf5', '--save_frequency', '50' ] def getNetworkParameters(network): channels, layers, params = network return ["--layers", ':'.join([str(channels)] * (layers - 1))] def getFourierParameters(network, fourier): channels, layers, params = network std, count = fourier return ['--fouriercount', str(count), '--fourierstd', str(std)] config, network, fourier, volumetric, time, filename = cfg launcher = [sys.executable, "volnet/train_volnet.py"] args = launcher + config + \ common_parameters + \ getNetworkParameters(network) + \ getFourierParameters(network, fourier) + \ volumetric + time + ['--name', filename] hdf5_file = os.path.join(BASE_PATH, 'hdf5', filename + ".hdf5") return args, hdf5_file, filename
c6cc382384274e6c127a7494f1cbf170dbe62158
15,423
def get_orientation(y, num_classes=8, encoding='one_hot'): """ Args: y: [B, T, H, W] """ # [H, 1] idx_y = np.arange(y.shape[2]).reshape([-1, 1]) # [1, W] idx_x = np.arange(y.shape[3]).reshape([1, -1]) # [H, W, 2] idx_map = np.zeros([y.shape[2], y.shape[3], 2]) idx_map[:, :, 0] += idx_y idx_map[:, :, 1] += idx_x # [1, 1, H, W, 2] idx_map = idx_map.reshape([1, 1, y.shape[2], y.shape[3], 2]) # [B, T, H, W, 1] y2 = np.expand_dims(y, 4) # [B, T, H, W, 2] y_map = idx_map * y2 # [B, T, 1] y_sum = np.expand_dims(y.sum(axis=2).sum(axis=2), 3) + 1e-7 # [B, T, 2] centroids = y_map.sum(axis=2).sum(axis=2) / y_sum # [B, T, 1, 1, 2] centroids = centroids.reshape([y.shape[0], y.shape[1], 1, 1, 2]) # Orientation vector # [B, T, H, W, 2] ovec = (y_map - centroids) * y2 # Normalize orientation [B, T, H, W, 2] ovec = (ovec + 1e-8) / \ (np.sqrt((ovec * ovec).sum(axis=-1, keepdims=True)) + 1e-7) # [B, T, H, W] angle = np.arcsin(ovec[:, :, :, :, 0]) xpos = (ovec[:, :, :, :, 1] > 0).astype('float') ypos = (ovec[:, :, :, :, 0] > 0).astype('float') # [B, T, H, W] angle = angle * xpos * ypos + (np.pi - angle) * (1 - xpos) * ypos + \ angle * xpos * (1 - ypos) + \ (-np.pi - angle) * (1 - xpos) * (1 - ypos) angle = angle + np.pi / 8 # [B, T, H, W] angle_class = np.mod( np.floor((angle + np.pi) * num_classes / 2 / np.pi), num_classes) if encoding == 'one_hot': angle_class = np.expand_dims(angle_class, 4) clazz = np.arange(num_classes).reshape( [1, 1, 1, 1, -1]) angle_one_hot = np.equal(angle_class, clazz).astype('float32') angle_one_hot = (angle_one_hot * y2).max(axis=1) return angle_one_hot.astype('uint8') elif encoding == 'class': # [B, H, W] return (angle_class * y).max(axis=1).astype('uint8') else: raise Exception('Unknown encoding type: {}'.format(encoding))
501c57cf447865ec03229a3ba15125da3837eb8e
15,424
def plot_tempo_curve(f_tempo, t_beat, ax=None, figsize=(8, 2), color='k', logscale=False, xlabel='Time (beats)', ylabel='Tempo (BPM)', xlim=None, ylim=None, label='', measure_pos=[]): """Plot a tempo curve Notebook: C3/C3S3_MusicAppTempoCurve.ipynb Args: f_tempo: Tempo curve t_beat: Time axis of tempo curve (given as sampled beat axis) ax: Plot either as figure (ax==None) or into axis (ax==True) (Default value = None) figsize: Size of figure (Default value = (8, 2)) color: Color of tempo curve (Default value = 'k') logscale: Use linear (logscale==False) or logarithmic (logscale==True) tempo axis (Default value = False) xlabel: Label for x-axis (Default value = 'Time (beats)') ylabel: Label for y-axis (Default value = 'Tempo (BPM)') xlim: Limits for x-axis (Default value = None) ylim: Limits for y-axis (Default value = None) label: Figure labels when plotting into axis (ax==True) (Default value = '') measure_pos: Plot measure positions as specified (Default value = []) Returns: fig: figure handle ax: axes handle """ fig = None if ax is None: fig = plt.figure(figsize=figsize) ax = plt.subplot(1, 1, 1) ax.plot(t_beat, f_tempo, color=color, label=label) ax.set_title('Tempo curve') if xlim is None: xlim = [t_beat[0], t_beat[-1]] if ylim is None: ylim = [np.min(f_tempo) * 0.9, np.max(f_tempo) * 1.1] ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.grid(True, which='both') if logscale: ax.set_yscale('log') ax.yaxis.set_major_formatter(ScalarFormatter()) ax.yaxis.set_minor_formatter(ScalarFormatter()) # ax.set_yticks([], minor=True) # yticks = np.arange(ylim[0], ylim[1]+1, 10) # ax.set_yticks(yticks) plot_measure(ax, measure_pos) return fig, ax
fd8f084d5912b7b64929e7bc8a61bd4dfc8ae107
15,425
def not_pathology(data): """Return false if the node is a pathology. :param dict data: A PyBEL data dictionary :rtype: bool """ return data[FUNCTION] != PATHOLOGY
b420826265164445e8df470a4049d68839182d4b
15,426
def remove_index_fastqs(fastqs,fastq_attrs=IlluminaFastqAttrs): """ Remove index (I1/I2) Fastqs from list Arguments: fastqs (list): list of paths to Fastq files fastq_attrs (BaseFastqAttrs): class to use for extracting attributes from Fastq names (defaults to IlluminaFastqAttrs) Returns: List: input Fastq list with any index read Fastqs removed. """ return list(filter(lambda fq: not fastq_attrs(fq).is_index_read, fastqs))
fc62bd4ab28427ba51d8cd56d17576c89e2ed7ad
15,427
from typing import Union from typing import List def max(name: "Union[str, List[Expr]]") -> "Expr": """ Get maximum value """ if isinstance(name, list): def max_(acc: Series, val: Series) -> Series: mask = acc < val return acc.zip_with(mask, val) return fold(lit(0), max_, name).alias("max") return col(name).max()
3ca308e951801e4376483188249da2333aafc789
15,428
def initialize(Lx, Ly, solutes, restart_folder, field_to_subspace, concentration_init, rad, enable_NS, enable_EC, dx, surface_charge, permittivity, **namespace): """ Create the initial state. """ w_init_field = dict() if not restart_folder: if enable_EC: for solute in ["c_p", "c_m"]: w_init_field[solute] = df.interpolate( df.Constant(1e-4), field_to_subspace[solute].collapse()) c_init = df.interpolate( df.Expression("1./(2*DOLFIN_PI*rad*rad)*exp(" "- (pow(x[0], 2) + pow(x[1], 2))/(2*rad*rad))", Lx=Lx, Ly=Ly, rad=rad, degree=2), field_to_subspace["c_n"].collapse()) C_tot = df.assemble(c_init*dx) c_init.vector()[:] *= concentration_init*Lx*Ly/C_tot w_init_field["c_n"] = c_init V_0 = -surface_charge*Ly/permittivity[0] w_init_field["V"] = df.interpolate( df.Expression("V_0*(x[1]/Ly-0.5)", Ly=Ly, V_0=V_0, degree=1), field_to_subspace["V"].collapse()) return w_init_field
802affd7e56598cb4a22e37480313401254e263f
15,429
def _get_sentry_sdk(): """Initializes and returns the sentry_sdk module configured to work with cron jobs.""" # NOTE: this function uses settings and therefore it shouldn't be called # at module level. try: sentry_sdk = __import__("sentry_sdk") DjangoIntegration = __import__( "sentry_sdk.integrations.django" ).integrations.django.DjangoIntegration except ImportError: raise MissingDependency( "Unable to import sentry_sdk. " "Sentry monitor requires this dependency." ) for setting in ( "CRONMAN_SENTRY_CONFIG", "SENTRY_CONFIG", "RAVEN_CONFIG", ): client_config = getattr(settings, setting, None) if client_config is not None: break else: client_config = app_settings.CRONMAN_SENTRY_CONFIG sentry_sdk.init(integrations=[DjangoIntegration()], **client_config) return sentry_sdk
8682004e68606bf8f67487ad541455179c50493c
15,430
def get_memos(): """ Returns all memos in the database, in a form that can be inserted directly in the 'session' object. """ records = [ ] for record in collection.find( { "type": "dated_memo" } ): record['date'] = arrow.get(record['date']).isoformat() del record['_id'] records.append(record) return sorted(records, key=lambda entry : entry['date'])
d9f0f66db05d368d086b77669418652565ea8587
15,431
import os def filepath(folder, *args, ext='pkl'): """Returns the full path of the file with the calculated results for the given dataset, descriptor and estimator Parameters ---------- folder : string Full path of the folder where results are saved. args : list or tuple Instances of `TextureDataset`, `HEP`, `KNeighborsClassifier`, etc. ext : string File extension (default pkl). Returns ------- fullpath : string The complete path of the file where features corresponding to the given dataset and descriptor (and estimator) are stored. """ lst = [] for obj in args: if hasattr(obj, 'acronym'): item = obj.acronym else: item = obj.__name__ lst.append(item) lst[-1] = lst[-1] + '.' + ext fullpath = os.path.join(folder, '--'.join(lst)) return fullpath
b558559a7b92db6943b6dd04670d9dc4097b5675
15,432
def psplit(df, idx, label): """ Split the participants with a positive label in df into two sets, similarly for participants with a negative label. Return two numpy arrays of participant ids, each array being the chosen ids to be removed from two dataframes to ensure no overlap of participants between the two sets, and keeping half of all participants in df with the same prevalence of event positive participants. """ pos = np.unique(df.loc[df[label] == 1].index.get_level_values(idx)) all_id = np.unique(df.index.get_level_values(idx)) neg = np.setdiff1d(all_id, pos) np.random.shuffle(pos) np.random.shuffle(neg) rmv_1 = np.concatenate((pos[:len(pos)//2], neg[:len(neg)//2])) rmv_2 = np.concatenate((pos[len(pos)//2:], neg[len(neg)//2:])) return rmv_1, rmv_2
fa8652d8812c8f4fd94c7d2601b964b7aaced963
15,433
def se_block(inputs, out_node, scope=None): # TODO: check feature shape and dimension """SENet""" with tf.variable_scope(scope, "se_block", reuse=tf.AUTO_REUSE): channel = inputs.get_shape().as_list()[3] net = tf.reduce_mean(inputs, [1,2], keep_dims=False) net = fc_layer(net, [channel, out_node], _std=1, scope="fc1") net = tf.nn.relu(net) net = fc_layer(net, [out_node, channel], _std=1, scope="fc2") net = tf.nn.sigmoid(net) net = inputs * net return net
7fecd9c0796324c4e74eea0cf07a23ef50306aaf
15,434
import os import pickle def deri_cie_ionfrac(Zlist, condifile='adia.exp_phy.info', \ condi_index=False, appfile=False, outfilename=False, rootpath=False): """ Derive the CIE ionic fraction based on the physical conditions in an ASCII file. The only input parameter is the index (array) of the elements. Parameters ---------- Zlist: [int] list of element nuclear charges Keywords -------- condifile: string or dictionary the ASCII file containing the physical condition array. can also pass in a structure read from the ASCII file; condi_index: [int] index array of at which condition position to derive the spectrum; appfile: str or dictionary of ionic fraction the pickle file that the new calculation will be appended into. Could be the dictionary of loaded from the pickle file; outfilename: str the name of output pickle file recording the ionic fraction. The name of the output file is adopted as following sequence: 1. specified by <outfilename>; 2. adopted from <appfile>, if applicable; 3. "tionfrac_Element.List.pkl". Returns ------- No return, but a pickle file containing derived CIE ionic fraction at specified condition positions is created/updated. """ # System parameter atomdbpath = os.environ['ATOMDB'] ionbalfile = atomdbpath+'APED/ionbal/v3.0.7_ionbal.fits' if not pyatomdb.util.keyword_check(rootpath): rootpath = os.getcwd()+'/' NZlist = len(Zlist) # Output file name if not pyatomdb.util.keyword_check(outfilename): if pyatomdb.util.keyword_check(appfile): if isinstance(appfile, str): outfilename = appfile else: outfilename = 'tciefrac_' for Z in Zlist: outfilename += pyatomdb.atomic.Ztoelsymb(Z) outfilename += '.pkl' # Check the setting of the condition array if pyatomdb.util.keyword_check(condifile): # If it is a string, look for the file name and read it if exists if isinstance(condifile, str): confile = os.path.expandvars(rootpath+condifile) if not os.path.isfile(confile): print("*** ERROR: no such condition file %s. Exiting ***" \ %(confile)) return -1 conditions = ascii.read(confile) elif isinstance(condifile, astropy.table.table.Table): conditions = condifile else: print("Unknown data type for condition file. Please pass a " \ "string or an ASCIIList") return -1 ncondi = len(conditions) if not pyatomdb.util.keyword_check(condi_index): condi_index = range(0,ncondi) else: if max(condi_index) >= ncondi: return -1 te_arr = conditions['kT']/pyatomdb.const.KBOLTZ #in K ionfrac = {} for Z in Zlist: ionfrac[Z] = np.zeros([Z+1,ncondi],dtype=float) for l in condi_index: print('For Zone-%03d...' % l) for Z in Zlist: ionfrac[Z][:,l] = pyatomdb.atomdb.get_ionfrac(ionbalfile, \ Z, te_arr[l]) print('finished.') # Save calculated ionic fraction as pickle file tmp = open(outfilename,'wb') pickle.dump(ionfrac,tmp) tmp.close() return 0
65d29a7290b2d086e211d26dc3dd8166d5b0caaf
15,435
from numscons import get_scons_build_dir import os def get_numpy_include_dirs(sconscript_path): """Return include dirs for numpy. The paths are relatively to the setup.py script path.""" scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path)) n = scdir.count(os.sep) dirs = _incdir() rdirs = [] for d in dirs: rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d)) return rdirs
5ae44cadbf3451f88e5118e4493c7895ab6941e1
15,436
import numpy def invU(U): """ Calculate inverse of U Cell. """ nr, nc = U.getCellsShape() mshape = U.getMatrixShape() assert (nr == nc), "U Cell must be square!" nmat = nr u_tmp = admmMath.copyCell(U) u_inv = admmMath.Cells(nmat, nmat) for i in range(nmat): for j in range(nmat): if (i == j): u_inv[i,j] = numpy.identity(mshape[0]) else: u_inv[i,j] = numpy.zeros_like(U[0,0]) for j in range(nmat-1,0,-1): for i in range(j-1,-1,-1): tmp = u_tmp[i,j] for k in range(nmat): u_tmp[i,k] = u_tmp[i,k] - numpy.matmul(tmp, u_tmp[j,k]) u_inv[i,k] = u_inv[i,k] - numpy.matmul(tmp, u_inv[j,k]) return u_inv
58765834bde6c93e419d3e2f6d8de25d1740c587
15,437
def liq_g(drvt,drvp,temp,pres): """Calculate liquid water Gibbs energy using F03. Calculate the specific Gibbs free energy of liquid water or its derivatives with respect to temperature and pressure using the Feistel (2003) polynomial formulation. :arg int drvt: Number of temperature derivatives. :arg int drvp: Number of pressure derivatives. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :returns: Gibbs free energy in units of (J/kg) / K^drvt / Pa^drvp. :raises ValueError: If drvt or drvp are negative. :Examples: >>> liq_g(0,0,300.,1e5) -5.26505056073e3 >>> liq_g(1,0,300.,1e5) -393.062597709 >>> liq_g(0,1,300.,1e5) 1.00345554745e-3 >>> liq_g(2,0,300.,1e5) -13.9354762020 >>> liq_g(1,1,300.,1e5) 2.75754520492e-7 >>> liq_g(0,2,300.,1e5) -4.52067557155e-13 """ if drvt < 0 or drvp < 0: errmsg = 'Derivatives {0} cannot be negative'.format((drvt,drvp)) raise ValueError(errmsg) TRED, PRED = _C_F03[0] y = (temp - _TCELS)/TRED z = (pres - _PATM)/PRED g = 0. for (j,k,c) in _C_F03[1:]: if y==0: if j==drvt: pwrt = 1. else: pwrt = 0. else: pwrt = y**(j-drvt) for l in range(drvt): pwrt *= j-l if z==0: if k==drvp: pwrp = 1. else: pwrp = 0. else: pwrp = z**(k-drvp) for l in range(drvp): pwrp *= k-l g += c * pwrt * pwrp g /= TRED**drvt * PRED**drvp return g
dfa3eef9d0b9228495b2d4d33a503c0e8c174c2a
15,438
import os def read(fname): """ Utility function to read the README file. Used for the long_description. It's nice, because now 1) we have a top level README file and 2) it's easier to type in the README file than to put a raw string in below ... """ with open(os.path.join(os.path.dirname(__file__), fname)) as f: return f.read()
3320773f53a555e4a8c9ca84c76de282ff91d955
15,439
import asyncio def get_tcp_client(tcp_server_address: str, tcp_server_port: int, session_handler: SessionHandler): """Returns the TCP client used in the 15118 communication. :param tcp_server_address: The TCP server address. :param tcp_server_port: The TCP server port. :param session_handler: The session handler that manages the sessions. :return: transport, protocol -- the objects associated with the TCP connection. """ loop = asyncio.get_event_loop() logger.info("Starting TCP client.") task = loop.create_connection(lambda: TCPClientProtocol(session_handler), tcp_server_address, tcp_server_port, ssl=get_ssl_context()) # TODO: set tcp client port using config file transport, protocol = loop.run_until_complete(task) return transport, protocol
8ebf68acc054831d03e707b1b868bed62e9fe24a
15,440
def extract_dates(obj): """extract ISO8601 dates from unpacked JSON""" if isinstance(obj, dict): new_obj = {} # don't clobber for k,v in iteritems(obj): new_obj[k] = extract_dates(v) obj = new_obj elif isinstance(obj, (list, tuple)): obj = [ extract_dates(o) for o in obj ] elif isinstance(obj, string_types): obj = _parse_date(obj) return obj
1dd7dbda376755cd962c23d2149f41e8559cff12
15,441
def construct_covariates(states, model_spec): """Construct a matrix of all the covariates that depend only on the state space. Parameters --------- states : np.ndarray Array with shape (num_states, 8) containing period, years of schooling, the lagged choice, the years of experience in part-time, and the years of experience in full-time employment, type, age of the youngest child, indicator for the presence of a partner. Returns ------- covariates : np.ndarray Array with shape (num_states, number of covariates) containing all additional covariates, which depend only on the state space information. """ # Age youngest child # Bins of age of youngest child based on kids age # bin 0 corresponds to no kid, remaining bins as in Blundell # 0-2, 3-5, 6-10, 11+ age_kid = pd.Series(states[:, 6]) bins = pd.cut( age_kid, bins=[-2, -1, 2, 5, 10, 11], labels=[0, 1, 2, 3, 4], ).to_numpy() # Male wages based on age and education level of the woman # Wages are first calculated as hourly wages log_wages = ( model_spec.partner_cf_const + model_spec.partner_cf_age * states[:, 0] + model_spec.partner_cf_age_sq * states[:, 0] ** 2 + model_spec.partner_cf_educ * states[:, 1] ) # Male wages # Final input of male wages / partner income is calculated on a weekly # basis. Underlying assumption that all men work full time. male_wages = np.where(states[:, 7] == 1, np.exp(log_wages) * HOURS[2], 0) # Equivalence scale # Depending on the presence of a partner and a child each state is # assigned an equivalence scale value following the modernized OECD # scale: 1 for a single woman HH, 1.5 for a woman with a partner, # 1.8 for a woman with a partner and a child and 1.3 for a woman with # a child and no partner equivalence_scale = np.full(states.shape[0], np.nan) equivalence_scale = np.where( (states[:, 6] == -1) & (states[:, 7] == 0), 1.0, equivalence_scale ) equivalence_scale = np.where( (states[:, 6] == -1) & (states[:, 7] == 1), 1.5, equivalence_scale ) equivalence_scale = np.where( (states[:, 6] != -1) & (states[:, 7] == 1), 1.8, equivalence_scale ) equivalence_scale = np.where( (states[:, 6] != -1) & (states[:, 7] == 0), 1.3, equivalence_scale ) assert ( np.isnan(equivalence_scale).any() == 0 ), "Some HH were not assigned an equivalence scale" # Child benefits # If a woman has a child she receives child benefits child_benefits = np.where(states[:, 6] == -1, 0, model_spec.child_benefits) # Collect in covariates vector covariates = np.column_stack((bins, male_wages, equivalence_scale, child_benefits)) return covariates
883395fc3561ea2fb774eb0ea6dfd866a3d2eed6
15,442
from datetime import datetime def target_ok(target_file, *source_list): """Was the target file created after all the source files? If so, this is OK. If there's no target, or the target is out-of-date, it's not OK. """ try: mtime_target = datetime.fromtimestamp( target_file.stat().st_mtime) except FileNotFoundError: logger.debug("File %s not found", target_file) return False logger.debug("Compare %s %s >ALL %s", target_file, mtime_target, source_list) # If a source doesn't exist, we have bigger problems. times = ( datetime.fromtimestamp(source_file.stat().st_mtime) for source_file in source_list ) return all(mtime_target > mtime_source for mtime_source in times)
f47e89f10d9855913bcfaa07ac97965caabeeaa7
15,443
def check_oblique_montante(grille, x, y): """Rising diagonal alignments (/): running from the bottom-left corner to the top-right corner""" symbole = grille.grid[y][x] # Rising diagonal alignment of the form XXX., node (x,y) being the lowest and leftmost if grille.is_far_from_top(y) and grille.is_far_from_right(x): if all(symbole == grille.grid[y - i - 1][x + i + 1] for i in range(2)): my_play = grille.play_if_possible(x + 3, y - 2) if my_play is not None: return my_play # Rising diagonal alignments, node (x,y) being the highest and rightmost if grille.is_far_from_bottom(y) and grille.is_far_from_left(x): # Diagonal alignment of the form .XXX if all(symbole == grille.grid[y + i + 1][x - i - 1] for i in range(2)): if grille.is_very_far_from_bottom(y): my_play = grille.play_if_possible(x - 3, y + 3) if my_play is not None: return my_play if symbole == grille.grid[y + 3][x - 3]: # Diagonal alignment of the form X.XX if symbole == grille.grid[y + 2][x - 2]: my_play = grille.play_if_possible(x - 1, y + 1) if my_play is not None: return my_play # Diagonal alignment of the form XX.X if symbole == grille.grid[y + 1][x - 1]: my_play = grille.play_if_possible(x - 2, y + 2) if my_play is not None: return my_play return None
f1ca8d7b55117e3e03c5150a07fd483a1da0a4d5
15,444
def _rotate_the_grid(lon, lat, rot_1, rot_2, rot_3): """Rotate the horizontal grid at lon, lat, via rotation matrices rot_1/2/3 Parameters ---------- lon, lat : xarray DataArray giving longitude, latitude in degrees of LLC horizontal grid rot_1, rot_2, rot_3 : np.ndarray rotation matrices Returns ------- xg, yg, zg : xarray DataArray cartesian coordinates of the horizontal grid """ # Get cartesian of 1D view of lat/lon xg, yg, zg = _convert_latlon_to_cartesian(lon.values.ravel(),lat.values.ravel()) # These rotations result in: # xg = 0 at pt1 # yg = 1 at pt1 # zg = 0 at pt1 and pt2 (and the great circle that crosses pt1 & pt2) xg, yg, zg = _apply_rotation_matrix(rot_1, (xg,yg,zg)) xg, yg, zg = _apply_rotation_matrix(rot_2, (xg,yg,zg)) xg, yg, zg = _apply_rotation_matrix(rot_3, (xg,yg,zg)) # Remake into LLC xarray DataArray xg = llc_tiles_to_xda(xg, grid_da=lon, less_output=True) yg = llc_tiles_to_xda(yg, grid_da=lat, less_output=True) zg = llc_tiles_to_xda(zg, grid_da=lon, less_output=True) return xg, yg, zg
b6c81dcc8191c2843534f369269e5c9cd466d581
15,445
def dict_mapper(data): """Mapper from `TypeValueMap` to :class`dict`""" out = {} for k, v in data.items(): if v.type in (iceint.TypedValueType.TypeDoubleComplex, iceint.TypedValueType.TypeFloatComplex): out[k] = complex(v.value.real, v.value.imag) elif v.type in (iceint.TypedValueType.TypeDoubleComplexSeq, iceint.TypedValueType.TypeFloatComplexSeq): out[k] = [ complex(i.real, i.imag) for i in v.value ] elif v.type == iceint.TypedValueType.TypeDirection: out[k] = (v.value.coord1, v.value.coord2, str(v.value.sys)) elif v.type == iceint.TypedValueType.TypeNull: out[k] = None else: out[k] = v.value return out
b10ba4ed38d81cca3fc760d281a32d46d03d4223
15,446
import os import sys import re def parse_prophage_tbl(phispydir): """ Parse the prophage table and return a dict of objects :param phispydir: The phispy directory to find the results :return: dict """ if not os.path.exists(os.path.join(phispydir, "prophage.tbl")): sys.stderr.write("FATAL: The file prophage.tbl does not exist\n") sys.stderr.write("Please run create_prophage_tbl.py -d {}\n".format(phispydir)) sys.exit(-1) p = re.compile('^(.*)_(\d+)_(\d+)$') locations = {} with open(os.path.join(phispydir, "prophage.tbl"), 'r') as f: for l in f: (ppid, location) = l.strip().split("\t") m = p.search(location) (contig, beg, end) = m.groups() beg = int(beg) end = int(end) if beg > end: (beg, end) = (end, beg) if contig not in locations: locations[contig] = [] locations[contig].append((beg, end)) return locations
5a913ec2818a37458fd84267748c199990035a8e
15,447
import time import asyncio async def get_odds(database, params): """Get odds based on parameters.""" LOGGER.info("generating odds") start_time = time.time() players = [dict( civilization_id=data['civilization_id'], user_id=data['user_id'], winner=data['winner'], team_id=data['team_id'] ) for data in params['players']] teams = by_key(players, 'team_id') num_unique_civs = len({p['civilization_id'] for p in players if 'civilization_id' in p}) keys = [] queries = [] map_filter = ("matches.map_name=:map_name", {'map_name': params['map_name']}) if 'teams' in params: keys.append('teams') queries.append(odds_query(database, teams, params['type_id'], user_filter=True)) if 'map_name' in params: keys.append('teams_and_map') queries.append(odds_query(database, teams, params['type_id'], match_filters=map_filter, user_filter=True)) if num_unique_civs > 1: keys.append('teams_and_civilizations') queries.append(odds_query(database, teams, params['type_id'], civ_filter=True, user_filter=True)) keys.append('civilizations') queries.append(odds_query(database, teams, params['type_id'], civ_filter=True)) if 'map_name' in params and num_unique_civs > 1: keys.append('civilizations_and_map') queries.append(odds_query(database, teams, params['type_id'], match_filters=map_filter, civ_filter=True)) results = await asyncio.gather(*queries) LOGGER.debug("computed all odds in %f", time.time() - start_time) return dict(zip(keys, results))
0f71b893df370244e82cd952f4bc1a15cae30728
15,448
def split_by_normal(cpy): """split curved faces into one face per triangle (aka split by normal, planarize). in place""" for name, faces in cpy.iteritems(): new_faces = [] for points, triangles in faces: x = points[triangles, :] normals = np.cross(x[:, 1]-x[:, 0], x[:, 2]-x[:, 0]) normals /= np.sqrt(np.sum(np.square(normals), axis=1))[:, None] if np.allclose(normals, normals[0][None, :]): new_faces.append((points, triangles)) else: for triangle in triangles: new_faces.append((points[triangle, :], np.arange(3, dtype=np.intc).reshape((1, 3)))) cpy[name] = new_faces return cpy
9a4c563465cc2deb5c2946f3e182fc9b71327081
15,449
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params): """ Generates a vector of indices to partition the data for training. NO CHECKING IS DONE: it is assumed that the data could be partitioned in the specified block quantities and that the block quantities describe a coherent partition. Parameters ---------- numTrain : int Number of training data points numTest : int Number of testing data points numValidation : int Number of validation data points (may be zero) params : dictionary with parameters Contains the keywords that control the behavior of the function (uq_train_bks, uq_valid_bks, uq_test_bks) Return ---------- indexTrain : int numpy array Indices for data in training indexValidation : int numpy array Indices for data in validation (if any) indexTest : int numpy array Indices for data in testing (if merging) """ # Extract required parameters numBlocksTrain = params['uq_train_bks'] numBlocksValidation = params['uq_valid_bks'] numBlocksTest = params['uq_test_bks'] numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest # Determine data size and block size if numBlocksTest > 0: # Use all data and re-distribute the partitions numData = numTrain + numValidation + numTest else: # Preserve test partition numData = numTrain + numValidation blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal # integer division with rounding remainder = numData - blockSize * numBlocksTotal if remainder != 0: print("Warning ! Requested partition does not distribute data evenly between blocks. " "Testing (if specified) or Validation (if specified) will use different block size.") sizeTraining = numBlocksTrain * blockSize sizeValidation = numBlocksValidation * blockSize # Fill partition indices # Fill train partition Folds = np.arange(numData) np.random.shuffle(Folds) indexTrain = Folds[:sizeTraining] # Fill validation partition indexValidation = None if numBlocksValidation > 0: indexValidation = Folds[sizeTraining:sizeTraining + sizeValidation] # Fill test partition indexTest = None if numBlocksTest > 0: indexTest = Folds[sizeTraining + sizeValidation:] return indexTrain, indexValidation, indexTest
7bb30a6f69a45d231cbb4a140d7527a270f22e27
15,450
def sct2e(sc, sclkdp): """sct2e(SpiceInt sc, SpiceDouble sclkdp)""" return _cspyce0.sct2e(sc, sclkdp)
a32defabb20993b87c182121e209e62c190a46c8
15,451
import re import json def test_main(monkeypatch, test_dict: FullTestDict): """ - GIVEN a list of words - WHEN the accent dict is generated - THEN check all the jisho info is correct and complete """ word_list = convert_list_of_str_to_kaki(test_dict['input']) sections = test_dict['jisho']['expected_sections'] expected_output = test_dict['jisho']['expected_output'] def get_word_from_jisho_url(url: URL) -> Kaki: match = re.search(r"words\?keyword=(.+)", url) assert match is not None return Kaki(match.group(1)) def get_api_response(url: URL) -> str: word = get_word_from_jisho_url(url) return json.dumps(sections[word]["api_response"]) monkeypatch.setattr("requests.get", lambda url: FakeResponse(get_api_response(url))) assert jisho.main(word_list) == expected_output
f32d75bc5219cf48eccffcd777dc0881e0299ae7
15,452
import os def normalizeFilename(filename): """Take a given filename and return the normalized version of it. Where ~/ is expanded to the full OS specific home directory and all relative path elements are resolved. """ result = os.path.expanduser(filename) result = os.path.abspath(result) return result
a83e1ece98d23708eb6ae8a2acbe4f8495f9e2b8
15,453
def get_tn(tp, fp, fn, _all): """ Args: tp (Set[T]): fp (Set[T]): fn (Set[T]): _all (Iterable[T]): Returns: Set[T] """ return set(_all) - tp - fp - fn
a9afa3a2f07c8b63a6d6911b9a54cf9f9df08600
15,454
def download_cow_head(): """Download cow head dataset.""" return _download_and_read('cowHead.vtp')
70dc6617d3b9d6a8f9fa4df90caf749d00a6d778
15,455
def select_tests(blocks, match_string_list, do_test): """Remove or keep tests from list in WarpX-tests.ini according to do_test variable""" if do_test not in [True, False]: raise ValueError("do_test must be True or False") if (do_test == False): for match_string in match_string_list: print('Selecting tests without ' + match_string) blocks = [ block for block in blocks if not match_string in block ] else: for match_string in match_string_list: print('Selecting tests with ' + match_string) blocks = [ block for block in blocks if match_string in block ] return blocks
f77a0b9e91ec34b85479a442008241c7da386beb
15,456
def get_last_ds_for_site(session, idescr: ImportDescription, col: ImportColumn, siteid: int): """ Returns the newest dataset for a site with instrument, valuetype and level fitting to the ImportDescription's column To be used by lab imports where a site is encoded into the sample name. """ q = session.query(db.Dataset).filter( db.Dataset._site == siteid, db.Dataset._valuetype == col.valuetype, db.Dataset._source == idescr.instrument, ) if col.level is not None: q = q.filter(db.Dataset.level == col.level) return q.order_by(db.Dataset.end.desc()).limit(1).scalar()
41040efe43c0189a3cc8b7288e47eccd752674a7
15,457
def get_cart_from_request(request, cart_queryset=Cart.objects.all()): """Get cart from database or return unsaved Cart :type cart_queryset: saleor.cart.models.CartQueryset :type request: django.http.HttpRequest :rtype: Cart """ if request.user.is_authenticated(): cart = get_user_cart(request.user, cart_queryset) user = request.user else: token = request.get_signed_cookie(Cart.COOKIE_NAME, default=None) cart = get_anonymous_cart_from_token(token, cart_queryset) user = None if cart is not None: return cart else: return Cart(user=user)
5d9d7e3708db5db38f07aea9299ee0aacdecea22
15,458
def _is_ge(series, value): """ Returns the index of rows from series where series >= value. Parameters ---------- series : pandas.Series The data to be queried value : list-like The values to be tested Returns ------- index : pandas.index The index of series for rows where series >= value. """ series = series[series.ge(value)] return series.index
98b8825753953b1b9bf7348d04d260b7514a7749
15,459
def preprocess_image(image, image_size, is_training=False, test_crop=True): """Preprocesses the given image. Args: image: `Tensor` representing an image of arbitrary size. image_size: Size of output image. is_training: `bool` for whether the preprocessing is for training. test_crop: whether or not to extract a central crop of the images (as for standard ImageNet evaluation) during the evaluation. Returns: A preprocessed image `Tensor` of range [0, 1]. """ image = tf.image.convert_image_dtype(image, dtype=tf.float32) if is_training: return preprocess_for_train(image, image_size, image_size) else: return preprocess_for_eval(image, image_size, image_size, crop=test_crop)
913f614798daaf7b752195c92e48890868666b57
15,460
async def wait_for_reaction(self, message): """ Assert that ``message`` is reacted to with any reaction. :param discord.Message message: The message to test with :returns: The reaction object. :rtype: discord.Reaction :raises NoResponseError: """ def check_reaction(reaction, user): return ( reaction.message.id == message.id and user == self.target and reaction.message.channel == self.channel ) try: result = await self.client.wait_for( "reaction_add", timeout=self.client.timeout, check=check_reaction ) except TimeoutError: raise NoResponseError else: return result
67890343d6b59923e8fd3e655252eddcde88323c
15,461
def _multivariate_normal_log_likelihood(X, means=None, covariance=None): """Calculate log-likelihood assuming normally distributed data.""" X = check_array(X) n_samples, n_features = X.shape if means is None: means = np.zeros_like(X) else: means = check_array(means) assert means.shape == X.shape if covariance is None: covariance = np.eye(n_features) else: covariance = check_array(covariance) assert covariance.shape == (n_features, n_features) log_likelihood = 0 for t in range(n_samples): log_likelihood += ss.multivariate_normal.logpdf( X[t], mean=means[t], cov=covariance) return log_likelihood
d5144074f0a88c51a0c46f1b36eb8bdd95f9140e
15,462
from nltk.tokenize import word_tokenize def lemmatize(text): """ tokenize and lemmatize English messages Parameters ---------- text: str text messages to be lemmatized Returns ------- list list with lemmatized forms of words """ def get_wordnet_pos(treebank_tag): if treebank_tag.startswith('J'): return wordnet.ADJ if treebank_tag.startswith('V'): return wordnet.VERB if treebank_tag.startswith('N'): return wordnet.NOUN if treebank_tag.startswith('R'): return wordnet.ADV # try to transfer to Noun else # else: return wordnet.NOUN # lemmatize wordpos = nltk.pos_tag(word_tokenize(text)) lmtzer = WordNetLemmatizer() return [lmtzer.lemmatize(word, pos=get_wordnet_pos(pos)) for word, pos in wordpos]
0a744953ac014f2c0551cecb9c235fc405bf5aaa
15,463
def prune_non_overlapping_boxes(boxes1, boxes2, min_overlap): """Prunes the boxes in boxes1 that overlap less than thresh with boxes2. For each box in boxes1, we want its IOA to be more than min_overlap with at least one of the boxes in boxes2. If it does not, we remove it. Arguments: boxes1: a float tensor with shape [N, 4]. boxes2: a float tensor with shape [M, 4]. min_overlap: minimum required overlap between boxes, to count them as overlapping. Returns: boxes: a float tensor with shape [N', 4]. keep_indices: a long tensor with shape [N'] indexing kept bounding boxes in the first input tensor ('boxes1'). """ with tf.name_scope('prune_non_overlapping_boxes'): overlap = ioa(boxes2, boxes1) # shape [M, N] overlap = tf.reduce_max(overlap, axis=0) # shape [N] keep_bool = tf.greater_equal(overlap, min_overlap) keep_indices = tf.squeeze(tf.where(keep_bool), axis=1) boxes = tf.gather(boxes1, keep_indices) return boxes, keep_indices
5e1a04022707364f1d1a8b14afbd356e781137b9
15,464
def get_namespace_from_node(node): """Get the namespace from the given node Args: node (str): name of the node Returns: namespace (str) """ parts = node.rsplit("|", 1)[-1].rsplit(":", 1) return parts[0] if len(parts) > 1 else u":"
a2305719c0e72614f75309f1412ce71c9264b5df
15,465
def PricingStart(builder): """This method is deprecated. Please switch to Start.""" return Start(builder)
d87eae22f74b5251261bb39aea93e46887f03725
15,466
import json
import os

def get_structures(defect_name: str,
                   output_path: str,
                   bdm_increment: float = 0.1,
                   bdm_distortions: list = None,
                   bdm_type: str = "BDM",
                   ):
    """Imports all the structures found with BDM and stores them in a dictionary
    matching BDM distortion to final structure.
    Args:
        defect_name (str): name of defect (e.g. "vac_1_Sb_0")
        output_path (str): path where the material folder is
        bdm_increment (float): distortion increment for BDM (default: 0.1)
        bdm_distortions (list): list of distortions applied to nearest neighbours
            instead of the default ones (e.g. [-0.5, 0.5]) (default: None)
        bdm_type (str): "BDM" or "champion" (default: "BDM")
    Returns:
        Dictionary matching BDM distortion to final structure."""
    defect_structures = {}
    try:
        # Read BDM_parameters from BDM_metadata.json
        with open(f"{output_path}/BDM_metadata.json") as json_file:
            bdm_parameters = json.load(json_file)['BDM_parameters']
        bdm_distortions = bdm_parameters['BDM_distortions']
        bdm_distortions = [i * 100 for i in bdm_distortions]
    except Exception:
        # There is no (readable) BDM metadata file
        if bdm_distortions:
            bdm_distortions = [i * 100 for i in bdm_distortions]
        else:
            # The user didn't specify BDM distortions
            bdm_distortions = range(-60, 70, int(bdm_increment * 100))
    rattle_dir_path = output_path + "/" + defect_name + "/" + bdm_type + "/" + defect_name + "_only_rattled"
    if os.path.isdir(rattle_dir_path):
        # If the rattle folder exists, only rattling was applied (no BDM, as there is
        # no change in electrons), so grab the rattle & Unperturbed structures
        # instead of the BDM distortions.
        try:
            path = rattle_dir_path + "/vasp_gam/CONTCAR"
            defect_structures['rattle'] = grab_contcar(path)
        except Exception:
            print("Problems in get_structures")
            defect_structures['rattle'] = "Not converged"
    else:
        for i in bdm_distortions:
            # Key used in the dictionary, using the same format as the dictionary
            # that matches distortion to final energy.
            key = i / 100
            i = '{:.1f}'.format(i)
            if i == "0.0":
                i = "-0.0"  # this is the format used in the defect file name
            path = output_path + "/" + defect_name + "/" + bdm_type + "/" + defect_name + "_" + str(i) + "%_BDM_Distortion/vasp_gam/CONTCAR"
            try:
                defect_structures[key] = grab_contcar(path)
            except (FileNotFoundError, IndexError, ValueError):
                print("Error grabbing structure.")
                print("Your defect path is: ", path)
                defect_structures[key] = "Not converged"
            except Exception:
                print("Problem in get_structures")
                print("Your defect path is: ", path)
                defect_structures[key] = "Not converged"
    unperturbed_path = (output_path + "/" + defect_name + "/" + bdm_type + "/"
                        + defect_name + "_Unperturbed_Defect/vasp_gam/CONTCAR")
    try:
        defect_structures["Unperturbed"] = grab_contcar(unperturbed_path)
    except FileNotFoundError:
        print("Your defect path is: ", unperturbed_path)
        defect_structures["Unperturbed"] = "Not converged"
    return defect_structures
921996e12dc327e2a3f813bec278127c4963970e
15,467
def create_whimsy_value_at_clients(number_of_clients: int = 3): """Returns a Python value and federated type at clients.""" value = [float(x) for x in range(10, number_of_clients + 10)] type_signature = computation_types.at_clients(tf.float32) return value, type_signature
87d1d110392bd83585fd19ba2e8a10a0c8507d30
15,468
def format_task_numbers_with_links(tasks): """Returns formatting for the tasks section of asana.""" project_id = data.get('asana-project', None) def _task_format(task_id): if project_id: asana_url = tool.ToolApp.make_asana_url(project_id, task_id) return "[#%d](%s)" % (task_id, asana_url) else: return "#%d" % task_id return "\n".join([_task_format(tid) for tid in tasks])
b6b7975cb45cdae0a146a67c0fab51ef0724aee2
15,469
def get_tick_indices(tickmode, numticks, coords):
    """
    Ticks on the axis are a subset of the axis coordinates
    This function returns the indices of y coordinates on which a tick should be displayed
    :param tickmode: should be 'auto' (automatically generated) or 'all'
    :param numticks: minimum number of ticks to display, only applies to 'auto' mode
    :param coords: list of coordinates along the axis
    :return indices: ticks indices in the input list of y coordinates
    :return numchar: maximum number of characters required to display ticks, this is useful to preserve alignments
    """
    if tickmode == 'all' or (tickmode == 'auto' and numticks >= len(coords)):
        # Put a tick in front of each row
        indices = list(range(len(coords)))
    else:
        # If tickmode is 'auto', put at least 'numticks' ticks
        tick_spacing = 5   # default spacing between ticks
        # Decrease the tick spacing progressively until we get the desired number of ticks
        indices = []
        while len(indices) < numticks:
            indices = list(range(0, len(coords), tick_spacing))
            tick_spacing -= 1

    # Compute the number of characters required to display ticks
    numchar = max(len(str(NiceNumber(coords[i]))) for i in indices)

    return indices, numchar
72cf3fed39db3cabf672bff4b042c8685356f9ff
15,470
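An illustrative call (hypothetical data), assuming the `NiceNumber` formatting helper used inside the function is available: with ten coordinates and `numticks=3`, the spacing is reduced from 5 to 4 before enough ticks fit.

coords = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]

indices, numchar = get_tick_indices('auto', numticks=3, coords=coords)
# indices == [0, 4, 8]; numchar depends on how NiceNumber formats 0.0, 2.0 and 4.0

indices, numchar = get_tick_indices('all', numticks=3, coords=coords)
# indices == [0, 1, ..., 9], one tick per coordinate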
def fpIsNormal(a, ctx=None): """Create a Z3 floating-point isNormal expression. """ return _mk_fp_unary_pred(Z3_mk_fpa_is_normal, a, ctx)
ee6e2cccf1ad0534929aa0632d271d37f58a232e
15,471
import os import argparse def existingFile(filename): """ 'type' for argparse - check that filename exists """ if not os.path.exists(filename): raise argparse.ArgumentTypeError("{0} does not exist".format(filename)) return filename
01d51420bba9edc18e7aecf53950a6ab843c384c
15,472
import math def siqs_find_next_poly(n, factor_base, i, g, B): """Compute the (i+1)-th polynomials for the Self-Initialising Quadratic Sieve, given that g is the i-th polynomial. """ v = lowest_set_bit(i) + 1 z = -1 if math.ceil(i / (2 ** v)) % 2 == 1 else 1 b = (g.b + 2 * z * B[v - 1]) % g.a a = g.a b_orig = b if (2 * b > a): b = a - b assert ((b * b - n) % a == 0) g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig) h = Polynomial([b, a]) for fb in factor_base: if a % fb.p != 0: fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p return g, h
d5529db62a194582aacd8769a56688cf6b42bbe1
15,473
def get_column(value):
    """Convert column number on command line to Python index."""
    if value.startswith("c"):
        # Ignore c prefix, e.g. "c1" for "1"
        value = value[1:]
    try:
        col = int(value)
    except ValueError:
        stop_err("Expected an integer column number, not %r" % value)
    if col < 1:
        stop_err("Expected column numbers to be at least one, not %r" % value)
    return col - 1
858f4128955c0af579d99dcd64be157b41c6dae3
15,474
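Illustrative calls showing the optional `c` prefix and the 1-based to 0-based conversion; invalid input is delegated to the module's `stop_err` helper.

assert get_column("1") == 0     # first column -> index 0
assert get_column("c3") == 2    # "c" prefix is ignored
# get_column("0") or get_column("foo") would call stop_err() with an error message.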
def sdi(ts_split, mean=False, keys=None):
    """
    Compute the Structural Decoupling Index (SDI).

    i.e. the ratio between the norm of the "high" and the norm of the "low"
    "graph-filtered" timeseries.
    If the given dictionary does not contain the keywords "high" and "low",
    the SDI is computed as the ratio between the norm of the second and the
    norm of the first dictionary entry.
    "keys" can be used to indicate the order of the two keys, or to select
    two elements of a bigger dictionary.

    Parameters
    ----------
    ts_split : dict or numpy.ndarrays
        A dictionary containing two entries.
        If the two entries are "low" and "high", then SDI will be computed as
        the norm of the high vs the norm of the low, otherwise as the ratio
        between the second (second key in sorted keys) and the first.
    mean : bool, optional
        If True, compute mean over the last axis (e.g. between subjects)
    keys : None or list of strings, optional
        Can be used to select two entries from a bigger dictionary and/or to
        specify the order in which the keys should be read (e.g. forcing a
        different order from the sorted keys).

    Returns
    -------
    numpy.ndarray
        Returns the structural decoupling index

    Raises
    ------
    ValueError
        If keys are provided but not contained in the dictionary
        If keys are not provided and the dictionary has more than 2 entries
    """
    # #!# Implement acceptance of two matrices and not only dictionary
    if keys is None:
        keys = list(ts_split.keys())
    else:
        if all(item in list(ts_split.keys()) for item in keys) is False:
            raise ValueError(f'The provided keys {keys} do not match the '
                             'keys of the provided dictionary '
                             f'({list(ts_split.keys())})')

    if len(keys) != 2:
        raise ValueError('`structural_decoupling_index` function requires '
                         'a dictionary with exactly two timeseries as input.')

    check_keys = [item.lower() for item in keys]
    if all(item in ['low', 'high'] for item in check_keys):
        # Case insensitively reorder the items of dictionary as ['low', 'high'].
        keys = [keys[check_keys.index('low')], keys[check_keys.index('high')]]

    norm = dict.fromkeys(keys)
    for k in keys:
        norm[k] = np.linalg.norm(ts_split[k], axis=1)

    LGR.info('Computing Structural Decoupling Index.')
    sdi = norm[keys[1]] / norm[keys[0]]

    if sdi.ndim >= 2 and mean:
        sdi = sdi.mean(axis=1)

    return sdi
9ed09f72bc6902b5c007286e12f1ed72d904d4b8
15,475
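A hedged usage sketch for `sdi`, with random arrays standing in for graph-filtered timeseries of shape (nodes, timepoints, subjects); it assumes the module-level logger `LGR` and `numpy as np` used inside the function are available.

import numpy as np

rng = np.random.default_rng(0)
ts_split = {
    "low": rng.standard_normal((100, 200, 5)),   # nodes x timepoints x subjects
    "high": rng.standard_normal((100, 200, 5)),
}

sdi_per_node = sdi(ts_split, mean=True)
print(sdi_per_node.shape)  # (100,): one SDI value per node, averaged over subjects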
def Class_Property (getter) : """Return a descriptor for a property that is accessible via the class and via the instance. :: >>> from _TFL._Meta.Property import * >>> from _TFL._Meta.Once_Property import Once_Property >>> class Foo (object) : ... @Class_Property ... def bar (cls) : ... "Normal method bar" ... print ("Normal method bar called") ... return 42 ... @Class_Property ... @classmethod ... def baz (cls) : ... "classmethod baz" ... print ("classmethod baz called") ... return "Frozz" ... @Class_Property ... @Class_Method ... def foo (cls) : ... "Class_Method foo" ... print ("Class_Method foo called") ... return "Hello world" ... @Class_Property ... @Once_Property ... def qux (cls) : ... "Once property qux" ... print ("Once property qux") ... return 42 * 42 ... >>> foo = Foo () >>> Foo.bar Normal method bar called 42 >>> foo.bar Normal method bar called 42 >>> foo.bar = 137 >>> Foo.bar Normal method bar called 42 >>> foo.bar 137 >>> Foo.bar = 23 >>> Foo.bar 23 >>> print (Foo.baz) classmethod baz called Frozz >>> print (foo.baz) classmethod baz called Frozz >>> >>> print (Foo.foo) Class_Method foo called Hello world >>> print (foo.foo) Class_Method foo called Hello world >>> >>> Foo.qux Once property qux 1764 >>> foo.qux 1764 >>> foo2 = Foo () >>> foo2.qux 1764 >>> Foo.qux 1764 """ if hasattr (getter, "__func__") : return _Class_Property_Descriptor_ (getter) else : return _Class_Property_Function_ (getter)
845d62444f41b547b9922d10666f8a911c7e8de3
15,476
def naive_act_norm_initialize(x, axis): """Compute the act_norm initial `scale` and `bias` for `x`.""" x = np.asarray(x) axis = list(sorted(set([a + len(x.shape) if a < 0 else a for a in axis]))) min_axis = np.min(axis) reduce_axis = tuple(a for a in range(len(x.shape)) if a not in axis) var_shape = [x.shape[a] for a in axis] var_shape_aligned = [x.shape[a] if a in axis else 1 for a in range(min_axis, len(x.shape))] mean = np.reshape(np.mean(x, axis=reduce_axis), var_shape) bias = -mean scale = 1. / np.reshape( np.sqrt(np.mean((x - np.reshape(mean, var_shape_aligned)) ** 2, axis=reduce_axis)), var_shape ) return scale, bias, var_shape_aligned
78034c16e38c27b146a8ee1be1be86d9fc4ffe6a
15,477
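A quick hypothetical check of the initializer above: applying the returned `bias` and `scale` should yield approximately zero mean and unit variance along the reduced axes.

import numpy as np

x = np.random.RandomState(0).normal(loc=3.0, scale=2.0, size=(64, 16))

scale, bias, var_shape_aligned = naive_act_norm_initialize(x, axis=[-1])
normalized = (x + bias.reshape(var_shape_aligned)) * scale.reshape(var_shape_aligned)

print(np.allclose(normalized.mean(axis=0), 0.0))  # True
print(np.allclose(normalized.std(axis=0), 1.0))   # True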
def cmpTensors(t1, t2, atol=1e-5, rtol=1e-5, useLayout=None): """Compare Tensor list data""" assert (len(t1) == len(t2)) for i in range(len(t2)): if (useLayout is None): assert(t1[i].layout == t2[i].layout) dt1 = t1[i].dataAs(useLayout) dt2 = t2[i].dataAs(useLayout) if not np.allclose(dt1, dt2, atol=atol, rtol=rtol): logger.error("Tensor %d mismatch!" % i) return False return True
ce085c9998fddc86420ea4f5307e83e15d49372a
15,478
def auth(body): # noqa: E501 """Authenticate endpoint Return a bearer token to authenticate and authorize subsequent calls for resources # noqa: E501 :param body: Request body to perform authentication :type body: dict | bytes :rtype: Auth """ db = get_db() cust = db['Customer'].find_one({"email": body['username']}) try: if cust is None: user = db['User'].find_one({"email": body['username']}) if user is None: return "Auth failed", 401 else: if user['plain_password'] == body['password']: return generate_response(generate_token(str(user['_id']))) else: if cust['plain_password'] == body['password']: return generate_response(generate_token(str(cust['_id']))) except Exception as e: print (e) return "Auth failed", 401
2992d119cf7fa3a5d797825c704cd837f647dbd7
15,479
def make_feature(func, *argfuncs): """Return a customized feature function that adapts to different input representations. Args: func: feature function (callable) argfuncs: argument adaptor functions (callable, take `ctx` as input) """ assert callable(func) for argfunc in argfuncs: assert callable(argfunc) def _feature(ctx): return func(*[argfunc(ctx) for argfunc in argfuncs]) return _feature
26064ee0873d63edc877afdcb03a39e40453a831
15,480
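A small illustration of the adaptor pattern above, with hypothetical context keys: each `argfunc` pulls one argument out of the context before the wrapped function is called.

def word_pair(left, right):
    return "{}|{}".format(left, right)

feature = make_feature(word_pair,
                       lambda ctx: ctx["prev_word"],   # hypothetical context key
                       lambda ctx: ctx["next_word"])   # hypothetical context key

print(feature({"prev_word": "hello", "next_word": "world"}))  # "hello|world"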
import ctypes def from_numpy(np_array: np.ndarray): """Convert a numpy array to another type of dlpack compatible array. Parameters ---------- np_array : np.ndarray The source numpy array that will be converted. Returns ------- pycapsule : PyCapsule A pycapsule containing a DLManagedTensor that can be converted to other array formats without copying the underlying memory. """ holder = _Holder(np_array) size = ctypes.c_size_t(ctypes.sizeof(DLManagedTensor)) dl_managed_tensor = DLManagedTensor.from_address( ctypes.pythonapi.PyMem_RawMalloc(size) ) dl_managed_tensor.dl_tensor.data = holder.data dl_managed_tensor.dl_tensor.device = DLDevice(1, 0) dl_managed_tensor.dl_tensor.ndim = np_array.ndim dl_managed_tensor.dl_tensor.dtype = DLDataType.TYPE_MAP[str(np_array.dtype)] dl_managed_tensor.dl_tensor.shape = holder.shape dl_managed_tensor.dl_tensor.strides = holder.strides dl_managed_tensor.dl_tensor.byte_offset = 0 dl_managed_tensor.manager_ctx = holder._as_manager_ctx() dl_managed_tensor.deleter = _numpy_array_deleter pycapsule = ctypes.pythonapi.PyCapsule_New( ctypes.byref(dl_managed_tensor), _c_str_dltensor, _numpy_pycapsule_deleter, ) return pycapsule
2663b831274f1fc1dd2e597212fa475f6d03e578
15,481
def lmsSubstringsAreEqual(string, typemap, offsetA, offsetB): """ Return True if LMS substrings at offsetA and offsetB are equal. """ # No other substring is equal to the empty suffix. if offsetA == len(string) or offsetB == len(string): return False i = 0 while True: aIsLMS = isLMSChar(i + offsetA, typemap) bIsLMS = isLMSChar(i + offsetB, typemap) # If we've found the start of the next LMS substrings if (i > 0 and aIsLMS and bIsLMS): # then we made it all the way through our original LMS # substrings without finding a difference, so we can go # home now. return True if aIsLMS != bIsLMS: # We found the end of one LMS substring before we reached # the end of the other. return False if string[i + offsetA] != string[i + offsetB]: # We found a character difference, we're done. return False i += 1
5177b8cf5b2b80a519ef0d9fbb5f972c584a6b5b
15,482
from .tools import nantrapz

def synthesize_photometry(lbda, flux, filter_lbda, filter_trans, normed=True):
    """ Get Photometry from the given spectral information through the given filter.

    This function converts the flux into photons since the transmission provides
    the fraction of photons that goes through.

    Parameters
    -----------
    lbda, flux: [array]
        Wavelength and flux of the spectrum from which you want to
        synthesize photometry.

    filter_lbda, filter_trans: [array]
        Wavelength and transmission of the filter.

    normed: [bool] -optional-
        Should the filter transmission be normalized?

    Returns
    -------
    Float (photometric point)
    """
    # ---------
    # The Tool
    def integrate_photons(lbda, flux, step, flbda, fthroughput):
        """ """
        filter_interp = np.interp(lbda, flbda, fthroughput)
        dphotons = (filter_interp * flux) * lbda * 5.006909561e7
        return nantrapz(dphotons, lbda) if step is None else np.sum(dphotons*step)

    # ---------
    # The Code
    normband = 1. if not normed else \
      integrate_photons(lbda, np.ones(len(lbda)), None, filter_lbda, filter_trans)

    return integrate_photons(lbda, flux, None, filter_lbda, filter_trans)/normband
6eb8b9806388b9b373e37a2c813e3a4ba9696bc2
15,483
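A hedged worked example for `synthesize_photometry`, assuming `numpy as np` and the module's `nantrapz` import are available: with `normed=True`, a flat spectrum through any filter returns the input flux level, because the numerator is exactly the constant times the normalization integral.

import numpy as np

lbda = np.linspace(3000, 10000, 2000)        # wavelength grid
flux = np.full_like(lbda, 1e-17)             # flat spectrum

# Hypothetical box filter between 4000 and 5000 (same wavelength units).
filter_lbda = np.linspace(3900, 5100, 500)
filter_trans = ((filter_lbda > 4000) & (filter_lbda < 5000)).astype(float)

photometry = synthesize_photometry(lbda, flux, filter_lbda, filter_trans)
print(np.isclose(photometry, 1e-17))  # True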
def get_A_dash_floor_bath(house_insulation_type, floor_bath_insulation):
    """Bathroom floor area (m2)

    Args:
        house_insulation_type(str): '床断熱住戸' (floor-insulated dwelling) or '基礎断熱住戸' (foundation-insulated dwelling)
        floor_bath_insulation(str): '床断熱住戸' (floor-insulated dwelling), '基礎断熱住戸' (foundation-insulated dwelling) or '浴室の床及び基礎が外気等に面していない' (bathroom floor and foundation not facing outside air, etc.)

    Returns:
        float: Bathroom floor area (m2)
    """
    return get_table_3(15, house_insulation_type, floor_bath_insulation)
fbcd2c6dd6b5e2099351b445bf4b3e71aed4d508
15,484
def cancel_task_async(hostname, task_id): """Cancels a swarming task.""" return _call_api_async( None, hostname, 'task/%s/cancel' % task_id, method='POST')
fb1b57dac80518e2cf3b375d8ecd393b34855b45
15,485
def generate_two_files_both_stress_strain():
    """Generates two files that have both stress and strain in each file"""
    fname = {'stress': 'resources/double_stress.json',
             'strain': 'resources/double_strain.json'}
    expected = [
        # makes an array of two pif systems
        pif.System(
            properties=[
                pif.Property(name='stress',
                             scalars=list(np.linspace(0, 100)),
                             conditions=pif.Value(
                                     name='time',
                                     scalars=list(np.linspace(0, 100)))),
                pif.Property(name='strain',
                             scalars=list(np.linspace(0, 1)),
                             conditions=pif.Value(
                                     name='time',
                                     scalars=list(np.linspace(0, 100))))]),
        pif.System(
            properties=[
                pif.Property(name='stress',
                             scalars=list(np.linspace(0, 100)),
                             conditions=pif.Value(
                                     name='time',
                                     scalars=list(np.linspace(0, 100)))),
                pif.Property(name='strain',
                             scalars=list(np.linspace(0, 1)),
                             conditions=pif.Value(
                                     name='time',
                                     scalars=list(np.linspace(0, 100))))
            ])]
    # dump the pifs into two separate files
    with open(fname['stress'], 'w') as stress_file:
        pif.dump(expected[0], stress_file)
    with open(fname['strain'], 'w') as strain_file:
        pif.dump(expected[1], strain_file)
    return fname
6cfe410071085bc975f630e34e43c8b2b626f846
15,486
import sys def run_cli(entry_point, *arguments, **options): """ Test a command line entry point. :param entry_point: The function that implements the command line interface (a callable). :param arguments: Any positional arguments (strings) become the command line arguments (:data:`sys.argv` items 1-N). :param options: The following keyword arguments are supported: **capture** Whether to use :class:`CaptureOutput`. Defaults to :data:`True` but can be disabled by passing :data:`False` instead. **input** Refer to :class:`CaptureOutput`. **merged** Refer to :class:`CaptureOutput`. **program_name** Used to set :data:`sys.argv` item 0. :returns: A tuple with two values: 1. The return code (an integer). 2. The captured output (a string). """ # Add the `program_name' option to the arguments. arguments = list(arguments) arguments.insert(0, options.pop('program_name', sys.executable)) # Log the command line arguments (and the fact that we're about to call the # command line entry point function). logger.debug("Calling command line entry point with arguments: %s", arguments) # Prepare to capture the return code and output even if the command line # interface raises an exception (whether the exception type is SystemExit # or something else). returncode = 0 stdout = None stderr = None try: # Temporarily override sys.argv. with PatchedAttribute(sys, 'argv', arguments): # Manipulate the standard input/output/error streams? options['enabled'] = options.pop('capture', True) with CaptureOutput(**options) as capturer: try: # Call the command line interface. entry_point() finally: # Get the output even if an exception is raised. stdout = capturer.stdout.getvalue() stderr = capturer.stderr.getvalue() # Reconfigure logging to the terminal because it is very # likely that the entry point function has changed the # configured log level. configure_logging() except BaseException as e: if isinstance(e, SystemExit): logger.debug("Intercepting return code %s from SystemExit exception.", e.code) returncode = e.code else: logger.warning("Defaulting return code to 1 due to raised exception.", exc_info=True) returncode = 1 else: logger.debug("Command line entry point returned successfully!") # Always log the output captured on stdout/stderr, to make it easier to # diagnose test failures (but avoid duplicate logging when merged=True). is_merged = options.get('merged', False) merged_streams = [('merged streams', stdout)] separate_streams = [('stdout', stdout), ('stderr', stderr)] streams = merged_streams if is_merged else separate_streams for name, value in streams: if value: logger.debug("Output on %s:\n%s", name, value) else: logger.debug("No output on %s.", name) return returncode, stdout
4ff525bd5b8b8edb520151475f6da58a9bed7172
15,487
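A hypothetical test built on `run_cli`, assuming the surrounding package (which provides `CaptureOutput`, `PatchedAttribute`, and `configure_logging`) is importable:

import sys

def greet():
    # Toy entry point: prints a greeting based on the first CLI argument.
    print("hello %s" % sys.argv[1])
    sys.exit(0)

returncode, output = run_cli(greet, "world")
assert returncode == 0
assert "hello world" in output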
def recipe_edit(username, pk): """Page showing the possibility to edit the recipe.""" recipe_manager = RecipeManager(api_token=g.user_token) response = recipe_manager.get_recipe_response(pk) recipe = response.json() # shows 404 if there is no recipe, response status code is 404 or user is not the author if not recipe or response.status_code == 404 or username != g.username: abort(404) # checking form validation form = RecipeAddForm(data=recipe) if form.validate_on_submit(): try: if form.image.data != DEFAULT_RECIPE_IMAGE_PATH: # if the user has uploaded a picture file image = images.save(form.image.data) image_path = f'app/media/recipe_images/{image}' else: image_path = None # set image_path to None so as not to alter the image except UploadNotAllowed: # if the user uploaded a file that is not a picture flash('Incorrect picture format', 'error') else: # if there is no exception edit recipe data and image recipe_data, recipe_files = recipe_manager.get_form_data(form, image_path) recipe_manager.edit(recipe_data, recipe_files, pk, username) return redirect('/recipes/') return render_template('recipe_edit.html', form=form)
73735cd5c279c8e62aebdacfb29c5d3d83c856fa
15,488
import re def load_data_file(filename): """loads a single file into a DataFrame""" regexp = '^.*/results/([^/]+)/([^/]+)/([^/]+).csv$' optimizer, blackbox, seed = re.match(regexp, filename).groups() f = ROOT + '/results/{}/{}/{}.csv'.format(optimizer, blackbox, seed) result = np.genfromtxt(f, delimiter=',') return get_best(result)
a2c53adfc356809f7ec554d20203a5ad276ebc1e
15,489
def get_hub_manager(): """Generate Hub plugin structure""" global _HUB_MANAGER if not _HUB_MANAGER: _HUB_MANAGER = _HubManager(_plugins) return _HUB_MANAGER
384039f45f59cec3db737536a08719770ecfb3ff
15,490
def extract_stimtype( data: pd.DataFrame, stimtype: str, columns: list ) -> pd.DataFrame: """ Get trials with matching label under stimType """ if stimtype not in accept_stimtype: raise ValueError(f"invalid {stimtype}, only accept {accept_stimtype}") get = columns.copy() get += ["participant_id"] get += [i for i in identity_entity if i in data.columns] stimresp = data.query(f"stimType == '{stimtype}'") return stimresp.loc[:, get]
186cc066133d1d8d6c443b17a2d17cc70d366d98
15,491
def compute_rank_clf_loss(hparams, scores, labels, group_size, weight):
    """
    Compute ranking/classification loss

    Note that the tfr loss is slightly different from our implementation: the tfr loss is the sum over all losses
    divided by the number of queries; our implementation is the sum over all losses divided by the number of
    labels greater than 0.
    """
    # Classification loss
    if hparams.num_classes > 1:
        labels = tf.cast(labels, tf.int32)
        labels = tf.squeeze(labels, -1)  # Last dimension is max_group_size, which should be 1
        return tf.losses.sparse_softmax_cross_entropy(logits=scores, labels=labels, weights=weight)

    # Expand weight to [batch_size, 1] so that in the in-house ranking loss it can be multiplied with the loss,
    # which is [batch_size, max_group_size]
    expanded_weight = tf.expand_dims(weight, axis=-1)

    # Ranking losses
    # tf-ranking loss
    if hparams.use_tfr_loss:
        weight_name = "weight"
        loss_fn = tfr.losses.make_loss_fn(hparams.tfr_loss_fn,
                                          lambda_weight=hparams.tfr_lambda_weights,
                                          weights_feature_name=weight_name)
        loss = loss_fn(labels, scores, {weight_name: expanded_weight})
        return loss

    # our own implementation
    if hparams.ltr_loss_fn == 'pairwise':
        lambdarank = LambdaRank()
        pairwise_loss, pairwise_mask = lambdarank(scores, labels, group_size)
        loss = tf.reduce_sum(tf.reduce_sum(pairwise_loss, axis=[1, 2]) * expanded_weight) / tf.reduce_sum(pairwise_mask)
    elif hparams.ltr_loss_fn == 'softmax':
        loss = compute_softmax_loss(scores, labels, group_size) * expanded_weight
        is_positive_label = tf.cast(tf.greater(labels, 0), dtype=tf.float32)
        loss = tf.div_no_nan(tf.reduce_sum(loss), tf.reduce_sum(is_positive_label))
    elif hparams.ltr_loss_fn == 'pointwise':
        loss = compute_sigmoid_cross_entropy_loss(scores, labels, group_size) * expanded_weight
        loss = tf.reduce_mean(loss)
    else:
        raise ValueError('Currently only support pointwise/pairwise/softmax/softmax_cls.')
    return loss
12b45518d5bd11182dbf220ccfe90da2fe0d6c38
15,492
def get_org_image_url(url, insert_own_log=False):
    """ Returns the URL of the logo of the relevant institution, if available """
    #n_pos = url[7:].find('/')  # [7:] to skip http://
    #org_url = url[:n_pos+7+1]  # including '/'
    item_containers = get_image_items(ELIXIER_LOGOS_PATH)
    image_url = image_url_url = ''
    image_url_extern = True
    for ic in item_containers:
        arr = ic.item.sub_title.split('|')
        for a in arr:
            b = a.strip()
            if b != '' and url.find(b) >= 0:
                image_url = ELIXIER_LOGOS_URL + ic.item.name
                image_url_url = ic.item.title
                image_url_extern = True
                break
        if image_url != '':
            break
    if insert_own_log and image_url == '':
        image_url = EDUFOLDER_INST_LOGO_URL
        image_url_url = get_base_site_url()
        image_url_extern = False
    return image_url, image_url_url, image_url_extern
b80d29a3393820e6cfc58e36ae34361d4587bd73
15,493
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor

async def download_page(url, file_dir, file_name, is_binary=False):
    """ Fetch URL and save response to file

    Args:
        url (str): Page URL
        file_dir (pathlib.Path): File directory
        file_name (str): File name
        is_binary (bool): True if should download binary content (e.g. images)

    Returns:
        HttpResponse: HTTP response content and extension
    """
    response = await fetch(url, is_binary)
    path = file_dir.joinpath('{}{}'.format(file_name, response.ext))
    try:
        with ThreadPoolExecutor() as pool:
            await asyncio.get_running_loop().run_in_executor(
                pool, write_file, str(path), is_binary, response.content
            )
    except OSError:
        logging.error('Can\'t save file: {}'.format(path))
    return response
452285e7d47d7d7c227e356efc0e7dc1ad2ce7ee
15,494
def normal_coffee(): """ when the user decides to pick a normal or large cup of coffee :return: template that explains how to make normal coffee """ return statement(render_template('explanation_large_cup', product='kaffee'))
ba9ed37cb85327d6541ad86071f047ce87297c95
15,495
def _transitive_closure_dense_numpy(A, kind='metric', verbose=False): """ Calculates Transitive Closure using numpy dense matrix traversing. """ C = A.copy() n, m = A.shape # Check if diagonal is all zero if sum(np.diagonal(A)) > 0: raise ValueError("Diagonal has to be zero for matrix computation to be correct") # Compute Transitive Closure for i in range(0, n): if verbose: print('calc row:', i + 1, 'of', m) for j in range(0, n): if kind == 'metric': vec = C[i, :] + C[:, j] C[i, j] = vec.min() elif kind == 'ultrametric': vec = np.maximum(C[i, :], C[:, j]) C[i, j] = vec.min() return np.array(C)
cf02a380dbf28a6442cc999b3faea329d5041b17
15,496
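A worked example for the metric closure above (numpy only): the direct 0-2 distance of 5 is replaced by the shorter two-hop path through node 1, of total length 3.

import numpy as np

A = np.array([
    [0.0, 1.0, 5.0],
    [1.0, 0.0, 2.0],
    [5.0, 2.0, 0.0],
])

C = _transitive_closure_dense_numpy(A, kind='metric')
print(C)
# [[0. 1. 3.]
#  [1. 0. 2.]
#  [3. 2. 0.]]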
def convert_date(raw_dates: pd.Series) -> pd.Series: """Automatically converts series containing raw dates to specific format. Parameters ---------- raw_dates: Series to be converted. Returns ------- Optimized pandas series. """ raw_dates = pd.to_datetime(raw_dates, utc=True) return raw_dates
23a2310ec8fd30dd2b831805817fb3407c10c104
15,497
async def get_scorekeeper_by_id(scorekeeper_id: conint(ge=0, lt=2**31)): """Retrieve a Scorekeeper object, based on Scorekeeper ID, containing: Scorekeeper ID, name, slug string, and gender.""" try: scorekeeper = Scorekeeper(database_connection=_database_connection) scorekeeper_info = scorekeeper.retrieve_by_id(scorekeeper_id) if not scorekeeper_info: raise HTTPException(status_code=404, detail=f"Scorekeeper ID {scorekeeper_id} not found") else: return scorekeeper_info except ValueError: raise HTTPException(status_code=404, detail=f"Scorekeeper ID {scorekeeper_id} not found") except ProgrammingError: raise HTTPException(status_code=500, detail="Unable to retrieve scorekeeper information") except DatabaseError: raise HTTPException(status_code=500, detail="Database error occurred while trying to " "retrieve scorekeeper information")
044b3bacfdf47918c2ad15635958d69c17ccf5c8
15,498
import inspect import os def modulePath(): """ This will get us the program's directory, even if we are frozen using py2exe """ try: _ = sys.executable if weAreFrozen() else __file__ except NameError: _ = inspect.getsourcefile(modulePath) return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(_))))
46c404ccb60044f7c1687692f5ba4903230a5769
15,499