Columns: content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
def add_upgrades(ws, cols, lnth):
    """Add upgrade formulas linking the 'New_4G_Sites' and
    'Towers_non_4G_MNO' sheets, then apply number formats and borders."""
    for col in cols:
        cell = "{}1".format(col)
        ws[cell] = "='New_4G_Sites'!{}".format(cell)

    for col in cols[:2]:
        for i in range(2, lnth):
            cell = "{}{}".format(col, i)
            ws[cell] = "='New_4G_Sites'!{}".format(cell)

    for col in cols[2:]:
        for i in range(2, lnth):
            cell = "{}{}".format(col, i)
            part1 = "=IFERROR(IF(Towers_non_4G_MNO!{}>0,IF(Towers_non_4G_MNO!{}>".format(cell, cell)
            part2 = "New_4G_Sites!{},New_4G_Sites!{},New_4G_Sites!{}-Towers_non_4G_MNO!{}),0),0)".format(cell, cell, cell, cell)
            ws[cell] = part1 + part2

    columns = ['C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
    ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)

    set_border(ws, 'A1:L{}'.format(lnth - 1), "thin", "000000")

    return ws
a5c33a59992976dfbdd775ce55c6017cef7d7f1d
7,448
import torch
import torch.nn.functional as F


def vae_loss(recon_x, x, mu, logvar, reduction="mean"):
    """
    Effects
    -------
    Reconstruction + KL divergence losses summed over all elements and batch

    See Appendix B from VAE paper:
    Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    https://arxiv.org/abs/1312.6114
    """
    BCE = F.binary_cross_entropy(recon_x, x, reduction=reduction)

    # 0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())

    return BCE, KLD
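A minimal usage sketch for the loss above; the shapes and the assumption that the decoder output already lies in [0, 1] are illustrative, not part of the original snippet.

import torch

recon_x = torch.rand(8, 784)   # decoder output, assumed already in [0, 1]
x = torch.rand(8, 784)         # input batch (BCE targets must be in [0, 1])
mu = torch.zeros(8, 20)        # latent mean
logvar = torch.zeros(8, 20)    # latent log-variance

bce, kld = vae_loss(recon_x, x, mu, logvar)
loss = bce + kld               # combine the two terms before backprop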
ecdabfd62d7e7c7aa858b36669a73e6081891f83
7,449
from functools import reduce


def Or(*args):
    """Defines the three valued ``Or`` behaviour for a 2-tuple of
    three valued logic values"""
    def reduce_or(cmp_intervala, cmp_intervalb):
        if cmp_intervala[0] is True or cmp_intervalb[0] is True:
            first = True
        elif cmp_intervala[0] is None or cmp_intervalb[0] is None:
            first = None
        else:
            first = False
        if cmp_intervala[1] is True or cmp_intervalb[1] is True:
            second = True
        elif cmp_intervala[1] is None or cmp_intervalb[1] is None:
            second = None
        else:
            second = False
        return (first, second)
    return reduce(reduce_or, args)
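A quick check of the three-valued semantics, where None plays the role of "unknown":

print(Or((False, None), (None, True)))     # -> (None, True)
print(Or((False, False), (False, False)))  # -> (False, False)
print(Or((True, None), (False, False)))    # -> (True, None)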
fc252c0129904d7ad58c18adad0b08c638b2bd11
7,450
def create_task():
    """Create a new task"""
    data = request.get_json()
    # In advanced solution, a generic validation should be done
    if TaskValidator._validate_title(data):
        TaskPersistence.create(title=data['title'])
        return {'success': True, 'message': 'Task has been saved'}
    # Simple error response
    return {'error': 'bad request', 'message': 'not valid data', 'status': 400}
e42b06ed297b589cacedae522b81c898a01d6b72
7,451
import socket


def get_socket_with_reuseaddr() -> socket.socket:
    """Returns a new socket with `SO_REUSEADDR` option on, so an address
    can be reused immediately, without waiting for TIME_WAIT socket
    state to finish.

    On Windows, `SO_EXCLUSIVEADDRUSE` is used instead. This is because
    `SO_REUSEADDR` on this platform allows the socket to be bound to an
    address that is already bound by another socket, without requiring
    the other socket to have this option on as well.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if 'SO_EXCLUSIVEADDRUSE' in dir(socket):
        sock.setsockopt(socket.SOL_SOCKET,
                        getattr(socket, 'SO_EXCLUSIVEADDRUSE'), 1)
    else:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    return sock
6edbc0f0aaaeaebd9c6d0f31257de0b4dfe7df1c
7,452
def get_systemd_services(service_names):
    """
    :param service_names: {'service_unit_id': 'service_display_name'}
        e.g., {'cloudify-rabbitmq.service': 'RabbitMQ'}
    """
    systemd_services = get_services(service_names)
    statuses = []
    services = {}
    for service in systemd_services:
        is_service_running = service['instances'] and (
            service['instances'][0]['state'] == 'running')
        status = NodeServiceStatus.ACTIVE if is_service_running \
            else NodeServiceStatus.INACTIVE
        services[service['display_name']] = {
            'status': status,
            'extra_info': {
                'systemd': service
            }
        }
        statuses.append(status)
    return services, statuses
523efae5fb536c9326978d84ea9055aecf47da05
7,453
import datetime

from werkzeug.local import LocalProxy


def json_handler(obj):
    """serialize non-serializable data for json"""
    # serialize date
    if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
        return unicode(obj)
    elif isinstance(obj, LocalProxy):
        return unicode(obj)
    else:
        raise TypeError, """Object of type %s with value of %s is not JSON serializable""" % \
            (type(obj), repr(obj))
ea44a5d77e608f16458a1c3405665011f2d9c70c
7,454
from random import choice


def _random_prefix(sentences):
    """
    prefix random generator
    input: list of input sentences
    output: random word
    """
    words = _word_dict(sentences)
    return choice(words)
7a81b5825bc0dc2ac4b75bff40a9b76af77486a3
7,455
def user_logout(*args, **kwargs):
    # pylint: disable=unused-argument
    """
    This endpoint is the landing page for the logged-in user
    """
    # Delete the Oauth2 token for this session
    log.info('Logging out User: %r' % (current_user,))

    delete_session_oauth2_token()

    logout_user()

    flash('You were successfully logged out.', 'warning')

    return flask.redirect(_url_for('backend.home'))
baaeb1f1b353eaa75bc1d81cb72a9bb931398047
7,456
import numpy as np
from PIL import Image, ImageDraw, ImageFont


def redraw_frame(image, names, aligned):
    """ Adds names and bounding boxes to the frame """
    i = 0
    unicode_font = ImageFont.truetype("DejaVuSansMono.ttf", size=17)
    img_pil = Image.fromarray(image)
    draw = ImageDraw.Draw(img_pil)
    for face in aligned:
        draw.rectangle((face[0], face[1], face[2], face[3]), outline=(0, 255, 0), width=2)
        if names is not None and len(names) > i:
            if names[i] == 'unknown':
                draw.text((face[0], face[1] - 30), "unknown", fill=(0, 0, 255), font=unicode_font)
                draw.rectangle((face[0], face[1], face[2], face[3]), outline=(0, 0, 255), width=2)
            else:
                draw.text((face[0], face[1] - 30), names[i], fill=(0, 255, 0), font=unicode_font)
        if names is None or len(names) <= i:
            draw.text((face[0], face[1] - 30), 'refreshing...', fill=(255, 0, 0), font=unicode_font)
        i += 1
    return np.array(img_pil)
66adbbc42c4108855e1eea6494391957c3e91b4f
7,457
from typing import Type


def is_dapr_actor(cls: Type[Actor]) -> bool:
    """Checks if class inherits :class:`Actor`.

    Args:
        cls (type): The Actor implementation.

    Returns:
        bool: True if cls inherits :class:`Actor`. Otherwise, False.
    """
    return issubclass(cls, Actor)
1c3f5b4744cf9db91c869247ab297ffc10dcfc68
7,459
import cPickle


def unpickle(file):
    """ unpickle the data """
    fo = open(file, 'rb')
    dict = cPickle.load(fo)
    fo.close()
    return dict
dbab180e31e7bff6ba965f48ee7a3018e2665763
7,460
def _calc_norm_gen_prob(sent_1, sent_2, mle_lambda, topic):
    """
    Calculates and returns the length-normalized generative probability of
    sent_1 given sent_2.
    """
    sent_1_len = sum([count for count in sent_1.raw_counts.values()])
    return _calc_gen_prob(sent_1, sent_2, mle_lambda, topic) ** (1.0 / sent_1_len)
7f84f1b0de67f9d6f631ad29aa1c614d6d3f13d6
7,461
def isomorphic(l_op, r_op):
    """
    Subject of definition, here it is equal operation.
    See limitations (vectorization.rst).
    """
    if l_op.getopnum() == r_op.getopnum():
        l_vecinfo = forwarded_vecinfo(l_op)
        r_vecinfo = forwarded_vecinfo(r_op)
        return l_vecinfo.bytesize == r_vecinfo.bytesize
    return False
e34c6928c4fdf10fed55bb20588cf3183172cab1
7,462
import math

import numpy


def partition5(l, left, right):
    """
    Insertion Sort of list of at most 5 elements and return the position
    of the median.
    """
    j = left
    for i in xrange(left, right + 1):
        t = numpy.copy(l[i])
        for j in xrange(i, left - 1, -1):
            if l[j - 1][0] < t[0]:
                break
            l[j] = l[j - 1]
        l[j] = t
    return int(math.floor((left + right) / 2))
75c5c893c978e81a2b19b79c6000a2151ff6b088
7,463
def max_validator(max_value):
    """Return validator function that ensures upper bound of a number.

    Resulting validation function will validate the internal value of
    resource instance field with the ``value <= max_value`` check.

    Args:
        max_value: maximum value for new validator
    """
    def validator(value):
        if value > max_value:
            raise ValidationError("{} is not <= {}".format(value, max_value))

    return validator
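A hypothetical usage sketch; `ValidationError` is assumed to come from the surrounding resource framework:

validate_age = max_validator(130)
validate_age(42)     # passes silently (returns None)
# validate_age(200)  # would raise ValidationError: "200 is not <= 130"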
6957f507f7140aa58a9c969a04b9bde65da54319
7,464
def standalone_job_op(name, image, command, gpus=0, cpu_limit=0, memory_limit=0,
                      env=[], tensorboard=False, tensorboard_image=None,
                      data=[], sync_source=None, annotations=[],
                      metrics=['Train-accuracy:PERCENTAGE'],
                      arena_image='cheyang/arena_launcher:v0.5',
                      timeout_hours=240):
    """This function submits a standalone training Job

    Args:
        name: the name of standalone_job_op
        image: the docker image name of training job
        mount: specify the datasource to mount to the job, like <name_of_datasource>:<mount_point_on_job>
        command: the command to run
    """
    if not name:
        raise ValueError("name must be specified")
    if not image:
        raise ValueError("image must be specified")
    if not command:
        raise ValueError("command must be specified")

    options = []
    if sync_source:
        if not sync_source.startswith("http"):
            raise ValueError("sync_source must be an http git url")
        options.append('--sync-source')
        options.append(str(sync_source))

    for e in env:
        options.append('--env')
        options.append(str(e))

    for d in data:
        options.append('--data')
        options.append(str(d))

    for m in metrics:
        options.append('--metric')
        options.append(str(m))

    if tensorboard_image:
        options.append('--tensorboard-image')
        options.append(str(tensorboard_image))

    op = dsl.ContainerOp(
        name=name,
        image=arena_image,
        command=['python', 'arena_launcher.py'],
        arguments=["--name", name,
                   "--tensorboard", str(tensorboard),
                   "--image", str(image),
                   "--gpus", str(gpus),
                   "--cpu", str(cpu_limit),
                   "--step-name", '{{pod.name}}',
                   "--workflow-name", '{{workflow.name}}',
                   "--memory", str(memory_limit),
                   "--timeout-hours", str(timeout_hours),
                   ] + options +
                  [
                   "job",
                   "--", str(command)],
        file_outputs={'train': '/output.txt',
                      'workflow': '/workflow-name.txt',
                      'step': '/step-name.txt',
                      'name': '/name.txt'}
    )
    op.set_image_pull_policy('Always')
    return op
2c2c6c014fe841b6929153cd5590fa43210964ed
7,465
def load_randompdata(dataset_str, iter):
    """Load data."""
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended
        NL = 2312
        NC = 6
    elif dataset_str == 'cora':
        NL = 1708
        NC = 7
    else:
        NL = 18717
        NC = 3

    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    # fixed 500 for validation read from file, choose random 20 per class
    # from the others for train
    '''
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    '''
    idx_val = [int(item) for item in
               open("source/" + dataset_str + "/val_idx" + str(iter) + ".txt").readlines()]
    idx_test = test_idx_range.tolist()
    idx_traincand = list(set(range(0, NL)) - set(idx_val))  # train candidate, not test not valid
    nontestlabels = labels[idx_traincand]
    gtlabels = np.argmax(nontestlabels, axis=1)
    idx_train = []
    for i in range(NC):
        nodeidx = np.where(gtlabels == i)
        ridx = random.sample(range(0, nodeidx[0].shape[0]), 20)
        idx_train += list(np.asarray(idx_traincand)[list(nodeidx[0][ridx])])

    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]

    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
353160ffd8b0474fc4c58532d1bcae80f4d6cbad
7,466
def page_to_reload():
    """
    Returns page that is refreshed every argument of content attribute
    in meta http-equiv="refresh".
    """
    val = knob_thread.val
    year = int(val * 138. / 256 + 1880)
    return (
        """<!DOCTYPE html>
        <html>
          <head><meta http-equiv="refresh" content=".2">
          <style>
            h1 {{color:white; font-family: Arial; font-size: 9em}}
          </style>
          </head>
          <body bgcolor="{color}0000">
            <h1>YEAR {year}</h1><br />
            <h1>ANOMALY {anomaly}&#176; </h1>
          </body>
        </html>
        """
    ).format(color=('%x' % val), year=year, anomaly=year_to_anomaly[year])
75c9a409a6dd936f23c9a54dae058fb7e8fd9e97
7,467
def svn_log_changed_path2_create(*args):
    """svn_log_changed_path2_create(apr_pool_t pool) -> svn_log_changed_path2_t"""
    return _core.svn_log_changed_path2_create(*args)
f40cf409bfb458d35cb38ba76fa93c319803a992
7,468
from functools import wraps


def check_from_dict(method):
    """A wrapper that wraps a parameter checker to the original function (crop operation)."""

    @wraps(method)
    def new_method(self, *args, **kwargs):
        word_dict, = (list(args) + [None])[:1]
        if "word_dict" in kwargs:
            word_dict = kwargs.get("word_dict")
        assert isinstance(word_dict, dict), "word_dict needs to be a list of word,id pairs"
        for word, word_id in word_dict.items():
            assert isinstance(word, str), "each word in word_dict needs to be type str"
            assert isinstance(word_id, int) and word_id >= 0, "each word id needs to be positive integer"

        kwargs["word_dict"] = word_dict
        return method(self, **kwargs)

    return new_method
d45b68ccccbd4f97e585c7386f7da6547fdd86d6
7,471
def env_observation_space_info(instance_id):
    """
    Get information (name and dimensions/bounds) of the env's
    observation_space

    Parameters:
        - instance_id: a short identifier (such as '3c657dbc')
          for the environment instance
    Returns:
        - info: a dict containing 'name' (such as 'Discrete'), and
          additional dimensional info (such as 'n') which varies from
          space to space
    """
    info = envs.get_observation_space_info(instance_id)
    return jsonify(info=info)
2ee4b17a73ad49c6c63dc07f2822b0f2d1ece770
7,472
from typing import Set
from typing import Dict


def build_target_from_transitions(
    dynamics_function: TargetDynamics,
    initial_state: State,
    final_states: Set[State],
) -> Target:
    """
    Initialize a target from transitions, initial state and final states.

    The set of states and the set of actions are parsed from the transition
    function. This will guarantee that all the states are reachable.

    :param dynamics_function: the transition function
    :param initial_state: the initial state
    :param final_states: the final states
    :return: the target
    """
    states = set()
    actions = set()
    transition_function: TransitionFunction = {}
    policy: Dict[State, Dict[Action, Prob]] = {}
    reward: Dict[State, Dict[Action, Reward]] = {}
    for start_state, transitions_by_action in dynamics_function.items():
        states.add(start_state)
        transition_function[start_state] = {}
        policy[start_state] = {}
        reward[start_state] = {}
        for action, (next_state, prob, reward_value) in transitions_by_action.items():
            actions.add(action)
            states.add(next_state)
            transition_function[start_state][action] = next_state
            policy[start_state][action] = prob
            reward[start_state][action] = reward_value

    unreachable_final_states = final_states.difference(states)
    assert (
        len(unreachable_final_states) == 0
    ), f"the following final states are not in the transition function: {unreachable_final_states}"
    assert initial_state in states, "initial state not in the set of states"

    return Target(
        states,
        actions,
        final_states,
        initial_state,
        transition_function,
        policy,
        reward,
    )
d1014560c05e6f3169c65725d94af20494d97f0a
7,473
from datetime import datetime
from typing import Optional


def citation(dll_version: Optional[str] = None) -> dict:
    """
    Return a citation for the software.
    """
    executed = datetime.now().strftime("%B %d, %Y")
    bmds_version = __version__
    url = "https://pypi.org/project/bmds/"
    if not dll_version:
        # assume we're using the latest version
        dll_version = get_latest_dll_version()
    return dict(
        paper=(
            "Pham LL, Watford S, Friedman KP, Wignall J, Shapiro AJ. Python BMDS: A Python "
            "interface library and web application for the canonical EPA dose-response modeling "
            "software. Reprod Toxicol. 2019;90:102-108. doi:10.1016/j.reprotox.2019.07.013."
        ),
        software=(
            f"Python BMDS. (Version {bmds_version}; Model Library Version {dll_version}) "
            f"[Python package]. Available from {url}. Executed on {executed}."
        ),
    )
1196e1de2c2431120467eac83701022f1b4d9840
7,474
def comment_pr_(ci_data, github_token):
    """Write either a staticman comment or non-staticman comment to github.
    """
    return sequence(
        (comment_staticman(github_token) if is_staticman(ci_data) else comment_general),
        post(github_token, ci_data),
        lambda x: dict(status_code=x.status_code, json=x.json()),
    )(ci_data)
548f854a37fe95b83660bc2ec4012cda72317976
7,475
from keras import backend as K
from keras import layers as L
from keras.models import Model


def response_loss_model(h, p, d_z, d_x, d_y, samples=1, use_upper_bound=False, gradient_samples=0):
    """
    Create a Keras model that computes the loss of a response model on data.

    Parameters
    ----------
    h : (tensor, tensor) -> Layer
        Method for building a model of y given p and x

    p : (tensor, tensor) -> Layer
        Method for building a model of p given z and x

    d_z : int
        The number of dimensions in z

    d_x : int
        The number of dimensions in x

    d_y : int
        The number of dimensions in y

    samples : int
        The number of samples to use

    use_upper_bound : bool
        Whether to use an upper bound to the true loss
        (equivalent to adding a regularization penalty on the variance of h)

    gradient_samples : int
        The number of separate additional samples to use when calculating the gradient.
        This can only be nonzero if use_upper_bound is False, in which case the gradient of
        the returned loss will be an unbiased estimate of the gradient of the true loss.

    Returns
    -------
    A Keras model that takes as inputs z, x, and y and generates a single output containing the loss.
    """
    assert not (use_upper_bound and gradient_samples)

    # sample: (() -> Layer, int) -> Layer
    def sample(f, n):
        assert n > 0
        if n == 1:
            return f()
        else:
            return L.average([f() for _ in range(n)])

    z, x, y = [L.Input((d,)) for d in [d_z, d_x, d_y]]
    if gradient_samples:
        # we want to separately sample the gradient; we use stop_gradient to
        # treat the sampled model as constant
        # the overall computation ensures that we have an interpretable loss (y-h̅(p,x))²,
        # but also that the gradient is -2(y-h̅(p,x))∇h̅(p,x) with *different*
        # samples used for each average
        diff = L.subtract([y, sample(lambda: h(p(z, x), x), samples)])
        grad = sample(lambda: h(p(z, x), x), gradient_samples)

        def make_expr(grad, diff):
            return K.stop_gradient(diff) * (K.stop_gradient(diff + 2 * grad) - 2 * grad)
        expr = L.Lambda(lambda args: make_expr(*args))([grad, diff])
    elif use_upper_bound:
        expr = sample(lambda: L.Lambda(K.square)(L.subtract([y, h(p(z, x), x)])), samples)
    else:
        expr = L.Lambda(K.square)(L.subtract([y, sample(lambda: h(p(z, x), x), samples)]))
    return Model([z, x, y], [expr])
898e72f29a9c531206d0243b8503761844468665
7,476
import datetime

import numpy


def get_hourly_load(session, endpoint_id, start_date, end_date):
    """
    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :param start_date: datetime object
    :param end_date: datetime object and: end_date >= start_date
    :return:
    """
    numdays = (end_date - start_date).days + 1

    # list of hours: 0:00 - 23:00
    hours = ['0{}:00'.format(h) for h in range(0, 10)] + \
            ['{}:00'.format(h) for h in range(10, 24)]
    heatmap_data = numpy.zeros((len(hours), numdays))

    start_datetime = to_utc_datetime(
        datetime.datetime.combine(start_date, datetime.time(0, 0, 0, 0))
    )
    end_datetime = to_utc_datetime(datetime.datetime.combine(end_date, datetime.time(23, 59, 59)))

    for time, count in get_num_requests(session, endpoint_id, start_datetime, end_datetime):
        parsed_time = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        day_index = (parsed_time - start_datetime).days
        hour_index = int(to_local_datetime(parsed_time).strftime('%H'))
        heatmap_data[hour_index][day_index] = count

    return {
        'days': [
            (start_date + datetime.timedelta(days=i)).strftime('%Y-%m-%d') for i in range(numdays)
        ],
        "data": heatmap_data.tolist(),
    }
cf619b12778edfaf27d89c43226079aafc650ac4
7,477
def startend(start=None, end=None):
    """Return TMIN, TAVG, TMAX."""

    # Select statement
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]

    if not end:
        # Calculate TMIN, TAVG, TMAX for dates greater than start
        results = session.query(*sel).\
            filter(Measurement.date >= start).all()
        print(results)  # This is a list of tuples
        # Convert list of tuples into normal list
        temps = list(np.ravel(results))
        return jsonify(temps)

    # Calculate TMIN, TAVG, TMAX with start and stop
    results = session.query(*sel).\
        filter(Measurement.date >= start).\
        filter(Measurement.date <= end).all()
    print(results)  # This is a list of tuples
    # Convert list of tuples into normal list
    temps = list(np.ravel(results))
    print(temps)  # This is a normal list
    return jsonify(temps)
7b8f395fd177d5352b14c12902acea1a641c5df8
7,478
def configure_camera(config):
    """
    Configures the camera.

    :param config: dictionary containing BARD configuration parameters
        optional parameters in camera. source (default 0),
        window size (default delegates to cv2.CAP_PROP_FRAME_WIDTH),
        calibration directory and roi (region of interest)
    """
    # Specify some reasonable defaults. Webcams are typically 640x480.
    video_source = 0
    dims = None
    mtx33d = np.array([[1000.0, 0.0, 320.0],
                       [0.0, 1000.0, 240.0],
                       [0.0, 0.0, 1.0]])
    dist5d = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
    roi = None

    if config is None:
        return video_source, mtx33d, dist5d, dims, roi

    camera_config = config.get('camera', None)

    if camera_config is not None:
        video_source = camera_config.get('source', 0)

        calib_dir = camera_config.get('calibration directory', None)
        calib_prefix = camera_config.get('calibration prefix', 'calib')
        if calib_dir is not None:
            calib_param = MonoCalibrationParams()
            calib_param.load_data(calib_dir, calib_prefix, halt_on_ioerror=False)
            mtx33d = calib_param.camera_matrix
            dist5d = calib_param.dist_coeffs

        dims = camera_config.get('window size', None)
        if dims is None:
            print("WARNING: window size was not specified! "
                  "This probably breaks the calibrated overlay!")
        else:
            # JSON file contains list, OpenCV requires tuple.
            if len(dims) != 2:
                raise ValueError("Invalid window size given, window size"
                                 " should be list of length 2")
            dims = (dims[0], dims[1])

        roi = camera_config.get('roi', None)
        if roi is not None:
            if len(roi) != 4:
                raise ValueError("Invalid roi set. Region of interest should"
                                 " be a list of length 4. [x_start, y_start, x_end, y_end]")

    return video_source, mtx33d, dist5d, dims, roi
8accdaf9d710ff2ccff6d4ad5216611593e06ff0
7,479
def munsell_value_Moon1943(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
    *Moon and Spencer (1943)* method.

    Parameters
    ----------
    Y
        *luminance* :math:`Y`.

    Returns
    -------
    :class:`np.floating` or :class:`numpy.ndarray`
        *Munsell* value :math:`V`.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``Y``      | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+

    +------------+-----------------------+---------------+
    | **Range**  | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``V``      | [0, 10]               | [0, 1]        |
    +------------+-----------------------+---------------+

    References
    ----------
    :cite:`Wikipedia2007c`

    Examples
    --------
    >>> munsell_value_Moon1943(12.23634268)  # doctest: +ELLIPSIS
    4.0688120...
    """
    Y = to_domain_100(Y)

    V = 1.4 * spow(Y, 0.426)

    return as_float(from_range_10(V))
7e419c8936fa49f35a5838aa7d3a5d99c93808f2
7,480
import functools
import traceback


def log_errors(func):
    """
    A wrapper to print exceptions raised from functions that are called by
    callers that silently swallow exceptions, like render callbacks.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Exceptions from calls like this aren't well-defined, so just log the
            # error and don't reraise it.
            traceback.print_exc()

    return wrapper
a15c26de36a8c784da0333382f27fc06b0ed78a0
7,481
def count_total_words(sentence_list):
    """
    Counts the words in a list of sentences.

    :param sentence_list: a list of sentences, each a list of words
    :return: the number of words in the sentences
    """
    return sum(
        [count_words_per_sentence(sentence) for sentence in sentence_list]
    )
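For example, if the helper count_words_per_sentence simply returns len(sentence) (an assumption; the helper is not shown here), then:

sentences = [["the", "cow", "is", "colorful"], ["hello"]]
count_total_words(sentences)  # -> 5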
0abc550c26b40fd36d0b9540fc1cd001e40a7552
7,482
def translate_dbpedia_url(url):
    """
    Convert an object that's defined by a DBPedia URL to a ConceptNet
    URI. We do this by finding the part of the URL that names the object,
    and using that as surface text for ConceptNet.

    This is, in some ways, abusing a naming convention in the Semantic Web.
    The URL of an object doesn't have to mean anything at all. The
    human-readable name is supposed to be a string, specified by the "name"
    relation.

    The problem here is that the "name" relation is not unique in either
    direction. A URL can have many names, and the same name can refer to
    many URLs, and some of these names are rarely used or are the result of
    parsing glitches. The URL itself is a stable thing that we can build a
    ConceptNet URI from, on the other hand.
    """
    if '__' in url or 'dbpedia.org' not in url:
        return None

    parsed = parse_url(url)
    domain = parsed.netloc

    if '.' not in domain:
        return None

    if domain == 'dbpedia.org':
        # Handle old DBPedia URLs that had no language code
        domain = 'en.dbpedia.org'

    domain_parts = domain.split('.', 1)
    if domain_parts[1] == 'dbpedia.org':
        lang = domain_parts[0]
        if lang in LCODE_ALIASES:
            lang = LCODE_ALIASES[lang]
        if lang not in ALL_LANGUAGES:
            return None
        text = resource_name(url).replace('_', ' ')
        uri = topic_to_concept(lang, text)
        if uri in CONCEPT_BLACKLIST:
            return None
        else:
            return uri
    else:
        return None
2a6b99ca59216c97dc1cfd90f1d8f4c01ad5f9f2
7,483
def setSecurityPolicy(aSecurityPolicy):
    """Set the system default security policy.

    This method should only be called by system startup code. It should
    never, for example, be called during a web request.
    """
    last = _ImplPython._defaultPolicy
    _ImplPython._defaultPolicy = aSecurityPolicy
    return last
7063b83f1c2492b8684a43f64c9d2a49ae2ca61b
7,484
import re

import numpy as np


def map_sentence2ints(sentence):
    """Map a sentence to an int32 array of word-vocabulary indices."""
    word_list = re.findall(r"[\w']+|[.,!?;]", sentence)
    int_list = [const.INPUTVOCABULARY.index(word) for word in word_list]
    return np.array(int_list).astype(np.int32)
6dcb2917c817aa2e394c313fb273d466b6fb1ea9
7,485
def get_api_key(
    api_key_header: str = Security(
        APIKeyHeader(name=settings.API_KEY_HEADER, auto_error=False)
    )
) -> str:
    """
    This function checks the header and its value for correct authentication;
    if not, a 403 error is returned:

    - api_key_header = Security api header

    https://github.com/tiangolo/fastapi/issues/142
    """
    if api_key_header == settings.API_KEY:
        return api_key_header
50121c0d16455862552c58e7478ef383b68e71c7
7,486
def add_pred_to_test(test_df, pred_np, demo_col_list, days):
    """
    Attach model predictions and binary labels to the test dataframe.

    INPUT:
    - test_df (pandas DataFrame)
    - pred_np (numpy array of predicted scores)
    - demo_col_list (list of demographic column names to cast to str)
    - days (threshold on time_in_hospital for a positive label)
    OUTPUT:
    - test_df with added 'score' and 'label_value' columns
    """
    test_df = test_df.copy()
    for c in demo_col_list:
        test_df[c] = test_df[c].astype(str)
    test_df['score'] = pred_np
    test_df['label_value'] = test_df['time_in_hospital'].apply(lambda x: 1 if x >= days else 0)
    return test_df
aec48bd6201e1a9a1ebd6f96c4c8b7cfd9304607
7,487
def getCriticality(cvss):
    """ color convention for the cells of the PDF """
    if cvss == 0.0:
        return ("none", "#00ff00", (0, 255, 0))
    if cvss < 3.1:
        return ("low", "#ffff00", (255, 255, 0))
    if cvss < 6.1:
        return ("medium", "#ffc800", (255, 200, 0))
    if cvss < 9.1:
        return ("high", "#ff6400", (255, 100, 0))
    return ("critical", "#cc0000", (200, 0, 0))
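For instance, a CVSS score of 7.5 falls in the "high" band:

label, hex_color, rgb = getCriticality(7.5)
# label == "high", hex_color == "#ff6400", rgb == (255, 100, 0)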
a4167b2f576dcb361641f7fe0280c212673f0157
7,488
from typing import List


def combine_groups(groups: List[np.ndarray], num_features: int) -> np.ndarray:
    """
    Combines the given groups back into a 2d measurement matrix.

    Args:
        groups: A list of 1d, flattened groups
        num_features: The number of features in each measurement (D)

    Returns:
        A [K, D] array containing the recovered measurements.
    """
    flattened = np.concatenate(groups)  # [K * D]
    return flattened.reshape(num_features, -1).T
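A round-trip sketch: each group holds one feature's values across all K measurements, so two groups of three values recover a [3, 2] matrix.

import numpy as np

groups = [np.array([1, 2, 3]), np.array([4, 5, 6])]
recovered = combine_groups(groups, num_features=2)
# recovered == [[1, 4], [2, 5], [3, 6]], i.e. shape [K=3, D=2]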
906c69fabcb62a60f12fd3c4bafc711aa971ad19
7,489
import functools


def skippable(*prompts, argument=None):
    """
    Decorator to allow a method on the :obj:`CustomCommand` to be skipped.

    Parameters:
    ----------
    prompts: :obj:iter
        A series of prompts to display to the user when the method is being
        skipped.
    argument: :obj:`str`
        By default, the management command argument to indicate that the method
        should be skipped will be `skip_<func_name>`. If the argument should be
        different, it can be explicitly provided here.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(instance, *args, **kwargs):
            parameter = argument or "skip_%s" % func.__name__
            if parameter in kwargs and kwargs[parameter] is True:
                instance.prompt(*prompts, style_func=instance.style.HTTP_NOT_MODIFIED)
                return False
            else:
                return func(instance, *args, **kwargs)
        return inner
    return decorator
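A hypothetical usage sketch on a command method; the CustomCommand base class and the skip_build kwarg are assumptions based on the docstring:

class Command(CustomCommand):
    @skippable("Skipping the build step.", argument="skip_build")
    def build(self, *args, **kwargs):
        # runs unless the command is invoked with skip_build=True
        ...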
879106f4cc0524660fb6639e56d688d40b115ac4
7,490
import hashlib


def _cache_name(address):
    """Generates the key name of an object's cache entry"""
    addr_hash = hashlib.md5(address).hexdigest()
    return "unsub-{hash}".format(hash=addr_hash)
6933b1170933df5e3e57af03c81322d68a46d91f
7,492
from decimal import Decimal


def format_currency(
    value: Decimal,
    currency: str | None = None,
    show_if_zero: bool = False,
    invert: bool = False,
) -> str:
    """Format a value using the derived precision for a specified currency."""
    if not value and not show_if_zero:
        return ""
    if value == ZERO:
        return g.ledger.format_decimal(ZERO, currency)
    if invert:
        value = -value
    return g.ledger.format_decimal(value, currency)
197dc15c799e1866526a944e0f1f8217e97cf785
7,493
def supplemental_div(content):
    """
    Standardize supplemental content listings
    Might not be possible if genus and tree content diverge
    """
    return {'c': content}
b42e868ef32f387347cd4a97328794e6628fe634
7,495
def viewTypes():
    """View types of an item when sent through a slash command"""
    user_id, user_name, channel_id = getUserData(request.form)
    checkUser(user_id)
    itemType = request.form.get('text')
    try:
        text = viewTypesItems(itemType)
    except ItemNotInPantry:
        reply = ("Sorry! But either the spelling is wrong or the item is currently unavailable.\n"
                 "Please view items in the pantry to check.")
        client.chat_postMessage(channel=channel_id, blocks=itemExceptionBlock(reply))
        return Response(), 200
    client.chat_postMessage(channel=channel_id, blocks=viewTypesItemBlock(text))
    return Response(), 200
aea7633a1092c68a5ccf3a5619eee9d74dafdca2
7,496
def load_and_preprocess():
    """
    Load the data (train.csv and test.csv) and pre-process it with some
    simple transformations. Return in the correct form for usage in
    scikit-learn.

    Returns
    -------
    X_train: numpy.array
        array containing features of training set
    X_test: numpy.array
        array containing features of test set
    y_train: numpy.array
        array containing labels for training set
    test_ID: numpy.array
        IDs for test set, for submission
    """
    train = pd.read_csv("data/train.csv")
    test = pd.read_csv("data/test.csv")
    data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
                      test.loc[:, 'MSSubClass':'SaleCondition']))

    # first extract the target variable, and log-transform because the prices are very skewed
    y_train = np.log1p(train['SalePrice'].values)

    # one hot encoding for categorical variables
    data = pd.get_dummies(data)

    # first find which numerical features are significantly skewed and transform them to log(1 + x)
    numerical = data.dtypes[data.dtypes != 'object'].index
    skewed = data[numerical].apply(lambda u: skew(u.dropna()))
    skewed = skewed[skewed > 0.75].index
    data[skewed] = np.log1p(data[skewed])

    # if numerical values are missing, replace with the mean from that column
    data = data.fillna(data.mean())

    X_train = data[:train.shape[0]].as_matrix()
    X_test = data[train.shape[0]:].as_matrix()

    return X_train, X_test, y_train, test.Id
3202c4aaf76af0695594c39641dd4892b1215d97
7,497
from datetime import datetime


def _dates2absolute(dates, units):
    """
    Absolute dates from datetime object

    Parameters
    ----------
    dates : datetime instance or array_like of datetime instances
        Instances of pyjams.datetime class
    units : str
        'day as %Y%m%d.%f', 'month as %Y%m.%f', or 'year as %Y.%f'

    Returns
    -------
    longdouble or array_like of longdouble
        absolute dates

    Examples
    --------
    >>> dt = [datetime(1990, 1, 1), datetime(1991, 1, 1)]
    >>> dec = _dates2absolute(dt, 'day as %Y%m%d.%f')
    >>> print(np.around(dec, 1))
    [19900101.0, 19910101.0]
    """
    mdates = input2array(dates, default=datetime(1990, 1, 1))

    # wrapper might be slow
    out = [_date2absolute(dd, units) for dd in mdates]

    out = array2input(out, dates)
    return out
ef823887ec410d7f7d0c5c54d12005ab35744c0c
7,498
def Mix_GetNumMusicDecoders():
    """Retrieves the number of available music decoders.

    The returned value can differ between runs of a program due to changes in
    the availability of the shared libraries required for supporting different
    formats.

    Returns:
        int: The number of available music decoders.
    """
    return _funcs["Mix_GetNumMusicDecoders"]()
a91b84c42701cdaeb7f400a3091bb869e477ff06
7,499
from typing import Callable


def _concat_applicative(
    current: KindN[
        _ApplicativeKind, _FirstType, _SecondType, _ThirdType,
    ],
    acc: KindN[
        _ApplicativeKind, _UpdatedType, _SecondType, _ThirdType,
    ],
    function: KindN[
        _ApplicativeKind,
        Callable[[_FirstType], Callable[[_UpdatedType], _UpdatedType]],
        _SecondType,
        _ThirdType,
    ],
) -> KindN[_ApplicativeKind, _UpdatedType, _SecondType, _ThirdType]:
    """Concats two applicatives using a curried-like function."""
    return acc.apply(current.apply(function))
fb720d87f643592f3ebed01bd55364fec83e1b22
7,501
def goto_x(new_x):
    """
    Move tool to the new_x position at high speed (G00). Update curpos.x
    with the new position.

    If a failure is detected, sleep so the operator can examine the
    situation. Since the loss of expected responses to commands indicates
    that the program does not know the exact position of the device, the
    caller should immediately abort on a failure. Call this function like
    this:

        assert goto_x(new_x_value), "Useful message indicating where failure occurred"

    :param new_x: new X position of tool
    :return: True -> success, False -> failure
    """
    assert isinstance(new_x, float)
    if VERIFY_NEGATIVE_VALUES:
        assert is_x_valid(new_x)
    global curpos
    output_and_log("G00 X{0:3.3f}".format(new_x))
    responded = read_port_await_str("ok")
    if not responded:
        print "goto_x() RESPONSE STRING({0}) NOT RECEIVED".format("ok")
        time.sleep(SLEEP_BEFORE_ESTOP)
    else:
        curpos.x = new_x
    return responded
fe49dde9349e18cea91d8f7ee1aae1f3545b5a04
7,502
def server_rename(adapter_id, server_id):
    """Renames a server using a certain adapter, if that adapter supports renaming."""
    adapter = get_adapter(adapter_id)
    if not adapter:
        return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
    if not adapter.can_rename():
        return output.failure("This adapter doesn't support renaming servers.", 501)
    if not adapter.do_verify(request.headers):
        return output.failure("Credential verification failed. Please check your credentials and try again.", 401)
    result = adapter.do_server_rename(request.headers, server_id, request.json)
    if isinstance(result, dict) and 'error' in result:
        return output.failure(result['error'], result['status'])
    return ""
55a25178a3ff9ec1e2e1d4a2f7cdd228bf0914cb
7,503
def index():
    """ Root URL response, load UI """
    return app.send_static_file("index.html")
fd793fadf7ecaf8e2c435b377c264aaf6e4da1d2
7,505
def restore_capitalization(word, example):
    """
    Make the capitalization of the ``word`` be the same as in ``example``:

        >>> restore_capitalization('bye', 'Hello')
        'Bye'
        >>> restore_capitalization('half-an-hour', 'Minute')
        'Half-An-Hour'
        >>> restore_capitalization('usa', 'IEEE')
        'USA'
        >>> restore_capitalization('pre-world', 'anti-World')
        'pre-World'
        >>> restore_capitalization('123-do', 'anti-IEEE')
        '123-DO'
        >>> restore_capitalization('123--do', 'anti--IEEE')
        '123--DO'

    If the alignment fails, the remainder is lower-cased:

        >>> restore_capitalization('foo-BAR-BAZ', 'Baz-Baz')
        'Foo-Bar-baz'
        >>> restore_capitalization('foo', 'foo-bar')
        'foo'

    .. note:

        Currently this function doesn't handle uppercase letters in
        the middle of the token (e.g. McDonald).
    """
    if '-' in example:
        results = []
        word_parts = word.split('-')
        example_parts = example.split('-')

        for i, part in enumerate(word_parts):
            if len(example_parts) > i:
                results.append(_make_the_same_case(part, example_parts[i]))
            else:
                results.append(part.lower())

        return '-'.join(results)

    return _make_the_same_case(word, example)
77b074acb4d95de5d88f37495786f6679fa5f54d
7,506
def test_loss_at_machine_precision_interval_is_zero():
    """The loss of an interval smaller than _dx_eps should be set to zero."""
    def f(x):
        return 1 if x == 0 else 0

    def goal(l):
        return learner.loss() < 0.01 or learner.npoints >= 1000

    learner = Learner1D(f, bounds=(-1, 1))
    simple(learner, goal=goal)

    # this means loss < 0.01 was reached
    assert learner.npoints != 1000
61d2efd80054729aafbe11d67873860f96f2198b
7,507
def params_document_to_uuid(params_document):
    """Generate a UUID5 based on a pipeline components document"""
    return identifiers.typeduuid.catalog_uuid(params_document)
32366dd5fa2ff4acfe848a7a4633baba23a1e993
7,508
import typing


def modify_account() -> typing.RouteReturn:
    """IntraRez account modification page."""
    form = forms.AccountModificationForm()
    if form.validate_on_submit():
        rezident = flask.g.rezident
        rezident.nom = form.nom.data.title()
        rezident.prenom = form.prenom.data.title()
        rezident.promo = form.promo.data
        rezident.email = form.email.data
        db.session.commit()
        utils.log_action(
            f"Modified account {rezident} ({rezident.prenom} {rezident.nom} "
            f"{rezident.promo}, {rezident.email})"
        )
        flask.flash(_("Compte modifié avec succès !"), "success")
        return utils.redirect_to_next()

    return flask.render_template("profile/modify_account.html",
                                 title=_("Mettre à jour mon compte"),
                                 form=form)
e67b553f0c7051d5be4b257824f495f0a0ad9838
7,509
def fizzbuzz(end=100):
    """Generate a FizzBuzz game sequence.

    FizzBuzz is a children's game where players take turns counting.
    The rules are as follows::

        1. Whenever the count is divisible by 3, the number is replaced with
           "Fizz"
        2. Whenever the count is divisible by 5, the number is replaced with
           "Buzz"
        3. Whenever the count is divisible by both 3 and 5, the number is
           replaced with "FizzBuzz"

    Parameters
    ----------
    end : int
        The FizzBuzz sequence is generated up and including this number.

    Returns
    -------
    sequence : list of str
        The FizzBuzz sequence.

    Examples
    --------
    >>> fizzbuzz(3)
    ['1', '2', 'Fizz']
    >>> fizzbuzz(5)
    ['1', '2', 'Fizz', '4', 'Buzz']

    References
    ----------
    https://blog.codinghorror.com/why-cant-programmers-program/
    """
    sequence = []
    for i in range(1, end + 1):
        if i % (3 * 5) == 0:
            sequence.append('FizzBuzz')
        elif i % 3 == 0:
            sequence.append('Fizz')
        elif i % 5 == 0:
            sequence.append('Buzz')
        else:
            sequence.append(str(i))
    return sequence
b68b1c39674fb47d0bd12d387f347af0ef0d26ca
7,510
def generate_lane_struct():
    """
    Generate the datatype for the lanes dataset
    :return: The datatype for the lanes dataset and the fill values for the lanes dataset
    """
    lane_top_list = []
    for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]:
        lane_top_list.append((item.name, item.type))
    lane_list = []
    for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]:
        lane_list.append((item.name, item.type))
    lane_top_list.append((str_lan_obj, lane_list, 4))
    d_lane = np.dtype(lane_top_list)
    lane_fill = np.zeros((len(lane_top_list), ), dtype=d_lane)
    for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]:
        lane_fill[item.name] = item.fill_value
    for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]:
        lane_fill[str_lan_obj][item.name] = item.fill_value
    return d_lane, lane_fill
698fadc8472233ae0046c9bbf1e4c21721c7de48
7,511
def notification_list(next_id=None):  # noqa: E501
    """notification_list

    Get all your certificate update notifications  # noqa: E501

    :param next_id:
    :type next_id: int

    :rtype: NotificationList
    """
    return 'do some magic!'
4fe4467f89ad4bf1ba31bd37eace411a78929a26
7,512
import requests


def SendPost(user, password, xdvbf, cookie, session, url=URL.form):
    """
    Send the request based on the information obtained earlier.

    :param user: student ID
    :param password: password
    :param xdvbf: captcha text
    :param cookie: cookie obtained by the previous visit
    :param session: the globally unique session
    :param url: which resource to send the request to
    :return: response
    """
    form_data = {
        "timestamp": helper.time_stamp,
        "jwb": helper.jwb,
        "id": user,
        "pwd": password,
        "xdvfb": xdvbf
    }
    response = session.post(url, form_data, headers=helper.header,
                            cookies=requests.utils.dict_from_cookiejar(cookie))
    response.encoding = response.apparent_encoding
    return response
932d869f048e8f06d7dbfe6032950c66a72224fa
7,514
def css_flat(name, values=None):
    """All values of a property, in order

    left -> [u'auto', u'<dimension>', u'<number>', u'<length>', u'.em', u'.ex',
             u'.vw', u'.vh', u'.vmin', u'.vmax', u'.ch', u'.rem', u'.px', u'.cm',
             u'.mm', u'.in', u'.pt', u'.pc', u'<percentage>', u'.%']
    """
    cur = CSS_DICT.get(name) or CSS_DICT.get(name[1:-1])
    if values is None:
        values = []
    if cur is None:
        return values
    for value in cur['values']:
        values.append(value)
        if value.startswith('<') and value.endswith('>'):
            values = css_flat(value, values)
    return values
a992d261d234f9c4712b00986cb6ba5ba4347b8f
7,515
def prepare_mqtt(MQTT_SERVER, MQTT_PORT=1883):
    """ Initializes MQTT client and connects to a server """
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect(MQTT_SERVER, MQTT_PORT, 60)
    return client
a5015d80c5c0222ac5eb40cbb1ba490826fcebae
7,516
def record_iterator_class(record_type):
    """ Gets the record iterator for a given type

    A way to abstract the construction of a record iterator class.

    :param record_type: the type of file as string
    :return: the appropriate record iterator class
    """
    if record_type == 'bib':
        return BibtexRecordIterator
    elif record_type == 'froac' or record_type == 'xml':
        return FroacRecordIterator
    elif record_type == 'isi':
        return IsiRecordIterator
    else:
        raise ValueError("This type {} has not been implemented yet".format(
            record_type
        ))
b1fbd393819055b9468a96b5ec7e44d3773dcf52
7,517
from typing import Any


def palgo(
    dumbalgo: type[DumbAlgo], space: Space, fixed_suggestion_value: Any
) -> SpaceTransformAlgoWrapper[DumbAlgo]:
    """Set up a SpaceTransformAlgoWrapper with dumb configuration."""
    return create_algo(algo_type=dumbalgo, space=space, value=fixed_suggestion_value)
373f74ca675250b5a422ff965396a122c4b967fd
7,519
def english_to_french(english_text):
    """Translate English input text to French."""
    translation = language_translator.translate(text=english_text, model_id="en-fr").get_result()
    french_text = translation['translations'][0]['translation']
    return french_text
ac9951d0362ccf511361dfc676b03f61f4fe8453
7,520
from typing import Sequence


def noise_get_turbulence(
    n: tcod.noise.Noise,
    f: Sequence[float],
    oc: float,
    typ: int = NOISE_DEFAULT,
) -> float:
    """Return the turbulence noise sampled from the ``f`` coordinate.

    Args:
        n (Noise): A Noise instance.
        f (Sequence[float]): The point to sample the noise from.
        oc (float): The level of detail. Should be more than 1.
        typ (int): The noise algorithm to use.

    Returns:
        float: The sampled noise value.
    """
    return float(
        lib.TCOD_noise_get_turbulence_ex(
            n.noise_c, ffi.new("float[4]", f), oc, typ
        )
    )
f4af83726dd6f3badf2c2eaa86f647dd4ad71cb3
7,521
def mnist_loader(path="../../corruptmnist", n_files=8, image_scale=255):
    """ Loads .npz corruptedmnist, assumes loaded image values to be between 0 and 1 """
    # load and stack the corrupted mnist dataset
    train_images = np.vstack(
        [np.load(path + "/train_{}.npz".format(str(i)))["images"] for i in range(n_files)]
    )
    train_labels = np.hstack(
        [np.load(path + "/train_{}.npz".format(str(i)))["labels"] for i in range(n_files)]
    )
    test_images = np.load(path + "/test.npz")["images"]
    test_labels = np.load(path + "/test.npz")["labels"]

    return train_images * image_scale, train_labels, test_images * image_scale, test_labels
a7e7328621819e0cbf163e1ef006df5183b6d25d
7,523
def reduce_min(values, index, name='segmented_reduce_min'):
    """Computes the minimum over segments."""
    return _segment_reduce(values, index, tf.math.unsorted_segment_min, name)
473698ffd1295344dd8019b01b69d464f2db93b8
7,524
import glob


def _data_type(data_string: str):
    """
    convert the data type string (i.e., FLOAT, INT16, etc.) to the appropriate int.

    See: https://deeplearning4j.org/api/latest/onnx/Onnx.TensorProto.DataType.html
    """
    for key, val in glob.DATA_TYPES.items():
        if key == data_string:
            return val
    _print("Data string not found. Use `list_data_types()` to list all supported data strings.")
    return False
a0fce62a304ce8b61ad2ecf173b8723cf66f10c0
7,525
def bin_power(dataset, fsamp: int, band=range(0, 45)):
    """Power spectrum

    Args:
        dataset: n_epoch x n_channel x n_sample
        fsamp: sampling frequency
        band: frequency band

    Returns:
        n_epoch x n_channel x len(band)
    """
    res = []
    for i, data in enumerate(dataset):
        res.append(power(data, fsamp=fsamp, band=band))
    return res
e85815837d2cab8bd1b89132df29a439ec54bd34
7,526
import base64
import zlib

import six


def deflate_and_base64_encode(string_val):
    """
    Deflates and then base64 encodes a string

    :param string_val: The string to deflate and encode
    :return: The deflated and encoded string
    """
    if not isinstance(string_val, six.binary_type):
        string_val = string_val.encode('utf-8')
    return base64.b64encode(zlib.compress(string_val)[2:-4])
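The slicing strips the 2-byte zlib header and the 4-byte checksum, leaving a raw DEFLATE stream; decoding therefore needs wbits=-15. A round-trip sketch:

import base64
import zlib

encoded = deflate_and_base64_encode(b"hello hello hello")
decoded = zlib.decompress(base64.b64decode(encoded), wbits=-15)
assert decoded == b"hello hello hello"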
31fc19cf134bc22b3fc45b4158c65aef666716cc
7,527
def smooth_reward_curve(x, y):
    """Smooths a reward curve with a moving-average convolution, then downsamples."""
    k = min(31, int(np.ceil(len(x) / 30)))  # Halfwidth of our smoothing convolution
    xsmoo = x[k:-k]
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='valid') / \
        np.convolve(np.ones_like(y), np.ones(2 * k + 1), mode='valid')
    downsample = max(int(np.floor(len(xsmoo) / 1e3)), 1)
    return xsmoo[::downsample], ysmoo[::downsample]
3106cc75a8ceb58f29cded4353133eff7a737f8b
7,528
def sdot(s):
    """Returns the time derivative of a given state.

    Args:
        s(1x6 numpy array): the state vector [rx,ry,rz,vx,vy,vz]

    Returns:
        1x6 numpy array: the time derivative of s [vx,vy,vz,ax,ay,az]
    """
    mu_Earth = 398600.4405
    r = np.linalg.norm(s[0:3])
    a = -mu_Earth / (r**3) * s[0:3]

    p_j2 = j2_pert(s)
    p_drag = drag(s)

    a = a + p_j2 + p_drag
    return np.array([*s[3:6], *a])
4e79054e194b5395953fbda30794e819c6700feb
7,529
def get_values(abf, key="freq", continuous=False):
    """returns Xs, Ys (the key), and sweep #s for every AP found."""
    Xs, Ys, Ss = [], [], []
    for sweep in range(abf.sweeps):
        for AP in cm.matrixToDicts(abf.APs):
            if not AP["sweep"] == sweep:
                continue
            Ys.append(AP[key])
            Ss.append(AP["sweep"])
            if continuous:
                Xs.append(AP["expT"])
            else:
                Xs.append(AP["sweepT"])
    return np.array(Xs), np.array(Ys), np.array(Ss)
8671d795410b8064fd70172da396ccbd4323c9a3
7,530
def geodetic2cd(
    gglat_deg_array, gglon_deg_array, ggalt_km_array, decimals=2, year=2021.0
):
    """Transformation from Geodetic (lat, lon, alt) to Centered Dipole (CD)
    (lat, lon, alt).

    Author: Giorgio Savastano ([email protected])

    Parameters
    ----------
    gglon_deg_array : np.ndarray
        array containing geodetic longitude values in degrees
    gglat_deg_array : np.ndarray
        array containing geodetic latitude values in degrees
    ggalt_km_array : np.ndarray
        array containing geodetic altitude values in km
    decimals : int, default=2
        Number of decimal places to round to. If decimals is negative,
        it specifies the number of positions to the left of the decimal point.
    year : float, default=2021.0
        year for computing the IGRF Gauss coefficients

    Returns
    -------
    tuple[np.ndarray, np.ndarray, np.ndarray]
        CD lat, lon, alt arrays
    """
    if type(gglon_deg_array) == list:
        logger.info(" Converting list to np.ndarrays.")
        gglon_deg_array = np.asarray(gglon_deg_array)
        gglat_deg_array = np.asarray(gglat_deg_array)
        ggalt_km_array = np.asarray(ggalt_km_array)
    elif type(gglon_deg_array) != np.ndarray:
        logger.info(f" Converting {type(gglon_deg_array)} to np.ndarrays.")
        gglon_deg_array = np.asarray([gglon_deg_array])
        gglat_deg_array = np.asarray([gglat_deg_array])
        ggalt_km_array = np.asarray([ggalt_km_array])

    x_geoc, y_geoc, z_geoc = pymap3d.geodetic2ecef(
        gglat_deg_array, gglon_deg_array, ggalt_km_array * 1000.0
    )

    x_cd, y_cd, z_cd = ecef2eccdf(x_geoc, y_geoc, z_geoc, year=year)
    colat_cd, long_cd, r_cd = ecef2spherical(x_cd, y_cd, z_cd)

    lat_cd = np.round(90 - colat_cd, decimals)
    alt_cd = np.round(r_cd - CONSTS.RE_M, decimals)

    return lat_cd, long_cd, alt_cd
b5a3a8622051e05f31e3f087869b8bebfd213fd9
7,531
import pickle


def load_pickle(file_path):
    """
    load the pickle object from the given path
    :param file_path: path of the pickle file
    :return: obj => loaded obj
    """
    with open(file_path, "rb") as obj_des:
        obj = pickle.load(obj_des)

    # return the loaded object
    return obj
4770a152dad9c7d123f95a53642aff990f3590f7
7,532
def _expand_global_features(B, T, g, bct=True):
    """Expand global conditioning features to all time steps

    Args:
        B (int): Batch size.
        T (int): Time length.
        g (Tensor): Global features, (B x C) or (B x C x 1).
        bct (bool) : returns (B x C x T) if True, otherwise (B x T x C)

    Returns:
        Tensor: B x C x T or B x T x C or None
    """
    if g is None:
        return None

    g = g.unsqueeze(-1) if g.dim() == 2 else g

    if bct:
        g_bct = g.expand(B, -1, T)
        return g_bct.contiguous()
    else:
        g_btc = g.expand(B, -1, T).transpose(1, 2)
        return g_btc.contiguous()
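A shape-only usage sketch with PyTorch tensors:

import torch

g = torch.randn(4, 16)                                 # (B x C)
g_bct = _expand_global_features(4, 100, g)             # -> (4, 16, 100)
g_btc = _expand_global_features(4, 100, g, bct=False)  # -> (4, 100, 16)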
9d0ab550147d8658f0ff8fb5cfef8fc565c5f3d3
7,533
def plot_CDF(data, ax=None, reverse=False, plot=True, **plotargs):
    """ plot Cumulative Ratio. """
    n_samples = len(data)
    X = sorted(data, reverse=reverse)
    Y = np.arange(1, n_samples + 1) / n_samples
    if plot or ax:
        if ax is None:
            fig, ax = plt.subplots()
        ax.plot(X, Y, **plotargs)
        ax.set_ylabel("Cumulative Ratio")
        return ax
    return (X, Y)
25d9a83a9b560a89137c0e4eb6cd63761f39901f
7,535
def is_zsettable(s):
    """quick check that all values in a dict are reals"""
    return all(map(lambda x: isinstance(x, (int, float, long)), s.values()))
ad51e7419a37bec071be6aa2c1a4e9d62bce913c
7,536
from typing import Sequence


def initialize_simulator(task_ids: Sequence[str], action_tier: str) -> ActionSimulator:
    """Initialize ActionSimulator for given tasks and tier."""
    tasks = phyre.loader.load_compiled_task_list(task_ids)
    return ActionSimulator(tasks, action_tier)
8b54ae1c98d44839a33a8774de48e53f1ce9ca96
7,537
def import_sensitivities(input, file_location):
    """
    Ratio is the C/O starting gas ratio
    file_location is the LSR C and O binding energy, false to load the base case
    """
    tol, ratio = input
    try:
        data = pd.read_csv(file_location + '/all-sensitivities/' + tol +
                           '{:.1f}RxnSensitivity.csv'.format(ratio))
        data = data.values
        data = data.tolist()
        return data
    except:
        print('Cannot find ' + file_location + '/all-sensitivities/' + tol +
              '{:.1f}RxnSensitivity.csv'.format(ratio))
c0b0c9d740335032b4d196232c3166818aa77a1a
7,539
import ntpath
import re


def extract_files_to_process(options, company_file):
    """Extract the files from the ENER zip file and the ITR/DFP inside of it,
    and collect all the XML files
    """
    force_download = options.get("force_download", False)
    local_base_path = _doc_local_base_path(options, company_file)

    # Make sure the file is in the local cache
    local_file = "{0}/{1}".format(local_base_path, company_file.file_name)
    if not exists(options, local_file):
        copy_file(options, company_file.file_url, local_file)

    working_local_base_path = _doc_local_working_base_path(options, company_file)
    file_to_export = "{0}/{1}".format(local_base_path, company_file.file_name)

    if exists(options, working_local_base_path):
        if force_download:
            # Clean the folder of the company file (working folder)
            delete_all(options, working_local_base_path)
            files_ref = extract_zip(options, file_to_export, working_local_base_path)
        else:
            files_ref = listdir(options, working_local_base_path)
            # If the folder is empty
            if not files_ref:
                mkdirs(options, working_local_base_path)
                files_ref = extract_zip(options, file_to_export, working_local_base_path)
    else:
        mkdirs(options, working_local_base_path)
        files_ref = extract_zip(options, file_to_export, working_local_base_path)

    available_files = {}
    if company_file.doc_type in ["ITR", "DFP"]:
        for the_file in files_ref:
            if re.match(RE_FILE_BY_XML, the_file, re.IGNORECASE):
                filename = ntpath.basename(the_file)
                available_files[filename] = the_file
            elif re.match(RE_FILE_BY_ITR, the_file, re.IGNORECASE):
                itr_dest_folder = "{0}/itr_content/".format(working_local_base_path)
                itr_files = extract_zip(options, the_file, itr_dest_folder)
                for itr_file in itr_files:
                    filename = ntpath.basename(itr_file)
                    available_files["itr/{}".format(filename)] = itr_file
                # Once unzipped, we can delete the original file from the
            elif re.match(RE_FILE_BY_DFP, the_file, re.IGNORECASE):
                dfp_dest_folder = "{0}/dfp_content/".format(working_local_base_path)
                dfp_files = extract_zip(options, the_file, dfp_dest_folder)
                for dfp_file in dfp_files:
                    filename = ntpath.basename(dfp_file)
                    available_files["dfp/{}".format(filename)] = dfp_file

    return available_files
963dd738224c36311791c54d964ae5b95d345a7f
7,540
def merge(source, dest):
    """
    Copy all properties and relations from one entity onto another, then
    mark the source entity as an ID alias for the destination entity.
    """
    if source.id == dest.id:
        return source

    if dest.same_as == source.id:
        return source

    if source.same_as == dest.id:
        return dest

    if dest.same_as is not None:
        # potential infinite recursion here.
        canonical = Entity.by_id(dest.same_as)
        if canonical is not None:
            return merge(source, canonical)

    if dest.schema.is_parent(source.schema):
        dest.schema = source.schema

    dest_valid = [a.name for a in dest.schema.attributes]
    dest_active = [p.name for p in dest.active_properties]
    for prop in source.properties:
        prop.entity = dest
        if prop.name in dest_active:
            prop.active = False
        if prop.name not in dest_valid:
            properties_logic.delete(prop)

    for rel in source.inbound:
        rel.target = dest
        db.session.add(rel)

    for rel in source.outbound:
        rel.source = dest
        db.session.add(rel)

    source.same_as = dest.id
    db.session.flush()
    _entity_changed.delay(dest.id, 'update')
    _entity_changed.delay(source.id, 'delete')
    return dest
9cb6963ba0e15e639915e27d7c369394d7088231
7,542
import json
import re


def create_summary_text(summary):
    """
    format a dictionary so it can be printed to screen or written to a plain
    text file

    Args:
        summary(dict): the data to format

    Returns:
        textsummary(str): the summary dict formatted as a string
    """
    summaryjson = json.dumps(summary, indent=3)
    textsummary = re.sub('[{},"]', '', summaryjson)
    return textsummary
3a8dd508b760a0b9bfe925fa2dc07d53dee432af
7,543
import datetime
import random


def random_datetime(start, end):
    """Generate a random datetime between `start` and `end`"""
    return start + datetime.timedelta(
        # Get a random amount of seconds between `start` and `end`
        seconds=random.randint(0, int((end - start).total_seconds())),
    )
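For example, to draw a random timestamp inside the year 2020:

start = datetime.datetime(2020, 1, 1)
end = datetime.datetime(2020, 12, 31)
print(random_datetime(start, end))  # e.g. 2020-07-19 03:12:45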
c3cf7a0fb616b9f157d5eb86b3d76f1cd811308f
7,544
def maximo_basico(a: float, b: float) -> float:
    """Takes two numbers and returns the larger one.
    Constraint: do not use the max function."""
    if a > b:
        return a
    return b
f98db565243587015c3b174cf4130cbc32a00e22
7,545
def listas_mesmo_tamanho(lista_de_listas):
    """
    Receives a list of lists and returns True if all the lists have the
    same length, and False otherwise
    """
    tamanho_padrao = len(lista_de_listas[0])
    for lista in lista_de_listas:
        if len(lista) != tamanho_padrao:
            return False
    return True
3a405f36bf8cd906fc603e9774cc23e07738e123
7,546
def self_quarantine_policy_40():
    """
    Real Name: b'self quarantine policy 40'
    Original Eqn: b'1-PULSE(self quarantine start 40, self quarantine end 40-self quarantine start 40)*self quarantine effectiveness 40'
    Units: b'dmnl'
    Limits: (None, None)
    Type: component

    b''
    """
    return 1 - functions.pulse(__data['time'], self_quarantine_start_40(),
                               self_quarantine_end_40() - self_quarantine_start_40()) \
        * self_quarantine_effectiveness_40()
87ae16bd53bdd08a71231297949c3b995c7f9ba0
7,548
def knapsack_bqm(cities, values, weights, total_capacity, value_r=0, weight_r=0):
    """
    build the knapsack binary quadratic model

    From DWave Knapsack examples
    Originally from Andrew Lucas, NP-hard combinatorial problems as Ising spin glasses
    Workshop on Classical and Quantum Optimization; ETH Zuerich - August 20, 2014
    based on Lucas, Frontiers in Physics _2, 5 (2014)

    See # Q-Alpha version for original introduction of value_r and weight_r

    value_r: the proportion of value contributed from the objects outside of
        the knapsack. For the standard knapsack problem this is 0, but in the
        case of GDP a closed city retains some % of GDP value; or for health
        problems it may contribute negative value (-1).
    weight_r: the proportion of weight contributed from the objects outside
        of the knapsack. For the standard knapsack problem this is 0, but in
        the case of sick people we might consider that a closed city retains
        some % of its sick people over time; or for health problems it may
        contribute negative value (-1)
    """
    # Initialize BQM - use large-capacity BQM so that the problem can be
    # scaled by the user.
    bqm = dimod.AdjVectorBQM(dimod.Vartype.BINARY)

    # Lagrangian multiplier
    # First guess as suggested in Lucas's paper
    lagrange = max(values)

    # Number of objects
    x_size = len(values)

    # Lucas's algorithm introduces additional slack variables to handle
    # the inequality. max_y_index indicates the maximum index in the y
    # sum; hence the number of slack variables.
    max_y_index = ceil(log(total_capacity))

    # Slack variable list for Lucas's algorithm. The last variable has
    # a special value because it terminates the sequence.
    y = [2**n for n in range(max_y_index - 1)]
    y.append(total_capacity + 1 - 2**(max_y_index - 1))

    # Q-Alpha - calculate the extra constant in second part of problem hamiltonian
    C = sum([weight * weight_r for weight in weights])

    # Q-Alpha - change weights to weight*(1-weight_r)
    weights = [weight * (1 - weight_r) for weight in weights]

    # Q-Alpha - change values to value*(1-value_r)
    values = [value * (1 - value_r) for value in values]

    # Hamiltonian xi-xi terms
    for k in range(x_size):
        # Q-Alpha add final term lagrange * C * weights[k]
        bqm.set_linear(
            cities[k],
            lagrange * (weights[k] ** 2) - values[k] + lagrange * C * weights[k])

    # Hamiltonian xi-xj terms
    for i in range(x_size):
        for j in range(i + 1, x_size):
            key = (cities[i], cities[j])
            bqm.quadratic[key] = 2 * lagrange * weights[i] * weights[j]

    # Hamiltonian y-y terms
    for k in range(max_y_index):
        # Q-Alpha add final term -lagrange * C * y[k]
        bqm.set_linear('y' + str(k), lagrange * (y[k]**2) - lagrange * C * y[k])

    # Hamiltonian yi-yj terms
    for i in range(max_y_index):
        for j in range(i + 1, max_y_index):
            key = ('y' + str(i), 'y' + str(j))
            bqm.quadratic[key] = 2 * lagrange * y[i] * y[j]

    # Hamiltonian x-y terms
    for i in range(x_size):
        for j in range(max_y_index):
            key = (cities[i], 'y' + str(j))
            bqm.quadratic[key] = -2 * lagrange * weights[i] * y[j]

    return bqm
0a00c5fbcf30e36b7d6a03b9edc4029582b001fd
7,550
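A minimal usage sketch for the builder above, with hypothetical city data; it assumes a dimod version (around 0.9) that still provides AdjVectorBQM, and uses the brute-force ExactSolver, which is only feasible for a handful of variables:

cities = ['A', 'B', 'C']
values = [10, 6, 4]   # e.g. GDP retained by keeping a city open
weights = [3, 2, 1]   # e.g. infection load
bqm = knapsack_bqm(cities, values, weights, total_capacity=4)
best = dimod.ExactSolver().sample(bqm).first.sample
print([c for c in cities if best[c] == 1])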
import nltk
from typing import List


def nltk_punkt_de(data: List[str], model=None) -> List[str]:
    """Sentence Segmentation (SBD) with NLTK's Punkt Tokenizer

    Parameters:
    -----------
    data : List[str]
        list of N documents as strings. Each document is then
        segmented into sentences.

    model (Default: None)
        Preloaded instance of the NLP model. See nlptasks.sbd.get_model

    Returns:
    --------
    List[str]
        list of M sentences as strings. Please note that the information
        about the relationship to the document is lost.

    Example:
    --------
        import nlptasks as nt
        import nlptasks.sbd
        docs = ["Die Kuh ist bunt. Die Bäuerin mäht die Wiese."]
        sents = nt.sbd.nltk_punkt_de(docs)

    Help:
    -----
    - https://www.nltk.org/api/nltk.tokenize.html#module-nltk.tokenize.punkt
    """
    # SBD
    sentences = []
    for rawstr in data:
        sents = nltk.tokenize.sent_tokenize(rawstr, language="german")
        sentences.extend(sents)
    # done
    return sentences
10b924070ebcb3062c9b40f4f6ca0a3a006f8d2e
7,551
def is_pattern_error(exception: TypeError) -> bool:
    """Detect whether the input exception was caused by an invalid type passed to `re.search`."""
    # This is intentionally simplistic and does not involve any traceback analysis
    return str(exception) == "expected string or bytes-like object"
623246404bbd54bc82ff5759bc73be815d613731
7,552
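A short demonstration of the predicate above. Note the exact-match comparison holds on CPython up to 3.10; CPython 3.11+ appends the offending type to the message (e.g. "..., got 'int'"), so the check would return False there:

import re

try:
    re.search("x", 5)  # invalid: 5 is neither str nor bytes
except TypeError as exc:
    print(is_pattern_error(exc))  # True on CPython <= 3.10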
import pdb

import numpy as np
from scipy import linalg


def iwave_modes_banded(N2, dz, k=None):
    r"""
    !!! DOES NOT WORK !!!
    Calculates the eigenvalues and eigenfunctions of the internal wave
    eigenvalue problem:

    $$
    \left[ \frac{d^2}{dz^2} + \frac{N^2}{c^2} \right] \phi = 0
    $$

    with boundary conditions
    """

    nz = N2.shape[0]

    # Remove the surface values
    if k is None:
        k = nz - 2

    dz2 = 1/dz**2

    # Construct the LHS matrix, A
    A = np.vstack([-1*dz2*np.ones((nz,)),
                   2*dz2*np.ones((nz,)),
                   -1*dz2*np.ones((nz,)),
                   ])

    # BC's
    #A[0,0] = -1.
    #A[0,1] = 0.
    #A[-1,-1] = -1.
    #A[-1,-2] = 0.
    A[1,0] = -1.
    A[2,0] = 0.
    A[1,-1] = -1.
    A[0,-1] = 0.

    # Now convert from a generalized eigenvalue problem
    #       A.v = lambda.B.v
    # to a standard problem
    #       A.v = lambda.v
    # by multiplying the LHS by the inverse of B.
    # B^-1 = 1/N2 since B is diagonal
    A[0,:] /= N2
    A[1,:] /= N2
    A[2,:] /= N2

    w, phi = linalg.eig_banded(A)

    pdb.set_trace()

    ## Main diagonal
    #dd = 2*dz2*np.ones((nz,))
    #dd /= N2
    #dd[0] = -1
    #dd[-1] = -1

    ## Off diagonal
    #ee = -1*dz2*np.ones((nz-1,))
    #ee /= N2[0:-1]
    #ee[0] = 0
    #ee[-1] = 0

    ## Solve... (use scipy not numpy)
    #w, phi = linalg.eigh_tridiagonal(dd, ee )

    #####

    c = 1. / np.power(w, 0.5)  # since term is ... + N^2/c^2 \phi

    # Sort by the eigenvalues
    idx = np.argsort(c)[::-1]  # descending order

    ## Calculate the actual phase speed
    cn = np.real(c[idx])

    idxgood = ~np.isnan(cn)
    phisort = phi[:, idx]

    return np.real(phisort[:, idxgood]), np.real(cn[idxgood])
f4016fb4acd1c5aa024d8ac1e69262dec9057713
7,553
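For reference, the conversion attempted above, in the snippet's own notation: the discretized modes satisfy the generalized eigenvalue problem $A \phi = \lambda B \phi$ with $B = \mathrm{diag}(N^2)$ and $\lambda = 1/c^2$. Since $B$ is diagonal it is trivially invertible wherever $N^2 > 0$, giving the standard problem $(B^{-1} A) \phi = \lambda \phi$, which is what dividing the rows of the banded array by N2 is meant to implement.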
def parse_fastq_pf_flag(records): """Take a fastq filename split on _ and look for the pass-filter flag """ if len(records) < 8: pf = None else: fastq_type = records[-1].lower() if fastq_type.startswith('pass'): pf = True elif fastq_type.startswith('nopass'): pf = False elif fastq_type.startswith('all'): pf = None else: raise ValueError("Unrecognized fastq name: %s" % ( "_".join(records),)) return pf
9a46022aa6e07ed3ca7a7d80933ee23e26d1ca9a
7,554
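An illustration of the parser above with a made-up Illumina-style filename (the real naming convention may differ):

parts = "woldlab_110101_SN787_0069_A81DAF_L001_R1_pass".split("_")
assert parse_fastq_pf_flag(parts) is True
assert parse_fastq_pf_flag(["too", "short"]) is None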
import pytest


@pytest.fixture
def rule_manager():
    """ Pytest fixture for generating rule manager instance """
    # IgnoreFilter and RuleManager are assumed to be imported from the
    # project under test.
    ignore_filter = IgnoreFilter(None, verbose=False)
    return RuleManager(None, ignore_filter, verbose=False)
ce5e9ecf482b5dfd0e3b99b2367605d6e488f7e7
7,555
import numpy as np
from scipy.optimize import brentq


def zeros(fn, arr, *args):
    """
    Find where a function crosses 0. Returns the zeroes of the function.

    Parameters
    ----------
    fn : function
    arr : array of arguments for function
    *args : any other arguments the function may have
    """
    # the reduced function, with only the argument to be solved for
    # (all other arguments fixed):
    def fn_reduced(array):
        return fn(array, *args)

    # the array of values of the function:
    fn_arr = fn_reduced(arr)

    # looking where the function changes sign...
    sign_change_arr = np.where(np.logical_or((fn_arr[:-1] < 0.) * (fn_arr[1:] > 0.),
                                             (fn_arr[:-1] > 0.) * (fn_arr[1:] < 0.))
                               )[0]

    # or, just in case, where it is exactly 0!
    exact_zeros_arr = np.where(fn_arr == 0.)[0]

    # defining the array of 0-crossings:
    cross_arr = []

    # first, interpolating between the sign changes
    if len(sign_change_arr) > 0:
        for i in range(len(sign_change_arr)):
            cross_arr.append(
                brentq(fn_reduced, arr[sign_change_arr[i]], arr[sign_change_arr[i] + 1])
            )

    # and then adding those places where it is exactly 0
    if len(exact_zeros_arr) > 0:
        for i in range(len(exact_zeros_arr)):
            cross_arr.append(arr[exact_zeros_arr[i]])

    # sorting the crossings in increasing order:
    cross_arr = np.sort(np.array(cross_arr))

    return cross_arr
129a162912f86ee52fc57b1a3a46acaf402598f5
7,556
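A small check of the root finder above, using the numpy/scipy imports it already relies on:

grid = np.linspace(0.0, 10.0, 201)
print(zeros(np.sin, grid))  # approximately [0.  3.14159  6.28319  9.42478]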
import math

import tensorflow as tf  # uses the TF1-style API (tf.placeholder, tf.truncated_normal)


def create_low_latency_conv_model(fingerprint_input, model_settings,
                                  is_training):
  """Builds a convolutional model with low compute requirements.

  This is roughly the network labeled as 'cnn-one-fstride4' in the
  'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
  http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf

  Here's the layout of the graph:

  (fingerprint_input)
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v

  This produces slightly lower quality results than the 'conv' model, but
  needs fewer weight parameters and computations.

  During training, dropout nodes are introduced after the relu, controlled by a
  placeholder.

  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.

  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['dct_coefficient_count']
  input_time_size = model_settings['spectrogram_length']
  fingerprint_4d = tf.reshape(fingerprint_input,
                              [-1, input_time_size, input_frequency_size, 1])
  first_filter_width = 8
  first_filter_height = input_time_size
  first_filter_count = 186
  first_filter_stride_x = 1
  first_filter_stride_y = 4
  first_weights = tf.Variable(
      tf.truncated_normal(
          [first_filter_height, first_filter_width, 1, first_filter_count],
          stddev=0.01))
  first_bias = tf.Variable(tf.zeros([first_filter_count]))
  first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
      1, first_filter_stride_y, first_filter_stride_x, 1
  ], 'VALID') + first_bias
  first_relu = tf.nn.relu(first_conv)
  if is_training:
    first_dropout = tf.nn.dropout(first_relu, dropout_prob)
  else:
    first_dropout = first_relu
  first_conv_output_width = math.floor(
      (input_frequency_size - first_filter_width + first_filter_stride_x) /
      first_filter_stride_x)
  first_conv_output_height = math.floor(
      (input_time_size - first_filter_height + first_filter_stride_y) /
      first_filter_stride_y)
  first_conv_element_count = int(
      first_conv_output_width * first_conv_output_height * first_filter_count)
  flattened_first_conv = tf.reshape(first_dropout,
                                    [-1, first_conv_element_count])
  first_fc_output_channels = 128
  first_fc_weights = tf.Variable(
      tf.truncated_normal(
          [first_conv_element_count, first_fc_output_channels], stddev=0.01))
  first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
  first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
  if is_training:
    second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
  else:
    second_fc_input = first_fc
  second_fc_output_channels = 128
  second_fc_weights = tf.Variable(
      tf.truncated_normal(
          [first_fc_output_channels, second_fc_output_channels], stddev=0.01))
  second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
  second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
  if is_training:
    final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
  else:
    final_fc_input = second_fc
  label_count = model_settings['label_count']
  final_fc_weights = tf.Variable(
      tf.truncated_normal(
          [second_fc_output_channels, label_count], stddev=0.01))
  final_fc_bias = tf.Variable(tf.zeros([label_count]))
  final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
  if is_training:
    return final_fc, dropout_prob
  else:
    return final_fc
3b03e84c9af5a6d1134736d8757e15039bb196b8
7,557
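A construction sketch for the model above, with hypothetical model_settings values (TF1-style graph mode, matching the tf.placeholder usage in the snippet):

model_settings = {'dct_coefficient_count': 40,
                  'spectrogram_length': 98,
                  'label_count': 12}
fingerprint_size = 40 * 98  # frequency bins x time frames
fingerprint_input = tf.placeholder(
    tf.float32, [None, fingerprint_size], name='fingerprint_input')
logits, dropout_prob = create_low_latency_conv_model(
    fingerprint_input, model_settings, is_training=True)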
def _format_author(url, full_name): """ Helper function to make author link """ return u"<a class='more-info' href='%s'>%s</a>" % (url, full_name)
50f001c2358b44bb95da628cc630a2ed3ea8ddfd
7,560
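For illustration, the helper above renders a single anchor tag (hypothetical arguments):

>>> _format_author(u"/authors/42", u"Ada Lovelace")
u"<a class='more-info' href='/authors/42'>Ada Lovelace</a>"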
from django.http import HttpRequest, JsonResponse


def all_series(request: HttpRequest) -> JsonResponse:
    """
    View that serves all the series in a JSON array.

    :param request: The original request.
    :return: A JSON-formatted response with the series.
    """
    # _series_response and get_response are module-level helpers from the
    # same project.
    return JsonResponse([
        _series_response(request, s) for s in get_response(request)
    ], safe=False)
01657615b53a4316a9ec0ad581e009928cfefed2
7,561
import itertools as it


def stlx_powerset(s):
    """If s is a set, the expression pow(s) computes the power set of s.
    The power set of s is defined as the set of all subsets of s."""
    def powerset_generator(i):
        for subset in it.chain.from_iterable(it.combinations(i, r)
                                             for r in range(len(i) + 1)):
            yield set(subset)
    # SetlxSet is assumed to be defined elsewhere in the same project.
    return SetlxSet(SetlxSet(z) for z in powerset_generator(s))
9297efa03636ff19da7aae4e60593bcc9933d6bb
7,562
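A quick worked example for the power-set builder above; it assumes SetlxSet simply wraps an iterable, which is not shown in the snippet:

p = stlx_powerset(SetlxSet([1, 2]))
# a 2-element set has 2**2 == 4 subsets: {}, {1}, {2}, {1, 2}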
import copy


def get_entries_configuration(data):
    """Given the dictionary of resources, returns the generated factory xml file

    Args:
        data (dict): A dictionary similar to the one returned by ``get_information``

    Returns:
        str: The factory xml file as a string
    """
    entries_configuration = ""
    for _, site_information in sorted(data.items()):
        for celem, ce_information in sorted(site_information.items()):
            for _, q_information in sorted(ce_information.items()):
                for entry, entry_information in sorted(q_information.items()):
                    entry_configuration = copy.deepcopy(entry_information)
                    entry_configuration["entry_name"] = entry
                    # Can we get this information (next key)?
                    entry_configuration["attrs"]["GLIDEIN_REQUIRED_OS"] = {
                        "comment": "This value has been hardcoded",
                        "value": "any",
                    }
                    # Probably we can use port from attribute AddressV1 or CollectorHost
                    entry_configuration["gatekeeper"] = celem + " " + celem + ":9619"
                    entry_configuration["rsl"] = ""
                    entry_configuration["attrs"] = get_attr_str(entry_configuration["attrs"])
                    if "submit_attrs" in entry_configuration:
                        entry_configuration["submit_attrs"] = get_submit_attr_str(entry_configuration["submit_attrs"])
                    else:
                        entry_configuration["submit_attrs"] = ""
                    entry_configuration["limits"] = get_limits_str(entry_configuration["limits"])
                    entry_configuration["submission_speed"] = get_submission_speed(
                        entry_configuration["submission_speed"]
                    )
                    entries_configuration += ENTRY_STUB % entry_configuration
    return entries_configuration
db228df9062b8801f7edde5d1e2977ef1e451b5f
7,563
def validinput(x0, xf, n):
    """Checks that the user input is valid.

    Args:
        x0 (float): Start value
        xf (float): End value
        n (int): Number of sample points

    Returns:
        True if the input is valid; False if x0 > xf or if n is not a
        whole number
    """
    valid = True
    if x0 > xf:
        valid = False
    if int(n) != n:
        valid = False
    if not valid:
        print("Please recheck your input")
    return valid
096e0702eb8fe47486d4f03e5b3c55c0835807cd
7,564
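Example calls for the validator above:

assert validinput(0.0, 1.0, 10) is True
assert validinput(1.0, 0.0, 10) is False   # prints "Please recheck your input"
assert validinput(0.0, 1.0, 2.5) is False  # n must be a whole number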