Dataset columns: content (string, 35 to 762k characters), sha1 (string, 40 characters), id (int64, 0 to 3.66M).
def feedforward( inputs, input_dim, hidden_dim, output_dim, num_hidden_layers, hidden_activation=None, output_activation=None): """ Creates a dense feedforward network with num_hidden_layers layers where each layer has hidden_dim number of units except for the last layer which has output_dim number of units. Arguments: inputs: Tensor input. hidden_dim: The number of units in each hidden layer. output_dim: The number of units in the output layer. num_hidden_layers: The number of hidden layers. hidden_activation: The activation function of hidden layers. Set it to None to use a linear activation. output_activation: The activation function of the output layer. Set it to None to use a linear activation. Returns: Output tensor. """ prev_input_dim = input_dim prev_output = inputs for i in range(0, num_hidden_layers): with tf.variable_scope("dense" + str(i)): w_n = tf.get_variable("w_" + str(i), [prev_input_dim, hidden_dim], initializer=tf.initializers.random_normal(0, 1)) b_n = tf.get_variable("b_" + str(i), [hidden_dim], initializer=tf.initializers.random_normal(0, 1)) prev_input_dim = hidden_dim prev_output = hidden_activation(tf.matmul(prev_output, w_n) + b_n) with tf.variable_scope("dense_output"): return tf.layers.dense(prev_output, output_dim, activation=output_activation)
bbf6559d27e68ff4642d8842af50ff0d292bd1c8
14,700
import doctest def _test(): """ >>> solve("axyb", "abyxb") axb """ global chr def chr(x): return x doctest.testmod()
1ba052fbf066cee92ad2088b9562443c727292df
14,701
from typing import Optional def _basic_rebuild_chain(target: database.Target) -> RebuildChain: """ Get a rebuild chain based purely on 'rebuild info' from Jam. """ chain: RebuildChain = [(target, None)] current: Optional[database.Target] = target assert current is not None while True: reason = current.rebuild_reason current = current.rebuild_reason_target if current is None: break else: chain.append((current, reason)) return chain
966864ac71eafb982c2dff0f74e383e207127b32
14,702
def ravel_group_params(parameters_group): """Take a dict(group -> {k->p}) and return a dict('group:k'-> p) """ return {f'{group_name}:{k}': p for group_name, group_params in parameters_group.items() for k, p in group_params.items()}
4a768e89cd70b39bea4f658600690dcb3992a710
14,703
def decode_orders(game, power_name, dest_unit_value, factors): """ Decode orders from computed factors :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: A list of orders :type factors: Factors :type game: diplomacy.Game """ phase_type = game.get_current_phase()[-1] # Movement phase if phase_type == 'M': return generate_movement_orders(game, power_name, dest_unit_value, factors) # Retreat phase if phase_type == 'R': return generate_retreat_orders(game, power_name, dest_unit_value) # Adjustment if phase_type == 'A': power = game.get_power(power_name) nb_builds = len(power.centers) - len(power.units) # Building if nb_builds >= 0: return generate_build_orders(game, power_name, dest_unit_value) # Disbanding return generate_disband_orders(game, power_name, dest_unit_value) # Otherwise, invalid phase_type LOGGER.error('Invalid phase type. Got %s. Expected M, R, A', phase_type) return []
ac1e9b59d792158bb0b903709344b8535b330e73
14,704
from typing import Type def _convert_to_type(se, allow_any=False, allow_implicit_tuple=False): """ Converts an S-Expression representing a type, like (Vec Float) or (Tuple Float (Vec Float)), into a Type object, e.g. Type.Tensor(1,Type.Float) or Type.Tuple(Type.Float, Type.Tensor(1,Type.Float)). If allow_implicit_tuple is true, also converts a list of types into a Tuple, e.g. (Float (Vec Float)) becomes Type.Tuple(Type.Float, Type.Tensor(1,Type.Float)), i.e. as if the S-Expression began with an extra "Tuple". """ while isinstance(se, list) and len(se)==1: se=se[0] # Discard ((pointless)) brackets if isinstance(se, sexpdata.Symbol): if se.value() == "Any" and allow_any: return None return Type(se.value()) if isinstance(se, list) and len(se)>0: if isinstance(se[0], sexpdata.Symbol): sym = se[0].value() if sym == "Tensor" and len(se) == 3: assert se[1] == 1, "Only 1D 'Tensor's ('Vec's) supported" return Type.Tensor(1, _convert_to_type(se[2])) children = [_convert_to_type(s) for s in se[1:]] if sym == "Vec" and len(se)==2: return Type.Tensor(1, utils.single_elem(children)) if sym == "Tuple": return Type.Tuple(*children) # Fall through in case it's a list of types with allow_implicit_tuple. if allow_implicit_tuple: return Type.Tuple(*[_convert_to_type(s) for s in se]) raise ValueError("Did not know how to parse type {}".format(se))
f615244363fa7fcdc67c4d68580860b3145bd94f
14,705
def index(): """Returns a 200, that's about it!!!!!!!""" return 'Wow!!!!!'
f6d8a765556d2d6a1c343bb0ab1a9d4a6c5fd6ba
14,706
import os def file_sort_key(file): """Calculate the sort key for ``file``. :param file: The file to calculate the sort key for :type file: :class:`~digi_edit.models.file.File` :return: The sort key :rtype: ``tuple`` """ path = file.attributes['filename'].split(os.path.sep) path_len = len(path) key = [] for idx, element in enumerate(path): if idx < path_len - 1: key.append((1, element)) else: key.append((0, element)) return tuple(key)
1997e48c2355816d88e930e9fb3369096a227b63
14,707
def merge_tables(pulse_data, trial_data, merge_keys=TRIAL_GROUPER): """Add trial-wise information to the pulse-wise table.""" pulse_data = pulse_data.merge(trial_data, on=merge_keys) add_kernel_data(pulse_data) return pulse_data
1c5eafa44b50d05c8d23af7d290d0b40c2643ef9
14,708
def eos_deriv(beta, g): """ compute d E_os(beta)/d beta from polynomial expression""" x = np.tan(beta/2.0) y = g[4] + x * g[3] + x*x * g[2] + x*x*x*g[1] + x*x*x*x*g[0] y = y / ((1.0 + x*x)*(1.0 + x*x)*(1.0 + x*x)) return y
2e1055bc48364abfe5bb07a1d9eafd32fefb7031
14,709
def optimizeAngle(angle): """ Because any rotation can be expressed within 360 degrees of any given number, and since negative angles are sometimes one character longer than the corresponding positive angle, we shorten the number to one in the range [-90, 270[. """ # First, we put the new angle in the range ]-360, 360[. # The modulo operator yields results with the sign of the # divisor, so for negative dividends, we preserve the sign # of the angle. if angle < 0: angle %= -360 else: angle %= 360 # 720 degrees is unnecessary, as 360 covers all angles. # As "-x" is shorter than "35x" and "-xxx" one character # longer than positive angles <= 260, we constrain angle # range to [-90, 270[ (or, equally valid: ]-100, 260]). if angle >= 270: angle -= 360 elif angle < -90: angle += 360 return angle
8abcaba2542b59715ced1c0acec94194f6e357d7
14,710
def process_one_name(stove_name): """ Translates a single PokerStove-style name of holecards into an expanded list of pokertools-style names. For example: "AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"] "66" -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d"] """ if len(stove_name) == 3: rank1, rank2, suit_mark = stove_name if suit_mark == "s": return [ "{}{} {}{}".format(rank1, suit, rank2, suit) for suit in SUITS ] elif suit_mark == "o": return [ "{}{} {}{}".format(rank1, suit1, rank2, suit2) for suit1, suit2 in SUIT_PERMUATIONS ] else: raise TokeniserError("incorrect suit_mark in stove_name: {}".format(stove_name)) else: rank1, rank2 = stove_name if rank1 == rank2: return [ "{}{} {}{}".format(rank1, suit1, rank2, suit2) for suit1, suit2 in SUIT_COMBINATIONS ] else: raise TokeniserError("rank1 != rank2 in stove_name: {}".format(stove_name))
5a824df9ae1a723c350b635a6b3096b795d4c58e
14,711
def job_dispatch(results, job_id, batches): """ Process the job batches one at a time When there is more than one batch to process, a chord is used to delay the execution of remaining batches. """ batch = batches.pop(0) info('dispatching job_id: {0}, batch: {1}, results: {2}'.format(job_id, batch, results)) tasks = [job_worker.subtask((job_id, task_num)) for task_num in batch] # when there are other batches to process, use a chord to delay the # execution of remaining tasks, otherwise, finish off with a TaskSet if batches: info('still have batches, chording {0}'.format(batches)) callback = job_dispatch.subtask((job_id, batches)) return chord(tasks)(callback) else: info('only batch, calling TaskSet') return TaskSet(tasks=tasks).apply_async()
d6107c11bf350aedc1103e0e182f2808041abb5b
14,712
import logging import sqlite3 def get_temperature(): """ Serves temperature data from the database, in a simple html format """ logger = logging.getLogger("logger") #sqlite handler sql_handler = SQLiteHandler() logger.addHandler(sql_handler) logger.setLevel(logging.INFO) con = sqlite3.connect(db) cur = con.cursor() cur.execute("select * from temperatures") rows = cur.fetchall() cur.close() logger.info("Temperatures data was requested.") return render_template("temp.html", rows=rows)
3f2400c823ff2bc11a2b1910ce6cc39d90614178
14,713
def command_result_processor_category_empty(command_category): """ Command result message processor if a command category is empty. Parameters ---------- command_category : ``CommandLineCommandCategory`` Respective command category. Returns ------- message : `str` """ command_full_name = ''.join(command_category._trace_back_name()) message_parts = [] message_parts.append('Command category: ') message_parts.append(repr(command_full_name)) message_parts.append(' has no direct command, neither sub commands registered.\n') return ''.join(message_parts)
10da547a922bfd538a4241976385210969bf752a
14,714
def _parse_path(**kw): """ Parse leaflet `Path` options. http://leafletjs.com/reference-1.2.0.html#path """ color = kw.pop('color', '#3388ff') return { 'stroke': kw.pop('stroke', True), 'color': color, 'weight': kw.pop('weight', 3), 'opacity': kw.pop('opacity', 1.0), 'lineCap': kw.pop('line_cap', 'round'), 'lineJoin': kw.pop('line_join', 'round'), 'dashArray': kw.pop('dash_array', None), 'dashOffset': kw.pop('dash_offset', None), 'fill': kw.pop('fill', False), 'fillColor': kw.pop('fill_color', color), 'fillOpacity': kw.pop('fill_opacity', 0.2), 'fillRule': kw.pop('fill_rule', 'evenodd'), 'bubblingMouseEvents': kw.pop('bubbling_mouse_events', True), }
02d3810ad69a1a0b8f16d61e661e246aea5c09cc
14,715
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0, fill_mode='nearest', cval=0., interpolation_order=1): """Performs a random rotation of a Numpy image tensor. # Arguments x: Input tensor. Must be 3D. rg: Rotation range, in degrees. row_axis: Index of axis for rows in the input tensor. col_axis: Index of axis for columns in the input tensor. channel_axis: Index of axis for channels in the input tensor. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. interpolation_order int: order of spline interpolation. see `ndimage.interpolation.affine_transform` # Returns Rotated Numpy image tensor. """ theta = np.random.uniform(-rg, rg) x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) return x
57f263f1bee9fb323205543cba9c14c9a86ba431
14,716
from app.config import config_by_name from app.models import User from app.controllers import user_api, iris_api def create_app(config_name: str) -> Flask: """Create the Flask application Args: config_name (str): Config name mapping to Config Class Returns: [Flask]: Flask Application """ # Create the app app = Flask(__name__) # Log the current config name being used and setup app with the config app.logger: Logger app.logger.debug(f"CONFIG NAME: {config_name}") config = config_by_name[config_name] app.config.from_object(config) # Initialize the database db.init_app(app) # Initialize Rest+ API api.init_app(app) api.add_namespace(user_api, path="/user") api.add_namespace(iris_api, path="/iris") # Initialize the flask-praetorian instance for the app guard.init_app(app, User) return app
355b4f0ee97441eb8309c485f234b07418c24378
14,717
import subprocess def load_module(): """This function loads the module and returns any errors that occur in the process.""" proc = subprocess.Popen(["pactl", "load-module", "module-suspend-on-idle"], stderr=subprocess.PIPE) stderr = proc.communicate()[1].decode("UTF-8") return stderr
f95eefb380d0be6a65d4a3a044a33f13a96e190c
14,718
from typing import Optional import time from datetime import datetime def time_struct_2_datetime( time_struct: Optional[time.struct_time], ) -> Optional[datetime]: """Convert struct_time to datetime. Args: time_struct (Optional[time.struct_time]): A time struct to convert. Returns: Optional[datetime]: A converted value. """ return ( datetime.fromtimestamp(time.mktime(time_struct)) if time_struct is not None else None )
705b09428d218e8a47961e247b62b9dfd631a41f
14,719
import argparse def _parse_input(): """ A function for handling terminal commands. :return: The path to the experiment configuration file. """ parser = argparse.ArgumentParser(description='Performs CNN analysis according to the input config.') parser.add_argument('-i', '--experiments_file', default='experiments_config.json', type=str, help='A path to the experiments config file.') args = parser.parse_args() experiments_config_path = args.experiments_file return experiments_config_path
5486a1fee5eeb6b69f857d45f9e3e1a7f924ae5b
14,720
def independent_interdomain_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): """ The inducing outputs live in the g-space (R^L). Interdomain conditional calculation. :param Kmn: M x L x N x P :param Kmm: L x M x M :param Knn: N x P or N x N or P x N x N or N x P x N x P :param f: data matrix, M x L :param q_sqrt: L x M x M or M x L :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P """ logger.debug("independent_interdomain_conditional") M, L, N, P = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)] Lm = tf.cholesky(Kmm) # L x M x M # Compute the projection matrix A Kmn = tf.reshape(tf.transpose(Kmn, (1, 0, 2, 3)), (L, M, N * P)) A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # L x M x M * L x M x NP -> L x M x NP Ar = tf.reshape(A, (L, M, N, P)) # compute the covariance due to the conditioning if full_cov and full_output_cov: fvar = Knn - tf.tensordot(Ar, Ar, [[0, 1], [0, 1]]) # N x P x N x P elif full_cov and not full_output_cov: At = tf.reshape(tf.transpose(Ar), (P, N, M * L)) # P x N x ML fvar = Knn - tf.matmul(At, At, transpose_b=True) # P x N x N elif not full_cov and full_output_cov: At = tf.reshape(tf.transpose(Ar, [2, 3, 1, 0]), (N, P, M * L)) # N x P x ML fvar = Knn - tf.matmul(At, At, transpose_b=True) # N x P x P elif not full_cov and not full_output_cov: fvar = Knn - tf.reshape(tf.reduce_sum(tf.square(A), [0, 1]), (N, P)) # Knn: N x P # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(Lm, Ar) # L x M x M * L x M x NP -> L x M x NP Ar = tf.reshape(A, (L, M, N, P)) fmean = tf.tensordot(Ar, f, [[1, 0], [0, 1]]) # N x P if q_sqrt is not None: if q_sqrt.shape.ndims == 3: Lf = tf.matrix_band_part(q_sqrt, -1, 0) # L x M x M LTA = tf.matmul(Lf, A, transpose_a=True) # L x M x M * L x M x NP -> L x M x NP else: # q_sqrt M x L LTA = (A * tf.transpose(q_sqrt)[..., None]) # L x M x NP if full_cov and full_output_cov: LTAr = tf.reshape(LTA, (L * M, N * P)) fvar = fvar + tf.reshape(tf.matmul(LTAr, LTAr, transpose_a=True), (N, P, N, P)) elif full_cov and not full_output_cov: LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [2, 0, 1]) # P x LM x N fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # P x N x N elif not full_cov and full_output_cov: LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [1, 0, 2]) # N x LM x P fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # N x P x P elif not full_cov and not full_output_cov: fvar = fvar + tf.reshape(tf.reduce_sum(tf.square(LTA), (0, 1)), (N, P)) return fmean, fvar
e886fb4cbfc549cb02e728bba20ecad92dbfb135
14,721
def we_are_buying(account_from, account_to): """ Are we buying? (not buying == selling) """ buy = False sell = False for value in TRADING_ACCOUNTS: if (value.lower() in account_from): buy = True sell = False elif (value.lower() in account_to): buy = False sell = True return buy
a5748ad756f472e0e2c39b5dc5239265fbf3d1f4
14,722
import os def register(param, file_src, file_dest, file_mat, file_out, im_mask=None): """ Register two images by estimating slice-wise Tx and Ty transformations, which are regularized along Z. This function uses ANTs' isct_antsSliceRegularizedRegistration. :param param: :param file_src: :param file_dest: :param file_mat: :param file_out: :param im_mask: Image of mask, could be 2D or 3D :return: """ # TODO: deal with mask # initialization failed_transfo = 0 # by default, failed matrix is 0 (i.e., no failure) do_registration = True # get metric radius (if MeanSquares, CC) or nb bins (if MI) if param.metric == 'MI': metric_radius = '16' else: metric_radius = '4' file_out_concat = file_out kw = dict() im_data = Image(file_src) # TODO: pass argument to use antsReg instead of opening Image each time # register file_src to file_dest if param.todo == 'estimate' or param.todo == 'estimate_and_apply': # If orientation is sagittal, use antsRegistration in 2D mode # Note: the parameter --restrict-deformation is irrelevant with affine transfo if im_data.orientation[2] in 'LR': cmd = ['isct_antsRegistration', '-d', '2', '--transform', 'Affine[%s]' %param.gradStep, '--metric', param.metric + '[' + file_dest + ',' + file_src + ',1,' + metric_radius + ',Regular,' + param.sampling + ']', '--convergence', param.iter, '--shrink-factors', '1', '--smoothing-sigmas', param.smooth, '--verbose', '1', '--output', '[' + file_mat + ',' + file_out_concat + ']'] cmd += sct.get_interpolation('isct_antsRegistration', param.interp) if im_mask is not None: # if user specified a mask, make sure there are non-null voxels in the image before running the registration if np.count_nonzero(im_mask.data): cmd += ['--masks', im_mask.absolutepath] else: # Mask only contains zeros. Copying the image instead of estimating registration. sct.copy(file_src, file_out_concat, verbose=0) do_registration = False # TODO: create affine mat file with identity, in case used by -g 2 # 3D mode else: cmd = ['isct_antsSliceRegularizedRegistration', '--polydegree', param.poly, '--transform', 'Translation[%s]' %param.gradStep, '--metric', param.metric + '[' + file_dest + ',' + file_src + ',1,' + metric_radius + ',Regular,' + param.sampling + ']', '--iterations', param.iter, '--shrinkFactors', '1', '--smoothingSigmas', param.smooth, '--verbose', '1', '--output', '[' + file_mat + ',' + file_out_concat + ']'] cmd += sct.get_interpolation('isct_antsSliceRegularizedRegistration', param.interp) if im_mask is not None: cmd += ['--mask', im_mask.absolutepath] # run command if do_registration: kw.update(dict(is_sct_binary=True)) env = dict() env.update(os.environ) env = kw.get("env", env) # reducing the number of CPU used for moco (see issue #201) env["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1" status, output = sct.run(cmd, verbose=0, **kw) elif param.todo == 'apply': sct_apply_transfo.main(args=['-i', file_src, '-d', file_dest, '-w', file_mat + 'Warp.nii.gz', '-o', file_out_concat, '-x', param.interp, '-v', '0']) # check if output file exists if not os.path.isfile(file_out_concat): # sct.printv(output, verbose, 'error') sct.printv('WARNING in ' + os.path.basename(__file__) + ': No output. Maybe related to improper calculation of ' 'mutual information. Either the mask you provided is ' 'too small, or the subject moved a lot. If you see too ' 'many messages like this try with a bigger mask. ' 'Using previous transformation for this volume (if it' 'exists).', param.verbose, 'warning') failed_transfo = 1 # TODO: if sagittal, copy header (because ANTs screws it) and add singleton in 3rd dimension (for z-concatenation) if im_data.orientation[2] in 'LR' and do_registration: im_out = Image(file_out_concat) im_out.header = im_data.header im_out.data = np.expand_dims(im_out.data, 2) im_out.save(file_out, verbose=0) # return status of failure return failed_transfo
c1c90b411a70d6c6ea5975e499b98b919675956f
14,723
import os import csv def parse_geoname_table_file(fpath, delimiter='\t'): """ Parse the table given in a file :param fpath: string - path to the file :param delimiter: string - delimiter between columns in the file :returns: list of dict """ if not os.path.isfile(fpath): fstr = "path is not a file: {}".format(fpath) raise GlobeIndexerError(fstr) full_fpath = os.path.realpath(fpath) rows = list() with open(full_fpath, encoding='utf-8') as fin: reader = csv.DictReader(fin, fieldnames=GEONAME_TABLE_HEADERS, delimiter=delimiter, quoting=csv.QUOTE_NONE) for line in reader: rows.append(line) return rows
9a907da6c1ec331418b1be340bb798855888c633
14,724
import time def wait_for_compute_jobs(nevermined, account, jobs): """Monitor and wait for compute jobs to finish. Args: nevermined (:py:class:`nevermined_sdk_py.Nevermined`): A nevermined instance. account (:py:class:`contracts_lib_py.account.Account`): Account that published the compute jobs. jobs (:obj:`list` of :obj:`tuple`): A list of tuples with each tuple containing (service_agreement_id, compute_job_id). Returns: :obj:`list` of :obj:`str`: Returns a list of dids produced by the jobs Raises: ValueError: If any of the jobs fail """ failed = False dids = set() while True: finished = 0 for i, (sa_id, job_id) in enumerate(jobs): status = nevermined.assets.compute_status(sa_id, job_id, account) print(f"{job_id}: {status['status']}") if status["status"] == "Failed": failed = True if status["status"] == "Succeeded": finished += 1 dids.add(status["did"]) if failed: for i, (sa_id, job_id) in enumerate(jobs): logs = nevermined.assets.compute_logs(sa_id, job_id, account) for line in logs: print(f"[{line['podName']}]: {line['content']}") raise ValueError("Some jobs failed") if finished == len(jobs): break # move up 4 lines print("\u001B[4A") time.sleep(5) return list(dids)
98370b8d596f304630199578a360a639507ae3c3
14,725
def f1_score_loss(predicted_probs: tf.Tensor, labels: tf.Tensor) -> tf.Tensor: """ Computes a loss function based on F1 scores (harmonic mean of precision an recall). Args: predicted_probs: A [B, L] tensor of predicted probabilities labels: A [B, 1] tensor of expected labels Returns: A tensor of sample-wise losses """ # Apply a sharpened sigmoid function to approximate the threshold thresholded_predictions = predicted_probs - ONE_HALF level_predictions = 1.0 / (1.0 + tf.exp(BETA * thresholded_predictions)) # [B, L] # predictions = tf.reduce_prod(level_predictions, axis=-1, keepdims=True) # [B, 1] predictions = tf.exp(tf.reduce_sum(tf.log(level_predictions), axis=-1, keepdims=True)) # [B, 1] # Compute the (approximate) F1 score f1_score = 2 * tf.reduce_sum(predictions * labels) / (tf.reduce_sum(predictions) + tf.reduce_sum(labels)) return 1.0 - f1_score
df4f35516230a7c57b0c6b3e8b7e958feae900f8
14,726
def get_alarm_historys_logic(starttime, endtime, page, limit): """ GET 请求历史告警记录信息 :return: resp, status resp: json格式的响应数据 status: 响应码 """ data = {'alarm_total': 0, "alarms": []} status = '' message = '' resp = {"status": status, "data": data, "message": message} alarm_set = SfoAlarmLogMethod.group_by_alarm_device(page=int(page), limit=int(limit), starttime=starttime, endtime=endtime) if alarm_set: data['alarm_total'] = alarm_set.total for alarm in alarm_set.items: sfo_alarm_logs = SfoAlarmLogMethod.query_by_alarm_device(alarm.alarm_device, starttime, endtime) if len(sfo_alarm_logs) > 0: critical_len = filter(lambda x: x.alarm_level == 'critical', sfo_alarm_logs) warn_len = filter(lambda x: x.alarm_level == 'warning', sfo_alarm_logs) sfo_cluster_node = SfoClusterNodesMethod.query_host_by_host_name(alarm.hostname) alarm_info = {"alarm": sfo_alarm_logs[0], "total": len(sfo_alarm_logs), "warning_total": len(warn_len), "critical_total": len(critical_len)} if sfo_cluster_node and sfo_cluster_node.cluster_name: alarm_info.update({"cluster_name": sfo_cluster_node.cluster_name}) alarm_info.update({"ip": sfo_cluster_node.node_inet_ip}) data['alarms'].append(alarm_info) status = 200 message = 'OK' else: status = 404 message = 'Not Found Record' resp.update({"status": status, "data": data, "message": message}) return resp, status
bc273cf8e6d022374f92b7c3da86552a9dbbed2a
14,727
def showCities(): """ Shows all cities in the database """ if 'access_token' not in login_session: return redirect(url_for('showLogin')) cities = session.query(City).order_by(City.id) return render_template('cities.html', cities=cities)
558b2a8639f810cf105777ce89acc368e4441bbd
14,728
def symb_to_num(symbolic): """ Convert symbolic permission notation to numeric notation. """ if len(symbolic) == 9: group = (symbolic[:-6], symbolic[3:-3], symbolic[6:]) try: numeric = notation[group[0]] + notation[group[1]] + notation[group[2]] except: numeric = "Invalid Symbolic Representation!" else: numeric = "Symbolic input should be of length 9!" return numeric
c2c11697658322ad972e87ec1eb55d08eaa91e0e
14,729
def round_vector(v, fraction): """ ベクトルの各要素をそれぞれ round する Args: v (list[float, float, float]): Returns: list[float, float, float]: """ v = [round(x, fraction) for x in v] return v
47c10d23d9f2caa319f4f3fa97c85cf226752bab
14,730
def accept(model): """Return True if more than 20% of the validation data is being correctly classified. Used to avoid including nets which haven't learnt anything in the ensemble. """ accuracy = 0 for data, target in validation_data[:(500 // 100)]: if use_gpu: data, target = Variable(data.cuda(), volatile=True), Variable(target.cuda()) else: data, target = Variable(data, volatile=True), Variable(target) output = model(data) pred = output.data.max(1, keepdim=True)[1] accuracy += pred.eq(target.data.view_as(pred)).cpu().sum() if accuracy < 100: return False else: return True
c2921fb6dc0226b88fe7dd8219264cb5908feb6b
14,731
import struct def parse_tcp_packet(tcp_packet): """read tcp data.http only build on tcp, so we do not need to support other protocols.""" tcp_base_header_len = 20 # tcp header tcp_header = tcp_packet[0:tcp_base_header_len] source_port, dest_port, seq, ack_seq, t_f, flags = struct.unpack(b'!HHIIBB6x', tcp_header) # real tcp header len tcp_header_len = ((t_f >> 4) & 0xF) * 4 # skip extension headers if tcp_header_len > tcp_base_header_len: pass # body body = tcp_packet[tcp_header_len:] return source_port, dest_port, flags, seq, ack_seq, body
fa1b1050609cce8ca23ca5bac6276a681f560659
14,732
def find_balanced(text, start=0, start_sep='(', end_sep=')'): """ Finds balanced ``start_sep`` with ``end_sep`` assuming that ``start`` is pointing to ``start_sep`` in ``text``. """ if start >= len(text) or start_sep != text[start]: return start balanced = 1 pos = start + 1 while pos < len(text): token = text[pos] pos += 1 if token == end_sep: if balanced == 1: return pos balanced -= 1 elif token == start_sep: balanced += 1 return start
15c17a216405028b480efa9d12846905a1eb56d4
14,733
import datetime import requests import io import re def get_jhu_counts(): """ Get latest case count .csv from JHU. Return aggregated counts by country as Series. """ now = datetime.datetime.now().strftime("%m-%d-%Y") url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{now}.csv" req = requests.head(url) while req.status_code != 200: print("Got status " + str(req.status_code) + " for '" + url + "'") date = datetime.datetime.now() - datetime.timedelta(days=1) now = date.strftime("%m-%d-%Y") url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{now}.csv" req = requests.head(url) req = requests.get(url) jhu_df = pd.read_csv(io.StringIO(req.text)) print(f"Retrieved JHU case counts from {now}.") jhu_counts = jhu_df['Confirmed'].groupby( jhu_df['Country_Region']).sum().reset_index() jhu_counts['Country_Region'] = jhu_counts['Country_Region'].apply( lambda x: re.sub(r'[^a-zA-Z ]', '', x)) jhu_counts['Country_Region'] = jhu_counts['Country_Region'].apply( lambda x: _COUNTRY_MAP[x] if x in _COUNTRY_MAP.keys() else x) jhu_counts = jhu_counts.set_index('Country_Region') jhu_counts = pd.Series(jhu_counts.values.flatten(), index=jhu_counts.index) return jhu_counts
6a3fb69cce6f8976178afd3ff81ab2381b89abc5
14,734
def sectionsToMarkdown(root): """ Converts a list of Demisto JSON tables to markdown string of tables :type root: ``dict`` or ``list`` :param root: The JSON table - List of dictionaries with the same keys or a single dictionary (required) :return: A string representation of the markdown table :rtype: ``str`` """ mdResult = '' if isinstance(root, dict): for section in root: data = root[section] if isinstance(data, dict): data = [data] data = [{k: formatCell(row[k]) for k in row} for row in data] mdResult += tblToMd(section, data) return mdResult
3f916544cc5a9dc7e4d094d82834382d377948f1
14,735
import numpy def VonMisesFisher_sample(phi0, theta0, sigma0, size=None): """ Draw a sample from the Von-Mises Fisher distribution. Parameters ---------- phi0, theta0 : float or array-like Spherical-polar coordinates of the center of the distribution. sigma0 : float Width of the distribution. size : int, tuple, array-like number of samples to draw. Returns ------- phi, theta : float or array_like Spherical-polar coordinates of sample from distribution. """ n0 = cartesian_from_polar(phi0, theta0) M = rotation_matrix([0, 0, 1], n0) x = numpy.random.uniform(size=size) phi = numpy.random.uniform(size=size) * 2*numpy.pi theta = numpy.arccos(1 + sigma0**2 * numpy.log(1 + (numpy.exp(-2/sigma0**2)-1) * x)) n = cartesian_from_polar(phi, theta) x = M.dot(n) phi, theta = polar_from_cartesian(x) return phi, theta
440029bb9c3455dce22ff2d078068f9b7c404a7b
14,736
from typing import Optional from typing import Dict from typing import Any from unittest.mock import patch async def async_init_flow( hass: HomeAssistantType, handler: str = DOMAIN, context: Optional[Dict] = None, data: Any = None, ) -> Any: """Set up mock Roku integration flow.""" with patch( "homeassistant.components.roku.config_flow.Roku.device_info", new=MockDeviceInfo, ): return await hass.config_entries.flow.async_init( handler=handler, context=context, data=data )
2147f18b6b26e57e84d21aff321e8464710de653
14,737
import logging def _get_filtered_topics(topics, include, exclude): """ Filter the topics. :param topics: Topics to filter :param include: Topics to include if != None :param exclude: Topics to exclude if != and include == None :return: filtered topics """ logging.debug("Filtering topics (include=%s, exclude=%s) ...", include, exclude) return [t for t in include if t in topics] if include is not None else \ [t for t in topics if t not in exclude] if exclude is not None else topics
ec353ecc57015d10641562dd66dd30ba046a0a97
14,738
import inspect def create_cell(cell_classname, cell_params): """ Creates RNN cell. Args: cell_classname: The name of the cell class, e.g. "LSTMCell", "GRUCell" and so on. cell_params: A dictionary of parameters to pass to the cell constructor. Returns: A `tf.contrib.rnn.RNNCell` object. """ cell_params = cell_params.copy() # Find the cell class, use the in-house implemented LSTMCell & GRUCell cell_class = eval(cell_classname) # find from all CELL NAMES imported from tf.contrib.rnn # Make sure additional arguments are valid cell_args = set(inspect.getargspec(cell_class.__init__).args[1:]) new_cell_params = {} for key in cell_params.keys(): if key not in cell_args: # raise ValueError( tf.logging.info( """{} is not a valid argument for {} class. Available arguments are: {}""".format(key, cell_class.__name__, cell_args)) else: new_cell_params[key] = cell_params[key] # Create cell return cell_class(**new_cell_params)
64eed878f950499b599f992dbb50f2f05e8fbff9
14,739
def get_myia_tag(rtag): """Return the myia tag for a constructor. This will fail if you haven't properly called fill_reverse_tag_map(). """ return rev_tag_map[rtag]
95e7afb73ce15bbfe7a75c4708f5c81a9c9e22df
14,740
def get_priority(gene, phenotype): """ Get matched priority from the phenotype table. Parameters ---------- gene : str Gene name. phenotype : str Phenotype name. Returns ------- str EHR priority. Examples -------- >>> import pypgx >>> pypgx.get_priority('CYP2D6', 'Normal Metabolizer') 'Normal/Routine/Low Risk' >>> pypgx.get_priority('CYP2D6', 'Ultrarapid Metabolizer') 'Abnormal/Priority/High Risk' >>> pypgx.get_priority('CYP3A5', 'Normal Metabolizer') 'Abnormal/Priority/High Risk' >>> pypgx.get_priority('CYP3A5', 'Poor Metabolizer') 'Normal/Routine/Low Risk' """ if not is_target_gene(gene): raise NotTargetGeneError(gene) if phenotype not in list_phenotypes(): raise PhenotypeNotFoundError(phenotype) df = load_phenotype_table() i = (df.Gene == gene) & (df.Phenotype == phenotype) return df[i].Priority.values[0]
5520d8df0b79834227f059e98d66109134e84439
14,741
from tqdm import tqdm import datetime def _generator3(path): """ Args: path: path of the dataframe Returns: yield outputs of X and Y pairs """ args = init_args() catalog = load_catalog(path) def preprocess(x, y=None): zero = False if not np.any(x): zero = True img = (x - avg_x) / std_x return img, y, zero for index in tqdm(range(0, len(catalog), 200)): rows = catalog[index:index + 200] for idx, row in rows.iterrows(): # print(row) # pdb.set_trace() if row.ncdf_path == "nan": continue samples = load_numpy(row['hdf5_8bit_path']) offset_idx = row['hdf5_8bit_offset'] # continue timedelta_rows = [catalog[catalog.index == ( idx + datetime.timedelta(hours=i))] for i in [0, 1, 3, 6]] # CS_GHIs = [catalog[catalog.index==(idx+datetime.timedelta(hours=i))][station_i + "_CLEARSKY_GHI"].values[0] for i in [0,1,3,6]] for station_i in args.station_data.keys(): sample = samples[station_i] if row[[station_i + "_GHI"]].isnull()[0]: continue elif row[[station_i + "_DAYTIME"]][0] == 0: continue else: GHI_0 = row[station_i + "_GHI"] # train_df[train_df.index == train_df.index[0]+datetime.timedelta(hours=1)] # pdb.set_trace() GHIs = [i[station_i + "_GHI"].values[0] for i in timedelta_rows] CS_GHIs = [i[station_i + "_CLEARSKY_GHI"].values[0] for i in timedelta_rows] y = np.array(CS_GHIs) - np.array(GHIs) if np.isnan(np.sum(y)): continue # ini = time.time() # print(station_coords) imgs = [] x = sample[offset_idx].swapaxes(0, 1).swapaxes(1, 2) # print(y) x = preprocess(x)[0] continue yield x, y
3d26d9cab1777b3b72d85584c6ff95b39c725e47
14,742
def _extract_gsi(name): """ Extract a normalised groundstation if available. :param name: :rtype: str >>> _extract_gsi('LANDSAT-7.76773.S3A1C2D2R2') >>> _extract_gsi('AQUA.60724.S1A1C2D2R2') >>> _extract_gsi('TERRA.73100.S1A2C2D4R4') >>> _extract_gsi('LANDSAT-8.3108') >>> _extract_gsi('NPP.VIIRS.10014.ALICE') 'ASA' >>> _extract_gsi('NPP_VIRS_STD-HDF5_P00_18966.ASA_0_0_20150626T053709Z20150626T055046') 'ASA' >>> _extract_gsi('not_an_ads_dir') >>> _extract_gsi('LANDSAT-8.FAKE') """ last_component = name.split('.')[-1] if '_' in last_component: last_component = last_component.split('_')[0] if not metadata.is_groundstation_alias(last_component): return None return metadata.normalise_gsi(last_component)
b101b79df21b9d0bbb633dbca14ff6a5b207b91d
14,743
def array_at_verts_basic2d(a): """ Computes values at cell vertices on 2d array using neighbor averaging. Parameters ---------- a : ndarray Array values at cell centers, could be a slice in any orientation. Returns ------- averts : ndarray Array values at cell vertices, shape (a.shape[0]+1, a.shape[1]+1). """ assert a.ndim == 2 shape_verts2d = (a.shape[0] + 1, a.shape[1] + 1) # create a 3D array of size (nrow+1, ncol+1, 4) averts3d = np.full(shape_verts2d + (4,), np.nan) averts3d[:-1, :-1, 0] = a averts3d[:-1, 1:, 1] = a averts3d[1:, :-1, 2] = a averts3d[1:, 1:, 3] = a # calculate the mean over the last axis, ignoring NaNs averts = np.nanmean(averts3d, axis=2) return averts
e1f9ab5abbed6d4837daec01b8cd865d15cddde6
14,744
def get_subquestion_answer(response, questions, subquestion): """ Return the answer to a subquestion from ``response``. """ question_id = subquestion[0] answers = response[question_id] dim = len(subquestion) - 1 for answer in answers: matched = True if subquestion[1] != answer[0]: matched = False if dim == 2 and subquestion[2] != answer[1]: matched = False if matched: if dim == 1: answer = answer[1] else: answer = answer[2] return map_answer_expr(questions, question_id, answer)
e0b89db06570e35d1fb9eba7b762ed96bf7c16b8
14,745
def uniform_centroids(dist_map, n_centroids): """ Uniformly space `n_centroids` seeds in a naive way :param dist_map: sparse distance map :param n_centroids: number of seeds to place :return: (n_centroids, ) integer arrays with the indices of the seeds """ def get_dist(idx_vertex): return csgraph.dijkstra(dist_map, indices=idx_vertex, directed=False) res = np.zeros(n_centroids, dtype='i4') res[0] = np.random.randint(0, dist_map.shape[0]) dist = get_dist(res[0]) for idx in range(1, n_centroids): res[idx] = np.argmax(dist) np.minimum(dist, get_dist(res[idx]), out=dist) return res
437eaf8b70b56379d5529ea30026176fda9049a9
14,746
import collections import itertools def collate_custom(batch,key=None): """ Custom collate function for the Dataset class * It doesn't convert numpy arrays to stacked-tensors, but rather combines them in a list * This is useful for processing annotations of different sizes """ # this case will occur in first pass, and will convert a # list of dictionaries (returned by the threads by sampling dataset[idx]) # to a unified dictionary of collated values if isinstance(batch[0], collections.Mapping): return {key: collate_custom([d[key] for d in batch],key) for key in batch[0]} # these cases will occur in recursion #elif torch.is_tensor(batch[0]): # for tensors, use standrard collating function #return default_collate(batch) elif isinstance(batch,list) and isinstance(batch[0],list): # flatten lists of lists flattened_list = list(itertools.chain(*batch)) return flattened_list elif isinstance(batch,list) and len(batch)==1: # lists of length 1, remove list wrap return batch[0] else: # for other types (i.e. lists of len!=1), return as is return batch
b692252cb27aed68cb5af6cd5644913216a8dde7
14,747
def get_horizon(latitude, longitude, dem, ellipsoid=Ellipsoid("WGS84"), distance=0.5, precision=1): """ Compute local get_horizon obstruction from Digital Elevation Model This function is mainly based on a previous Matlab function (see https://fr.mathworks.com/matlabcentral/fileexchange/59421-dem-based-topography-get_horizon-model) :param latitude: :param longitude: :param dem: DigitalElevationModel instance :param ellipsoid: Ellipsoid instance :param distance: distance in degrees :param precision: azimuth precision of resulting horizon in degrees :return: """ # Prune DEM and fit to study area study_area = dem.get_raster_at(ll_point=(latitude - distance, longitude - distance), ur_point=(latitude + distance, longitude + distance)) y_obs, x_obs = study_area.geo_grid.latlon_to_2d_index(latitude, longitude) z_obs = study_area.get_value_at(latitude, longitude) # Azimuth and elevation azimuth = (180/np.pi) * get_azimuth(latitude * np.pi/180, longitude * np.pi/180, (study_area.geo_grid.latitude - dem.res/2) * np.pi/180, (study_area.geo_grid.longitude + dem.res/2) * np.pi/180, ellipsoid.e) elevation = np.zeros(azimuth.shape) elevation[study_area > z_obs] = \ get_elevation(z_obs, study_area[study_area > z_obs], latitude * np.pi/180, study_area.geo_grid.latitude[study_area > z_obs] * np.pi/180, longitude * np.pi/180, study_area.geo_grid.longitude[study_area > z_obs] * np.pi/180, ellipsoid.e, ellipsoid.a) # TODO: understand why "z_obs < study_area" return a numpy ValueError (ambiguous truth value) # Elevation vector length len_elevation = (90 + precision) // precision elevation_dic = dict(ne=np.zeros((y_obs, len_elevation)), e=np.zeros((study_area.x_size - x_obs, 2*len_elevation - 1)), s=np.zeros((study_area.y_size - y_obs, 2*len_elevation - 1)), w=np.zeros((x_obs, 2*len_elevation - 1)), nw=np.zeros((y_obs, len_elevation))) azimuth_dic = dict(ne=np.arange(-180, -90 + precision, precision), e=np.arange(-180, 0 + precision, precision), s=np.arange(-90, 90 + precision, precision), w=np.arange(0, 180 + precision, precision), nw=np.arange(90, 180 + precision, precision)) # Main computation # NE & NW for n, (az, el) in enumerate(zip(azimuth[:y_obs], elevation[:y_obs])): idx_ne = np.digitize(azimuth_dic["ne"], az[x_obs:]) idx_nw = np.digitize(azimuth_dic['nw'], az[:x_obs]) elevation_dic["ne"][n, idx_ne < len(az[x_obs:])] = el[x_obs:][idx_ne[idx_ne < len(az[x_obs:])]] elevation_dic["nw"][n, idx_nw < len(az[:x_obs])] = el[:x_obs][idx_nw[idx_nw < len(az[:x_obs])]] # South for n, (az, el) in enumerate(zip(azimuth[y_obs:, ::-1], elevation[y_obs:, ::-1])): idx_s = np.digitize(azimuth_dic["s"], az) elevation_dic["s"][n, idx_s < len(az)] = el[idx_s[idx_s < len(az)]] # East for n, (az, el) in enumerate(zip(azimuth[:, x_obs:].transpose(), elevation[:, x_obs:].transpose())): idx_e = np.digitize(azimuth_dic["e"], az) elevation_dic["e"][n, idx_e < len(az)] = el[idx_e[idx_e < len(az)]] # West for n, (az, el) in enumerate(zip(azimuth[::-1, :x_obs].transpose(), elevation[::-1, :x_obs].transpose())): idx_w = np.digitize(azimuth_dic["w"], az) elevation_dic["w"][n, idx_w < len(az)] = el[idx_w[idx_w < len(az)]] sun_mask = np.concatenate([elevation_dic[key].max(axis=0, initial=None) for key in elevation_dic.keys()]) az_mask = np.concatenate([azimuth_dic[key] for key in azimuth_dic.keys()]) + 180 horizon = dict(elevation=np.zeros((360 + precision)//precision), azimuth=np.arange(0, 360 + precision, precision)) for n, az in enumerate(horizon["azimuth"]): horizon["elevation"][n] = np.max(sun_mask[az_mask == az]) horizon["elevation"][-1] = horizon["elevation"][0] return horizon
342860c7320e009f4e32c8d610baeccef595460f
14,748
def get_articles(language, no_words, max_no_articles, search, **kwargs): """ Retrieve articles from Wikipedia """ wikipedia.set_rate_limiting(True) # be polite wikipedia.set_lang(language) if search is not None: titles = wikipedia.search(search, results = max_no_articles) else: titles = wikipedia.random(pages = max_no_articles) articles = [] current_no_words = 0 for title in titles: print("INFO: loading {}".format(title)) page = wikipedia.page(title=title) content = page.content article_no_words = len(content.split()) current_no_words += article_no_words print("INFO: article contains {} words".format(article_no_words)) articles.append((title, content)) if current_no_words >= no_words: break return articles
d6f2216a0800f6d9627d47ae1acda9e327583841
14,749
def gen_urdf_material(color_rgba): """ :param color_rgba: Four element sequence (0 to 1) encoding an rgba colour tuple, ``seq(float)`` :returns: urdf element sequence for an anonymous material definition containing just a color element, ``str`` """ return '<material name=""><color rgba="{0} {1} {2} {3}"/></material>'.format(*color_rgba)
d0fe1a706c932ad1a6f14aa3a9d9471de70650b9
14,750
import logging def plot(self, class_=None, show_plot=True, plot_3D=True, plot_probs=True, plot_dominant_classes=True, plot_poly=False, plot_normals=False, plot_subclasses=False, plot_legend=True, fig=None, ax=None, title='Softmax Classification', **kwargs): """Display the class and/or PDF plots of the Softmax distribution. The class plot shows only the critical classes (those that have the greatest probability at any given state). Parameters ---------- plot_dominant_classes : bool, optional Plot the critical classes. Defaults to `True`. plot_probs : bool, optional Plot the probability densities. Defaults to `True`. plot_poly : bool, optional Plot the polygon from which the boundaries are formed. Defaults to `False`. **kwargs Keyword arguments for ``plot_dominant_classes``. """ # Define probabilities lazily if not hasattr(self, 'probs') and not plot_subclasses: self.probability() if not hasattr(self, 'subclass_probs') and plot_subclasses: self.probability(find_subclass_probs=True) # Plotting attributes self.plot_3D = plot_3D self.plot_subclasses = plot_subclasses if plot_dominant_classes and plot_probs and class_ is None: if fig is None: self.fig = plt.figure(figsize=(14, 8)) else: self.fig = fig bbox_size = (-1.3, -0.175, 2.2, -0.075) if ax is None: ax1 = self.fig.add_subplot(1, 2, 1) if plot_3D and self.state.shape[1] > 1: ax2 = self.fig.add_subplot(1, 2, 2, projection='3d') else: ax2 = self.fig.add_subplot(1, 2, 2) else: ax1 = ax[0] ax2 = ax[1] self._plot_dominant_classes(ax1) self._plot_probs(ax2) axes = [ax1, ax2] elif plot_dominant_classes and class_ is None: if fig is None: self.fig = plt.figure(figsize=(8, 8)) else: self.fig = fig if ax is None: ax1 = self.fig.add_subplot(111) else: ax1 = ax bbox_size = (0, -0.175, 1, -0.075) self._plot_dominant_classes(ax=ax1, **kwargs) axes = [ax1] elif plot_probs: if fig is None: self.fig = plt.figure(figsize=(8, 8)) else: self.fig = fig if class_ is not None: if ax is None: if plot_3D and self.state.shape[1] > 1: ax = self.fig.add_subplot(1, 1, 1, projection='3d') else: ax = self.fig.add_subplot(1, 1, 1) self.classes[class_].plot(ax=ax, **kwargs) axes = [self.fig.gca()] else: if plot_3D and self.state.shape[1] > 1 and ax is None: ax1 = self.fig.add_subplot(111, projection='3d') elif ax is None: ax1 = self.fig.add_subplot(111) else: ax1 = ax self._plot_probs(ax1, **kwargs) axes = [ax1] bbox_size = (0, -0.15, 1, -0.05) if plot_legend: # Create Proxy artists for legend labels proxy = [None] * self.num_classes for i in range(self.num_classes): if self.class_labels[i] not in self.class_labels[:i]: proxy_label = self.class_labels[i] else: proxy_label = "_nolegend_" proxy[i] = plt.Rectangle((0, 0), 1, 1, fc=self.class_colors[i], alpha=0.6, label=proxy_label,) plt.legend(handles=proxy, loc='lower center', mode='expand', ncol=5, bbox_to_anchor=(0, 1.0 ,1, 0), borderaxespad=0.) # plt.legend(handles=proxy, loc='lower center', mode='expand', ncol=4, # bbox_to_anchor=bbox_size, borderaxespad=0.) plt.suptitle(title, fontsize=16) # Plot polygon if self.poly is not None and plot_poly and plot_dominant_classes: try: for poly in self.polys: patch = PolygonPatch(poly, facecolor='none', zorder=2, linewidth=3, edgecolor='black',) ax1.add_patch(patch) except: patch = PolygonPatch(self.poly, facecolor='none', zorder=2, linewidth=3, edgecolor='black',) ax1.add_patch(patch) # Plot normals # <>TODO fix crashing issue with vertical normals if self.normals is not None and plot_normals and plot_dominant_classes: t = np.arange(self.bounds[0], self.bounds[2] + 1) for i, normal in enumerate(self.normals): if abs(normal[1]) < 0.0001: ax1.axvline(self.offsets[i], ls='--', lw=3, c='black') else: slope = normal[0] y = slope * t - self.offsets[i] ax1.plot(t, y, ls='--', lw=3, c='black') if show_plot: plt.show() try: return axes except UnboundLocalError: logging.warn('No axes to return.')
6b0c760b35ddd8df30b817e702fbee209c5bc2d3
14,751
def roll_timeseries(arr, timezones): """ Roll timeseries from UTC to local time. Automatically compute time-shift from UTC offset (timezone) and time-series length. Parameters ---------- arr : ndarray Input timeseries array of form (time, sites) timezones : ndarray | list Vector of timezone shifts from UTC to local time Returns ------- local_arr : ndarray Array shifted to local time """ if arr.shape[1] != len(timezones): msg = ('Number of timezone shifts ({}) does not match number of ' 'sites ({})'.format(len(timezones), arr.shape[1])) raise ValueError(msg) time_step = arr.shape[0] // 8760 local_arr = np.zeros(arr.shape, dtype=arr.dtype) for tz in set(timezones): mask = timezones == tz local_arr[:, mask] = np.roll(arr[:, mask], int(tz * time_step), axis=0) return local_arr
4715425ea048a1ccb9c5fe2a1dc9e2ea1ecea085
14,752
def is_linear(a, eps=1e-3): """Check if array of numbers is approximately linear.""" x = np.diff(a[1:-1]).std() / np.diff(a[1:-1]).mean() return x < eps
0efa5c923012527d4973d24d67871a41ee2e3e91
14,753
def faces_sphere(src, show_path): """ Compute vertices and faces of Sphere input for plotting. Parameters ---------- - src (source object) - show_path (bool or int) Returns ------- vert, faces (returns all faces when show_path=int) """ # pylint: disable=protected-access res = 15 # surface discretization # generate sphere faces r = src.diameter / 2 phis = np.linspace(0, 2 * np.pi, res) phis2 = np.roll(np.linspace(0, 2 * np.pi, res), 1) ths = np.linspace(0, np.pi, res) faces = [ r * np.array( [ (np.cos(p) * np.sin(t1), np.sin(p) * np.sin(t1), np.cos(t1)), (np.cos(p) * np.sin(t2), np.sin(p) * np.sin(t2), np.cos(t2)), (np.cos(p2) * np.sin(t2), np.sin(p2) * np.sin(t2), np.cos(t2)), (np.cos(p2) * np.sin(t1), np.sin(p2) * np.sin(t1), np.cos(t1)), ] ) for p, p2 in zip(phis, phis2) for t1, t2 in zip(ths[1:-2], ths[2:-1]) ] faces += [ r * np.array( [(np.cos(p) * np.sin(th), np.sin(p) * np.sin(th), np.cos(th)) for p in phis] ) for th in [ths[1], ths[-2]] ] # add src attributes position and orientation depending on show_path rots, poss, _ = get_rot_pos_from_path(src, show_path) # all faces (incl. along path) adding pos and rot all_faces = [] for rot, pos in zip(rots, poss): for face in faces: all_faces += [[rot.apply(f) + pos for f in face]] return all_faces
eb454e55a932aae2f6b0f15587d1aa0be6da80f7
14,754
import itertools def pronto_signals_to_iguana_signals(carrier_frequency, signals): """Convert the pronto format into iguana format, where the pulses and spaces are represented in number of microseconds. """ return [carrier_cycles_to_microseconds(carrier_frequency, signal) | command for signal, command in zip(signals, itertools.cycle((iguanaIR.IG_PULSE_BIT, 0)))]
b8ddaf9f573abfe207d2ca2009904a3d93e360a4
14,755
import collections import itertools import pandas def stack_xarray_repdim(da, **dims): """Like xarrays stack, but with partial support for repeated dimensions The xarray.DataArray.stack method fails when any dimension occurs multiple times, as repeated dimensions are not currently very well supported in xarray (2018-03-26). This method provides a workaround so that stack can be used for an array where some dimensions are repeated, as long as the repeated dimensions are themselves not stacked. Parameters: da (DataArray): DataArray to operate on. **dims: Dimensions to stack. As for xarray.DataArray.stack. """ # make view of da without repeated dimensions cnt = collections.Counter(da.dims) D = {k: itertools.count() for k in cnt.keys()} tmpdims = [] dimmap = {} for dim in da.dims: if cnt[dim] == 1: tmpdims.append(dim) else: newdim = "{:s}{:d}".format(dim, next(D[dim])) tmpdims.append(newdim) dimmap[newdim] = dim da2 = xarray.DataArray(da.values, dims=tmpdims) da2_stacked = da2.stack(**dims) # put back repeated dimensions with new coordinates da3 = xarray.DataArray(da2_stacked.values, dims=[dimmap.get(d, d) for d in da2_stacked.dims]) da3 = da3.assign_coords( **{k: pandas.MultiIndex.from_product( [da.coords[kk] for kk in dims[k]], names=dims[k]) if k in dims else da.coords[k] for k in np.unique(da3.dims)}) return da3
5f0617ccd054c6d11573b00f659308780db4d0d7
14,756
import math def compute_pnorm(model: nn.Module) -> float: """ Computes the norm of the parameters of a model. :param model: A PyTorch model. :return: The norm of the parameters of the model. """ return math.sqrt(sum([p.norm().item() ** 2 for p in model.parameters()]))
610c640902f411221f90c5c7b48d3b3246a60124
14,757
def atomic_brute_cast(tree: Element) -> Element: """ Cast every node's text into an atomic string to prevent further processing on it. Since we generate the final HTML with Jinja templates, we do not want other inline or tree processors to keep modifying the data, so this function is used to mark the complete tree as "do not touch". Reference: issue [Python-Markdown/markdown#920](https://github.com/Python-Markdown/markdown/issues/920). On a side note: isn't `atomic_brute_cast` such a beautiful function name? Arguments: tree: An XML node, used like the root of an XML tree. Returns: The same node, recursively modified by side-effect. You can skip re-assigning the return value. """ if tree.text: tree.text = AtomicString(tree.text) for child in tree: atomic_brute_cast(child) return tree
57d13b5e97b7f94593f925f745bdf833b15e03a1
14,758
def rsa_keys(p: int = None, q: int = None, e: int = 3) -> RSA_Keys: """ Generate a new set of RSA keys. If p and q are not provided (<= 1), then they will be generated. :param p: A big prime. :param q: A big prime. :param e: The default public key. :return: The RSA private and public keys. :raise Exception: If provided p and q are invalid. """ if not p or p <= 1: p = matasano.math.random_big_prime(e=e) if not q or q <= 1: q = matasano.math.random_big_prime(e=e) n = p * q phi_n = (p - 1) * (q - 1) d = matasano.math.modinv(e, phi_n) return RSA_Keys(RSA_Priv(d, n), RSA_Pub(e, n))
b09fea8b6c23e4709c0f49faf2cb9b20463a2db9
14,759
import math import contextlib import os def chunk_file(file_path, chunks, work_dir): """Splits a large file by line into number of chunks and writes them into work_dir""" with open(file_path) as fin: num_lines = sum(1 for line in fin) chunk_size = math.ceil(num_lines / chunks) output_file_paths = [] with contextlib.ExitStack() as stack: fin = stack.enter_context(open(file_path)) for i, line in enumerate(fin): if not i % chunk_size: file_split = "{}.chunk_{}".format( os.path.join(work_dir, os.path.basename(file_path)), i // chunk_size ) output_file_paths.append(file_split) fout = stack.enter_context(open(file_split, "w")) fout.write("{}\n".format(line.strip())) return output_file_paths
290ce02a4c6767374ced10fbf28d2c1ed1d5b69a
14,760
def _get_closest_station_by_zcta_ranked(zcta): """ Selects the nth ranked station from a list of ranked stations Parameters ---------- zcta : string ZIP Code Tabulation Area (ZCTA) Returns ------- station : string Station that was found warnings : list List of warnings for the returned station (includes distance warnings) lat : float latitude for the search lon : float longitude for the search """ zcta = zcta.zfill(5) # Ensure that we have 5 characters, and if not left-pad it with zeroes. lat, lon = zcta_to_lat_long(zcta) finding_station = True rank = 0 while finding_station: rank = rank + 1 station_ranking = _rank_stations_by_distance_and_quality(lat, lon) station, warnings = select_station(station_ranking, rank=rank) # Ignore stations that begin with A if str(station)[0] != 'A': finding_station = False return station, warnings, lat, lon
b9cbd7ccc4a22c3069e11bc0542700b8ee087a1c
14,761
def label_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if not title: if normalize: title = 'Normalized Confusion Matrix' else: title = 'Confusion Matrix, without normalization' # Compute confusion matrix cm = confusion_matrix(y_true, y_pred) # Only use the labels that appear in the data classes = classes[unique_labels(y_true, y_pred)] if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized Confusion Matrix") else: print('Confusion Matrix, without normalization') print(cm) fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() return ax
0a1dc4665de4c2b876a0a40d5aa1fcfb1a9113d9
14,762
import warnings def lowpass(data,in_t=None,cutoff=None,order=4,dt=None,axis=-1,causal=False): """ data: vector of data in_t: sample times cutoff: cutoff period in the same units as in_t returns vector same as data, but with high frequencies removed """ # Step 1: Determine dt from data or from user if specified if dt is None: dt=np.median(np.diff(in_t)) dt=float(dt) # make sure it's not an int cutoff=float(cutoff) Wn = dt / cutoff B,A = butter(order, Wn) if not causal: # scipy filtfilt triggers some warning message about tuple # indices. with warnings.catch_warnings(): warnings.simplefilter("ignore") data_filtered = filtfilt(B,A,data,axis=axis) else: data_filtered = lfilter(B,A,data,axis=axis) return data_filtered
f182fdb912be827d0d8e4fd788cc2cadca453b5a
14,763
def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices):
    """
    This function indexes out a target dimension of a tensor in a structured way,
    by allowing a different value to be selected for each member of a flat index
    tensor (@indices) corresponding to a source dimension. This can be interpreted
    as moving along the source dimension, using the corresponding index value
    in @indices to select values for all other dimensions outside of the
    source and target dimensions. A common use case is to gather values
    in target dimension 1 for each batch member (target dimension 0).

    Args:
        x (torch.Tensor): tensor to gather values for
        target_dim (int): dimension to gather values along
        source_dim (int): dimension to hold constant and use for gathering values
            from the other dimensions
        indices (torch.Tensor): flat index tensor with same shape as tensor @x along
            @source_dim

    Returns:
        y (torch.Tensor): gathered tensor, with dimension @target_dim indexed out
    """
    assert len(indices.shape) == 1
    assert x.shape[source_dim] == indices.shape[0]

    # unsqueeze in all dimensions except the source dimension
    new_shape = [1] * x.ndimension()
    new_shape[source_dim] = -1
    indices = indices.reshape(*new_shape)

    # repeat in all dimensions - but preserve shape of source dimension,
    # and make sure target_dimension has singleton dimension
    expand_shape = list(x.shape)
    expand_shape[source_dim] = -1
    expand_shape[target_dim] = 1
    indices = indices.expand(*expand_shape)

    out = x.gather(dim=target_dim, index=indices)
    return out.squeeze(target_dim)
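A short usage sketch: pick one timestep per batch element from a (batch, time, feature) tensor. The shapes and values are illustrative only.

import torch

x = torch.arange(2 * 3 * 4, dtype=torch.float32).reshape(2, 3, 4)  # (batch=2, time=3, feat=4)
timesteps = torch.tensor([0, 2])  # timestep to keep for each batch member

y = gather_along_dim_with_dim_single(x, target_dim=1, source_dim=0, indices=timesteps)
print(y.shape)  # torch.Size([2, 4]); y[0] equals x[0, 0] and y[1] equals x[1, 2]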
06fbba5478ddb21cda9a555c41c94c809244537c
14,764
import os
import sys


def resolve_parallelism(parallel):
    """Decide what level of parallelism to use.

    Parameters
    ----------
    parallel : integer or None
        The user's specification

    Returns
    -------
    A positive integer giving the parallelization level.
    """
    if parallel is None:
        if mp.get_start_method() == 'fork':
            parallel = os.cpu_count()
            if SHOW_INFORMATIONAL_MESSAGES and parallel > 1:
                print(f'info: parallelizing processing over {parallel} CPUs')
        else:
            parallel = 1

    if parallel > 1 and mp.get_start_method() != 'fork':
        print('''warning: parallel processing was requested but is not possible
because this operating system is not using `fork`-based multiprocessing

On macOS a bug prevents forking: https://bugs.python.org/issue33725''', file=sys.stderr)
        parallel = 1

    if parallel > 1:
        return parallel

    return 1
0de01d63039162c3fd0afc4889dc9c733a8489b8
14,765
def get_queue(launcher=None):
    """Get the name of the queue used in an allocation.

    :param launcher: Name of the WLM to use to collect allocation info. If no launcher
                     is provided ``detect_launcher`` is used to select a launcher.
    :type launcher: str | None
    :returns: Name of the queue
    :rtype: str
    :raises SSUnsupportedError: User attempted to use an unsupported WLM
    """
    if launcher is None:
        launcher = detect_launcher()

    if launcher == "pbs":
        return _pbs.get_queue()
    if launcher == "slurm":
        return _slurm.get_queue()

    raise SSUnsupportedError(f"SmartSim cannot get queue for launcher `{launcher}`")
56fd4e59877363fd6e889bae52a9b5abf77230f6
14,766
def get_openmc_geometry(openmoc_geometry):
    """Return an OpenMC geometry corresponding to an OpenMOC geometry.

    Parameters
    ----------
    openmoc_geometry : openmoc.Geometry
        OpenMOC geometry

    Returns
    -------
    openmc_geometry : openmc.Geometry
        Equivalent OpenMC geometry
    """
    cv.check_type('openmoc_geometry', openmoc_geometry, openmoc.Geometry)

    # Clear dictionaries and auto-generated ID
    OPENMC_SURFACES.clear()
    OPENMOC_SURFACES.clear()
    OPENMC_CELLS.clear()
    OPENMOC_CELLS.clear()
    OPENMC_UNIVERSES.clear()
    OPENMOC_UNIVERSES.clear()
    OPENMC_LATTICES.clear()
    OPENMOC_LATTICES.clear()

    openmoc_root_universe = openmoc_geometry.getRootUniverse()
    openmc_root_universe = get_openmc_universe(openmoc_root_universe)

    openmc_geometry = openmc.Geometry()
    openmc_geometry.root_universe = openmc_root_universe

    return openmc_geometry
af1eb3cbbcdb4122b28b544bc252f754758ababf
14,767
def distinct(xs):
    """Get the list of distinct values with preserving order."""
    # don't use collections.OrderedDict because we do support Python 2.6
    seen = set()
    return [x for x in xs if x not in seen and not seen.add(x)]
e5dafd942c8aa0314b7e9aa2ec09795796cac34a
14,768
def get_price_to_free_cash_flow_ratio(equity, year=None, market_cap=None):
    """
    This ratio can be found by dividing the current price of the stock by its free cash flow per share.
    The easiest way is to get it from the ratios object extracted from investing.
    """
    try:
        price_to_free_cash_flow = None
        if year is None:
            # get it from the ratios
            ratios = equity.fundamentals.ratios
            sorted_ratios = sorted(ratios, key=lambda x: x.current_period, reverse=True)  # the newest in front
            # Starting from the first going down the list.
            for ratio in sorted_ratios:
                if ratio.benchmark == Benchmark.company:
                    price_to_free_cash_flow = ratio.price_to_free_cash_flow_ttm
                    break

        if price_to_free_cash_flow is None:
            price_to_free_cash_flow = 1000

        return price_to_free_cash_flow
    except Exception as e:
        log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
80fd0441030d1310ceff89497196840cc2be870f
14,769
import copy


def _parse_train_configs(train_config):
    """
    check if user's train configs are valid.

    Args:
        train_config(dict): user's train config.

    Return:
        configs(dict): final configs will be used.
    """
    configs = copy.deepcopy(_train_config_default)
    configs.update(train_config)

    assert isinstance(configs['num_epoch'], int), \
        "'num_epoch' must be int value"
    assert isinstance(configs['max_iter'], int), \
        "'max_iter' must be int value"
    assert isinstance(configs['save_iter_step'], int), \
        "'save_iter_step' must be int value"
    assert isinstance(configs['learning_rate'], float), \
        "'learning_rate' must be float"
    assert isinstance(configs['weight_decay'], float), \
        "'weight_decay' must be float"
    assert isinstance(configs['use_pact'], bool), \
        "'use_pact' must be bool"
    assert isinstance(configs['quant_model_ckpt_path'], str), \
        "'quant_model_ckpt_path' must be str"
    assert isinstance(configs['teacher_model_path_prefix'], str), \
        "'teacher_model_path_prefix' must both be string"
    assert isinstance(configs['model_path_prefix'], str), \
        "'model_path_prefix' must both be str"
    assert isinstance(configs['distill_node_pair'], list), \
        "'distill_node_pair' must both be list"
    assert len(configs['distill_node_pair']) > 0, \
        "'distill_node_pair' not configured with distillation nodes"
    assert len(configs['distill_node_pair']) % 2 == 0, \
        "'distill_node_pair' distillation nodes need to be configured in pairs"

    return train_config
339539eac9a0463f4fd11d471cfa3f4971010969
14,770
def as_region(region):
    """
    Convert string to :class:`~GenomicRegion`.

    This function attempts to convert any string passed to it
    to a :class:`~GenomicRegion`. Strings are expected to be
    of the form <chromosome>[:<start>-<end>[:[strand]], e.g.
    chr1:1-1000, 2:2mb-5mb:-, chrX:1.5kb-3mb, ...

    Numbers can be abbreviated as '12k', '1.5Mb', etc.

    When fed a :class:`~GenomicRegion`, it will simply be returned,
    making the use of this function as an "if-necessary" converter
    possible.

    :param region: str or :class:`~GenomicRegion`
    :return: :class:`~GenomicRegion`
    """
    if isinstance(region, string_types):
        return GenomicRegion.from_string(region)
    elif isinstance(region, GenomicRegion):
        return region
    raise ValueError("region parameter cannot be converted to GenomicRegion!")
863b1f982e9b411a023ab876661123b5565fae91
14,771
import re


def parse_user_next_stable(user):
    """
    Parse the specified user-defined string containing the next stable
    version numbers and returns the discretized matches in a dictionary.
    """
    try:
        data = re.match(user_version_matcher, user).groupdict()
        if len(data) < 3:
            raise AttributeError
    except AttributeError:
        return False
    return data
3d5de92fdb119a85bc6b5e87a8399cc07e6c9ee8
14,772
from tqdm import tqdm  # the original `import tqdm` would make tqdm(...) below a module call


def interp_ADCP_2D(
    sadcp,
    mask,
    depth,
    lon,
    lat,
    time,
    time_win=360.0,
    rmax=15.0,
    vmax=2.0,
    range_min=4.0,
):
    """
    This is essentially a loop over the interp_ADCP function with some
    additional NaN handling. Assume data is of the form D[i, j] where each j
    represents a profile and i a depth in that profile.

    Parameters
    ----------
    sadcp : Munch
        Munch structure of sadcp data
    mask : 2D array
        Mask of boolean values specifying valid depths to interpolate to.
    depth : array
        Depths (m) at which to interpolate ADCP data.
    lon : array
        Longitude of CTD/VMP profile.
    lat : array
        Latitude of CTD/VMP profile.
    time : array
        Time of CTD/VMP profile as matlab datenum.
    time_win : float, optional
        Time window for search (s) centered on time of profile. Data outside
        the time range is excluded.
    rmax : float, optional
        Distance threshold (m) defines a circle around the location of the
        profile. Data outside the circle is excluded.
    vmax : float, optional
        Velocity threshold (m/s) above which we remove velocity data
    range_min : float, optional
        ADCP minimum range threshold (m) below which we remove data

    Return
    ------
    u : 2D array
        Zonal velocity (m/s) interpolated to given depths.
    v : 2D array
        Meridional velocity (m/s) interpolated to given depths.
    w : 2D array
        Vertical velocity (m/s) interpolated to given depths.
    lonm : array
        Mean longitude of ADCP data.
    latm : array
        Mean latitude of ADCP data.
    range_bottom : array
        Minimum beam range to bottom (m).
    n : array
        Number of ADCP profiles in average.
    """
    u = np.full_like(mask, np.nan, dtype=float)
    v = np.full_like(mask, np.nan, dtype=float)
    w = np.full_like(mask, np.nan, dtype=float)
    lonm = np.full_like(time, np.nan)
    latm = np.full_like(time, np.nan)
    range_bottom = np.full_like(time, np.nan)
    n = np.full_like(time, np.nan)

    for i in tqdm(range(time.size)):
        valid = mask[:, i]
        try:
            u_, v_, w_, lon_, lat_, range_bottom_, n_ = interp_ADCP(
                sadcp,
                depth[valid],
                lon[i],
                lat[i],
                time[i],
                time_win=time_win,
                rmax=rmax,
                vmax=vmax,
                range_min=range_min,
            )
        except RuntimeError as err:
            continue

        # Fill data
        u[valid, i] = u_
        v[valid, i] = v_
        w[valid, i] = w_
        lonm[i] = lon_
        latm[i] = lat_
        range_bottom[i] = range_bottom_
        n[i] = n_

    return u, v, w, lonm, latm, range_bottom, n
ec092d203ef1cfee176bdf9ae05021fd876d444a
14,773
def extract_p(path, dict_obj, default):
    """
    try to extract dict value in key path, if key error provide default
    :param path: the nested dict key path, separated by '.'
                 (therefore no dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param default: a default return value if key error
    :return: extracted value
    """
    if dict_obj is None:
        return default
    keys = path.split('.')
    tmp_iter = dict_obj
    for key in keys:
        try:
            # dict.get() might make KeyError exception unnecessary
            tmp_iter = tmp_iter.get(key, default)
        except KeyError:
            return default
    return tmp_iter
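A small usage sketch with a made-up nested dict, just to show the dotted-path lookup and the default fallback.

config = {"db": {"host": "localhost", "port": 5432}}

print(extract_p("db.port", config, 0))      # 5432
print(extract_p("db.timeout", config, 30))  # 30 (missing leaf falls back to the default)
print(extract_p("db.host", None, "n/a"))    # 'n/a' (no dict at all)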
1a563212e229e67751584885c5db5ac19157c37f
14,774
def default_lscolors(env):
    """Gets a default instance of LsColors"""
    inherited_lscolors = os_environ.get("LS_COLORS", None)
    if inherited_lscolors is None:
        lsc = LsColors.fromdircolors()
    else:
        lsc = LsColors.fromstring(inherited_lscolors)
    # have to place this in the env, so it is applied
    env["LS_COLORS"] = lsc
    return lsc
0ad54d1220308a51194a464a2591be6edcc8d0ff
14,775
import logging


def get_indices_by_sent(start, end, offsets, tokens):
    """
    Get sentence index for textbounds
    """
    # iterate over sentences
    sent_start = None
    sent_end = None
    token_start = None
    token_end = None
    for i, sent in enumerate(offsets):
        for j, (char_start, char_end) in enumerate(sent):
            if (start >= char_start) and (start < char_end):
                sent_start = i
                token_start = j
            if (end > char_start) and (end <= char_end):
                sent_end = i
                token_end = j + 1

    assert sent_start is not None
    assert sent_end is not None
    assert token_start is not None
    assert token_end is not None

    if (sent_start != sent_end):
        logging.warn(f"Entity spans multiple sentences, truncating")
        token_end = len(offsets[sent_start])

    toks = tokens[sent_start][token_start:token_end]

    return (sent_start, token_start, token_end, toks)
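A hedged usage sketch. The layout of `offsets` and `tokens` is inferred from the function body: `offsets` holds one list of (char_start, char_end) spans per sentence, and `tokens` mirrors it with the token strings; the example text is invented.

# Two sentences: "Hello world. Again."
offsets = [[(0, 5), (6, 11), (11, 12)], [(13, 18), (18, 19)]]
tokens = [["Hello", "world", "."], ["Again", "."]]

# Entity covering "world" (characters 6-11).
print(get_indices_by_sent(6, 11, offsets, tokens))  # (0, 1, 2, ['world'])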
7ce90b69c63b18ee1c025970f5a645f5f4095d3b
14,776
def get_server_object_by_id(nova, server_id):
    """
    Returns a server with a given id
    :param nova: the Nova client
    :param server_id: the server's id
    :return: an SNAPS-OO VmInst object or None if not found
    """
    server = __get_latest_server_os_object_by_id(nova, server_id)
    return __map_os_server_obj_to_vm_inst(server)
384a5481c41937dfb7fcfdfdcc14bf0123db38a7
14,777
import os


def get_tasks(container_name):
    """Get the list of tasks in a container."""
    file_name = tasks_path(container_name)
    try:
        tasks = [x.rstrip() for x in open(file_name).readlines()]
    except IOError:
        if os.path.exists(file_name):
            raise
        tasks = []  # container doesn't exist anymore
    return tasks
9b75eaf5dfea43dea2e8dbad302e5a0d0b975ebd
14,778
import argparse


def get_args() -> argparse.Namespace:
    """Get script command line arguments."""
    parser = argparse.ArgumentParser(description=__doc__.split("\n")[0])
    parser.add_argument(
        "-i",
        "--input-files",
        required=True,
        nargs="+",
        type=helpers.check_file_arg,
        help="Path to coverage files",
    )
    parser.add_argument(
        "-o",
        "--output-file",
        help="File where to save coverage results",
    )
    parser.add_argument(
        "-u",
        "--uncovered-only",
        action="store_true",
        help="Report only uncovered arguments",
    )
    parser.add_argument(
        "-p",
        "--print-coverage",
        action="store_true",
        help="Print coverage percentage",
    )
    parser.add_argument(
        "-b",
        "--badge-icon-url",
        action="store_true",
        help="Print badge icon URL",
    )
    parser.add_argument(
        "--ignore-skips",
        action="store_true",
        help="Include all commands and arguments, ignore list of items to skip",
    )
    return parser.parse_args()
e9a015bfc9e9c73c9bc039206d355624174bd4e4
14,779
import os


def _get_archive(software, version):
    """
    Gets the downloaded source archive for a software version.

    :param software: software to get the downloaded source archive for
    :type software: str
    :param version: software release
    :type version: str
    """
    download_dir = get_download_location()
    archives = os.listdir(download_dir)
    prefix = "{}-{}.".format(software, version)
    for archive in archives:
        if archive.startswith(prefix):
            return os.path.join(download_dir, archive)
    return None
07e1269ae5adfe24b645dffb50b6a3ba66ed30a0
14,780
def make_odm(study_oid, environment, site_oid, subject_oid, mapping,
             retrieved_datetime, transfer_user, transfer_identifier, freeze=True):
    """Receives a mapping like:
        [
            dict(folder_oid="SCRN", form_oid="DM", field_oid="SEX", value="M", cdash_domain="DM", cdash_element="SEX"),
            dict(folder_oid="SCRN", form_oid="DM", field_oid="DOB", value="1965-02-09", cdash_domain="DM", cdash_element="DOB"),
            ...
        ]

    Unpacks this into an ODM Message broken up by [folder][form][record][field]
    """
    # Sort unstructured dicts into hierarchy of objects to send
    folders = {}  # Map of folders to forms to records to fields
    for row in mapping:
        folder_oid = row.get('folder_oid', 'SUBJECT')
        folder = folders.get(folder_oid, False)
        if not folder:
            folder = Folder(folder_oid)
            folders[folder_oid] = folder

        form_oid = row.get('form_oid')
        form = folder.forms.get(form_oid, False)
        if not form:
            form = Form(form_oid)
            folder.forms[form_oid] = form

        # add_field sorts into appropriate records
        form.add_field(row)

    # Now loop through our structure and build ODM
    study_events = []
    for folder_oid in folders:
        folder = folders[folder_oid]
        study_event = StudyEventData(folder.oid, study_event_repeat_key=None)  # TODO: Folder repeat key?
        study_events.append(study_event)

        # Loop through forms in folder
        for form_oid in folder.forms:
            form = folder.forms[form_oid]

            # Add formdata to study event
            formdata = FormData(form.oid, transaction_type="Update")
            study_event << formdata

            # Loop through records we gathered
            for record_context in form.records:
                record = form.records[record_context]

                params = {}
                if record_context is not None:
                    # Log line?
                    params['oid'] = "{0}_LOG_LINE".format(form_oid)

                ig = ItemGroupData()

                # Add itemgroupdata to formdata
                formdata << ig

                # Add all items to itemgroupdata along with external audits to show where they came from
                for field in record.fields:
                    transaction_type = None
                    if field.context_item:
                        if field.is_new:
                            ig.transaction_type = 'Upsert'
                        else:
                            # We want to do a seek and update
                            transaction_type = "Context"
                            ig.transaction_type = 'Update'
                            ig.item_group_repeat_key = '@CONTEXT'

                    ehr_message = "Import from EHR: EHR Source Value %s -> Submitted value: %s" % (field.raw, field.value)

                    item_data = ItemData(field.oid, field.value, specify_value=field.specify_value,
                                         transaction_type=transaction_type, freeze=freeze)(
                        AuditRecord(used_imputation_method=False,
                                    identifier=transfer_identifier,
                                    include_file_oid=False)(
                            UserRef(transfer_user),
                            LocationRef(site_oid),
                            ReasonForChange(ehr_message),  # Any string, just becomes part of documentation in Audit trail
                            DateTimeStamp(retrieved_datetime)
                        )
                    )

                    # Measurement unit related to this value?
                    if field.measurement_unit is not None:
                        item_data << MeasurementUnitRef(field.measurement_unit)

                    # Add to itemgroup
                    ig << item_data

                # In context update situation we need to pass the value of the context field with transaction type
                # of context. So if that is not one of the fields passed in we need to include it for this record
                if not record.has_context_field and record_context is not None:
                    # create the itemdata element, add the mdsol:Freeze attribute
                    ig << ItemData(record.context_field_oid, record.context_field_value,
                                   transaction_type="Context", freeze=freeze)
                    ig.item_group_repeat_key = '@CONTEXT'

    odm = ODM("EHRImport")(
        ClinicalData(study_oid, environment)(
            SubjectData(site_oid, subject_oid, transaction_type="Update",
                        subject_key_type='SubjectUUID')(*study_events)
        )
    )

    return odm
b35728d1eef7ad2bb361e34826df574415737aa4
14,781
from xml.dom.minidom import parseString
import attr


def parse_string(xml):
    """ Returns a slash-formatted string from the given XML representation.
        The return value is a TokenString (see mbsp.py).
    """
    string = ""
    dom = parseString(xml)
    # Traverse all the <sentence> elements in the XML.
    for sentence in dom.getElementsByTagName(XML_SENTENCE):
        _anchors.clear()      # Populated by calling _parse_tokens().
        _attachments.clear()  # Populated by calling _parse_tokens().
        # Parse the language from <sentence language="">.
        language = attr(sentence, XML_LANGUAGE, "en")
        # Parse the token tag format from <sentence token="">.
        # This information is returned in TokenString.tags,
        # so the format and order of the token tags is retained when exporting/importing as XML.
        format = attr(sentence, XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
        format = not isinstance(format, basestring) and format or format.replace(" ", "").split(",")
        # Traverse all <chunk> and <chink> elements in the sentence.
        # Find the <word> elements inside and create tokens.
        tokens = []
        for chunk in children(sentence):
            tokens.extend(_parse_tokens(chunk, format))
        # Attach PNP's to their anchors.
        # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).
        # The keys correspond to the keys in _attachments, which have linked PNP chunks.
        if ANCHOR in format:
            A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR)
            for id in sorted(A.keys()):
                for token in A[id]:
                    token[i] += "-" + "-".join(["A" + str(a + p) for p in range(len(P[id]))])
                    token[i] = token[i].strip("O-")
                for p, pnp in enumerate(P[id]):
                    for token in pnp:
                        token[i] += "-" + "P" + str(a + p)
                        token[i] = token[i].strip("O-")
                a += len(P[id])
        # Collapse the tokens to string.
        # Separate multiple sentences with a new line.
        tokens = ["/".join([tag for tag in token]) for token in tokens]
        tokens = " ".join(tokens)
        string += tokens + "\n"
    # Return a TokenString, which is a unicode string that transforms easily
    # into a plain str, a list of tokens, or a Sentence.
    try:
        if MBSP:
            from mbsp import TokenString
            return TokenString(string, tags=format, language=language)
    except:
        return TaggedString(string, tags=format, language=language)
e3ccf32bcc148b2c6b9b44259d881f336720fde5
14,782
def join_ad_domain_by_taking_over_existing_computer_using_session(
        ad_session: ADSession, computer_name=None, computer_password=None, old_computer_password=None,
        computer_key_file_path=DEFAULT_KRB5_KEYTAB_FILE_LOCATION) -> ManagedADComputer:
    """ A fairly simple 'join a domain' function using pre-created accounts, which requires minimal input -
    an AD session. Specifying the name of the computer to takeover explicitly is also encouraged.

    Given those basic inputs, the domain's nearest controllers are automatically discovered and an account is found
    with the computer name specified. That account is then taken over so that it can be controlled by the local
    system, and kerberos keys and such are generated for it.

    By providing an AD session, one can build a connection to the domain however they so choose and then use it to
    join this computer, so you don't even need to necessarily use user credentials.

    :param ad_session: The ADSession object representing a connection with the domain to be joined.
    :param computer_name: The name of the computer to take over in the domain. This should be the sAMAccountName
                          of the computer, though if computer has a trailing $ in its sAMAccountName and that is
                          omitted, that's ok. If not specified, we will attempt to find a computer with a name
                          matching the local system's hostname.
    :param computer_password: The password to set for the computer when taking it over. If not specified, a random
                              120 character password will be generated and set.
    :param old_computer_password: The current password of the computer being taken over. If specified, the action
                                  of taking over the computer will use a "change password" operation, which is
                                  less privileged than a "reset password" operation. So specifying this reduces
                                  the permissions needed by the user specified.
    :param computer_key_file_path: The path of where to write the keytab file for the computer after taking it over.
                                   This will include keys for both user and server keys for the computer.
                                   If not specified, defaults to /etc/krb5.keytab
    :returns: A ManagedADComputer object representing the computer taken over.
    """
    # for joining a domain, default to using the local machine's hostname as a computer name
    if computer_name is None:
        computer_name = get_system_default_computer_name()
        logger.warning('No computer name was specified for joining via computer takeover. This is unusual and relies '
                       'implicitly on the computers in the domain matching this library in terms of how they decide '
                       'on the computer name, and may cause errors. The name being used is %s', computer_name)

    logger.info('Attempting to join computer to domain %s by taking over account with name %s',
                ad_session.get_domain_dns_name(), computer_name)
    computer = ad_session.take_over_existing_computer(computer_name, computer_password=computer_password,
                                                      old_computer_password=old_computer_password)
    if computer_key_file_path is not None:
        computer.write_full_keytab_file_for_computer(computer_key_file_path)
    logger.info('Successfully joined computer to domain %s by taking over computer with name %s',
                ad_session.get_domain_dns_name(), computer_name)
    return computer
a9fea1126fd775c85cf9a354044315eef03a4ffb
14,783
def peak_sound_pressure(pressure, axis=-1):
    """
    Peak sound pressure :math:`p_{peak}` is the greatest absolute sound pressure during a certain time interval.

    :param pressure: Instantaneous sound pressure :math:`p`.
    :param axis: Axis.

    .. math:: p_{peak} = \\mathrm{max}(|p|)

    """
    return np.abs(pressure).max(axis=axis)
e3beb4d67dc414fa7aabdc7a9c4a06a5ddb371ab
14,784
def field_filter_query(field, values):
    """Need to define work-around for full-text fields."""
    values = ensure_list(values)
    if not len(values):
        return {'match_all': {}}
    if field in ['_id', 'id']:
        return {'ids': {'values': values}}
    if len(values) == 1:
        if field in ['names', 'addresses']:
            field = '%s.text' % field
            return {'match_phrase': {field: values[0]}}
        return {'term': {field: values[0]}}
    return {'terms': {field: values}}
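A hedged usage sketch showing the query shapes produced for different inputs. The snippet relies on an `ensure_list` helper that is not shown; the stub below is an assumption used only so the example runs.

def ensure_list(values):
    # stand-in for the helper assumed by the snippet above
    if values is None:
        return []
    return values if isinstance(values, (list, tuple)) else [values]

print(field_filter_query("schema", "Person"))
# {'term': {'schema': 'Person'}}

print(field_filter_query("names", ["ACME Corp"]))
# {'match_phrase': {'names.text': 'ACME Corp'}}

print(field_filter_query("countries", ["de", "fr"]))
# {'terms': {'countries': ['de', 'fr']}}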
54d3b394e8dc38b2a0ead3b9d5a81da9f5f6915a
14,785
import logging


def compute_investigation_stats(inv, exact=True, conf=0.95, correct=True):
    """
    Compute all statistics for all protected features of an investigation

    Parameters
    ----------
    inv : the investigation
    exact : whether exact tests should be used
    conf : overall confidence level (1- familywise error rate)

    Returns
    -------
    all_stats: list of all statistics for the investigation
    """
    # count the number of hypotheses to test
    total_hypotheses = num_hypotheses(inv)
    logging.info('Testing %d hypotheses', total_hypotheses)

    #
    # Adjusted Confidence Level (Bonferroni)
    #
    adj_conf = 1 - (1 - conf) / total_hypotheses if correct else conf

    # statistics for all investigations
    all_stats = {sens: compute_stats(ctxts, exact, adj_conf, inv.random_state)
                 for (sens, ctxts) in sorted(inv.contexts.iteritems())}

    # flattened array of all p-values
    all_pvals = [max(stat[-1], 1e-180)
                 for sens_stats in all_stats.values()
                 for stat in sens_stats['stats']]

    # correct p-values
    if correct:
        pvals_corr = multipletests(all_pvals, alpha=1 - conf, method='holm')[1]
    else:
        pvals_corr = all_pvals

    # replace p-values by their corrected value
    idx = 0
    # iterate over all protected features for the investigation
    for (sens, sens_contexts) in inv.contexts.iteritems():
        sens_stats = all_stats[sens]['stats']
        # iterate over all contexts for a protected feature
        for i in range(len(sens_stats)):
            old_stats = sens_stats[i]
            all_stats[sens]['stats'][i] = \
                np.append(old_stats[0:-1], pvals_corr[idx])
            idx += 1

    for (sens, sens_contexts) in inv.contexts.iteritems():
        metric = sens_contexts[0].metric

        # For regression, re-form the dataframes for each context
        if isinstance(metric.stats, pd.DataFrame):
            res = all_stats[sens]
            res = pd.DataFrame(res['stats'], index=res['index'], columns=res['cols'])
            all_stats[sens] = \
                {'stats': np.array_split(res, len(res)/len(metric.stats))}

    all_stats = {sens: sens_stats['stats']
                 for (sens, sens_stats) in all_stats.iteritems()}

    return all_stats
08bf8ab5c4e985c33fdb0bd0d9dfc1dc949f4d83
14,786
def group_bars(note_list):
    """
    Returns a list of bars, where each bar is a list of notes. The
    start and end times of each note are rescaled to units of bars, and
    expressed relative to the beginning of the current bar.

    Parameters
    ----------
    note_list : list of tuples
        List of notes to group into bars.
    """
    bar_list = []
    current_bar = []
    current_bar_start_time = 0
    for raw_note in note_list:
        if raw_note[0] != -1:
            current_bar.append(raw_note)
        elif raw_note[0] == -1:
            quarter_notes_per_bar = raw_note[2] - current_bar_start_time
            current_bar_scaled = []
            for note in current_bar:
                current_bar_scaled.append((note[0], note[1],
                                           min([(note[2] - current_bar_start_time) / quarter_notes_per_bar, 1]),
                                           min([(note[3] - current_bar_start_time) / quarter_notes_per_bar, 1])))
            bar_list.append(current_bar_scaled)
            current_bar = []
            current_bar_start_time = raw_note[2]
    return bar_list
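A hedged usage sketch. The tuple layout here is inferred from the function body, not stated in the original: each note is assumed to be (pitch, velocity, start, end) in quarter-note units, and a tuple whose first element is -1 marks the end of a bar, with its third element giving the bar boundary time.

notes = [
    (60, 100, 0.0, 1.0),  # C4 on beat 1
    (64, 100, 1.0, 2.0),  # E4 on beat 2
    (-1, 0, 4.0, 4.0),    # bar marker: first bar ends at beat 4
    (67, 100, 4.0, 6.0),  # G4, first half of bar 2
    (-1, 0, 8.0, 8.0),    # bar marker: second bar ends at beat 8
]

print(group_bars(notes))
# [[(60, 100, 0.0, 0.25), (64, 100, 0.25, 0.5)], [(67, 100, 0.0, 0.5)]]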
3b12a7c7e2395caa3648abf152915ece4b325599
14,787
def get_vmexpire_id_from_ref(vmexpire_ref):
    """Parse a vmexpire reference and return the vmexpire ID

    The vmexpire ID is the right-most element of the URL

    :param vmexpire_ref: HTTP reference of the vmexpire
    :return: a string containing the ID of the vmexpire
    """
    vmexpire_id = vmexpire_ref.rsplit('/', 1)[1]
    return vmexpire_id
e90c34c8489d91fb582a4bf15f874bcb2feaea82
14,788
def create_A_and_B_state_ligand(line, A_B_state='vdwq_q'):
    """Create A and B state topology for a ligand.

    Parameters
    ----------
    line : str
        'Atom line': with atomtype, mass, charge,...
    A_B_state : str
        Interactions in the A state and in the B state.
        vdwq_vdwq: ligand fully interacting in A and B state
        vdwq_vdw: vdw interactions and electrostatics in the A_state, only vdw in the B_state
        vdw_vdwq: charge
        vdw_dummy
        dummy_vdw
        vdwq_dummy

    Returns
    -------
    text : str
        Atoms line for topology file with A and B state parameters
    """
    atom_number = line.split()[0]
    atom_type = line.split()[1]
    residue_nr = line.split()[2]
    residue_name = line.split()[3]
    atom_name = line.split()[4]
    cgnr = line.split()[5]
    charge = line.split()[6]
    mass = line.split()[7]

    # A and B state are the same
    if A_B_state == 'vdwq_vdwq':
        text = line.split(';')[0] + ' ' + atom_type + ' ' + charge + ' ' + mass + '\n'
    # Turn on vdw
    elif A_B_state == 'dummy_vdw':
        charge = str(0.0)
        text = ' ' + atom_number + ' d%s ' % atom_type + ' ' + residue_nr + ' ' + \
            residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + ' ' + \
            atom_type + ' ' + charge + ' ' + mass + '\n'
    # Turn vdw off
    elif A_B_state == 'vdw_dummy':
        charge = str(0.0)
        text = ' ' + atom_number + ' ' + atom_type + ' ' + residue_nr + ' ' + \
            residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + \
            ' d%s ' % atom_type + ' ' + charge + ' ' + mass + '\n'
    # Turn vdw and electrostatics off
    elif A_B_state == 'vdwq_dummy':
        text = line.split(';')[0] + ' ' + ' d%s ' % atom_type + ' 0.0 ' + mass + '\n'
    # uncharge
    elif A_B_state == 'vdwq_vdw':
        text = line.split(';')[0] + ' ' + ' ' + atom_type + ' 0.0 ' + mass + '\n'
    # charge
    elif A_B_state == 'vdw_vdwq':
        text = ' ' + atom_number + ' ' + atom_type + ' ' + residue_nr + ' ' + \
            residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + str(0.0) + ' ' + \
            mass + ' ' + atom_type + ' ' + charge + ' ' + mass + '\n'
    # Posre off
    elif A_B_state == 'dummy':
        charge = str(0.0)
        text = ' ' + atom_number + ' d%s ' % atom_type + ' ' + residue_nr + ' ' + \
            residue_name + ' ' + atom_name + ' ' + cgnr + ' ' + charge + ' ' + mass + ' ' + '\n'
    # Turn vdw and electrostatics off
    elif A_B_state == 'vdwq':
        text = line.split(';')[0] + '\n'
    else:
        print('Transformation not implemented yet')

    return text
3ac16da60de68013b20ea3f1a6ce3173cd4871a1
14,789
def clean_params(estimator, n_jobs=None):
    """clean unwanted hyperparameter settings

    If n_jobs is not None, set it into the estimator, if applicable

    Return
    ------
    Cleaned estimator object
    """
    ALLOWED_CALLBACKS = (
        "EarlyStopping",
        "TerminateOnNaN",
        "ReduceLROnPlateau",
        "CSVLogger",
        "None",
    )

    estimator_params = estimator.get_params()

    for name, p in estimator_params.items():
        # all potential unauthorized file write
        if name == "memory" or name.endswith("__memory") or name.endswith("_path"):
            new_p = {name: None}
            estimator.set_params(**new_p)
        elif n_jobs is not None and (name == "n_jobs" or name.endswith("__n_jobs")):
            new_p = {name: n_jobs}
            estimator.set_params(**new_p)
        elif name.endswith("callbacks"):
            for cb in p:
                cb_type = cb["callback_selection"]["callback_type"]
                if cb_type not in ALLOWED_CALLBACKS:
                    raise ValueError("Prohibited callback type: %s!" % cb_type)

    return estimator
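A hedged usage sketch with a scikit-learn pipeline; the pipeline, its steps, and the cache path are illustrative assumptions and not from the original snippet.

from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline(
    [("scale", StandardScaler()), ("clf", RandomForestClassifier(n_jobs=8))],
    memory="/tmp/cache",  # a file-writing setting that clean_params wipes
)

cleaned = clean_params(pipe, n_jobs=1)
print(cleaned.get_params()["memory"])       # None
print(cleaned.get_params()["clf__n_jobs"])  # 1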
da639b03ea7dec534130105571c1623128e99143
14,790
def getValidOauth2TxtCredentials(force_refresh=False, api=None):
    """Gets OAuth2 credentials which are guaranteed to be fresh and valid."""
    try:
        credentials = auth.get_admin_credentials(api)
    except gam.auth.oauth.InvalidCredentialsFileError:
        doRequestOAuth()  # Make a new request which should store new creds.
        return getValidOauth2TxtCredentials(force_refresh=force_refresh, api=api)
    if credentials.expired or force_refresh:
        request = transport.create_request()
        credentials.refresh(request)
    return credentials
ff29fe312fe6ca875e53c56482033ca5ccceb71c
14,791
import itertools


def get_combinations_sar(products, aoi):
    """Get a dataframe with all possible combinations of products
    and calculate their coverage of the AOI and the temporal
    distance between the products.

    Parameters
    ----------
    products : dataframe
        Search results with product identifiers as index.
    aoi : shapely geometry
        Area of interest (lat/lon).

    Returns
    -------
    combinations : dataframe
        Double-indexed output dataframe. Only combinations that
        contain the AOI are returned (with a 1% margin).
    """
    couples = list(itertools.combinations(products.index, 2))
    combinations = pd.DataFrame(index=pd.MultiIndex.from_tuples(couples))

    for id_a, id_b in couples:
        footprint_a = wkt.loads(products.loc[id_a].footprint)
        footprint_b = wkt.loads(products.loc[id_b].footprint)
        footprint = footprint_a.union(footprint_b)
        combinations.at[(id_a, id_b), 'date_a'] = products.loc[id_a].date
        combinations.at[(id_a, id_b), 'date_b'] = products.loc[id_b].date
        combinations.at[(id_a, id_b), 'cover'] = coverage(aoi, footprint)

    combinations = combinations[combinations.cover >= 99.]
    combinations['dist'] = combinations.date_b - combinations.date_a
    combinations.dist = combinations.dist.apply(lambda x: abs(x.days))
    combinations = combinations.sort_values(by='dist', ascending=True)

    return combinations
045fcf59e9dae17a17d77cb945d2fe63af01e7ae
14,792
import re


def sample(s, n):
    """Show a sample of string s centered at position n"""
    start = max(n - 8, 0)
    finish = min(n + 24, len(s))
    return re.escape(s[start:finish])
565f69224269ed7f5faa538d40ce277714144577
14,793
def getNeededLibraries(binary_filepath):
    """
    Get all libraries given binary depends on.
    """
    if False:
        return getNeededLibrariesLDD(binary_filepath)
    else:
        return getNeededLibrariesOBJDUMP(binary_filepath)
40fcb08fac7877f97cb9fa9f6f198e58c64fe492
14,794
from typing import List


def load_transformer(input_paths: List[str], input_type: str = None) -> Transformer:
    """
    Creates a transformer for the appropriate file type and loads the data into it from file.
    """
    if input_type is None:
        input_types = [get_type(i) for i in input_paths]
        for t in input_types:
            if input_types[0] != t:
                error(
                    """
                    Each input file must have the same file type.
                    Try setting the --input-type parameter to enforce a single type.
                    """
                )
        input_type = input_types[0]

    transformer_constructor = get_transformer(input_type)
    if transformer_constructor is None:
        error('Inputs do not have a recognized type: ' + str(get_file_types()))

    t = transformer_constructor()
    for i in input_paths:
        t.parse(i, input_type)
    t.report()
    return t
55eab62cdf5293ad03441fc91663383adcf12da7
14,795
from ocs_ci.ocs.platform_nodes import AWSNodes


def delete_and_create_osd_node_aws_upi(osd_node_name):
    """
    Unschedule, drain and delete the osd node, and create a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for AWS UPI.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name
    """
    osd_node = get_node_objs(node_names=[osd_node_name])[0]
    az = get_node_az(osd_node)

    aws_nodes = AWSNodes()
    stack_name_of_deleted_node = aws_nodes.get_stack_name_of_node(osd_node_name)

    remove_nodes([osd_node])

    log.info(f"name of deleted node = {osd_node_name}")
    log.info(f"availability zone of deleted node = {az}")
    log.info(f"stack name of deleted node = {stack_name_of_deleted_node}")

    if config.ENV_DATA.get("rhel_workers"):
        node_type = constants.RHEL_OS
    else:
        node_type = constants.RHCOS

    log.info("Preparing to create a new node...")
    node_conf = {"stack_name": stack_name_of_deleted_node}
    new_node_names = add_new_node_and_label_upi(node_type, 1, node_conf=node_conf)

    return new_node_names[0]
c376b8b499a9897723962e5af30984eb4d9f06fa
14,796
import math


def encode_integer_compact(value: int) -> bytes:
    """Encode an integer with signed VLQ encoding.

    :param int value: The value to encode.
    :return: The encoded integer.
    :rtype: bytes
    """
    if value == 0:
        return b"\0"

    if value < 0:
        sign_bit = 0x40
        value = -value
    else:
        sign_bit = 0

    n_bits = value.bit_length()
    n_bytes = 1 + int(math.ceil((n_bits - 6) / 7))

    buf = bytearray(n_bytes)
    for i in range(n_bytes - 1, 0, -1):
        buf[i] = 0x80 | (value & 0x7F)
        value >>= 7

    buf[0] = 0x80 | sign_bit | (value & 0x3F)
    buf[-1] &= 0x7F

    return bytes(buf)
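A few values worked through by hand from the rules in the function (sign bit 0x40 in the first byte, continuation bit 0x80 on every byte except the last), just to make the byte layout concrete.

assert encode_integer_compact(0) == b"\x00"
assert encode_integer_compact(1) == b"\x01"
assert encode_integer_compact(-1) == b"\x41"
assert encode_integer_compact(64) == b"\x80\x40"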
daf9ed4a794754a3cd402e8cc4c3e614857941fe
14,797
def kin_phos_query(kin_accession):
    """
    Query to pull related phosphosites using kinase accession

    :param kin_accession: string kinase accession
    :return: Flask_Table Phosphosite_results object
    """
    session = create_sqlsession()
    q = session.query(Kinase).filter_by(kin_accession=kin_accession)
    kin = q.first()
    # subset of information about substrate phosphosites sites.
    subsets = kin.kin_phosphorylates
    table = Phosphosite_results(subsets)
    session.close()
    return table
94f5f7d987dface90ff5d061525d2277173ed271
14,798
def max_surplus(redemptions, costs, traders):
    """
    Calculates the maximum possible surplus
    """
    surplus = 0
    transactions = 0.5 * traders
    for redemption, cost in zip(redemptions, costs):
        if redemption >= cost:
            surplus += ((redemption - cost) * transactions)
    return surplus
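A small worked example with made-up numbers: with 4 traders there are 0.5 * 4 = 2 transactions per matched pair, and only pairs where the redemption covers the cost contribute.

redemptions = [10.0, 8.0, 5.0]
costs = [4.0, 6.0, 7.0]
traders = 4

# Pairs (10, 4) and (8, 6) trade; (5, 7) does not.
# Surplus = (10 - 4) * 2 + (8 - 6) * 2 = 16.0
print(max_surplus(redemptions, costs, traders))  # 16.0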
6dd452de1b8726c475c9b95d8c24a2f57fe71516
14,799