Dataset schema: content, string (length 35 to 762k); sha1, string (length 40); id, int64 (0 to 3.66M).
def getLambdaFasta():
    """
    Returns the filename of the FASTA of the lambda phage reference.
    """
    return _getAbsPath('lambdaNEB.fa')
4cb351d874087da71d8f726802a5bb86438dacd1
15,006
def Rotation_ECL_EQD(time):
    """Calculates a rotation matrix from ecliptic J2000 (ECL) to equatorial of-date (EQD).

    This is one of the family of functions that returns a rotation matrix
    for converting from one orientation to another.
    Source: ECL = ecliptic system, using equator at J2000 epoch.
    Target: EQD = equatorial system, using equator of date.

    Parameters
    ----------
    time : Time
        The date and time of the desired equator.

    Returns
    -------
    RotationMatrix
        A rotation matrix that converts ECL to EQD.
    """
    rot = Rotation_EQD_ECL(time)
    return InverseRotation(rot)
d140e2c03e62fba2168faf9c3599afa6e41bb774
15,008
def _make_players_away(team_size):
    """Construct away team of `team_size` players."""
    away_players = []
    for i in range(team_size):
        away_players.append(
            Player(Team.AWAY, _make_walker("away%d" % i, i, _RGBA_RED)))
    return away_players
b0beff6f06fc52f870c143c01c14d18eb77d0cc5
15,009
import json


def store_barbican_secret_for_coriolis(
        barbican, secret_info, name='Coriolis Secret'):
    """ Stores secret connection info in Barbican for Coriolis.

    :param barbican: barbican_client.Client instance
    :param secret_info: secret info to store
    :return: the HREF (URL) of the newly-created Barbican secret
    """
    payload = json.dumps(secret_info)
    secret = barbican.secrets.create(
        name=name,
        payload=payload,
        payload_content_type='application/json')
    secret_ref = secret.store()
    return secret_ref
218bf941203dd12bc78fc7a87d6a2f9f21761d57
15,010
import random


def padding():
    """Return 16-200 random bytes"""
    return URANDOM(random.randrange(16, PAD_MAX))
65a52c19c3b39344bd1959c58f2cd7950b0a19e4
15,012
def find():
    """Prints user message and returns the number of HP-49 connected.
    """
    hps = com.find()
    if len(hps) == 0:
        print("No HP49-compatible devices connected.")
        sys.stdout.flush()
    else:
        print("Number of HP49-compatible devices: %d" % len(hps))
        sys.stdout.flush()
    return len(hps)
8530fc9d6d904e8c4fe061c237af57f9874a2ea2
15,014
def get_children_templates(pvc_enabled=False):
    """
    Define a list of all resources that should be created.
    """
    children_templates = {
        "service": "service.yaml",
        "ingress": "ingress.yaml",
        "statefulset": "statefulset.yaml",
        "configmap": "configmap.yaml",
        "secret": "secret.yaml",
    }
    if pvc_enabled:
        children_templates["pvc"] = "pvc.yaml"
    return children_templates
25db24b03542b1365529bbf1814e2fb801337022
15,015
def sort_as_int(environment, value, reverse=False, attribute=None):
    """Sort collection after converting the attribute value to an int"""
    def convert_to_int(x):
        val = str(x)
        # Test if this is a string representation of a float.
        # This is what the copy rig does and it's annoying
        if '.' in val:
            val = float(val)
        return int(val)

    key_func = make_attrgetter(
        environment, attribute,
        postprocess=convert_to_int
    )
    return sorted(value, key=key_func, reverse=reverse)
13e7727d1337bbfddec1a0661552c51d7015e58b
15,016
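A minimal usage sketch for sort_as_int above, calling it directly with a real jinja2.Environment; it assumes the snippet's make_attrgetter (as in jinja2.filters) is importable. In a template it would normally be registered as an environment filter; the sample data here is illustrative:

    from jinja2 import Environment

    env = Environment()
    items = [{'size': '10'}, {'size': '9.0'}, {'size': '2'}]
    ordered = sort_as_int(env, items, attribute='size')
    print([d['size'] for d in ordered])  # ['2', '9.0', '10'] (numeric, not lexicographic)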
def get_pretty_table_for_item(item, output_fields):
    """
    Build a PrettyTable of Attribute/Value rows for the given item,
    restricted to the attributes listed in output_fields.
    """
    x = PrettyTable(["Attribute", "Value"])
    attrs = _filter_attributes(item.get_attributes(), output_fields)
    for attr in attrs:
        row = []
        row.append(attr)
        row.append(getattr(item, attr))
        x.add_row(row)
    return x
48d7b4c1a53884dc65de8da1167762cbf0143d2c
15,017
def create_t1_based_unwarp(name='unwarp'):
    """
    Unwarp an fMRI time series based on non-linear registration to T1.
    NOTE: AS IT STANDS THIS METHOD DID NOT PRODUCE ACCEPTABLE RESULTS
    IF BRAIN COVERAGE IS NOT COMPLETE ON THE EPI IMAGE.
    ALSO: NEED TO ADD AUTOMATIC READING OF EPI RESOLUTION TO GET
    """

    unwarpflow = pe.Workflow(name=name)
    inputnode = pe.Node(interface=util.IdentityInterface(fields=['epi', 'T1W']),
                        name='inputspec')
    outputnode = pe.Node(interface=util.IdentityInterface(fields=[
        'unwarped_func', 'warp_files']), name='outputspec')

    tmedian = pe.Node(interface=ImageMaths(), name='tmedian')
    tmedian.inputs.op_string = '-Tmedian'
    epi_brain_ext = pe.Node(interface=util.Function(function=epi_brain_extract,
                                                    input_names=['in_file'],
                                                    output_names=['out_vol', 'out_mask']),
                            name='epi_brain_ext')

    fast_debias = pe.Node(interface=FAST(), name='FAST_debias')
    fast_debias.inputs.output_biascorrected = True

    robex = pe.Node(interface=util.Function(function=my_robex,
                                            input_names=['in_file'],
                                            output_names=['out_file', 'out_mask']),
                    name='robex')

    downsample_T1 = pe.Node(MRIConvert(), name='downsample_dti')
    downsample_T1.inputs.vox_size = (3.438, 3.438, 3.000)
    downsample_T1.inputs.out_type = 'niigz'

    contrast_invert = pe.Node(interface=util.Function(function=invert_contrast,
                                                      input_names=['in_t1_brain', 'in_b0_brain'],
                                                      output_names=['out_fn']),
                              name='contrast_invert')
    ants_syn = pe.Node(interface=util.Function(function=my_ants_registration_syn,
                                               input_names=['in_T1W', 'in_epi'],
                                               output_names=['out_transforms']),
                       name='ants_syn')
    ants_warp = pe.Node(interface=WarpTimeSeriesImageMultiTransform(),
                        name='ants_warp')

    '''connections'''
    # unwarpflow.connect(inputnode, 'T1W', robex, 'in_file')
    unwarpflow.connect(inputnode, 'T1W', fast_debias, 'in_files')
    # unwarpflow.connect(robex, 'out_file', fast_debias, 'in_files')
    unwarpflow.connect(fast_debias, 'restored_image', robex, 'in_file')
    # unwarpflow.connect(fast_debias, 'restored_image', downsample_T1, 'in_file')
    unwarpflow.connect(robex, 'out_file', downsample_T1, 'in_file')
    unwarpflow.connect(downsample_T1, 'out_file', contrast_invert, 'in_t1_brain')
    unwarpflow.connect(inputnode, 'epi', tmedian, 'in_file')
    unwarpflow.connect(tmedian, 'out_file', epi_brain_ext, 'in_file')
    unwarpflow.connect(epi_brain_ext, 'out_vol', contrast_invert, 'in_b0_brain')
    unwarpflow.connect(contrast_invert, 'out_fn', ants_syn, 'in_T1W')
    unwarpflow.connect(epi_brain_ext, 'out_vol', ants_syn, 'in_epi')

    unwarpflow.connect(inputnode, 'epi', ants_warp, 'input_image')
    unwarpflow.connect(contrast_invert, 'out_fn', ants_warp, 'reference_image')
    unwarpflow.connect(ants_syn, 'out_transforms', ants_warp, 'transformation_series')
    # route the transforms and the unwarped series to the output node
    unwarpflow.connect(ants_syn, 'out_transforms', outputnode, 'warp_files')
    unwarpflow.connect(ants_warp, 'output_image', outputnode, 'unwarped_func')

    return unwarpflow
cbf3180e2899ac6314cde3c30ca3619ca4d3e125
15,018
def get_qnode(caching, diff_method="finite-diff", interface="autograd"):
    """Creates a simple QNode"""
    dev = qml.device("default.qubit.autograd", wires=3)

    @qnode(dev, caching=caching, diff_method=diff_method, interface=interface)
    def qfunc(x, y):
        qml.RX(x, wires=0)
        qml.RX(y, wires=1)
        qml.CNOT(wires=[0, 1])
        return expval(qml.PauliZ(wires=1))

    return qfunc
3a8cb0f47e8846338338d21896e59cba475e8351
15,019
def segment_relative_timestamps(segment_start, segment_end, timestamps):
    """ Converts timestamps for a global recording to timestamps in a segment given the segment boundaries

    Args:
        segment_start (float): segment start time in seconds
        segment_end (float): segment end time in seconds
        timestamps (list): List with length the number of labelled classes. Each element of the list is an
            array of start and end time of labelled portion of the recording.

    Returns:
        List of the timestamps of labelled portion in the segment, with respect to the segment.

    Examples:
        >>> timestamps = [np.array([0.0, 1.0, 2.0, 9.0]), np.array([0.5, 1.5]), np.array([3.0, 6.0]),
        ...               np.array([]), np.array([7.0, 8.0])]
        >>> segment_relative_timestamps(3.3, 6.6, timestamps)
        [array([[0. , 3.3]], dtype=float32), array([], dtype=float32), array([[0. , 2.7]], dtype=float32),
         array([], dtype=float32), array([], dtype=float32)]
    """
    segment_timestamps = []
    # loop over the classes
    for c_timestamps in timestamps:
        if c_timestamps.size > 0:  # "if there are timestamps"
            inside_timestamps = []
            # For all timestamps, look if they fall in the segment. If they do, convert them to segment times.
            for (start, end) in zip(c_timestamps[::2], c_timestamps[1::2]):
                if start <= segment_end and end >= segment_start:
                    inside_timestamps.append((np.max([segment_start, start]) - segment_start,
                                              np.min([end, segment_end]) - segment_start))
            segment_timestamps.append(np.asarray(inside_timestamps, dtype=np.float32))
        else:
            segment_timestamps.append(np.array([], dtype=np.float32))
    return segment_timestamps
743938adfb8ee1450c2140f76dbbdfb88c2a3c7f
15,020
def compare_dataframes_mtmc(gts, ts):
    """Compute ID-based evaluation metrics for MTMCT

    Return:
        df (pandas.DataFrame): Results of the evaluations in a df with only the 'idf1', 'idp', and 'idr' columns.
    """
    gtds = []
    tsds = []
    gtcams = gts['CameraId'].drop_duplicates().tolist()
    tscams = ts['CameraId'].drop_duplicates().tolist()
    maxFrameId = 0

    for k in sorted(gtcams):
        gtd = gts.query('CameraId == %d' % k)
        gtd = gtd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
        # max FrameId in gtd only
        mfid = gtd['FrameId'].max()
        gtd['FrameId'] += maxFrameId
        gtd = gtd.set_index(['FrameId', 'Id'])
        gtds.append(gtd)

        if k in tscams:
            tsd = ts.query('CameraId == %d' % k)
            tsd = tsd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
            # max FrameId among both gtd and tsd
            mfid = max(mfid, tsd['FrameId'].max())
            tsd['FrameId'] += maxFrameId
            tsd = tsd.set_index(['FrameId', 'Id'])
            tsds.append(tsd)

        maxFrameId += mfid

    # compute multi-camera tracking evaluation stats
    multiCamAcc = mm.utils.compare_to_groundtruth(
        pd.concat(gtds), pd.concat(tsds), 'iou')
    metrics = list(mm.metrics.motchallenge_metrics)
    metrics.extend(['num_frames', 'idfp', 'idfn', 'idtp'])
    mh = mm.metrics.create()
    summary = mh.compute(multiCamAcc, metrics=metrics, name='MultiCam')
    return summary
002333c2be971a453727f43c257b46a99b0451cb
15,021
def fetch_xml(url):
    """
    Fetch a URL and parse it as XML using ElementTree
    """
    resp = urllib2.urlopen(url)
    tree = ET.parse(resp)
    return tree
d0f4f5b7fe19692675cba1254f6bfa63f07e45a5
15,023
def update_hirsch_index(depth_node_dict, minimum_hirsch_value, maximum_hirsch_value):
    """
    Calculates the Hirsch index for a radial tree.

    Note that we have a slightly different definition of the Hirsch index to the one found in:
    Gómez, V., Kaltenbrunner, A., & López, V. (2008, April). Statistical analysis of the social
    network and discussion threads in slashdot. In Proceedings of the 17th international
    conference on World Wide Web (pp. 645-654). ACM.

    Inputs:  - depth_node_dict: A map from node depth to node ids as a python dictionary.
             - minimum_hirsch_value: This is the previous Hirsch value.
             - maximum_hirsch_value: This is the depth of the latest node added to the tree.

    Output:  - hirsch: The Hirsch index.
    """
    # This is the previous hirsch index value.
    hirsch_index = minimum_hirsch_value
    if maximum_hirsch_value > minimum_hirsch_value:
        adopters = depth_node_dict[maximum_hirsch_value]
        width = len(adopters)
        if width >= maximum_hirsch_value:
            hirsch_index = maximum_hirsch_value
    return hirsch_index
2fdf5ca6aa216eacb3f18cd2f91875d02e0740ea
15,024
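A tiny walk-through of update_hirsch_index above with illustrative data: three nodes sit at depth 2, so the tree is at least 2 nodes wide at depth 2 and the index can rise from 1 to 2:

    depth_node_dict = {1: {'a'}, 2: {'b', 'c', 'd'}}
    print(update_hirsch_index(depth_node_dict, minimum_hirsch_value=1,
                              maximum_hirsch_value=2))  # 2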
def get_E_E_fan_H_d_t(P_fan_rtd_H, V_hs_vent_d_t, V_hs_supply_d_t, V_hs_dsgn_H, q_hs_H_d_t):
    """(37)

    Args:
        P_fan_rtd_H: Fan power consumption when operating at rated heating capacity (W)
        V_hs_vent_d_t: General-ventilation share of the heat source unit airflow at time t on date d (m3/h)
        V_hs_supply_d_t: Supply airflow of the heat source unit at time t on date d (m3/h)
        V_hs_dsgn_H: Design airflow for heating (m3/h)
        q_hs_H_d_t: Average hourly heating capacity of the heat source unit at time t on date d (-)

    Returns:
        Hourly fan electricity consumption attributable to the heating system at time t on date d (kWh/h)
    """
    f_SFP = get_f_SFP()
    E_E_fan_H_d_t = np.zeros(24 * 365)

    a = (P_fan_rtd_H - f_SFP * V_hs_vent_d_t) \
        * ((V_hs_supply_d_t - V_hs_vent_d_t) / (V_hs_dsgn_H - V_hs_vent_d_t)) * 10 ** (-3)

    E_E_fan_H_d_t[q_hs_H_d_t > 0] = np.clip(a[q_hs_H_d_t > 0], 0, None)

    return E_E_fan_H_d_t
0e2ceb9f8fbedd95d44f1c307cfb0d9ea17ea370
15,027
def load_func(func_string):
    """
    If the given setting is a string import notation,
    then perform the necessary import or imports.
    """
    if func_string is None:
        return None
    elif isinstance(func_string, str):
        return import_from_string(func_string)
    return func_string
99fdf6889936c95d7680ed5a70a2095474e02a9b
15,028
def normalize(features):
    """
    Scale data in provided series into [0,1] range.

    :param features: series (or array) of numeric values to scale
    :return: the scaled series
    """
    return (features - features.min()) / (features.max() - features.min())
a85d77e37e71c732471d7dcd42ae1aef2181f6dc
15,029
def get_gitlab_template_version(response):
    """Return version number of gitlab template."""
    return glom(response, 'ref', default=False).replace('refs/tags/', '')
95e1be93ef6f14d24757e07d0ba644ce89bc0dc9
15,030
def getConfigXmlString(version, name, protocol, user, host, port, path):
    """! Arguments -> XML String. """
    tag_root = ET.Element(TAG_ROOT)
    tag_root.set(ATTR_VERSION, version)

    tag_remote = ET.Element(TAG_REMOTE)
    tag_remote.set(ATTR_NAME, name)
    tag_root.append(tag_remote)

    appendElement(tag_remote, TAG_PROTOCOL, protocol)
    appendElement(tag_remote, TAG_USER, user)
    appendElement(tag_remote, TAG_HOST, host)
    appendElement(tag_remote, TAG_PORT, port)
    appendElement(tag_remote, TAG_PATH, path)

    return ET.tostring(tag_root)
da0546a2e276c16820e09807930c981bf7d5406c
15,031
from typing import Optional


def phase_angle(A: Entity, B: Entity, C: Entity) -> Optional[float]:
    """The orbital phase angle, between A-B-C, of the angle at B.
    i.e. the angle between the ref-hab vector and the ref-targ vector."""
    # Code from Newton Excel Bach blog, 2014, "the angle between two vectors"
    if B.name == C.name:
        return None
    AB = A.pos - B.pos
    CB = C.pos - B.pos
    return np.degrees(
        np.arctan2(AB[1], AB[0]) - np.arctan2(CB[1], CB[0])
    ) % 360
ddbbc75909977350f89748c0afdb242ed9d741b6
15,032
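A minimal check of phase_angle above, using a hypothetical stand-in for the Entity type with only the .name and .pos attributes the function actually touches:

    import numpy as np
    from dataclasses import dataclass

    @dataclass
    class Entity:  # illustrative stub, not the real entity class
        name: str
        pos: np.ndarray

    ref = Entity('ref', np.array([0.0, 0.0]))
    hab = Entity('hab', np.array([1.0, 0.0]))
    targ = Entity('targ', np.array([0.0, 1.0]))
    print(phase_angle(hab, ref, targ))  # 270.0, i.e. hab is 90 degrees clockwise of targ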
def upper(string):  # pragma: no cover
    """Upper."""
    new_string = []
    for c in string:
        o = ord(c)
        new_string.append(chr(o - 32) if LC_A <= o <= LC_Z else c)
    return ''.join(new_string)
c13b1cc49a608bcc65a3afa87ca94f73f0deeb0b
15,034
def getid(obj):
    """Return id if argument is a Resource.

    Abstracts the common pattern of allowing both an object or an
    object's ID (UUID) as a parameter when dealing with relationships.
    """
    try:
        if obj.uuid:
            return obj.uuid
    except AttributeError:
        # nosec(cjschaef): 'obj' doesn't contain attribute
        # 'uuid', return attribute 'id' or the 'obj'
        pass
    try:
        return obj.id
    except AttributeError:
        return obj
43160e6dd61ddc2e8e0559925bf2a35def79eb3f
15,035
from datetime import datetime
import time


def dateToUsecs(datestring):
    """Convert Date String to Unix Epoch Microseconds"""
    dt = datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(dt.timetuple())) * 1000000
cba081ae63523c86572463249b4324f2183fcaaa
15,036
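A quick usage sketch for dateToUsecs above; note that the result depends on the local timezone, since time.mktime interprets the struct_time as local time:

    print(dateToUsecs("1970-01-01 00:00:00"))  # 0 when run under UTC
    print(dateToUsecs("2020-06-01 12:30:00"))  # 1591014600000000 under UTC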
def _compute_applied_axial(R_od, t_wall, m_stack, section_mass):
    """Compute axial stress for spar from z-axis loading

    INPUTS:
    ----------
    params       : dictionary of input parameters
    section_mass : float (scalar/vector), mass of each spar section as axial loading increases with spar depth

    OUTPUTS:
    -------
    stress       : float (scalar/vector), axial stress
    """
    R = R_od - 0.5 * t_wall
    # Add in weight of sections above it
    axial_load = m_stack + np.r_[0.0, np.cumsum(section_mass[:-1])]
    # Divide by shell cross sectional area to get stress
    return gravity * axial_load / (2.0 * np.pi * R * t_wall)
35c9a92b22b3639b6d1236ba45dd797388e25b07
15,037
def categorical(p, rng=None, size=()):
    """Draws i with probability p[i]"""
    if len(p) == 1 and isinstance(p[0], np.ndarray):
        p = p[0]
    p = np.asarray(p)

    if size == ():
        size = (1,)
    elif isinstance(size, (int, np.number)):
        size = (size,)
    else:
        size = tuple(size)

    if size == (0,):
        return np.asarray([])
    assert len(size)

    if p.ndim == 0:
        raise NotImplementedError()
    elif p.ndim == 1:
        n_draws = int(np.prod(size))
        sample = rng.multinomial(n=1, pvals=p, size=int(n_draws))
        assert sample.shape == size + (len(p),)
        rval = np.dot(sample, np.arange(len(p)))
        rval.shape = size
        return rval
    elif p.ndim == 2:
        n_draws_, n_choices = p.shape
        (n_draws,) = size
        assert n_draws == n_draws_
        rval = [
            np.where(rng.multinomial(pvals=p[ii], n=1))[0][0]
            for ii in range(n_draws)
        ]
        rval = np.asarray(rval)
        rval.shape = size
        return rval
    else:
        raise NotImplementedError()
1cee8c996206284f36f3bf72f8c6729037489f4d
15,039
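A quick draw from categorical above with a seeded NumPy RandomState, which provides the .multinomial method the function relies on:

    import numpy as np

    rng = np.random.RandomState(0)
    draws = categorical([0.1, 0.2, 0.7], rng=rng, size=10)
    print(draws)  # ten indices in {0, 1, 2}, with 2 appearing most often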
from typing import List


def covariance_distance(covariances: List[Covariance], x: np.ndarray) -> np.ndarray:
    """Euclidean distance of all pairs of gp_models.

    :param covariances:
    :param x:
    :return:
    """
    # For each pair of kernel matrices, compute Euclidean distance
    n_kernels = len(covariances)
    dists = np.zeros((n_kernels, n_kernels))
    for i in range(n_kernels):
        for j in range(i + 1, n_kernels):
            dists[i, j] = kernel_l2_dist(covariances[i].raw_kernel, covariances[j].raw_kernel, x)
    # Make symmetric (only the upper triangle was filled, so mirror it)
    dists = dists + dists.T
    return dists
7bfe0337c89b8476285797d1fdec394ffcd04479
15,042
def create_inputs(im, im_info, model_arch='YOLO'):
    """generate input for different model type

    Args:
        im (np.ndarray): image (np.ndarray)
        im_info (dict): info of image
        model_arch (str): model type

    Returns:
        inputs (dict): input of model
    """
    inputs = {}
    inputs['image'] = im
    origin_shape = list(im_info['origin_shape'])
    resize_shape = list(im_info['resize_shape'])
    scale = im_info['scale']
    if 'YOLO' in model_arch:
        im_size = np.array([origin_shape]).astype('int32')
        inputs['im_size'] = im_size
    elif 'RetinaNet' in model_arch:
        im_info = np.array([resize_shape + [scale]]).astype('float32')
        inputs['im_info'] = im_info
    elif 'RCNN' in model_arch:
        im_info = np.array([resize_shape + [scale]]).astype('float32')
        im_shape = np.array([origin_shape + [1.]]).astype('float32')
        inputs['im_info'] = im_info
        inputs['im_shape'] = im_shape
    return inputs
940563f6c48cfe54e328339b0efcd44e03ad67d8
15,043
def multiplex(n, q, **kwargs):
    """ Convert one queue into several equivalent Queues

    >>> q1, q2, q3 = multiplex(3, in_q)
    """
    out_queues = [Queue(**kwargs) for i in range(n)]

    def f():
        while True:
            x = q.get()
            for out_q in out_queues:
                out_q.put(x)

    t = Thread(target=f)
    t.daemon = True
    t.start()
    return out_queues
ee9dac3506acb5159580a39d64e3cb046c44b204
15,044
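A small end-to-end demo of multiplex above, assuming the queue.Queue and threading.Thread imports the snippet relies on are in scope:

    from queue import Queue
    from threading import Thread

    in_q = Queue()
    q1, q2, q3 = multiplex(3, in_q)
    in_q.put('hello')
    print(q1.get(), q2.get(), q3.get())  # each output queue sees its own copy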
import xml.etree.ElementTree as ET


def select_project(FILENAME):
    """
    Reads the xml file FILENAME and shows its projects so the user can pick one of them.
    input FILENAME: xml file of suitable structure, located where the program scripts live
    return: the project selected by the user, as an xml subtree
    """
    tree = ET.parse(FILENAME)
    root = tree.getroot()
    print('Projects in ' + FILENAME)
    projects = []
    for i, project in enumerate(root.findall('project')):
        projects.append(project)
        print(i, end=' ')
        print('. ' + project.get('name'))
    print('Select project number:')
    choice = input()
    return projects[int(choice)]
0ef7ddd4b320e2ca577c253522f512e2802569e1
15,045
def compute_average(arr):
    """Compute average value for given matrix

    Args:
        arr (numpy array): a numpy array

    Return:
        float: average value
    """
    val_avg = np.average(arr)
    return val_avg
c69d17f53e946f693242cfd9d90877847e7c7cc6
15,046
from typing import Callable


def ta_series(func: Callable, *args, **kwargs) -> QFSeries:
    """
    Function created to allow using TA-Lib functions with QFSeries.

    Parameters
    ----------
    func
        talib function: for example talib.MA
    args
        time series arguments to the function. They are all passed as QFSeries.
        for example: 'close' or 'high, low, close' where each argument is a QFSeries.
    kwargs
        additional arguments to the function.
        for example: 'timeperiod=10' or 'timeperiod=timeperiod, matype=i'.
        All additional arguments have to be passed as keyword arguments.

    Returns
    -------
    QFSeries
        Output from the talib function encapsulated in a QFSeries
    """
    series_list = list(map(lambda series: series.values, args))
    result = func(*series_list, **kwargs)
    result = QFSeries(index=args[0].index, data=result)
    return result
c3e4e644fd3e6ce7853cbe99441fe6c8a5ca2679
15,048
def find_trendline(
    df_data: pd.DataFrame, y_key: str, high_low: str = "high"
) -> pd.DataFrame:
    """Attempts to find a trend line based on y_key column from a given stock ticker data frame.

    Parameters
    ----------
    df_data : DataFrame
        The stock ticker data frame with at least date_id, y_key columns.
    y_key : str
        Column name to base the trend line on.
    high_low: str, optional
        Either "high" or "low". High is the default.

    Returns
    -------
    DataFrame
        If a trend is successfully found,
            An updated Panda's data frame with a trend data {y_key}_trend column.
        If no trend was found,
            An original Panda's data frame
    """
    for iteration in [3, 4, 5, 6, 7]:
        df_temp = df_data.copy()
        while len(df_temp) > iteration:
            reg = stats.linregress(
                x=df_temp["date_id"],
                y=df_temp[y_key],
            )
            if high_low == "high":
                df_temp = df_temp.loc[
                    df_temp[y_key] > reg[0] * df_temp["date_id"] + reg[1]
                ]
            else:
                df_temp = df_temp.loc[
                    df_temp[y_key] < reg[0] * df_temp["date_id"] + reg[1]
                ]
        if len(df_temp) > 1:
            break

    if len(df_temp) == 1:
        return df_data

    reg = stats.linregress(
        x=df_temp["date_id"],
        y=df_temp[y_key],
    )

    df_data[f"{y_key}_trend"] = reg[0] * df_data["date_id"] + reg[1]

    return df_data
dbe995fab1436a1c212780eebf123dc39f27f234
15,049
def find_core(read, core, core_position_sum, core_position_count, start=-1):
    """
    Find the core sequence, trying "average" position first for efficiency.
    """
    if start < 0 and core_position_count > 0:
        core_position = round(core_position_sum / core_position_count)
        if len(read) > core_position + len(core):
            if read[core_position:core_position + len(core)] == core:
                return core_position
    return read.find(core, start + 1)
3a0de472194db00fac4e65a2b0e15cfa351eb70f
15,050
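A quick illustration of the fast path in find_core above: when the running sum and count average out to the true position, the linear scan is skipped entirely:

    read = "TTTTACGTGG"
    core = "ACGT"
    # average position (8 + 0) / 2 = 4, which is exactly where the core sits
    print(find_core(read, core, core_position_sum=8, core_position_count=2))  # 4
    # with no history it falls back to str.find
    print(find_core(read, core, core_position_sum=0, core_position_count=0))  # 4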
from functools import reduce


def clambda(n):
    """
    clambda(n)

    Returns Carmichael's lambda function for positive integer n.
    Relies on factoring n.
    """
    smallvalues = [1, 1, 2, 2, 4, 2, 6, 2, 6, 4, 10, 2, 12, 6, 4, 4, 16, 6, 18, 4,
                   6, 10, 22, 2, 20, 12, 18, 6, 28, 4, 30, 8, 10, 16, 12, 6, 36, 18, 12, 4,
                   40, 6, 42, 10, 12, 22, 46, 4, 42, 20, 16, 12, 52, 18, 20, 6, 18, 28, 58, 4,
                   60, 30, 6, 16, 12, 10, 66, 16, 22, 12, 70, 6, 72, 36, 20, 18, 30, 12, 78, 4,
                   54, 40, 82, 6, 16, 42, 28, 10, 88, 12, 12, 22, 30, 46, 36, 8, 96, 42, 30, 20]
    if n <= 100:
        return smallvalues[n - 1]
    factors = factor(n)
    l1 = []
    for p, e in factors:
        if p == 2 and e > 2:
            l1.append(2 ** (e - 2))
        else:
            l1.append((p - 1) * p ** (e - 1))
    return reduce(lambda a, b: lcm(a, b), l1)
0da59a30e6d7376731a868ae81aed8e1cb42e8ce
15,051
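A short sanity check of clambda above, matching the definition it implements: lambda(2^e) = 2^(e-2) for e > 2, lambda(p^e) = (p-1)p^(e-1) for odd primes p, combined by lcm over the prime powers of n:

    print(clambda(8))    # 2, since lambda(2^3) = 2^(3-2) = 2
    print(clambda(100))  # 20 = lcm(lambda(4), lambda(25)) = lcm(2, 20)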
def dashboard():
    """
    Render the dashboard template on the /dashboard route
    """
    return render_template('page/home/dashboard.html', title="Dashboard")
12e1750a6c0b90aa8fcda29b78a463805abd45f3
15,052
import json


def get_port_status(cluster, lswitch_id, port_id):
    """Retrieve the operational status of the port"""
    try:
        r = do_single_request("GET",
                              "/ws.v1/lswitch/%s/lport/%s/status" %
                              (lswitch_id, port_id),
                              cluster=cluster)
        r = json.loads(r)
    except NvpApiClient.ResourceNotFound as e:
        LOG.error(_("Port not found, Error: %s"), str(e))
        raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
    except NvpApiClient.NvpApiException as e:
        raise exception.QuantumException()
    if r['link_status_up'] is True:
        return constants.PORT_STATUS_ACTIVE
    else:
        return constants.PORT_STATUS_DOWN
f5c6fdf7d23fef17f402525cbfe9c3892012e3f0
15,053
import logging

import numpy as np
import scipy.sparse
from tqdm import tqdm


def make_nearest_neighbors_graph(data, k, n=1000):
    """Build exact k-nearest neighbors graph from numpy data.

    Args:
        data: Data to compute nearest neighbors of, each column is one point
        k: number of nearest neighbors to compute
        n (optional): number of neighbors to compute simultaneously

    Returns:
        A scipy sparse matrix in LIL format giving the symmetric nn graph.
    """
    shape = data.shape
    assert shape[0] % n == 0
    nbr_graph = scipy.sparse.lil_matrix((shape[0], shape[0]))
    norm = np.sum(data**2, axis=1)
    cols = np.meshgrid(np.arange(n), np.ones(k+1))[0]
    for i in tqdm(range(0, shape[0], n)):
        dot = data @ data[i:i+n].T
        dists = np.sqrt(np.abs(norm[:, None] - 2*dot + norm[i:i+n][None, :]))
        idx = np.argpartition(dists, k, axis=0)[:k+1]
        nbrs = idx[np.argsort(dists[idx, cols], axis=0), cols][1:]
        for j in range(n):
            nbr_graph[i+j, nbrs[:, j]] = 1
    # Symmetrize graph
    for i in tqdm(range(shape[0])):
        for j in nbr_graph.rows[i]:
            if nbr_graph[j, i] == 0:
                nbr_graph[j, i] = nbr_graph[i, j]
    logging.info('Symmetrized neighbor graph')
    return nbr_graph
dd99b42c306ac963232aeca4e86ef7e0449126ca
15,054
import multiprocessing


def read_examples(input_files, batch_size, shuffle, num_epochs=None):
    """Creates readers and queues for reading example protos."""
    files = []
    for e in input_files:
        for path in e.split(','):
            files.extend(file_io.get_matching_files(path))
    thread_count = multiprocessing.cpu_count()

    # The minimum number of instances in a queue from which examples are drawn
    # randomly. The larger this number, the more randomness at the expense of
    # higher memory requirements.
    min_after_dequeue = 1000

    # When batching data, the queue's capacity will be larger than the batch_size
    # by some factor. The recommended formula is (num_threads + a small safety
    # margin). For now, we use a single thread for reading, so this can be small.
    queue_size_multiplier = thread_count + 3

    # Convert num_epochs == 0 -> num_epochs is None, if necessary
    num_epochs = num_epochs or None

    # Build a queue of the filenames to be read.
    filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)

    options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    example_id, encoded_example = tf.TFRecordReader(options=options).read_up_to(
        filename_queue, batch_size)

    if shuffle:
        capacity = min_after_dequeue + queue_size_multiplier * batch_size
        return tf.train.shuffle_batch(
            [example_id, encoded_example],
            batch_size,
            capacity,
            min_after_dequeue,
            enqueue_many=True,
            num_threads=thread_count)
    else:
        capacity = queue_size_multiplier * batch_size
        return tf.train.batch(
            [example_id, encoded_example],
            batch_size,
            capacity=capacity,
            enqueue_many=True,
            num_threads=thread_count)
5265e14d02b53d7b7c8754f980573b8d8c9667ea
15,055
def gen_context(n=10):
    """
    method returns a random matrix which can be used to
    produce private prices over a bunch of items
    """
    return np.random.randint(-3, 4, size=(n, n))
51b3cf2a64530147eddacf628c8b593b7e923402
15,056
def _parallel_binning_fit(split_feat, _self, X, y,
                          weights, support_sample_weight,
                          bins, loss):
    """Private function to find the best column splittings within a job."""
    n_sample, n_feat = X.shape
    feval = CRITERIA[_self.criterion]

    split_t = None
    split_col = None
    left_node = (None, None, None, None)
    right_node = (None, None, None, None)
    largs_left = {'classes': None}
    largs_right = {'classes': None}

    if n_sample < _self._min_samples_split:
        return loss, split_t, split_col, left_node, right_node

    for col, _bin in zip(split_feat, bins):
        for q in _bin:
            # create 1D bool mask for right/left children
            mask = (X[:, col] > q)
            n_left, n_right = (~mask).sum(), mask.sum()

            if n_left < _self._min_samples_leaf or n_right < _self._min_samples_leaf:
                continue

            # create 2D bool mask for right/left children
            left_mesh = np.ix_(~mask, _self._linear_features)
            right_mesh = np.ix_(mask, _self._linear_features)

            model_left = deepcopy(_self.base_estimator)
            model_right = deepcopy(_self.base_estimator)

            if hasattr(_self, 'classes_'):
                largs_left['classes'] = np.unique(y[~mask])
                largs_right['classes'] = np.unique(y[mask])
                if len(largs_left['classes']) == 1:
                    model_left = DummyClassifier(strategy="most_frequent")
                if len(largs_right['classes']) == 1:
                    model_right = DummyClassifier(strategy="most_frequent")

            if weights is None:
                model_left.fit(X[left_mesh], y[~mask])
                loss_left = feval(model_left, X[left_mesh], y[~mask], **largs_left)
                wloss_left = loss_left * (n_left / n_sample)

                model_right.fit(X[right_mesh], y[mask])
                loss_right = feval(model_right, X[right_mesh], y[mask], **largs_right)
                wloss_right = loss_right * (n_right / n_sample)
            else:
                if support_sample_weight:
                    model_left.fit(X[left_mesh], y[~mask],
                                   sample_weight=weights[~mask])
                    model_right.fit(X[right_mesh], y[mask],
                                    sample_weight=weights[mask])
                else:
                    model_left.fit(X[left_mesh], y[~mask])
                    model_right.fit(X[right_mesh], y[mask])

                loss_left = feval(model_left, X[left_mesh], y[~mask],
                                  weights=weights[~mask], **largs_left)
                wloss_left = loss_left * (weights[~mask].sum() / weights.sum())

                loss_right = feval(model_right, X[right_mesh], y[mask],
                                   weights=weights[mask], **largs_right)
                wloss_right = loss_right * (weights[mask].sum() / weights.sum())

            total_loss = wloss_left + wloss_right

            # store if best
            if total_loss < loss:
                split_t = q
                split_col = col
                loss = total_loss

                left_node = (model_left, loss_left, wloss_left,
                             n_left, largs_left['classes'])
                right_node = (model_right, loss_right, wloss_right,
                              n_right, largs_right['classes'])

    return loss, split_t, split_col, left_node, right_node
5889993b9ad1ca49ac9e8ce541262ff716dea18f
15,057
def checkkeywords(keywordsarr, mdtype):
    """
    Check the keywords
    Datasets: for Check 9
    Services: for Check 9
    Logic: there must be at least one keyword to get a score = 2.
    If keywords contain commas (","), then a maximum score of 1 is possible.
    """
    score = 0
    # keywordsarr is an array of objects, each containing a property "keywords"
    # and info on a thesaurus.
    # Here we join the keywords from all objects into one array.
    keywordsstr = ""
    if keywordsarr != None:
        keywords = []
        for k in keywordsarr:
            for i in k["keywords"]:
                i = i.replace("\n", " ")
                # exception for 1 keyword of INSPIRE
                if i.find(",") > -1 and i != "Gebiedsbeheer, gebieden waar beperkingen gelden, gereguleerde gebieden en rapportage-eenheden":
                    score = 1
                keywords.append(i)
        # if the score is already 1, then we know the keywords are not
        # correctly set
        if len(keywords) > 0 and score != 1:
            score = 2
        keywordsstr = valuesep.join(keywords)
    else:
        keywordsstr = ""
    # Now fetch the result
    if mdtype == "dataset" or mdtype == "series":
        # checkid = 9, so the index in the matrix is: 8
        result = checksdatasets[8][2][score]
    else:
        result = checksservices[8][2][score]
    return MkmScore(keywordsstr, score, result)
cb165689ce820c1ead3622ed562260ee76558205
15,059
def compute_presence_ratios(sorting, duration_in_frames, sampling_frequency=None, unit_ids=None, **kwargs):
    """
    Computes and returns the presence ratios for the sorted dataset.

    Parameters
    ----------
    sorting: SortingExtractor
        The sorting result to be evaluated.
    duration_in_frames: int
        Length of recording (in frames).
    sampling_frequency: float
        The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor.
    unit_ids: list
        List of unit ids to compute metric for. If not specified, all units are used.
    **kwargs: keyword arguments
        Keyword arguments among the following:
            save_property_or_features: bool
                If True, the metric is saved as sorting property
            verbose: bool
                If True, will be verbose in metric computation

    Returns
    ----------
    presence_ratios: np.ndarray
        The presence ratios of the sorted units.
    """
    params_dict = update_all_param_dicts_with_kwargs(kwargs)

    if unit_ids is None:
        unit_ids = sorting.get_unit_ids()

    md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
                    apply_filter=False, freq_min=params_dict["freq_min"],
                    freq_max=params_dict["freq_max"], unit_ids=unit_ids,
                    duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])

    pr = PresenceRatio(metric_data=md)
    presence_ratios = pr.compute_metric(**kwargs)
    return presence_ratios
f270ff52c2d60296db25887bcbb7d203bfc23c07
15,060
def img_box_match(bboxes_gt, bboxes_pre, iou_threshold):
    """
    Goal:
        Returns info for mAP calculation (Precision recall curve)
        Precision = TP / (TP + FP)
        Recall = TP / (TP + FN)

    Returns:
        list of [TP/FP, conf]
        num_gt_bboxes : int

    Notes:
        For each prediction bbox, it finds what ground-truth bbox it belongs to
        in a descending order of confidence.
        If iou(pred_box, gt_box) > iou_threshold, this gt_box is assigned to this pred_box.
        Then we check if the class is correct or not -> correct: TP -> incorrect: FP
        The rest of prediction bboxes cannot find gt bboxes -> FP
        The rest of gt bboxes haven't been assigned to any prediction bboxes -> FN
    """
    num_gt_bboxes = len(bboxes_gt)
    gt_assign = [0] * num_gt_bboxes
    pre_TF = []

    for box_pre in bboxes_pre:
        max_iou = 0
        max_iou_index = -1
        for i in range(num_gt_bboxes):
            iou_temp = iou_compute(box_pre, bboxes_gt[i])
            if gt_assign[i] == 0:
                # This gt bbox hasn't been assigned.
                # Find the box_gt with largest iou with this given box_pre.
                if iou_temp > iou_threshold and iou_temp > max_iou:
                    max_iou_index = i
                    max_iou = iou_temp
        if max_iou_index != -1:
            # successfully find a box_gt; mark the matched gt box as assigned
            gt_assign[max_iou_index] = 1
            # TP
            pre_TF.append([True, box_pre['conf']])
        else:
            # FP
            pre_TF.append([False, box_pre['conf']])
    return pre_TF, num_gt_bboxes
09a7c9e9739f491777f1b0f48f745858281a0953
15,061
def random_char():
    """Return a random character."""
    return Char(choice(_possible_chars))
aca30fc1e6b7039cd5187264b89bca4d2899d169
15,062
def bond(self, atom: Atom, nBonds: int = 1, main=False) -> Atom:
    """Like :meth:`__call__`, but returns the atom passed in instead,
    so you can form the main loop quickly."""
    self(atom, nBonds, main)
    return atom
e8d065f55110c37b4db06ca394c741d98ffbd446
15,063
def train_list():
    """
    Return a sorted list of all train patients
    """
    patients = listdir_no_hidden(INPUT_PATH)
    patients.sort()
    l = []
    for patient in patients:
        if labels[patient] != None:
            l.append(patient)
    return l
7977e8ea72e826e18b4138391e812c15f3cfb6c0
15,064
def split_data(X, Y):
    """
    This function splits the features and the target into training and test sets.

    Params:
        X - (df containing predictors)
        Y - (series containing target)

    Returns:
        X_train, X_test, Y_train, Y_test
    """
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.2, random_state=2)
    return X_train, X_test, Y_train, Y_test
c8dbc5a6e63f0b24abf3547ba208ad0a24e5594b
15,065
def get_bucket(client=None, **kwargs):
    """
    Get bucket object.

    :param client: client object to use.
    :type client: Google Cloud Storage client
    :returns: Bucket object
    :rtype: ``object``
    """
    bucket = client.lookup_bucket(kwargs['Bucket'])
    return bucket
b71891eec3a9f7c8f9b8fad134b2dd02bfb65e51
15,066
def remove_macros(xml_tree: etree._ElementTree) -> etree._ElementTree:
    """Removes the macros section from the tool tree.

    Args:
        xml_tree (etree._ElementTree): The tool element tree.

    Returns:
        etree.ElementTree: The tool element tree without the macros section.
    """
    to_remove = []
    for macros_el in xml_tree.getroot().findall("macros"):
        to_remove.append(macros_el)
    for macros_el in to_remove:
        xml_tree.getroot().remove(macros_el)
    return xml_tree
77fed7e85dadbe8b2ec7511ad3b4cf7c272807a4
15,067
def flight_time_movies_2_binary_search(movie_lengths, flight_length):
    """
    Solution: Sort the list of movies, then iterate it, conducting a binary
    search on each item for a different item that, when added to it,
    equals the flight length.

    Complexity:
        Time: O(n * lg{n})
        Space: O(n), since each iteration slices out a copy of the list
    """
    if len(movie_lengths) < 2:
        raise ValueError('movie length list must be at least 2 items long')

    # Sort the movies first: Time: O(n * lg{n})
    movie_lengths.sort()

    # For each movie length
    for index, movie_length_first in enumerate(movie_lengths):
        # Conduct a binary search on movie_lengths: O(lg{n}) time
        target_length = flight_length - movie_length_first
        movie_lengths_sub = movie_lengths[0:index] + movie_lengths[index + 1:len(movie_lengths)]
        if binary_search(target=target_length, nums=movie_lengths_sub):
            return True
    return False
ac7e8ad340e677f6c51f1841aab61262d8c4e226
15,068
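A quick check of the two-movie search above, assuming the binary_search(target, nums) helper it calls is available in the same module:

    movies = [90, 100, 110, 120]
    print(flight_time_movies_2_binary_search(movies, 210))  # True  (90 + 120, or 100 + 110)
    print(flight_time_movies_2_binary_search(movies, 500))  # False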
def find_vertical_bounds(hp, T):
    """
    Finds the upper and lower bounds of the characters' zone on the plate based on threshold value T

    :param hp: horizontal projection (axis=1) of the plate image pixel intensities
    :param T: Threshold value for bound detection
    :return: upper and lower bounds
    """
    N = len(hp)
    # Find lower bound
    i = 0
    while ~((hp[i] <= T) & (hp[i+1] > T)) & (i < int(N/2)):
        i += 1
    lower_bound = 0 if i == int(N/2) else i

    # Find superior bound
    i = N-1
    while ~((hp[i-1] > T) & (hp[i] <= T)) & (i > int(N/2)):
        i -= 1
    upper_bound = i

    return [lower_bound, upper_bound]
8520c3b638cafe1cfb2d86cc7ce8c3f28d132512
15,069
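A small demo of find_vertical_bounds above on a synthetic horizontal projection; hp must be a NumPy array so that ~ and & act as element-wise logical operators on the comparison results:

    import numpy as np

    hp = np.array([0, 0, 5, 9, 9, 9, 5, 0, 0, 0])
    print(find_vertical_bounds(hp, T=1))  # [1, 7]: the bright band starts after row 1 and ends at row 7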
import base64


def executeCmd(cmd, arg):
    """ the meat: how we react to the SNI-based logic and execute the underlying command """
    global currentPath
    global currentDirList
    global currentFileList
    global currentFileSizeList
    global agentName

    commands = initCmd(cmd)

    for testedCommand, alias in commands.items():
        if testedCommand == cmd == "WHERE":
            currentPath = encodeString(cmdHandler(alias))
            return cmdHandler(alias)
        elif testedCommand == cmd == 'CB':
            returnedOutput = cmdHandler(alias)
            currentPath = encodeString(returnedOutput)
            return returnedOutput
        elif testedCommand == cmd == 'ALIVE':
            return (str(agentName)).encode('utf-8')
        elif testedCommand == cmd == 'LS':
            returnedOutput = cmdHandler(alias)
            currentFileList, returnedOutput = emptyListCheck(returnedOutput)
            return returnedOutput
        elif testedCommand == cmd == 'SIZE':
            returnedOutput = cmdHandler(alias)
            currentFileSizeList = emptyListCheck(returnedOutput)
            return returnedOutput
        elif testedCommand == cmd == 'CD':
            try:
                target_dir = ('%s' % currentDirList[int(arg)])
            except IndexError:
                print("(!) Invalid directory number!")
                return
            alias = (alias % target_dir).replace("'", "")
            returnedOutput = (cmdHandler(alias))
            currentPath = encodeString(returnedOutput)
            return returnedOutput
        elif testedCommand == cmd == 'EX':
            try:
                targetFile = ('%s' % currentFileList[int(arg)])
            except IndexError:
                print("(!) Invalid file number!")
                return
            targetFilePath = ('%s/%s' % (currentPath, targetFile))
            with open(targetFilePath, 'rb') as f:
                content = base64.b32encode(f.read())
            return content
        elif testedCommand == cmd == "LD":
            returnedOutput = cmdHandler(alias)
            currentDirList, returnedOutput = emptyListCheck(returnedOutput)
            return returnedOutput
        elif testedCommand == cmd == "LIST":
            returnedOutput = cmdHandler(alias)
            return returnedOutput
b257e77c2f7c692d63aa4140cc5ce6ccc2213273
15,070
from typing import Union
from typing import Tuple
import re


async def text2image(
    text: str,
    auto_parse: bool = True,
    font_size: int = 20,
    color: Union[str, Tuple[int, int, int], Tuple[int, int, int, int]] = "white",
    font: str = "CJGaoDeGuo.otf",
    font_color: Union[str, Tuple[int, int, int]] = "black",
    padding: Union[int, Tuple[int, int, int, int]] = 0,
) -> BuildImage:
    """
    Description:
        Parse the text and render it to an image.
        Uses the tag <f> </f> with optional settings:
            font: str -> font for the tagged text
            fs / font_size: int -> size of the tagged text
            fc / font_color: Union[str, Tuple[int, int, int]] -> color of the tagged text
        Example input:
            在不在,<f font=YSHaoShenTi-2.ttf font_size=30 font_color=red>HibiKi小姐</f>,
            你最近还好吗,<f font_size=15 font_color=black>我非常想你</f>,这段时间我非常不好过,
            <f font_size=25>抽卡抽不到金色</f>,这让我很痛苦
    Args:
        :param text: the text
        :param auto_parse: parse the tags automatically; otherwise render the text verbatim
        :param font_size: regular font size
        :param color: background color
        :param font: regular font
        :param font_color: regular font color
        :param padding: outer margin of the text; as a tuple it is (top, left, bottom, right)
    """
    pw = ph = top_padding = left_padding = 0
    if padding:
        if isinstance(padding, int):
            pw = padding * 2
            ph = padding * 2
            top_padding = left_padding = padding
        elif isinstance(padding, tuple):
            pw = padding[0] + padding[2]
            ph = padding[1] + padding[3]
            top_padding = padding[0]
            left_padding = padding[1]
    if auto_parse and re.search(r"<f(.*)>(.*)</f>", text):
        _data = []
        new_text = ""
        placeholder_index = 0
        for s in text.split("</f>"):
            r = re.search(r"<f(.*)>(.*)", s)
            if r:
                start, end = r.span()
                if start != 0 and (t := s[:start]):
                    new_text += t
                _data.append(
                    [
                        (start, end),
                        f"[placeholder_{placeholder_index}]",
                        r.group(1).strip(),
                        r.group(2),
                    ]
                )
                new_text += f"[placeholder_{placeholder_index}]"
                placeholder_index += 1
        new_text += text.split("</f>")[-1]
        image_list = []
        current_placeholder_index = 0
        # split on newlines; each line becomes its own image
        for s in new_text.split("\n"):
            _tmp_text = s
            img_height = BuildImage(0, 0, font_size=font_size).getsize("正")[1]
            img_width = 0
            _tmp_index = current_placeholder_index
            for _ in range(s.count("[placeholder_")):
                placeholder = _data[_tmp_index]
                if "font_size" in placeholder[2]:
                    r = re.search(r"font_size=['\"]?(\d+)", placeholder[2])
                    if r:
                        w, h = BuildImage(0, 0, font_size=int(r.group(1))).getsize(
                            placeholder[3]
                        )
                        img_height = img_height if img_height > h else h
                        img_width += w
                else:
                    img_width += BuildImage(0, 0, font_size=font_size).getsize(
                        placeholder[3]
                    )[0]
                _tmp_text = _tmp_text.replace(f"[placeholder_{_tmp_index}]", "")
                _tmp_index += 1
            img_width += BuildImage(0, 0, font_size=font_size).getsize(_tmp_text)[0]
            # img_width += len(_tmp_text) * font_size
            # start drawing
            A = BuildImage(
                img_width, img_height, color=color, font=font, font_size=font_size
            )
            basic_font_h = A.getsize("正")[1]
            current_width = 0
            # iterate over the placeholders
            for _ in range(s.count("[placeholder_")):
                if not s.startswith(f"[placeholder_{current_placeholder_index}]"):
                    slice_ = s.split(f"[placeholder_{current_placeholder_index}]")
                    await A.atext(
                        (current_width, A.h - basic_font_h - 1), slice_[0], font_color
                    )
                    current_width += A.getsize(slice_[0])[0]
                placeholder = _data[current_placeholder_index]
                # parse the tag settings
                _font = font
                _font_size = font_size
                _font_color = font_color
                for e in placeholder[2].split():
                    if e.startswith("font="):
                        _font = e.split("=")[-1]
                    if e.startswith("font_size=") or e.startswith("fs="):
                        _font_size = int(e.split("=")[-1])
                        if _font_size > 1000:
                            _font_size = 1000
                        if _font_size < 1:
                            _font_size = 1
                    if e.startswith("font_color") or e.startswith("fc="):
                        _font_color = e.split("=")[-1]
                text_img = BuildImage(
                    0,
                    0,
                    plain_text=placeholder[3],
                    font_size=_font_size,
                    font_color=_font_color,
                    font=_font,
                )
                _img_h = (
                    int(A.h / 2 - text_img.h / 2)
                    if new_text == "[placeholder_0]"
                    else A.h - text_img.h
                )
                await A.apaste(text_img, (current_width, _img_h - 1), True)
                current_width += text_img.w
                s = s[
                    s.index(f"[placeholder_{current_placeholder_index}]")
                    + len(f"[placeholder_{current_placeholder_index}]"):
                ]
                current_placeholder_index += 1
            if s:
                slice_ = s.split(f"[placeholder_{current_placeholder_index}]")
                await A.atext((current_width, A.h - basic_font_h), slice_[0])
                current_width += A.getsize(slice_[0])[0]
            A.crop((0, 0, current_width, A.h))
            # A.show()
            image_list.append(A)
        height = 0
        width = 0
        for img in image_list:
            height += img.h
            width = width if width > img.w else img.w
        width += pw
        height += ph
        A = BuildImage(width + left_padding, height + top_padding, color=color)
        current_height = top_padding
        for img in image_list:
            await A.apaste(img, (left_padding, current_height), True)
            current_height += img.h
    else:
        width = 0
        height = 0
        _tmp = BuildImage(0, 0, font_size=font_size)
        for x in text.split("\n"):
            w, h = _tmp.getsize(x)
            height += h
            width = width if width > w else w
        width += pw
        height += ph
        A = BuildImage(
            width + left_padding,
            height + top_padding,
            font_size=font_size,
            color=color,
            font=font,
        )
        await A.atext((left_padding, top_padding), text, font_color)
    # A.show()
    return A
dbdc6436c94d57aa2d1eb910dc18e4afeeadf689
15,071
def get_dosage_ann():
    """ Convenience function for getting the dosage and snp annotation
    """
    dos = {}
    s_ann = {}
    dos_path = \
        ("/export/home/barnarj/CCF_1000G_Aug2013_DatABEL/CCF_1000G_Aug2013_Chr"
         "{0}.dose.double.ATB.RNASeq_MEQTL.txt")
    SNP_ANNOT = \
        ("/proj/genetics/Projects/shared/Studies/Impute_CCF_Arrythmia/"
         "Projects/CCF/Projects/ATB/Projects/ATB_RNASeq/OutputData/"
         "ATB.RNASeq_Variant_Ann.bed.gz")
    return(dos, s_ann)
792caa3c9b6326178ca5a706b694c52cf1bddccc
15,072
import re
import types
import typing


def function_arguments(function_name: str, services_module: types.ModuleType) -> typing.List[str]:
    """Get function arguments for stan::services `function_name`.

    This function parses a function's docstring to get argument names. This is
    an inferior method to using `inspect.Signature.from_callable(function)`.
    Unfortunately, pybind11 does not support this use of `inspect`.

    A compiled `services_module` is required for the lookup. Only simple
    function arguments are returned. For example, callback writers and
    var_context arguments are dropped.

    Arguments:
        function_name: Name of the function.
        services_module (module): Compiled model-specific services extension module.

    Returns:
        Argument names for `function_name`.
    """
    function = getattr(services_module, f"{function_name}_wrapper")
    docstring = function.__doc__
    # first line looks something like this: function_name(arg1: int, arg2: int, ...) -> int
    function_name_with_arguments = docstring.split(" -> ", 1).pop(0)
    parameters = re.findall(r"(\w+): \w+", function_name_with_arguments)
    # remove arguments which are specific to the wrapper
    arguments_exclude = {"socket_filename"}
    return list(filter(lambda arg: arg not in arguments_exclude, parameters))
01a12d97c6b154159c4ba2d142e1374a008befe3
15,073
def cost_n_moves(prev_cost: int, weight: int = 1) -> int:
    """ 'g(n)' cost function that adds a 'weight' to each move."""
    return prev_cost + weight
77a737d68f2c74eaba484b36191b95064b05e1a9
15,074
import io


def get_gaussian_fundamentals(s, nfreq=None):
    """
    Parses harmonic and anharmonic frequencies from gaussian log file.

    Input:
        s: String containing the log file output.
        nfreq: number of vibrational frequencies

    Returns:
        If successful:
            Numpy 2D array of size: nfreq x 2
            1st column for harmonic frequencies in cm-1
            2nd column for anharmonic frequencies in cm-1
        else:
            A string showing the error.

    Portion of the relevant output:
        Fundamental Bands (DE w.r.t. Ground State)
        1(1)        3106.899     2957.812    -0.042978   -0.008787   -0.008920
        2(1)        3106.845     2959.244    -0.042969   -0.008924   -0.008782
        3(1)        3082.636     2934.252    -0.043109   -0.008543   -0.008705
        4(1)        3082.581     2935.702    -0.043101   -0.008709   -0.008539
        5(1)        3028.430     2918.529    -0.048859   -0.008796   -0.008794
        6(1)        3026.064     2926.301    -0.048438   -0.008788   -0.008785
        7(1)        1477.085     1438.911    -0.044573   -0.001097   -0.007855
        8(1)        1477.063     1439.122    -0.044576   -0.007858   -0.001089
        9(1)        1474.346     1432.546    -0.043241    0.000678   -0.007062
        10(1)       1474.318     1432.981    -0.043245   -0.007065    0.000691
        11(1)       1410.843     1377.548    -0.028060   -0.016937   -0.016944
        12(1)       1387.532     1356.818    -0.027083   -0.016001   -0.016001
        13(1)       1205.022     1177.335    -0.029813   -0.010333   -0.011188
        14(1)       1204.977     1177.775    -0.029806   -0.011191   -0.010328
        15(1)       1011.453      988.386    -0.037241   -0.014274   -0.014270
        16(1)        821.858      814.503    -0.025712   -0.008603   -0.010446
        17(1)        821.847      814.500    -0.025693   -0.010449   -0.008599
        18(1)        317.554      296.967    -0.035184   -0.010866   -0.010861
        Overtones (DE w.r.t. Ground State)
    """
    if nfreq == None:
        nfreq = get_gaussian_nfreq(s)
    freqs = np.zeros((nfreq, 2))
    lines = s.splitlines()
    key = 'Fundamental Bands (DE w.r.t. Ground State)'
    iline = io.get_line_number(key, lines=lines)
    if iline > 0:
        for i in range(nfreq):
            iline += 1
            line = lines[iline]
            cols = line.split()
            freqs[i, :] = [float(cols[-5]), float(cols[-4])]
    return freqs[freqs[:, 0].argsort()]
0da2acf3eb1ca0e057da8935ad772a2c65fd251a
15,076
def uniform_selection_tensor(tensor_data: np.ndarray,
                             p: int,
                             n_bits: int,
                             per_channel: bool = False,
                             channel_axis: int = 1,
                             n_iter: int = 10,
                             min_threshold: float = MIN_THRESHOLD,
                             quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) -> dict:
    """
    Compute the optimal quantization range based on the provided QuantizationErrorMethod
    to uniformly quantize the tensor.
    A different search is applied, depending on the value of the selected QuantizationErrorMethod.

    Args:
        tensor_data: Tensor content as Numpy array.
        p: p-norm to use for the Lp-norm distance.
        n_bits: Number of bits to quantize the tensor.
        per_channel: Whether the quantization should be per-channel or not.
        channel_axis: Output channel index.
        n_iter: Number of iterations to search for the optimal threshold (not used for this method).
        min_threshold: Minimal threshold to use if threshold is too small (not used for this method).
        quant_error_method: an error function to optimize the range parameters' selection accordingly.

    Returns:
        Optimal quantization range to quantize the tensor uniformly.
    """
    tensor_min = get_tensor_min(tensor_data, per_channel, channel_axis)
    tensor_max = get_tensor_max(tensor_data, per_channel, channel_axis)

    if quant_error_method == qc.QuantizationErrorMethod.NOCLIPPING:
        mm = tensor_min, tensor_max
    else:
        error_function = get_threshold_selection_tensor_error_function(QuantizationMethod.UNIFORM,
                                                                       quant_error_method, p, norm=False)
        mm = qparams_uniform_selection_tensor_search(error_function,
                                                     tensor_data,
                                                     tensor_min,
                                                     tensor_max,
                                                     n_bits,
                                                     per_channel,
                                                     channel_axis)
    return {RANGE_MIN: mm[0], RANGE_MAX: mm[1]}
17f5e13443fc23ce4d0dafc0fb69de226e93fc56
15,077
def calculate_bin_P(P, x, cal_type='pes'):
    """
    Calculate the virtual, binary transition function, i.e. the transition
    function under which a state and action pair may visit the virtual state $z$.
    """
    n, m = x.world_shape
    # P_z is defined for the n*m states $s$ and a virtual state $z$
    # index 0 - n*m-1: real state
    # n*m: virtual state
    P_z = np.zeros((5, n*m+1, n*m+1))
    ind_a, ind_s, ind_sp = np.where(P)
    if cal_type == 'pes':
        safe_space = x.S_hat
    elif cal_type == 'opt':
        safe_space = x.S_bar
    for i in range(len(ind_a)):
        if safe_space[ind_s[i], ind_a[i]]:
            P_z[ind_a[i], ind_s[i], ind_sp[i]] = 1
        else:
            P_z[ind_a[i], ind_s[i], -1] = 1
    # For any action, transition probability from z to z is equal to 1
    P_z[:, -1, -1] = 1
    return P_z
db7908f2ac0f20d72a70a920c412b895bf4ccef4
15,079
def makeYbus(baseMVA, bus, branch):
    """Builds the bus admittance matrix and branch admittance matrices.

    Returns the full bus admittance matrix (i.e. for all buses) and the
    matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage
    vector, yield the vector currents injected into each line from the
    "from" and "to" buses respectively of each line. Does appropriate
    conversions to p.u.

    @see: L{makeSbus}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln
    """
    ## constants
    nb = bus.shape[0]          ## number of buses
    nl = branch.shape[0]       ## number of lines

    ## for each branch, compute the elements of the branch admittance matrix where
    ##
    ##      | If |   | Yff  Yft |   | Vf |
    ##      |    | = |          | * |    |
    ##      | It |   | Ytf  Ytt |   | Vt |
    ##
    Ytt, Yff, Yft, Ytf = branch_vectors(branch, nl)

    ## compute shunt admittance
    ## if Psh is the real power consumed by the shunt at V = 1.0 p.u.
    ## and Qsh is the reactive power injected by the shunt at V = 1.0 p.u.
    ## then Psh - j Qsh = V * conj(Ysh * V) = conj(Ysh) = Gs - j Bs,
    ## i.e. Ysh = Psh + j Qsh, so ...
    ## vector of shunt admittances
    Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA

    ## build connection matrices
    f = real(branch[:, F_BUS]).astype(int)       ## list of "from" buses
    t = real(branch[:, T_BUS]).astype(int)       ## list of "to" buses

    ## connection matrix for line & from buses
    Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
    ## connection matrix for line & to buses
    Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))

    ## build Yf and Yt such that Yf * V is the vector of complex branch currents injected
    ## at each branch's "from" bus, and Yt is the same for the "to" bus end
    i = hstack([range(nl), range(nl)])           ## double set of row indices

    Yf = csr_matrix((hstack([Yff, Yft]), (i, hstack([f, t]))), (nl, nb))
    Yt = csr_matrix((hstack([Ytf, Ytt]), (i, hstack([f, t]))), (nl, nb))
    # Yf = spdiags(Yff, 0, nl, nl) * Cf + spdiags(Yft, 0, nl, nl) * Ct
    # Yt = spdiags(Ytf, 0, nl, nl) * Cf + spdiags(Ytt, 0, nl, nl) * Ct

    ## build Ybus
    Ybus = Cf.T * Yf + Ct.T * Yt + \
        csr_matrix((Ysh, (range(nb), range(nb))), (nb, nb))

    Ybus.sort_indices()
    Ybus.eliminate_zeros()

    return Ybus, Yf, Yt
8068d6a17c99f747e8d95b0b3ba1ac65735be382
15,080
import numpy


def list_blob(math_engine, batch_len, batch_width, list_size, channels, dtype="float32"):
    """Creates a blob with one-dimensional Height * Width * Depth elements.

    Parameters
    ----------
    math_engine : object
        The math engine that works with this blob.
    batch_len : int, > 0
        The BatchLength dimension of the new blob.
    batch_width : int, > 0
        The BatchWidth dimension of the new blob.
    list_size : int, > 0
        The ListSize dimension of the new blob.
    channels : int, > 0
        The Channels dimension of the new blob.
    dtype : {"float32", "int32"}, default="float32"
        The type of data in the blob.
    """
    if dtype != "float32" and dtype != "int32":
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    if batch_len < 1:
        raise ValueError('The `batch_len` must be > 0.')
    if batch_width < 1:
        raise ValueError('The `batch_width` must be > 0.')
    if list_size < 1:
        raise ValueError('The `list_size` must be > 0.')
    if channels < 1:
        raise ValueError('The `channels` must be > 0.')

    shape = numpy.array((batch_len, batch_width, list_size, 1, 1, 1, channels),
                        dtype=numpy.int32, copy=False)

    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
dab3b45173fcca32f2cfc7bfbc585e002ff34f37
15,081
def skip_on_pypy_because_cache_next_works_differently(func):
    """Not sure what happens there but on PyPy CacheNext doesn't work
    like on CPython.
    """
    return _skipif_wrapper(func, IS_PYPY,
                           reason='PyPy works differently with __next__ cache.')
b2f765f1cad292948bb456aa841e92f180222061
15,082
import random


def get_life_of_brian():
    """ Get lines from test_LifeOfBrian. """
    count = 0
    monty_list = ['coconut']
    try:
        with open(LIFE_OF_BRIAN_SCRIPT) as f:
            lines = f.readlines()
            for line in lines:
                count += 1
                # print(line)
                monty_list.append(line)
            random_line = random.randrange(0, count)
            picked_line = monty_list[random_line]
            return picked_line
    except:
        # print(f"file at : {LIFE_OF_BRIAN_SCRIPT} could not be opened.")
        return 'but it has FAAANNNGGsss'
5b6007888f51b0b2a38eea6381bdaa5187624dda
15,083
def ackley_func(x):
    """Ackley's objective function.

    Has a global minimum at :code:`f(0,0,...,0)` with a search
    domain of [-32, 32]

    Parameters
    ----------
    x : numpy.ndarray
        set of inputs of shape :code:`(n_particles, dimensions)`

    Returns
    -------
    numpy.ndarray
        computed cost of size :code:`(n_particles, )`

    Raises
    ------
    ValueError
        When the input is out of bounds with respect to the function domain
    """
    if not np.logical_and(x >= -32, x <= 32).all():
        raise ValueError('Input for Ackley function must be within [-32, 32].')

    d = x.shape[1]
    j = (-20.0 * np.exp(-0.2 * np.sqrt((1/d) * (x**2).sum(axis=1)))
         - np.exp((1/float(d)) * np.cos(2 * np.pi * x).sum(axis=1))
         + 20.0
         + np.exp(1))
    return j
f00b729f57fbaa1534bb78589e1a64912b08b4a3
15,084
def validate_listable_type(*atype):
    """Validate a list of atype.

    @validate_listable_type(str)
    def example_func(a_list):
        return a_list

    @validate_listable_type(int)
    def example_int_func(a_list):
        return a_list
    """
    if len(atype) != 1:
        raise ValueError("Expected one arg. Got {n} args.".format(n=len(atype)))
    type_ = atype[0]

    def wrap(f):
        def wrapped_f(*args, **kw):
            for arg in args[0]:
                if not isinstance(arg, type_):
                    raise TypeError("Expected type {t}. Got type {x} for {v}.".format(
                        t=type_, x=type(arg), v=args))
            return f(*args)
        return wrapped_f
    return wrap
691737184fca8bdcc7f4c3779af86b9a041b71dc
15,085
def meh(captcha):
    """Returns the sum of the digits which match the next one in the captcha input string.

    >>> meh('1122')
    3
    >>> meh('1111')
    4
    >>> meh('1234')
    0
    >>> meh('91212129')
    9
    """
    result = 0
    for n in range(len(captcha)):
        if captcha[n] == captcha[(n + 1) % len(captcha)]:
            result += int(captcha[n])
    return result
2ff68455b7bb826a81392dba3bc8899374cbcc3e
15,086
def check_cli(module, cli):
    """
    This method checks if vRouter exists on the target node.
    This method also checks for idempotency using the vrouter-bgp-show command.
    If the given vRouter exists, return VROUTER_EXISTS as True else False.
    If the given neighbor exists on the given vRouter,
    return NEIGHBOR_EXISTS as True else False.

    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
    """
    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']

    # Check for vRouter
    check_vrouter = cli + ' vrouter-show format name no-show-headers'
    out = run_commands(module, check_vrouter)[1]
    if out:
        out = out.split()

    VROUTER_EXISTS = True if vrouter_name in out else False

    # Default when no neighbor was given to check for
    NEIGHBOR_EXISTS = False

    if neighbor:
        # Check for BGP neighbor
        show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
        show += 'format neighbor no-show-headers'
        out = run_commands(module, show)[1]

        if out and neighbor in out.split():
            NEIGHBOR_EXISTS = True
        else:
            NEIGHBOR_EXISTS = False

    return VROUTER_EXISTS, NEIGHBOR_EXISTS
fdeb4dafad83562a48d0d22871fb6dc5a845fc2b
15,087
def is_prime(n):
    """
    from https://stackoverflow.com/questions/15285534/isprime-function-for-python-language
    """
    if n == 2 or n == 3:
        return True
    if n < 2 or n % 2 == 0:
        return False
    if n < 9:
        return True
    if n % 3 == 0:
        return False
    r = int(n**0.5)
    f = 5
    while f <= r:
        if n % f == 0:
            return False
        if n % (f+2) == 0:
            return False
        f += 6
    return True
e992badd0648d0896097df71186fee2895d20119
15,088
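The loop in is_prime above relies on the fact that every prime larger than 3 has the form 6k plus or minus 1, so only f and f+2 need testing per stride of 6. A quick check:

    print([n for n in range(2, 30) if is_prime(n)])
    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]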
def load(file, encoding=None):
    """load(file,encoding=None) -> object

    This function reads a tnetstring from a file and parses it into a
    python object. The file must support the read() method, and this
    function promises not to read more data than necessary.
    """
    # Read the length prefix one char at a time.
    # Note that the netstring spec explicitly forbids padding zeros.
    c = file.read(1)
    if not c.isdigit():
        raise ValueError("not a tnetstring: missing or invalid length prefix")
    datalen = ord(c) - ord("0")
    c = file.read(1)
    if datalen != 0:
        while c.isdigit():
            datalen = (10 * datalen) + (ord(c) - ord("0"))
            if datalen > 999999999:
                errmsg = "not a tnetstring: absurdly large length prefix"
                raise ValueError(errmsg)
            c = file.read(1)
    if c != ":":
        raise ValueError("not a tnetstring: missing or invalid length prefix")
    # Now we can read and parse the payload.
    # This repeats the dispatch logic of pop() so we can avoid
    # re-constructing the outermost tnetstring.
    data = file.read(datalen)
    if len(data) != datalen:
        raise ValueError("not a tnetstring: length prefix too big")
    type = file.read(1)
    if type == ",":
        if encoding is not None:
            return data.decode(encoding)
        return data
    if type == "#":
        try:
            return int(data)
        except ValueError:
            raise ValueError("not a tnetstring: invalid integer literal")
    if type == "^":
        try:
            return float(data)
        except ValueError:
            raise ValueError("not a tnetstring: invalid float literal")
    if type == "!":
        if data == "true":
            return True
        elif data == "false":
            return False
        else:
            raise ValueError("not a tnetstring: invalid boolean literal")
    if type == "~":
        if data:
            raise ValueError("not a tnetstring: invalid null literal")
        return None
    if type == "]":
        l = []
        while data:
            (item, data) = pop(data, encoding)
            l.append(item)
        return l
    if type == "}":
        d = {}
        while data:
            (key, data) = pop(data, encoding)
            (val, data) = pop(data, encoding)
            d[key] = val
        return d
    raise ValueError("unknown type tag")
939cc6f7a42daa35552e256a1e2725826d44c01c
15,089
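A minimal usage sketch for load, assuming str payloads and encoding=None; the list and dict branches additionally rely on an external pop helper that is not part of this snippet:

import io

# '5:hello,' is a tnetstring: length prefix 5, payload 'hello', type tag ','
assert load(io.StringIO("5:hello,")) == "hello"
# '3:123#' parses as the integer 123
assert load(io.StringIO("3:123#")) == 123
# '4:true!' parses as the boolean True
assert load(io.StringIO("4:true!")) is True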
import ispyb.model.datacollection import ispyb.model.processingprogram import logging import configparser def enable(configuration_file, section="ispyb"): """Enable access to features that are currently under development.""" global _db, _db_cc, _db_config if _db_config: if _db_config == configuration_file: # This database connection is already set up. return logging.getLogger("ispyb").warn( "__future__ configuration file change requested" ) disable() logging.getLogger("ispyb").info( "NOTICE: This code uses __future__ functionality in the ISPyB API. " "This enables unsupported and potentially unstable code, which may " "change from version to version without warnings. Here be dragons." ) cfgparser = configparser.RawConfigParser() if not cfgparser.read(configuration_file): raise RuntimeError( "Could not read from configuration file %s" % configuration_file ) cfgsection = dict(cfgparser.items(section)) host = cfgsection.get("host") port = cfgsection.get("port", 3306) database = cfgsection.get("database", cfgsection.get("db")) username = cfgsection.get("username", cfgsection.get("user")) password = cfgsection.get("password", cfgsection.get("pw")) # Open a direct MySQL connection _db = mysql.connector.connect( host=host, port=port, user=username, password=password, database=database, use_pure=True, ) _db_config = configuration_file _db.autocommit = True class DictionaryCursorContextManager(object): """This class creates dictionary cursors for mysql.connector connections. By using a context manager it is ensured that cursors are closed immediately after use. Cursors created with this context manager return results as a dictionary and offer a .run() function, which is an alias to .execute that accepts query parameters as function parameters rather than a list. """ def __enter__(cm): """Enter context. Ensure the database is alive and return a cursor with an extra .run() function.""" _db.ping(reconnect=True) cm.cursor = _db.cursor(dictionary=True) def flat_execute(stmt, *parameters): """Pass all given function parameters as a list to the existing .execute() function.""" return cm.cursor.execute(stmt, parameters) setattr(cm.cursor, "run", flat_execute) return cm.cursor def __exit__(cm, *args): """Leave context. Close cursor. Destroy reference.""" cm.cursor.close() cm.cursor = None _db_cc = DictionaryCursorContextManager ispyb.model.datacollection.DataCollection.integrations = ( _get_linked_autoprocintegration_for_dc ) ispyb.model.datacollection.DataCollection.pdb = _get_linked_pdb_for_dc ispyb.model.processingprogram.ProcessingProgram.reload = _get_autoprocprogram
2aa613694f01c290f4cfeea8d8a470e87000021f
15,090
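A sketch of the configuration file enable() expects; the filename and all values below are illustrative, while the [ispyb] section name is the default from the function signature:

# credentials.cfg -- hypothetical example, read via enable("credentials.cfg")
[ispyb]
host = localhost
port = 3306
database = ispyb
username = ispyb_api
password = secret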
import time

def MakeMsgCmd(cmdName, argList):
    """
    Take a command name and an argList of tuples consisting of pairs
    of the form (argName, argValue), and return a string representing
    the corresponding dibs command.
    """
    body = MakeStartTag(dibs_constants.cmdTagName, {'id': cmdName}) + '\n'
    for argPair in argList:
        body += (MakeStartTag(dibs_constants.argTagName, {'id': argPair[0]})
                 + argPair[1] + MakeEndTag(dibs_constants.argTagName) + '\n')
    # repr() replaces the Python 2 backtick syntax used in the original.
    body += (MakeStartTag(dibs_constants.argTagName,
                          {'id': dibs_constants.cmdTimeArgName})
             + repr(time.time()) + MakeEndTag(dibs_constants.argTagName)
             + '\n' + MakeEndCmdTag())
    return body
63fd4c2695c005fa7ff465cd1975285d15dd4faf
15,091
def preprocess(tweet):
    """
    Substitutes URLs with the string URL.
    Removes leading and trailing whitespace.
    Removes non-Latin characters.
    :param tweet:
    :return:
    """
    # remove URL
    line = remove_url(str(tweet.strip()))

    # remove non-Latin characters
    stripped_text = ''
    for c in line:
        stripped_text += c if len(c.encode(encoding='utf_8')) == 1 else ''

    return stripped_text.translate(table).strip()
44bc9f9c66c6abc8f95acdf1666f9ded7c6aa610
15,092
import xml.etree.ElementTree as ET

def read_xml_file(input_file, elem):
    """Reads xml data and extracts specified elements

    Parameters
    ----------
    input_file : str
        The OTA xml file
    elem : str
        Specified elements to be extracted

    Returns
    -------
    list
        a list of xml seat data
    """
    tree = ET.parse(input_file)
    root = tree.findall(elem)
    return root
58b8e4b86f1400d0d77856cb57e6823f0c538487
15,093
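A small usage sketch for read_xml_file; the file name and element tag are made up for illustration:

# seatmap.xml (hypothetical) containing <Seats><Seat>...</Seat></Seats>
with open("seatmap.xml", "w") as fh:
    fh.write("<Seats><Seat>12A</Seat><Seat>12B</Seat></Seats>")

seats = read_xml_file("seatmap.xml", "Seat")
print([s.text for s in seats])  # ['12A', '12B']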
from .application import Application, ApplicationEnv
from .operator import Operator, OperatorEnv
from typing import Optional
from typing import Union
from typing import List

def env(pip_packages: Optional[Union[str, List[str]]] = None):
    """A decorator that adds an environment specification to either Operator or Application.

    Args:
        pip_packages (Optional[Union[str, List[str]]]): A string that is a path to a
            requirements.txt file, or a list of packages to install.

    Returns:
        A decorator that adds an environment specification to either Operator or Application.
    """
    # Import the classes here to avoid circular import.
    def decorator(cls):
        if hasattr(cls, "_env") and cls._env:
            raise ItemAlreadyExistsError(f"@env decorator is already specified for {cls}.")

        if issubclass(cls, Operator):
            environment = OperatorEnv(pip_packages=pip_packages)
        elif issubclass(cls, Application):
            environment = ApplicationEnv(pip_packages=pip_packages)
        else:
            raise UnknownTypeError(f"@env decorator cannot be specified for {cls}.")

        cls._env = environment
        return cls

    return decorator
9404d28e56a0d8824c9f05a0fa601b9ba181c98f
15,094
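A hedged usage sketch; MyOperator and the package list are hypothetical, and Operator comes from the same package as the decorator:

# Attach pip requirements to an Operator subclass.
@env(pip_packages=["scikit-image", "numpy"])
class MyOperator(Operator):
    pass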
import time def test_trace_propagation( endpoint, transport, encoding, enabled, expect_spans, expect_baggage, http_patchers, tracer, mock_server, thrift_service, app, http_server, base_url, http_client): """ Main TChannel-OpenTracing integration test, using basictracer as implementation of OpenTracing API. The main logic of this test is as follows: 1. Start a new trace with a root span 2. Store a random value in the baggage 3. Call the first service at the endpoint from `endpoint` parameter. The first service is either tchannel or http, depending on the value if `transport` parameter. 4. The first service calls the second service using pre-defined logic that depends on the endpoint invoked on the first service. 5. The second service accesses the tracing span and returns the value of the baggage item as the response. 6. The first service responds with the value from the second service. 7. The main test validates that the response is equal to the original random value of the baggage, proving trace & baggage propagation. 8. The test also validates that all spans have been finished and recorded, and that they all have the same trace ID. We expect 5 spans to be created from each test run: * top-level (root) span started in the test * client span (calling service-1) * service-1 server span * service-1 client span (calling service-2) * service-2 server span :param endpoint: name of the endpoint to call on the first service :param transport: type of the first service: tchannel or http :param enabled: if False, channels are instructed to disable tracing :param expect_spans: number of spans we expect to be generated :param http_patchers: monkey-patching of tornado AsyncHTTPClient :param tracer: a concrete implementation of OpenTracing Tracer :param mock_server: tchannel server (from conftest.py) :param thrift_service: fixture that creates a Thrift service from fake IDL :param app: tornado.web.Application fixture :param http_server: http server (provided by pytest-tornado) :param base_url: address of http server (provided by pytest-tornado) :param http_client: Tornado's AsyncHTTPClient (provided by pytest-tornado) """ # mock_server is created as a fixture, so we need to set tracer on it mock_server.tchannel._dep_tchannel._tracer = tracer mock_server.tchannel._dep_tchannel._trace = enabled register(tchannel=mock_server.tchannel, thrift_service=thrift_service, http_client=http_client, base_url=base_url) tchannel = TChannel(name='test', tracer=tracer, trace=enabled) app.add_handlers(".*$", [ (r"/", HttpHandler, {'client_channel': tchannel}) ]) with mock.patch('opentracing.tracer', tracer),\ mock.patch.object(tracing.log, 'exception') as log_exception: assert opentracing.tracer == tracer # sanity check that patch worked span = tracer.start_span('root') baggage = 'from handler3 %d' % time.time() span.set_baggage_item(BAGGAGE_KEY, baggage) if not enabled: span.set_tag('sampling.priority', 0) with span: # use span as context manager so that it's always finished response_future = None with tchannel.context_provider.span_in_context(span): if transport == 'tchannel': if encoding == 'json': response_future = tchannel.json( service='test-client', endpoint=endpoint, hostport=mock_server.hostport, body=mock_server.hostport, ) elif encoding == 'thrift': if endpoint == 'thrift1': response_future = tchannel.thrift( thrift_service.X.thrift1(mock_server.hostport), hostport=mock_server.hostport, ) elif endpoint == 'thrift3': response_future = tchannel.thrift( thrift_service.X.thrift3(mock_server.hostport), 
hostport=mock_server.hostport, ) elif endpoint == 'thrift4': response_future = tchannel.thrift( thrift_service.X.thrift4(mock_server.hostport), hostport=mock_server.hostport, ) else: raise ValueError('wrong endpoint %s' % endpoint) else: raise ValueError('wrong encoding %s' % encoding) elif transport == 'http': response_future = http_client.fetch( request=HTTPRequest( url='%s%s' % (base_url, endpoint), method='POST', body=mock_server.hostport, ) ) else: raise NotImplementedError( 'unknown transport %s' % transport) response = yield response_future assert log_exception.call_count == 0 body = response.body if expect_baggage: assert body == baggage def get_sampled_spans(): return [s for s in tracer.reporter.get_spans() if s.is_sampled] # Sometimes the test runs into weird race condition where the # after_send_response() hook is executed, but the span is not yet # recorded. To prevent flaky test runs we check and wait until # all spans are recorded, for up to 1 second. for i in range(0, 1000): spans = get_sampled_spans() if len(spans) >= expect_spans: break yield tornado.gen.sleep(0.001) # yield execution and sleep for 1ms spans = get_sampled_spans() assert expect_spans == len(spans), 'Unexpected number of spans reported' # We expect all trace IDs in collected spans to be the same if expect_spans > 0: spans = tracer.reporter.get_spans() assert 1 == len(set([s.trace_id for s in spans])), \ 'all spans must have the same trace_id'
afd85ef71b14a263f4480a0c0f81e019dc680e34
15,095
from jax import jacfwd, jacrev

def cost_stage_grads(x, u, target, lmbda):
    """
    x: (n_states, )
    u: (n_controls,)
    target: (n_states, )
    lmbda: penalty on controls
    """
    dL = jacrev(cost_stage, (0, 1))   # returns (l_x, l_u)
    d2L = jacfwd(dL, (0, 1))          # second derivatives of the stage cost
    l_x, l_u = dL(x, u, target, lmbda)
    # jacfwd over a tuple-valued function groups results by the argument being
    # differentiated: d2Ldx = (l_xx, l_ux) and d2Ldu = (l_xu, l_uu).
    d2Ldx, d2Ldu = d2L(x, u, target, lmbda)
    l_xx, l_ux = d2Ldx
    l_xu, l_uu = d2Ldu
    return l_x, l_u, l_xx, l_ux, l_uu
a6137653adcb3579775a9bcc9e8ddf03bb6f2cda
15,096
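A runnable sketch under the assumption that cost_stage is a scalar quadratic stage cost; the real definition lives elsewhere in the source project:

import jax.numpy as jnp

def cost_stage(x, u, target, lmbda):
    # assumed form: state tracking error plus a control penalty
    return jnp.sum((x - target) ** 2) + lmbda * jnp.sum(u ** 2)

x = jnp.array([1.0, 2.0])
u = jnp.array([0.5])
target = jnp.zeros(2)
l_x, l_u, l_xx, l_ux, l_uu = cost_stage_grads(x, u, target, lmbda=0.1)
print(l_x.shape, l_uu.shape)  # (2,) (1, 1)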
from sympy import Matrix, cos, sin, symbols

q = symbols('q')

def create_rotation_matrix(angles):
    """
    Returns a rotation matrix that will produce the given Euler angles
    :param angles: (roll, pitch, yaw)
    """
    R_x = Matrix([[1, 0, 0],
                  [0, cos(q), -sin(q)],
                  [0, sin(q), cos(q)]]).evalf(subs={q: angles[0]})
    R_y = Matrix([[cos(q), 0, sin(q)],
                  [0, 1, 0],
                  [-sin(q), 0, cos(q)]]).evalf(subs={q: angles[1]})
    R_z = Matrix([[cos(q), -sin(q), 0],
                  [sin(q), cos(q), 0],
                  [0, 0, 1]]).evalf(subs={q: angles[2]})
    return R_z * R_y * R_x
8431dce383a83d431f9951f76624723f5697cf83
15,097
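A quick numerical check (a sketch; a yaw of pi/2 should rotate the x-axis onto the y-axis):

from sympy import pi, Matrix

R = create_rotation_matrix((0, 0, pi / 2))
print(R * Matrix([1, 0, 0]))  # approximately Matrix([[0], [1], [0]])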
def goodput_for_range(endpoint, first_packet, last_packet): """Computes the goodput (in bps) achieved between observing two specific packets""" if first_packet == last_packet or \ first_packet.timestamp_us == last_packet.timestamp_us: return 0 byte_count = 0 seen_first = False for packet in endpoint.packets: if packet == last_packet: break if packet == first_packet: seen_first = True if not seen_first: continue # Packet contributes to goodput if it was not retransmitted if not packet.is_lost(): byte_count += packet.data_len time_us = last_packet.timestamp_us - first_packet.timestamp_us return byte_count * 8 * 1E6 / time_us
aea56993771c1a250dacdfccf8328c7a0d3ce50b
15,098
from typing import Sequence def validate_scopes( required_scopes: Sequence[str], token_scopes: Sequence[str] ) -> bool: """Validates that all require scopes are present in the token scopes""" missing_scopes = set(required_scopes) - set(token_scopes) if missing_scopes: raise SecurityException(f"Missing required scopes: {missing_scopes}") return not missing_scopes
e979cdd2eb73c89084f72fd4f70390dfe3109c17
15,099
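A usage sketch; SecurityException is whatever exception type the surrounding module defines:

token_scopes = ["read:items", "write:items"]

validate_scopes(["read:items"], token_scopes)    # returns True
try:
    validate_scopes(["admin"], token_scopes)     # raises SecurityException
except SecurityException as exc:
    print(exc)  # Missing required scopes: {'admin'}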
def skip_after_postgres(*ver): """Skip a test on PostgreSQL after (including) a certain version.""" ver = ver + (0,) * (3 - len(ver)) def skip_after_postgres_(f): @wraps(f) def skip_after_postgres__(self): if self.conn.server_version >= int("%d%02d%02d" % ver): return self.skipTest("skipped because PostgreSQL %s" % self.conn.server_version) else: return f(self) return skip_after_postgres__ return skip_after_postgres_
075aecad4bcdd2340ec57089124143cc3642a38b
15,101
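A hedged usage sketch on a hypothetical unittest-style test class whose self.conn exposes server_version:

class SomePostgresTests(TestCase):
    @skip_after_postgres(9, 6)
    def test_legacy_behaviour(self):
        # hypothetical: only runs on servers older than PostgreSQL 9.6
        ...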
import numpy as np
import tensorflow as tf

def make_order_embeddings(max_word_length, order_arr):
    """
    Build stroke-order embeddings constrained to a maximum word length
    from the stroke-order table.
    :param max_word_length: maximum number of strokes kept per character
    :param order_arr: list of per-character stroke-order sequences
    :return: a tensor of shape (num_characters, max_word_length)
    """
    order_arr = [
        row + [0] * (max_word_length - len(row))
        if len(row) <= max_word_length
        else row[:max_word_length - 1] + [row[-1]]
        for row in order_arr
    ]
    order_arr = np.array(order_arr)
    order_embeddings = tf.convert_to_tensor(order_arr)
    return order_embeddings
a2f2ac2d0576b2a22145e583cc1e5b8fa9c1cc77
15,103
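A small sketch of the padding/truncation behaviour (the stroke IDs are made up):

order_arr = [[1, 2], [3, 4, 5, 6, 7]]
emb = make_order_embeddings(4, order_arr)
# rows are padded or truncated to length 4, keeping the final stroke:
# [[1, 2, 0, 0],
#  [3, 4, 5, 7]]
print(emb)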
import numpy
def agg_double_list(l):
    """
    @param l: nested list of per-episode results, [ [...], [...], [...] ],
              where l_i holds the result of each step in the i-th episode
    @type l: list of lists of numbers or arrays
    @return: (mean, std) of the per-episode sums
    @rtype: tuple of numpy arrays
    """
    # sum the step results within each episode, then aggregate across episodes
    s = [numpy.sum(numpy.array(l_i), 0) for l_i in l]
    s_mu = numpy.mean(numpy.array(s), 0)
    s_std = numpy.std(numpy.array(s), 0)
    return s_mu, s_std
82b67e70caccb1f5d430e8e9f0a9c75348d3bc7a
15,104
def get_string_from_bytes(byte_data, encoding="ascii"):
    """Decodes a string from DAT file byte data. Note that in byte form these strings are 0 terminated and this 0 is removed

    Args:
        byte_data (bytes) : the binary data to convert to a string
        encoding (string) : optional, the encoding type to use when converting
    """
    string_bytes = byte_data[:-1]  # strip off the 0 at the end of the string
    string = string_bytes.decode(encoding)
    return string
c07523139e2509fcc19b2ce1d9a933fcb648abfd
15,105
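A one-line sanity check:

assert get_string_from_bytes(b"HERO\x00") == "HERO"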
def default_component(): """Return a default component.""" return { 'host': '192.168.0.1', 'port': 8090, 'name': 'soundtouch' }
780dd84ff613f2bccb56f560e5de77e9d57d9d5a
15,106
from pathlib import Path def check_series_duplicates(patches_dir, series_path=Path('series')): """ Checks if there are duplicate entries in the series file series_path is a pathlib.Path to the series file relative to the patches_dir returns True if there are duplicate entries; False otherwise. """ entries_seen = set() for entry in _read_series_file(patches_dir, series_path): if entry in entries_seen: get_logger().warning('Patch appears more than once in series: %s', entry) return True entries_seen.add(entry) return False
58a5b6fbcf6867d770693938a2fc8308d644d54b
15,107
def is_free(board: list, pos: int) -> bool: """checks if pos is free or filled""" return board[pos] == " "
64b75aa5d5b22887495e631e235632e080646422
15,108
import numpy as np

def rc_from_blocks(blocks):
    """
    Computes the x and y dimensions of each block
    :param blocks: iterable of (n, 2) coordinate arrays, one per block
    :return: (dc, dr) arrays of per-block extents along x and y
    """
    dc = np.array([np.diff(b[:, 0]).max() for b in blocks])
    dr = np.array([np.diff(b[:, 1]).max() for b in blocks])

    return dc, dr
0837367eca7a7668a3f0b0078cf8699f5e5bc4d6
15,109
def serialize_measurement(measurement): """Serializes a `openff.evaluator.unit.Measurement` into a dictionary of the form `{'value', 'error'}`. Parameters ---------- measurement : openff.evaluator.unit.Measurement The measurement to serialize Returns ------- dict of str and str A dictionary representation of a openff.evaluator.unit.Measurement with keys of {"value", "error"} """ return {"value": measurement.value, "error": measurement.error}
69eedd9006c63f5734c762d6113495a913d5a8c4
15,111
from exifpy.objects import Ratio

def nikon_ev_bias(seq):
    """
    http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp
    First digit seems to be in steps of 1/6 EV.
    Does the third value mean the step size?  It is usually 6,
    but it is 12 for the ExposureDifference.
    Check for an error condition that could cause a crash.
    This only happens if something has gone really wrong in
    reading the Nikon MakerNote.
    """
    if len(seq) < 4:
        return ""
    #
    if seq == [252, 1, 6, 0]:
        return "-2/3 EV"
    if seq == [253, 1, 6, 0]:
        return "-1/2 EV"
    if seq == [254, 1, 6, 0]:
        return "-1/3 EV"
    if seq == [0, 1, 6, 0]:
        return "0 EV"
    if seq == [2, 1, 6, 0]:
        return "+1/3 EV"
    if seq == [3, 1, 6, 0]:
        return "+1/2 EV"
    if seq == [4, 1, 6, 0]:
        return "+2/3 EV"
    # Handle combinations not in the table.
    a = seq[0]
    # Causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2]  # Assume third value means the step size
    whole = a // b  # integer division (the original Python 2 `a / b`)
    a = a % b
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str += "EV"
    else:
        r = Ratio(a, b)
        ret_str = ret_str + repr(r) + " EV"
    return ret_str
09a91fc3d82851bb6411b549c282a16f02470e88
15,112
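A couple of spot checks against the lookup table and the fallback path; the fallback output depends on how the external Ratio helper renders itself:

print(nikon_ev_bias([253, 1, 6, 0]))  # -1/2 EV
print(nikon_ev_bias([0, 1, 6, 0]))    # 0 EV
print(nikon_ev_bias([8, 1, 6, 0]))    # e.g. "+1 1/3 EV" (fallback: 8/6 EV)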
import json def process_message(schema, publisher, data): """ Method to process messsages for all the bases that uses Google's Pub/Sub. Args: schema (:obj:`dict`, required): A JSON schema for contract validation. JSON Schema is a vocabulary that allows you to annotate and validate JSON documents. publisher (:obj:`PubSub`, optional): Instance of the '.manager.PubSub'. data (:obj: `dict`, required): A dictionary representing the message body. """ try: data = json.loads(request.data) validate(data, schema, format_checker=FormatChecker()) publisher.publish(data) return data, 202 except ValidationError as validate_error: return str(validate_error), 400
01e396355e6f7fd6913eaff786af39c95da64718
15,113
def rename_record_columns(records, columns_to_rename): """ Renames columns for better desc and to match Socrata column names :param records: list - List of record dicts :param columns_to_rename: dict - Dict of Hasura columns and matching Socrata columns """ for record in records: for column, rename_value in columns_to_rename.items(): if column in record.keys(): record[rename_value] = record.pop(column) return records
41d5cc90a368f61e8ce138c54e9f5026bacd62b9
15,114
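A short usage sketch with made-up column names:

records = [{"crash_id": 1, "speed": 40}]
renamed = rename_record_columns(records, {"crash_id": "Crash ID"})
print(renamed)  # [{'speed': 40, 'Crash ID': 1}]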
import requests
import json
from http import HTTPStatus

def request_similar_resource(token, data_):
    """If a similar resource to the data_ passed exists,
    this method gets and returns it
    """
    headers = {'Authorization': 'Token {}'.format(token.token)}
    # get the resource endpoint
    url_check_res = URL.DB_URL + 'getSimilarResource/'
    # only res code if shadow id not passed
    resource_code = data_['resource_accessing'].split('/')[1]
    url_check_res += '{}/'.format(resource_code)
    if "shadow_id" in data_:
        url_check_res += "{}/".format(data_['shadow_id'])

    req = requests.get(url=url_check_res, headers=headers)
    code_to_return = HTTPStatus.NOT_FOUND
    data_to_return = {"success": False}
    if req.status_code == HTTPStatus.OK:
        code_to_return = HTTPStatus.OK
        data_to_return = json.loads(req.text)

    return code_to_return, data_to_return
84981ff2520050651b0cb83b11198a2fc1117582
15,115
def total (initial, *positionals, **keywords): """ Simply sums up all the passed numbers. """ count = initial for n in positionals: count += n for n in keywords: count += keywords[n] return count
2df0b37ddec7e4bcdd30d302d1b7297cec0ef3cc
15,116
def login_required(f): """Ensures user is logged in before action Checks of token is provided in header decodes the token then returns current user info """ @wraps(f) def wrap(*args, **kwargs): token = None if 'x-access-token' in request.headers: token = request.headers['x-access-token'] if not token: return jsonify({ 'warning': 'Missing token. Please register or login' }), 401 is_token_valid = versions.v2.models.AuthToken.query.filter_by(token=token).first() is_token_valid = is_token_valid.valid if is_token_valid else True if not is_token_valid: return jsonify({ 'warning': 'Login again'}), 401 try: data = jwt.decode(token, app.config['SECRET_KEY']) current_user = data['id'] except jwt.ExpiredSignatureError: return jsonify({ 'warning': 'Expired token. Please login to get a new token' }), 401 except ValueError: return jsonify({ 'warning': 'Invalid token. Please register or login' }), 401 return f(current_user, *args, **kwargs) return wrap
68b36213830f9fad7f6bcf7ec5951534331c5507
15,117
def loop_to_unixtime(looptime, timediff=None): """Convert event loop time to standard Unix time.""" if timediff is None: timediff = _get_timediff() return looptime + timediff
c2da70e961a5802c2da37f04094baec2c6c88f3c
15,118
def groups(column: str) -> "pli.Expr": """ Syntactic sugar for `pl.col("foo").agg_groups()`. """ return col(column).agg_groups()
30fd3eae7abb4c47ce5d12d0c5d17184d5c25770
15,119
import numpy as np

def filter_roidb(roidb, config):
    """ remove roidb entries without usable rois """

    def is_valid(entry):
        """ valid images have at least 1 fg or bg roi """
        overlaps = entry['max_overlaps']
        fg_inds = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
        bg_inds = np.where((overlaps < config.TRAIN.BG_THRESH_HI) &
                           (overlaps >= config.TRAIN.BG_THRESH_LO + 0.0001))[0]
        valid = len(fg_inds) > 0 or len(bg_inds) > 0
        return valid

    num = len(roidb)
    filtered_roidb = [entry for entry in roidb if is_valid(entry)]
    num_after = len(filtered_roidb)
    print('filtered %d roidb entries: %d -> %d' % (num - num_after, num, num_after))

    return filtered_roidb
e93c4e2236c1febd773e216f109cd2657c94084e
15,121
import numpy as np

def seebeck_thermometry(T_Kelvin):
    """
    This function returns the Seebeck coefficient of the thermocouple
    concerned (by default type "E") at a certain temperature. The input of
    the function is a temperature in Kelvin, but the coefficients below are
    for a polynomial function with T in Celsius. The output is S in [V / K].
    """

    coeff_E_below_270K = np.array([
        0,
        5.8665508708E1,
        4.5410977124E-2,
        -7.7998048686E-4,
        -2.5800160843E-5,
        -5.9452583057E-7,
        -9.3214058667E-9,
        -1.0287605534E-10,
        -8.0370123621E-13,
        -4.3979497391E-15,
        -1.6414776355E-17,
        -3.9673619516E-20,
        -5.5827328721E-23,
        -3.4657842013E-26
    ])[::-1]  # Reverse for poly1d

    coeff_E_above_270K = np.array([
        0,
        5.8665508710E1,
        4.5032275582E-2,
        2.8908407212E-5,
        -3.3056896652E-7,
        6.5024403270E-10,
        -1.9197495504E-13,
        -1.2536600497E-15,
        2.1489217569E-18,
        -1.4388041782E-21,
        3.5960899481E-25
    ])[::-1]  # Reverse for poly1d

    T_Celsius = T_Kelvin - 273.15

    ## Selection of coefficients for temperature regime
    index_below = np.where(T_Celsius <= 0)
    index_above = np.where(T_Celsius > 0)

    S_values = np.zeros(np.size(T_Kelvin))

    E_below = np.poly1d(coeff_E_below_270K)  # is a poly1d object in microVolt
    S_below = np.polyder(E_below)  # is a poly1d object in microVolt / Celsius
    S_values[index_below] = S_below(T_Celsius[index_below]) * 1e-6  # is in Volt / K

    E_above = np.poly1d(coeff_E_above_270K)  # is a poly1d object in microVolt
    S_above = np.polyder(E_above)  # is a poly1d object in microVolt / Celsius
    S_values[index_above] = S_above(T_Celsius[index_above]) * 1e-6  # is in Volt / K

    return S_values
8fca07e7e6488a98c96cc76c68d4ab1b656951e5
15,123
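A usage sketch; note the function indexes into T_Celsius, so pass a numpy array rather than a bare scalar:

import numpy as np

T = np.array([4.2, 77.0, 300.0])   # Kelvin
S = seebeck_thermometry(T)         # Seebeck coefficient in V/K at each T
print(S)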
def correlation_permutation_test(
    x, y, f, side, n=10000, confidence=0.99, plot=None, cores=1, seed=None
):
    """This function carries out Monte Carlo permutation tests comparing whether the correlation between two variables is statistically significant

    :param x: An iterable of X values observed
    :param y: An iterable of Y values observed
    :param f: The function for calculating the relationship strength between X and Y
    :param side: The side to use for hypothesis testing
    :param n: The number of permutations to sample, defaults to 10000
    :type n: int, optional
    :param confidence: The probability that the true p-value is contained in the intervals returned, defaults to 0.99
    :type confidence: float, optional
    :param plot: The name of a file to draw a plot of permuted correlations to, defaults to None
    :type plot: str, optional
    :param cores: The number of logical CPUs to use, defaults to 1
    :type cores: int, optional
    :param seed: The seed for randomisation, defaults to None
    :type seed: int, optional
    :return: Named tuple containing upper and lower bounds of p-value at the given confidence
    """
    if seed:
        rng = _rd.Random(seed)
    else:
        rng = _rd.Random()

    if callable(f):
        _f = f
    elif f == "pearsonr":
        _f = _pearsonr
    elif f == "spearmanr":
        _f = _spearmanr
    else:
        raise ValueError(
            "{} not valid for f -- must be a function, 'pearsonr', or 'spearmanr'".format(
                f
            )
        )

    _x = list(x)
    _y = list(y)

    if side in _GT:
        stat_0 = _f(_x, _y)
    elif side in _LT:
        stat_0 = _f(_x, _y)
    elif side in _BOTH:
        stat_0 = abs(_f(_x, _y))
    else:
        raise ValueError(
            "{} not valid for side -- should be 'greater', 'lower', or 'both'".format(
                side
            )
        )

    # random.Random.randint requires integer bounds, so use 10**100 rather
    # than the float literal 1e100.
    jobs = ((_x[:], _y[:], stat_0, _f, rng.randint(0, 10**100)) for _ in range(n))

    if side in _GT:
        result = _job_hander(_correlation_greater, jobs, cores)
    elif side in _LT:
        result = _job_hander(_correlation_lower, jobs, cores)
    else:
        result = _job_hander(_correlation_both, jobs, cores)

    v = []
    p = 0
    for truth, val in result:
        p += truth
        v.append(val)
    p /= n

    if plot:
        plot_histogram(x=v, x0=stat_0, outfile=plot, side=side)

    lower, upper = wilson(p, n, confidence)

    return _RESULT(lower, upper, confidence)
bc6667985d3046f5b97dd01f109c94449f044bf9
15,124