content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
from typing import Union

import streamlit as st

# Driver and is_connection_valid come from the surrounding Synergos project.

def render_orchestrator_inputs() -> Union[Driver, None]:
    """ Renders input form for collecting orchestrator-related connection
        metadata, and assembles a Synergos Driver object for subsequent use.

    Returns:
        Connected Synergos Driver (Driver)
    """
    with st.sidebar.beta_container():
        st.header("NETWORK")

        with st.beta_expander("Orchestrator Parameters", expanded=True):
            orchestrator_host = st.text_input(
                label="Orchestrator IP:",
                help="Declare the server IP of your selected orchestrator."
            )
            orchestrator_port = st.number_input(
                label="Orchestrator Port:",
                value=5000,
                help="Declare the access port of your selected orchestrator."
            )

    if is_connection_valid(host=orchestrator_host, port=orchestrator_port):
        driver = Driver(host=orchestrator_host, port=orchestrator_port)
    else:
        driver = None  # Ensures rendering of unpopulated widgets

    return driver
d67f3a40347a2f247183e2b9092429ca118bc739
22,852
def type_assert_dict(
    d,
    kcls=None,
    vcls=None,
    allow_none: bool=False,
    cast_from=None,
    cast_to=None,
    dynamic=None,
    objcls=None,
    ctor=None,
    desc: str=None,
    false_to_none: bool=False,
    check=None,
):
    """ Checks that every key/value in @d is an instance of @kcls: @vcls
        Will also unmarshal JSON objects to Python objects if the value
        is an instance of dict and @vcls is a class type
    Args:
        d: The dict to type assert
        kcls: The class to type assert for keys.
            NOTE: JSON only allows str keys
        vcls: The class to type assert for values
        allow_none: Allow a None value for the values.
            This would not make sense for the keys.
        cast_from: type-or-tuple-of-types, If @obj is an instance of this
            type(s), cast it to @cast_to
        cast_to: type, The type to cast @obj to if it's an instance of
            @cast_from, or None to cast to @cls.
            If you need more than type(x), use a lambda or factory function.
        dynamic: @cls, A dynamic default value if @d is None,
            and @dynamic is not None.
        objcls: None-or-type, a type to assert @d is, ie: dict, etc...
            Note that isinstance considers collections.OrderedDict
            to be of type dict
        ctor: None-or-static-method: Use this method as the constructor
            instead of __init__
        desc: None-or-string, an optional description for this field,
            for using this function to fully replace docstrings
        false_to_none: bool, True to cast falsey values such as "", 0, [],
            to None
        check: None-lambda-function, Single argument function to check a
            value, return False if not valid,
            for example: lambda x: x >= 0 and x < 256
    Returns:
        @d, note that @d will be recreated, which may be a performance
        concern if @d has many items
    Raises:
        TypeError: If a key is not an instance of @kcls or a value is not
            an instance of @vcls
        ValueError: If @check is not None and a value fails @check
    """
    _check_dstruct(d, objcls)
    if (
        d is None
        and dynamic is not None
    ):
        d = dynamic
    t = type(d)
    return t(
        (
            _check(k, kcls) if kcls else k,
            _check(
                v,
                vcls,
                allow_none,
                cast_from,
                cast_to,
                ctor=ctor,
                false_to_none=false_to_none,
                check=check,
            ) if vcls else v,
        )
        for k, v in d.items()
    )
8a5590a86a0f2e1b4dbef5218c4a294dde3675e1
22,854
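A usage sketch for the `type_assert_dict` entry above; the call only exercises the documented surface, and the exact failure behaviour depends on the private `_check`/`_check_dstruct` helpers that are not part of this snippet.

    # Assert every key is a str and every value an int; the dict is recreated
    # on success and a TypeError is raised on a mismatch (per the docstring).
    settings = type_assert_dict({"retries": 3, "timeout": 30}, kcls=str, vcls=int)

    # With a class type as vcls, dict values would be unmarshalled into
    # instances of that class, e.g. type_assert_dict(payload, kcls=str, vcls=User)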
def get_missing_ids(raw, results):
    """ Compare cached results with overall expected IDs, return missing ones.
    Returns a set. """
    all_ids = set(raw.keys())
    cached_ids = set(results.keys())
    print("There are {0} IDs in the dataset, we already have {1}. {2} are missing.".format(
        len(all_ids), len(cached_ids), len(all_ids) - len(cached_ids)))
    return all_ids - cached_ids
cb380c12f26de8b4d3908964f4314bc7efe43056
22,855
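A quick demonstration of `get_missing_ids` with plain dicts standing in for the raw dataset and the results cache:

    raw = {1: "a", 2: "b", 3: "c"}
    results = {1: "done"}
    missing = get_missing_ids(raw, results)
    # Prints: There are 3 IDs in the dataset, we already have 1. 2 are missing.
    assert missing == {2, 3}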
import collections

def _spaghettinet_edgetpu_s():
    """Architecture definition for SpaghettiNet-EdgeTPU-S."""
    nodes = collections.OrderedDict()
    outputs = ['c0n1', 'c0n2', 'c0n3', 'c0n4', 'c0n5']
    nodes['s0'] = SpaghettiStemNode(kernel_size=5, num_filters=24)
    nodes['n0'] = SpaghettiNode(
        num_filters=48, level=2,
        layers=[
            IbnFusedGrouped(3, 8, 2, 3, False),
        ],
        edges=[SpaghettiPassthroughEdge(input='s0')])
    nodes['n1'] = SpaghettiNode(
        num_filters=64, level=3,
        layers=[
            IbnFusedGrouped(3, 4, 2, 4, False),
            IbnFusedGrouped(3, 4, 1, 4, True),
            IbnFusedGrouped(3, 4, 1, 4, True),
        ],
        edges=[SpaghettiPassthroughEdge(input='n0')])
    nodes['n2'] = SpaghettiNode(
        num_filters=72, level=4,
        layers=[
            IbnOp(3, 8, 2, False),
            IbnFusedGrouped(3, 8, 1, 4, True),
            IbnOp(3, 8, 1, True),
            IbnOp(3, 4, 1, True),
        ],
        edges=[SpaghettiPassthroughEdge(input='n1')])
    nodes['n3'] = SpaghettiNode(
        num_filters=88, level=5,
        layers=[
            IbnOp(3, 8, 2, False),
            IbnOp(3, 8, 1, True),
            IbnOp(3, 4, 1, True),
            IbnOp(3, 4, 1, True),
        ],
        edges=[SpaghettiPassthroughEdge(input='n2')])
    nodes['n4'] = SpaghettiNode(
        num_filters=88, level=6,
        layers=[
            IbnOp(3, 8, 2, False),
            SepConvOp(5, 1, True),
            SepConvOp(5, 1, True),
            SepConvOp(5, 1, True),
        ],
        edges=[SpaghettiPassthroughEdge(input='n3')])
    nodes['n5'] = SpaghettiNode(
        num_filters=88, level=7,
        layers=[
            SepConvOp(5, 2, False),
            SepConvOp(3, 1, True),
        ],
        edges=[SpaghettiPassthroughEdge(input='n4')])
    nodes['c0n0'] = SpaghettiNode(
        num_filters=144, level=5,
        layers=[
            IbnOp(3, 4, 1, False),
            IbnOp(3, 4, 1, True),
            IbnOp(3, 4, 1, True),
            IbnOp(3, 4, 1, True),
        ],
        edges=[
            SpaghettiResampleEdge(input='n3'),
            SpaghettiResampleEdge(input='n4')
        ])
    nodes['c0n1'] = SpaghettiNode(
        num_filters=120, level=4,
        layers=[
            IbnOp(3, 8, 1, False),
            IbnOp(3, 4, 1, True),
            IbnOp(3, 4, 1, True),
            IbnOp(3, 4, 1, True),
        ],
        edges=[
            SpaghettiResampleEdge(input='n2'),
            SpaghettiResampleEdge(input='c0n0')
        ])
    nodes['c0n2'] = SpaghettiNode(
        num_filters=168, level=5,
        layers=[
            IbnOp(3, 4, 1, False),
        ],
        edges=[
            SpaghettiResampleEdge(input='c0n1'),
            SpaghettiResampleEdge(input='c0n0')
        ])
    nodes['c0n3'] = SpaghettiNode(
        num_filters=136, level=6,
        layers=[
            IbnOp(3, 4, 1, False),
            SepConvOp(3, 1, True),
        ],
        edges=[
            SpaghettiResampleEdge(input='n5'),
            SpaghettiResampleEdge(input='c0n0')
        ])
    nodes['c0n4'] = SpaghettiNode(
        num_filters=136, level=7,
        layers=[
            IbnOp(3, 4, 1, False),
        ],
        edges=[
            SpaghettiResampleEdge(input='n5'),
            SpaghettiResampleEdge(input='c0n0')
        ])
    nodes['c0n5'] = SpaghettiNode(
        num_filters=64, level=8,
        layers=[
            SepConvOp(3, 1, False),
            SepConvOp(3, 1, True),
        ],
        edges=[SpaghettiPassthroughEdge(input='c0n4')])
    node_specs = SpaghettiNodeSpecs(nodes=nodes, outputs=outputs)
    return node_specs
93b98a29654f8a838f39d6bfa59f78719ff6c42c
22,856
def instance_of(type):
    """
    A validator that raises a :exc:`TypeError` if the initializer is called
    with a wrong type for this particular attribute (checks are performed
    using :func:`isinstance` therefore it's also valid to pass a tuple of
    types).

    :param type: The type to check for.
    :type type: type or tuple of types

    The :exc:`TypeError` is raised with a human readable error message, the
    attribute (of type :class:`attr.Attribute`), the expected type, and the
    value it got.
    """
    return _InstanceOfValidator(type)
2d41d457e9f7e60fa5e5d77f83454ca75dc112f7
22,857
def ass(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#ass"""
    return filter(stream, ass.__name__, *args, **kwargs)
7f9d88fe1fdeb2337acce25e8b40db94d59f8748
22,859
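A usage sketch for the `ass` wrapper above, assuming it sits alongside ffmpeg-python's generic `filter` helper (as the signature suggests); file names are placeholders.

    import ffmpeg  # ffmpeg-python

    stream = ffmpeg.input("input.mp4")
    stream = ass(stream, "subtitles.ass")  # burn in an .ass subtitle track
    ffmpeg.output(stream, "output.mp4")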
def resultcallback(group):
    """Compatibility layer for Click 7 and 8."""
    if hasattr(group, "result_callback") and group.result_callback is not None:
        decorator = group.result_callback()
    else:
        # Click < 8.0
        decorator = group.resultcallback()
    return decorator
1eb938400c90667eb532366f5ca83d02dd6429e1
22,860
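How the Click compatibility shim above is typically applied, assuming a standard `click.Group`:

    import click

    @click.group()
    def cli():
        pass

    @resultcallback(cli)
    def process_result(result, **kwargs):
        # Runs after any `cli` subcommand returns, on Click 7 and 8 alike.
        click.echo("result: {}".format(result))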
from licensedcode import cache

def get_license_matches(location=None, query_string=None):
    """ Return a sequence of LicenseMatch objects. """
    if not query_string:
        return []
    idx = cache.get_index()
    return idx.match(location=location, query_string=query_string)
5d2891d36dd10e6c4d1c24280df86d1bf39e464a
22,861
def compute_corrector_prf(results, logger, on_detected=True):
    """
    References:
        https://github.com/sunnyqiny/Confusionset-guided-Pointer-Networks-for-Chinese-Spelling-Check/blob/master/utils/evaluation_metrics.py
    """
    TP = 0
    FP = 0
    FN = 0
    all_predict_true_index = []
    all_gold_index = []
    for item in results:
        src, tgt, predict, d_predict = item
        gold_index = []
        for i in range(len(list(src))):
            if src[i] == tgt[i]:
                continue
            else:
                gold_index.append(i)
        all_gold_index.append(gold_index)
        predict_index = []
        for i in range(len(list(src))):
            if src[i] == predict[i]:
                continue
            else:
                predict_index.append(i)

        each_true_index = []
        for i in predict_index:
            if i in gold_index:
                TP += 1
                each_true_index.append(i)
            else:
                FP += 1
        for i in gold_index:
            if i in predict_index:
                continue
            else:
                FN += 1
        all_predict_true_index.append(each_true_index)

    # For the detection Precision, Recall and F1
    dp, dr, detection_f1 = report_prf(TP, FP, FN, 'detection', logger=logger)

    # store FN counts
    n_misreported = int(FN)

    TP = 0
    FP = 0
    FN = 0
    # We only evaluate the correctly detected locations, which differs from
    # the common metrics: we want to see the precision improvement gained by
    # using the confusion set.
    for i in range(len(all_predict_true_index)):
        if len(all_predict_true_index[i]) > 0:
            predict_words = []
            for j in all_predict_true_index[i]:
                predict_words.append(results[i][2][j])
                if results[i][1][j] == results[i][2][j]:
                    TP += 1
                else:
                    FP += 1
            for j in all_gold_index[i]:
                if results[i][1][j] in predict_words:
                    continue
                else:
                    FN += 1

    # For the correction Precision, Recall and F1
    cp, cr, correction_f1 = report_prf(TP, FP, FN, 'correction', logger=logger)

    # common metrics to compare with other baseline methods.
    ccp, ccr, correction_cf1 = report_prf(TP, FP, FN + n_misreported,
                                          'correction_common', logger=logger)
    if not on_detected:
        correction_f1 = correction_cf1

    details = {
        'det_p': dp, 'det_r': dr, 'det_f1': detection_f1,
        'cor_p': cp, 'cor_r': cr, 'cor_f1': correction_f1,
        'common_cor_p': ccp, 'common_cor_r': ccr, 'common_cor_f1': correction_cf1,
    }
    return detection_f1, correction_f1, details
1f86b5f7cd91aba9a50007493d83cd3480eb9e20
22,862
def nonzero_sign(x, name=None):
    """Returns the sign of x with sign(0) defined as 1 instead of 0."""
    with tf.compat.v1.name_scope(name, 'nonzero_sign', [x]):
        x = tf.convert_to_tensor(value=x)
        one = tf.ones_like(x)
        return tf.compat.v1.where(tf.greater_equal(x, 0.0), one, -one)
1955f37bece137537d53cde6681a8f56554cafea
22,863
def tls_control_system_tdcops(tls_control_system):
    """Control system with time-dependent collapse operators"""
    objectives, controls, _ = tls_control_system
    c_op = [[0.1 * sigmap(), controls[0]]]
    c_ops = [c_op]
    H1 = objectives[0].H
    H2 = objectives[1].H
    objectives = [
        krotov.Objective(
            initial_state=ket('0'), target=ket('1'), H=H1, c_ops=c_ops
        ),
        krotov.Objective(
            initial_state=ket('0'), target=ket('1'), H=H2, c_ops=c_ops
        ),
    ]
    controls_mapping = krotov.conversions.extract_controls_mapping(
        objectives, controls
    )
    return objectives, controls, controls_mapping
b94f438291671e863bb759ce024a0e42e6230481
22,864
def create_new_credential(site_name, account_name, account_password):
    """Function to create a new account and its credentials"""
    new_credential = Credentials(site_name, account_name, account_password)
    return new_credential
127335a31054d1b89521a1bc8b354ad51e193be6
22,865
from typing import Dict, Text

def prepare_request_params(
    request_params: Dict, model_id: Text, model_data: Dict
) -> Dict:
    """ reverse hash names and correct types of input params """
    request_params = correct_types(request_params, model_data["columns_data"])
    if model_data["hashed_indexes"]:
        request_params = reverse_hash_names(model_id, request_params)
    return request_params
c7aee17db83e96cb3bcf6ce75ea650414035654a
22,867
from typing import List

def get_all_listening_ports() -> List[int]:
    """ Returns all tcp port numbers in LISTEN state (on any address).
    Reads port state from /proc/net/tcp. """
    res = []
    with open('/proc/net/tcp', 'r') as file:
        try:
            next(file)  # skip the header line
            for line in file:
                # split() rather than split(' '): columns are padded with a
                # variable number of spaces, which would leave empty fields
                split_line = line.strip().split()
                hex_port = split_line[1].split(':')[1]
                hex_state = split_line[3]
                if hex_state == '0A':  # 0A = TCP_LISTEN
                    res.append(int(hex_port, 16))
        except StopIteration:
            pass
    return res
cfc1b4b93358954ad802ce3727bd9d424ef9d136
22,869
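For reference, the fields parsed by `get_all_listening_ports`: the second column of /proc/net/tcp is `address:port` in hex and state `0A` is TCP LISTEN. A tiny check of the port decoding:

    assert int('1F90', 16) == 8080  # '1F90' is how port 8080 appears in /proc/net/tcp
    ports = get_all_listening_ports()  # e.g. [22, 8080] on a host with sshd and a dev server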
async def mock_race_result() -> dict:
    """Create a mock race-result object."""
    return {
        "id": "race_result_1",
        "race_id": "190e70d5-0933-4af0-bb53-1d705ba7eb95",
        "timing_point": "Finish",
        "no_of_contestants": 2,
        "ranking_sequence": ["time_event_1", "time_event_2"],
        "status": 0,
    }
33a2889bcb2665642a3e5743128a478bb103a82b
22,870
import binascii
import re

def qr_to_install_code(qr_code: str) -> tuple[zigpy.types.EUI64, bytes]:
    """Try to parse the QR code.

    If successful, return a tuple of an EUI64 address and install code.
    """
    for code_pattern in QR_CODES:
        match = re.search(code_pattern, qr_code, re.VERBOSE)
        if match is None:
            continue
        ieee_hex = binascii.unhexlify(match[1])
        ieee = zigpy.types.EUI64(ieee_hex[::-1])
        install_code = match[2]
        # install_code sanity check
        install_code = convert_install_code(install_code)
        return ieee, install_code

    raise vol.Invalid(f"couldn't convert qr code: {qr_code}")
91ec3f90385e95b94c47c338f56b26315ff12e99
22,871
def vwr(scene, analyzer, test_number, workbook=None, sheet_format=None,
        agg_dict=None):
    """
    Calculates Variability Weighted Return (VWR).

    :param workbook: Excel workbook to be saved to disk.
    :param analyzer: Backtest analyzer.
    :param sheet_format: Dictionary holding formatting information such as
        col width, font etc.
    :param agg_dict: Collects the dictionary outputs from backtrader for use
        in plotting.
    :return workbook: Excel workbook to be saved to disk.
    """
    # Get the VWR auto ordered nested dictionary from the analyzer
    vwr_dict = analyzer.get_analysis()

    columns = [
        "vwr",
    ]

    if scene["save_db"]:
        df = pd.DataFrame(vwr_dict.values(), index=vwr_dict.keys()).T
        df = add_key_to_df(df, test_number)
        agg_dict["vwr"] = df

    if scene["save_excel"]:
        worksheet = workbook.add_worksheet("vwr")
        worksheet.write_row(0, 0, columns)
        worksheet.set_row(0, None, sheet_format["header_format"])
        worksheet.set_column("A:A", sheet_format["x_wide"], sheet_format["align_left"])
        worksheet.set_column("B:B", sheet_format["medium"], sheet_format["align_left"])
        for i, (k, v) in enumerate(vwr_dict.items()):
            worksheet.write_row(i + 1, 0, [k])
            worksheet.write_row(i + 1, 1, [v])

    return workbook, agg_dict
7fa8c9794e443be91d0cf246c209dfdc86e19f54
22,872
def dec2hms(dec):
    """ ADW: This should really be replaced by astropy """
    DEGREE = 360.
    HOUR = 24.
    MINUTE = 60.
    SECOND = 3600.
    dec = float(dec)
    fhour = dec*(HOUR/DEGREE)
    hour = int(fhour)
    fminute = (fhour - hour)*MINUTE
    minute = int(fminute)
    second = (fminute - minute)*MINUTE
    return (hour, minute, second)
4c2c564631d431d908f66486af40e380598f2724
22,873
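A worked check of `dec2hms`: 180 degrees corresponds to exactly 12 hours (24 h / 360 deg):

    hour, minute, second = dec2hms(180.0)
    print(hour, minute, second)  # 12 0 0.0 (up to floating-point rounding)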
import numpy as np
from matplotlib.mlab import specgram  # assumed source of specgram

def logfpsd(data, rate, window, noverlap, fmin, bins_per_octave):
    """Computes ordinary linear-frequency power spectral density, then
    multiplies by a matrix that converts to log-frequency space. Returns the
    log-frequency PSD, the centers of the frequency bins, and the time points.
    Adapted from Matlab code by Dan Ellis (Columbia):
    http://www.ee.columbia.edu/ln/rosa/matlab/sgram/logfsgram.m"""
    stft, linfreqs, times = specgram(data, window, Fs=rate, noverlap=noverlap)
    # construct matrix for mapping to log-frequency space
    fratio = 2**(1/bins_per_octave)  # ratio between adjacent frequencies
    # cast bin counts to int so they can be used as array dimensions
    nbins = int(np.floor(np.log((rate/2)/fmin)/np.log(fratio)))
    # fftfreqs = (rate/window)*np.arange(window/2+1)
    nfftbins = window//2 + 1
    logffreqs = fmin*np.exp(np.log(2)*np.arange(nbins)/bins_per_octave)
    logfbws = logffreqs*(fratio - 1)
    logfbws = np.maximum(logfbws, rate/window)
    bandoverlapconstant = 0.5475  # controls adjacent band overlap. set by hand by Dan Ellis
    freqdiff = (np.repeat(logffreqs[:, np.newaxis], nfftbins, axis=1)
                - np.repeat(linfreqs[np.newaxis, :], nbins, axis=0))
    freqdiff = freqdiff / np.repeat(
        bandoverlapconstant*logfbws[:, np.newaxis], nfftbins, axis=1)
    mapping = np.exp(-0.5*freqdiff**2)
    rowEs = np.sqrt(2*np.sum(mapping**2, axis=1))
    mapping = mapping/np.repeat(rowEs[:, np.newaxis], nfftbins, axis=1)
    # perform mapping
    logfpsd = np.sqrt(np.dot(mapping, (np.abs(stft)**2)))
    return logfpsd.T, logffreqs, times
cf9a9ee3a248760e6bfa36a82ae782d607269b10
22,874
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def ppg_dual_double_frequency_template(width):
    """ EXPOSE
    Generate a PPG template by using 2 sine waveforms.
    The first waveform doubles the second waveform's frequency.
    :param width: the sample size of the generated waveform
    :return: a 1-D numpy array of PPG waveform having diastolic peak at the
        low position
    """
    t = np.linspace(0, 1, width, False)  # 1 second
    sig = np.sin(2 * np.pi * 2 * t - np.pi / 2) + \
        np.sin(2 * np.pi * 1 * t - np.pi / 6)
    sig_scale = MinMaxScaler().fit_transform(np.array(sig).reshape(-1, 1))
    return sig_scale.reshape(-1)
9c04afb7687e19a96fb84bf1d5367dc79ce6ceea
22,875
def str_input(prompt: str) -> str:
    """Prompt user for string value.

    Args:
        prompt (str): Prompt to display.

    Returns:
        str: User string response.
    """
    return input(f"{prompt} ")
ac6c3c694adf227fcc1418574d4875d7fa637541
22,877
import numpy as np
from galpy.util import bovy_coords  # assumed import: the snippet relies on galpy's coordinate utilities

def action(ra_deg, dec_deg, d_kpc, pm_ra_masyr, pm_dec_masyr, v_los_kms,
           verbose=False):
    """
    parameters:
    ----------
    ra_deg: (float) RA in degrees.
    dec_deg: (float) Dec in degrees.
    d_kpc: (float) Distance in kpc.
    pm_ra_masyr: (float) RA proper motion in mas/yr.
    pm_dec_masyr: (float) Dec proper motion in mas/yr.
    v_los_kms: (float) RV in km/s.

    returns:
    ------
    R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms
    jR: (float) Radial action.
    lz: (float) Vertical ang mom.
    jz: (float) Vertical action.
    """
    ra_rad = ra_deg * (np.pi / 180.)  # RA [rad]
    dec_rad = dec_deg * (np.pi / 180.)  # dec [rad]

    # Galactocentric position of the Sun:
    X_gc_sun_kpc = 8.  # [kpc]
    Z_gc_sun_kpc = 0.025  # [kpc]

    # Galactocentric velocity of the Sun:
    vX_gc_sun_kms = -9.58  # = -U [kms]
    vY_gc_sun_kms = 10.52 + 220.  # = V+v_circ(R_Sun) [kms]
    vZ_gc_sun_kms = 7.01  # = W [kms]

    # a. convert spatial coordinates (ra,dec,d) to (R,z,phi)
    # (ra,dec) --> Galactic coordinates (l,b):
    lb = bovy_coords.radec_to_lb(ra_rad, dec_rad, degree=False, epoch=2000.0)
    # l_rad = lb[:, 0]
    # b_rad = lb[:, 1]
    l_rad = lb[0]
    b_rad = lb[1]
    # (l,b,d) --> Galactocentric cartesian coordinates (x,y,z):
    xyz = bovy_coords.lbd_to_XYZ(l_rad, b_rad, d_kpc, degree=False)
    # x_kpc = xyz[:, 0]
    # y_kpc = xyz[:, 1]
    # z_kpc = xyz[:, 2]
    x_kpc = xyz[0]
    y_kpc = xyz[1]
    z_kpc = xyz[2]
    # (x,y,z) --> Galactocentric cylindrical coordinates (R,z,phi):
    Rzphi = bovy_coords.XYZ_to_galcencyl(x_kpc, y_kpc, z_kpc,
                                         Xsun=X_gc_sun_kpc, Zsun=Z_gc_sun_kpc)
    # R_kpc = Rzphi[:, 0]
    # phi_rad = Rzphi[:, 1]
    # z_kpc = Rzphi[:, 2]
    R_kpc = Rzphi[0]
    phi_rad = Rzphi[1]
    z_kpc = Rzphi[2]

    # b. convert velocities (pm_ra,pm_dec,vlos) to (vR,vz,vT)
    # (pm_ra,pm_dec) --> (pm_l,pm_b):
    pmlpmb = bovy_coords.pmrapmdec_to_pmllpmbb(pm_ra_masyr, pm_dec_masyr,
                                               ra_rad, dec_rad, degree=False,
                                               epoch=2000.0)
    # pml_masyr = pmlpmb[:, 0]
    # pmb_masyr = pmlpmb[:, 1]
    pml_masyr = pmlpmb[0]
    pmb_masyr = pmlpmb[1]
    # (v_los,pm_l,pm_b) & (l,b,d) --> (vx,vy,vz):
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(v_los_kms, pml_masyr, pmb_masyr,
                                              l_rad, b_rad, d_kpc, XYZ=False,
                                              degree=False)
    # vx_kms = vxvyvz[:, 0]
    # vy_kms = vxvyvz[:, 1]
    # vz_kms = vxvyvz[:, 2]
    vx_kms = vxvyvz[0]
    vy_kms = vxvyvz[1]
    vz_kms = vxvyvz[2]
    # (vx,vy,vz) & (x,y,z) --> (vR,vT,vz):
    vRvTvZ = bovy_coords.vxvyvz_to_galcencyl(
        vx_kms, vy_kms, vz_kms, R_kpc, phi_rad, z_kpc,
        vsun=[vX_gc_sun_kms, vY_gc_sun_kms, vZ_gc_sun_kms], galcen=True)
    # vR_kms = vRvTvZ[:, 0]
    # vT_kms = vRvTvZ[:, 1]
    # vz_kms = vRvTvZ[:, 2]
    vR_kms = vRvTvZ[0]
    vT_kms = vRvTvZ[1]
    vz_kms = vRvTvZ[2]

    if verbose:
        print("R = ", R_kpc, "\t kpc")
        print("phi = ", phi_rad, "\t rad")
        print("z = ", z_kpc, "\t kpc")
        print("v_R = ", vR_kms, "\t km/s")
        print("v_T = ", vT_kms, "\t km/s")
        print("v_z = ", vz_kms, "\t km/s")

    jR, lz, jz = calc_actions(R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms)

    return R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms, jR, lz, jz
0e707bff67cee3c909213f14181927a14d5d5656
22,878
def getProcWithParent(host, targetParentPID, procname):
    """ returns (parentPID,procPID) tuple for the procname with the specified parent """
    cmdStr = "ps -ef | grep '%s' | grep -v grep" % (procname)
    cmd = Command("ps", cmdStr, ctxt=REMOTE, remoteHost=host)
    cmd.run(validateAfter=True)
    sout = cmd.get_results().stdout
    logger.info(cmd.get_results().printResult())
    if sout is None:
        return (0, 0)
    lines = sout.split('\n')
    for line in lines:
        if line == '':
            continue
        fields = line.lstrip(' ').split()
        if len(fields) < 3:
            logger.info("not enough fields line: '%s'" % line)
            return (0, 0)
        procPID = int(line.split()[1])
        parentPID = int(line.split()[2])
        if parentPID == targetParentPID:
            return (parentPID, procPID)
    logger.info("couldn't find process with name: %s which is a child of PID: %s" % (procname, targetParentPID))
    return (0, 0)
77f4f13eb381dc840eee26875724cd6e1cdf1e57
22,879
import jax
import jax.numpy as jnp
import numpy as np
import xarray

def temporal_autocorrelation(array):
    """Computes temporal autocorrelation of array."""
    dt = array['time'][1] - array['time'][0]
    length = array.sizes['time']
    subsample = max(1, int(1. / dt))

    def _autocorrelation(array):
        def _corr(x, d):
            del x
            arr1 = jnp.roll(array, d, 0)
            ans = arr1 * array
            ans = jnp.sum(
                jnp.where(
                    jnp.arange(length).reshape(-1, 1, 1, 1) >= d,
                    ans / length, 0),
                axis=0)
            return d, ans

        _, full_result = jax.lax.scan(_corr, 0, jnp.arange(0, length, subsample))
        return full_result

    full_result = jax.jit(_autocorrelation)(
        jnp.array(array.transpose('time', 'sample', 'x', 'model').u))
    full_result = xarray.Dataset(
        data_vars=dict(t_corr=(['time', 'sample', 'x', 'model'], full_result)),
        coords={
            'dt': np.array(array.time[slice(None, None, subsample)]),
            'sample': array.sample,
            'x': array.x,
            'model': array.model
        })
    return full_result
69640da51fa94edd92e793f2c86ac34090e70a28
22,880
import json

def kv_detail(request, kv_class, kv_pk):
    """
    GET to:
        /core/keyvalue/api/<kv_class>/<kv_pk>/detail/
    Returns a single KV instance.
    """
    Klass = resolve_class(kv_class)
    KVKlass = Klass.keyvalue_set.related.model
    try:
        kv = KVKlass.objects.get(pk=kv_pk)
    except KVKlass.DoesNotExist:
        return HttpResponse(
            status=404, content=json.dumps({'success': False})
        )
    return HttpResponse(
        json.dumps(kv.get_bundle())
    )
bd8961c25e39f8540b57753b8f923229e77ae795
22,881
import json

# NOTE: Protocol here is the application's database model (queried below via
# Protocol.query), not typing.Protocol; the original "from typing import
# Protocol" line was a mis-attributed import and has been dropped.

def opentrons_protocol(protocol_id):
    """Get OpenTrons representation of a protocol."""
    current_protocol = Protocol.query.filter_by(id=protocol_id).first()
    if not current_protocol:
        flash('No such specification!', 'danger')
        return redirect('.')
    if current_protocol.user != current_user and not current_protocol.public:
        flash('Not your project!', 'danger')
        return redirect('.')
    if not current_protocol.protocol:
        return ""
    protocol_object = json.loads(current_protocol.protocol)
    converter = OpenTrons()
    resp = make_response(converter.convert(protocol_object,
                                           current_protocol.name,
                                           current_protocol.description))
    resp.headers['Content-Type'] = "text"
    resp.headers['Content-Disposition'] = \
        "attachment; filename=" + current_protocol.name + "-opentrons.py"
    return resp
3a8cc4355763f788f01bb1d95aa43f5cb249a68f
22,882
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.

    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id,
                                site__pk=settings.SITE_ID)

    # Flag on POST
    if request.method == 'POST':
        flag, created = comments.models.CommentFlag.objects.get_or_create(
            comment = comment,
            user = request.user,
            flag = comments.models.CommentFlag.SUGGEST_REMOVAL
        )
        signals.comment_was_flagged.send(
            sender = comment.__class__,
            comment = comment,
            flag = flag,
            created = created,
            request = request,
        )
        return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/flag.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
e0997313c13446150ed3a0f402b2df74089aa4e9
22,884
from typing import List

def get_nodes_for_homek8s_group(inventory, group_name) -> List[str]:
    """Return the nodes' names of the given group from the inventory as a list."""
    hosts_dict = inventory['all']['children']['homek8s']['children'][group_name]['hosts']
    if hosts_dict:
        return list(hosts_dict.keys())
    else:
        return []
806394259816ec4311e69dcd46e7b111c7ca0652
22,885
def is_blank_or_none(value: str):
    """
    Returns True if the specified string is whitespace, empty or None.

    :param value: the string to check
    :return: True if the specified string is whitespace, empty or None
    """
    try:
        return "".__eq__(value.strip())
    except AttributeError:
        return value is None
062e1ab33fc5043435af9e97cdf2443ffc4625bd
22,886
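The three cases of `is_blank_or_none`, for illustration:

    assert is_blank_or_none(None) is True      # AttributeError path
    assert is_blank_or_none("   ") is True     # whitespace-only
    assert is_blank_or_none("text") is False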
import typing

import ipywidgets as widgets

def __get_play_widget(function: typing.Any) -> typing.Any:
    """Generate play widget.

    :param function: Function to associate with Play.
    :return: Play widget.
    """
    play = widgets.interactive(
        function,
        i=widgets.Play(
            value=0,
            min=0,
            max=500,
            step=1,
            interval=5000,
            description="Press play",
            disabled=False,
        ),
    )
    return play
5bb63256f84f1c6f50e2ae007a08e6e794535bc5
22,887
def add_data_to_profile(id, profile_id, read_only, tree_identifier,
                        folder_path=None, web_session=None):
    """Adds data to a profile's data tree.

    Args:
        id (int): The id of the data
        profile_id (int): The id of profile
        read_only (int): The flag that specifies whether the data is read only
        tree_identifier (str): The identifier of the tree
        folder_path (str): The folder path f.e. "/scripts/server1"
        web_session (object): The webserver session object, optional.
            Will be passed in by the webserver automatically

    Returns:
        The id of the folder to which the data was added.
    """
    if tree_identifier.strip() == "":
        raise MSGException(Error.CORE_INVALID_PARAMETER,
                           f"Parameter 'tree_identifier' cannot be empty.")

    # Guard against folder_path being None (its default) before calling strip()
    if folder_path is not None and folder_path.strip() in ("", "/"):
        folder_path = None

    with BackendDatabase(web_session) as db:
        with BackendTransaction(db):
            privileges = backend.get_user_privileges_for_data(db, id, web_session.user_id)
            max_privilege = min([p['read_only'] for p in privileges])
            # We check if the user is owner of given profile
            if backend.get_profile_owner(db, profile_id) == web_session.user_id:
                if max_privilege <= read_only:
                    profile_root_folder_id = backend.get_root_folder_id(
                        db, tree_identifier, 'profile', profile_id)
                    if profile_root_folder_id is None:
                        profile_root_folder_id = backend.create_profile_data_tree(
                            db, tree_identifier, profile_id)
                    if folder_path:
                        folder_profile_id, _ = backend.create_folder(
                            db, folder_path, profile_root_folder_id)
                    else:
                        folder_profile_id = profile_root_folder_id
                    backend.add_data_to_folder(db, id, folder_profile_id, read_only)
                else:
                    raise MSGException(
                        Error.MODULES_SHARING_WITH_HIGHER_PERMISSIONS,
                        "Cannot assign data to profile with higher permission than user has.")
            else:
                raise MSGException(Error.MODULES_USER_HAVE_NO_PRIVILEGES,
                                   "User have no privileges to perform operation.")
    return folder_profile_id
489d246b90506b7581ee8aef66c7f5f2ba6b9b88
22,888
def get_activation_function(activation):
    """
    Gets an activation function module given the name of the activation.

    :param activation: The name of the activation function.
    :return: The activation function module.
    """
    if activation == 'ReLU':
        return nn.ReLU()
    elif activation == 'LeakyReLU':
        return nn.LeakyReLU(0.1)
    elif activation == 'PReLU':
        return nn.PReLU()
    elif activation == 'tanh':
        return nn.Tanh()
    elif activation == 'SELU':
        return nn.SELU()
    elif activation == 'ELU':
        return nn.ELU()
    else:
        raise ValueError('Activation "{}" not supported.'.format(activation))
ae5d8e91667e2dc4fe34eb1ac96cff329d542103
22,889
def get_value_from_time(a_node="", idx=0):
    """
    gets the value from the time supplied.

    :param a_node: MFn.kAnimCurve node.
    :param idx: <int> the time index.
    :return: <tuple> data.
    """
    return OpenMaya.MTime(a_node.time(idx).value(), OpenMaya.MTime.kSeconds).value(), a_node.value(idx),
77c6cb47c4381df3537754dc66e03ef4366557de
22,890
def getrinputs(rtyper, graph):
    """Return the list of reprs of the input arguments to the 'graph'."""
    return [rtyper.bindingrepr(v) for v in graph.getargs()]
bb0f8861a29cd41af59432f267f07ff67601460c
22,891
import numpy as np

def mars_reshape(x_i):
    """ Reshape (n_stacks, 3, 16, 112, 112) into (n_stacks * 16, 112, 112, 3) """
    return np.transpose(x_i, (0, 2, 3, 4, 1)).reshape((-1, 112, 112, 3))
d842d4d9b865feadf7c56e58e66d09c4a3389edf
22,893
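A shape check for `mars_reshape`: 2 stacks of 16 RGB frames become 32 channels-last frames:

    import numpy as np

    x = np.zeros((2, 3, 16, 112, 112))  # (n_stacks, C, T, H, W)
    assert mars_reshape(x).shape == (32, 112, 112, 3)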
import numpy as sc  # assumed alias: the original likely imported scipy/numpy as sc for sqrt

def Rz_to_coshucosv(R, z, delta=1.):
    """
    NAME:
        Rz_to_coshucosv
    PURPOSE:
        calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
    INPUT:
        R - radius
        z - height
        delta= focus
    OUTPUT:
        (cosh(u),cos(v))
    HISTORY:
        2012-11-27 - Written - Bovy (IAS)
    """
    d12 = (z+delta)**2.+R**2.
    d22 = (z-delta)**2.+R**2.
    coshu = 0.5/delta*(sc.sqrt(d12)+sc.sqrt(d22))
    cosv = 0.5/delta*(sc.sqrt(d12)-sc.sqrt(d22))
    return (coshu, cosv)
f01ed002c09e488d89cfd4089343b16346dfb5fd
22,894
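A sanity check of the formulas above: at the focus (R, z) = (0, delta), d1 = 2*delta and d2 = 0, so cosh(u) = cos(v) = 1:

    coshu, cosv = Rz_to_coshucosv(0., 1., delta=1.)
    assert abs(coshu - 1.) < 1e-12 and abs(cosv - 1.) < 1e-12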
import ast

import numpy

def rpFFNET_createdict(cf, ds, series):
    """ Creates a dictionary in ds to hold information about the FFNET data
        used to gap fill the tower data."""
    # get the section of the control file containing the series
    section = pfp_utils.get_cfsection(cf, series=series, mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section) == 0:
        logger.error("ERUsingFFNET: Series " + series + " not found in control file, skipping ...")
        return
    # check that none of the drivers have missing data
    driver_list = ast.literal_eval(cf[section][series]["ERUsingFFNET"]["drivers"])
    target = cf[section][series]["ERUsingFFNET"]["target"]
    for label in driver_list:
        data, flag, attr = pfp_utils.GetSeriesasMA(ds, label)
        if numpy.ma.count_masked(data) != 0:
            logger.error("ERUsingFFNET: driver " + label + " contains missing data, skipping target " + target)
            return
    # create the dictionary keys for this series
    ffnet_info = {}
    # site name
    ffnet_info["site_name"] = ds.globalattributes["site_name"]
    # source series for ER
    opt = pfp_utils.get_keyvaluefromcf(cf, [section, series, "ERUsingFFNET"],
                                       "source", default="Fc")
    ffnet_info["source"] = opt
    # target series name
    ffnet_info["target"] = cf[section][series]["ERUsingFFNET"]["target"]
    # list of drivers
    ffnet_info["drivers"] = ast.literal_eval(cf[section][series]["ERUsingFFNET"]["drivers"])
    # name of ffnet output series in ds
    ffnet_info["output"] = cf[section][series]["ERUsingFFNET"]["output"]
    # results of best fit for plotting later on
    ffnet_info["results"] = {"startdate": [], "enddate": [], "No. points": [], "r": [],
                             "Bias": [], "RMSE": [], "Frac Bias": [], "NMSE": [],
                             "Avg (obs)": [], "Avg (FFNET)": [],
                             "Var (obs)": [], "Var (FFNET)": [], "Var ratio": [],
                             "m_ols": [], "b_ols": []}
    # create an empty series in ds if the FFNET output series doesn't exist yet
    if ffnet_info["output"] not in ds.series.keys():
        data, flag, attr = pfp_utils.MakeEmptySeries(ds, ffnet_info["output"])
        pfp_utils.CreateSeries(ds, ffnet_info["output"], data, flag, attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds):
        ds.merge = {}
    if "standard" not in ds.merge.keys():
        ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # source
    ds.merge["standard"][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data, flag, attr = pfp_utils.MakeEmptySeries(ds, ds.merge["standard"][series]["output"])
        pfp_utils.CreateSeries(ds, ds.merge["standard"][series]["output"], data, flag, attr)
    return ffnet_info
60646de63106895eeb716f763c49195b9c5459e8
22,895
def svn_client_relocate(*args):
    """
    svn_client_relocate(char dir, char from_prefix, char to_prefix,
                        svn_boolean_t recurse, svn_client_ctx_t ctx,
                        apr_pool_t pool) -> svn_error_t
    """
    return _client.svn_client_relocate(*args)
55e78e311d461e5a20e5cd04778e6c8431a6d990
22,896
import numpy as np

def get_pairs(image1, image2, global_shift, current_objects, record, params):
    """ Given two images, this function identifies the matching objects and
    pairs them appropriately. See disparity function. """
    nobj1 = np.max(image1)
    nobj2 = np.max(image2)

    if nobj1 == 0:
        print('No echoes found in the first scan.')
        return
    elif nobj2 == 0:
        zero_pairs = np.zeros(nobj1)
        return zero_pairs

    obj_match = locate_all_objects(image1, image2, global_shift,
                                   current_objects, record, params)
    pairs = match_pairs(obj_match, params)
    return pairs
5013764c7e2a1d5e12abc2107ffdbfca640f1423
22,897
def getComparedVotes(request):
    """
    * @api {get} /getComparedVotes/?people_same={people_same_ids}&parties_same={parties_same_ids}&people_different={people_different_ids}&parties_different={parties_different_ids} List all votes where selected MPs/PGs voted the same/differently
    * @apiName getComparedVotes
    * @apiGroup Session

    * @apiParam {people_same_ids} Comma separated list of Parladata ids for MPs who voted the same
    * @apiParam {parties_same_ids} Comma separated list of Parladata ids for PGs who voted the same
    * @apiParam {people_different_ids} Comma separated list of Parladata ids for MPs who voted differently
    * @apiParam {parties_different_ids} Comma separated list of Parladata ids for PGs who voted differently

    * @apiSuccess {Integer} total Total number of votes so far
    * @apiSuccess {Object[]} results List of votes that satisfy the supplied criteria
    * @apiSuccess {Object} results.session Session data for this vote
    * @apiSuccess {String} results.session.name Name of session.
    * @apiSuccess {Date} results.session.date_ts Date and time of session.
    * @apiSuccess {Date} results.session.date Date of session.
    * @apiSuccess {Integer} results.session.id Id of session.
    * @apiSuccess {Boolean} results.session.in_review Return true or false if session is in review.
    * @apiSuccess {Object[]} results.session.orgs Organization object
    * @apiSuccess {String} results.session.orgs.acronym Organization acronym
    * @apiSuccess {Boolean} results.session.orgs.is_coalition True or False if organization is in coalition
    * @apiSuccess {Integer} results.session.orgs.id Id of organization
    * @apiSuccess {Integer} results.session.orgs.name Name of organization
    * @apiSuccess {Object} results.results Results for this vote
    * @apiSuccess {Integer} results.results.abstain Number of abstentions
    * @apiSuccess {Integer} results.results.against Number of MPs who voted against the motion
    * @apiSuccess {Integer} results.results.not_present Number of MPs who weren't present at the vote
    * @apiSuccess {Integer} results.results.votes_for Number of MPs who voted for the motion
    * @apiSuccess {date} results.results.date The date of the vote
    * @apiSuccess {String} results.results.text The text of the motion which was voted upon
    * @apiSuccess {String[]} results.results.tags List of tags that belong to this motion
    * @apiSuccess {Boolean} results.results.is_outlier Is this vote a weird one (flame icon)?
    * @apiSuccess {Boolean} results.results.result Did the motion pass?

    * @apiExample {curl} Example:
        curl -i https://analize.parlameter.si/v1/s/getComparedVotes/?people_same=&parties_same=1&people_different=&parties_different=2

    * @apiSuccessExample {json} Example response:
    { "total": 2155, "results": [
        { "session": { "name": "44. izredna seja", "date_ts": "2017-05-30T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "30. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9587, "in_review": false },
          "results": { "abstain": 0, "against": 0, "motion_id": 7260, "date": "09.06.2017", "text": "Dnevni red v celoti", "tags": ["Proceduralna glasovanja"], "is_outlier": false, "not_present": 34, "votes_for": 56, "result": true } },
        { "session": { "name": "44. izredna seja", "date_ts": "2017-05-30T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "30. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9587, "in_review": false },
          "results": { "abstain": 0, "against": 34, "motion_id": 7258, "date": "09.06.2017", "text": "Priporo\u010dilo Vladi RS v zvezi z okoljsko katastrofo, ki jo je povzro\u010dil po\u017ear v podjetju Kemis d.o.o. - Amandma: k 5. to\u010dki 9.6.2017 [SDS - Poslanska skupina Slovenske demokratske stranke]", "tags": ["Odbor za infrastrukturo, okolje in prostor"], "is_outlier": false, "not_present": 35, "votes_for": 21, "result": false } },
        { "session": { "name": "30. redna seja", "date_ts": "2017-05-22T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "22. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9580, "in_review": true },
          "results": { "abstain": 4, "against": 18, "motion_id": 7219, "date": "30.05.2017", "text": "Zakon o dopolnitvi Zakona o omejevanju uporabe toba\u010dnih in povezanih izdelkov - Glasovanje o zakonu v celoti", "tags": ["Odbor za zdravstvo"], "is_outlier": false, "not_present": 16, "votes_for": 52, "result": true } },
        { "session": { "name": "30. redna seja", "date_ts": "2017-05-22T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "22. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9580, "in_review": true },
          "results": { "abstain": 6, "against": 23, "motion_id": 7218, "date": "30.05.2017", "text": "Zakon o spremembah in dopolnitvah Zakona o zdravstveni dejavnosti - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017", "tags": ["Odbor za zdravstvo"], "is_outlier": false, "not_present": 19, "votes_for": 42, "result": true } },
        { "session": { "name": "30. redna seja", "date_ts": "2017-05-22T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "22. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9580, "in_review": true },
          "results": { "abstain": 6, "against": 23, "motion_id": 7218, "date": "30.05.2017", "text": "Zakon o spremembah in dopolnitvah Zakona o zdravstveni dejavnosti - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017", "tags": ["Odbor za zdravstvo"], "is_outlier": false, "not_present": 19, "votes_for": 42, "result": true } },
        { "session": { "name": "30. redna seja", "date_ts": "2017-05-22T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "22. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9580, "in_review": true },
          "results": { "abstain": 3, "against": 22, "motion_id": 7217, "date": "30.05.2017", "text": "Priporo\u010dilo v zvezi s problematiko slovenskega zdravstva - Eviden\u010dni sklep MDT 30.5.2017", "tags": ["Odbor za zdravstvo"], "is_outlier": false, "not_present": 14, "votes_for": 51, "result": true } },
        { "session": { "name": "30. redna seja", "date_ts": "2017-05-22T02:00:00", "orgs": [{ "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }], "date": "22. 5. 2017", "org": { "acronym": "DZ", "is_coalition": false, "id": 95, "name": "Dr\u017eavni zbor" }, "id": 9580, "in_review": true },
          "results": { "abstain": 2, "against": 51, "motion_id": 7216, "date": "30.05.2017", "text": "Zakon o spremembah in dopolnitvah Zakona o pokojninskem in invalidskem zavarovanju - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017", "tags": ["Odbor za delo, dru\u017eino, socialne zadeve in invalide"], "is_outlier": false, "not_present": 13, "votes_for": 24, "result": false } }
    ] }
    """
    people_same = request.GET.get('people_same')
    parties_same = request.GET.get('parties_same')
    people_different = request.GET.get('people_different')
    parties_different = request.GET.get('parties_different')

    if people_same != '':
        people_same_list = people_same.split(',')
    else:
        people_same_list = []
    if parties_same != '':
        parties_same_list = parties_same.split(',')
    else:
        parties_same_list = []
    if people_different != '':
        people_different_list = people_different.split(',')
    else:
        people_different_list = []
    if parties_different != '':
        parties_different_list = parties_different.split(',')
    else:
        parties_different_list = []

    if len(people_same_list) + len(parties_same_list) == 0:
        return HttpResponse('Need at least one same to compare.')
    if len(people_same_list) + len(parties_same_list) < 2 and len(people_different_list) + len(parties_different_list) < 1:
        return HttpResponse('Not enough to compare.')

    beginning = 'SELECT * FROM '
    select_same_people = ''
    select_same_parties = ''
    match_same_people_ballots = ''
    match_same_people_persons = ''
    match_same_people_options = ''
    match_same_parties_ballots = ''
    match_same_parties_organizations = ''
    match_same_parties_options = ''
    select_different_people = ''
    select_different_parties = ''
    match_different_people_ballots = ''
    match_different_people_persons = ''
    match_different_people_options = ''
    match_different_parties_ballots = ''
    match_different_parties_organizations = ''
    match_different_parties_options = ''

    # select for same people DONE
    for i, e in enumerate(people_same_list):
        if i < len(people_same_list) - 1:
            select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s, ' % (select_same_people, str(i), str(i), str(i))
        else:
            select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s' % (select_same_people, str(i), str(i), str(i))

    # select for same parties DONE
    for i, e in enumerate(parties_same_list):
        if i < len(parties_same_list) - 1:
            select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s, ' % (select_same_parties, str(i), str(i))
        else:
            select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s' % (select_same_parties, str(i), str(i))

    # select for different people DONE
    for i, e in enumerate(people_different_list):
        if i < len(people_different_list) - 1:
            select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s, ' % (select_different_people, str(i), str(i), str(i))
        else:
            select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s' % (select_different_people, str(i), str(i), str(i))

    # select for different parties DONE
    for i, e in enumerate(parties_different_list):
        if i < len(parties_different_list) - 1:
            select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s, ' % (select_different_parties, str(i), str(i))
        else:
            select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s' % (select_different_parties, str(i), str(i))

    # match same people ballots by vote id DONE
    # if only one person was passed, match_same_people_ballots will remain an empty string
    for i, e in enumerate(people_same_list):
        if i != 0:
            if i < len(people_same_list) - 1:
                match_same_people_ballots = '%s b0.vote_id = b%s.vote_id AND ' % (match_same_people_ballots, str(i))
            else:
                match_same_people_ballots = '%s b0.vote_id = b%s.vote_id' % (match_same_people_ballots, str(i))

    # match same parties ballots by vote id DONE
    # if only one same party was passed match_same_parties_ballots will remain an empty string
    if len(people_same_list) == 0:
        # no same people were passed to the API
        if len(parties_same_list) == 0:
            # no same parties were passed
            return HttpResponse('You need to pass at least one "same" person or party.')
        elif len(parties_same_list) == 1:
            # only one same party was passed, there is nothing to match yet
            match_same_parties_ballots = ''
        else:
            # more than one same party was passed
            for i, e in enumerate(parties_same_list):
                if i != 0:
                    # ignore the first one, because all others will be compared with it
                    if i < len(parties_same_list) - 1:  # not last
                        match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
                    else:  # last
                        match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
    elif len(people_same_list) > 0:
        # one or more same people were passed
        for i, e in enumerate(parties_same_list):
            # do not ignore the first one, because all will be compared to the first person ballot
            if i < len(parties_same_list) - 1:  # not last
                match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
            else:  # last
                match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))

    # match same people with persons DONE
    for i, e in enumerate(people_same_list):
        if i < len(people_same_list) - 1:
            match_same_people_persons = '%s b%s.activity_ptr_id = a%s.id AND a%s.person_id = p%s.id AND p%s.id_parladata = %s AND ' % (match_same_people_persons, str(i), str(i), str(i), str(i), str(i), e)
        else:
            match_same_people_persons = '%s b%s.activity_ptr_id = a%s.id AND a%s.person_id = p%s.id AND p%s.id_parladata = %s' % (match_same_people_persons, str(i), str(i), str(i), str(i), str(i), e)

    # match same parties with organizations DONE
    for i, e in enumerate(parties_same_list):
        if i < len(parties_same_list) - 1:
            match_same_parties_organizations = '%s pb%s.org_voter_id = o%s.id AND o%s.id_parladata = %s AND ' % (match_same_parties_organizations, str(i), str(i), str(i), e)
        else:
            match_same_parties_organizations = '%s pb%s.org_voter_id = o%s.id AND o%s.id_parladata = %s' % (match_same_parties_organizations, str(i), str(i), str(i), e)

    # match same people based on options DONE
    for i, e in enumerate(people_same_list):
        if i != 0:
            if i != len(people_same_list) - 1:
                match_same_people_options = '%s b0.option = b%s.option AND ' % (match_same_people_options, str(i))
            else:
                match_same_people_options = '%s b0.option = b%s.option' % (match_same_people_options, str(i))

    # match same parties based on options
    for i, e in enumerate(parties_same_list):
        if i == 0:
            if select_same_people != '':
                if len(parties_same_list) > 1:
                    match_same_parties_options = '%s b0.option = pb0.option AND ' % (match_same_parties_options)
                else:
                    match_same_parties_options = '%s b0.option = pb0.option ' % (match_same_parties_options)
        else:
            if i != len(parties_same_list) - 1:
                match_same_parties_options = '%s pb0.option = pb%s.option AND ' % (match_same_parties_options, str(i))
            else:
                match_same_parties_options = '%s pb0.option = pb%s.option' % (match_same_parties_options, str(i))

    # compare different people and parties
    if len(people_same_list) > 0:
        # we compare with same people
        # match different people ballots by vote id
        for i, e in enumerate(people_different_list):
            if i < len(people_different_list) - 1:
                match_different_people_ballots = '%s b0.vote_id = db%s.vote_id AND ' % (match_different_people_ballots, str(i))
            else:
                match_different_people_ballots = '%s b0.vote_id = db%s.vote_id' % (match_different_people_ballots, str(i))
        # match different parties ballots by vote id
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_ballots = '%s b0.vote_id = dpb%s.vote_id AND ' % (match_different_parties_ballots, str(i))
            else:
                match_different_parties_ballots = '%s b0.vote_id = dpb%s.vote_id' % (match_different_parties_ballots, str(i))
        # match different people based on options
        for i, e in enumerate(people_different_list):
            if i != len(people_different_list) - 1:
                match_different_people_options = '%s b0.option != db%s.option AND ' % (match_different_people_options, str(i))
            else:
                match_different_people_options = '%s b0.option != db%s.option' % (match_different_people_options, str(i))
        # match different parties based on options
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_options = '%s b0.option != dpb%s.option AND ' % (match_different_parties_options, str(i))
            else:
                match_different_parties_options = '%s b0.option != dpb%s.option ' % (match_different_parties_options, str(i))
    else:
        # we compare with same parties
        # match different people ballots by vote id
        for i, e in enumerate(people_different_list):
            if i < len(people_different_list) - 1:
                match_different_people_ballots = '%s pb0.vote_id = db%s.vote_id AND ' % (match_different_people_ballots, str(i))
            else:
                match_different_people_ballots = '%s pb0.vote_id = db%s.vote_id' % (match_different_people_ballots, str(i))
        # match different parties ballots by vote id
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_ballots = '%s pb0.vote_id = dpb%s.vote_id AND ' % (match_different_parties_ballots, str(i))
            else:
                match_different_parties_ballots = '%s pb0.vote_id = dpb%s.vote_id' % (match_different_parties_ballots, str(i))
        # match different people based on options
        for i, e in enumerate(people_different_list):
            if i != len(people_different_list) - 1:
                match_different_people_options = '%s pb0.option != db%s.option AND ' % (match_different_people_options, str(i))
            else:
                match_different_people_options = '%s pb0.option != db%s.option' % (match_different_people_options, str(i))
        # match different parties based on options
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                match_different_parties_options = '%s pb0.option != dpb%s.option AND ' % (match_different_parties_options, str(i))
            else:
                match_different_parties_options = '%s pb0.option != dpb%s.option ' % (match_different_parties_options, str(i))

    # match different people with person
    for i, e in enumerate(people_different_list):
        if i < len(people_different_list) - 1:
            match_different_people_persons = '%s db%s.activity_ptr_id = da%s.id AND da%s.person_id = dp%s.id AND dp%s.id_parladata = %s AND ' % (match_different_people_persons, str(i), str(i), str(i), str(i), str(i), e)
        else:
            match_different_people_persons = '%s db%s.activity_ptr_id = da%s.id AND da%s.person_id = dp%s.id AND dp%s.id_parladata = %s' % (match_different_people_persons, str(i), str(i), str(i), str(i), str(i), e)

    # match different parties with organizations
    for i, e in enumerate(parties_different_list):
        if i < len(parties_different_list) - 1:
            match_different_parties_organizations = '%s dpb%s.org_voter_id = do%s.id AND do%s.id_parladata = %s AND ' % (match_different_parties_organizations, str(i), str(i), str(i), e)
        else:
            match_different_parties_organizations = '%s dpb%s.org_voter_id = do%s.id AND do%s.id_parladata = %s' % (match_different_parties_organizations, str(i), str(i), str(i), e)

    query = beginning

    q_selectors_list = [select_same_people, select_same_parties, select_different_people, select_different_parties]
    q_selectors_list_clean = [s for s in q_selectors_list if s != '']
    q_selectors = ', '.join(q_selectors_list_clean)
    print 'q_selectors ' + q_selectors
    query = query + ' ' + q_selectors + ' WHERE'

    q_match_ballots_list = [match_same_people_ballots, match_same_parties_ballots, match_different_people_ballots, match_different_parties_ballots]
    q_match_ballots_list_clean = [s for s in q_match_ballots_list if s != '']
    q_match_ballots = ' AND '.join(q_match_ballots_list_clean)
    print 'q_match_ballots ' + q_match_ballots
    # query = query + ' ' + q_match_ballots + ' AND'

    q_match_options_list = [match_same_people_options, match_same_parties_options, match_different_people_options, match_different_parties_options]
    q_match_options_list_clean = [s for s in q_match_options_list if s != '']
    q_match_options = ' AND '.join(q_match_options_list_clean)
    print 'q_match_options ' + q_match_options
    # query = query + ' ' + q_match_options + ' AND'

    q_match_persons_list = [match_same_people_persons, match_different_people_persons]
    q_match_persons_list_clean = [s for s in q_match_persons_list if s != '']
    q_match_persons = ' AND '.join(q_match_persons_list_clean)
    print 'q_match_persons ' + q_match_persons
    # query = query + ' ' + q_match_persons + ' AND'

    q_match_organizations_list = [match_same_parties_organizations, match_different_parties_organizations]
    q_match_organizations_list_clean = [s for s in q_match_organizations_list if s != '']
    q_match_organizations = ' AND '.join(q_match_organizations_list_clean)
    print 'q_match_organizations ' + q_match_organizations
    # query = query + ' ' + q_match_organizations

    after_where_list = [q_match_ballots, q_match_options, q_match_persons, q_match_organizations]
    after_where_list_clean = [s for s in after_where_list if s != '']
    after_where = ' AND '.join(after_where_list_clean)
    query = query + after_where

    if request.GET.get('special'):
        # exclude 'ni'
        exclude_ni_people_same = ''
        exclude_ni_parties_same = ''
        exclude_ni_people_different = ''
        exclude_ni_parties_different = ''
        for i, e in enumerate(people_same_list):
            if i < len(people_same_list) - 1:
                exclude_ni_people_same = '%s b%s.option != \'ni\' AND ' % (exclude_ni_people_same, i)
            else:
                exclude_ni_people_same = '%s b%s.option != \'ni\'' % (exclude_ni_people_same, i)
        for i, e in enumerate(parties_same_list):
            if i < len(parties_same_list) - 1:
                exclude_ni_parties_same = '%s pb%s.option != \'ni\' AND ' % (exclude_ni_parties_same, i)
            else:
                exclude_ni_parties_same = '%s pb%s.option != \'ni\'' % (exclude_ni_parties_same, i)
        for i, e in enumerate(people_different_list):
            if i < len(people_different_list) - 1:
                exclude_ni_people_different = '%s db%s.option != \'ni\' AND ' % (exclude_ni_people_different, i)
            else:
                exclude_ni_people_different = '%s db%s.option != \'ni\'' % (exclude_ni_people_different, i)
        for i, e in enumerate(parties_different_list):
            if i < len(parties_different_list) - 1:
                exclude_ni_parties_different = '%s dpb%s.option != \'ni\' AND ' % (exclude_ni_parties_different, i)
            else:
                exclude_ni_parties_different = '%s dpb%s.option != \'ni\'' % (exclude_ni_parties_different, i)
        exclude_ni_list = [exclude_ni_people_same, exclude_ni_parties_same, exclude_ni_people_different, exclude_ni_parties_different]
        exclude_ni_list_clean = [s for s in exclude_ni_list if s != '']
        exclude_ni = ' AND '.join(exclude_ni_list_clean)
        query = query + ' AND ' + exclude_ni

    # return HttpResponse(query)
    print query
    print 'STATEMENT PARTS:'
    print 'select_same_people ' + select_same_people
    print 'select_same_parties ' + select_same_parties
    print 'match_same_people_ballots ' + match_same_people_ballots
    print 'match_same_people_persons ' + match_same_people_persons
    print 'match_same_people_options ' + match_same_people_options
    print 'match_same_parties_ballots ' + match_same_parties_ballots
    print 'match_same_parties_organizations ' + match_same_parties_organizations
    print 'match_same_parties_options ' + match_same_parties_options
    print 'select_different_people ' + select_different_people
    print 'select_different_parties ' + select_different_parties
    print 'match_different_people_ballots ' + match_different_people_ballots
    print 'match_different_people_persons ' + match_different_people_persons
    print 'match_different_people_options ' + match_different_people_options
    print 'match_different_parties_ballots ' + match_different_parties_ballots
    print 'match_different_parties_organizations ' + match_different_parties_organizations
    print 'match_different_parties_options ' + match_different_parties_options

    ballots = Ballot.objects.raw(query)
    session_ids = set([b.vote.session.id for b in ballots])
    sessions = {}
    for s in session_ids:
        sessions[s] = Session.objects.get(id=s).getSessionData()
    print '[SESSION IDS:]'
    print set(session_ids)

    out = {
        'total': Vote.objects.all().count(),
        'results': []
    }
    for ballot in ballots:
        out['results'].append({
            'session': sessions[ballot.vote.session.id],
            'results': {
                'motion_id': ballot.vote.id_parladata,
                'text': ballot.vote.motion,
                'votes_for': ballot.vote.votes_for,
                'against': ballot.vote.against,
                'abstain': ballot.vote.abstain,
                'not_present': ballot.vote.not_present,
                'result': ballot.vote.result,
                'is_outlier': ballot.vote.is_outlier,
                'tags': ballot.vote.tags,
                'date': ballot.start_time.strftime(API_DATE_FORMAT)
            }
        })
    return JsonResponse(out, safe=False)
e49b2e1b181761e56795868a3dd6ff5a0452cd05
22,898
def get_bits(register, index, length=1):
    """
    Get selected bit(s) from register while masking out the rest.
    Returns as boolean if length==1

    :param register: Register value
    :type register: int
    :param index: Start index (from right)
    :type index: int
    :param length: Number of bits (default 1)
    :type length: int
    :return: Selected bit(s)
    :rtype: Union[int, bool]
    """
    result = (register >> index) & ((1 << length) - 1)
    if length == 1:
        return result == 1
    return result
0663d925c2c74ece359a430392881cf24b75a575
22,899
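Two quick examples of `get_bits` on a status byte:

    status = 0b10110010
    assert get_bits(status, 1) is True              # single bit -> bool
    assert get_bits(status, 4, length=4) == 0b1011  # bits 4-7 as an int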
def addactual():
    """Add actual spendings"""
    if request.method == "POST":
        allPayments = []
        # Current user that is logged-in saved in variable
        userId = session["user_id"]
        month = request.form.get("month")
        housing = float(request.form.get("housing"))
        pensionIns = float(request.form.get("pensionIns"))
        food = float(request.form.get("food"))
        health = float(request.form.get("health"))
        transport = float(request.form.get("transport"))
        debt = float(request.form.get("debt"))
        utilities = float(request.form.get("utilities"))
        clothing = float(request.form.get("clothing"))
        vacation = float(request.form.get("vacation"))
        unexpected = float(request.form.get("unexpected"))
        total = (housing + pensionIns + food + health + transport + debt
                 + utilities + clothing + vacation + unexpected)
        allPayments.append({"month": month, "housing": housing,
                            "pensionIns": pensionIns, "food": food,
                            "health": health, "transport": transport,
                            "debt": debt, "utilities": utilities,
                            "clothing": clothing, "vacation": vacation,
                            "unexpected": unexpected, "total": total})

        # Reject a duplicate month BEFORE inserting; the original checked
        # after the INSERT and assumed at least one existing row.
        allMonths = db.execute("SELECT month FROM payments WHERE userid = :userId",
                               userId=userId)
        enteredMonths = [row["month"] for row in allMonths]
        if month in enteredMonths:
            return apology("Month already entered!")

        db.execute("INSERT INTO payments(userId, month, housing, pensionIns, food, health, transport, debt, utilities, clothing, vacation, unexpected, total)\
                    VALUES(:userId, :month, :housing, :pensionIns, :food, :health, :transport, :debt, :utilities, :clothing, :vacation, :unexpected, :total)",
                   userId=userId, month=month, housing=housing, pensionIns=pensionIns,
                   food=food, health=health, transport=transport, debt=debt,
                   utilities=utilities, clothing=clothing, vacation=vacation,
                   unexpected=unexpected, total=total)

        # Flash message to confirm that the payments were added
        flash("Payments added")
        return redirect("/actual")
    else:
        return render_template("addactual.html")
ccf7d9c362aa1f250959dce51032e43b00ffe412
22,900
def parse_move(line): """ Parse steps from a move string """ text = line.split() if len(text) == 0: raise ValueError("No steps in move given to parse. %s" % (repr(line))) steps = [] for step in text: from_ix = alg_to_index(step[1:3]) if len(step) > 3: if step[3] == 'x': continue elif step[3] == 'n': to_ix = from_ix + 8 elif step[3] == 's': to_ix = from_ix - 8 elif step[3] == 'e': to_ix = from_ix + 1 elif step[3] == 'w': to_ix = from_ix - 1 else: raise ValueError("Invalid step direction.") steps.append((from_ix, to_ix)) else: raise ValueError("Can't represent placing step") return steps
660374a82c19da61df3e0f8468f09c5df7d3be5e
22,901
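# Hypothetical usage of parse_move, assuming alg_to_index maps squares
# row-major with 'a1' -> 0 ... 'h8' -> 63 (so 'a2' -> 8 and 'b4' -> 25):
# 'Ra2n' moves the piece on a2 one square north (a3, index 16), and
# 'Db4e' moves the piece on b4 one square east (c4, index 26).
parse_move("Ra2n Db4e")  # -> [(8, 16), (25, 26)]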
def get_tensor_name(node_name, output_slot): """Get tensor name given node name and output slot index. Args: node_name: Name of the node that outputs the tensor, as a string. output_slot: Output slot index of the tensor, as an integer. Returns: Name of the tensor, as a string. """ return "%s:%d" % (node_name, output_slot)
d563a3e4be696fc1109aa7a60fb4dd140ec65431
22,903
def EstimateMarriageSurvival(resp): """Estimates the survival curve. resp: DataFrame of respondents returns: pair of HazardFunction, SurvivalFunction """ # NOTE: Filling missing values would be better than dropping them. complete = resp[resp.evrmarry == 1].agemarry.dropna() ongoing = resp[resp.evrmarry == 0].age hf = EstimateHazardFunction(complete, ongoing) sf = hf.MakeSurvival() return hf, sf
06f7d307662a70ef4c77073e4202f69ec68ee9e4
22,904
import numpy as np


def get_cowell_data():
    """
    Gets Cowell data.

    :return: Data and headers.
    """
    n = 10000
    Y = np.random.normal(0, 1, n)
    X = np.random.normal(Y, 1, n)
    Z = np.random.normal(X, 1, n)
    D = np.vstack([Y, X, Z]).T
    return D, ['Y', 'X', 'Z']
bd2084b889e8e9068d11b0f49c1d00226bfc6a1f
22,905
def is_str_str_tuple(t): """Is this object a tuple of two strings?""" return (isinstance(t, tuple) and len(t) == 2 and isinstance(t[0], basestring) and isinstance(t[1], basestring))
e568821ee2d7a3926744b93eaf11356744ca4538
22,906
from math import log


def g(dist, aq):
    """
    Compute function g (Lemma 5) for a given full parent instantiation.

    Parameters
    ----------
    dist: list of ints
        Counts of the child variable for a given full parent instantiation.
    aq: float
        Equivalent sample size divided by the product of parent arities.
    """
    res = log(2*min(dist)/aq + 1)
    for d in dist:
        res += - log(2*d/aq + 1)
    return res
505f5c0857f97579bcb1be9e812a90c31ecf4e5e
22,907
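# Worked example of g for counts dist = [3, 1] and aq = 2:
#   g = log(2*1/2 + 1) - log(2*3/2 + 1) - log(2*1/2 + 1)
#     = log(2) - log(4) - log(2) = -log(4)
from math import isclose, log

assert isclose(g([3, 1], 2.0), -log(4))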
def order_tweets_by_polarity(tweets, positive_highest=True): """Sort the tweets by polarity, receives positive_highest which determines the order. Returns a list of ordered tweets.""" reverse = True if positive_highest else False return sorted(tweets, key=lambda tweet: tweet.polarity, reverse=reverse)
996c0aa6c374716f10d4d7a890162fe1bb87eef1
22,909
import pandas as pd
from scipy.stats import beta
from params import VoC_start_date, use_vaccine_effect


def read_in_Reff_file(file_date, VoC_flag=None, scenario=''):
    """
    Read in Reff h5 file produced by generate_RL_forecast.

    Args:
        file_date: (date as string) date of data file
        VoC_flag: (string) variant name ('Alpha' or 'Delta'); Reff is scaled
            up from VoC_start_date onwards
        scenario: (string) optional scenario suffix of the results file
    """
    if file_date is None:
        raise Exception('Need to provide file date to Reff read.')

    file_date = pd.to_datetime(file_date).strftime("%Y-%m-%d")
    df_forecast = pd.read_hdf('results/soc_mob_R'+file_date+scenario+'.h5', key='Reff')

    if (VoC_flag != '') and (VoC_flag is not None):
        # Use a local name so we do not shadow the imported VoC_start_date,
        # which would raise UnboundLocalError on the read below.
        voc_start_date = pd.to_datetime(VoC_start_date)
        if VoC_flag == 'Alpha':
            print('This VoC will be deprecated in future.')
            # Here we apply the beta(6,14)+1 scaling from VoC to the Reff.
            # We do so by editing a slice of the data frame. Forgive me for my sins.
            row_bool_to_apply_VoC = (df_forecast.type == 'R_L') & (pd.to_datetime(df_forecast.date, format='%Y-%m-%d') >= voc_start_date)
            index_map = df_forecast.index[row_bool_to_apply_VoC]
            # Column 8 and onwards are the 2000 Reff samples.
            df_slice_after_VoC = df_forecast.iloc[index_map, 8:]
            multiplier = beta.rvs(6, 14, size=df_slice_after_VoC.shape) + 1
            df_forecast.iloc[index_map, 8:] = df_slice_after_VoC*multiplier
        if VoC_flag == 'Delta':
            # Increase from Delta
            # Here we apply a beta(3,3)+1.6 scaling (mean 2.1) from VoC to the
            # Reff based on CDC results.
            # We do so by editing a slice of the data frame. Forgive me for my sins.
            row_bool_to_apply_VoC = (df_forecast.type == 'R_L') & (pd.to_datetime(df_forecast.date, format='%Y-%m-%d') >= voc_start_date)
            index_map = df_forecast.index[row_bool_to_apply_VoC]
            # Column 8 and onwards are the 2000 Reff samples.
            df_slice_after_VoC = df_forecast.iloc[index_map, 8:]
            multiplier = beta.rvs(3, 3, size=df_slice_after_VoC.shape) + 2.1 - 0.5  # Mean 2.1 for Delta
            df_forecast.iloc[index_map, 8:] = df_slice_after_VoC*multiplier

    if use_vaccine_effect:
        # Load in vaccination effect data
        vaccination_by_state = pd.read_csv('data/vaccination_by_state.csv', parse_dates=['date'])
        vaccination_by_state = vaccination_by_state[['state', 'date', 'overall_transmission_effect']]
        # Make datetime objs into strings
        vaccination_by_state['date_str'] = pd.to_datetime(vaccination_by_state['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
        df_forecast['date_str'] = pd.to_datetime(df_forecast['date'], format='%Y-%m-%d').dt.strftime('%Y-%m-%d')

        # Fill in future days with the same vaccination level as the latest data.
        for state, forecast_df_state in df_forecast.groupby('state'):
            latest_Reff_data_date = max(forecast_df_state.date_str)
            latest_vaccination_data_date = max(vaccination_by_state.groupby('state').get_group(state)['date'])
            latest_vaccination_date_effect = vaccination_by_state.groupby(['state', 'date']).get_group((state, latest_vaccination_data_date))['overall_transmission_effect'].iloc[0]
            # Fill in the future dates with the same level of vaccination.
            vaccination_by_state = vaccination_by_state.append(pd.DataFrame(
                [(state, pd.to_datetime(date), latest_vaccination_date_effect, date.strftime('%Y-%m-%d'))
                 for date in pd.date_range(latest_vaccination_data_date, latest_Reff_data_date)],
                columns=['state', 'date', 'overall_transmission_effect', 'date_str']))

        # Create a (state, date) indexed map of transmission effect
        overall_transmission_effect = vaccination_by_state.set_index(['state', 'date_str'])['overall_transmission_effect'].to_dict()
        # Apply this effect to the forecast
        vaccination_multiplier = df_forecast.apply(lambda row: 1 if row['type'] != 'R_L' else overall_transmission_effect.get((row['state'], row['date_str']), 1), axis=1)
        df_forecast = df_forecast.drop('date_str', axis='columns')
        # Apply the vaccine effect to the forecast. Columns 8 onwards are all the Reff paths.
        df_forecast.iloc[:, 8:] = df_forecast.iloc[:, 8:].multiply(vaccination_multiplier.to_numpy(), axis='rows')

    return df_forecast
219118c333f14ba9f7a44416bd30e38c66b3d5d9
22,911
def string_with_fixed_length(s="", l=30): """ Return a string with the contents of s plus white spaces until length l. :param s: input string :param l: total length of the string (will crop original string if longer than l) :return: """ s_out = "" for i in range(0, l): if i < len(s): s_out += s[i] else: s_out += " " return s_out
2230a2893913eadb2c42a03c85728a5fe79e1e0f
22,913
import urllib2


def fetch_ref_proteomes():
    """
    This method returns a list of all reference proteome accessions
    available from Uniprot
    """
    ref_prot_list = []
    response = urllib2.urlopen(REF_PROT_LIST_URL)
    for ref_prot in response:
        ref_prot_list.append(ref_prot.strip())
    return ref_prot_list
f42c879f78a0e7281df369b40145c5d60aedb32b
22,914
from typing import BinaryIO def load(fp: BinaryIO, *, fmt=None, **kwargs) -> TextPlistTypes: """Read a .plist file (forwarding all arguments).""" if fmt is None: header = fp.read(32) fp.seek(0) if FMT_TEXT_HANDLER["detect"](header): fmt = PF.FMT_TEXT if fmt == PF.FMT_TEXT: return FMT_TEXT_HANDLER["parser"](**kwargs).parse(fp) else: # This one can fail a bit more violently like the original return pl.load(fp, fmt=translation[fmt], **kwargs)
d8445e388b33f69555c1270cebecfd552a34196a
22,915
def _swig_add_metaclass(metaclass): """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass""" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper
d1e4f766827c13fa312ce3485daf43be2fc0eda1
22,916
from typing import Set def create_affected_entities_description(security_data: SecurityData, limit: int = 5) -> str: """Create a description of the entities which are affected by a security problem. :param security_data: the security details for which to create the description :param limit: the maximum number of entities to list in the description :return: the description """ def _stringify(entity_list: Set[str], label: str, the_limit: int): if len(entity_list) > the_limit: return f"{len(entity_list)} {label} affected ([details|{security_data.url}])\n" return f"Affected {label}: {', '.join(entity_list)}\n" desc = _stringify(security_data.affected_entity_names, 'entities', limit) desc += _stringify(security_data.related_hostnames, 'hostnames', limit) return desc
95adc5a6a1fe88e0ec80273deee95e39ee196a55
22,917
import pandas as pd


def amac_person_org_list_ext():
    """
    Asset Management Association of China - information disclosure - practitioner information - external public registry of fund practitioner qualifications
    http://gs.amac.org.cn/amac-infodisc/res/pof/extperson/extPersonOrgList.html
    :return: table of organizations and their practitioner counts
    :rtype: pandas.DataFrame
    """
    data = get_data(url=amac_person_org_list_ext_url, payload=amac_person_org_list_ext_payload)
    need_data = data["content"]
    keys_list = [
        "orgName",
        "orgType",
        "extWorkerTotalNum",
        "extOperNum",
        "extSalesmanNum",
        "extInvestmentManagerNum",
        "extFundManagerNum",
    ]  # keys of the values to extract
    manager_data_out = pd.DataFrame(need_data)
    manager_data_out = manager_data_out[keys_list]
    manager_data_out.columns = [
        "机构名称",
        "机构性质",
        "员工人数",
        "基金从业资格",
        "基金销售业务资格",
        "投资经理",
        "基金经理",
    ]
    return manager_data_out
f84d40a79ae49ebdf5a8ecb6612b37515f5ea676
22,918
def ele_types(eles):
    """
    Returns a list of unique types in eles
    """
    return list({e['type'] for e in eles})
e87ea4c6256c2520f9f714dd065a9e8642f77555
22,920
import numpy as np


def colorful_subgraph(G, colors, k, s, subgraph_type, get_detail=True, verbose=False):
    """Detect if a colorful path exists from s to any node by dynamic programming.

    Args:
        G (nx.Graph): with n nodes and m edges
        colors (list): list of integers representing node colors
        k (int): number of colors
        s (int): starting node
        subgraph_type (str): 'path' or 'cycle'

    Return:
        List of nodes connected to s by at least one colorful path if
        get_detail is False; otherwise the total number of colorful paths.
    """
    n = G.number_of_nodes()
    col = 2**k
    dp_mat = np.zeros((n, col))
    dp_mat[s][power2(colors[s])] = 1

    targets = dp_helper(dp_mat, 2, G, colors, k, set([s]), s, subgraph_type)
    if not get_detail:
        return targets
    else:
        empty_color = 0
        total_count = 0
        for target in targets:
            total_count += backtrack(dp_mat, G, colors, target, s, [str(target)],
                                     set_bit(empty_color, colors[target]), verbose)
        if verbose:
            print('from node', s, 'found in total', total_count,
                  'colorful paths of length', k)
        return total_count
644a1091bbee9bf79f236a8f815ae4c07fb1f538
22,921
import itertools

import numpy as np


def find_all_combos(
        conformer,
        delta=float(120),
        cistrans=True,
        chiral_centers=True):
    """
    A function to find all possible conformer combinations for a given conformer

    Params:
    - conformer (`Conformer`) an AutoTST `Conformer` object of interest
    - delta (int or float): angle increment in degrees (between 0 and 180) that
        determines how many conformers are generated per dihedral
    - cistrans (bool): indication of if one wants to consider cistrans bonds
    - chiral_centers (bool): indication of if one wants to consider chiral centers bonds

    Returns:
    - all_combos (list): a list corresponding to the number of unique conformers created.
    """
    conformer.get_geometries()
    _, torsions = find_terminal_torsions(conformer)

    torsion_angles = np.arange(0, 360, delta)
    torsion_combos = list(itertools.product(
        torsion_angles, repeat=len(torsions)))

    if cistrans:
        cistranss = []
        cistrans_options = ["E", "Z"]
        try:
            ring_info = conformer._pseudo_geometry.GetRingInfo()
        except AttributeError:
            ring_info = conformer.rdkit_molecule.GetRingInfo()
        # Use a loop variable that does not shadow the cistrans argument
        for ct_bond in conformer.cistrans:
            i, j, k, l = ct_bond.atom_indices
            if (ring_info.NumAtomRings(i) != 0) or (ring_info.NumAtomRings(k) != 0):
                continue
            cistranss.append(ct_bond)

        cistrans_combos = list(itertools.product(
            cistrans_options, repeat=len(cistranss)))
    else:
        cistrans_combos = [()]

    if chiral_centers:
        chiral_centerss = []
        chiral_options = ["R", "S"]
        try:
            ring_info = conformer._pseudo_geometry.GetRingInfo()
        except AttributeError:
            ring_info = conformer.rdkit_molecule.GetRingInfo()
        for center in conformer.chiral_centers:
            if ring_info.NumAtomRings(center.atom_index) != 0:
                continue
            chiral_centerss.append(center)

        chiral_combos = list(itertools.product(
            chiral_options, repeat=len(chiral_centerss)))
    else:
        chiral_combos = [()]

    all_combos = list(
        itertools.product(
            torsion_combos,
            cistrans_combos,
            chiral_combos))

    return all_combos
1b5c5f44de23524a9392f51e76f46ef0f234648c
22,922
def generate_abbreviations( labels: tp.Iterable[str], max_abbreviation_len: int = 3, dictionary: tp.Union[tp.Tuple[str], str] = "cdfghjklmnpqrstvxz"): """ Returns unique abbreviations for the given labels. Generates the abbreviations with :func:`beatsearch.utils.generate_unique_abbreviation`. :param labels: labels to abbreviate :param max_abbreviation_len: maximum length of the abbreviations :param dictionary: characteristic characters (defaults to consonants) :return: abbreviations of the given labels """ abbreviations = list() for label in labels: abbreviations.append(generate_unique_abbreviation( label, max_len=max_abbreviation_len, taken_abbreviations=abbreviations, dictionary=dictionary )) return abbreviations
a22e68990147bd973c4a7af8e9e1a8f28fa7b4ac
22,924
from itertools import combinations


def best_hand(hand):
    """From a 7-card "hand", returns the best 5-card "hand" """
    best_rank = 0, 0, 0
    best_combination = None
    for combination in combinations(hand, 5):
        current_rank = hand_rank(combination)
        if compare(current_rank, best_rank):
            best_rank = current_rank
            best_combination = combination
    return best_combination
c885625c2b5f60453b6dca59e25003b9f977e9d4
22,925
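# Hypothetical usage of best_hand, assuming cards are strings such as '6C'
# (rank + suit) and that hand_rank/compare implement standard poker ranking:
best_hand("6C 7C 8C 9C TC 5C JS".split())
# -> the 5-card straight flush ('6C', '7C', '8C', '9C', 'TC')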
def calculate_label_counts(examples): """Assumes that the examples each have ONE label, and not a distribution over labels""" label_counts = {} for example in examples: label = example.label label_counts[label] = label_counts.get(label, 0) + 1 return label_counts
4c45378c6e29ce3d1b40b4d02a112e1fbd23d8b6
22,926
def printer(arg1): """ Even though 'times' is destroyed when printer() has been called, the 'inner' function created remembers what times is. Same goes for the argument arg1. """ times = 3 def inner(): for i in range(times): print(arg1) return inner
7e3d2033602eaef9ef570c97a058208066073427
22,927
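# Usage sketch: the closure returned by printer remembers both arg1 and
# times even after printer itself has returned.
say_hi = printer("hi")
say_hi()  # prints "hi" three times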
from time import sleep

from bs4 import BeautifulSoup


def from_get_proxy():
    """
    From "http://www.getproxy.jp"
    :return:
    """
    base = 'http://www.getproxy.jp/proxyapi?' \
           'ApiKey=659eb61dd7a5fc509bef01f2e8b15669dfdb0f54' \
           '&area={:s}&sort=requesttime&orderby=asc&page={:d}'
    urls = [base.format('CN', i) for i in range(1, 25)]
    urls += [base.format('US', i) for i in range(1, 25)]
    urls += [base.format('CN', i) for i in range(25, 50)]
    urls += [base.format('US', i) for i in range(25, 50)]
    proxies = []
    i = 0
    retry = 0
    length = len(urls)
    while i < length:
        res = _safe_http(urls[i])
        try:
            soup = BeautifulSoup(res, 'lxml')
        except Exception:
            i += 1
            continue
        data = soup.find_all('ip')
        if len(data) == 0:
            retry += 1
            if retry == 4:
                break
            else:
                sleep(62)
        else:
            retry = 0
            proxies += [pro.text for pro in data]
            i += 1
    return proxies
b3302b0092eb973022e2d322cc00e1391fe68c8b
22,928
def KORL(a, kappa=None): """ log rounds k-ary OR """ k = len(a) if k == 1: return a[0] else: t1 = KORL(a[:k//2], kappa) t2 = KORL(a[k//2:], kappa) return t1 + t2 - t1.bit_and(t2)
2c85f7131dcfe0b35d3bfd8b04b876fad320572f
22,929
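# Plain-integer analogue of the same O(log k) OR recursion used by KORL,
# where bit_and becomes &; OR of two bits by inclusion-exclusion: a + b - (a & b).
def korl_plain(bits):
    k = len(bits)
    if k == 1:
        return bits[0]
    t1 = korl_plain(bits[:k//2])
    t2 = korl_plain(bits[k//2:])
    return t1 + t2 - (t1 & t2)

assert korl_plain([0, 1, 0, 0]) == 1
assert korl_plain([0, 0]) == 0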
import json

import jose.jws
import requests


def verify(token):
    """Verifies a JWS token, returning the parsed token if the token
    has a valid signature by the key provided by the key of the OpenID
    Connect server stated in the ISS claim of the token. If the
    signature does not match that key, None is returned.

    """
    unverified_token_data = json.loads(jose.jws.get_unverified_claims(token))
    jwks_uri = requests.get("%s/.well-known/openid-configuration" %
                            unverified_token_data["iss"]).json()["jwks_uri"]
    keys = requests.get(jwks_uri).json()["keys"]
    for key in keys:
        try:
            verified_token_data = json.loads(
                jose.jws.verify(token, key, [key["alg"]]))
        except Exception:
            pass
        else:
            return verified_token_data
    return None
8d1dac4d1c87de3d2d619f58bdf077f82b54dfda
22,930
def get_listing_panel(tool, ghidra): """ Get the code listing UI element, so we can get up-to-date location/highlight/selection """ cvs = tool.getService(ghidra.app.services.CodeViewerService) return cvs.getListingPanel()
f14477cf13cb7eb4e7ede82b0c2068ca53a30723
22,931
def template_data(environment, template_name="report_html.tpl", **kwds): """Build an arbitrary templated page. """ template = env.get_template(template_name) return template.render(**environment)
6b3c1ea5c280931280b5d6c69f380b9349ac0627
22,932
def resnet152_ibn_a(**kwargs): """ Constructs a ResNet-152-IBN-a model. """ model = ResNet_IBN(block=Bottleneck_IBN, layers=[3, 8, 36, 3], ibn_cfg=('a', 'a', 'a', None), **kwargs) return model
5cb059910c5442b0df7c08471f75b96fe3fb4c80
22,933
import scipy.optimize


def calibratePose(pts3, pts2, cam, params_init):
    """
    Calibrates the camera by updating R,t so that pts3 projects as close
    as possible to pts2

    :param pts3: Coordinates of N points stored in a array of shape (3,N)
    :param pts2: Coordinates of N points stored in a array of shape (2,N)
    :param cam: Initial estimate of camera
    :param params_init: Initial estimate of the camera extrinsic parameters (R,t)
    :return: Refined estimate of camera with updated R,t parameters
    """
    func = lambda rt: residuals(pts3, pts2, cam, rt)
    least = scipy.optimize.leastsq(func, params_init)[0]
    cam.update_extrinsics(least)
    return cam
5f1fcf55ec934596fd46f129d12e8457173239eb
22,934
import base64
from inspect import stack


def image_to_base64(file_image):
    """
    Converts an input file (PNG) to Base64.

    The (PNG) file read here is stored on the machine running the model.

    # Arguments
        file_image               - Required : Path of the image file (String)

    # Returns
        built_base64             - Required : Value in Base64 format (BASE64)
    """

    # Initialize the variable that will receive the Base64 value
    built_base64 = None

    try:
        # Read the image file and encode its contents as Base64
        built_base64 = base64.b64encode(open(file_image, 'rb').read())

    except Exception as ex:
        execute_log.error("ERROR IN FUNCTION {} - {}".format(stack()[0][3], ex))

    return built_base64
74e9c46ce48e23fdb5453cb9ce5223dfb8e6004b
22,935
from pathlib import Path from typing import Set def get_files_recurse(path: Path) -> Set: """Get all files recursively from given :param:`path`.""" res = set() for p in path.rglob("*"): if p.is_dir(): continue res.add(p) return res
c129ce43130da09962264f6e7935410685815943
22,936
from typing import List def img_after_ops(img: List[str], ops: List[int]) -> List[str]: """Apply rotation and flip *ops* to image *img* returning the result""" new_img = img[:] for op in ops: if op == Tile.ROTATE: new_img = [cat(l)[::-1] for l in zip(*new_img)] elif op == Tile.FLIP: new_img = [l[::-1] for l in new_img] return new_img
a28f1dbdf7f756c9b8b313d889596797466ab729
22,937
import functools
import urllib


def authenticated(method):
    """Decorate methods with this to require that the user be logged in.

    Fix the redirect url with full_url. Tornado uses uri by default.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        user = self.current_user
        if not user:
            if self.request.method == "GET":
                url = self.get_login_url()
                if "?" not in url:
                    url += "?" + urllib.urlencode(dict(next=self.request.full_url()))
                self.redirect(url)
                return
            raise HTTPError(403)
        #self._current_user = user
        return method(self, *args, **kwargs)
    return wrapper
c4dc18af60b9270d644ed807ea1c74b821ea7bca
22,938
import math def ring_samp_ranges(zma, rng_atoms): """ Set sampling range for ring dihedrals. :param zma: Z-Matrix :type zma: automol.zmat object :param rng_atoms: idxs for atoms inside rings :type rng_atoms: list """ samp_range_dct = {} ring_value_dct = ring_dihedrals(zma, rng_atoms) for key, value in ring_value_dct.items(): samp_range_dct[key] = [value - math.pi/4, value + math.pi/4] return samp_range_dct
6e1958f66f596d9b1230864e3b9a2b73cd01cb35
22,939
def users_key(group='default'): """ Returns the user key """ return db.Key.from_path('users', group)
1912165ff75c39c9fbcb1432f46ef80f9b08c096
22,940
def VolumetricFlow(self): """Volumetric flow (m^3/hr).""" stream, mol = self.data m = mol[0] if m: c = self.name # c = compound c.T = stream.T c.P = stream.P c.phase = stream._phase return c.Vm * m * 1000 else: return 0.
c799c27079494561e30975a6e03b5c1cefe9a907
22,941
def build_queue_adapter(workflow_client, logger=None, **kwargs): """Constructs a queue manager based off the incoming queue socket type. Parameters ---------- workflow_client : object ("distributed.Client", "fireworks.LaunchPad") A object wrapper for different distributed workflow types logger : logging.Logger, Optional. Default: None Logger to report to **kwargs Additional kwargs for the Adapter Returns ------- ret : Adapter Returns a valid Adapter for the selected computational queue """ adapter_type = type(workflow_client).__module__ + "." + type(workflow_client).__name__ if adapter_type == "parsl.dataflow.dflow.DataFlowKernel": adapter = parsl_adapter.ParslAdapter(workflow_client, logger=logger) elif adapter_type == "distributed.client.Client": adapter = dask_adapter.DaskAdapter(workflow_client, logger=logger) elif adapter_type == "fireworks.core.launchpad.LaunchPad": adapter = fireworks_adapter.FireworksAdapter(workflow_client, logger=logger) else: raise KeyError("QueueAdapter type '{}' not understood".format(adapter_type)) return adapter
bbd013fef1095dd4881a8b51561ed4080682141e
22,942
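# Illustration of how build_queue_adapter derives its dispatch key: the key
# is simply the fully qualified class name of the workflow client object.
class _FakeClient(object):
    pass

client = _FakeClient()
adapter_type = type(client).__module__ + "." + type(client).__name__
print(adapter_type)  # "__main__._FakeClient" -> would raise KeyError above;
# a distributed.client.Client instance yields "distributed.client.Client"
# and selects the DaskAdapter branch.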
import torch


def pad_sents(sents, pad_token, return_tensor=False):
    """ Pad list of sentences according to the longest sentence in the batch.
        The paddings are added at the end of each sentence.

    @param sents (list[Tensor]): list of sentences, where each sentence is
                                    represented as a 1D tensor of token ids
    @param pad_token (int): id of the padding token
    @param return_tensor (bool): if True, return a single (batch, max_len)
                                    LongTensor instead of a list of tensors
    @returns sents_padded: sentences shorter than the longest sentence padded
                                    out with pad_token, such that each sentence
                                    in the batch now has equal length.
    """
    sents_padded = []

    # Length of the longest sentence in the batch:
    maxLen = 0
    for sent in sents:
        maxLen = max(len(sent), maxLen)

    for i in range(len(sents)):
        sen = sents[i].cpu().numpy().tolist()
        for j in range(maxLen - len(sen)):
            sen.append(pad_token)
        sen = torch.tensor(sen, dtype=torch.long).cuda()
        sents_padded.append(sen)

    if return_tensor:
        t = torch.zeros(len(sents), maxLen).long()
        for i in range(len(sents)):
            t[i] = sents_padded[i]
        sents_padded = t.cuda()

    return sents_padded
3100ef6f1924685f7a46b22753830cdf203e565d
22,943
import jax
import jax.numpy as jnp


def _take_along_axis(array, indices, axis):
  """Takes values from the input array by matching 1D index and data slices.

  This function serves the same purpose as jax.numpy.take_along_axis, except
  that it uses one-hot matrix multiplications under the hood on TPUs:
  (1) On TPUs, we use one-hot matrix multiplications to select elements from
      the array.
  (2) Otherwise, we fall back to jax.numpy.take_along_axis.

  Notes:
    - To simplify matters in case (1), we only support slices along the second
      or last dimensions.
    - We may wish to revisit (1) for very large arrays.

  Args:
    array: Source array.
    indices: Indices to take along each 1D slice of array.
    axis: Axis along which to take 1D slices.

  Returns:
    The indexed result.
  """
  if array.ndim != indices.ndim:
    raise ValueError(
        "indices and array must have the same number of dimensions; "
        f"{indices.ndim} vs. {array.ndim}.")

  if (axis != -1 and axis != array.ndim - 1 and  # Not last dimension
      axis != 1 and axis != -array.ndim + 1):  # Not second dimension
    raise ValueError(
        "Only slices along the second or last dimension are supported; "
        f"array.ndim = {array.ndim}, while axis = {axis}.")

  if _favor_one_hot_slices():
    one_hot_length = array.shape[axis]
    one_hot_indices = jax.nn.one_hot(indices, one_hot_length, axis=axis)

    if axis == -1 or array.ndim == 1:
      # Take i elements from last dimension (s).
      # We must use HIGHEST precision to accurately reproduce indexing
      # operations with matrix multiplications.
      result = jnp.einsum(
          "...s,...is->...i",
          array,
          one_hot_indices,
          precision=jax.lax.Precision.HIGHEST)
    else:
      # Take i elements from second dimension (s). We assume here that we
      # always want to slice along the second dimension.
      # We must use HIGHEST precision to accurately reproduce indexing
      # operations with matrix multiplications.
      result = jnp.einsum(
          "ns...,nis...->ni...",
          array,
          one_hot_indices,
          precision=jax.lax.Precision.HIGHEST)
    return jax.lax.convert_element_type(result, array.dtype)
  else:
    return jnp.take_along_axis(array, indices, axis=axis)
9a926a53341e0fc964fc568474ca29db286ed14e
22,944
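# Demonstration of the one-hot matmul trick used above for last-axis slices
# (requires jax); it matches jnp.take_along_axis exactly.
import jax
import jax.numpy as jnp

array = jnp.arange(12.0).reshape(3, 4)
indices = jnp.array([[0], [2], [3]])
one_hot = jax.nn.one_hot(indices, array.shape[-1], axis=-1)  # shape (3, 1, 4)
via_matmul = jnp.einsum("...s,...is->...i", array, one_hot)
via_take = jnp.take_along_axis(array, indices, axis=-1)
assert jnp.allclose(via_matmul, via_take)  # both give [[0.], [6.], [11.]]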
import requests import logging def send_envelope( adfs_host: str, envelope: str, ) -> requests.Response: """Send an envelope to the target ADFS server. Arguments: adfs_host: target ADFS server envelope: envelope to send Returns: ADFS server response """ url = f"http://{adfs_host}/adfs/services/policystoretransfer" headers = {"Content-Type": "application/soap+xml"} response = None try: response = requests.post(url, data=envelope, headers=headers) except Exception as e: logging.error(e) return response
bc59fa99fa28432dd969f1a72bbae00af716b443
22,945
def display_main(choice):
    """ Link option to the main board """
    return main(choice)
66b0b0d36d47b4107b5b57dce9ea94787f3fa83b
22,946
import random def generate_network_table(seed=None): """ Generates a table associating MAC and IP addressed to be distributed by our virtual network adapter via DHCP. """ # we use the seed in case we want to generate the same table twice if seed is not None: random.seed(seed) # number of IPs per network is 253 (2-254) # generate random MACs, set ensures they are unique macs: set[str] = set() while len(macs) < 253: macs.add( "48:d2:24:bf:" + to_byte(random.randint(0, 255)) + ":" + to_byte(random.randint(0, 255)) ) # associate each MAC with a sequential IP table = {} ip_counter = 2 for mac in macs: table[mac] = "192.168.150." + str(ip_counter) ip_counter += 1 return table
d39915c129b2d5a99fc41c90b718fcca17d20cd5
22,947
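# Usage sketch for generate_network_table. The helper to_byte is not shown
# above; it is assumed to render an int as two lowercase hex digits, e.g.:
def to_byte(value):
    return format(value, "02x")

table = generate_network_table(seed=42)
assert len(table) == 253  # one IP (192.168.150.2-254) per unique MAC
assert all(ip.startswith("192.168.150.") for ip in table.values())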
import torch def loss_mGLAD(theta, S): """The objective function of the graphical lasso which is the loss function for the meta learning of glad loss-meta = 1/B(-log|theta| + <S, theta>) Args: theta (tensor 3D): precision matrix BxDxD S (tensor 3D): covariance matrix BxDxD (dim=D) Returns: loss (tensor 1D): the loss value of the obj function """ B, D, _ = S.shape t1 = -1*torch.logdet(theta) # Batch Matrix multiplication: torch.bmm t21 = torch.einsum("bij, bjk -> bik", S, theta) # getting the trace (batch mode) t2 = torch.einsum('jii->j', t21) # print(t1, torch.det(theta), t2) # regularization term # tr = 1e-02 * torch.sum(torch.abs(theta)) meta_loss = torch.sum(t1+t2)/B # sum over the batch return meta_loss
b056a5c57e681cca40c6a7a0d030dee25049e6de
22,948
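# Sanity check of loss_mGLAD: for identity precision and covariance matrices,
# -log|I| = 0 and tr(I @ I) = D, so the loss is D regardless of batch size.
import torch

B, D = 2, 3
theta = torch.eye(D).repeat(B, 1, 1)  # identity precision matrices
S = torch.eye(D).repeat(B, 1, 1)      # identity covariance matrices
assert torch.isclose(loss_mGLAD(theta, S), torch.tensor(float(D)))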
def parse_remote_path(remote_path): """ Wrapper around the utils function - checks for the right protocol """ protocol, bucket, key = utils.parse_remote_path(remote_path) assert protocol == "s3:", "Mismatched protocol (expected AWS S3)" return bucket, key
65c26139d0e28f64ae966a75bf730d1a6b8b2248
22,949
from typing import Callable def operations(func: Callable) -> Callable: """Allows developers to specify operations which should not be called in the fuzzing process. Examples: Ignoring operations specified by operation ids in lists >>> @fuzz_lightyear.exclude.operations ... def b(): ... return ['get_pets', 'get_store_inventory'] Ignoring operations specified by "tag.operation_id" in lists >>> @fuzz_lightyear.exclude.operations ... def c(): ... return ['pets.get_pets', 'store.get_store_inventory'] """ get_operations_fn = _get_formatted_operations(func) get_excluded_operations().update(get_operations_fn()) return func
ce6d9596ff307f15c86d4823d3ebcfdafa5f4e33
22,950
def _gen_dfa_table(t: UxsdComplex) -> str: """Generate a 2D C++ array representing DFA table from an UxsdComplex's DFA. The array is indexed by the state and input token value, such that table[state][input] gives the next state. """ assert isinstance(t.content, UxsdDfa) dfa = t.content.dfa out = "" out += "constexpr int NUM_%s_STATES = %d;\n" % (t.cpp.upper(), len(dfa.states)) out += "constexpr const int NUM_%s_INPUTS = %d;\n" % (t.cpp.upper(), len(dfa.alphabet)) out += "constexpr int gstate_%s[NUM_%s_STATES][NUM_%s_INPUTS] = {\n" % (t.cpp, t.cpp.upper(), t.cpp.upper()) for i in range(0, max(dfa.states)+1): state = dfa.transitions[i] row = [str(state[x]) if state.get(x) is not None else "-1" for x in dfa.alphabet] out += "\t{%s},\n" % ", ".join(row) out += "};\n" return out
f0bae5dd8f897786a62016b7b807e2c7730f1e89
22,951
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from simtk import unit


def get_heat_capacity_derivative(Cv, temperature_list, plotfile='dCv_dT.pdf'):
    """
    Fit a heat capacity vs T dataset to a cubic spline, and compute derivatives

    :param Cv: heat capacity data series
    :type Cv: Quantity or numpy 1D array

    :param temperature_list: List of temperatures used in replica exchange simulations
    :type temperature_list: Quantity or numpy 1D array

    :param plotfile: path to filename to output plot
    :type plotfile: str

    :returns:
       - dCv_out ( 1D numpy array (float) ) - 1st derivative of heat capacity, from a cubic spline evaluated at each point in Cv
       - d2Cv_out ( 1D numpy array (float) ) - 2nd derivative of heat capacity, from a cubic spline evaluated at each point in Cv
       - spline_tck ( scipy spline object (tuple) ) - knot points (t), coefficients (c), and order of the spline (k) fit to Cv data
    """

    xdata = temperature_list
    ydata = Cv

    # Strip units off quantities:
    if type(xdata[0]) == unit.quantity.Quantity:
        xdata_val = np.zeros((len(xdata)))
        xunit = xdata[0].unit
        for i in range(len(xdata)):
            xdata_val[i] = xdata[i].value_in_unit(xunit)
        xdata = xdata_val

    if type(ydata[0]) == unit.quantity.Quantity:
        ydata_val = np.zeros((len(ydata)))
        yunit = ydata[0].unit
        for i in range(len(ydata)):
            ydata_val[i] = ydata[i].value_in_unit(yunit)
        ydata = ydata_val

    # Fit cubic spline to data, no smoothing
    spline_tck = interpolate.splrep(xdata, ydata, s=0)

    xfine = np.linspace(xdata[0], xdata[-1], 1000)
    yfine = interpolate.splev(xfine, spline_tck, der=0)
    dCv = interpolate.splev(xfine, spline_tck, der=1)
    d2Cv = interpolate.splev(xfine, spline_tck, der=2)

    dCv_out = interpolate.splev(xdata, spline_tck, der=1)
    d2Cv_out = interpolate.splev(xdata, spline_tck, der=2)

    figure, axs = plt.subplots(
        nrows=3,
        ncols=1,
        sharex=True,
    )

    axs[0].plot(
        xdata,
        ydata,
        'ok',
        markersize=4,
        fillstyle='none',
        label='simulation data',
    )

    axs[0].plot(
        xfine,
        yfine,
        '-b',
        label='cubic spline',
    )

    axs[0].set_ylabel(r'$C_{V} (kJ/mol/K)$')
    axs[0].legend()

    axs[1].plot(
        xfine,
        dCv,
        '-r',
        label=r'$\frac{dC_{V}}{dT}$',
    )

    axs[1].legend()
    axs[1].set_ylabel(r'$\frac{dC_{V}}{dT}$')

    axs[2].plot(
        xfine,
        d2Cv,
        '-g',
        label=r'$\frac{d^{2}C_{V}}{dT^{2}}$',
    )

    axs[2].legend()
    axs[2].set_ylabel(r'$\frac{d^{2}C_{V}}{dT^{2}}$')
    axs[2].set_xlabel(r'$T (K)$')

    plt.tight_layout()

    plt.savefig(plotfile)
    plt.close()

    return dCv_out, d2Cv_out, spline_tck
49f27209c9f6387fc25936481d5d35cebdc6523f
22,952
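# Minimal sketch of the spline-derivative idea above on synthetic data,
# without units or plotting: fit an exact cubic spline, then evaluate
# its first and second derivatives at the sample points.
import numpy as np
from scipy import interpolate

T = np.linspace(200, 400, 50)
Cv = np.exp(-((T - 300.0) / 30.0) ** 2)  # synthetic heat-capacity peak
tck = interpolate.splrep(T, Cv, s=0)     # cubic spline, no smoothing
dCv = interpolate.splev(T, tck, der=1)   # 1st derivative at each T
d2Cv = interpolate.splev(T, tck, der=2)  # 2nd derivative at each T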
def get_gradients_of_activations(model, x, y, layer_names=None, output_format='simple', nested=False): """ Get gradients of the outputs of the activation functions, regarding the loss. Intuitively, it shows how your activation maps change over a tiny modification of the loss. :param model: keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2', 'mobilenet_v2', 'mobilenetv2']. :param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List. :param y: Model target (Numpy array). In the case of multi-inputs, y should be of type List. :param layer_names: (optional) Single name of a layer or list of layer names for which activations should be returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes. :param output_format: Change the output dictionary key of the function. - 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will return {'d1': ...}. - 'full': output key will match the full name of the output layer name. In the example above, it will return {'d1/BiasAdd:0': ...}. - 'numbered': output key will be an index range, based on the order of definition of each layer within the model. :param nested: (optional) If set, will move recursively through the model definition to retrieve nested layers. Recursion ends at leaf layers of the model tree or at layers with their name specified in layer_names. E.g., a model with the following structure -layer1 -conv1 ... -fc1 -layer2 -fc2 ... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'. If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'. The layer names are generated by joining all layers from top level to leaf level with the separator '/'. :return: Dict {layer_names (specified by output_format) -> activation of the layer output/node (Numpy array)}. """ nodes = _get_nodes(model, output_format, nested=nested, layer_names=layer_names) return _get_gradients(model, x, y, nodes)
5cb9234594b867383f92f4e2e7e91e39eb79d120
22,953
def string_to_screens_and_lines(source, allowed_width, allowed_height, f, pixels_between_lines = None, end_screens_with = (), do_not_include = ()): """ Convert a string to screens and lines. Pygame does not allow line breaks ("\n") when rendering text. The purpose of this function is to break a string into lines and screens given a font and screen dimensions. The following two assumptions are made: 1. Line breaks ("\n") in source denote the start of a new paragraph. Therefore, to have an actual blank line (i.e., an empty string) appear in the returned array, add another "\n" immediately following the first. 2. Spaces denote the end of a word. Parameters: source: the string to divide into screens and lines. allowed_width: the width, in pixels, permitted for lines; can be a number of pixels or a proportion of the active screen's width. allowed_height: same as allowed_width but for the height of a single screen. f: the font with which source is measured. Keyword Parameters: pixels_between_lines: blank pixel rows between lines of text; defaults to None, in which case it is obtained from f. end_screens_with: a restricted set of characters that may end a screen; defaults to an empty tuple, in which case any character ending a word can end a screen. do_not_include: words that are exceptions to the end_screens_with words (e.g., "Mrs." ends in a period but should not end a screen) Returns: screens: a multidimensional list of screens and lines. """ # Check if allowed_height and allowed_width need to be set: if 0 < allowed_width <= 1 and 0 < allowed_height <= 1: allowed_width, allowed_height = screen_dimensions() elif 0 < allowed_width <= 1 or 0 < allowed_height <= 1: raise ValueError("Both or neither of allowed_width and \ allowed_height can be between 0 and 1.") # Check if pixels_between_lines needs to be set: if not pixels_between_lines: pixels_between_lines = f.get_linesize() else: assert pixels_between_lines > 0, "pixels_between_lines must be \ positive." # Make sure that allowed_height can accommodate the tallest word in # source: assert f.size(source)[1] <= allowed_height, "allowed_height cannot \ accommodate source." screens = [] # Break source into paragraphs and paragraphs into single words: paragraphs = source.split("\n") single_words = [] for paragraph in paragraphs: individual_words = paragraph.split(" ") # While here, verify that the longest word fits: widest_word, pixels = longest_string_to_render(individual_words, f) assert pixels < allowed_width, "{:s} in source is too long for \ allowed_width.".format(widest_word) single_words.append(individual_words) # The function branches next, depending on whether restrictions have been # placed on where screen breaks can occur. if not end_screens_with: # Screen breaks can occur following any word. # Break single_words into lines without regard to screens: lines_of_text, total_height = wrap_text( single_words, allowed_width, f, return_height = True, line_height = pixels_between_lines ) if total_height <= allowed_height: # Everything fits on one screen. screens.append(lines_of_text) else: # There will be at least two screens. # Initialize the first screen and a height counter: screen = [] screen_height = 0 for line in lines_of_text: line_height = f.size(line)[1] screen_height = screen_height+line_height+pixels_between_lines if screen_height < allowed_height: # line fits on the current screen. screen.append(line) elif screen_height == allowed_height or screen_height-pixels_between_lines < allowed_height: # line fits, but no more will. 
                    screen.append(line)
                    screens.append(screen)
                    screen = []
                    screen_height = 0
                else:
                    # line doesn't fit.
                    screens.append(screen)
                    screen = [line]
                    screen_height = line_height+pixels_between_lines
            # Check for a remaining screen:
            if screen:
                screens.append(screen)
    else:
        # Screens can only end following specific strings.
        # These strings do not need to be end-of-sentence characters, but it
        # is difficult to imagine what else they would be. Therefore, I refer
        # to the resulting strings as sentences, acknowledging that this may
        # be incorrect terminology.
        # Break paragraphs into sentences:
        sentences = []
        for paragraph in paragraphs:
            if sentences:
                # This is not the first line, so start the paragraph on a new
                # line:
                sentences.append("")
            if paragraph:
                # paragraph is not a blank line.
                # Break it into sentences:
                paragraph_as_sentences = text_to_sentences(
                    paragraph,
                    terminators = end_screens_with,
                    exclude = do_not_include
                )
                sentences = sentences+paragraph_as_sentences
            else:
                # paragraph is a blank line.
                sentences.append("")
        # Initialize the first screen:
        screen = []
        for sentence in sentences:
            # Determine whether sentence starts on a new line or continues
            # from the current line:
            if screen:
                # If the last line in screen is blank, then sentence starts on
                # a new line.
                last_line = screen[-1]
                if last_line:
                    next_line = False
                else:
                    next_line = True
            else:
                # This screen is blank.
                # Arbitrarily set next_line to False:
                next_line = False
            # Try adding sentence to the current screen:
            possible_screen, screen_height = wrap_text(
                sentence,
                allowed_width,
                f,
                old_text = screen,
                start_new_line = next_line,
                return_height = True,
                line_height = pixels_between_lines
            )
            if screen_height <= allowed_height:
                # Update the current screen:
                screen = possible_screen
            else:
                # This sentence does not fit.
                # If screen is currently blank, it means that sentence needs
                # to be broken across screens (i.e., it will not fit on a
                # single screen).
                if screen:
                    # This is not an issue.
                    # Save screen:
                    screens.append(screen)
                    # Initialize the next screen with sentence:
                    screen, current_height = wrap_text(
                        sentence,
                        allowed_width,
                        f,
                        return_height = True,
                        line_height = pixels_between_lines
                    )
                    if current_height > allowed_height:
                        # sentence needs to be broken across screens.
                        # This can be accomplished by calling the present
                        # function without restrictions on screen endings.
                        multiple_screens = string_to_screens_and_lines(
                            sentence,
                            allowed_width,
                            allowed_height,
                            f,
                            pixels_between_lines = pixels_between_lines
                        )
                        for s in multiple_screens:
                            screens.append(s)
                        # sentence has now been handled in full, so start the
                        # next screen empty:
                        screen = []
                else:
                    # screen is empty, but sentence will not fit.
                    # Call the present function to get this sentence's
                    # screens:
                    multiple_screens = string_to_screens_and_lines(
                        sentence,
                        allowed_width,
                        allowed_height,
                        f,
                        pixels_between_lines = pixels_between_lines
                    )
                    for s in multiple_screens:
                        screens.append(s)
        # Check if a final screen needs to be added:
        if screen:
            screens.append(screen)
    return screens
f43907fdf47b342b1eac3100c07e9d452b0d865f
22,954
def trim_spectrum(self, scouse, flux): """ Trims a spectrum according to the user inputs """ return flux[scouse.trimids]
3f18259986e677f8e8a9718408cdb56352d956e5
22,955
def test_bitwise_and(a, b): """ >>> test_bitwise_and(0b01, 0b10) 0L >>> test_bitwise_and(0b01, 0b11) 1L >>> test_bitwise_and(0b01, 2.0) Traceback (most recent call last): ... NumbaError: 27:15: Expected an int, or object, or bool >>> test_bitwise_and(2.0, 0b01) Traceback (most recent call last): ... NumbaError: 27:11: Expected an int, or object, or bool """ return a & b
0855921300751368eb0ad3f3cba37b6ddac759fd
22,956
import inspect def flagFunction(method, name=None): """ Determine whether a function is an optional handler for a I{flag} or an I{option}. A I{flag} handler takes no additional arguments. It is used to handle command-line arguments like I{--nodaemon}. An I{option} handler takes one argument. It is used to handle command-line arguments like I{--path=/foo/bar}. @param method: The bound method object to inspect. @param name: The name of the option for which the function is a handle. @type name: L{str} @raise UsageError: If the method takes more than one argument. @return: If the method is a flag handler, return C{True}. Otherwise return C{False}. """ if _PY3: reqArgs = len(inspect.signature(method).parameters) if reqArgs > 1: raise UsageError('Invalid Option function for %s' % (name or method.__name__)) if reqArgs == 1: return False else: reqArgs = len(inspect.getargspec(method).args) if reqArgs > 2: raise UsageError('Invalid Option function for %s' % (name or method.__name__)) if reqArgs == 2: return False return True
265f92ca52ec4b3c1b6b3955a7ac719f64099bad
22,957
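# Usage sketch for flagFunction: a bound method with no extra parameters is
# a flag handler (--nodaemon), one with a single parameter is an option
# handler (--path=/foo/bar).
class Options(object):
    def opt_nodaemon(self):
        pass
    def opt_path(self, value):
        pass

opts = Options()
assert flagFunction(opts.opt_nodaemon) is True
assert flagFunction(opts.opt_path) is False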
from pathlib import Path def envnotfound(env): """`'Env "my-venv" not found. Did you mean "./my-venv"?'`""" msg = f'Env "{env}" not found.' if arg_is_name(env) and Path(env).exists(): msg += f'\nDid you mean "./{env}"?' return msg
e2437bbf141a841650f33ede5d7fb6489a954f00
22,958
import numpy as np


def random_translation_along_x(gt_boxes, points, offset_range):
    """
    Args:
        gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C)
        offset_range: [min, max]
    Returns:
    """
    offset = np.random.uniform(offset_range[0], offset_range[1])

    points[:, 0] += offset
    gt_boxes[:, 0] += offset

    # if gt_boxes.shape[1] > 7:
    #     gt_boxes[:, 7] += offset

    return gt_boxes, points
6998e463313faeaaf75e96b0374c0bdc5415c8f1
22,959
import numpy
import scipy.stats


def predband(xd, yd, a, b, conf=0.95, x=None):
    """
    Calculates the prediction band of the linear regression model at the desired confidence level, using analytical methods.

    Clarification of the difference between confidence and prediction bands:
    "The 2sigma confidence interval is 95% sure to contain the best-fit regression line. This is not the same as saying it will contain 95% of the data points. The prediction bands are further from the best-fit line than the confidence bands, a lot further if you have many data points. The 95% prediction interval is the area in which you expect 95% of all data points to fall." (from http://graphpad.com/curvefit/linear_regression.htm)

    Arguments:
    - conf: desired confidence level, by default 0.95 (2 sigma)
    - xd,yd: data arrays
    - a,b: linear fit parameters as in y=ax+b
    - x: (optional) array with x values to calculate the confidence band. If none is provided, will by default generate 100 points in the original x-range of the data.

    Usage:
    >>> lpb,upb,x=nemmen.predband(all.kp,all.lg,a,b,conf=0.95)
    calculates the prediction bands for the given input arrays

    >>> pylab.fill_between(x, lpb, upb, alpha=0.3, facecolor='gray')
    plots a shaded area containing the prediction band

    :returns: Sequence (lpb,upb,x) with the arrays holding the lower and upper confidence bands corresponding to the [input] x array.

    References:
    1. `Introduction to Simple Linear Regression, Gerard E. Dallal, Ph.D. <http://www.JerryDallal.com/LHSP/slr.htm>`_
    """
    alpha = 1. - conf    # significance
    n = xd.size    # data sample size

    if x is None:
        x = numpy.linspace(xd.min(), xd.max(), 100)

    # Predicted values (best-fit model)
    y = a*x + b

    # Auxiliary definitions
    sd = scatterfit(xd, yd, a, b)    # Scatter of data about the model
    sxd = numpy.sum((xd - xd.mean())**2)
    sx = (x - xd.mean())**2    # array

    # Quantile of Student's t distribution for p=1-alpha/2
    q = scipy.stats.t.ppf(1. - alpha/2., n - 2)

    # Prediction band
    dy = q*sd*numpy.sqrt(1. + 1./n + sx/sxd)
    upb = y + dy    # Upper prediction band
    lpb = y - dy    # Lower prediction band

    return lpb, upb, x
a235548f4593cfc105bba9d7268dba2e14374df7
22,960
def orient_edges(G): """Orient remaining edges after colliders have been oriented. :param G: partially oriented graph (colliders oriented) :returns: maximally oriented DAG """ undir_list = [edge for edge in G.edges() if G.is_undir_edge(edge)] undir_len = len(undir_list) idx = 0 while idx < undir_len: success = False for edge in undir_list: if can_orient(G,edge): G.remove_edge(*edge[::-1]) success = True if success: undir_list = [edge for edge in G.edges() if G.is_undir_edge(edge)] idx += 1 else: break return G
958c22b7c7906219bfc52d2cc59945a22e4e1060
22,961
def _create_deserialize_fn(attributes: dict, globals: dict, bases: tuple[type]) -> str: """ Create a deserialize function for binary struct from a buffer The function will first deserialize parent classes, then the class attributes """ lines = [] # For this class bases for parent in bases: if not _is_parent_fn_callable(parent, 'deserialize'): continue lines.append(f'{parent.__name__}.deserialize(self, buf)') lines.append(f'buf = buf[{parent.__name__}._bs_size(self):]') # For this class attributes for name, annotation in attributes.items(): annotation_type = _get_annotation_type(annotation) if annotation_type == AnnotationType.TYPED_BUFFER: lines.append(f'self.{name}.deserialize(buf)') else: lines.append(f'self.{name}.deserialize(buf[:self.{name}.size_in_bytes])') lines.append(f'buf = buf[self.{name}.size_in_bytes:]') return _create_fn('deserialize', ['self, buf'], lines + ['return self'], globals)
e5058b73d47a034323a4ebe65edbf888bdf98321
22,962
def blck_repeat(preprocessor: Preprocessor, args: str, contents: str) -> str:
    """The repeat block.
    usage: repeat <number>
      renders its contents once and copies them number times"""
    args = args.strip()
    if not args.isnumeric():
        preprocessor.send_error("invalid-argument",
            "invalid argument. Usage: repeat [uint > 0]")
    number = int(args)
    if number <= 0:
        preprocessor.send_error("invalid-argument",
            "invalid argument. Usage: repeat [uint > 0]")
    preprocessor.context.update(preprocessor.current_position.end, "in block repeat")
    contents = preprocessor.parse(contents)
    preprocessor.context.pop()
    return contents * number
8dfedd854a68b2fcc33ea4b9714744302fe5934d
22,963
import math def is_prime(n: int) -> bool: """Determines if the natural number n is prime.""" # simple test for small n: 2 and 3 are prime, but 1 is not if n <= 3: return n > 1 # check if multiple of 2 or 3 if n % 2 == 0 or n % 3 == 0: return False # search for subsequent prime factors around multiples of 6 max_factor = int(math.sqrt(n)) for i in range(5, max_factor + 1, 6): if n % i == 0 or n % (i + 2) == 0: return False return True
e7bd02271681906f9ee4e63305c5a6f630578171
22,964
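# Quick check of the 6k±1 trial-division scheme above: after ruling out
# multiples of 2 and 3, every remaining prime factor has the form 6k±1.
assert [n for n in range(2, 30) if is_prime(n)] == \
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert not is_prime(1) and not is_prime(25) and not is_prime(49)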