content: string (lengths 35 to 762k) · sha1: string (length 40) · id: int64 (0 to 3.66M)
from typing import Any, Dict

# HttpRequest lives in azure.core.rest; _SERIALIZER is defined at module level
# in the generated client.
from azure.core.rest import HttpRequest


def build_get301_request(**kwargs: Any) -> HttpRequest:
    """Return 301 status code and redirect to /http/success/200.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this
    request builder into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the
     client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart
     for how to incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", "/http/redirect/301")

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
2ef01a4c126890fd30fd3bc656036b92d2ef0408
20,434
def error_function_latticeparameters(varying_parameters_values_array,
                                     varying_parameters_keys,
                                     Miller_indices,
                                     allparameters,
                                     absolutespotsindices,
                                     Xexp, Yexp,
                                     initrot=IDENTITYMATRIX,
                                     pureRotation=0,
                                     verbose=0,
                                     pixelsize=165.0 / 2048,
                                     dim=(2048, 2048),
                                     weights=None,
                                     kf_direction="Z>0",
                                     returnalldata=False,
                                     additional_expression="none"):
    """
    q = Uz Uy Ux Ustart B0 G*

    Interface error function returning the array of pair (experimental - model)
    distances: Sum_i [weights_i * ((Xmodel_i - Xexp_i)**2 + (Ymodel_i - Yexp_i)**2)]

    Xmodel, Ymodel come from G* = h a* + k b* + l c*:
        q = refinedUzUyUx Ustart refinedB0 G*

    B0 is the reference structure reciprocal-space frame (a*, b*, c*) with
    a* // ki and b* perpendicular to a* and to z (z lies in the plane of ki and
    the detector normal vector n), i.e. the columns of B0 are the components of
    a*, b* and c* expressed in the x, y, z LT frame. refinedB0 is obtained by
    refining 5 of the 6 lattice parameters.

    Possible keys for parameters to be refined:
    - five detector-frame calibration parameters:
      det_distance, det_xcen, det_ycen, det_beta, det_gamma
    - three misorientation angles with respect to the LT orthonormal frame
      (x, y, z), matrices Ux, Uy, Uz: anglex, angley, anglez
    - 5 lattice parameters among 6 (a, b, c, alpha, beta, gamma)
    """
    # reading default parameters: CCD plane calibration parameters
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]

    # allparameters[5:8] = 0, 0, 0
    Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    latticeparameters = np.array(allparameters[8:14])

    nb_varying_parameters = len(varying_parameters_keys)

    for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
        if parameter_name in ("anglex", "angley", "anglez"):
            if nb_varying_parameters > 1:
                anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
            else:
                anglevalue = varying_parameters_values_array[0] * DEG

            ca = np.cos(anglevalue)
            sa = np.sin(anglevalue)
            if parameter_name == "angley":
                Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
            elif parameter_name == "anglex":
                Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
            elif parameter_name == "anglez":
                Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])

        elif parameter_name in ("a", "b", "c", "alpha", "beta", "gamma"):
            indparam = dict_lattice_parameters[parameter_name]
            if nb_varying_parameters > 1:
                latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
            else:
                latticeparameters[indparam] = varying_parameters_values_array[0]

    Uxyz = np.dot(Uz, np.dot(Ux, Uy))

    if additional_expression == "a==b":
        indparam = dict_lattice_parameters["b"]
        indparam1 = dict_lattice_parameters["a"]
        latticeparameters[indparam] = latticeparameters[indparam1]

    newB0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)

    # DictLT.RotY40 such that X = DictLT.RotY40 Xsample (xs, ys, zs = columns
    # expressed in the x, y, z frame); the transform in the sample frame Ts and
    # the same transform in the x, y, z LT frame T are related by:
    # Ts = DictLT.RotY40^-1 T DictLT.RotY40 and T = DictLT.RotY40 Ts DictLT.RotY40^-1
    newmatrix = np.dot(Uxyz, initrot)

    Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                  Miller_indices,
                                                  absolutespotsindices,
                                                  UBmatrix=newmatrix,
                                                  B0matrix=newB0matrix,
                                                  pureRotation=0,
                                                  labXMAS=0,
                                                  verbose=0,
                                                  pixelsize=pixelsize,
                                                  dim=dim,
                                                  kf_direction=kf_direction)

    distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)

    if weights is not None:
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights

    alldistances_array = distanceterm

    if verbose:
        print("** mean distance residue = ", np.mean(distanceterm), " ********")

    if returnalldata:
        # all pair distances, orientation matrix, UB matrix, refined B0 matrix,
        # refined lattice parameters
        return alldistances_array, Uxyz, newmatrix, newB0matrix, latticeparameters
    else:
        return alldistances_array
e1c3242855354ed82d2dd164a7ae16aa76cd5e22
20,435
def run_forward_model(z_in):
    """Run forward model and return approximate measured values."""
    x_dummy[:prm.nn] = z_in
    x_dummy[prm.nn:] = prm.compute_velocity(z_in, t0)
    x_meas = H_meas.dot(x_dummy)
    return x_meas
fd6bfbbacba59e08b2bb8c4588793b969cab4b60
20,436
def optimize(name: str, circuit: cirq.Circuit) -> cirq.Circuit:
    """Applies sycamore circuit decompositions/optimizations.

    Args:
        name: the name of the circuit, used for printing messages
        circuit: the circuit to optimize
    """
    print(f'optimizing: {name}', flush=True)
    start = timer()
    optimized = cirq.google.optimized_for_sycamore(circuit)
    stop = timer()
    print_stats(stop - start, optimized)
    return optimized
07027dc2ad21e33ca2038cb40c3cbb2b529941e7
20,437
def download_sbr(destination=None):
    """Download an example of SBR+ Array and return the path to it.

    Example files are downloaded to a persistent cache to avoid re-downloading
    the same file twice.

    Parameters
    ----------
    destination : str, optional
        Path where files will be downloaded. Default is the user temp folder.

    Returns
    -------
    str
        Path to the example file.

    Examples
    --------
    Download an example result file and return the path of the file.

    >>> from pyaedt import examples
    >>> path = examples.download_sbr()
    >>> path
    'C:/Users/user/AppData/local/temp/pyaedtexamples/Cassegrain.aedt'
    """
    return _download_file("sbr", "Cassegrain.aedt", destination)
0b928977806b546325569dbf71e93e8b760868fa
20,438
from typing import Tuple, Union


def isvalid_sequence(
    level: str,
    time_series: Tuple[Union[HSScoring, CollegeScoring], ...]
) -> bool:
    """Checks if entire sequence is valid.

    Args:
        level: 'high school' or 'college' level for sequence analysis.
        time_series: Tuple of sorted match time_series events.

    Raises:
        ValueError: Invalid level.
        ValueError: Not sorted time_series.
        ValueError: Invalid position.

    Returns:
        bool: True if sequence is valid, otherwise raises ValueError.
    """
    if level not in {"college", "high school"}:
        raise ValueError(
            f"Expected `level` to be one of 'college' or 'high school', "
            f"got {level!r}."
        )
    # alias the sequences based on level
    sequences = COLLEGE_SEQUENCES if level == "college" else HS_SEQUENCES
    position = "neutral"
    # skip the last value because each event is compared with the next one
    for i, score in enumerate(time_series[:-1]):
        # current time can't be larger than next time
        if time_series[i].time_stamp > time_series[i + 1].time_stamp:
            raise ValueError(
                "Values in `time_series` appear to be sorted incorrectly."
            )
        if position == "neutral":
            check_neutral(score, sequences["neutral"])
            if score.formatted_label in ("fT2", "oBOT", "fTOP"):
                position = "top"
            elif score.formatted_label in ("oT2", "fBOT", "oTOP"):
                position = "bottom"
        elif position == "top":
            check_top(score, sequences["top"])
            if score.formatted_label in ("oE1", "fNEU", "oNEU"):
                position = "neutral"
            elif score.formatted_label in ("oR2", "fBOT", "oTOP"):
                position = "bottom"
        elif position == "bottom":
            check_bottom(score, sequences["bottom"])
            if score.formatted_label in ("fE1", "fNEU", "oNEU"):
                position = "neutral"
            elif score.formatted_label in ("fR2", "oBOT", "fTOP"):
                position = "top"
        else:
            raise ValueError(
                f"Invalid `position`, expected one of ['neutral', "
                f"'top', 'bottom'], got {position!r}."
            )
    return True
5e32906408540c504347c745113fc303ef0d989b
20,439
def non_linear_relationships():
    """Plot logarithmic and exponential data along with correlation coefficients."""
    # make subplots
    fig, axes = plt.subplots(1, 2, figsize=(12, 3))

    # plot logarithmic
    log_x = np.linspace(0.01, 10)
    log_y = np.log(log_x)
    axes[0].scatter(log_x, log_y)
    axes[0].set_title(f'ρ = {np.corrcoef(log_x, log_y)[0][1]:.2f}')

    # plot exponential
    exp_x = np.linspace(0, 10)
    exp_y = np.exp(exp_x)
    axes[1].scatter(exp_x, exp_y)
    axes[1].set_title(f'ρ = {np.corrcoef(exp_x, exp_y)[0][1]:.2f}')

    # labels
    for ax in axes:
        ax.set_xlabel('x')
        ax.set_ylabel('y')

    return axes
86ce934aebc6b6f8e6b5c1826d9d26c408efc8df
20,441
import io


def label_samples(annotation, atlas, atlas_info=None, tolerance=2):
    """
    Matches all microarray samples in `annotation` to parcels in `atlas`.

    Attempts to place each sample provided in `annotation` into a parcel in
    `atlas`, where the latter is a 3D niimg-like object that contains parcels
    each identified by a unique integer ID.

    The function tries to best match samples in `annotation` to parcels defined
    in `atlas` by:

        1. Determining if the sample falls directly within a parcel,
        2. Checking to see if there are nearby parcels by slowly expanding the
           search space to include nearby voxels, up to a specified distance
           (specified via the `tolerance` parameter),
        3. Assigning the sample to the closest parcel if there are multiple
           nearby parcels, where closest is determined by the parcel centroid.

    If at any step a sample can be assigned to a parcel the matching process is
    terminated. If there is still no parcel for a given sample after this
    process the sample is provided a label of 0.

    Parameters
    ----------
    annotation : (S, 13) pandas.DataFrame
        Pre-loaded annotation information for a given AHBA donor
    atlas : niimg-like object
        A parcellation image in MNI space, where each parcel is identified by a
        unique integer ID
    atlas_info : pandas.DataFrame, optional
        Filepath to or pre-loaded dataframe containing information about
        `atlas`. Must have *at least* columns 'id', 'hemisphere', and
        'structure' containing information mapping atlas IDs to hemisphere and
        broad structural class (i.e., "cortex", "subcortex", "cerebellum").
        Default: None
    tolerance : int, optional
        Distance (in mm) that a sample must be from a parcel for it to be
        matched to that parcel. This is only considered if the sample is not
        directly within a parcel. Default: 2

    Returns
    -------
    labels : (S, 1) pandas.DataFrame
        Dataframe with parcel labels for each of `S` samples
    """
    # get annotation and atlas data
    annotation = io.read_annotation(annotation)
    atlas = utils.check_img(atlas)
    label_data, affine = np.asarray(atlas.dataobj), atlas.affine

    # load atlas_info, if provided
    if atlas_info is not None:
        atlas_info = utils.check_atlas_info(atlas, atlas_info)

    # get ijk coordinates for microarray samples and find labels
    g_ijk = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']], affine)
    labelled = label_data[g_ijk[:, 0], g_ijk[:, 1], g_ijk[:, 2]]

    # if sample coordinates aren't directly inside a parcel, increment radius
    # around sample up to `tolerance` to try and find nearby parcels.
    # if still no parcel, then ignore this sample
    for idx in np.where(labelled == 0)[0]:
        label, tol = labelled[idx], 1
        while label == 0 and tol <= tolerance:
            label = _assign_sample(g_ijk[[idx]], atlas,
                                   sample_info=annotation.iloc[idx],
                                   atlas_info=atlas_info, tolerance=tol)
            tol += 1
        labelled[idx] = label

    return pd.DataFrame(labelled, dtype=int,
                        columns=['label'], index=annotation.index)
65a3f83b031871a14b250df48c9edef3cdcce7ac
20,442
def group_by(x, group_by_fields='Event', return_group_indices=False):
    """
    Splits x into a LIST of arrays, each array with rows that have the same
    group_by_fields values.

    Gotchas:
        Assumes x is sorted by group_by_fields (works in either order,
        reversed or not).
        Does NOT put in empty lists if indices skip a value!
        (e.g. events without peaks)
        If return_group_indices=True, returns a list of arrays with the
        indices of group elements in x instead.
    """
    # Support single index and list of indices
    try:
        group_by_fields[0]
    except TypeError:
        group_by_fields = tuple(group_by_fields)

    # Define the array we'll split
    if return_group_indices:
        to_return = np.arange(len(x))
    else:
        to_return = x

    # Indices to determine split points from
    indices = fields_view(x, group_by_fields)

    # Should we split at all?
    if indices[0] == indices[-1]:
        return [to_return]
    else:
        # Split where indices change value
        split_points = np.where((np.roll(indices, 1) != indices))[0]
        # 0 shouldn't be a split_point but will be included due to the roll
        # (since indices[0] != indices[-1]), so remove it
        split_points = split_points[1:]
        return np.split(to_return, split_points)
12e8034556ca303a9ebd2ccaab83cbcc131b0bec
20,443
import fcntl


def unlock_file(fd):
    """Unlock file."""
    try:
        fcntl.flock(fd, fcntl.LOCK_UN)
        return (True, 0)
    except IOError as ex_value:
        return (False, ex_value.args[0])
2c6ce071072fa45607ce284b0881af5df44b5e6d
20,444
def DsseTrad(nodes_num, measurements, Gmatrix, Bmatrix, Yabs_matrix, Yphase_matrix):
    """
    Traditional state estimator.
    It performs state estimation using rectangular node voltage state variables
    and is customized to work without PMU measurements.

    @param nodes_num: number of nodes of the grid
    @param measurements: vector of input measurements (voltages, currents, powers)
    @param Gmatrix: conductance matrix
    @param Bmatrix: susceptance matrix
    @param Yabs_matrix: magnitude of the admittance matrix
    @param Yphase_matrix: phase of the admittance matrix
    return: np.array V - estimated voltages
    """
    # calculate the weights matrix (obtained as standard_deviations^-2)
    weights = measurements.getWeightsMatrix()
    W = np.diag(weights)

    inj_code = 0

    # Jacobian for power injection measurements
    H2, H3 = calculateJacobiMatrixSinj(measurements, nodes_num, Gmatrix, Bmatrix,
                                       inj_code, type=2)

    # Jacobian for branch power measurements
    H4, H5 = calculateJacobiBranchPower(measurements, nodes_num, Gmatrix, Bmatrix,
                                        inj_code, type=2)

    # indices of measurements of type V_mag and I_mag
    vidx = measurements.getIndexOfMeasurements(type=MeasType.V_mag)
    iidx = measurements.getIndexOfMeasurements(type=MeasType.I_mag)
    nvi = len(vidx)
    nii = len(iidx)

    # indices of measurements of type Sinj_real, Sinj_imag in measurements.measurements
    pidx = measurements.getIndexOfMeasurements(type=MeasType.Sinj_real)
    qidx = measurements.getIndexOfMeasurements(type=MeasType.Sinj_imag)

    # indices of measurements of type S_real, S_imag in measurements.measurements
    p1br = measurements.getIndexOfMeasurements(type=MeasType.S1_real)
    p2br = measurements.getIndexOfMeasurements(type=MeasType.S2_real)
    q1br = measurements.getIndexOfMeasurements(type=MeasType.S1_imag)
    q2br = measurements.getIndexOfMeasurements(type=MeasType.S2_imag)

    # array with all measured values (affected by uncertainty)
    z = measurements.getMeasValues()

    V = np.ones(nodes_num) + 1j * np.zeros(nodes_num)
    State = np.concatenate((np.ones(nodes_num), np.zeros(nodes_num - 1)), axis=0)
    epsilon = 5
    num_iter = 0

    # iterations of the Newton-Raphson method, needed to solve the non-linear
    # system of equations
    while epsilon > 10 ** (-6):
        """ Computation of equivalent current measurements in place of the power measurements """
        # at every iteration the input power measurements are converted into
        # currents by dividing by the voltage estimated at the previous iteration
        z = convertSinjMeasIntoCurrents(measurements, V, z, pidx, qidx)
        z = convertSbranchMeasIntoCurrents(measurements, V, z, p1br, q1br, p2br, q2br)

        """ Voltage magnitude measurements """
        h1, H1 = update_h1_vector(measurements, V, vidx, nvi, nodes_num, inj_code, type=2)

        """ Power injection measurements """
        # h(x) vector where power injections are present
        h2 = np.inner(H2, State)
        h3 = np.inner(H3, State)

        """ Power flow measurements """
        # h(x) vector where power flows are present
        h4 = np.inner(H4, State)
        h5 = np.inner(H5, State)

        """ Current magnitude measurements """
        h6, H6 = update_h6_vector(measurements, V, iidx, nii, Yabs_matrix,
                                  Yphase_matrix, nodes_num, num_iter, inj_code, type=2)

        """ WLS computation """
        # all the sub-matrices of H calculated so far are merged into one matrix
        H = np.concatenate((H1, H2, H3, H4, H5, H6), axis=0)
        # h(x) sub-vectors are concatenated
        y = np.concatenate((h1, h2, h3, h4, h5, h6), axis=0)
        # "res" is the residual vector: the difference between input measurements and h(x)
        res = np.subtract(z, y)
        # g = transpose(H) * W * res
        g = np.inner(H.transpose(), np.inner(W, res))
        WH = np.inner(W, H.transpose())
        # G is the gain matrix, which has to be inverted at each iteration
        G = np.inner(H.transpose(), WH.transpose())

        # inversion of G
        Ginv = np.linalg.pinv(G)

        # Delta includes the state updates for the current Newton-Raphson iteration
        Delta_State = np.inner(Ginv, g)

        # the state is updated
        State = State + Delta_State
        # calculate the NR threshold (for the next while check)
        epsilon = np.amax(np.absolute(Delta_State))

        # update the voltages
        V.real = State[:nodes_num]
        V.imag = np.concatenate(([0], State[nodes_num:]), axis=0)

        num_iter = num_iter + 1

    return V
9e662255875970fc8df38c29e728637e53a30db5
20,445
def _get_specs(layout, surfs, array_name, cbar_range, nvals=256):
    """Get array specifications.

    Parameters
    ----------
    layout : ndarray, shape = (n_rows, n_cols)
        Array of surface keys in `surfs`. Specifies how window is arranged.
    surfs : dict[str, BSPolyData]
        Dictionary of surfaces.
    array_name : ndarray
        Names of point data array to plot for each layout entry.
    cbar_range : {'sym'} or tuple
        Range for each array. If 'sym', uses a symmetric range. Only used if
        the array has positive and negative values.
    nvals : int, optional
        Number of lookup table values for continuous arrays. Default is 256.

    Returns
    -------
    specs : ndarray
        Array with specifications for each array entry.
    """
    nrow, ncol = layout.shape
    n_overlays = max([len(a) for a in array_name.ravel()])

    def _set_spec(x, rg):
        if rg is None or rg == 'sym':
            a, b = np.nanmin(x), np.nanmax(x)
            if rg == 'sym' and np.sign(a) != np.sign(b):
                b = max(np.abs(a), b)
                a = -b
            rg = (a, b)
        if np.issubdtype(x.dtype, np.floating):
            return (*rg, nvals, np.array([]), False)
        vals = np.unique(x)
        return (*rg, vals.size, vals, True)

    dt = np.dtype([('min', 'f8'), ('max', 'f8'), ('nval', 'i8'), ('val', 'O'),
                   ('disc', '?')])
    specs = np.zeros((n_overlays, nrow, ncol), dtype=dt)
    specs[:] = (np.nan, np.nan, nvals, np.array([]), False)

    map_sp = {k: {} for k in surfs.keys()}
    for idx, k in np.ndenumerate(layout):
        if k is None:
            continue
        for ia, name in enumerate(array_name[idx]):
            if name not in surfs[k].point_keys:
                continue
            if name not in map_sp[k]:
                arr = surfs[k].PointData[name]
                map_sp[k][name] = _set_spec(arr, cbar_range[idx][ia])
            specs[(ia,) + idx] = map_sp[k][name]

    return specs
310208c5bd8db46d37635fa8e2fcd8422a753a1b
20,446
def upper_camel_to_lower_camel(upper_camel: str) -> str:
    """Convert upper camel case to lower camel case.

    Example: CamelCase -> camelCase

    :param upper_camel:
    :return:
    """
    return upper_camel[0].lower() + upper_camel[1:]
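A quick sanity check of the conversion (not part of the original snippet):

assert upper_camel_to_lower_camel("CamelCase") == "camelCase"
assert upper_camel_to_lower_camel("HTTPServer") == "hTTPServer"  # only the first character is lowered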
e731bbee45f5fc3d8e3e218837ccd36c00eff734
20,448
def get(isamAppliance, cert_dbase_id, check_mode=False, force=False):
    """
    Get details of a certificate database.
    """
    return isamAppliance.invoke_get("Retrieving all current certificate database names",
                                    "/isam/ssl_certificates/{0}/details".format(cert_dbase_id))
34ade7c42fcc1b1409b315f8748105ee99157986
20,449
def model_check(func):
    """Checks that the model is referenced as a valid model. If the model is
    valid, the API will be ready to find the correct endpoint for the given
    model.

    :param func: The function to decorate
    :type func: function
    """
    def wrapper(*args, **kwargs):
        model = None
        if kwargs:
            model = kwargs.get("model", None)
        if not model:
            if len(args) > 1:
                model = args[1]  # args[0] is the decorated function
        if not constants.TRANSLATION.get(model, None):
            raise ValueError(
                "'{model}' doesn't exist. Allowed models: {allowed_models}".format(
                    model=model,
                    allowed_models=",\n".join(list(constants.TRANSLATION.keys())),
                )
            )
        return func(*args, **kwargs)
    return wrapper
809d7659a721ad6dedf4a651dd1fdab1b1dbf51e
20,450
def content_loss_func(sess, model):
    """Content loss function as defined in the paper."""
    def _content_loss(p, x):
        # N is the number of filters at the chosen layer
        N = p.shape[3]
        # M is the height * width of the feature map at the chosen layer
        M = p.shape[1] * p.shape[2]
        return (1 / (4 * N * M)) * tf.reduce_mean(tf.pow(x - p, 2))
    return _content_loss(sess.run(model["conv4_2"]), model["conv4_2"])
229866eaaf6021e7a078460dc29a6f0bfaa853bd
20,451
import joblib


def Extract_from_DF_kmeans(dfdir, num, mode=True):
    """
    Load the plane DataFrame and, for each client_IP, write out the list of
    matching index numbers to its own txt file.
    When mode is False, the sequences are rebuilt even if they already exist.
    """
    # whether the name list already exists
    flag = exists("Database/KMeans/km_full_" + dfdir + "_database_name")
    if flag and mode:
        return

    plane_df = joblib.load("./DFs/" + dfdir)
    result_df = joblib.load("./DFs/Results/KMeans/Result_km_" + str(num)
                            + "_full_Input_" + dfdir + "_continuous")

    # list of client_ips contained in the loaded DF (duplicates removed via set)
    iplist = list(set(plane_df["client_ip"]))
    # dump iplist: used when feeding each sequence into anomaly detection
    joblib.dump(iplist, "./List/iplist_" + dfdir)

    database = []       # sequences are appended here and dumped at the end
    database_name = []  # name per sequence; naming rule: (client_ip)_(server_ip)
    for ip in iplist:
        # build the sequence per client_IP
        result_list = list(result_df.loc[
            list(plane_df[plane_df["client_ip"] == ip].index)].values.flatten())
        database.append(result_list)
        database_name.append(ip)

    joblib.dump(database, "Database/KMeans/km_" + str(num) + "_full_" + dfdir + "_database")
    if not flag:
        joblib.dump(database_name, "Database/KMeans/km_full_" + dfdir + "_database_name")
    return [database, database_name]
cb086c07024716022343c7e8eb5755f2de3695db
20,452
from typing import Optional


def get_workspace(workspace_id: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
    """
    Resource schema for AWS::IoTTwinMaker::Workspace

    :param str workspace_id: The ID of the workspace.
    """
    __args__ = dict()
    __args__['workspaceId'] = workspace_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:iottwinmaker:getWorkspace', __args__,
                                    opts=opts, typ=GetWorkspaceResult).value

    return AwaitableGetWorkspaceResult(
        arn=__ret__.arn,
        creation_date_time=__ret__.creation_date_time,
        description=__ret__.description,
        role=__ret__.role,
        s3_location=__ret__.s3_location,
        tags=__ret__.tags,
        update_date_time=__ret__.update_date_time)
5c0970884be38923ae156511faf619fda725d004
20,453
import socket


def find_open_port():
    """
    Use socket's built-in ability to find an open port.
    """
    sock = socket.socket()
    sock.bind(('', 0))
    host, port = sock.getsockname()
    return port
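A usage sketch (hypothetical; the port is only guaranteed free at call time, so a race is possible before it is reused — in CPython the probe socket is typically closed on garbage collection, freeing the port for rebinding):

port = find_open_port()
server = socket.socket()
server.bind(('', port))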
516540fd23259d0fe247e02c4058c5ed7f3ee3a8
20,454
import itertools
import typing as tp


def split_list_round_robin(data: tp.Iterable, chunks_num: int) -> tp.List[list]:
    """Divide iterable into `chunks_num` lists."""
    result = [[] for _ in range(chunks_num)]
    chunk_indexes = itertools.cycle(i for i in range(chunks_num))
    for item in data:
        i = next(chunk_indexes)
        result[i].append(item)
    return result
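An example of the round-robin split (not part of the original snippet):

print(split_list_round_robin(range(7), 3))
# -> [[0, 3, 6], [1, 4], [2, 5]]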
a87322b2c6a3601cda6c949354e55c38e215289a
20,455
def calc_Q_loss_FH_d_t(Q_T_H_FH_d_t, r_up):
    """Heat loss of the hot-water floor heating system.

    Args:
        Q_T_H_FH_d_t (ndarray): processed heating load of the hot-water heating system [MJ/h]
        r_up (ndarray): upward heat-release ratio of the dwelling unit's hot-water floor heating [-]

    Returns:
        ndarray: heat loss of the hot-water floor heating system
    """
    return hwfloor.get_Q_loss_rad(Q_T_H_rad=Q_T_H_FH_d_t, r_up=r_up)
04ad561fa0090de2eb64d5514a28729da92af63c
20,456
import random


def t06_ManyGetPuts(C, pks, crypto, server):
    """Many clients upload many files and their contents are checked."""
    clients = [C("c" + str(n)) for n in range(10)]
    kvs = [{} for _ in range(10)]

    for _ in range(200):
        i = random.randint(0, 9)
        uuid1 = "%08x" % random.randint(0, 100)
        uuid2 = "%08x" % random.randint(0, 100)
        clients[i].upload(str(uuid1), str(uuid2))
        kvs[i][str(uuid1)] = str(uuid2)

    good = total = 0
    # verify integrity
    for i, (c, kv) in enumerate(zip(clients, kvs)):
        for k, v in kv.items():
            vv = c.download(k)
            if vv == v:
                good += 1
            total += 1
    return float(good) / total
384aa2b03169da613b25d2da60cdd1ec007aeed5
20,458
def multi_lightness_function_plot(functions=None, **kwargs):
    """
    Plots given *Lightness* functions.

    Parameters
    ----------
    functions : array_like, optional
        *Lightness* functions to plot.
    \*\*kwargs : \*\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Raises
    ------
    KeyError
        If one of the given *Lightness* functions is not found in the factory
        *Lightness* functions.

    Examples
    --------
    >>> fs = ('CIE 1976', 'Wyszecki 1964')
    >>> multi_lightness_function_plot(fs)  # doctest: +SKIP
    True
    """
    if functions is None:
        functions = ('CIE 1976', 'Wyszecki 1964')

    samples = np.linspace(0, 100, 1000)
    for i, function in enumerate(functions):
        function, name = LIGHTNESS_METHODS.get(function), function
        if function is None:
            raise KeyError(
                ('"{0}" "Lightness" function not found in factory '
                 '"Lightness" functions: "{1}".').format(
                    name, sorted(LIGHTNESS_METHODS.keys())))

        pylab.plot(samples,
                   [function(x) for x in samples],
                   label=u'{0}'.format(name),
                   linewidth=2)

    settings = {
        'title': '{0} - Lightness Functions'.format(', '.join(functions)),
        'x_label': 'Luminance Y',
        'y_label': 'Lightness L*',
        'x_tighten': True,
        'legend': True,
        'legend_location': 'upper left',
        'x_ticker': True,
        'y_ticker': True,
        'grid': True,
        'limits': [0, 100, 0, 100]}
    settings.update(kwargs)

    bounding_box(**settings)
    aspect(**settings)
    return display(**settings)
18a4706d919c5b8822ff76a40dcd657028a6179b
20,459
def delete_notification(request):
    """
    Deletes a Notification model based on user input.
    """
    print(request.POST)
    # Notification's PK
    Notification.objects.get(pk=int(request.POST["pk"])).delete()
    return JsonResponse({})
c4750bfbaa8184e64293517689671dbf717e6cd4
20,460
from datetime import timedelta

from dateutil import tz
# Delorean, epoch and parse are assumed to come from the `delorean` package,
# which the rest of this snippet clearly relies on.
from delorean import Delorean, epoch, parse


def parse_query_value(query_str):
    """Return value for the query string."""
    try:
        query_str = str(query_str).strip('"\' ')
        if query_str == 'now':
            d = Delorean(timezone=tz)
        elif query_str.startswith('y'):
            # one 'y' per day back from midnight, e.g. 'yy' is two days ago
            d = Delorean(Delorean(timezone=tz).midnight)
            d -= timedelta(days=len(query_str))
        elif query_str.startswith('t'):
            # 't' is today, 'tt' tomorrow, and so on
            d = Delorean(Delorean(timezone=tz).midnight)
            d += timedelta(days=len(query_str) - 1)
        else:
            # parse a datetime string or a timestamp
            try:
                ts = float(query_str)
                if ts >= 1000000000000:
                    ts /= 1000  # milliseconds to seconds
                d = epoch(float(ts))
                d.shift(tz)
            except ValueError:
                d = parse(str(query_str), tz, dayfirst=False)
    except (TypeError, ValueError):
        d = None
    return d
ac9c6845871094d043eee7004214fdcecb20daec
20,461
def build_model():
    """
    Build the model

    :return: the model
    """
    model = keras.Sequential([
        layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
        layers.Dense(64, activation='relu'),
        layers.Dense(1)
    ])

    optimizer = tf.keras.optimizers.RMSprop(0.001)

    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae', 'mse'])
    return model
b5e4b0a64e7d39a0c7b72c0380ef98d8eaf9cc01
20,462
def detect(stream):
    """Returns True if given stream is a readable OpenDocument file."""
    try:
        opendocument.load(BytesIO(stream))
        return True
    except Exception:
        return False
a9ef5361d9f6f5ae40767f40f12b89c3d53177a4
20,463
def new_default_channel():
    """Create new gRPC channel from settings."""
    channel_url = urlparse(format_url(settings.SERVICE_BIND))
    return Channel(host=channel_url.hostname, port=channel_url.port)
4771306570213fa03cc5df08a0e8c9b216ecfd44
20,464
def iou(bbox1, bbox2):
    """
    Calculates the intersection-over-union of two bounding boxes.

    Args:
        bbox1 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.
        bbox2 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.

    Returns:
        float: intersection-over-union of bbox1, bbox2
    """
    bbox1 = [float(x) for x in bbox1]
    bbox2 = [float(x) for x in bbox2]

    (x0_1, y0_1, x1_1, y1_1) = bbox1
    (x0_2, y0_2, x1_2, y1_2) = bbox2

    # get the overlap rectangle
    overlap_x0 = max(x0_1, x0_2)
    overlap_y0 = max(y0_1, y0_2)
    overlap_x1 = min(x1_1, x1_2)
    overlap_y1 = min(y1_1, y1_2)

    # check if there is an overlap
    if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
        return 0

    # if yes, calculate the ratio of the overlap to each ROI size and the unified size
    size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
    size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
    size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
    size_union = size_1 + size_2 - size_intersection

    return size_intersection / size_union
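A worked example (unit squares offset by half a side, so intersection = 0.5 and union = 1 + 1 - 0.5 = 1.5):

print(iou([0, 0, 1, 1], [0.5, 0, 1.5, 1]))  # 0.5 / 1.5 ≈ 0.333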
7609bcc6eb39757240a22c28fc7c15f4024cd789
20,465
def get_version():
    """
    Obtain the version of the ITU-R P.1511 recommendation currently being used.

    Returns
    -------
    version: int
        Version currently being used.
    """
    return __model.__version__
4d36eacabebe74bfb18879ba64f190ceb1bbc22a
20,466
import copy

import numpy as np


def sample_filepaths(filepaths_in, filepaths_out, intensity):
    """
    `filepaths_in` is a list of filepaths for in-set examples.

    `filepaths_out` is a list of lists, where `filepaths_out[i]` is a list of
    filepaths corresponding to the ith out-of-set class.

    `intensity` is the number of in-set examples as a proportion of the total
    number of examples: `intensity = N_in / (N_in + N_out)`. We can rearrange
    this to get `N_out = N_in * ((1 / intensity) - 1)`, which we use to set
    `n_left_to_sample`. An intensity of 0.5 gives `N_in = N_out`.
    """
    filepaths_out_copy = copy.deepcopy(filepaths_out)
    filepaths_out_sampled = []
    inds_to_sample_from = range(len(filepaths_out))
    n_left_to_sample = int(len(filepaths_in) * ((1 / intensity) - 1))
    while n_left_to_sample > 0:
        if n_left_to_sample < len(filepaths_out):
            inds_to_sample_from = np.random.choice(
                inds_to_sample_from, n_left_to_sample, replace=False)
        for i in inds_to_sample_from:
            sample = np.random.choice(filepaths_out_copy[i])
            filepaths_out_copy[i].remove(sample)
            filepaths_out_sampled.append(sample)
        n_left_to_sample -= len(inds_to_sample_from)
    return np.random.permutation(filepaths_in + filepaths_out_sampled)
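A sanity check of the intensity arithmetic, using hypothetical filepath lists:

filepaths_in = ['in_%d.png' % i for i in range(100)]
filepaths_out = [['out%d_%d.png' % (c, i) for i in range(200)] for c in range(3)]
sampled = sample_filepaths(filepaths_in, filepaths_out, intensity=0.25)
# N_out = 100 * (1 / 0.25 - 1) = 300, so the result holds 400 paths, 25% in-set
assert len(sampled) == 400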
b20c1e1a019eaebc0eb7ea46b33c286b72da7af7
20,467
def makekey(s):
    """Generates a bitcoin private key from a secret s."""
    return CBitcoinSecret.from_secret_bytes(sha256(s).digest())
51658c6426a78ae2e20752542bc579f5bb7ebc01
20,468
def shape_broadcast(shape1: tuple, shape2: tuple) -> tuple:
    """
    Broadcast two shapes to create a new union shape.

    Args:
        shape1 (tuple) : first shape
        shape2 (tuple) : second shape

    Returns:
        tuple : broadcasted shape

    Raises:
        IndexingError : if cannot broadcast
    """
    for shape in (shape1, shape2):
        if not shape:
            raise IndexingError(f"Shape must have at least one dimension: {shape}")

    len_shape1 = len(shape1)
    len_shape2 = len(shape2)
    max_length = max(len_shape1, len_shape2)
    new_shape = [0] * max_length

    shape1_reversed = list(reversed(shape1))
    shape2_reversed = list(reversed(shape2))

    for idx in range(max_length):
        # iterate over every index; check if values are broadcastable, and if
        # so, add to new shape dimension
        if idx >= len_shape1:
            new_shape[idx] = shape2_reversed[idx]
        elif idx >= len_shape2:
            new_shape[idx] = shape1_reversed[idx]
        else:
            new_shape[idx] = max(shape1_reversed[idx], shape2_reversed[idx])
            if (
                shape1_reversed[idx] != new_shape[idx] and shape1_reversed[idx] != 1
            ) or (shape2_reversed[idx] != new_shape[idx] and shape2_reversed[idx] != 1):
                raise IndexingError(
                    f"The size of tensor a ({shape1_reversed[idx]}) must match the size "
                    f"of tensor b ({shape2_reversed[idx]}) at non-singleton dimension {idx}"
                )

    return tuple(reversed(new_shape))
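Behavior examples (IndexingError is assumed to be in scope, as in the original snippet):

print(shape_broadcast((2, 3), (3,)))       # (2, 3)
print(shape_broadcast((5, 1, 4), (3, 1)))  # (5, 3, 4)
# shape_broadcast((2, 3), (4,)) raises IndexingError: 3 vs 4 at trailing dimension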
4737332b371e0f16df3860d5c53e46718f68f30e
20,469
import xxhash


def hash_array(kmer):
    """Return a hash of a numpy array."""
    return xxhash.xxh32_intdigest(kmer.tobytes())
9761316333fdd9f28e74c4f1975adfca1909f54a
20,470
def GenerateDiskTemplate(
    lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
    disk_info, file_storage_dir, file_driver, base_index,
    feedback_fn, full_disk_params):
  """Generate the entire disk layout for a given template type.

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node_uuid = secondary_node_uuids[0]

    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid,
                                      remote_node_uuid,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_node_uuids:
      raise errors.ProgrammerError("Wrong template configuration")

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/%s" % (file_storage_dir,
                                                names[idx]))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found", constants.DT_EXT,
                                       constants.IDISK_PROVIDER)
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = template_name

    for idx, disk in enumerate(disk_info):
      params = ExtractDiskParams(disk, template_name)
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params,
                              spindles=disk.get(constants.IDISK_SPINDLES))
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks
87995b08d3579fc22a8db7c8408a9c29e47a8271
20,471
def check_pc_overlap(pc1, pc2, min_point_num):
    """
    Check if the bounding boxes of the 2 given point clouds overlap
    """
    b1 = get_pc_bbox(pc1)
    b2 = get_pc_bbox(pc2)

    b1_c = Polygon(b1)
    b2_c = Polygon(b2)
    inter_area = b1_c.intersection(b2_c).area
    union_area = b1_c.area + b2_c.area - inter_area

    if b1_c.area > 11 and b2_c.area > 11:
        overlap = (inter_area / union_area) > 0.5
    elif inter_area > 0:
        overlap = True
    else:
        overlap = False

    pc_merged = pc2
    if overlap:
        bbox_min = MinimumBoundingBox.MinimumBoundingBox(
            np.concatenate((pc1[:, 0:2], pc2[:, 0:2]), axis=0)
        )
        l01 = bbox_min.length_parallel
        l02 = bbox_min.length_orthogonal
        area = l01 * l02
        # shape doesn't look like a car bbox
        if ((area < 2 or area > 12)
                or (l01 > 4.6 or l02 > 4.6)
                or (l01 < 1 or l02 < 1)
                or union_area > 15):
            if b1_c.area > b2_c.area:
                pc_merged = pc1
            else:
                pc_merged = pc2
        else:
            idx_overlap = np.zeros((len(pc1)))
            for i in range(len(pc1)):
                diff = pc2 - pc1[i]
                diff = np.sum(diff ** 2, axis=1)
                if 0 in diff:
                    idx_overlap[i] = 1
            pc_merged = np.concatenate((pc_merged, pc1[idx_overlap == 0]), axis=0)
        if not is_car(pc_merged, min_point_num):
            overlap = False

    return overlap, pc_merged
8caa07a42850d9ca2a4d298e9be91a44ac15f6a5
20,472
def apply_hypercube(cube: DataCube, context: dict) -> DataCube:
    """Reduce the time dimension for each tile and compute min, mean, max and
    sum for each pixel over time.

    Each raster tile in the data cube will be reduced by time. Minimum,
    maximum, mean and sum are computed for each pixel over time.

    Args:
        cube (DataCube): The data cube that contains the raster tiles
        context (dict): Additional runtime context

    Returns:
        DataCube: A new data cube with one band per computed statistic.
    """
    # The array of tiles to reduce
    array: xarray.DataArray = cube.get_array()
    result = xarray.concat(
        [array.min(dim='t'), array.max(dim='t'), array.sum(dim='t'), array.mean(dim='t')],
        dim='bands'
    )
    return DataCube(result)
c2c3b7b90a48a37f5e172111ef13e8529a3a80c5
20,473
def dateIsBefore(year1, month1, day1, year2, month2, day2):
    """Returns True if year1-month1-day1 is before year2-month2-day2.
    Otherwise, returns False."""
    if year1 < year2:
        return True
    if year1 == year2:
        if month1 < month2:
            return True
        if month1 == month2:
            return day1 < day2
    return False
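Examples:

print(dateIsBefore(2019, 12, 31, 2020, 1, 1))  # True
print(dateIsBefore(2020, 1, 1, 2020, 1, 1))    # False: equal dates are not "before"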
3ba19b6e57c8e51a86e590561331057a44885d10
20,474
def all_stat(x, stat_func=np.mean, upper_only=False, stat_offset=3):
    """
    Generate a matrix that contains the value returned by stat_func for all
    possible sub-windows of x[stat_offset:]. stat_func is any function that
    takes a sequence and returns a scalar.

    If upper_only is False, values are added to both the upper and lower
    triangular sections of the matrix. If True, only the upper section is
    populated.
    """
    if len(x) < stat_offset:
        return np.zeros([])

    stat = np.zeros((len(x), len(x)))
    for i in range(0, len(x)):
        for j in range(i + stat_offset, len(x)):
            v = stat_func(x[i:j])
            stat[i, j] = v
            if not upper_only:
                stat[j, i] = v
    return stat
16ef240b33a477948ae99862bb21540a230a8a2f
20,475
def PyCallable_Check(space, w_obj):
    """Determine if the object o is callable. Return 1 if the object is
    callable and 0 otherwise. This function always succeeds."""
    return int(space.is_true(space.callable(w_obj)))
e5b8ee9bbbdb0fe53d6fc7241d19f93f7ee8259a
20,476
from typing import Callable

import numpy as np


def _multiclass_metric_evaluator(metric_func: Callable[..., float], n_classes: int,
                                 y_test: np.ndarray, y_pred: np.ndarray,
                                 **kwargs) -> float:
    """Calculate the average metric for multiclass classifiers."""
    metric = 0
    for label in range(n_classes):
        metric += metric_func(y_test[:, label], y_pred[:, label], **kwargs)
    metric /= n_classes
    return metric
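A usage sketch with hypothetical data, assuming scikit-learn's roc_auc_score as the per-class metric (not part of the original snippet):

from sklearn.metrics import roc_auc_score

y_test = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])  # one-hot labels, 2 classes
y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3], [0.4, 0.6]])
print(_multiclass_metric_evaluator(roc_auc_score, 2, y_test, y_pred))  # 1.0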
a8a61c7a2e3629ff69a6a2aefdb4565e903b82de
20,477
def idxs_of_duplicates(lst):
    """Returns the indices of duplicate values."""
    idxs_of = {}
    dup_idxs = []

    for idx, value in enumerate(lst):
        idxs_of.setdefault(value, []).append(idx)

    for idxs in idxs_of.values():
        if len(idxs) > 1:
            dup_idxs.extend(idxs)

    return dup_idxs
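Example (values are grouped in order of first appearance, per Python 3.7+ dict ordering):

print(idxs_of_duplicates([1, 2, 1, 3, 2, 1]))  # [0, 2, 5, 1, 4]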
adc8a0b0223ac78f0c8a6edd3d60acfaf7ca4c04
20,479
async def store_rekey(
    handle: StoreHandle,
    wrap_method: str = None,
    pass_key: str = None,
) -> StoreHandle:
    """Replace the wrap key on a Store."""
    return await do_call_async(
        "askar_store_rekey",
        handle,
        encode_str(wrap_method and wrap_method.lower()),
        encode_str(pass_key),
    )
e7abb35147bd7b5be5aa37b6583571e5be8f144b
20,480
import requests


def prepare_bitbucket_data(data, profile_data, team_name):
    """
    Prepare bitbucket data by extracting the information needed.
    If the data contains a next page for this team/organisation, continue to
    fetch the next page until the last page.
    """
    next_page = False
    link = None
    profile_data = append_bitbucket_data(data.json(), profile_data, team_name)
    if data.json().get('next'):
        next_page = True
        link = data.json().get('next')

    while next_page:
        next_data = requests.get(link)
        profile_data = append_bitbucket_data(next_data.json(), profile_data, team_name)
        if next_data.json().get('next'):
            link = next_data.json().get('next')
        else:
            next_page = False
    return profile_data
a2fe54a4fd02e80b4bf4d41ff932e27b555afc5c
20,481
def add(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.add <numpy.add>`.

    See its docstring for more information.
    """
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in add")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.add(x1._array, x2._array))
7c35ea06f5bff91da283e3521185a6b9f1b55b32
20,482
def aslist(l):
    """Convenience function to wrap single items and lists, and return
    lists unchanged."""
    if isinstance(l, list):
        return l
    else:
        return [l]
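Examples:

assert aslist([1, 2]) == [1, 2]
assert aslist('abc') == ['abc']  # non-list items, including strings, are wrapped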
99ccef940229d806d27cb8e429da9c85c44fed07
20,483
def getKeyList(rootFile, pathSplit):
    """
    Get the list of keys of the directory (rootFile, pathSplit);
    if (rootFile, pathSplit) is not a directory then get the key in a list.
    """
    if isDirectory(rootFile, pathSplit):
        changeDirectory(rootFile, pathSplit)
        return ROOT.gDirectory.GetListOfKeys()
    else:
        return [getKey(rootFile, pathSplit)]
69d51a496ec77e00753518fee7ae8a0e5b9e7c9a
20,485
from typing import Any


def query_from_json(query_json: Any, client: cl.Client = None):
    """
    The function converts a dictionary or json string of Query to a Query object.

    :param query_json: A dictionary or json string that contains the keys of a Query.
    :type query_json: Any
    :param client: An IBM PAIRS client.
    :type client: ibmpairs.client.Client
    :rtype: ibmpairs.query.Query
    :raises Exception: if not a dict or a str.
    """
    query = Query.from_json(query_json)
    cli = common.set_client(input_client=client,
                            global_client=cl.GLOBAL_PAIRS_CLIENT)
    query.client = cli
    return query
58d1b1f3efedf0b74a0136d1edd1da13bf16bf8c
20,487
def fetch_validation_annotations():
    """Returns the validation annotations.

    Returns:
        complete_annotations: array of annotation data - [n_annotations, 4];
            row format is [T, X, Y, Z]
    """
    ann_gen = _annotation_generator()

    data = []
    for annotation in ann_gen:
        if annotation[0] in VAL_TIMEPOINTS:
            data.append(annotation)
    data = np.asarray(data)

    # scale z down to expected range
    data *= [1, 1, 1, 0.2]

    return data
1b9a8b86bbc005c79b152e1f59e653b7711e674f
20,489
def enough_data(train_data, test_data, verbose=False):
    """Check if train and test sets have any elements."""
    if train_data.empty:
        if verbose:
            print('Empty training data\n')
        return False
    if test_data.empty:
        if verbose:
            print('Empty testing data\n')
        return False
    return True
f11014d83379a5df84a67ee3b8f1e85b23c058f7
20,490
def calculate_tidal_offset(TIDE, GM, R, refell):
    """
    Calculates the spherical harmonic offset for a tide system to change
    from a tide-free state where there are no permanent direct and indirect
    tidal potentials.

    Arguments
    ---------
    TIDE: output tidal system
    GM: geocentric gravitational constant used in gravity model
    R: average radius used in gravity model
    refell: reference ellipsoid name

    Returns
    -------
    deltaC20: offset for changing from tide free system
    """
    # get ellipsoid parameters for refell
    ellip = ref_ellipsoid(refell)
    # standard gravitational acceleration
    gamma = 9.80665
    trans = (-0.198 * gamma * R ** 3) / (np.sqrt(5.0) * GM * ellip['a'] ** 2)
    # load Love number for degree 2 from PREM (Han and Wahr, 1995)
    k2 = -0.30252982142510
    # conversion for each tidal system
    if TIDE == 'mean_tide':
        conv = (1.0 + k2)
    elif TIDE == 'zero_tide':
        conv = k2
    # return the C20 offset
    return conv * trans
278b27b2a1378cf0ccb44055a37baf9def7d6c6a
20,492
def get_questions(set_id, default_txt=None):
    """Method to get a question set as a list."""
    try:
        cache_key = 'question_list_%s' % (set_id)
        cache_list = cache.get(cache_key)
        if cache_list:
            v_list = cache_list
            print('FROM Cache %s' % (cache_key))
        else:
            v_list = ListAnswers.objects.filter(
                answer_set_id=set_id, is_void=False)
            cache.set(cache_key, v_list, 300)
        my_list = v_list.values_list(
            'answer_code', 'answer').order_by('the_order')
        if default_txt:
            initial_list = ('', default_txt)
            final_list = [initial_list] + list(my_list)
            return final_list
    except Exception as e:
        print('error - %s' % (e))
        return ()
    else:
        return my_list
0153ab71caa705f7a4f2a07ce5ef210b02618dd4
20,493
def get_mms_operation(workspace, operation_id):
    """
    Retrieve the operation payload from MMS.

    :return: The json encoded content of the response.
    :rtype: dict
    """
    response = make_mms_request(workspace, 'GET', '/operations/' + operation_id, None)
    return response.json()
c88aca93803ab5075a217a10b7782ae791f168bc
20,495
def _check_data_nan(data):
    """Ensure data compatibility for the series received by the smoother
    (without checking for inf and nans).

    Returns
    -------
    data : array
        Checked input.
    """
    data = np.asarray(data)

    if np.prod(data.shape) == np.max(data.shape):
        data = data.ravel()

    if data.ndim > 2:
        raise ValueError(
            "The format of data received is not appropriate. "
            "Pass an object with data in this format (series, timesteps)")

    if data.ndim == 0:
        raise ValueError(
            "Pass an object with data in this format (series, timesteps)")

    if data.dtype not in [np.float16, np.float32, np.float64,
                          np.int8, np.int16, np.int32, np.int64]:
        raise ValueError("data contains non-numeric types")

    return data
1cde49f2836405deb0c1328d5ce53c69ffbcb721
20,496
def function(row, args):
    """Execute a named function: function(arg, arg...)

    @param row: the HXL data row
    @param args: the parsed arguments (the first one is the function name)
    @returns: the result of executing the function on the arguments
    """
    f = FUNCTIONS.get(args[0])
    if f:
        return f(row, args[1:], True)
    else:
        logger.error("Unknown function %s", args[0])
        return ''
3b6e2e20c09c6cefebb4998d40376ff1b1aa63f2
20,497
def extract_rfc2822_addresses(text):
    """Returns a list of valid RFC2822 addresses that can be found in
    ``source``, ignoring malformed ones and non-ASCII ones.
    """
    if not text:
        return []
    candidates = address_pattern.findall(tools.ustr(text).encode('utf-8'))
    return filter(try_coerce_ascii, candidates)
b256bd585a30900e09a63f0cc29889044da8e0e0
20,499
def set_edge_font_size_mapping(table_column, table_column_values=None, sizes=None,
                               mapping_type='c', default_size=None, style_name=None,
                               network=None, base_url=DEFAULT_BASE_URL):
    """Map table column values to sizes to set the edge size.

    Args:
        table_column (str): Name of Cytoscape table column to map values from
        table_column_values (list): List of values from Cytoscape table to be used in mapping
        sizes (list): List of size values to map to ``table_column_values``
        mapping_type (str): discrete or passthrough (d,p); default is discrete
        default_size (int): Size value to set as default
        style_name (str): name for style
        network (SUID or str or None): Name or SUID of a network or view. Default is the
            "current" network active in Cytoscape.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        str: ''

    Raises:
        CyError: if table column doesn't exist, table column values don't match the values
            list, or invalid style name, network or mapping type, or if invalid size
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> set_edge_font_size_mapping('EdgeBetweenness', table_column_values=[2.0, 20000.0], sizes=[20, 80], style_name='galFiltered Style')
        ''
        >>> set_edge_font_size_mapping('interaction', table_column_values=['pp', 'pd'], sizes=[40, 90], mapping_type='d', style_name='galFiltered Style')
        ''
        >>> set_edge_font_size_mapping(**gen_edge_size_map('interaction', mapping_type='d'))
        ''
        >>> set_edge_font_size_mapping(**gen_edge_size_map('EdgeBetweenness', scheme_c_number_continuous(100, 200), style_name='galFiltered Style'))
        ''
        >>> set_edge_font_size_mapping('PassthruCol', mapping_type='p', default_size=20, style_name='galFiltered Style')
        ''

    See Also:
        :meth:`gen_edge_size_map`

    See Also:
        `Value Generators <https://py4cytoscape.readthedocs.io/en/0.0.9/concepts.html#value-generators>`_ in the Concepts section in the py4cytoscape User Manual.
    """
    verify_dimensions('size', sizes)

    if default_size is not None:
        style_defaults.set_edge_font_size_default(default_size, style_name=style_name,
                                                  base_url=base_url)

    return _update_visual_property('EDGE_LABEL_FONT_SIZE', table_column,
                                   table_column_values=table_column_values,
                                   range_map=sizes, mapping_type=mapping_type,
                                   style_name=style_name, network=network,
                                   base_url=base_url, table='edge')
7360f57d1e4921d58eaf5af4e57a6c5f636fefdb
20,500
def compute_time_overlap(appointment1, appointment2):
    """
    Compare two appointments on the same day.
    """
    assert appointment1.date_ == appointment2.date_
    print("Checking for time overlap on \"{}\"...".format(appointment1.date_))
    print("Times to check: {}, {}".format(appointment1.time_range_,
                                          appointment2.time_range_))

    latest_start = max(appointment1.start_time_, appointment2.start_time_)
    earliest_end = min(appointment1.end_time_, appointment2.end_time_)
    # use total_seconds() so a negative difference (no overlap) stays negative;
    # timedelta.seconds would wrap it to a large positive value
    delta = (earliest_end - latest_start).total_seconds()
    overlap = max(0, delta)

    if overlap == 0:
        print("No time overlap.")
        return False

    print("\033[93mFound time overlap.\033[0m")
    return True
c459ef52d78bc8dd094d5be9c9f4f035c4f9fcaa
20,501
def set_up_prior(data, params):
    """
    Function to create prior distribution from data

    Parameters
    ----------
    data: dict
        catalog dictionary containing bin endpoints, log interim prior, and
        log interim posteriors
    params: dict
        dictionary of parameter values for creation of prior

    Returns
    -------
    prior: chippr.mvn object
        prior distribution as multivariate normal
    """
    zs = data['bin_ends']
    log_nz_intp = data['log_interim_prior']
    # modify the line above if testing implicit prior misspecification
    print('reading implicit prior ' + str(log_nz_intp))
    log_z_posts = data['log_interim_posteriors']

    z_difs = zs[1:] - zs[:-1]
    z_mids = (zs[1:] + zs[:-1]) / 2.
    n_bins = len(z_mids)
    n_pdfs = len(log_z_posts)

    a = 1.     # amplitude
    b = 5.     # inverse wavelength
    c = 1.e-2  # random fluctuations
    prior_var = np.eye(n_bins)
    for k in range(n_bins):
        prior_var[k] = a * np.exp(-0.5 * b * (z_mids[k] - z_mids) ** 2)
    prior_var += c * np.eye(n_bins)

    prior_mean = log_nz_intp
    prior = mvn(prior_mean, prior_var)
    if params['prior_mean'] == 'sample':
        prior_mean = prior.sample_one()
        prior = mvn(prior_mean, prior_var)

    return (prior, prior_var)
eb75965b9425bbf9fe3a33b7f0c850e27e2d454a
20,502
def is_nonincreasing(arr):
    """
    Returns True if the sequence is non-increasing.
    """
    return all(x >= y for x, y in zip(arr, arr[1:]))
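Examples:

assert is_nonincreasing([5, 5, 3, 1])
assert not is_nonincreasing([1, 2])
assert is_nonincreasing([])  # vacuously true for empty input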
6d78ef5f68ca93767f3e204dfea2c2be8b3040af
20,503
def restore_missing_features(nonmissing_X, missing_features):
    """Insert columns corresponding to missing features.

    Parameters
    ----------
    nonmissing_X : array-like, shape (n_samples, n_nonmissing)
        Array containing data with missing features removed.
    missing_features : array-like, shape (n_missing,)
        Array containing the column indices in the original data
        that correspond to missing features.

    Returns
    -------
    X : array-like, shape (n_samples, n_features)
        Array with missing features inserted.
    """
    if missing_features is None:
        missing_features = []

    n_samples, n_nonmissing = nonmissing_X.shape
    n_missing = len(missing_features)
    n_features = n_missing + n_nonmissing

    # Ensure missing indices are consistent with the
    # inferred number of features.
    if len(missing_features) > 0:
        if min(missing_features) < 0 or max(missing_features) >= n_features:
            raise ValueError(
                'Missing features are inconsistent with '
                'number of features in complete data')

    if is_dask_array(nonmissing_X):
        cols = []
        idx = 0
        for i in range(n_features):
            if i in missing_features:
                cols.append(dask.array.full((n_samples, 1), np.NaN))
            else:
                cols.append(nonmissing_X[:, idx].reshape((n_samples, 1)))
                idx += 1
        X = dask.array.hstack(cols)
    else:
        nonmissing_features = [i for i in range(n_features)
                               if i not in missing_features]
        X = np.full((n_samples, n_features), np.NaN)
        X[:, nonmissing_features] = nonmissing_X

    return X
733fd72d36ea86269472949eaa12a306835578f9
20,504
def get_sample_type_from_recipe(recipe):
    """Retrieves sample type from recipe.

    Args:
        recipe: Recipe of the project

    Returns:
        sample_type_mapping, dict: Sample type of the project.
        For example: { TYPE: "RNA" }, { TYPE: "DNA" }, { TYPE: "WGS" }
    """
    return find_mapping(recipe_type_mapping, recipe)
57cef2f592d530ad15aed47cc51da4441430f3d2
20,505
import pandas as pd
import yaml


def _is_download_necessary(path, response):
    """Check whether a download is necessary.

    There are three criteria.

    1. If the file is missing, download it.
    2. The following two checks depend on each other.

       1. Some files have an entry in the header which specifies when the file
          was modified last. If the file has been modified, download it.
       2. If the header has no entry for the last modified date, we compare
          file sizes. If the file sizes do not match, the file is downloaded.

    """
    path_yaml = path.with_suffix(".yaml")
    if path_yaml.exists():
        last_modified_offline = pd.to_datetime(
            yaml.safe_load(path_yaml.read_text())["last_modified"]
        )
    else:
        last_modified_offline = None
    last_modified_online = pd.to_datetime(response.headers.get("last-modified", None))

    path.with_suffix(".yaml").write_text(
        yaml.dump({"last_modified": response.headers.get("last-modified", None)})
    )

    if not path.exists():
        is_necessary = True
        reason = f"The file {path.name} does not exist."
    elif (
        last_modified_online is not None
        and last_modified_online > last_modified_offline
    ):
        is_necessary = True
        reason = f"{path.name} has been modified online."
    elif last_modified_online is None:
        file_size_offline = path.stat().st_size
        file_size_online = int(response.headers.get("content-length", 0))
        if file_size_online != file_size_offline:
            is_necessary = True
            reason = f"File sizes differ for {path.name}"
        else:
            is_necessary = False
            reason = f"File {path.name} is already downloaded."
    else:
        is_necessary = False
        reason = f"File {path.name} is already downloaded."

    return is_necessary, reason
69cd2778fb6d4706ff88bd35c1b5c1abda9a39ba
20,506
import hashlib


def hash64(s):
    """Compute a short 8-character hash of ``s``
    (SHA-1 digest reduced mod 10**8, rendered as hex)."""
    digest = hashlib.sha1(s.encode("utf-8")).hexdigest()
    return "{:x}".format(int(digest, 16) % (10 ** 8))
e35a367eac938fdb66584b52e1e8da59582fdb9a
20,507
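A quick usage sketch for hash64 above; because SHA-1 is deterministic, the same input always yields the same short tag:

tag = hash64("hello world")
print(tag)  # the same short hex string on every run
assert tag == hash64("hello world")
# different inputs almost always map to different tags, though the
# mod 10**8 reduction means collisions are possible in principle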
def course_runs():
    """Fixture for a set of CourseRuns in the database"""
    return CourseRunFactory.create_batch(3)
1ffd4efe008e44f9e2828c9128acfe3cdafb5160
20,508
import json
from unittest.mock import MagicMock


def _response(data=None, status_code=None):
    """Build a mocked response for use with the requests library."""
    response = MagicMock()
    if data:
        response.json = MagicMock(return_value=json.loads(data))
    if status_code:
        response.status_code = status_code
    response.raise_for_status = MagicMock()
    return response
0ccd38a954d28a4f010becc68319b49896323de0
20,509
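A short usage sketch for the _response helper above, e.g. when patching requests.get in a test (assumes the requests package is installed; the URL is illustrative):

from unittest.mock import patch

fake = _response(data='{"ok": true}', status_code=200)
with patch("requests.get", return_value=fake):
    import requests
    resp = requests.get("https://example.com/api")
    assert resp.status_code == 200
    assert resp.json() == {"ok": True}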
import numpy as np
import matplotlib.pyplot as plt
# gmdist is assumed to be sklearn's GaussianMixture; the fit/predict_proba/
# means_/covariances_ usage below matches that API.
from sklearn.mixture import GaussianMixture as gmdist


def findtailthreshold(v, figpath=None):
    """
    function [f,mns,sds,gmfit] = findtailthreshold(v,wantfig)

    <v> is a vector of values
    <wantfig> (optional) is whether to plot a diagnostic figure. Default: 1.

    Fit a Gaussian Mixture Model (with n=2) to the data and find the point
    that is greater than the median and at which the posterior probability
    is equal (50/50) across the two Gaussians. This serves as a nice "tail
    threshold".

    To save on computational load, we take a random subset of size 1000000
    if there are more than that number of values. We also use some
    discretization in computing our solution.

    return:
     <f> as the threshold
     <mns> as [A B] with the two means (A < B)
     <sds> as [C D] with the corresponding std devs
     <gmfit> with the gmdist object (the order might not be the same as A < B)

    example:
    from numpy.random import randn
    f, mns, sds, gmfit = findtailthreshold(np.r_[randn(1000), 5+3*randn(500)],
                                           figpath='test.png')
    """
    # internal constants
    numreps = 3  # number of restarts for the GMM
    maxsz = 1000000  # maximum number of values to consider
    nprecision = 500  # linearly spaced values between median and upper robust range

    # inputs
    if figpath is None:
        wantfig = 0
    else:
        wantfig = 1

    # quick massaging of input
    v2 = v[np.isfinite(v)]
    if len(v2) > maxsz:
        print('warning: too big, so taking a subset')
        v2 = picksubset(v2, maxsz)  # external helper from the surrounding project

    # fit mixture of two gaussians
    gmfit = gmdist(n_components=2, n_init=numreps).fit(v2.reshape(-1, 1))

    # figure out a nice range (robustrange is an external helper)
    rng = robustrange(v2.flatten())[0]

    # evaluate posterior
    allvals = np.linspace(np.median(v2), rng[1], num=nprecision)
    checkit = gmfit.predict_proba(allvals.reshape(-1, 1))

    # figure out crossing
    np.testing.assert_equal(
        np.any(checkit[:, 0] > .5) and np.any(checkit[:, 0] < .5),
        True,
        err_msg='no crossing of 0.5 detected')
    ix = np.argmin(np.abs(checkit[:, 0] - .5))

    # return it
    f = allvals[ix]

    # prepare other outputs
    mns = gmfit.means_.flatten()
    sds = np.sqrt(gmfit.covariances_.flatten())
    if mns[1] < mns[0]:
        mns = mns[[1, 0]]
        sds = sds[[1, 0]]

    # start the figure
    if wantfig:
        plt.plot(allvals, checkit)
        plt.plot([allvals[ix], allvals[ix]], plt.ylim(), 'k-', linewidth=2)
        plt.title('Posterior Probabilities')
        plt.savefig(figpath)
        plt.close('all')

    return f, mns, sds, gmfit
8ef0c3267582d604621bd98fa7c81976b76b2c51
20,510
import asyncio
import logging
from typing import Dict, Optional

from aiohttp import ClientConnectionError, ClientResponseError, web
from yarl import URL

logger = logging.getLogger(__name__)


async def make_request_and_envelope_response(
    app: web.Application,
    method: str,
    url: URL,
    headers: Optional[Dict[str, str]] = None,
    data: Optional[bytes] = None,
) -> web.Response:
    """
    Helper to forward a request to the catalog service
    """
    # get_client_session, wrap_as_envelope and json_dumps are helpers
    # from the surrounding project.
    session = get_client_session(app)

    try:
        async with session.request(method, url, headers=headers, data=data) as resp:
            payload = await resp.json()

            try:
                resp.raise_for_status()
                resp_data = wrap_as_envelope(data=payload)

            except ClientResponseError as err:
                if 500 <= err.status:
                    raise err
                resp_data = wrap_as_envelope(error=payload["errors"])

            return web.json_response(resp_data, status=resp.status, dumps=json_dumps)

    except (asyncio.TimeoutError, ClientConnectionError, ClientResponseError) as err:
        logger.warning(
            "Catalog service errors upon request %s %s: %s", method, url.relative(), err
        )
        raise web.HTTPServiceUnavailable(
            reason="catalog is currently unavailable"
        ) from err
22d18de671cc84d3471120273a7b0599fda26210
20,511
from collections import defaultdict
from itertools import groupby


def get_provincial_miif_sets(munis):
    """
    collect set of indicator values for each province, MIIF category and year

    returns dict of the form:
    { 'cash_coverage': { 'FS': { 'B1': { '2015': [{'result': ...}] } } } }
    """
    prov_sets = defaultdict(
        lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list))))
    dev_cat_key = lambda muni: muni['municipality.miif_category']
    dev_cat_sorted = sorted(munis, key=dev_cat_key)
    prov_key = lambda muni: muni['municipality.province_code']
    # get_indicator_calculators is an external helper from the surrounding project.
    for calculator in get_indicator_calculators(has_comparisons=True):
        name = calculator.indicator_name
        for dev_cat, dev_cat_group in groupby(dev_cat_sorted, dev_cat_key):
            prov_sorted = sorted(dev_cat_group, key=prov_key)
            for prov_code, prov_group in groupby(prov_sorted, prov_key):
                for muni in prov_group:
                    for period in muni[name]['values']:
                        if period['result'] is not None:
                            prov_sets[name][prov_code][dev_cat][period['date']].append(period)
    return prov_sets
a4484a40a30f8fe9e702735f6eccf78c395ea446
20,513
import numpy as np


def create_kernel(radius=2, invert=False):
    """Define a circular kernel: a disc of the given radius, filled with
    ones on a zero background (or the complement when invert=True)."""
    if invert:
        value = 0
        k = np.ones((2 * radius + 1, 2 * radius + 1))
    else:
        value = 1
        k = np.zeros((2 * radius + 1, 2 * radius + 1))

    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    mask = x ** 2 + y ** 2 <= radius ** 2
    k[mask] = value

    return k
33bbfa6141eb722180ffa9111555e4e00fbdac6a
20,514
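A quick sketch of create_kernel above; with radius=1 the mask keeps the centre and the four axis-adjacent cells:

k = create_kernel(radius=1)
# [[0. 1. 0.]
#  [1. 1. 1.]
#  [0. 1. 0.]]
print(k)
assert k.sum() == 5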
import requests
from bs4 import BeautifulSoup


def edimax_get_power(ip_addr="192.168.178.137"):
    """
    Query the current power reading from an Edimax smart plug.

    Source: http://sun-watch.net/index.php/eigenverbrauch/ipschalter/edimax-protokoll/
    """
    req = """<?xml version="1.0" encoding="UTF8"?><SMARTPLUG id="edimax"><CMD id="get">
           <NOW_POWER><Device.System.Power.NowCurrent>
           </Device.System.Power.NowCurrent><Device.System.Power.NowPower>
           </Device.System.Power.NowPower></NOW_POWER></CMD></SMARTPLUG>
           """
    r = requests.post("http://{0}:10000/smartplug.cgi".format(ip_addr),
                      auth=("admin", "1234"),
                      data=req)
    soup = BeautifulSoup(r.text, features="xml")
    power = soup.find(name="Device.System.Power.NowPower").get_text()
    print(r.text)
    return float(power)
9de1720f00ca4194b66f79048314bac1cac950ed
20,515
import numpy as np
import torch


def get_class_inst_data_params_n_optimizer(nr_classes, nr_instances, device):
    """Returns class and instance level data parameters and their corresponding optimizers.

    Args:
        nr_classes (int): number of classes in dataset.
        nr_instances (int): number of instances in dataset.
        device (str): device on which data parameters should be placed.

    Returns:
        class_parameters (torch.Tensor): class level data parameters.
        inst_parameters (torch.Tensor): instance level data parameters
        optimizer_class_param (SparseSGD): Sparse SGD optimizer for class parameters
        optimizer_inst_param (SparseSGD): Sparse SGD optimizer for instance parameters
    """
    # SparseSGD is an external optimizer from the surrounding project.
    class_parameters = torch.tensor(
        np.ones(nr_classes) * np.log(1.0),
        dtype=torch.float32,
        requires_grad=True,
        device=device
    )
    optimizer_class_param = SparseSGD(
        [class_parameters],
        lr=0.1,
        momentum=0.9,
        skip_update_zero_grad=True
    )

    inst_parameters = torch.tensor(
        np.ones(nr_instances) * np.log(1.0),
        dtype=torch.float32,
        requires_grad=True,
        device=device
    )
    optimizer_inst_param = SparseSGD(
        [inst_parameters],
        lr=0.1,
        momentum=0.9,
        skip_update_zero_grad=True
    )

    return class_parameters, inst_parameters, optimizer_class_param, optimizer_inst_param
f97e12b99a42f32bb3629df5567ca44477d71dc0
20,516
def inc(x):
    """
    Add one to the current value
    """
    return x + 1
c8f9a68fee2e8c1a1d66502ae99e42d6034b6b5c
20,517
def regionError(df, C, R):
    """Detects if a selected region is not part of one of the selected countries

    Parameters:
    -----------
    df : Pandas DataFrame
        the original dataset
    C : str list
        list of selected countries
    R : str list
        list of selected regions

    Returns
    -----------
    bool
        True if the error is detected
    """
    if C is None:
        C = ['USA']
    # regions_of_country is an external helper from the surrounding project.
    available_regions = list(regions_of_country(df, C)) + ['All_regions', 'All']
    for region in R:
        if region not in available_regions:
            return True
    return False
53e237bba7c1696d23b5f1e3c77d4b2d2a4c9390
20,518
import numpy as np


def lherzolite():
    """
    Elastic constants of lherzolite rock (GPa) from
    Peselnick et al. (1974), in Voigt notation

    - Abbreviation: ``'LHZ'``

    Returns:
        (tuple): tuple containing:
            * C (np.ndarray): Elastic stiffness matrix (shape ``(6, 6)``)
            * rho (float): Density (3270 kg/m^3)

    Example
    -------
    >>> from telewavesim import elast
    >>> elast.lherzolite()[0]
    array([[ 1.8740e+02,  6.3710e+01,  6.3870e+01,  7.8000e-01,  2.0200e+00, -3.2000e+00],
           [ 6.3710e+01,  2.1125e+02,  6.4500e+01, -3.0700e+00,  8.7000e-01, -5.7800e+00],
           [ 6.3870e+01,  6.4500e+01,  1.9000e+02,  3.8000e-01,  2.3800e+00, -1.2000e-01],
           [ 7.8000e-01, -3.0700e+00,  3.8000e-01,  6.7900e+01, -2.1200e+00,  1.6000e+00],
           [ 2.0200e+00,  8.7000e-01,  2.3800e+00, -2.1200e+00,  6.3120e+01, -5.5000e-01],
           [-3.2000e+00, -5.7800e+00, -1.2000e-01,  1.6000e+00, -5.5000e-01,  6.6830e+01]])
    >>> elast.lherzolite()[1]
    3270.0
    """
    rho = 3270.

    # Fill the upper triangle row by row; the lower triangle mirrors it.
    C = np.zeros((6, 6), dtype=float)
    C[0, 0] = 187.4; C[0, 1] = 63.71; C[0, 2] = 63.87; C[0, 3] = 0.78; C[0, 4] = 2.02; C[0, 5] = -3.2
    C[1, 0] = C[0, 1]; C[1, 1] = 211.25; C[1, 2] = 64.5; C[1, 3] = -3.07; C[1, 4] = 0.87; C[1, 5] = -5.78
    C[2, 0] = C[0, 2]; C[2, 1] = C[1, 2]; C[2, 2] = 190.; C[2, 3] = 0.38; C[2, 4] = 2.38; C[2, 5] = -0.12
    C[3, 0] = C[0, 3]; C[3, 1] = C[1, 3]; C[3, 2] = C[2, 3]; C[3, 3] = 67.9; C[3, 4] = -2.12; C[3, 5] = 1.6
    C[4, 0] = C[0, 4]; C[4, 1] = C[1, 4]; C[4, 2] = C[2, 4]; C[4, 3] = C[3, 4]; C[4, 4] = 63.12; C[4, 5] = -0.55
    C[5, 0] = C[0, 5]; C[5, 1] = C[1, 5]; C[5, 2] = C[2, 5]; C[5, 3] = C[3, 5]; C[5, 4] = C[4, 5]; C[5, 5] = 66.83

    return C, rho
4d7e16fcfc1732ee3a881d4b1c2d755bbd9035f3
20,519
import tensorflow as tf  # uses TF1-style ops (tf.to_int32 / tf.to_float)


def int_to_bit(x_int, nbits, base=2):
    """Turn x_int representing numbers into a bitwise (lower-endian) tensor."""
    x_l = tf.expand_dims(x_int, axis=-1)
    x_labels = []
    for i in range(nbits):
        x_labels.append(
            tf.floormod(
                tf.floordiv(tf.to_int32(x_l), tf.to_int32(base)**i),
                tf.to_int32(base)))
    res = tf.concat(x_labels, axis=-1)
    return tf.to_float(res)
a9529c737e058da664d31055aa85cc7e6179f585
20,520
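For reference, a NumPy sketch of the same lower-endian digit decomposition that int_to_bit above performs (my own illustration, not part of the original library):

import numpy as np

def int_to_bit_np(x_int, nbits, base=2):
    # digit i is (x // base**i) mod base, least-significant digit first
    x = np.asarray(x_int)[..., np.newaxis]
    digits = [(x // base**i) % base for i in range(nbits)]
    return np.concatenate(digits, axis=-1).astype(np.float32)

print(int_to_bit_np([5], nbits=4))  # [[1. 0. 1. 0.]] since 5 = 1*1 + 0*2 + 1*4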
def create_ldap_external_user_directory_config_content(server=None, roles=None,
                                                       role_mappings=None, **kwargs):
    """Create LDAP external user directory configuration file content.
    """
    entries = {
        "user_directories": {
            "ldap": {
            }
        }
    }

    entries["user_directories"]["ldap"] = []

    if server:
        entries["user_directories"]["ldap"].append({"server": server})

    if roles:
        entries["user_directories"]["ldap"].append(
            {"roles": [{r: None} for r in roles]})

    if role_mappings:
        for role_mapping in role_mappings:
            entries["user_directories"]["ldap"].append({"role_mapping": role_mapping})

    return create_xml_config_content(entries, **kwargs)
baaf3d7a02f2f4e18c3ccddb7f3ff4d5b379c1d4
20,521
def intersection(lst1, lst2):
    """!
    \details Finds hashes that are common to both lists and stores their
             location in both documents. Finds similarity that is measured by
             sim(A,B) = number of hashes in intersection of both hash sets
             divided by minimum of the number of hashes in lst1 and lst2
    \param lst1: 1st list whose elements are of the form
                 [hash, start location, end location]
    \param lst2: 2nd list whose elements are of the form
                 [hash, start location, end location]
    \return l3: list of common hashes and their locations in both documents.
                This is a list whose elements are of the form
                [common hash, [start location in 1, end location in 1],
                 [start location in 2, end location in 2]]
    \return sim: similarity measure evaluated
    """
    l1h = [h[0] for h in lst1]
    l2h = [h[0] for h in lst2]
    l1loc = {h[0]: h[1:] for h in lst1}
    l2loc = {h[0]: h[1:] for h in lst2}
    l3h = list(set(l1h) & set(l2h))
    l3 = [[h, l1loc[h], l2loc[h]] for h in l3h]
    sim = len(l3) / min(len(set(l1h)), len(set(l2h)))
    return l3, sim
7288e523e743fda89596e56f217aac8c87899b50
20,522
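A small usage sketch for intersection above, with two toy fingerprint lists (hash values here are arbitrary):

doc1 = [[0xA1, 0, 4], [0xB2, 5, 9], [0xC3, 10, 14]]
doc2 = [[0xB2, 2, 6], [0xD4, 7, 11]]

common, sim = intersection(doc1, doc2)
print(common)  # [[178, [5, 9], [2, 6]]] -- 0xB2 appears in both documents
print(sim)     # 0.5: one shared hash / min(3, 2)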
import numpy as np
from warnings import warn


def allrad2(F_nm, hull, N_sph=None, jobs_count=1):
    """Loudspeaker signals of All-Round Ambisonic Decoder 2.

    Parameters
    ----------
    F_nm : ((N_sph+1)**2, S) numpy.ndarray
        Matrix of spherical harmonics coefficients of spherical function(S).
    hull : LoudspeakerSetup
    N_sph : int
        Decoding order, defaults to hull.characteristic_order.
    jobs_count : int or None, optional
        Number of parallel jobs, 'None' employs 'cpu_count'.

    Returns
    -------
    ls_sig : (L, S) numpy.ndarray
        Loudspeaker L output signal S.

    References
    ----------
    Zotter, F., & Frank, M. (2018). Ambisonic decoding with panning-invariant
    loudness on small layouts (AllRAD2). In 144th AES Convention.

    Examples
    --------
    .. plot::
        :context: close-figs

        ls_setup = spa.decoder.LoudspeakerSetup(ls_x, ls_y, ls_z)
        ls_setup.pop_triangles(normal_limit=85, aperture_limit=90,
                               opening_limit=150)
        ls_setup.ambisonics_setup(update_hull=True)

        spa.plots.decoder_performance(ls_setup, 'ALLRAD2')
    """
    if not hull.ambisonics_hull:
        raise ValueError('Run LoudspeakerSetup.ambisonics_setup() first!')
    if hull.kernel_hull:
        kernel_hull = hull.kernel_hull
    else:
        raise ValueError('Run LoudspeakerSetup.ambisonics_setup() first!')
    if N_sph is None:
        N_sph = hull.characteristic_order

    N_sph_in = int(np.sqrt(F_nm.shape[0]) - 1)
    assert N_sph == N_sph_in  # for now
    if N_sph_in > kernel_hull.N_kernel:
        warn("Undersampling the sphere. Needs higher N_Kernel.")

    # virtual t-design loudspeakers
    J = len(kernel_hull.points)
    # virtual speakers expressed as phantom sources (Kernel);
    # allrap2, utils and sph are modules from the surrounding package.
    G_k = allrap2(src=kernel_hull.points, hull=hull, N_sph=N_sph,
                  jobs_count=jobs_count)

    # tapering already applied in kernel, sufficient?

    # virtual Ambisonic decoder
    _k_azi, _k_colat, _k_r = utils.cart2sph(kernel_hull.points[:, 0],
                                            kernel_hull.points[:, 1],
                                            kernel_hull.points[:, 2])
    # band-limited Dirac
    Y_bld = sph.sh_matrix(N_sph, _k_azi, _k_colat, SH_type='real')

    # ALLRAD2 Decoder
    D = 4 * np.pi / J * G_k.T @ Y_bld

    # loudspeaker output signals
    ls_sig = D @ F_nm

    return ls_sig
a34cfd7719c36dd8abf1313e3eca4aa2ce49477b
20,523
def outfeed(token, xs):
    """Outfeeds value `xs` to the host. Experimental.

    `token` is used to sequence infeed and outfeed effects.
    """
    flat_xs, _ = pytree.flatten(xs)
    return outfeed_p.bind(token, *flat_xs)
1b4b8c289ebd5dbb90ddd7b565c98ea9ebaec038
20,525
import numpy as np


def add_gaussian_noise(images: list, var: list, random_var: float = None,
                       gauss_noise: list = None):
    """
    Add gaussian noise to input images.
    If random_var and gauss_noise are given, use them to compute the final
    images. Otherwise, compute random_var and gauss_noise.

    :param images: list of images
    :param var: variance range from which the variance value is uniformly
        sampled if random_var is None.
    :param random_var: optional value specifying the variance multiplier.
    :param gauss_noise: optional value specifying the additive gaussian
        noise per image.
    :return: transformed images, random_var value, gauss_noise_out list
    """
    if random_var is None:
        random_var = np.random.uniform(var[0], var[1])
    mean = 0
    new_images = []
    gauss_noise_out = []
    for i, image in enumerate(images):
        row, col, c = image.shape
        # Re-draw the noise if none was supplied or if the supplied noise
        # does not match this image's size.
        if gauss_noise is None or \
                (gauss_noise is not None and
                 row * col * c != gauss_noise[i].shape[0] *
                 gauss_noise[i].shape[1] * gauss_noise[i].shape[2]):
            gauss = np.random.normal(mean, random_var * 127.5, (row, col, c))
        else:
            gauss = gauss_noise[i]
        gauss_noise_out.append(gauss)
        gauss = gauss.reshape(row, col, c)
        image1 = np.clip(image + gauss, 0., 255.)
        new_images.append(image1)
    return new_images, random_var, gauss_noise_out
e453f1fda24ec428eb1e33aec87db9456fa015b2
20,526
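A quick run of add_gaussian_noise above on a single dummy image; passing the returned random_var and noise back in reproduces the same output:

import numpy as np

img = np.full((4, 4, 3), 128.0)
noisy, rv, noise = add_gaussian_noise([img], var=[0.01, 0.05])
noisy2, _, _ = add_gaussian_noise([img], var=[0.01, 0.05],
                                  random_var=rv, gauss_noise=noise)
assert np.allclose(noisy[0], noisy2[0])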
def get_sso_backend():
    """
    Return SingleSignOnBackend class instance.
    """
    return get_backend_instance(cfg.CONF.auth.sso_backend)
4c2a9b857006405f804826b1c096aa8d828d6e42
20,527
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
    """
    Convenience layer that performs a convolution, a ReLU, and a pool.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - pool_param: Parameters for the pooling layer

    Returns a tuple of:
    - out: Output from the pooling layer
    - cache: Object to give to the backward pass
    """
    a, conv_cache = conv_forward_fast(x, w, b, conv_param)
    s, relu_cache = relu_forward(a)
    out, pool_cache = max_pool_forward_fast(s, pool_param)
    cache = (conv_cache, relu_cache, pool_cache)
    return out, cache
20d5f3d4b30edd91ef54b53a7b09882b3e2ab9b8
20,528
# Assumes the pyscard package: SCardEstablishContext/SCardConnect and the
# SCARD_* constants live in smartcard.scard. EstablishContextException is
# an exception class from the surrounding project.
from smartcard.scard import *


def ConnectWithReader(readerName, mode):
    """ConnectWithReader"""
    hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
    if hresult != SCARD_S_SUCCESS:
        raise EstablishContextException(hresult)

    hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, readerName,
                                                    mode, SCARD_PROTOCOL_ANY)
    return hresult, hcontext, hcard
af7606fb9ad669185c7a655967d0193b59404fe1
20,529
import tensorflow as tf
# type_alias, shape, asserts, safe_ops and vector are modules from the
# surrounding tensorflow_graphics-style package.


def interpolate(points: type_alias.TensorLike,
                weights: type_alias.TensorLike,
                indices: type_alias.TensorLike,
                normalize: bool = True,
                allow_negative_weights: bool = False,
                name: str = "weighted_interpolate") -> type_alias.TensorLike:
    """Weighted interpolation for M-D point sets.

    Given an M-D point set, this function can be used to generate a new
    point set that is formed by interpolating a subset of points in the set.

    Note:
        In the following, A1 to An, and B1 to Bk are optional batch dimensions.

    Args:
        points: A tensor with shape `[B1, ..., Bk, M]` and rank R > 1, where M
            is the dimensionality of the points.
        weights: A tensor with shape `[A1, ..., An, P]`, where P is the number
            of points to interpolate for each output point.
        indices: A tensor of dtype tf.int32 and shape `[A1, ..., An, P, R-1]`,
            which contains the point indices to be used for each output point.
            The R-1 dimensional axis gives the slice index of a single point in
            `points`. The first n+1 dimensions of weights and indices must
            match, or be broadcast compatible.
        normalize: A `bool` describing whether or not to normalize the weights
            on the last axis.
        allow_negative_weights: A `bool` describing whether or not negative
            weights are allowed.
        name: A name for this op. Defaults to "weighted_interpolate".

    Returns:
        A tensor of shape `[A1, ..., An, M]` storing the interpolated M-D
        points. The first n dimensions will be the same as weights and indices.
    """
    with tf.name_scope(name):
        points = tf.convert_to_tensor(value=points)
        weights = tf.convert_to_tensor(value=weights)
        indices = tf.convert_to_tensor(value=indices)

        shape.check_static(
            tensor=points, tensor_name="points", has_rank_greater_than=1)
        shape.check_static(
            tensor=indices,
            tensor_name="indices",
            has_rank_greater_than=1,
            has_dim_equals=(-1, points.shape.ndims - 1))
        shape.compare_dimensions(
            tensors=(weights, indices),
            axes=(-1, -2),
            tensor_names=("weights", "indices"))
        shape.compare_batch_dimensions(
            tensors=(weights, indices),
            last_axes=(-2, -3),
            tensor_names=("weights", "indices"),
            broadcast_compatible=True)
        if not allow_negative_weights:
            weights = asserts.assert_all_above(weights, 0.0, open_bound=False)

        if normalize:
            sums = tf.reduce_sum(input_tensor=weights, axis=-1, keepdims=True)
            sums = asserts.assert_nonzero_norm(sums)
            weights = safe_ops.safe_signed_div(weights, sums)
        point_lists = tf.gather_nd(points, indices)

        return vector.dot(
            point_lists, tf.expand_dims(weights, axis=-1), axis=-2,
            keepdims=False)
13d49a570ac482a0b2b76ab0bb49bf7994df7204
20,530
from numpy import zeros


def calcADPs(atom):
    """Calculate anisotropic displacement parameters (ADPs) from
    anisotropic temperature factors (ATFs).

    *atom* must have ATF values set for ADP calculation. ADPs are returned
    as a tuple, i.e. (eigenvalues, eigenvectors)."""

    # importLA is an external helper that lazily imports a linear
    # algebra backend; Atom comes from the surrounding package.
    linalg = importLA()
    if not isinstance(atom, Atom):
        raise TypeError('atom must be of type Atom, not {0:s}'
                        .format(type(atom)))
    anisou = atom.getAnisou()
    if anisou is None:
        raise ValueError('atom does not have anisotropic temperature factors')

    # Build the symmetric 3x3 displacement tensor from the six ATF values.
    element = zeros((3, 3))
    element[0, 0] = anisou[0]
    element[1, 1] = anisou[1]
    element[2, 2] = anisou[2]
    element[0, 1] = element[1, 0] = anisou[3]
    element[0, 2] = element[2, 0] = anisou[4]
    element[1, 2] = element[2, 1] = anisou[5]

    # Return eigenvalues/eigenvectors sorted in descending order.
    vals, vecs = linalg.eigh(element)
    return vals[[2, 1, 0]], vecs[:, [2, 1, 0]]
0a0a0db2ca99d4a3b754acb9cd22ec4659af948f
20,531
import numpy as np


def bitset(array, bits):
    """
    To determine if the given bits are set in an array.

    Input Parameters
    ----------------
    array : array
        A numpy array to search.

    bits : list or array
        A list or numpy array of bits to search. Note that the "first" bit
        is denoted as zero, while the "second" bit is denoted as 1.

    Optional Parameters:
        None

    Returns
    --------
    array
        Returns a byte array of the same size as array. A pixel is set
        if any of the bits requested are set in the same pixel of array.

    Procedure
    ---------
    Uses the Gumley IDL ishft technique.

    Example
    --------
    >>> bitset(np.array([3,4,1]),[0])
    array([1, 0, 1])

    Modification History
    --------------------
    2022-03-09 - Written by M. Cushing, University of Toledo.
                 Based on the mc_bitset.pro IDL program.
    """
    # Define empty mask
    mask = np.zeros_like(array, dtype=np.int8)

    # Loop over every bit requested and identify those pixels for
    # which that bit is set.
    for val in bits:
        tmp = (array >> val) & 1
        mask = mask | tmp

    return mask
cbae61dabfbe0789ff349f12b0df43860db72df7
20,533
from typing import OrderedDict

import pandas as pd
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.urls import reverse


def sample_gene_matrix(request, variant_annotation_version, samples, gene_list,
                       gene_count_type, highlight_gene_symbols=None):
    """ highlight_gene_symbols - put these genes 1st """
    # 19/07/18 - Plotly can't display a categorical color map.
    # See: https://github.com/plotly/plotly.js/issues/1747
    # So just doing as HTML table
    if gene_list:
        genes = gene_list.get_genes(variant_annotation_version.gene_annotation_release)
        gene_symbols = set(gene_list.get_gene_names())
    else:
        # This was originally designed around a gene list, but now we need to
        # support no gene list (only when it uses variant classifications)
        genes = []
        gene_symbols = []
        qs = gene_count_type.get_variant_queryset(variant_annotation_version)
        GS_PATH = "variantannotation__transcript_version__gene_version__gene_symbol"
        qs = qs.filter(**{GS_PATH + "__isnull": False})
        for gene, gene_symbol in qs.values_list("variantannotation__gene", GS_PATH).distinct():
            genes.append(gene)
            gene_symbols.append(gene_symbol)

    gene_values = list(gene_count_type.genevalue_set.all().order_by("id"))
    default_color = "#d9d9d9"
    default_text = ""
    empty_gene_value = list(filter(lambda x: x.use_as_empty_value, gene_values))
    if len(empty_gene_value) == 1:
        default_color = empty_gene_value[0].rgb

    phenotypes = ["Age", "HPO", "OMIM"]
    highlight_gene_labels = []
    other_gene_labels = []
    gene_links_lookup = OrderedDict()
    for gene_symbol in sorted(gene_symbols):
        gene_classes_list = ["gene-label", gene_symbol]
        highlight = highlight_gene_symbols and gene_symbol in highlight_gene_symbols
        if highlight:
            gene_classes_list.append("highlight-gene")
        gene_classes = ' '.join(gene_classes_list)
        if request.user.is_authenticated:  # Only display links to logged in users
            url = reverse('view_gene_symbol', kwargs={"gene_symbol": gene_symbol})
            gene_symbol_text = f'<a class="{gene_classes}" href="{url}">{gene_symbol}</a>'
        else:
            gene_symbol_text = f"<span class='{gene_classes}'>{gene_symbol}</span>"
        if highlight:
            highlight_gene_labels.append(gene_symbol_text)
        else:
            other_gene_labels.append(gene_symbol_text)
        gene_links_lookup[gene_symbol] = gene_symbol_text

    matrix_rows = phenotypes + highlight_gene_labels + other_gene_labels
    color_df = pd.DataFrame(index=matrix_rows, dtype='O')
    text_df = pd.DataFrame(index=matrix_rows)

    sample_names = []
    used_sample_names = set()
    for i, sample in enumerate(samples):
        try:
            can_access = False
            if request.user.is_authenticated:  # Only display links to logged in users
                try:
                    Sample.get_for_user(request.user, sample.pk)  # Throws exception
                    can_access = True
                except (Sample.DoesNotExist, PermissionDenied):
                    pass

            source = SampleAnnotationVersionVariantSource.objects.get(
                sample=sample, variant_annotation_version=variant_annotation_version)
            gvcc = GeneValueCountCollection.objects.get(
                source=source, gene_count_type=gene_count_type)
            gvc_qs = gvcc.genevaluecount_set.filter(gene__in=genes)

            sample_code = "%03d" % i
            if can_access:
                view_sample_url = reverse('view_sample', kwargs={'sample_id': sample.pk})
                sample_link = f'<a href="{view_sample_url}">{sample.name}</a>'
                if sample_link in used_sample_names:
                    uniq_sample_name = sample.name + "_" + sample_code
                    sample_link = f'<a href="{view_sample_url}">{uniq_sample_name}</a>'
                sample_name = sample_link
            else:
                sample_name = "S" + sample_code

            sample_names.append(sample_name)
            used_sample_names.add(sample_name)

            color_df[sample_name] = default_color
            color_df.loc["Age", sample_name] = '#FFFFFF'
            color_df.loc["HPO", sample_name] = '#FFFFFF'
            color_df.loc["OMIM", sample_name] = '#FFFFFF'

            text_df[sample_name] = default_text
            if sample.patient:
                try:
                    # Check you have Patient permissions
                    patient = Patient.get_for_user(request.user, sample.patient.pk)

                    def format_ontology(ontology_term):
                        return f"<div title='{ontology_term}'>{ontology_term.name}</div>"

                    hpo, omim = OntologyTerm.split_hpo_and_omim(patient.get_ontology_term_ids())
                    hpo_text = " ".join(map(format_ontology, hpo))
                    omim_text = " ".join(map(format_ontology, omim))
                    try:
                        age = sample.specimen.age_at_collection_date
                    except Exception:
                        age = None
                    text_df.loc["Age", sample_name] = age or ''
                    text_df.loc["HPO", sample_name] = hpo_text
                    text_df.loc["OMIM", sample_name] = omim_text
                except PermissionDenied:
                    pass
                except Patient.DoesNotExist:
                    pass

            FIELDS = ["gene__geneversion__gene_symbol", "value__rgb",
                      "value__show_counts", "count"]
            for gene_symbol, rgb, show_counts, count in gvc_qs.values_list(*FIELDS):
                gene_link = gene_links_lookup[gene_symbol]
                color_df.loc[gene_link, sample_name] = rgb
                if show_counts:
                    text_df.loc[gene_link, sample_name] = count
        except (SampleAnnotationVersionVariantSource.DoesNotExist,
                GeneValueCountCollection.DoesNotExist):
            pass

    def set_style(s):
        color_series = color_df[s.name]
        styles = []
        for color in color_series:
            styles.append(f"color: {rgb_invert(color)}; background-color: {color};")
        return styles

    style = text_df.style.apply(set_style)
    style = style.set_table_attributes('class="sample-gene-matrix"')
    text_table_html = style.render()
    context = {"text_table_html": text_table_html,
               "gene_values": gene_values}
    return render(request, 'snpdb/patients/cohort_gene_counts_matrix.html', context)
70ed9387ebba73664efdf3c94fe08a67ed07acc9
20,535
import random


def normal218(startt, endt, money2, first, second, third, forth, fifth,
              sixth, seventh, zz1, zz2, bb1, bb2, bb3, aa1, aa2):
    """
    for source and destination id generation
    """
    # for type of banking work, label of fraud and type of fraud
    idvariz = random.choice(zz2)
    idgirande = random.choice(bb2)
    first.append("transfer")
    second.append(idvariz)
    third.append(idgirande)
    sixth.append("0")
    seventh.append("none")

    # for amount of money generation
    numberofmoney = random.randrange(50000, money2)
    forth.append(numberofmoney)

    # for date and time generation randomly between two dates
    # (randomDate is an external helper from the surrounding project)
    final = randomDate(startt, endt, random.random())
    fifth.append(final)

    return (first, second, third, forth, fifth, sixth, seventh)
aec17f55306691395dc2797cf30e175e0cb5b9e8
20,537
from typing import List


def find_paths(root: TreeNode, required_sum: int) -> List[List[int]]:
    """
    Time Complexity: O(N^2)

    Space Complexity: O(N)

    Parameters
    ----------
    root : TreeNode
        Input binary tree.
    required_sum : int
        Input number 'S'.

    Returns
    -------
    all_paths : List[List[int]]
        All paths from root-to-leaf such that the sum of all the node
        values of each path equals 'S'.
    """

    def find_paths_recursive(cur_node: TreeNode, path_sum: int,
                             cur_path: List[int],
                             ins_all_paths: List[List[int]]):
        if not cur_node:
            return

        cur_path.append(cur_node.val)
        if cur_node.val == path_sum and not cur_node.left and not cur_node.right:
            ins_all_paths.append(cur_path.copy())
        else:
            find_paths_recursive(cur_node.left, path_sum - cur_node.val,
                                 cur_path, ins_all_paths)
            find_paths_recursive(cur_node.right, path_sum - cur_node.val,
                                 cur_path, ins_all_paths)

        del cur_path[-1]

    all_paths = []
    find_paths_recursive(root, required_sum, [], all_paths)
    return all_paths
71cd37db1be97015173748e8d2142601e306552b
20,539
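A small usage sketch for find_paths above; the TreeNode class here is a hypothetical stand-in for whatever node type the original code uses:

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#        1
#       / \
#      2   3      root-to-leaf sums: 1+2+4=7, 1+2+5=8, 1+3=4
#     / \
#    4   5
root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
print(find_paths(root, 7))  # [[1, 2, 4]]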
import json
import time

import pandas as pd
import requests


def macro_bank_switzerland_interest_rate():
    """
    Swiss National Bank interest rate decision report, data from 20080313 to date
    https://datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v=1578582240
    :return: current value (%) of the Swiss National Bank interest rate decision
    :rtype: pandas.Series
    """
    t = time.time()
    # The original interpolated a tuple of two timestamps into the URL;
    # a single millisecond timestamp is the cache-busting value used here.
    res = requests.get(
        f"https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v={int(round(t * 1000))}"
    )
    json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["瑞士央行利率决议报告"] for item in json_data["list"]]
    value_df = pd.DataFrame(value_list)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(date_list)
    temp_df = value_df["今值(%)"]

    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {
        "max_date": "",
        "category": "ec",
        "attr_id": "25",
        "_": str(int(round(t * 1000))),
    }
    headers = {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
        "cache-control": "no-cache",
        "origin": "https://datacenter.jin10.com",
        "pragma": "no-cache",
        "referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        "x-app-id": "rU6QIu7JHe2gOUeR",
        "x-csrf-token": "",
        "x-version": "1.0.0",
    }
    r = requests.get(url, params=params, headers=headers)
    temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    temp_df = temp_df.append(temp_se)
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df.name = "switzerland_interest_rate"
    temp_df = temp_df.astype("float")
    return temp_df
14eb2ce7dc85e0611a8d55147172256ed8a2c71e
20,540
import pandas as pd


def create_dataframe(message):
    """Create Pandas DataFrame from CSV."""
    dropdowns = []
    df = pd.DataFrame()
    if message != "":
        df = pd.read_csv(message)
        df = df.sample(n=50, random_state=2)  # reducing data load (running on Heroku free tier)
    if len(df) == 0:
        # Keep the (df, dropdowns) return shape consistent with the branch below.
        return pd.DataFrame(), []
    df.insert(0, "Index", df.index)
    for column in df.columns:
        dropdowns.append({"label": column, "value": column})
    return df, dropdowns
c0d764ed47cba31d0129d1c61e645065ba2e99b5
20,541
def load_model(path):
    """
    Load a model from file, dispatching on the model type named in the
    file's first line.

    :param path: path to the model file
    :return: the model instance
    """
    # Get the first line of the file
    with open(path, 'r') as f:
        first_line = f.readline()

    # Create the appropriate model
    if "SersicModel" in first_line:
        return SersicModel.from_file(path)
    elif "ExponentialDiskModel" in first_line:
        return ExponentialDiskModel.from_file(path)
    elif "DeprojectionModel" in first_line:
        return DeprojectionModel.from_file(path)
    else:
        raise ValueError("Unrecognized model file")
425113abe09b1de1efa1d5cf1ca2df4d999886c2
20,542
import functools


def add_metaclass(metaclass):
    """
    Class decorator for creating a class with a metaclass.
    Borrowed from `six` module.
    """
    @functools.wraps(metaclass)
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
f6ee3feef418d5bff4f0495fdbfc98c9a8f48665
20,543
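A short usage sketch for add_metaclass above, applying a metaclass that registers every decorated class (RegisteringMeta is my own illustration):

registry = {}

class RegisteringMeta(type):
    def __new__(mcls, name, bases, ns):
        cls = super().__new__(mcls, name, bases, ns)
        registry[name] = cls
        return cls

@add_metaclass(RegisteringMeta)
class Widget:
    pass

assert type(Widget) is RegisteringMeta
assert registry["Widget"] is Widget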
import torch


def max_pool_nd_inverse(layer, relevance_in: torch.Tensor,
                        indices: torch.Tensor = None,
                        max: bool = False) -> torch.Tensor:
    """
    Inversion of a max pooling layer

    Arguments
    ---------
    relevance_in : torch.Tensor
        Input relevance
    indices : torch.Tensor
        Maximum feature indexes obtained when max pooling
    max : bool
        Implement winner-takes-all scheme in relevance re-distribution

    Returns
    -------
    torch.Tensor
        Output relevance
    """
    if indices is None:
        indices = layer.indices
    out_shape = layer.out_shape

    bs = relevance_in.size(0)
    relevance_in = torch.cat([r.view(out_shape) for r in relevance_in], dim=0)
    indices = torch.cat([indices] * bs, dim=0)
    # winner_takes_all is an external helper; the original passed
    # layer.indices here, which left the batched `indices` above unused,
    # so the batched tensor is used instead.
    return (winner_takes_all(relevance_in, layer.in_shape, indices)
            if max else relevance_in)
ae3ad1b2a3791063c90568f0c954d3de6f3985f8
20,544
def aroon_up(close, n=25, fillna=False):
    """Aroon Indicator (AI)

    Identify when trends are likely to change direction (uptrend).

    Aroon Up = ((N - Days Since N-day High) / N) x 100

    https://www.investopedia.com/terms/a/aroon.asp

    Args:
        close(pandas.Series): dataset 'Close' column.
        n(int): n period.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.Series: New feature generated.
    """
    return AroonIndicator(close=close, n=n, fillna=fillna).aroon_up()
2a44c15b06e9a1d2facaa800a186b780fc226717
20,545
import numpy as np
import pymesh


def triangulate(points):
    """
    Triangulate the plane for operation and visualization.
    """
    num_points = len(points)
    # dtype=int rather than the deprecated np.int alias
    indices = np.arange(num_points, dtype=int)
    # Close the polygon: each point connects to the next, last back to first.
    segments = np.vstack((indices, np.roll(indices, -1))).T

    tri = pymesh.triangle()
    tri.points = np.array(points)
    tri.segments = segments
    tri.verbosity = 0
    tri.run()

    return tri.mesh
d18a7d171715217b59056337c86bf5b49609b664
20,548
def immutable():
    """ Get group 1. """
    allowed_values = {'NumberOfPenguins', 'NumberOfSharks'}
    return ImmutableDict(allowed_values)
851853b54b106cbc3ee621119a863a7b2862e8d5
20,549
def test_plot_colors_sizes_proj(data, region):
    """
    Plot the data using z as sizes and colors with a projection.
    """
    fig = Figure()
    fig.coast(region=region, projection="M15c", frame="af", water="skyblue")
    fig.plot(
        x=data[:, 0],
        y=data[:, 1],
        color=data[:, 2],
        size=0.5 * data[:, 2],
        style="cc",
        cmap="copper",
    )
    return fig
4a9f2727d046d91445504d8f147fcef95a261cb5
20,550
def predict_to_score(predicts, num_class):
    """
    Convert per-class probabilities to a scalar score.

    Checked: the last column is for class 0.

    Example: score=1.2, num_class=3 (for 0-2)
        (0.8, 0.2, 0.0) * (1, 2, 0)

    :param predicts: array of shape (n, num_class) with class probabilities
    :param num_class: number of classes
    :return: weighted scores
    """
    scores = 0.
    i = 0
    while i < num_class:
        # When i == 0, predicts[:, -1] (the class-0 column) is selected,
        # but it contributes weight 0 anyway.
        scores += i * predicts[:, i - 1]
        i += 1
    return scores
ee4038583404f31bed42bed4eaf6d0c25684c0de
20,551
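A quick numeric check of predict_to_score above, reproducing the docstring example:

import numpy as np

predicts = np.array([[0.8, 0.2, 0.0]])
print(predict_to_score(predicts, num_class=3))  # [1.2] = 1*0.8 + 2*0.2 + 0*0.0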