content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def parameters_create_lcdm(Omega_c, Omega_b, Omega_k, h, norm_pk, n_s, status):
    """parameters_create_lcdm(double Omega_c, double Omega_b, double Omega_k, double h, double norm_pk, double n_s, int * status) -> parameters"""
    return _ccllib.parameters_create_lcdm(Omega_c, Omega_b, Omega_k, h, norm_pk, n_s, status)
d0a623fcfbcee06a387a3bf7add96068a8205824
23,544
def _split_header_params(s):
    """Split header parameters."""
    result = []
    while s[:1] == b';':
        s = s[1:]
        end = s.find(b';')
        while end > 0 and s.count(b'"', 0, end) % 2:
            end = s.find(b';', end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        result.append(f.strip())
        s = s[end:]
    return result
fabbfb0959133e70019742c6661cb3bb443ca34d
23,545
def countDigits(string):
    """Return the number of digits in a string (helper for countHaveTenDigits)."""
    count = 0
    for char in string:
        if char in '0123456789':
            count += 1
    return count
f8d2327e022efc7a117b744588dfe16a3a7ba75e
23,546
def get_process_entry(process_id: int) -> Process:
    """Get process entry

    :raises AssertionError: When illegal state: Active processes != 1
    :param process_id: specify process
    :return: Process entry
    """
    active_process_entry_query = db.session.query(Process).filter(Process.id == process_id)
    assert active_process_entry_query.count() == 1, \
        "Active processes != 1: " + str(active_process_entry_query.count())
    return active_process_entry_query.first()
442df14f1d032ff1fe8c40598f39a06255982da8
23,547
def shrink(filename):
    """
    The function will make the original image shrink to half its size without losing too much quality.
    :param filename: The directory of an image you want to process.
    :return img_blank: SimpleImage, a shrunk image that is similar to the original image.
    """
    img = SimpleImage(filename)
    # Create a blank image whose size is 1/2 of the original image.
    img_blank = SimpleImage.blank(img.width // 2, img.height // 2)
    for x in range(img_blank.width):
        for y in range(img_blank.height):
            new_pixel = img_blank.get_pixel(x, y)
            # Left upper corner.
            if x == 0 and y == 0:
                new_pixel.red = (img.get_pixel(x, y+1).red + img.get_pixel(x+1, y).red +
                                 img.get_pixel(x+1, y+1).red + img.get_pixel(x, y).red) // 4
                new_pixel.green = (img.get_pixel(x, y+1).green + img.get_pixel(x+1, y).green +
                                   img.get_pixel(x+1, y+1).green + img.get_pixel(x, y).green) // 4
                # Average all four source pixels (the original had a misplaced
                # parenthesis that divided only the last term by 4).
                new_pixel.blue = (img.get_pixel(x, y+1).blue + img.get_pixel(x+1, y).blue +
                                  img.get_pixel(x+1, y+1).blue + img.get_pixel(x, y).blue) // 4
            # Left side.
            elif x == 0 and y != 0:
                new_pixel.red = (img.get_pixel(x, y*2).red + img.get_pixel(x, y*2+1).red +
                                 img.get_pixel(x+1, y*2).red + img.get_pixel(x+1, y*2+1).red) // 4
                new_pixel.green = (img.get_pixel(x, y*2).green + img.get_pixel(x, y*2+1).green +
                                   img.get_pixel(x+1, y*2).green + img.get_pixel(x+1, y*2+1).green) // 4
                new_pixel.blue = (img.get_pixel(x, y*2).blue + img.get_pixel(x, y*2+1).blue +
                                  img.get_pixel(x+1, y*2).blue + img.get_pixel(x+1, y*2+1).blue) // 4
            # Top.
            elif y == 0 and x != 0:
                new_pixel.red = (img.get_pixel(x*2, y).red + img.get_pixel(x*2+1, y).red +
                                 img.get_pixel(x*2, y+1).red + img.get_pixel(x*2+1, y+1).red) // 4
                new_pixel.green = (img.get_pixel(x*2, y).green + img.get_pixel(x*2+1, y).green +
                                   img.get_pixel(x*2, y+1).green + img.get_pixel(x*2+1, y+1).green) // 4
                new_pixel.blue = (img.get_pixel(x*2, y).blue + img.get_pixel(x*2+1, y).blue +
                                  img.get_pixel(x*2, y+1).blue + img.get_pixel(x*2+1, y+1).blue) // 4
            else:
                new_pixel.red = (img.get_pixel(x*2, y*2).red + img.get_pixel(x*2+1, y*2).red +
                                 img.get_pixel(x*2, y*2+1).red + img.get_pixel(x*2+1, y*2+1).red) // 4
                new_pixel.green = (img.get_pixel(x*2, y*2).green + img.get_pixel(x*2+1, y*2).green +
                                   img.get_pixel(x*2, y*2+1).green + img.get_pixel(x*2+1, y*2+1).green) // 4
                new_pixel.blue = (img.get_pixel(x*2, y*2).blue + img.get_pixel(x*2+1, y*2).blue +
                                  img.get_pixel(x*2, y*2+1).blue + img.get_pixel(x*2+1, y*2+1).blue) // 4
    return img_blank
0bff7d59bb7ae512883103bfe21ba80598c28c17
23,548
import getpass

def get_target_config():
    """ Get details of the target database (Postgres) """
    print('\n------------------------------------------')
    print('Enter target database settings:')
    print('------------------------------------------')
    config = {}
    config['username'] = input('- Username on target database (default "postgres"): ') or 'postgres'
    config['host'] = input('- Hostname for target database (default "localhost"): ') or 'localhost'
    config['port'] = input('- Port for target database (default "5432"): ') or 5432
    config['database'] = input('- Name of target database (default "oracle_migration"): ') or 'oracle_migration'
    config['password'] = getpass.getpass('- Password for target database: ')
    print('\nUsername: {}'.format(config['username']))
    print('Hostname: {}'.format(config['host']))
    print('Port: {}'.format(config['port']))
    print('Database name: {}'.format(config['database']))
    print('Password: {}'.format('*' * len(config['password'])))
    return config
36820bae4af66b2db92ce1d467996b6e9a7a2624
23,549
def GetGPU():
    """Get the global index of GPU.

    Returns
    -------
    int
        The global index of GPU.
    """
    return option['device_id']
2c392c97da988c33ff12f59db4bb10f6b41e3bc1
23,550
def get_generic_explanation(exception_type):
    """Provides a generic explanation about a particular exception."""
    if hasattr(exception_type, "__name__"):
        exception_name = exception_type.__name__
    else:
        exception_name = exception_type
    if exception_name in GENERIC:
        return GENERIC[exception_name]()
    elif exception_name.endswith("Warning"):
        return GENERIC["UnknownWarning"]()
    elif hasattr(exception_type, "__name__") and issubclass(exception_type, OSError):
        return os_error_subclass(exception_type.__name__)
    else:
        return no_information()
b590be31518f3eabdc1cdeb31b1c66e66b47b253
23,551
def simple_histogram(queryset, column, bins):
    """
    Return a histogram from data in queryset.

    :param queryset: A QuerySet, Model, or Manager
    :param column: The column we are aggregating into a histogram
    :param bins: An ordered iterable of left endpoints of the bins. Must have at
        least two elements. The endpoints must be convertible to strings by
        force_text.
    :return: A dictionary with bin endpoints converted to strings as keys and
        the count of values falling in each bin as values.
    """
    queryset = _get_queryset(queryset)
    queryset = queryset.annotate(column_name=Value(column, output_field=CharField()))
    return multi_histogram(queryset, column, bins, slice_on='column_name',
                           choices=((column, column),))
b6f4f2738cdf5e3e610e830886e2c6639aae309e
23,553
def bounding_box(points):
    """Bounding box

    Args:
        points: Array of shape (amount_of_points, dimensions)

    Returns:
        numpy.ndarray: Array of [[min, max], [min, max], ...] along the
        dimensions of points.
    """
    # Iterate over the coordinate dimensions (points.shape[1]); the original
    # used points.ndim, which is always 2 for an (N, D) array and therefore
    # only worked for 2-dimensional points.
    n_dims = points.shape[1]
    out = np.empty((n_dims, 2))
    for i in range(n_dims):
        x = points[:, i]
        out[i, 0] = x.min()
        out[i, 1] = x.max()
    return out
44871a584f3592296c982c82a798c05ee8b166f7
23,555
def get_ts_WFI(self):
    """ Get kinetic energy density """
    ts = np.zeros((self.grid.Nelem, len(self.solver[0, :])))
    if self.optInv.ens_spin_sym is not True:
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                self.solver[i, j].calc_ked_WFI()
                # ts[i,j] = self.solver[i,j].get_ked_WFI()
                # get_ked_WFI cannot be defined as a solver's method
        # Get Kinetic Energy Density
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                if self.solver[i, j].ked_WFI is not None:
                    ts[:, j] = np.sum(self.solver[i, j].ked_WFI, axis=1)
    else:
        for i in range(self.solver.shape[0]):
            self.solver[i, 0].calc_ked_WFI()
        # Get Kinetic Energy Density
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                if self.solver[i, j].ked_WFI is not None:
                    ts[:, j] = np.sum(self.solver[i, j].ked_WFI, axis=1)
    return ts
8384a5c3d1e2cdb5551ecb783101961f73a2d523
23,556
def _correct_outlier_correlation(rpeaks: pd.DataFrame, bool_mask: np.array, corr_thres: float, **kwargs) -> np.array:
    """Apply outlier correction method 'correlation'.

    This function computes the cross-correlation coefficient between every single beat and
    the average of all detected beats. It marks beats as outlier if the cross-correlation
    coefficient is below a certain threshold.

    Parameters
    ----------
    rpeaks : :class:`~pandas.DataFrame`
        dataframe with detected R peaks. Output from :meth:`biopsykit.signals.ecg.EcgProcessor.ecg_process()`
    bool_mask : :class:`numpy.array`
        boolean array with beats marked as outlier. Results of this outlier correction method
        will be combined with the array using a logical 'or'
    corr_thres : float
        threshold for cross-correlation coefficient. Beats below that threshold will be marked as outlier
    **kwargs : additional parameters required for this outlier function, such as:

        * ecg_signal : :class:`~pandas.DataFrame`
          dataframe with processed ECG signal. Output from :meth:`biopsykit.signals.ecg.EcgProcessor.ecg_process()`
        * sampling_rate : float
          sampling rate of recorded data in Hz

    Returns
    -------
    :class:`numpy.array`
        boolean array with beats marked as outlier. Logical 'or' combination of ``bool_mask``
        and results from this algorithm
    """
    ecg_signal = kwargs.get("ecg_signal", None)
    sampling_rate = kwargs.get("sampling_rate", None)
    if any(v is None for v in [ecg_signal, sampling_rate]):
        raise ValueError(
            "Cannot apply outlier correction method 'correlation' because not all additionally required arguments "
            "were provided! Make sure you pass the following arguments: 'ecg_signal', 'sampling_rate'."
        )
    # signal outlier
    # segment individual heart beats
    heartbeats = nk.ecg_segment(ecg_signal["ECG_Clean"], rpeaks["R_Peak_Idx"], int(sampling_rate))
    heartbeats = nk.epochs_to_df(heartbeats)
    heartbeats_pivoted = heartbeats.pivot(index="Time", columns="Label", values="Signal")
    heartbeats = heartbeats.set_index("Index")
    heartbeats = heartbeats.loc[heartbeats.index.intersection(rpeaks["R_Peak_Idx"])].sort_values(by="Label")
    heartbeats = heartbeats[~heartbeats.index.duplicated()]
    heartbeats_pivoted.columns = heartbeats.index

    # compute the average over all heart beats and compute the correlation coefficient between
    # all beats and the average
    mean_beat = heartbeats_pivoted.mean(axis=1)
    heartbeats_pivoted["mean"] = mean_beat
    corr_coeff = heartbeats_pivoted.corr()["mean"].abs().sort_values(ascending=True)
    corr_coeff = corr_coeff.drop("mean")
    # compute RR intervals (in seconds) from R Peak Locations
    rpeaks["RR_Interval"] = np.ediff1d(rpeaks["R_Peak_Idx"], to_end=0) / sampling_rate

    # signal outlier: drop all beats that are below a correlation coefficient threshold
    return np.logical_or(bool_mask, rpeaks["R_Peak_Idx"].isin(corr_coeff[corr_coeff < corr_thres].index))
7216b1c8e2c3352d14273aa058e5c9fd4398044b
23,557
from datetime import time

def _time_from_timestamp(timestamp: int) -> time:
    """
    Casts a timestamp representing the number of seconds since midnight to a time object

    Parameters
    ----------
    timestamp : int
        The number of seconds since midnight

    Returns
    -------
    time
        The associated time object
    """
    SECONDS_IN_MINUTE = 60
    SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
    remaining_time = timestamp
    hour, remaining_time = divmod(remaining_time, SECONDS_IN_HOUR)
    minute, second = divmod(remaining_time, SECONDS_IN_MINUTE)
    # `time` must be datetime.time (not the `time` module, as in the original
    # import) for both the annotation and this constructor call to work.
    return time(hour, minute, second)
552f2b3b6841d48f3340ecdd94edb03f791a84c9
23,558
def get_marginal_frequencies_of_spikes_in_bins(symbol_counts, number_of_bins_d):
    """
    Compute for each past bin 1...d the sum of spikes found in that bin across
    all observed symbols.
    """
    return np.array(sum((emb.symbol_binary_to_array(symbol, number_of_bins_d)
                         * symbol_counts[symbol]
                         for symbol in symbol_counts)), dtype=int)
c1ace43f040715c87a3a137bebf64d862060a590
23,559
def pagination(cl):
    """
    Generate the series of links to the pages in a paginated list.
    """
    paginator, page_num = cl.paginator, cl.page_num

    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 2
        ON_ENDS = 1

        # If there are 8 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination.
        if paginator.num_pages <= 8:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                page_range += [
                    *range(0, ON_ENDS),
                    DOT,
                    *range(page_num - ON_EACH_SIDE, page_num + 1),
                ]
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                page_range += [
                    # Upper bound is page_num + ON_EACH_SIDE + 1 so that
                    # ON_EACH_SIDE pages follow the current page (the original
                    # "- 1" produced an empty range).
                    *range(page_num + 1, page_num + ON_EACH_SIDE + 1),
                    DOT,
                    *range(paginator.num_pages - ON_ENDS, paginator.num_pages),
                ]
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))

    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        "cl": cl,
        "pagination_required": pagination_required,
        "show_all_url": need_show_all_link and cl.get_query_string({ALL_VAR: ""}),
        "page_range": page_range,
        "ALL_VAR": ALL_VAR,
        "1": 1,
    }
cca1b80f1bc2c60c8f4af44f138b5433023298f7
23,561
def ants_apply_inverse_warps_template_to_func(
        workflow, strat, num_strat, num_ants_cores, input_node,
        input_outfile, ref_node, ref_outfile, func_name, interp,
        input_image_type):
    """Apply the functional-to-structural and structural-to-template warps
    inversely to functional time-series in template space to warp it back
    to native functional space.

    Parameters
    ----------
    workflow : Nipype workflow object
        the workflow containing the resources involved
    strat : C-PAC Strategy object
        a strategy with one or more resource pools
    num_strat : int
        the number of strategy objects
    num_ants_cores : int
        the number of CPU cores dedicated to ANTS anatomical-to-standard
        registration
    input_node : Nipype pointer
        pointer to the node containing the 4D functional time-series (often
        the leaf node)
    input_outfile : Nipype pointer
        pointer to the output of the node, i.e. the 4D functional time-series
        itself
    ref_node : Nipype pointer
        pointer to the node containing the reference volume for the C3D
        FSL-to-ITK affine conversion (often the mean of the functional
        time-series, which is a single volume)
    ref_outfile : Nipype pointer
        pointer to the output of ref_node, i.e. the reference volume itself
    func_name : str
        what the name of the warped functional should be when written to the
        resource pool
    interp : str
        which interpolation to use when applying the warps
    input_image_type : int
        argument taken by the ANTs apply warp tool; in this case, should be
        3 for 4D functional time-series
    """
    # converts FSL-format .mat affine xfm into ANTS-format
    # .txt; .mat affine comes from Func->Anat registration
    fsl_to_itk_mni_func = create_wf_c3d_fsl_to_itk(
        name='fsl_to_itk_%s_%d' % (func_name, num_strat)
    )

    # collects series of warps to be applied
    collect_transforms_mni_func = \
        create_wf_collect_transforms(
            inverse=True,
            name='collect_transforms_%s_%d' % (func_name, num_strat)
        )

    # apply ants warps
    apply_ants_warp_mni_func = \
        create_wf_apply_ants_warp(
            inverse=True,
            name='apply_ants_warp_%s_%d' % (func_name, num_strat),
            ants_threads=int(num_ants_cores))

    apply_ants_warp_mni_func.inputs.inputspec.dimension = 3
    apply_ants_warp_mni_func.inputs.inputspec.interpolation = interp
    # input_image_type:
    # (0 or 1 or 2 or 3)
    # Option specifying the input image type of scalar
    # (default), vector, tensor, or time series.
    apply_ants_warp_mni_func.inputs.inputspec.input_image_type = \
        input_image_type

    # convert the .mat from linear Func->Anat to
    # ANTS format
    node, out_file = strat['functional_to_anat_linear_xfm']
    workflow.connect(node, out_file, fsl_to_itk_mni_func,
                     'inputspec.affine_file')

    node, out_file = strat["anatomical_brain"]
    workflow.connect(node, out_file, fsl_to_itk_mni_func,
                     'inputspec.reference_file')

    workflow.connect(ref_node, ref_outfile, fsl_to_itk_mni_func,
                     'inputspec.source_file')

    workflow.connect(ref_node, ref_outfile, apply_ants_warp_mni_func,
                     'inputspec.reference_image')

    # Field file from anatomical nonlinear registration
    node, out_file = strat['mni_to_anatomical_nonlinear_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.warp_file')

    # initial transformation from anatomical registration
    node, out_file = strat['ants_initial_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.linear_initial')

    # affine transformation from anatomical registration
    node, out_file = strat['ants_affine_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.linear_affine')

    # rigid transformation from anatomical registration
    node, out_file = strat['ants_rigid_xfm']
    workflow.connect(node, out_file, collect_transforms_mni_func,
                     'inputspec.linear_rigid')

    # Premat from Func->Anat linear reg and bbreg
    # (if bbreg is enabled)
    workflow.connect(fsl_to_itk_mni_func, 'outputspec.itk_transform',
                     collect_transforms_mni_func,
                     'inputspec.fsl_to_itk_affine')

    # this <node, out_file> pulls in directly because
    # it pulls in the leaf in some instances
    workflow.connect(input_node, input_outfile,
                     apply_ants_warp_mni_func, 'inputspec.input_image')

    workflow.connect(collect_transforms_mni_func,
                     'outputspec.transformation_series',
                     apply_ants_warp_mni_func, 'inputspec.transforms')

    strat.update_resource_pool({
        func_name: (apply_ants_warp_mni_func, 'outputspec.output_image')
    })
    strat.append_name(apply_ants_warp_mni_func.name)

    return apply_ants_warp_mni_func
e1fdd24a9e3b13ff280baf89586eeca85f1f0a7d
23,562
def get_metrics_influx(query, query_index):
    """ Function to Query InfluxDB """
    influx_connect = InfluxDBClient(
        host=defs.INFLUX_DETAILS[query_index][0],
        database=defs.INFLUX_DETAILS[query_index][1],
        port=8086,
        timeout=5,
        retries=5)
    response = influx_connect.query(query, epoch='s')
    return response
3f5d7c147553d3b16cfb3d18a1b86805f879fda7
23,563
def find_buckets(pc, target_centres, N, bucket_height=.38, bucket_radius=.15):
    """
    Locate the buckets in the point cloud, associate them with aruco targets,
    and remove bucket points from branches.

    Returns: pc, bucket_centres
    """
    ### find buckets and remove ###
    print('finding buckets')
    buckets = pc[pc.z.between(.1, .4)]

    # voxelise to speed-up dbscan
    buckets.loc[:, 'xx'] = (buckets.x // .005) * .005
    buckets.loc[:, 'yy'] = (buckets.y // .005) * .005
    buckets.loc[:, 'zz'] = (buckets.z // .005) * .005
    buckets.sort_values(['xx', 'yy', 'zz', 'refl'], inplace=True)
    bucket_voxels = buckets[~buckets[['xx', 'yy', 'zz']].duplicated()]
    # print(buckets)
    dbscan = DBSCAN(min_samples=20, eps=.05).fit(bucket_voxels[['xx', 'yy', 'zz']])
    bucket_voxels.loc[:, 'labels_'] = dbscan.labels_

    # merge results back
    buckets = pd.merge(buckets, bucket_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx', 'yy', 'zz'])

    # find three largest targets (assumed buckets)
    labels = buckets.labels_.value_counts().index[:N]
    buckets = buckets[buckets.labels_.isin(labels)]
    bucket_centres = buckets.groupby('labels_')[['x', 'y']].mean().reset_index()
    bucket_centres.loc[:, 'aruco'] = -1

    try:
        # pair up aruco and buckets, identify and label bucket points
        for i, lbl in enumerate(buckets.labels_.unique()):
            bucket = buckets[buckets.labels_ == lbl]
            X, Y = bucket[['x', 'y']].mean(), target_centres[['x', 'y']].astype(float)
            dist2bucket = np.linalg.norm(X - Y, axis=1)
            aruco = target_centres.loc[np.where(dist2bucket == dist2bucket.min())].aruco.values[0]
            print('bucket {} associated with aruco {}'.format(lbl, aruco))
            bucket_centres.loc[bucket_centres.labels_ == lbl, 'aruco'] = aruco

            # identify buckets points
            x_shift = bucket_centres[bucket_centres.aruco == aruco].x.values
            y_shift = bucket_centres[bucket_centres.aruco == aruco].y.values
            pc.dist = np.sqrt((pc.x - x_shift)**2 + (pc.y - y_shift)**2)
            idx = pc[(pc.z < bucket_height) & (pc.dist < bucket_radius) & (pc.is_branch)].index
            pc.loc[idx, 'is_branch'] = False

            # label branch base with aruco
            idx = pc[(pc.z < bucket_height + .5) & (pc.dist < bucket_radius)].index
            pc.loc[idx, 'aruco'] = aruco
    except Exception:
        plt.scatter(buckets.x.loc[::100], buckets.y.loc[::100], c=buckets.labels_.loc[::100])
        plt.scatter(target_centres.x, target_centres.y)
        [plt.text(r.x, r.y, r.aruco) for ix, r in target_centres.iterrows()]
        # re-raise the original error (the original raised a bare Exception,
        # discarding the captured error)
        raise

    return pc, bucket_centres
e7e5480783235e6e9ad48cbfc4d006e9a0a7b61e
23,564
def red_bg(text):
    """ Red background. """
    return _create_color_func(text, bgcolor=1)
c867f7415230b6f5c179a4369bf4751f9ee2a442
23,567
def does_block_type_support_children(block_type):
    """
    Does the specified block type (e.g. "html", "vertical") support child blocks?
    """
    try:
        return XBlock.load_class(block_type).has_children
    except PluginMissingError:
        # We don't know if this now-uninstalled block type had children,
        # but to be conservative, assume it may have.
        return True
f1e86e6b378ef3e134106653e012c8a06cebf821
23,568
def jsonDateTimeHandler(obj):
    """Takes an object and tries to serialize it in JSON by using strftime or isoformat."""
    if hasattr(obj, "strftime"):
        # To avoid problems with the js date-time format
        return obj.strftime("%a %b %d, %Y %I:%M %p")
    elif hasattr(obj, 'isoformat'):
        return obj.isoformat()
    # elif isinstance(obj, ...):
    #     return ...
    else:
        raise TypeError(
            'Object of type %s with value of %s is not JSON serializable'
            % (type(obj), repr(obj)))
605f8a379575d185bc2a8b16810252511eec52af
23,569
def get_crypto_price(crypto, fiat):
    """Helper function to convert any cryptocurrency to fiat"""
    converted_btc_value = float(binance_convert_crypto(
        crypto, "BTC").split('=')[1].strip().split()[0])
    # grab latest bitcoin price
    btc_price = float(get_price("btc", fiat).split('=')[1].strip().split()[0])
    # converted_btc_value * latest reading
    return converted_btc_value * btc_price
f91e56c74b3422d3ab272c029352ae94521033b0
23,571
def boxblur(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#boxblur"""
    return filter(stream, boxblur.__name__, *args, **kwargs)
5f29981abaf050b43207452649f4ad9e3fafc05c
23,572
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened."""
    new_lis = []
    for item in lis:
        if isinstance(item, list):
            new_lis.extend(flatten(item))
        else:
            new_lis.append(item)
    return new_lis
7e4e00af9f20f58dc0798a0731c352949dd71cf5
23,574
def name(ndims=2, ndepth=2):
    """ Encode the dimensionality and depth into a standardized model-name string """
    # Model name, depth and version
    value = 'care_denoise_%dDdepth%d' % (ndims, ndepth)
    return value
1933ac0454eac4c860d70683e58c922074498b63
23,575
import string
import random

def _generate_url_slug(size=10, chars=string.ascii_lowercase + string.digits):
    """
    This is for a Django project and it assumes your instance has a model with
    a slug field and a title character (char) field.

    Parameters
    ----------
    size: <Int>
        Size of the slug.
    chars: <string.class>
        Character class to be included in the slug.
    """
    slug = ''.join(random.choice(chars) for _ in range(size))
    if redis.exists(slug):
        try:
            # pass the caller's arguments through when retrying (the original
            # recursed with the defaults, ignoring custom size/chars)
            return _generate_url_slug(size, chars)
        except RecursionError:
            return
    else:
        return slug
1ebe945730e7e5f977c80666db92cfefb9a1a1a7
23,576
def mc_compute_stationary(P):
    """
    Computes the stationary distribution of Markov matrix P.

    Parameters
    ----------
    P : array_like(float, ndim=2)
        A discrete Markov transition matrix

    Returns
    -------
    solution : array_like(float, ndim=1)
        The stationary distribution for P

    Note: Currently only supports transition matrices with a unique
    invariant distribution. See issue 19.
    """
    n = len(P)                               # P is n x n
    I = np.identity(n)                       # Identity matrix
    B, b = np.ones((n, n)), np.ones((n, 1))  # Matrix and vector of ones
    A = np.transpose(I - P + B)
    solution = np.linalg.solve(A, b).flatten()
    return solution
dc971399bc7b8626347ba9a20a6c8f449870b606
23,577
def isight_prepare_data_request(a_url, a_query, a_pub_key, a_prv_key):
    """
    :param a_url: URL of the FireEye iSight API
    :type a_url: str
    :param a_query: query/endpoint to request
    :type a_query: str
    :param a_pub_key: public API key
    :type a_pub_key: str
    :param a_prv_key: private API key
    :type a_prv_key: str
    :return: API response, or False if no indicators were retrieved
    :rtype: dict or bool
    """
    header = set_header(a_prv_key, a_pub_key, a_query)
    result = isight_load_data(a_url, a_query, header)
    if not result:
        PySight_settings.logger.error('Something went wrong when retrieving indicators from the FireEye iSight API')
        return False
    else:
        return result
8854013b509fa52a35bd1957f6680ce4e4c17dc4
23,578
def norm_fisher_vector(v, method=['power', 'l2']):
    """
    Normalize a set of fisher vectors.

    :param v: numpy.array
        A matrix with Fisher vectors as rows (each row corresponding to an image).
    :param method: list
        A list of normalization methods. Choices: 'power', 'l2'.
    :return: numpy.array
        The set of normalized vectors (as a matrix).
    """
    if 'power' in method:
        v = np.sign(v) * np.abs(v)**0.5
    if 'l2' in method:
        nrm = np.sqrt(np.sum(v**2, axis=1))
        v /= nrm.reshape(-1, 1)
    v[np.isnan(v)] = 100000.0  # some large value
    return v
06592b42914902f183d085f584b00cd9a1f057ce
23,579
def get_top_funnels_df(funurl: str, funlen: int, useResolvedUrls: bool, events: DataFrame,
                       limit_rows: int = 0) -> dict:
    """Get top funnels of specified length which contain the specified URL

    :param funurl: URL that should be contained in the funnel
    :param funlen: funnel length
    :param useResolvedUrls: indicates whether original or resolved URLs should be used
    :param events: events DataFrame
    :param limit_rows: number of rows of events DataFrame to use (use all rows if 0)
    :return: dictionary of funnels and their frequencies
    """
    if useResolvedUrls:
        columnToUse = analyze_traffic.RESOLVEDURL
    else:
        columnToUse = analyze_traffic.PAGEURL
    if limit_rows != 0:
        events = events.head(limit_rows)
    if useResolvedUrls:
        url_regex_resolver.resolve_urls(events, manage_resolutions.get_regex_dict(),
                                        analyze_traffic.PAGEURL, analyze_traffic.RESOLVEDURL)
    si = analyze_traffic.build_session_index(events, columnToUse)
    funnelCounts = get_funnel_lists(events, si, funurl, funlen, columnToUse)
    return funnelCounts
1fe29668e98076bbb39023e04fc1a5845788d9ef
23,580
from typing import Dict
from typing import Any

def graph_to_json(obj: Graph) -> Dict[str, Any]:
    """ Uses regular serialization but excludes the "operator" field to get rid of circular references """
    serialized_obj = {
        k: v for k, v in any_to_json(obj).items()
        if k != 'operator'  # to prevent circular reference
    }
    return serialized_obj
922d53d5fb9b23773cdea13e94930985785f6c77
23,582
def log_ttest_vs_basal(df, basal_key):
    """Do t-tests in log space to see if sequences have the same activity as basal.

    Parameters
    ----------
    df : pd.DataFrame
        Index is sequence ID, columns are average RNA/DNA barcode counts for each replicate.
    basal_key : str
        Index value for basal.

    Returns
    -------
    pvals : pd.Series
        p-value for t-test of the null hypothesis that the log activity of a sequence is
        the same as that of basal. Does not include a p-value for basal.
    """
    log_params = df.apply(_get_lognormal_params, axis=1)
    # Pull out basal params
    basal_mean, basal_std, basal_n = log_params.loc[basal_key]
    # Drop basal from the df
    log_params = log_params.drop(index=basal_key)
    # Do t-tests on each row
    pvals = log_params.apply(
        lambda x: stats.ttest_ind_from_stats(basal_mean, basal_std, basal_n,
                                             x["mean"], x["std"], x["n"],
                                             equal_var=False)[1],
        axis=1)
    return pvals
b41029c7c61b3b365bf71e2aaba8a81aecf5533a
23,583
def spleen_lymph_cite_seq(
    save_path: str = "data/",
    protein_join: str = "inner",
    remove_outliers: bool = True,
    run_setup_anndata: bool = True,
) -> anndata.AnnData:
    """
    Immune cells from the murine spleen and lymph nodes [GayosoSteier21]_.

    This dataset was used throughout the totalVI manuscript, and named SLN-all.

    Parameters
    ----------
    save_path
        Location to use when saving/loading the data.
    protein_join
        Whether to take an inner join or outer join of proteins
    remove_outliers
        Whether to remove clusters annotated as doublet or low quality
    run_setup_anndata
        If true, runs setup_anndata() on dataset before returning

    Returns
    -------
    AnnData with batch info (``.obs['batch']``), label info (``.obs['cell_types']``),
    protein expression (``.obsm["protein_expression"]``), and tissue (``.obs['tissue']``).

    Missing protein values are zero, when ``protein_join == "outer"``, and are identified
    during ``AnnData`` setup.

    Examples
    --------
    >>> import scvi
    >>> adata = scvi.data.spleen_lymph_cite_seq()
    """
    return _load_spleen_lymph_cite_seq(
        save_path=save_path,
        protein_join=protein_join,
        remove_outliers=remove_outliers,
        run_setup_anndata=run_setup_anndata,
    )
7eeead5e6f69b7f3b9dff85b33cae675ea0a47ec
23,584
def strftime_local(aware_time, fmt="%Y-%m-%d %H:%M:%S"):
    """Format aware_time as local time."""
    if not aware_time:
        # When the time field is allowed to be NULL, return None directly
        return None
    if timezone.is_aware(aware_time):
        # translate to time in local timezone
        aware_time = timezone.localtime(aware_time)
    return aware_time.strftime(fmt)
1294795d793c22e7639fb88ca02e34bb6b764892
23,586
import re

def filter_issues_fixed_by_prs(issues, prs, show_related_prs, show_related_issues):
    """
    Find related issues to prs and prs to issues that are fixed.

    This adds extra information to the issues and prs listings.
    """
    words = [
        'close', 'closes', 'fix', 'fixes', 'fixed', 'resolve', 'resolves',
        'resolved'
    ]
    pattern = re.compile(
        r'(?P<word>' + r'|'.join(words) + r') '
        r'((?P<repo>.*?)#(?P<number>\d*)|(?P<full_repo>.*)/(?P<number_2>\d*))',
        re.IGNORECASE,
    )
    issue_pr_map = {}
    pr_issue_map = {}
    for pr in prs:
        is_pr = bool(pr.get('pull_request'))
        if is_pr:
            pr_url = pr.html_url
            pr_number = pr.number
            user = pr.user
            repo_url = pr_url.split('/pull/')[0] + '/issues/'
            pr_issue_map[pr_url] = []
            body = pr.body or ''

            # Remove blanks and markdown comments
            if body:
                lines = body.splitlines()
                no_comments = [l for l in lines
                               if (l and not l.startswith("<!---"))]
                body = '\n'.join(no_comments)

            for matches in pattern.finditer(body):
                dic = matches.groupdict()
                issue_number = dic['number'] or dic['number_2'] or ''
                repo = dic['full_repo'] or dic['repo'] or repo_url

                # Repo name can't have spaces.
                if ' ' not in repo:
                    # In case spyder-ide/loghub#45 was for example used
                    if 'http' not in repo:
                        repo = 'https://github.com/' + repo

                    if '/issues' not in repo:
                        issue_url = repo + '/issues/' + issue_number
                    elif repo.endswith('/') and issue_number:
                        issue_url = repo + issue_number
                    elif issue_number:
                        issue_url = repo + '/' + issue_number
                    else:
                        issue_url = None
                else:
                    issue_url = None

                # Set the issue data
                issue_data = {'url': pr_url, 'text': pr_number, 'user': user}
                if issue_url is not None:
                    # Key on the issue URL (the original checked the bare
                    # issue number against URL keys, which never matched).
                    if issue_url in issue_pr_map:
                        issue_pr_map[issue_url].append(issue_data)
                    else:
                        issue_pr_map[issue_url] = [issue_data]

                    pr_data = {'url': issue_url, 'text': issue_number}
                    pr_issue_map[pr_url].append(pr_data)

            if show_related_issues:
                pr['loghub_related_issues'] = pr_issue_map[pr_url]

    for issue in issues:
        issue_url = issue.html_url
        if issue_url in issue_pr_map and show_related_prs:
            issue['loghub_related_pulls'] = issue_pr_map[issue_url]

    # Now sort the numbers in descending order
    for issue in issues:
        related_pulls = issue.get('loghub_related_pulls', [])
        related_pulls = sorted(
            related_pulls, key=lambda p: p['url'], reverse=True)
        issue['loghub_related_pulls'] = related_pulls

    for pr in prs:
        related_issues = pr.get('loghub_related_issues', [])
        related_issues = sorted(
            related_issues, key=lambda i: i['url'], reverse=True)
        pr['loghub_related_issues'] = related_issues

    return issues, prs
6e63dc9988c9343b4f9d2baae2d995b26b666ed3
23,587
import re

def run_job(answer: str, job: dict, grade: float, feedback: str):
    """
    Match answer to regex inside job dictionary. Add weight to grade if
    successful, else add comment to feedback.

    :param answer: Answer.
    :param job: Dictionary with regex, weight, and comment.
    :param grade: Current grade for the answer.
    :param feedback: Current feedback for the answer.
    :return: Modified answer, grade, and feedback.
    """
    match = re.search(job["regex"], answer)
    if match:
        grade += job["weight"]
        answer = answer.replace(match[0], "", 1)
    else:
        feedback += job["comment"] + "\n"
    return answer, grade, feedback
487916da129b8958f8427b11f0118135268f9245
23,588
def __build_data__(feature, qars):
    """ Return all the data needed to build the Benin republic departments Layer """
    data = {
        'qars': qars,
    }
    # GEOJSON layer consisting of a single feature
    department_name = feature["properties"]["NAME_1"]
    data["department"] = department_name
    data["predictions"] = data_dictionary[feature["properties"]["NAME_0"]][feature["properties"]["NAME_1"]][
        "properties"]

    z_list = []
    # looping through all departments in Benin Republic to get the ranking
    for d in range(len(DeptSatellite.objects.all())):
        y = DeptSatellite.objects.all()[d].department
        x = CommuneSatellite.objects.filter(department=y).aggregate(Sum('cashew_tree_cover'))
        x = x['cashew_tree_cover__sum']
        z_list.append((y, x))
    sorted_by_second = sorted(z_list, reverse=True, key=lambda tup: tup[1])
    list1, _ = zip(*sorted_by_second)

    # A small logic to solve the French symbols department error when viewed on local host
    if heroku:
        position = list1.index(department_name)
    else:
        position = 1
    data["position"] = position

    my_dict = {'0': "highest", '1': "2nd", '2': "3rd", '3': "4th", '4': "5th", '5': "6th",
               '6': "7th", '7': "8th", '8': "9th", '9': "10th", '10': "11th", '11': "lowest"}
    data["my_dict"] = my_dict

    pred_dept_data = []
    pred_ground_dept_data = [['Communes', 'Satellite Prediction', 'Ground Data Estimate']]
    for c in CommuneSatellite.objects.filter(department=department_name):
        y = c.commune
        x = round(c.cashew_tree_cover / 10000, 2)
        pred_dept_data.append([y, x])
        pred_ground_dept_data.append([y, x, x])
    data["pred_dept_data"] = pred_dept_data
    data["pred_ground_dept_data"] = pred_ground_dept_data

    # load statistics from the database and format them for displaying on popups.
    # The try/except blocks avoid errors that arise when we round null values
    tree_ha_pred_dept = CommuneSatellite.objects.filter(department=department_name).aggregate(Sum('cashew_tree_cover'))
    try:
        tree_ha_pred_dept = int(round(tree_ha_pred_dept['cashew_tree_cover__sum'] / 10000, 2))
    except Exception as e:
        tree_ha_pred_dept = 0
    data["tree_ha_pred_dept"] = tree_ha_pred_dept

    surface_area_d = BeninYield.objects.filter(department=department_name).aggregate(Sum('surface_area'))
    try:
        surface_area_d = int(round(surface_area_d['surface_area__sum'], 2))
    except Exception as e:
        surface_area_d = 0
    data["surface_area_d"] = surface_area_d

    total_yield_d = BeninYield.objects.filter(department=department_name).aggregate(Sum('total_yield_kg'))
    try:
        total_yield_d = int(round(total_yield_d['total_yield_kg__sum'], 2))
    except Exception as e:
        total_yield_d = 0
    data["total_yield_d"] = total_yield_d

    yield_ha_d = BeninYield.objects.filter(department=department_name).aggregate(Avg('total_yield_per_ha_kg'))
    try:
        yield_ha_d = int(round(yield_ha_d['total_yield_per_ha_kg__avg'], 2))
    except Exception as e:
        yield_ha_d = 0
    data["yield_ha_d"] = yield_ha_d

    # Used only in case of error in the try and except catch
    yield_tree_d = BeninYield.objects.filter(department=department_name).aggregate(Avg('total_yield_per_tree_kg'))
    try:
        yield_tree_d = int(round(yield_tree_d['total_yield_per_tree_kg__avg'], 2))
    except Exception as e:
        yield_tree_d = 0
    data["yield_tree_d"] = yield_tree_d

    num_tree_d = BeninYield.objects.filter(department=department_name).aggregate(Sum('total_number_trees'))
    try:
        num_tree_d = int(num_tree_d['total_number_trees__sum'])
    except Exception as e:
        num_tree_d = 0
    data["num_tree_d"] = num_tree_d

    sick_tree_d = BeninYield.objects.filter(department=department_name).aggregate(Sum('total_sick_trees'))
    try:
        sick_tree_d = int(sick_tree_d['total_sick_trees__sum'])
    except Exception as e:
        sick_tree_d = 0
    data["sick_tree_d"] = sick_tree_d

    out_prod_tree_d = BeninYield.objects.filter(department=department_name).aggregate(Sum('total_trees_out_of_prod'))
    try:
        out_prod_tree_d = int(out_prod_tree_d['total_trees_out_of_prod__sum'])
    except Exception as e:
        out_prod_tree_d = 0
    data["out_prod_tree_d"] = out_prod_tree_d

    dead_tree_d = BeninYield.objects.filter(department=department_name).aggregate(Sum('total_dead_trees'))
    try:
        dead_tree_d = int(round(dead_tree_d['total_dead_trees__sum'], 2))
    except Exception as e:
        dead_tree_d = 0
    data["dead_tree_d"] = dead_tree_d

    region_size_d = area(feature['geometry']) / 10000

    try:
        active_trees_d = num_tree_d - sick_tree_d - out_prod_tree_d - dead_tree_d
    except Exception as e:
        active_trees_d = 0
    data["active_trees_d"] = active_trees_d

    try:
        r_tree_ha_pred_dept = round(tree_ha_pred_dept, 1 - int(
            floor(log10(abs(tree_ha_pred_dept))))) if tree_ha_pred_dept < 90000 else round(
            tree_ha_pred_dept, 2 - int(floor(log10(abs(tree_ha_pred_dept)))))
    except Exception as e:
        r_tree_ha_pred_dept = tree_ha_pred_dept
    data["r_tree_ha_pred_dept"] = r_tree_ha_pred_dept

    try:
        r_surface_area_d = round(surface_area_d, 1 - int(
            floor(log10(abs(surface_area_d))))) if surface_area_d < 90000 else round(
            surface_area_d, 2 - int(floor(log10(abs(surface_area_d)))))
    except Exception as e:
        r_surface_area_d = surface_area_d
    data["r_surface_area_d"] = r_surface_area_d

    try:
        r_total_yield_d = round(total_yield_d, 1 - int(
            floor(log10(abs(total_yield_d))))) if total_yield_d < 90000 else round(
            total_yield_d, 2 - int(floor(log10(abs(total_yield_d)))))
    except Exception as e:
        r_total_yield_d = total_yield_d
    data["r_total_yield_d"] = r_total_yield_d

    try:
        r_yield_ha_d = round(yield_ha_d, 1 - int(
            floor(log10(abs(yield_ha_d))))) if yield_ha_d < 90000 else round(
            yield_ha_d, 2 - int(floor(log10(abs(yield_ha_d)))))
    except Exception as e:
        r_yield_ha_d = yield_ha_d
    data["r_yield_ha_d"] = r_yield_ha_d

    try:
        yield_pred_dept = int(r_yield_ha_d * tree_ha_pred_dept)
    except Exception as e:
        yield_pred_dept = 0
    data["yield_pred_dept"] = yield_pred_dept

    try:
        r_yield_pred_dept = round(yield_pred_dept, 1 - int(
            floor(log10(abs(yield_pred_dept))))) if yield_pred_dept < 90000 else round(
            yield_pred_dept, 2 - int(floor(log10(abs(yield_pred_dept)))))
    except Exception as e:
        r_yield_pred_dept = yield_pred_dept
    data["r_yield_pred_dept"] = r_yield_pred_dept

    try:
        r_yield_tree_d = round(r_total_yield_d / active_trees_d)
    except Exception as e:
        r_yield_tree_d = yield_tree_d
    data["r_yield_tree_d"] = r_yield_tree_d

    try:
        r_num_tree_d = round(num_tree_d, 1 - int(
            floor(log10(abs(num_tree_d))))) if num_tree_d < 90000 else round(
            num_tree_d, 2 - int(floor(log10(abs(num_tree_d)))))
    except Exception as e:
        r_num_tree_d = num_tree_d
    data["r_num_tree_d"] = r_num_tree_d

    try:
        r_region_size_d = round(region_size_d, 1 - int(
            floor(log10(abs(region_size_d))))) if region_size_d < 90000 else round(
            region_size_d, 2 - int(floor(log10(abs(region_size_d)))))
    except Exception as e:
        r_region_size_d = region_size_d
    data["r_region_size_d"] = r_region_size_d

    return data
e1982a1f610ea724ca8cf06f6641a4bc3428fa47
23,589
def hook(callback):
    """
    Installs a global listener on all available mouses, invoking `callback`
    each time it is moved, a key status changes or the wheel is spun. A mouse
    event is passed as argument, with type either `mouse.ButtonEvent`,
    `mouse.WheelEvent` or `mouse.MoveEvent`.

    Returns the given callback for easier development.
    """
    _listener.add_handler(callback)
    return callback
4bf0884de591fc4f0b30bee42b6e36b06c526134
23,590
def notification_list(request):
    """ returns the notification list """
    notifications = Notification.get_notifications(user=request.user)
    return {"notifications": notifications}
c8e967fa8cef0dfd5cc9673c99289e17813f2e75
23,591
def predictClass(x, mus, sigmas, X_train, number_of_classes, class_probabilities):
    """
    For every model, it calculates the likelihood for each class, and picks the
    class with max likelihood.

    :param x: The datapoint we want to derive the class for.
    :param mus: A list with the mean vector for each method. First three are for
        first class, next three for second class, etc.
    :param sigmas: A list with the covariance matrix for each method. Same as mus.
    :param X_train: The train set - needed for Parzen Windows method.
    :param number_of_classes: The number of different classes in the dataset.
    :param class_probabilities: An array with the probability of each class.
    :return: A vector with the predicted classes by each model.
    """
    predictions = []

    # For the parametric methods
    number_of_models = int(len(mus) / 2)
    for i in range(0, number_of_models):
        method_likelihoods = []
        for j in range(number_of_classes):
            # the index will "jump" over the other methods in the lists.
            index = i + j * number_of_models
            # The Bayes classifier rule
            prob = gaussian(x, mus[index], sigmas[index]) * class_probabilities[j]
            method_likelihoods.append(prob)
        predictions.append(np.argmax(method_likelihoods))

    # For the non-parametric method
    method_likelihoods = []
    for j in range(number_of_classes):
        sumlog_pi = question_d(X_train, x)
        # The Bayes classifier rule
        p_i = sumlog_pi * class_probabilities[j]
        method_likelihoods.append(p_i)
    predictions.append(np.argmax(method_likelihoods))

    return predictions
dbd9d6227c8877862d74d4bf1932f3f1acd37a2f
23,593
def create_secret_id(vault, name, version=None):
    """
    :param vault: The vault uri.
    :type vault: str
    :param name: The secret name.
    :type name: str
    :param version: The secret version.
    :type version: str
    :rtype: KeyVaultId
    """
    return create_object_id('secrets', vault, name, version)
65c918a8f9c1f5c087835ff36a9eb13233bada2d
23,594
def config_output_page():
    """
    Configuration landing page

    :return: config.html
    """
    config_type = "output"
    c = ConfigFile()
    # First load in all the configuration from the provided configuration file, if it exists
    c.load_from_file(DEFAULT_CONFIG_FILE)
    cdb = c.get_cdb()
    cdb.update_path(config_type)
    docs = cdb.get_all()
    outputs = []
    for doc in docs:
        i = c.get_output_from_data(doc)
        outputs.append(i)
    output_types = c.get_outputs_available()
    config_descr = """
    Outputs act as stores - separate from the local database - for host information
    """
    return render_template('config.html', items=outputs, config_type=config_type,
                           config_descr=config_descr, item_types=output_types)
346ae058db6e0081a37a5ebedd6d231f0a3204da
23,595
from collections import defaultdict
from typing import Optional

import numpy as np

def compute_all_aggregator_metrics(
    per_plan_confidences: np.ndarray,
    predictions: np.ndarray,
    ground_truth: np.ndarray,
    metric_name: Optional[str] = None
):
    """Batch size B, we assume consistent number of predictions D per scene.

    per_plan_confidences: np.ndarray, shape (B, D), we assume that all
        prediction requests have the same number of proposed plans here.
    predictions: np.ndarray, shape (B, D, T, 2)
    ground_truth: np.ndarray, shape (B, T, 2), there is only one ground_truth
        trajectory for each prediction request.
    metric_name: Optional[str], if specified, compute a particular metric only.
    """
    metrics_dict = defaultdict(list)

    if metric_name is None:
        base_metrics = VALID_BASE_METRICS
    else:
        base_metrics = []
        for metric in VALID_BASE_METRICS:
            if metric.upper() in metric_name:
                base_metrics.append(metric)
        if not base_metrics:
            raise ValueError(f'Invalid metric name {metric_name} specified.')

    if metric_name is None:
        aggregators = VALID_AGGREGATORS
    else:
        aggregators = []
        for agg in VALID_AGGREGATORS:
            if agg in metric_name:
                aggregators.append(agg)
        if not aggregators:
            raise ValueError(f'Invalid metric name {metric_name} specified.')

    for base_metric_name in base_metrics:
        if base_metric_name == 'ade':
            base_metric = average_displacement_error
        elif base_metric_name == 'fde':
            base_metric = final_displacement_error
        else:
            raise NotImplementedError

        # For each prediction request:
        for index, (req_preds, req_gt, req_plan_confs) in enumerate(
                zip(predictions, ground_truth, per_plan_confidences)):
            req_plan_losses = base_metric(
                predicted=req_preds, ground_truth=req_gt)

            for aggregator in aggregators:
                metric_key = f'{aggregator}{base_metric_name.upper()}'
                metrics_dict[metric_key].append(
                    aggregate_prediction_request_losses(
                        aggregator=aggregator,
                        per_plan_losses=req_plan_losses,
                        per_plan_weights=_softmax_normalize(req_plan_confs)))

    metrics_dict = {
        key: np.stack(values) for key, values in metrics_dict.items()}
    return metrics_dict
cd17c4c1273ec2a23aa16efebd7a4437dc4e16f7
23,596
import requests
import re
from html2text import html2text  # assumed import for the html2text() call below

def query_url_base(_url, _proxy=True, _isPC=True, _isPhone=False):
    """
    A requests-based helper; it cannot collect data from dynamic (JavaScript-rendered) pages.
    :param _url: <str>
    :param _proxy: <bool>
    :param _isPC: <bool>
    :param _isPhone: <bool>
    :return _result: <dict>
    """
    _result = {}
    _headers = {'Connection': 'keep-alive'}
    if _isPC:
        _headers['User-Agent'] = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) '
                                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                                  'Chrome/88.0.4324.150 Safari/537.36')
    elif _isPhone:
        _headers['User-Agent'] = ('Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) '
                                  'AppleWebKit/605.1.15 (KHTML, like Gecko) '
                                  'Version/13.0.3 Mobile/15E148 Safari/604.1')
    _ip_url = 'https://restapi.amap.com/v3/ip?output=json&key=880b9655c8c084258bfbedf98145a936'
    _proxy = {
        'http': 'socks5://127.0.0.1:1080',
        'https': 'socks5://127.0.0.1:1080',
    } if _proxy else None
    _pattern_dict = {
        'title': r"<(title|TITLE)>(?P<title>[^<>]+)</(title|TITLE)>"}
    # print(requests.get(_ip_url, proxies=_proxy).json())
    response = requests.post(_url, proxies=_proxy, headers=_headers, verify=False, timeout=30)
    content = response.text
    for k, v in _pattern_dict.items():
        _match = re.search(v, content)
        if not _match:  # the original tested `re.match`, which is always truthy
            continue
        _result[k] = _match.groupdict()[k]
    _result['text'] = html2text(content)
    return _result
f916b974a472f6a3d079911461902da1ae7cb18d
23,597
def timefstring(dtobj, tz_name=True):
    """Standardize the format used for timestamp string format.

    Include 3 letter string for timezone if set to True.
    """
    if tz_name:
        return f'{dtobj.strftime("%Y-%m-%d_%H:%M:%S%Z")}'
    else:
        return f'{dtobj.strftime("%Y-%m-%d_%H:%M:%S")}NTZ'
5bbf0454a76ed1418cbc9c44de909940065fb51f
23,598
def is_block(modules):
    """Check if is a ShuffleNet building block (ShuffleUnit)."""
    if isinstance(modules, (ShuffleUnit, )):
        return True
    return False
ac6f059b763f25d81508826a3a8c8db5beb769b0
23,599
def _func(*args, **kwargs):
    """Test function used in some tests."""
    return args, kwargs
7fb2aa947806578e5378e66ce7dc1b4f3f593dbe
23,601
def combine_parallel_circuits(IVprev_cols, pvconst):
    """
    Combine crosstied circuits in a substring.

    :param IVprev_cols: lists of IV curves of crosstied and series circuits
    :param pvconst: PVconstants object used to combine the IV curves
    :return: the combined IV curve of the substring
    """
    # combine crosstied circuits
    Irows, Vrows = [], []
    Isc_rows, Imax_rows = [], []
    for IVcols in zip(*IVprev_cols):
        Iparallel, Vparallel = zip(*IVcols)
        Iparallel = np.asarray(Iparallel)
        Vparallel = np.asarray(Vparallel)
        Irow, Vrow = pvconst.calcParallel(
            Iparallel, Vparallel, Vparallel.max(), Vparallel.min()
        )
        Irows.append(Irow)
        Vrows.append(Vrow)
        Isc_rows.append(np.interp(np.float64(0), Vrow, Irow))
        Imax_rows.append(Irow.max())
    Irows, Vrows = np.asarray(Irows), np.asarray(Vrows)
    Isc_rows = np.asarray(Isc_rows)
    Imax_rows = np.asarray(Imax_rows)
    return pvconst.calcSeries(
        Irows, Vrows, Isc_rows.mean(), Imax_rows.max()
    )
31d6a96189b703bca0e9cf212472cb0c8870d3cd
23,602
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
                     module_file: str, serving_model_dir: str,
                     metadata_path: str) -> tfx.dsl.Pipeline:
    """Creates a three component penguin pipeline with TFX."""
    # Brings data into the pipeline.
    example_gen = tfx.components.CsvExampleGen(input_base=data_root)

    # Uses user-provided Python function that trains a model.
    trainer = tfx.components.Trainer(
        module_file=module_file,
        examples=example_gen.outputs['examples'],
        train_args=tfx.proto.TrainArgs(num_steps=100),
        eval_args=tfx.proto.EvalArgs(num_steps=5))

    # NEW: Get the latest blessed model for Evaluator.
    model_resolver = tfx.dsl.Resolver(
        strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
        model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
        model_blessing=tfx.dsl.Channel(
            type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
                'latest_blessed_model_resolver')

    # NEW: Uses TFMA to compute evaluation statistics over features of a model and
    #   perform quality validation of a candidate model (compared to a baseline).
    eval_config = tfma.EvalConfig(
        model_specs=[tfma.ModelSpec(label_key='species')],
        slicing_specs=[
            # An empty slice spec means the overall slice, i.e. the whole dataset.
            tfma.SlicingSpec(),
            # Calculate metrics for each penguin species.
            tfma.SlicingSpec(feature_keys=['species']),
        ],
        metrics_specs=[
            tfma.MetricsSpec(per_slice_thresholds={
                'sparse_categorical_accuracy':
                    tfma.config.PerSliceMetricThresholds(thresholds=[
                        tfma.PerSliceMetricThreshold(
                            slicing_specs=[tfma.SlicingSpec()],
                            threshold=tfma.MetricThreshold(
                                value_threshold=tfma.GenericValueThreshold(
                                    lower_bound={'value': 0.6}),
                                # Change threshold will be ignored if there is no
                                # baseline model resolved from MLMD (first run).
                                change_threshold=tfma.GenericChangeThreshold(
                                    direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                                    absolute={'value': -1e-10}))
                        )]),
            })],
    )
    evaluator = tfx.components.Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        baseline_model=model_resolver.outputs['model'],
        eval_config=eval_config)

    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    pusher = tfx.components.Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],  # Pass an evaluation result.
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir)))

    components = [
        example_gen,
        trainer,
        # Following two components were added to the pipeline.
        model_resolver,
        evaluator,
        pusher,
    ]

    return tfx.dsl.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        metadata_connection_config=tfx.orchestration.metadata
        .sqlite_metadata_connection_config(metadata_path),
        components=components)
6254505a2d0309a8576a277b95a521294f9f8901
23,603
def create_global_step() -> tf.Variable:
    """Creates a `tf.Variable` suitable for use as a global step counter.

    Creating and managing a global step variable may be necessary for
    `AbstractTrainer` subclasses that perform multiple parameter updates per
    `Controller` "step", or use different optimizers on different steps.

    In these cases, an `optimizer.iterations` property generally can't be used
    directly, since it would correspond to parameter updates instead of iterations
    in the `Controller`'s training loop. Such use cases should simply call
    `step.assign_add(1)` at the end of each step.

    Returns:
        A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
        first replica's value retained when synchronizing across replicas in a
        distributed setting.
    """
    return tf.Variable(
        0,
        dtype=tf.int64,
        trainable=False,
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
d1fc499b60d09d50e555977b73eec04971e11b3b
23,604
def supported_coins_balance(balance, tickers):
    """ Return the balance with non-supported coins removed """
    supported_coins_balance = {}
    for coin in balance.keys():
        if coin != "BTC":
            if f"{coin}/BTC" in tickers:
                supported_coins_balance[coin] = balance[coin]
        else:
            # BTC itself is always kept
            try:
                supported_coins_balance["BTC"] = balance[coin]
            except KeyError:
                print("BTC not in balance")
    return supported_coins_balance
aaea856c728d04f47f52c1b07c66be57ff17d8cf
23,605
def _identity_map(size):
    """Function returning list of lambdas mapping vector to itself."""
    return [lambda x, id: x[id] for _ in range(size)]
6236d42d359fdc9b006bffcc597fccbc161eb53d
23,606
def With(prop, val):
    """The 'with <property> <value>' specifier.

    Specifies the given property, with no dependencies.
    """
    return Specifier(prop, val)
fc4a167322ab5bde74eabf1b69efb5d37f643405
23,607
def pad_node_id(node_id: np.uint64) -> str:
    """ Pad node id to 20 digits

    :param node_id: int
    :return: str
    """
    return "%.20d" % node_id
28cdaad2aa327143432c5be58598271139574a50
23,608
def ballcurve(x: ArrayLike, xi: float) -> ArrayLike:
    """
    Generate the curve for the nested structure, given a shape parameter xi.
    If xi = 1, the curve is linear.

    input:
    ----------
    x: 1D array, [0, 1]
        initial values to be evaluated on the function
    xi: number, >= 1
        shape parameter of how stylised the curve is

    output:
    ----------
    y: 1D array, [0, 1]
        evaluated function
    """
    return 1 - (1 - (x) ** (1 / xi)) ** xi
6b261793e1bdccc39bc66f25f4013d07d3bfc376
23,609
def center_vertices(vertices, faces, flip_y=True):
    """
    Centroid-align vertices.

    Args:
        vertices (V x 3): Vertices.
        faces (F x 3): Faces.
        flip_y (bool): If True, flips y verts to keep with image coordinates convention.

    Returns:
        vertices, faces
    """
    vertices = vertices - vertices.mean(dim=0, keepdim=True)
    if flip_y:
        vertices[:, 1] *= -1
        faces = faces[:, [2, 1, 0]]
    return vertices, faces
85743c3b3e3838533e78c66b137cc9c8c7702519
23,610
def fingerprint_atompair(fpSize=2048, count=False):
    """Atom pair fingerprint (list of int).

    Args:
        fpSize: Size of the generated fingerprint (defaults to 2048).
        count: The default value of False will generate fingerprint bits (0 or 1)
            whereas a value of True will generate the count of each fingerprint value.
    """
    generator = rdFingerprintGenerator.GetAtomPairGenerator(fpSize=fpSize)
    if count:
        fingerprint_fn = _fingerprint_fn_count(generator)
    else:
        fingerprint_fn = _fingerprint_fn_bits(generator)
    fingerprint_fn.__name__ = 'fingerprint_atompair(' + \
        f'fpSize={fpSize},count={count})'
    return fingerprint_fn
cbacf8bdaae11520f2bb71ae825ea574258f6242
23,611
def bend_euler_s(**kwargs) -> Component:
    """Sbend made of euler bends."""
    c = Component()
    b = bend_euler(**kwargs)
    b1 = c.add_ref(b)
    b2 = c.add_ref(b)
    b2.mirror()
    b2.connect("o1", b1.ports["o2"])
    c.add_port("o1", port=b1.ports["o1"])
    c.add_port("o2", port=b2.ports["o2"])
    return c
55c3d4dc5cc2766463f088ede2b4f04c6018eac6
23,612
def phot_error(star_ADU, n_pix, n_b, sky_ADU, dark, read, gain=1.0):
    """
    Photometric error including source, sky, dark and read-noise contributions.

    INPUT:
        star_ADU - stellar flux in ADU (total ADU counts within aperture)
        n_pix - number of pixels in aperture
        n_b - number of background pixels
        sky_ADU - in ADU/pix
        dark - in e/pix
        read - in e^2/pix
        gain - gain in e/ADU

    OUTPUT:
        Photometric error N in ADUs

    NOTES:
        This is not the normalized error. To normalize, have to do
        sigma_rel = N / star_ADU
        This does not include scintillation
    """
    noise = np.sqrt(gain*star_ADU
                    + n_pix*((1. + n_pix/n_b)
                             * (gain*sky_ADU + dark + read**2. + (gain*0.289)**2.)))/gain
    return noise
45d3f335c2b7fad1e2e0f8e8e415a0bda0f774e8
23,613
def tan(x):
    """
    tan(x) -> number

    Return the tangent of x; x in radians.
    """
    try:
        res, x = _init_check_mpfr(x)
        gmp.mpfr_tan(res, x, gmp.MPFR_RNDN)
        return mpfr._from_c_mpfr(res)
    except TypeError:
        res, x = _init_check_mpc(x)
        gmp.mpc_tan(res, x, gmp.MPC_RNDNN)
        return mpc._from_c_mpc(res)
651119ccd44f313b25f49e03a3f5094fa1c1a829
23,614
def test_wrapped_func():
    """
    Test uncertainty-aware functions obtained through wrapping.
    """

    ########################################
    # Function which can automatically handle numbers with
    # uncertainties:
    def f_auto_unc(angle, *list_var):
        return umath.cos(angle) + sum(list_var)

    def f(angle, *list_var):
        # We make sure that this function is only ever called with
        # numbers with no uncertainty (since it is wrapped):
        assert not isinstance(angle, uncert_core.UFloat)
        assert not any(isinstance(arg, uncert_core.UFloat) for arg in list_var)
        return f_auto_unc(angle, *list_var)

    f_wrapped = uncert_core.wrap(f)

    my_list = [1, 2, 3]

    ########################################
    # Test of a wrapped function that only calls the original
    # function: it should obtain the exact same result:
    assert f_wrapped(0, *my_list) == f(0, *my_list)
    # 1 == 1 +/- 0, so the type must be checked too:
    assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list))

    ########################################
    # Call with uncertainties:

    angle = uncert_core.ufloat(1, 0.1)
    list_value = uncert_core.ufloat(3, 0.2)

    # The random variables must be the same (full correlation):
    assert ufloats_close(f_wrapped(angle, *[1, angle]),
                         f_auto_unc(angle, *[1, angle]))
    assert ufloats_close(f_wrapped(angle, *[list_value, angle]),
                         f_auto_unc(angle, *[list_value, angle]))

    ########################################
    # Non-numerical arguments, and explicit and implicit derivatives:
    def f(x, y, z, t, u):
        return x + 2*z + 3*t + 4*u

    f_wrapped = uncert_core.wrap(
        f, [lambda *args: 1, None, lambda *args: 2, None])  # No deriv. for u

    assert f_wrapped(10, 'string argument', 1, 0, 0) == 12

    x = uncert_core.ufloat(10, 1)

    assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev,
                         (1 + 2 + 3 + 4)*x.std_dev)
9f432e6fd0c796c733e43c2ca66d2d0373148ee4
23,615
def T2str_mag_simplified(K, TE, T2str, N):
    """Signal Model of T2str-weighted UTE GRE Magnitude Image

    S = K * [ exp(-TE/T2*) ] + N

    parameters:
        K     :: constant (proportional to proton density)
        TE    :: sequence echo time
        T2str :: relaxation due to spin-spin effects and dephasing
        N     :: constant offset "noise" term

    @return expected (magnitude) signal
    """
    S = K * np.exp((-1.0 * TE) / T2str) + N
    return S
ddf829cf8e209602b141f1b13c8fbf5af566a8d7
23,616
def tune(runner, kernel_options, device_options, tuning_options):
    """ Find the best performing kernel configuration in the parameter space

    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    dna_size = len(tuning_options.tune_params.keys())
    pop_size = 20
    generations = 100
    tuning_options["scaling"] = False
    tune_params = tuning_options.tune_params

    population = random_population(dna_size, pop_size, tune_params)

    best_time = 1e20
    all_results = []
    cache = {}

    for generation in range(generations):

        if tuning_options.verbose:
            print("Generation %d, best_time %f" % (generation, best_time))

        # determine fitness of population members
        weighted_population = []
        for dna in population:
            time = _cost_func(dna, kernel_options, tuning_options, runner,
                              all_results, cache)
            weighted_population.append((dna, time))
        population = []

        # 'best_time' is used only for printing
        if tuning_options.verbose and all_results:
            best_time = min(all_results, key=lambda x: x["time"])["time"]

        # population is sorted such that better configs have higher chance of reproducing
        weighted_population.sort(key=lambda x: x[1])

        # crossover and mutate
        for _ in range(pop_size // 2):
            ind1 = weighted_choice(weighted_population)
            ind2 = weighted_choice(weighted_population)
            ind1, ind2 = crossover(ind1, ind2)
            population.append(mutate(ind1, dna_size, tune_params))
            population.append(mutate(ind2, dna_size, tune_params))

    return all_results, runner.dev.get_environment()
099b5e513ab52353efbce8ba1e7465acf5b1f6bc
23,617
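# Illustrative sketch of the selection helper used above. kernel_tuner
# defines `weighted_choice` (along with `random_population`, `crossover`,
# `mutate` and `_cost_func`) elsewhere; this rank-based version is only an
# assumption about its behavior, not the actual implementation.
import random

def weighted_choice(weighted_population):
    """Pick one DNA from a population sorted best-first (lowest time first).

    Earlier (better) entries get linearly larger weights, so good
    configurations reproduce more often.
    """
    n = len(weighted_population)
    weights = [n - i for i in range(n)]  # best entry weighted n, worst weighted 1
    dna, _time = random.choices(weighted_population, weights=weights, k=1)[0]
    return dna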
def getTimeString(t, centi=True):
    """
    category: General Utility Functions

    Given a value in milliseconds, returns a Lstr with:
    (hours if > 0):minutes:seconds:centiseconds.

    WARNING: this Lstr value is somewhat large so don't use this to
    repeatedly update node values in a timer/etc. For that purpose you
    should use timeDisplay nodes and attribute connections.
    """
    if type(t) is not int:
        t = int(t)
    bits = []
    subs = []
    h = (t // 1000) // (60 * 60)
    if h != 0:
        bits.append('${H}')
        subs.append(('${H}', bs.Lstr(resource='timeSuffixHoursText',
                                     subs=[('${COUNT}', str(h))])))
    m = ((t // 1000) // 60) % 60
    if m != 0:
        bits.append('${M}')
        subs.append(('${M}', bs.Lstr(resource='timeSuffixMinutesText',
                                     subs=[('${COUNT}', str(m))])))

    # we add seconds if they're non-zero *or* we haven't added anything else
    if centi:
        s = (t / 1000.0 % 60.0)
        if s >= 0.005 or not bits:
            bits.append('${S}')
            subs.append(('${S}', bs.Lstr(resource='timeSuffixSecondsText',
                                         subs=[('${COUNT}', ('%.2f' % s))])))
    else:
        s = (t // 1000) % 60
        if s != 0 or not bits:
            bits.append('${S}')
            subs.append(('${S}', bs.Lstr(resource='timeSuffixSecondsText',
                                         subs=[('${COUNT}', str(s))])))

    return bs.Lstr(value=' '.join(bits), subs=subs)
12cbcf4fcfd8450af110f93c77c4c5b50285c0fd
23,618
def validation_supervised(model, input_tensor, y_true, loss_fn,
                          multiclass=False, n_classes=1):
    """
    Returns average loss for an input batch of data with a supervised model.
    If running in multiclass mode, it also returns the accuracy.
    """
    y_pred = model(input_tensor.float())
    if multiclass:
        loss = loss_fn(y_pred, y_true)
        y_hat = y_pred.argmax(dim=1)
        acc = accuracy(y_hat, y_true)
    else:
        loss = loss_fn(y_pred, y_true.view(-1, n_classes).float())
        try:
            acc = accuracy(y_pred, y_true.view(-1, n_classes).float())
        except Exception:
            acc = None

    return loss.mean().item(), acc
901f4416fab5ebc23115ef2f3aab1b971607368e
23,619
from functools import partial, wraps
from timeit import default_timer as timer  # assumed timer source


def timing(func=None, *, name=None, is_stage=None):
    """
    Decorator to measure the time taken by the function to execute

    :param func: Function
    :param name: Display Name of the function for which the time is being calculated
    :param is_stage: Identifier for mining stage

    Examples:
        >>>
        >>> @timing(name="foo")
        >>> def func():
        >>>     ...
        >>>
        >>> @timing
        >>> def func():
        >>>     ...
        >>>
    """
    if func is None:
        return partial(timing, name=name, is_stage=is_stage)

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = timer()
        result = func(*args, **kwargs)
        end = timer()
        total_time = end - start
        # `logger`, ELAPSED_TIME_ON_FUNCTIONS and STAGE_WISE_TIME are
        # module-level objects defined elsewhere in this module.
        logger.info(f"Time taken to execute `{name}`: {total_time} sec")
        if not is_stage:
            if name in ELAPSED_TIME_ON_FUNCTIONS:
                ELAPSED_TIME_ON_FUNCTIONS[name] += total_time
            else:
                ELAPSED_TIME_ON_FUNCTIONS[name] = total_time
        else:
            if name in STAGE_WISE_TIME:
                STAGE_WISE_TIME[name] += total_time
            else:
                STAGE_WISE_TIME[name] = total_time
        return result

    return wrapper
e7368e64bda81811075a295b6e36f0f9e9e7bcd5
23,621
def is_skip_file(filename):
    """ Should the given file be skipped over for testing

    :param filename: The file's name
    :type filename: String
    :return: True if the given file should be skipped, false otherwise
    :rtype: Boolean
    """
    for skip_name in SKIP_FILES:
        if skip_name and filename.endswith(skip_name):
            return True
    return False
066bcfbff6f984fb293c422f613746967713b31b
23,622
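# Usage sketch; SKIP_FILES is a module-level suffix list in the original,
# so the value below is hypothetical.
SKIP_FILES = ["_test.py", ".tmp"]

assert is_skip_file("module_test.py") is True
assert is_skip_file("module.py") is False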
import numpy as np


def lowercase_or_notify(x):
    """ Lowercases the input if it is valid, otherwise logs the error and sets a default value

        Args:
            x: String to lowercase

        Returns:
            Lowercased string if possible, else unmodified string or default value.
    """
    try:
        return x.lower()
    except Exception:
        if x and not np.isnan(x):
            logger.info('Program activity of {} was unable to be lowercased. Entered as-is.'.format(x))
            return x
        else:
            logger.info('Null value found for program activity name. Entered default value.')  # should not happen
            return '(not provided)'
a9e9cce9450f21f5cec80739d435e362288e8844
23,623
def is_not_null(node, eval_type, given_variables): """Process the is_not_null operator. :param node: Formula node :param eval_type: Type of evaluation :param given_variables: Dictionary of var/values :return: Boolean result, SQL query, or text result """ if eval_type == EVAL_EXP: # Python evaluation return not value_is_null(get_value(node, given_variables)) if eval_type == EVAL_SQL: # SQL evaluation query = sql.SQL('({0} is not null)').format( OnTaskDBIdentifier(node['field']), ) return query, [] # Text evaluation return '{0} is not null'.format(node['field'])
a261731103f81f1e4fe2c6eb191d3127acb163fe
23,624
def search_for_rooms(filters, allow_admin=False, availability=None): """Search for a room, using the provided filters. :param filters: The filters, provided as a dictionary :param allow_admin: A boolean specifying whether admins have override privileges :param availability: A boolean specifying whether (un)available rooms should be provided, or `None` in case all rooms should be returned. """ query = (Room.query .outerjoin(favorite_room_table, db.and_(favorite_room_table.c.user_id == session.user.id, favorite_room_table.c.room_id == Room.id)) .reset_joinpoint() # otherwise filter_by() would apply to the favorite table .options(joinedload('owner').load_only('id')) .filter(~Room.is_deleted) .order_by(favorite_room_table.c.user_id.is_(None), db.func.indico.natsort(Room.full_name))) criteria = {} if 'capacity' in filters: query = query.filter(Room.capacity >= filters['capacity']) if 'building' in filters: criteria['building'] = filters['building'] if 'division' in filters: criteria['division'] = filters['division'] query = query.filter_by(**criteria) if 'text' in filters: text = ' '.join(filters['text'].strip().split()) if text.startswith('#') and text[1:].isdigit(): query = query.filter(Room.id == int(text[1:])) else: query = query.filter(_make_room_text_filter(text)) if filters.get('equipment'): subquery = (db.session.query(RoomEquipmentAssociation) .with_entities(db.func.count(RoomEquipmentAssociation.c.room_id)) .filter(RoomEquipmentAssociation.c.room_id == Room.id, EquipmentType.name.in_(filters['equipment'])) .join(EquipmentType, RoomEquipmentAssociation.c.equipment_id == EquipmentType.id) .correlate(Room) .as_scalar()) query = query.filter(subquery == len(filters['equipment'])) if filters.get('features'): for feature in filters['features']: query = query.filter(Room.available_equipment.any(EquipmentType.features.any(RoomFeature.name == feature))) if filters.get('favorite'): query = query.filter(favorite_room_table.c.user_id.isnot(None)) if filters.get('mine'): ids = get_managed_room_ids(session.user) query = query.filter(Room.id.in_(ids)) query = _filter_coordinates(query, filters) if availability is None: return query start_dt, end_dt = filters['start_dt'], filters['end_dt'] repeatability = (filters['repeat_frequency'], filters['repeat_interval']) availability_filters = [Room.filter_available(start_dt, end_dt, repeatability, include_blockings=False, include_pre_bookings=False)] if not (allow_admin and rb_is_admin(session.user)): selected_period_days = (filters['end_dt'] - filters['start_dt']).days booking_limit_days = db.func.coalesce(Room.booking_limit_days, rb_settings.get('booking_limit')) criterion = db.and_(Room.filter_bookable_hours(start_dt.time(), end_dt.time()), Room.filter_nonbookable_periods(start_dt, end_dt), db.or_(booking_limit_days.is_(None), selected_period_days <= booking_limit_days)) unbookable_ids = [room.id for room in query.filter(db.and_(*availability_filters), ~criterion) if not room.can_override(session.user, allow_admin=False)] availability_filters.append(~Room.id.in_(unbookable_ids)) availability_criterion = db.and_(*availability_filters) if availability is False: availability_criterion = ~availability_criterion return query.filter(availability_criterion)
fe29ec5b4bf27d51b45ed2ba87cb7153d176583c
23,625
def get_scalar(obj):
    """obj can either be a value, or a type

    Returns the Stella type for the given object"""
    type_ = type(obj)
    if type_ == type(int):
        # obj is itself a class (type(int) is `type`), so use it directly
        type_ = obj
    elif type_ == PyWrapper:
        type_ = obj.py

    # HACK {
    if type_ == type(None):  # noqa
        return None_
    elif type_ == str:
        return Str
    # } HACK

    try:
        return _pyscalars[type_]
    except KeyError:
        raise exc.TypeError("Invalid scalar type `{0}'".format(type_))
2b5c829a8a933ff5f80a1d17d0ba8c2a49c90643
23,626
import math

# Module-level flag: when True, angles default to degrees. The original
# module defines this elsewhere; False is an assumed default.
trigDeg = False


def sin(x, deg=None, **kwargs):
    """Computes the sine of x in either degrees or radians"""
    x = float(x)
    if deg or (trigDeg and deg is None):
        x = math.radians(x)
    return math.sin(x)
5f5809fac0fd6970fa58a20b8c70e9f6a53d96d7
23,628
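# Usage sketch for the degree/radian switch: an explicit deg=True overrides
# the module-level trigDeg default.
assert abs(sin(math.pi / 2) - 1.0) < 1e-12   # radians by default
assert abs(sin(90, deg=True) - 1.0) < 1e-12  # explicit degrees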
def bloated_nested_block(block_dets, *, repeat=False, **_kwargs): """ Look for long indented blocks under conditionals, inside loops etc that are candidates for separating into functions to simplify the narrative of the main code. """ bloated_outer_types = set() included_if = False for lbl, outer_xpath in OUTER_XPATHS.items(): if has_long_block(block_dets.element, outer_xpath): bloated_outer_types.add(lbl) if lbl == 'if': included_if = True if not bloated_outer_types: return None title = layout("""\ ### Possibility of avoiding excessively long nested blocks """) summary_bits = [] for bloated_outer_type in bloated_outer_types: summary_bits.append(layout(f"""\ The code has at least one long nested block under `{bloated_outer_type}:` """)) summary = ''.join(summary_bits) short_circuit_msg = layout("""\ #### Short-circuit and exit early It may be possible to unnest the indented code block by exiting early if the condition in the `if` expression is not met. """) short_circuit_demo_msg = ( layout(""" For example, instead of: """) + layout("""\ if tall_enough: ## add to basketball team line 1 line 2 line 3 ... line 30 logging.info("Finished!") """, is_code=True) + layout("""\ we could possibly write: """) + layout('''\ if not tall_enough: return ## add to basketball team line 1 line 2 line 3 ... line 30 logging.info("Finished!") ''', is_code=True) ) move_to_func_msg = layout("""\ #### Shift to function It may be possible to pull most of the nested code block into a function which can be called instead. """) move_to_func_demo_msg = ( layout(""" For example, instead of: """) + layout("""\ for name in names: ## contact name line 1 line 2 line 3 ... line 30 logging.info("Finished!") """, is_code=True) + layout("""\ we could possibly write: """) + layout('''\ def contact(name): """ Contact person ... """ line 1 line 2 line 3 ... line 30 for name in names: contact(name) logging.info("Finished!") ''', is_code=True) ) if not repeat: brief_strategy = layout("""\ You might want to consider applying a strategy for avoiding excessively long indented blocks: """) if included_if: short_circuit = short_circuit_msg short_circuit_demo = short_circuit_demo_msg else: short_circuit = '' short_circuit_demo = '' move_to_func = move_to_func_msg move_to_func_demo = move_to_func_demo_msg human = layout("""\ Computers can handle lots of nesting without malfunctioning. Human brains are not so fortunate. As it says in The Zen of Python: > "Flat is better than nested." """) else: brief_strategy = '' short_circuit = '' short_circuit_demo = '' move_to_func = '' move_to_func_demo = '' human = '' message = { conf.Level.BRIEF: (title + summary + brief_strategy + short_circuit + move_to_func), conf.Level.MAIN: (title + summary + brief_strategy + short_circuit + short_circuit_demo + move_to_func + move_to_func_demo), conf.Level.EXTRA: human, } return message
fc25529485c9725cf0de3fe5917299b084f499a3
23,630
from typing import Any

import eth_utils


def _to_bytes(value: Any, type_str: str = "bytes32") -> bytes:
    """Convert a value to bytes"""
    if isinstance(value, bool) or not isinstance(value, (bytes, str, int)):
        raise TypeError(f"Cannot convert {type(value).__name__} '{value}' to {type_str}")
    value = _to_hex(value)  # `_to_hex` is a helper defined elsewhere in this module
    if type_str == "bytes":
        return eth_utils.to_bytes(hexstr=value)
    if type_str == "byte":
        type_str = "bytes1"
    size = int(type_str.strip("bytes"))
    if size < 1 or size > 32:
        raise ValueError(f"Invalid type: {type_str}")
    try:
        return int(value, 16).to_bytes(size, "big")
    except OverflowError:
        raise OverflowError(f"'{value}' exceeds maximum length for {type_str}")
f324d915377cd281eacb25b3afbde7b83deedad1
23,631
import pandas as pd


def _map_sbs_sigs_back(df: pd.DataFrame) -> pd.Series:
    """
    Map Back Single-Base Substitution Signatures.
    -----------------------
    Args:
        * df: pandas.core.frame.DataFrame with index to be mapped

    Returns:
        * pandas.core.series.Series with matching indices to context96
    """
    def _check_to_flip(x, ref):
        # `compl` (reverse complement) is a helper defined elsewhere in this module
        if x in ref:
            return x
        else:
            return compl(x)

    if df.index.name is None:
        df.index.name = 'index'
    df_idx = df.index.name

    if ">" in df.index[0]:
        # Already in arrow format; `sbs_annotation_converter` and `context96`
        # are defined elsewhere in this module
        context_s = df.reset_index()[df_idx].apply(sbs_annotation_converter)
    else:
        # Already in word format
        context_s = df.reset_index()[df_idx]

    return context_s.apply(lambda x: _check_to_flip(x, context96.keys()))
d6a8843c80acdaf5320191af51cb40c8ce7e0d42
23,632
import numpy as np

# `utils` and `qcp` are package-local modules providing utils.center()
# and qcp.qcp_rmsd() respectively.


def rmsd(
    coords1: np.ndarray,
    coords2: np.ndarray,
    atomicn1: np.ndarray,
    atomicn2: np.ndarray,
    center: bool = False,
    minimize: bool = False,
    atol: float = 1e-9,
) -> float:
    """
    Compute RMSD

    Parameters
    ----------
    coords1: np.ndarray
        Coordinates of molecule 1
    coords2: np.ndarray
        Coordinates of molecule 2
    atomicn1: np.ndarray
        Atomic numbers for molecule 1
    atomicn2: np.ndarray
        Atomic numbers for molecule 2
    center: bool
        Center molecules at origin
    minimize: bool
        Compute minimum RMSD (with QCP method)
    atol: float
        Absolute tolerance parameter for QCP method (see :func:`qcp_rmsd`)

    Returns
    -------
    float
        RMSD

    Notes
    -----
    When `minimize=True`, the QCP method is used. [1]_ The molecules are
    centred at the origin according to the center of geometry and superimposed
    in order to minimize the RMSD.

    .. [1] D. L. Theobald, *Rapid calculation of RMSDs using a quaternion-based
       characteristic polynomial*, Acta Crys. A **61**, 478-480 (2005).
    """
    assert np.all(atomicn1 == atomicn2)
    assert coords1.shape == coords2.shape

    # Center coordinates if required
    c1 = utils.center(coords1) if center or minimize else coords1
    c2 = utils.center(coords2) if center or minimize else coords2

    if minimize:
        rmsd = qcp.qcp_rmsd(c1, c2, atol)
    else:
        n = coords1.shape[0]
        rmsd = np.sqrt(np.sum((c1 - c2) ** 2) / n)

    return rmsd
e5f430d3ddb330c7bf61e0674c29cba3d6fadd7f
23,633
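# Usage sketch for the plain (non-minimized) path, which only needs NumPy;
# the coordinates below are made up for illustration.
import numpy as np

c1 = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
c2 = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
z = np.array([6, 6])  # two carbon atoms

# squared deviations sum to 2 over n = 2 atoms -> RMSD = 1.0
assert np.isclose(rmsd(c1, c2, z, z), 1.0)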
def get_bridge_interfaces(yaml):
    """Returns a list of all interfaces that are bridgedomain members"""

    ret = []
    if "bridgedomains" not in yaml:
        return ret

    for _ifname, iface in yaml["bridgedomains"].items():
        if "interfaces" in iface:
            ret.extend(iface["interfaces"])

    return ret
dad9e634a1c5306289e73d465b08b7ea857518e4
23,634
from typing import List

from tqdm import tqdm


def get_entity_matched_docs(doc_id_map: List[str], data: List[dict]):
    """Gets the documents where the document name is contained inside the claim

    Args:
        doc_id_map (List[str]): A list of document names
        data (List[dict]): One of the FEVEROUS datasets

    Returns:
        List[List[str]]: A list of lists of the related documents
    """
    claims = [d["claim"] for d in data]
    related_docs = []
    for claim in tqdm(claims):
        claim_docs = [doc_id for doc_id in doc_id_map if doc_id in claim]
        # drop very short names (<= 3 chars) that tend to match spuriously
        claim_docs = [doc for doc in claim_docs if len(doc) > 3]
        related_docs.append(claim_docs)
    return related_docs
dd49d58bd2a4dc4eed06e16d5673c85bf1ed8b73
23,636
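# Usage sketch with made-up inputs; names of 3 characters or fewer are
# dropped by the length filter even when they match.
doc_id_map = ["Barack Obama", "Obama", "USA", "Paris"]
data = [{"claim": "Barack Obama was the 44th president of the USA."}]

print(get_entity_matched_docs(doc_id_map, data))
# [['Barack Obama', 'Obama']]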
import requests


def getTemplateKeys(k):
    """
    Prints out template keys for license or gitignore templates from the GitHub API
    Params: str
    Return: code
    """
    code = 0
    if k.lower() == "license":
        r = requests.get(GITHUB_LICENSE_API)
        if r.status_code != 200:
            code = 1
        else:
            print("Github LICENSE template keys: ")
            for item in r.json():
                print(item["key"])
    elif k.lower() == "git":
        r = requests.get(GITHUB_GITIGNORE_API)
        if r.status_code != 200:
            code = 1
        else:
            print("Github .gitignore template keys: ")
            for item in r.json():
                print(item)
    else:
        print("Invalid argument for --get-template-keys! : options [git, license]")
        code = 2
    return code
641e6aeb599fb206214530b55faea44be7de7d37
23,637
from torch import nn


def get_num_conv2d_layers(model, exclude_downsample=True, include_linear=True):
    """ Check the number of Conv2D layers. """
    num = 0
    for n, m in model.named_modules():
        if "downsample" in n and exclude_downsample:
            continue
        # `is_conv2d` is a helper defined elsewhere in this module
        if is_conv2d(m) or (include_linear and isinstance(m, nn.Linear)):
            num += 1
    return num
79d1453f4cc49d358329a7d59fdd07bcdbb97736
23,638
import numpy as np


def im_list_to_blob(ims, RGB, NIR, DEPTH):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    """
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    num_images = len(ims)
    if RGB & NIR & DEPTH:
        blob = np.zeros((num_images, max_shape[0], max_shape[1], 5),
                        dtype=np.float32)
    elif (RGB & NIR) | (RGB & DEPTH) | (NIR & DEPTH):
        blob = np.zeros((num_images, max_shape[0], max_shape[1], 4),
                        dtype=np.float32)
    else:
        blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
                        dtype=np.float32)
    for i in range(num_images):
        im = ims[i]
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im

    return blob
96036933eddd742b9db4e211188c1716933d37dc
23,639
async def async_setup_entry(hass, config_entry, async_add_devices):
    """Set up entry."""
    miniserver = get_miniserver_from_config_entry(hass, config_entry)
    loxconfig = miniserver.lox_config.json
    devices = []

    for switch_entity in get_all_switch_entities(loxconfig):
        if switch_entity["type"] in ["Pushbutton", "Switch"]:
            switch_entity.update(
                {
                    "room": get_room_name_from_room_uuid(
                        loxconfig, switch_entity.get("room", "")
                    ),
                    "cat": get_cat_name_from_cat_uuid(
                        loxconfig, switch_entity.get("cat", "")
                    ),
                }
            )
            new_push_button = LoxoneSwitch(**switch_entity)
            devices.append(new_push_button)

        elif switch_entity["type"] == "TimedSwitch":
            switch_entity.update(
                {
                    "room": get_room_name_from_room_uuid(
                        loxconfig, switch_entity.get("room", "")
                    ),
                    "cat": get_cat_name_from_cat_uuid(
                        loxconfig, switch_entity.get("cat", "")
                    ),
                }
            )
            new_push_button = LoxoneTimedSwitch(**switch_entity)
            devices.append(new_push_button)

        elif switch_entity["type"] == "Intercom":
            if "subControls" in switch_entity:
                for sub_name in switch_entity["subControls"]:
                    subcontrol = switch_entity["subControls"][sub_name]
                    subcontrol.update(
                        {
                            "name": "{} - {}".format(
                                switch_entity["name"], subcontrol["name"]
                            ),
                            "room": get_room_name_from_room_uuid(
                                loxconfig, switch_entity.get("room", "")
                            ),
                            "cat": get_cat_name_from_cat_uuid(
                                loxconfig, switch_entity.get("cat", "")
                            ),
                        }
                    )
                    new_push_button = LoxoneIntercomSubControl(**subcontrol)
                    devices.append(new_push_button)

    async_add_devices(devices, True)
    return True
1cd4114645b7454c371bc23a13e212e2ae9f8173
23,640
import numpy as np
import torch

import deformetrica.support.kernels


def gradU_from_momenta(x, p, y, sigma):
    """
    strain F'(x) for momenta p defined at control points y

    a method "convolve_gradient" is doing a similar job but only
    computes (gradF . z)

    x (M, D)
    p (N, D)
    y (N, D)
    return gradU (M, D, D)
    """
    kern = deformetrica.support.kernels.factory("torch", gpu_mode=False,
                                                kernel_width=sigma)

    # move tensors with respect to gpu_mode
    t_x = torch.tensor(x, device="cpu")
    t_y = torch.tensor(y, device="cpu")
    t_p = torch.tensor(p, device="cpu")

    # A = exp(-(x_i - y_j)^2/(ker^2)).
    sq = kern._squared_distances(t_x, t_y)
    A = torch.exp(-sq / (sigma ** 2))  # (M, N)

    # B = -2/(ker^2) * (x_i - y_j) * exp(-(x_i - y_j)^2/(ker^2)).
    B = (-2 / (sigma ** 2)) * kern._differences(t_x, t_y) * A  # (D, M, N)

    res = torch.matmul(B, t_p)  # (D, M, D)
    return np.array(res.transpose(0, 1))
03dc67bf8bc6b8a576b1ed96de841003bcb53383
23,641
import numpy as np
from numpy.polynomial import polynomial as poly


def process(seed, K):
    """
    K is model order / number of zeros
    """
    print(K, end=" ")

    # create the dirac locations with many, many points
    rng = np.random.RandomState(seed)
    # `period` and `distance` are module-level objects defined elsewhere
    tk = np.sort(rng.rand(K) * period)

    # true zeros
    uk = np.exp(-1j * 2 * np.pi * tk / period)
    coef_poly = poly.polyfromroots(uk)  # more accurate than np.poly

    # estimate zeros
    uk_hat = np.roots(np.flipud(coef_poly))

    # place on unit circle?
    uk_hat_unit = uk_hat / np.abs(uk_hat)

    # compute error
    min_dev_norm = distance(uk, uk_hat)[0]
    _err_roots = 20 * np.log10(np.linalg.norm(uk) / min_dev_norm)

    min_dev_norm = distance(uk, uk_hat_unit)[0]
    _err_unit = 20 * np.log10(np.linalg.norm(uk) / min_dev_norm)

    return _err_roots, _err_unit
544d5116cf5ef3a2bff08253ee697d4a04994a2e
23,642
def _gen_sieve_array(M, factor_base):
    """Sieve Stage of the Quadratic Sieve. For every prime in the factor_base
    that doesn't divide the coefficient `a` we add log_p over the sieve_array
    such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`
    is an integer. When p = 2 then log_p is only added using
    ``-M <= soln1 + i*p <= M``.

    Parameters:
    ===========

    M : sieve interval
    factor_base : factor_base primes
    """
    sieve_array = [0]*(2*M + 1)
    for factor in factor_base:
        if factor.soln1 is None:  # the prime does not divide a
            continue
        for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):
            sieve_array[idx] += factor.log_p
        if factor.prime == 2:
            continue
        # if the prime is 2 then sieve only with soln1
        for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime):
            sieve_array[idx] += factor.log_p
    return sieve_array
98a8e5bedaa56dbe53aa8a152c20a015d7b3556d
23,643
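# Toy run of the sieve above. Real sympy factor_base entries carry more
# state; this namedtuple stand-in (prime, soln1, soln2, log_p) is only
# for illustration.
from collections import namedtuple

FBPrime = namedtuple("FBPrime", "prime soln1 soln2 log_p")

M = 5
factor_base = [FBPrime(prime=3, soln1=1, soln2=2, log_p=2)]
print(_gen_sieve_array(M, factor_base))
# log_p accumulates at indices 0, 3, 6, 9 (from soln1) and 1, 4, 7 (from soln2)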
import tensorflow as tf
from tensorflow.keras import backend as K  # assumed backend import


def yolo_eval_weighted_nms(yolo_outputs, anchors, num_classes, image_shape, score_threshold=.6):
    """ yolo evaluate
    Args:
        yolo_outputs: [batch, 13, 13, 3*85]
        anchors: [9, 2]
        num_classes: num of your own classes
        image_shape: the shape of original image
        score_threshold: when score > score threshold, the anchor is positive

    Returns:
        boxes_, scores_, classes_
    """
    num_layers = len(yolo_outputs)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        # `yolo_boxes_and_scores` is defined elsewhere in this module
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]],
                                                    num_classes, input_shape, image_shape, l)
        boxes.append(_boxes)
        box_scores.append(_box_scores)

    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold

    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # get positive anchors by using box_scores >= score_threshold
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)

    return boxes_, scores_, classes_
7066f2dbb4908709a3d762443385376d44d7f9f6
23,644
from django.db.models import Max


def next_coach_id():
    """ Generates the next id for newly added coaches, since their slugs
    (which combine the id and name fields) are added post-commit.
    """
    c = Coach.objects.aggregate(Max("id"))
    # `or 0` guards against an empty table, where the aggregate is None
    return (c['id__max'] or 0) + 1
55be7f6411685b391e9130bd9248588f3d0d8ffc
23,645
def get_unsigned_short(data, index):
    """Return two bytes from data as a little-endian unsigned 16-bit value"""
    return (data[index + 1] << 8) + data[index]
9e3b7dc30eaedb99edfb35b944442d7386ad8f9e
23,646
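# Quick check of the byte order, cross-validated against struct's
# little-endian unsigned short format.
import struct

data = bytes([0x34, 0x12, 0xFF])
assert get_unsigned_short(data, 0) == 0x1234
assert get_unsigned_short(data, 0) == struct.unpack_from("<H", data, 0)[0]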
def getObjDetRoI(imgSize, imgPatchSize, objx1, objy1, objx2, objy2):
    """ Get region of interest (ROI) for a given object detection with respect to image and image patch boundaries.

    :param imgSize: size of the image of interest (e.g., [1920x1080]).
    :param imgPatchSize: Patch size of the image patch of interest (e.g., 192).
    :param objx1: Upper left x coordinate of the object detection.
    :param objy1: Upper left y coordinate of the object detection.
    :param objx2: Lower right x coordinate of the object detection.
    :param objy2: Lower right y coordinate of the object detection.
    """
    # Cast to float values for calculations
    startX = float(objx1)
    startY = float(objy1)
    endX = float(objx2)
    endY = float(objy2)

    # Ensure image and image patch boundaries
    xRange = endX - startX
    yRange = endY - startY
    addX = imgPatchSize - (xRange % imgPatchSize)
    addY = imgPatchSize - (yRange % imgPatchSize)
    endX = endX + addX
    endY = endY + addY
    if endX > imgSize[1]:
        endX = imgSize[1]
    if endY > imgSize[0]:
        endY = imgSize[0]
    return startX, startY, endX, endY
2feedb9a5f79c24d0fda4eaa9b8db5bd6922b4ce
23,647
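# Worked call: a 150x150 box is padded up to the next multiple of the
# patch size (values are illustrative; imgSize is [height, width] per
# the clamping logic above).
roi = getObjDetRoI((1080, 1920), 192, 100, 100, 250, 250)
# xRange = 150, addX = 192 - (150 % 192) = 42 -> endX = 292 (same for Y)
print(roi)  # (100.0, 100.0, 292.0, 292.0)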
import numpy as np

# sigma_T (the Thomson cross section) is assumed to be a module-level
# constant defined elsewhere.


def sigma_pp(b):
    """pair production cross section"""
    return (
        sigma_T
        * 3.0
        / 16.0
        * (1 - b ** 2)
        * (2 * b * (b ** 2 - 2) + (3 - b ** 4) * np.log((1 + b) / (1 - b)))
    )
a3745b5f39e71c5f5713e3d7e0c7fbdb53146d15
23,648
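# Numerical sanity check; sigma_T is set here to the Thomson cross section
# in m^2 purely for illustration.
import numpy as np

sigma_T = 6.6524587e-29  # Thomson cross section, m^2

b = np.array([0.1, 0.5, 0.9])
print(sigma_pp(b))
# the cross section vanishes at both b -> 0 (threshold) and b -> 1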
import tensorflow as tf


def compute_radii_simple(distances):
    """
    Compute the radius for every hypersphere given the pairwise distances
    to satisfy Eq. 6 in the paper. Does not implement the heuristic
    described in section 3.5.
    """
    n_inputs = tf.shape(distances)[1]
    sorted_distances = tf.sort(distances, direction="ASCENDING", axis=-1)
    median_index = n_inputs // 2
    radii = sorted_distances[:, median_index]
    return radii
a935bbe6539c32d9de87b80dbaa6314152979b07
23,649
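# Tiny sketch: with four distances per row the radius is the element at
# sorted index 4 // 2 = 2 (the upper median).
import tensorflow as tf

distances = tf.constant([[3.0, 1.0, 4.0, 2.0]])
print(compute_radii_simple(distances).numpy())  # [3.]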
def data_resolution_and_offset(data, fallback_resolution=None): """Compute resolution and offset from x/y axis data. Only uses first two coordinate values, assumes that data is regularly sampled. Returns ======= (resolution: float, offset: float) """ if data.size < 2: if data.size < 1: raise ValueError("Can't calculate resolution for empty data") if fallback_resolution is None: raise ValueError("Can't calculate resolution with data size < 2") res = fallback_resolution else: res = (data[data.size - 1] - data[0]) / (data.size - 1.0) res = res.item() off = data[0] - 0.5 * res return res, off.item()
9cb5a14ff5be8509509e67b1576146231258583b
23,650
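# Usage sketch on a regular axis: the resolution is the step between centres
# and the offset sits half a step before the first centre.
import numpy as np

axis = np.linspace(0.0, 9.0, 10)  # centres 0..9, step 1
res, off = data_resolution_and_offset(axis)
print(res, off)  # 1.0 -0.5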
from typing import List


def get_changed_files_committed_and_workdir(
    repo: Git, commithash_to_compare: str
) -> List[str]:
    """Get changed files between given commit and the working copy"""
    # splitlines() keeps paths containing spaces intact, unlike split()
    return repo.repo.git.diff("--name-only", commithash_to_compare).splitlines()
1696c3bc41084db5d260bc1ddd7811dd9f143586
23,651
from typing import Optional from typing import Any def load_document_by_string( string: str, uri: str, loadingOptions: Optional[LoadingOptions] = None ) -> Any: """Load a CWL object from a serialized YAML string.""" yaml = yaml_no_ts() result = yaml.load(string) return load_document_by_yaml(result, uri, loadingOptions)
1750f0df653f155e112b3cbb363e4ee499f76ab6
23,652
import re def rename_symbol(symbol): """Rename the given symbol. If it is a C symbol, prepend FLAGS.rename_string to the symbol, but account for the symbol possibly having a prefix via split_symbol(). If it is a C++ symbol, prepend FLAGS.rename_string to all instances of the given namespace. Args: symbol: C or C++ symbol to rename. Returns: Dictionary, keys = old symbols, values = renamed symbols. """ new_renames = {} if is_cpp_symbol(symbol): # Scan through the symbol looking for the namespace name, then modify it. new_symbol = symbol if FLAGS.platform in ["linux", "android", "darwin", "ios"]: for ns in FLAGS.hide_cpp_namespaces: if symbol_includes_cpp_namespace(symbol, ns): # Linux and Darwin: To rename "namespace" to "prefixnamespace", # change all instances of "9namespace" to "15prefixnamespace". # (the number is the length of the namespace name) # See https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling new_ns = FLAGS.rename_string + ns new_symbol = re.sub("(?<=[^0-9])%d%s" % (len(ns), ns), "%d%s" % (len(new_ns), new_ns), new_symbol) new_renames[symbol] = new_symbol elif FLAGS.platform == "windows": for ns in FLAGS.hide_cpp_namespaces: if symbol_includes_cpp_namespace(symbol, ns): # Windows: To rename "namespace" to "prefixnamespace", # change all instances of "@namespace@@" to "@prefixnamespace@@". # See https://msdn.microsoft.com/en-us/library/56h2zst2.aspx new_ns = FLAGS.rename_string + ns new_symbol = re.sub("@%s@@" % ns, "@%s@@" % new_ns, new_symbol) new_renames[symbol] = new_symbol else: if FLAGS.platform == "windows" and symbol.startswith("$LN"): # Don't rename $LN*, those are local symbols. return new_renames # C symbol. Just split, rename, and re-join. (prefix, remainder) = split_symbol(symbol) new_symbol = prefix + FLAGS.rename_string + remainder new_renames[symbol] = new_symbol for added_prefix in _additional_symbol_prefixes.get(FLAGS.platform, []): new_renames[added_prefix + symbol] = new_renames[symbol] return new_renames
4c2291e3c604157df1f4d8f1f4e3b7a1277ceee2
23,653
import datetime


def py_time(data):
    """
    returns a python Time
    """
    if '.' in data:
        return datetime.datetime.strptime(data, '%H:%M:%S.%f').time()
    else:
        return datetime.datetime.strptime(data, '%H:%M:%S').time()
53f1bb601ab08e06f67b759fdc9f41820ea0ff20
23,654
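# Usage sketch covering both branches (with and without fractional seconds).
print(py_time("12:34:56.789"))  # 12:34:56.789000
print(py_time("01:02:03"))      # 01:02:03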
def create_empty_copy(G, with_nodes=True):
    """Return a copy of the graph G with all of the edges removed.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    with_nodes :  bool (default=True)
       Include nodes.

    Notes
    -----
    Graph, node, and edge data is not propagated to the new graph.
    """
    H = G.__class__()
    if with_nodes:
        H.add_nodes_from(G)
    return H
aea151473bd9f11b4e0cdfdf2ac4a689a1b5af49
23,655
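# Quick check with NetworkX: nodes survive, edges do not.
import networkx as nx

G = nx.Graph([(1, 2), (2, 3)])
H = create_empty_copy(G)
print(list(H.nodes()), list(H.edges()))  # [1, 2, 3] []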
import cv2


def trim_resize_frame(frame, resize_ratio, trim_factor):
    """
    Resize a frame according to a specified ratio while keeping the original
    aspect ratio, then trim the longer side of the frame according to a
    specified factor.

    Parameters
    ----------
    frame: np.array
        The input frame
    resize_ratio: float
        Resize factor.
    trim_factor: float
        Trim factor for the longer side of the frame. Must be btw 0 and 1.

    Returns
    ----------
    np.array
        Resized and trimmed frame.
    """
    frame = cv2.resize(
        frame, dsize=(0, 0),
        fx=resize_ratio, fy=resize_ratio)
    if frame.shape[1] > frame.shape[0]:
        __hor_longer, __l = True, frame.shape[1]
    else:
        __hor_longer, __l = False, frame.shape[0]
    __t = int(__l * trim_factor)
    __i = int(max(__l - __t, 0) / 2)
    if __hor_longer:
        frame = frame[:, __i:__i + __t, :]
    else:
        frame = frame[__i:__i + __t, :, :]
    return frame
55569da6aad4b24ef367828a2ce3353048f27ae9
23,656
def copy_doclist(doclist, no_copy=[]):
    """
      Save & return a copy of the given doclist
      Pass fields that are not to be copied in `no_copy`
    """
    cl = []

    # main doc
    c = Document(fielddata=doclist[0].fields.copy())

    # clear no_copy fields
    for f in no_copy:
        if f in c.fields:
            c.fields[f] = None

    c.name = None
    c.save(1)
    cl.append(c)

    # new parent name
    parent = c.name

    # children
    for d in doclist[1:]:
        c = Document(fielddata=d.fields.copy())
        c.name = None

        # clear no_copy fields
        for f in no_copy:
            if f in c.fields:
                c.fields[f] = None

        c.parent = parent
        c.save(1)
        cl.append(c)

    return cl
73e6554696abce1d94ace2b50cb8a28b0563fb30
23,657
def set_from_tags(tags, title, description, all=True): """all=True means include non-public photos""" user = flickr.test_login() photos = flickr.photos_search(user_id=user.id, auth=all, tags=tags) set = flickr.Photoset.create(photos[0], title, description) set.editPhotos(photos) return set
14e30d7334c75d29eccaf7957f53dadc164aedf0
23,658