content: stringlengths 35-762k
sha1: stringlengths 40-40
id: int64 0-3.66M
import torch def train_epoch(loader, vae, optimizer, device, epoch_idx, log_interval, loss_weights, stats_logger, clip_gradients=None): """Train VAE for an epoch""" vae.train() train_losses = {} train_total_loss = 0 for batch_idx, data in enumerate(loader): data = data.to(device).float() target = data optimizer.zero_grad() decoder_output, z, mu, logvar = vae(data) losses = vae.loss(decoder_output, target, z, mu, logvar) total_loss = sum(loss_weights.get(loss_name, 1) * loss for loss_name, loss in losses.items() if '_unweighted' not in loss_name) total_loss.backward() if clip_gradients is not None: torch.nn.utils.clip_grad_value_(vae.parameters(), clip_gradients) optimizer.step() train_total_loss += total_loss.item() * len(data) for name, loss in losses.items(): train_loss = train_losses.setdefault(name, 0) train_losses[name] = train_loss + loss.item() * len(data) if batch_idx % log_interval == 0: s = ('Train Epoch: {} [{}/{} ({:.0f}%)]\t' .format(epoch_idx, batch_idx * len(data), len(loader.dataset), 100. * batch_idx / len(loader))) s += ', '.join('Loss {}: {:.7f}'.format(name, loss.item()) for name, loss in losses.items()) print(s) stats = {name: loss / len(loader.dataset) for name, loss in train_losses.items()} stats['total_loss'] = train_total_loss / len(loader.dataset) s = ('====> Epoch: {} Avg. total loss: {:.7f}, ' .format(epoch_idx, stats['total_loss'])) s += ', '.join('{} loss: {:.7f}'.format(name, loss) for name, loss in stats.items() if name != 'total_loss') print(s) # Add weighted losses for logging for name, loss in train_losses.items(): weight = loss_weights.get(name, 1) stats['weighted_' + name] = weight * loss / len(loader.dataset) return stats
e846987844933359a67f7b6581a8429ef88bfb0b
3,400
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost def phased_multi_axes(times, data, std, ephemeris, thin=1, colours='midnightblue', ylim_shrink=0.8, subplot_kw=None, gridspec_kw=None, **kws): """ Parameters ---------- times data std ephemeris thin colours subplot_kw gridspec_kw Returns ------- """ # sharex=True, # not sharing x since it shares # all the ticks which is NOT desired here. # instead set range for all # NOTE: could try: # for tck in ax.xaxis.get_major_ticks(): # tck.label1.set_visible(True) n = len(times) fig, axes = plt.subplots(n, 1, sharey=True, subplot_kw=subplot_kw, gridspec_kw=gridspec_kw ) # hack to get dual axes on topmost pos = axes[0].get_position() axes[0].remove() ax = fig.axes[0] = axes[0] = SubplotHost(fig, n, 1, 1, **subplot_kw) axp = make_twin(ax, 45, ephemeris.P) fig.add_subplot(ax) ax.set_position(pos) # get colours if not isinstance(colours, (list, tuple, np.ndarray)): colours = [colours] * n # plot options opts = dict(fmt='o', ms=1, alpha=0.75, clip_on=False) opts.update(**kws) # do plotting s = np.s_[::thin] xlim = [np.inf, -np.inf] ylim = [np.inf, -np.inf] for i, (ax, t, y, u) in enumerate(zip(axes, times, data, std)): first = (i == 0) last = (i == n - 1) # phase = ephemeris.phase(t) phase -= max(np.floor(phase[0]) + 1, 0) if np.all(phase < 0): phase += 1 ebc = ax.errorbar(phase[s], y[s], u if u is None else u[s], color=colours[i], **opts) xlim = [min(xlim[0], phase[0]), max(xlim[1], phase[-1])] ylim = [min(ylim[0], y.min()), max(ylim[1], y.max())] # ticks ax.tick_params('y', which='minor', length=2.5, left=True, right=True) ax.tick_params('y', which='major', length=5, left=True, right=True) ax.yaxis.set_minor_locator(ticker.AutoMinorLocator()) if last: ax.tick_params('x', which='minor', length=2.5, bottom=(not first), top=(not last)) ax.tick_params('x', which='major', length=5, bottom=(not first), top=(not last)) ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) else: ax.tick_params('x', length=0) # remove top & bottom spines if not first: ax.spines['top'].set_visible(False) if not last: ax.spines['bottom'].set_visible(False) ax.xaxis.set_ticklabels([]) ax.tick_params(labelright=True, labelleft=True) ax.grid(True) # axes limits stretch = np.ptp(xlim) * 0.025 xlim = np.add(xlim, [-stretch, stretch]) ylim[1] *= ylim_shrink for ax in axes: ax.set(xlim=xlim, ylim=ylim) # axes[0].set_ylim(-0.15, 1.65) # x label axes_label_font_spec = dict(weight='bold', size=14) ax.set_xlabel('Orbital Phase', fontdict=axes_label_font_spec) # y label y_middle = 0.5 # (fig.subplotpars.top - fig.subplotpars.bottom) / 2 for x, va in zip((0.01, 1), ('top', 'bottom')): fig.text(x, y_middle, 'Relative Flux', axes_label_font_spec, rotation=90, rotation_mode='anchor', ha='center', va=va) # top ticks # axp.xaxis.set_ticks(np.r_[-2.5:3.5:0.5]) axp.set_xlabel('Time (hours)', fontdict=dict(weight='bold')) axp.tick_params('x', which='minor', length=2.5, bottom=False, top=True) return fig
32d2da62acde2a2424c310e1bd0196dbac9309cf
3,401
import numpy as np


def get_delta(K):
    """This function returns the delta matrix needed for calculating
    Pj = delta*S + (1-delta)*(1-S)
    Args:
        inputs:
            K: Integers below 2^K will be considered
        outputs:
            delta: Matrix containing binary codes of the numbers (1, 2^K),
                each one arranged row-wise. shape [(2^K - 1) x K]
            one_minus_delta: Matrix containing complements of the binary codes
                of the numbers (1, 2^K), each one arranged row-wise. shape [(2^K - 1) x K]
    """
    delta = np.arange(1, 2 ** K)[:, np.newaxis] >> np.arange(K)[::-1] & 1
    # all_ones = np.array(
    #     [list(np.binary_repr(2 ** int(np.ceil(np.log2(1 + x))) - 1, K)) for x in
    #      range(1, 2 ** K)], dtype=int)
    all_ones = np.array([[1 for _ in range(K)] for _ in range(2**K-1)])
    one_minus_delta = all_ones - delta
    return delta, one_minus_delta
84e72790024c7294e715dd5efc03f001a7ab887d
3,402
def string_split_readable(inp, length): """ Convenience function to chunk a string into parts of a certain length, whilst being wary of spaces. This means that chunks will only be split on spaces, which means some chunks will be shorter, but it also means that the resulting list will only contain readable strings. ValueError is thrown if there's a word that's longer than the max chunk size. :param inp: The string to be split :param length: Maximum length of the chunks to return :return: List containing the split chunks """ done = [] current = "" for word in inp.split(): if len(current) == length: done.append(current) current = "" if len(word) > length: raise ValueError(_("Word %s is longer than %s characters") % (word, length)) else: if len(current + word) > length: done.append(current) current = "" current += word if len(current) <= (length - 1): current += " " if len(current): done.append(current) return done
1f1d3641cc293754c174d32d397dab252c009eca
3,403
import torch def get_similarity_transform_matrix( from_pts: torch.Tensor, to_pts: torch.Tensor) -> torch.Tensor: """ Args: from_pts, to_pts: b x n x 2 Returns: torch.Tensor: b x 3 x 3 """ mfrom = from_pts.mean(dim=1, keepdim=True) # b x 1 x 2 mto = to_pts.mean(dim=1, keepdim=True) # b x 1 x 2 a1 = (from_pts - mfrom).square().sum([1, 2], keepdim=False) # b c1 = ((to_pts - mto) * (from_pts - mfrom)).sum([1, 2], keepdim=False) # b to_delta = to_pts - mto from_delta = from_pts - mfrom c2 = (to_delta[:, :, 0] * from_delta[:, :, 1] - to_delta[:, :, 1] * from_delta[:, :, 0]).sum([1], keepdim=False) # b a = c1 / a1 b = c2 / a1 dx = mto[:, 0, 0] - a * mfrom[:, 0, 0] - b * mfrom[:, 0, 1] # b dy = mto[:, 0, 1] + b * mfrom[:, 0, 0] - a * mfrom[:, 0, 1] # b ones_pl = torch.ones_like(a1) zeros_pl = torch.zeros_like(a1) return torch.stack([ a, b, dx, -b, a, dy, zeros_pl, zeros_pl, ones_pl, ], dim=-1).reshape(-1, 3, 3)
76524a1f85644cfedfda9dd60497768614a058b0
3,404
def get_current_daily_puzzle(**kwargs) -> ChessDotComResponse: """ :returns: ``ChessDotComResponse`` object containing information about the daily puzzle found in www.chess.com. """ return Resource( uri = "/puzzle", top_level_attr = "puzzle", **kwargs )
733ce2eaa45b773cdfc04395ceb4dbe101ae8b78
3,405
def stroke_negative(): """ render template if user is predicted negative for stroke """ return render_template("negative.html")
2f1a07b57b19143e6755f3067c4923bb7231fb89
3,406
import sirepo.sim_data def default_data(sim_type): """New simulation base data Args: sim_type (str): simulation type Returns: dict: simulation data """ return open_json_file( sim_type, path=sirepo.sim_data.get_class(sim_type).resource_path(f'default-data{sirepo.const.JSON_SUFFIX}') )
47791f63a6b6c636d8e0a5513e47ab10bd2db209
3,407
def get_instance_name_to_id_map(instance_info):
    """
    Generate an instance_name to instance_id map.
    Every instance without a name will be given a key 'unnamedx', where x is an
    incrementing number of instances without a name.
    """
    instance_name_to_id = {}
    unknown_instance_count = 0
    for instance_id in instance_info:
        instance = instance_info[instance_id]
        instance_name = "unnamed" + str(unknown_instance_count)
        if "Tags" in instance:
            for tag in instance["Tags"]:
                if tag["Key"] == "Name":
                    instance_name = tag["Value"]
        if instance_name == "unnamed" + str(unknown_instance_count):
            unknown_instance_count = unknown_instance_count + 1
        instance_name_to_id[instance_name] = instance["InstanceId"]
    return instance_name_to_id
293923476a19362fbbc2b3bb0b34bc35523bdfa1
3,408
def log_get_stdio_record(log): """ Returns a darshan log record for STDIO. Args: log: handle returned by darshan.open Returns: dict: log record """ return log_get_generic_record(log, "STDIO", "struct darshan_stdio_file **")
6438d1ca88357cb3928492ddb89c4beab643f9fb
3,409
import subprocess
from subprocess import PIPE


def gits_set_profile(args):
    """
    Set the global git user.name and user.email and print the updated values.
    """
    try:
        # validate the email address with a regex check
        check_val = check(args.email)
        if check_val:
            process = subprocess.Popen(["git", "config", "--global", "--unset", "user.email"],
                                       stdout=PIPE, stderr=PIPE)
            stdout, stderr = process.communicate()
            process1 = subprocess.Popen(["git", "config", "--global", "--unset", "user.name"],
                                        stdout=PIPE, stderr=PIPE)
            stdout, stderr = process1.communicate()
            process2 = subprocess.Popen(["git", "config", "--global", "user.name", args.name],
                                        stdout=PIPE, stderr=PIPE)
            stdout, stderr = process2.communicate()
            process3 = subprocess.Popen(["git", "config", "--global", "user.email", args.email],
                                        stdout=PIPE, stderr=PIPE)
            stdout, stderr = process3.communicate()

            # verify user.name by piping `git config --list` through grep
            profile_verify_name_command = ["git", "config", "--list"]
            profile_verify_name = ["grep", "user.name"]
            process4 = subprocess.Popen(profile_verify_name_command, stdout=PIPE, stderr=PIPE)
            process41 = subprocess.Popen(profile_verify_name, stdin=process4.stdout,
                                         stdout=PIPE, stderr=PIPE)
            stdout, stderr = process41.communicate()
            print("Setting name and email..\n")
            print(stdout.decode('utf-8'))

            # verify user.email the same way
            profile_verify_email_command = ["git", "config", "--list"]
            profile_verify_email = ["grep", "user.email"]
            process5 = subprocess.Popen(profile_verify_email_command, stdout=PIPE, stderr=PIPE)
            process51 = subprocess.Popen(profile_verify_email, stdin=process5.stdout,
                                         stdout=PIPE, stderr=PIPE)
            stdout, stderr = process51.communicate()
            print(stdout.decode('utf-8'))
        else:
            print("Enter a valid email id")
    except Exception as e:
        print("ERROR: gits profile command caught an exception")
        print("ERROR: {}".format(str(e)))
        return False
    return True
0235498f539046ea5eddedecec56be1980dbb129
3,410
def generate_spiral2d(nspiral=1000, ntotal=500, nsample=100, start=0., stop=1, # approximately equal to 6pi noise_std=.1, a=0., b=1., savefig=True): """Parametric formula for 2d spiral is `r = a + b * theta`. Args: nspiral: number of spirals, i.e. batch dimension ntotal: total number of datapoints per spiral nsample: number of sampled datapoints for model fitting per spiral start: spiral starting theta value stop: spiral ending theta value noise_std: observation noise standard deviation a, b: parameters of the Archimedean spiral savefig: plot the ground truth for sanity check Returns: Tuple where first element is true trajectory of size (nspiral, ntotal, 2), second element is noisy observations of size (nspiral, nsample, 2), third element is timestamps of size (ntotal,), and fourth element is timestamps of size (nsample,) """ # add 1 all timestamps to avoid division by 0 orig_ts = np.linspace(start, stop, num=ntotal) samp_ts = orig_ts[:nsample] # generate clock-wise and counter clock-wise spirals in observation space # with two sets of time-invariant latent dynamics zs_cw = stop + 1. - orig_ts rs_cw = a + b * 50. / zs_cw xs, ys = rs_cw * np.cos(zs_cw) - 5., rs_cw * np.sin(zs_cw) orig_traj_cw = np.stack((xs, ys), axis=1) zs_cc = orig_ts rw_cc = a + b * zs_cc xs, ys = rw_cc * np.cos(zs_cc) + 5., rw_cc * np.sin(zs_cc) orig_traj_cc = np.stack((xs, ys), axis=1) if savefig: plt.figure() plt.plot(orig_traj_cw[:, 0], orig_traj_cw[:, 1], label='clock') plt.plot(orig_traj_cc[:, 0], orig_traj_cc[:, 1], label='counter clock') plt.legend() plt.savefig('./ground_truth.png', dpi=500) print('Saved ground truth spiral at {}'.format('./ground_truth.png')) # sample starting timestamps orig_trajs = [] samp_trajs = [] for _ in range(nspiral): # don't sample t0 very near the start or the end t0_idx = npr.multinomial( 1, [1. / (ntotal - 2. * nsample)] * (ntotal - int(2 * nsample))) t0_idx = np.argmax(t0_idx) + nsample cc = bool(npr.rand() > .5) # uniformly select rotation orig_traj = orig_traj_cc if cc else orig_traj_cw orig_trajs.append(orig_traj) samp_traj = orig_traj[t0_idx:t0_idx + nsample, :].copy() samp_traj += npr.randn(*samp_traj.shape) * noise_std samp_trajs.append(samp_traj) # batching for sample trajectories is good for RNN; batching for original # trajectories only for ease of indexing orig_trajs = np.stack(orig_trajs, axis=0) samp_trajs = np.stack(samp_trajs, axis=0) return orig_trajs, samp_trajs, orig_ts, samp_ts
4d5129f651fd3a817c9be3beb9c2358895dd3654
3,411
import math

from scipy.stats import norm


def AUC_confidence(auc_value, num, interval=0.95):
    """
    Calculate upper and lower confidence interval bounds for the area under the ROC curve.
    Inspired by https://stats.stackexchange.com/questions/18887
    :param auc_value: area under the ROC curve (AUC)
    :param num: number of data points
    :param interval: confidence interval (0-1.0)
    :return: lower bound, upper bound
    """
    stderr = 1.0 / math.sqrt(num - 3)
    z_score = norm.ppf(interval)
    delta = z_score * stderr
    lower = math.tanh(math.atanh(auc_value) - delta)
    upper = math.tanh(math.atanh(auc_value) + delta)
    return lower, upper
5beab0e62171d49dcfb0fbd126243e4906787273
3,412
def add_data(data):
    """Add the given data to the chain by inserting it into the database."""
    db.insert(data)
    return 'chain updated'
52efd328097c95768de7f049335dbad9761e5715
3,413
import os
import logging

import cv2


def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """
    Detect the face in the image and return the colored face region.
    """
    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None
    if len(faces) == 0:
        logging.exception(img_path + ': No face found')
    else:
        x, y, w, h = faces[0]
        _h, _w = compute_size(h, w)
        roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]
    return roi_color
3d3d6786d7830bf1e03ab7bcc07052c5dd25a089
3,414
from typing import Counter def build_node_to_name_map(head): """ :type head: DecisionGraphNode :return: """ node_to_name_map = {} name_to_next_idx_map = Counter() def add_node_name(node): assert node not in node_to_name_map node_type_name = node.get_node_type_name() idx = name_to_next_idx_map[node_type_name] name_to_next_idx_map[node_type_name] += 1 name = "{}_{}".format(node_type_name, idx) node_to_name_map[node] = name bfs(head, add_node_name) return node_to_name_map
9d4b21317030c30539a5ec5947e574e3bd4fdd60
3,415
def ReduceFloat(f, op=None):
    """Reduce a single float value over MPI"""
    if not hasMPI:
        raise Exception("mpi4py required for Reduce operations: not found")
    if op is None:
        op = MPI.SUM
    fa = np.array([f])  # can only reduce over numpy arrays
    MPI.COMM_WORLD.Allreduce(MPI.IN_PLACE, fa, op=op)
    return fa[0]
12ca088e19a20eed145e1a90d8d88941f5d249ac
3,416
def GetVerificationStepsKeyName(name): """Returns a str used to uniquely identify a verification steps.""" return 'VerificationSteps_' + name
e50e9bd7b586d8bbfaf8902ce343d35d752948a4
3,417
def annotate_ms1_peaks(ms1_data, ms2_data, analyte_list): """Interpolate MS1 intensities for the time points for the MS2 scans for the largest mass peak in each analyte. Use relative changes in intensity between interpolated MS1 data and real MS2 data to find MS2 peaks that go with each analyte. """ ms2_data["analyte_id"] = None # Extract list of unique scan numbers and corresponding retention times ms2_scans = ms2_data[["scan", "rt"]].drop_duplicates().sort_values(by=["scan"]) for analyte in analyte_list: max_peak_data = ms1_data[ms1_data["peak_id"] == analyte.max_peak_id][["scan", "rt", "intensity"]].sort_values(by=["scan"]) interpolated_range = ms2_scans[ms2_scans["scan"].between(max_peak_data["scan"].min(), max_peak_data["scan"].max())].copy() if len(interpolated_range.index) >= config.matched_scan_minimum: if len(max_peak_data.index) > 3: tck = interpolate.splrep(max_peak_data["rt"].to_numpy(), max_peak_data["intensity"].to_numpy(), s=0) elif len(max_peak_data.index) == 3: tck = interpolate.splrep(max_peak_data["rt"].to_numpy(), max_peak_data["intensity"].to_numpy(), s=0, k=2) else: continue interpolated_intensities = interpolate.splev(interpolated_range["rt"].to_numpy(), tck, der=0) interpolated_range["intensity"] = interpolated_intensities ms2_data = ms2_to_analyte_vectorized(ms2_data, interpolated_range[["scan", "intensity"]], analyte.analyte_id) else: continue return ms2_data
32e0712ed27d802d99290cf01ba1f5f0dc07bae2
3,418
def split_tblastn_hits_into_separate_genes(query_res_obj, max_gap): """Take a SearchIO QueryResult object and return a new object with hits split into groups of HSPs that represent distinct genes. This is important, because there may be multiple paralogous genes present in a single nucleotide subject sequence (such as a chromosome or scaffold). """ # Print message. print('\n\tSearch program was tblastn.\n\tChecking number of distinct genes represented by HSPs.\n') # Copy the query result object. #query_res_obj2 = copy.deepcopy(query_res_obj) # Compile a list of all HSP clusters. # Display a simple visualization of HSP location. # List hits and HSPs in original object. num_dots = 150 all_hsp_clusters = [] hit_num = 0 for hit in query_res_obj: hit_num += 1 print('\tQuery: ' + hit.query_id) print('\tHit '+ str(hit_num) + ': ' + hit.id + ' ' + hit.description) print('\t' + 'HSP positions in subject sequence (1 dot = ' +\ str(int(hit.seq_len / num_dots)) + ' bp):') print('\t ' + '0' + ' ' * (num_dots -2) + str(hit.seq_len)) print('\t ' + 'v' + ' ' * (num_dots -2) + 'v') print('\t ' + '.' * num_dots + ' ' + 'Query range:') # Make a list of hsps. hsps = [] for hsp in hit: hsps.append(hsp) # Sort the HSPs. hsps2 = sorted(hsps, key=lambda x: x.hit_start) # Display the HSPs. for hsp in hsps2: string = '\t' sign = None if hsp.hit_frame > 0: sign = '+' elif hsp.hit_frame < 0: sign = '-' prepend_dots = '.' * int((hsp.hit_start*num_dots)/(hit.seq_len)) string = string + sign + prepend_dots span_string = str(hsp.hit_start) + ', ' + str(hsp.hit_end) string = string + span_string string = string + '.' * max([0, num_dots - len(prepend_dots) - len(span_string)]) string = string + ' ' + str(hsp.query_range) #+ ' ' + str(hsp.evalue) print(string) #print(hsp.hit.seq) print('\n') # Generate an expanded list of hit objects. # Recursively find clusters of HSPs that likely represent different # genes, and return as a list of lists. hsp_clusters = get_hsp_clusters(hit, max_gap) all_hsp_clusters = all_hsp_clusters + hsp_clusters # Display HSPs in each cluster. cluster_num = 0 for clusterplus in hsp_clusters: cluster = clusterplus[0] cluster_num += 1 # Call function for printing visualization. print_cluster(clusterplus, hit_num, cluster_num, num_dots) #*** ## ***Redundant?: ## Check that the clusters do not overlap with each other on the subject ## sequence. #for cluster1 in hsp_clusters: # for cluster2 in hsp_clusters: # if cluster1[0] != cluster2[0]: # if clusters_overlap(cluster1[0], cluster2[0]): # # Visualize overlapping clusters (for troubleshooting). # startend = get_cluster_range(cluster1[0] + cluster2[0]) # print('Overlapping clusters:') # print_cluster(cluster1,\ # str(get_cluster_range(cluster1[0])),\ # cluster_num, num_dots, startend) # print_cluster(cluster2,\ # str(get_cluster_range(cluster2[0])),\ # cluster_num, num_dots, startend) # ## Assert no overlap. # #assert not clusters_overlap(cluster1[0], cluster2[0]),\ # #"""Clusters overlap: %s and %s""" %\ # #(cluster1[0][0].hit_id + str(get_cluster_range(cluster1[0])),\ # # cluster2[0][0].hit_id + str(get_cluster_range(cluster2[0]))) ## Check that the clusters do not overlap with each other on the subject ## sequence. 
#for cluster1 in all_hsp_clusters: # for cluster2 in all_hsp_clusters: # if cluster1[0] != cluster2[0]: # assert not clusters_overlap(cluster1[0], cluster2[0]),\ # """Clusters overlap: %s and %s""" %\ # (cluster1[0][0].hit_id + str(get_cluster_range(cluster1[0])),\ # cluster2[0][0].hit_id + str(get_cluster_range(cluster2[0]))) # Sort HSPs according to E-value (the ranking may change because when # TBLASTN HSPs for the same scaffold sequence are split into those # representing potentially separate genes, then some may have higher # E-values). all_hsp_clusters.sort(key=lambda x: min([y.evalue for y in x[0]])) # Return the list of SearchIO HSP (not Hit) object clusters/lists. return all_hsp_clusters
f33bc8ed36343cb4e0c00c186546d6f979885c92
3,419
def to_entity_values(entity_group): """ Parse current entity group content into a CreateEntity[] """ values = [] for _, row in entity_group.iterrows(): value = row[ENTITY_VALUE_COLUMN] if not value: # Handle reserved entities continue synonyms = [] patterns = [] # Drop first two item and iterate the rest items (synonym or pattern) for _, val in row.drop([ENTITY_COLUMN, ENTITY_VALUE_COLUMN]) \ .iteritems(): if not pd.isnull(val): if val.startswith('/'): # is pattern? patterns.append(val[:-1][1:]) else: synonyms.append(val) # Construct CreateValue[] if len(patterns) != 0: values.append({'value': value, 'patterns': patterns, 'type': 'patterns'}) else: values.append({'value': value, 'synonyms': synonyms, 'type': 'synonyms'}) return values
278f9d5a7c8294338d83ba025c67fe23f36a8ac2
3,420
import codecs import logging def read_file(file_path): """ Read the contents of a file using utf-8 encoding, or return an empty string if it does not exist :param file_path: str: path to the file to read :return: str: contents of file """ try: with codecs.open(file_path, 'r', encoding='utf-8', errors='xmlcharrefreplace') as infile: return infile.read() except OSError as e: logging.exception('Error opening {}'.format(file_path)) return ''
13a72bc939021e3046243ed9afc7014cb403652a
3,421
import numpy as np
import pandas as pd


def scrub(data):
    """
    Reads a CSV file and organizes it neatly into a DataFrame.

    Arguments:
        data {.csv} -- the csv file to be read and scrubbed

    Returns:
        DataFrame -- the logarithmic returns of selected ticker symbols
    """
    df = pd.read_csv(data, header=0, index_col=0, parse_dates=True)
    df.dropna(axis=1, inplace=True)
    logret = np.log(df).diff().iloc[1:]
    return logret
cce082da1d1f4c4308b4f30df918750f91de3f3f
3,422
import argparse def parse_args(): """read arguments from command line """ parser = argparse.ArgumentParser( description='preprocess.py', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--dataset', type=str, nargs='?', default='data/datasets/solid-state_dataset_2019-09-27_upd.json', help="Path to dataset to use") parser.add_argument('--elem-dict', type=str, nargs='?', default='data/elem_dict', help="Path to element to index dictionary without extension") parser.add_argument('--action-dict', type=str, nargs='?', default='data/action_dict', help="Path to element to index dictionary without extension") parser.add_argument('--magpie-embed', type=str, nargs='?', default='data/magpie_embed', help="Path to magpie embeddings dictionary without extension") parser.add_argument('--clean-set', type=str, nargs='?', default='data/dataset', help="Path to full clean dataset to use without extension") parser.add_argument('--train-set', type=str, nargs='?', default='data/train', help="Path to train dataset to use without extension") parser.add_argument('--test-set', type=str, nargs='?', default='data/test', help="Path to test dataset to use without extension") parser.add_argument('--val-set', type=str, nargs='?', default='data/val', help="Path to val dataset to use without extension") parser.add_argument('--test-size', type=float, nargs='?', default=0.2, help="size of clean dataset for testing") parser.add_argument('--val-size', type=float, nargs='?', default=0, help="size of clean dataset for validation") parser.add_argument('--seed', type=int, nargs='?', default=0, help="Random seed for splitting data") parser.add_argument('--ps', type=str, nargs='?', default='', help="postscript on path for save files") parser.add_argument('--max-prec', type=int, nargs='?', default=10, help='Max number of precursors per reaction.') parser.add_argument('--min-prec', type=int, nargs='?', default=2, help='Min number of precursors per reaction. Default 2') parser.add_argument('--augment', action="store_true", help="augment data with precursor rearrangements") parser.add_argument('--split-prec-amts', action="store_true", help="split out data for the baseline model") parser.add_argument('--num-elem', type=int, metavar='N', nargs='?', default=-1, help='Take N most common elements only. Default: -1 (all)') args = parser.parse_args() return args
7896e1e6edf431a3293ecc2a3970714212132322
3,423
def _get_lto_level(): """ Returns the user-specific LTO parallelism level. """ default = 32 if config.get_lto_type() else 0 return read_int("cxx", "lto", default)
1d0279d363aaa02dcf820f3a064e9b2023ae36a4
3,424
from typing import List from typing import Any def slice_label_rows(labeldf: pd.DataFrame, label: str, sample_list: List[str], row_mask: NDArray[Any]) -> NDArray[Any]: """ Selects rows from the Pandas DataFrame of labels corresponding to the samples in a particular sample_block. Args: labeldf : Pandas DataFrame containing the labels label : Header for the particular label to slice. Can be 'all' if all labels are desired. sample_list : List of sample ids corresponding to the sample_block to be sliced out. row_mask : 1D numpy array of size n_rows containing booleans used to mask samples from the rows sliced from labeldf. Returns: Matrix of [number of samples in sample_block - number of samples masked] x [number of labels to slice] """ if row_mask.size == 0: row_mask = np.full(len(sample_list), True) if label == 'all': return labeldf.loc[sample_list, :].to_numpy()[row_mask, :] else: return labeldf[label].loc[sample_list].to_numpy().reshape(-1, 1)[row_mask, :]
859bac2e577b534592a3428cd163f123608c9d72
3,425
def rollback(var_list, ckpt_folder, ckpt_file=None): """ This function provides a shortcut for reloading a model and calculating a list of variables :param var_list: :param ckpt_folder: :param ckpt_file: in case an older ckpt file is needed, provide it here, e.g. 'cifar.ckpt-6284' :return: """ global_step = global_step_config() # register a session sess = tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=False)) # initialization init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) sess.run(init_op) # load the training graph saver = tf.compat.v1.train.Saver(max_to_keep=2) ckpt = get_ckpt(ckpt_folder, ckpt_file=ckpt_file) if ckpt is None: raise FileNotFoundError('No ckpt Model found at {}.'.format(ckpt_folder)) saver.restore(sess, ckpt.model_checkpoint_path) FLAGS.print('Model reloaded.') # run the session coord = tf.train.Coordinator() # threads = tf.train.start_queue_runners(sess=sess, coord=coord) var_value, global_step_value = sess.run([var_list, global_step]) coord.request_stop() # coord.join(threads) sess.close() FLAGS.print('Variable calculated.') return var_value, global_step_value
e434ba292b842ee29ca5e61e33b24089a34b52a8
3,426
import numpy
import pandas as pd


def read_interaction_file_mat(file):
    """
    Return the adjacency matrix associated with the protein-interaction graph,
    together with the ordered list of vertices.

    :param file: dataframe containing a graph
    :type file: dataframe
    :return: the adjacency matrix of the graph and the ordered list of vertices
    :rtype: tuple
    """
    list_sommets = pd.concat([file.Sommet, file.Interaction])
    list_sommets = sorted(list(dict.fromkeys(list_sommets)))
    res_mat = numpy.zeros((len(list_sommets), len(list_sommets)), dtype=int)
    res_list = read_interaction_file_list(file)
    for interaction in res_list:
        res_mat[list_sommets.index(interaction[0])][list_sommets.index(interaction[1])] = 1
        res_mat[list_sommets.index(interaction[1])][list_sommets.index(interaction[0])] = 1
    return res_mat, list_sommets
de62b45810ada6a69b779f42c39b589092d95428
3,427
def load_figures(fig_names): """ Uses a list of the figure names to load them into a list @param fig_names: @type fig_names: @return: A list containing all the figures @rtype: list """ fig_list = [] for i, name in enumerate(fig_names): fig_list.append(pl.load(open(f"{name}.pickle", "rb"))) return fig_list
6e90a2c9c7fbbbb89d793b8e0c8e7b521f797f64
3,428
def define_mimonet_layers(input_shape, classes, regularized=False): """ Use the functional API to define the model https://keras.io/getting-started/functional-api-guide/ params: input_shape (h,w,channels) """ layers = { 'inputs' : None, 'down_path' : {}, 'bottle_neck' : None, 'up_path' : {}, 'outputs' : None } layers['inputs'] = [Input(input_shape[0],name='in1'),Input(input_shape[1],name='in2'),Input(input_shape[2],name='in3')] layers['down_path'][4] = cnv3x3Relu(64,regularized=regularized)(layers['inputs'][0]) layers['down_path'][4] = cnv3x3Relu(64,regularized=regularized)(layers['down_path'][4]) layers['down_path'][3] = crop_concatenate(layers['inputs'][1], new_down_level(128,layers['down_path'][4],regularized=regularized)) layers['down_path'][2] = crop_concatenate(layers['inputs'][2], new_down_level(256,layers['down_path'][3],regularized=regularized)) layers['down_path'][1] = new_down_level(512,layers['down_path'][2],regularized=regularized) layers['bottle_neck'] = new_down_level(1024,layers['down_path'][1],regularized=regularized) layers['up_path'][1] = new_up_level(512,layers['bottle_neck'],layers['down_path'][1],regularized=regularized) layers['up_path'][2] = new_up_level(256,layers['up_path'][1],layers['down_path'][2],padding='same',regularized=regularized) layers['up_path'][3] = new_up_level(128,layers['up_path'][2],layers['down_path'][3],padding='same',regularized=regularized) layers['up_path'][4] = new_up_level(64,layers['up_path'][3],layers['down_path'][4],regularized=regularized) auxla1, la1 = feature_mask(4,256,64,classes,layers['up_path'][2],'la1') auxla2, la2 = feature_mask(2,128,64,classes,layers['up_path'][3],'la2') auxla3 = layers['up_path'][4] layers['outputs'] = [ la1,la2 ] layers['outputs'] += [ Conv2D(classes, (1, 1), activation='softmax', name='la3')(auxla3) ] l0 = crop_concatenate(auxla1, auxla2) l0 = crop_concatenate(l0,auxla3) l0 = cnv3x3Relu(64,regularized=regularized, padding='same')(l0) l0 = cnv3x3Relu(32,regularized=regularized, padding='same')(l0) layers['outputs'] += [ Conv2D(classes, (1, 1), activation='softmax', name='l0')(l0) ] return layers
e3151f29590cbd523063e13fffc29290a19d071a
3,429
import sys

import numpy as np


def scattering_angle(sza, vza, phi, Expand=False, Degree=False):
    """
    Function scattering_angle() calculates the scattering angle.
    cos(pi-THETA) = cos(theta)cos(theta0) + sin(theta)sin(theta0)cos(phi)
    Input and output are in the unit of PI

    Parameters
    ----------
    sza: solar zenith angle in radian
    vza: viewing zenith angle in radian
    phi: relative azimuth angle in radian
    Expand: (optional) True/False to expand the dimension of calculated THETA

    Returns
    -------
    THETA: scattering angle in radian
    """
    # Change angle from degree to radian if needed
    if Degree:
        angle2rad = np.pi / 180.
        sza = sza * angle2rad
        vza = vza * angle2rad
        phi = phi * angle2rad

    # define the dimensions
    m, n, l = np.size(sza), np.size(vza), np.size(phi)

    if Expand:
        THETA = np.zeros((m, n, l))
        for k in range(l):
            for j in range(n):
                for i in range(m):
                    t1 = np.cos(vza[j]) * np.cos(sza[i]) \
                       + np.sin(vza[j]) * np.sin(sza[i]) * np.cos(phi[k])
                    t2 = np.arccos(t1)
                    THETA[i, j, k] = np.pi - t2
    else:
        # Check the dimension
        if (m != n) | (m != l):
            sys.exit("scattering_angle() error #1 in util.py")
        t1 = np.cos(vza) * np.cos(sza) \
           + np.sin(vza) * np.sin(sza) * np.cos(phi)
        t2 = np.arccos(t1)
        THETA = np.pi - t2

    if Degree:
        THETA = THETA * 180. / np.pi

    return THETA
c3c96ab2852857528495b932b7e42af9ebd719d5
3,430
def _list_subclasses(cls): """ Recursively lists all subclasses of `cls`. """ subclasses = cls.__subclasses__() for subclass in cls.__subclasses__(): subclasses += _list_subclasses(subclass) return subclasses
4cebf48916c64f32fcd5dfff28ecde7a155edb90
3,431
import logging def main(from_json: bool = True, filename: str = DEFAULT_ARGS['pipeline_config_save_path']): """ Calls the specified pipeline. :param filename: json filename :param from_json: whether to run pipeline from json file or not :return: pipeline call function """ # Parsing arguments parser = HfArgumentParser((ModelArguments, DatabuilderArguments, TrainingArguments, PipelineArguments)) model_args, databuilder_args, training_args, pipeline_args = parser.parse_json_file( json_file=filename) if from_json else parser.parse_args_into_dataclasses() # Asserting specified pipeline does exist assert pipeline_args.pipeline in PIPELINES, \ "Unknown pipeline {}, available pipelines are {}".format(pipeline_args.pipeline, list(PIPELINES.keys())) # Logging session informations logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) # Loading model & tokenizer model = AutoModelForSeq2SeqLM.from_pretrained(training_args.output_dir) tokenizer = AutoTokenizer.from_pretrained(training_args.output_dir) # Getting specified pipeline task_pipeline = PIPELINES[pipeline_args.pipeline]["impl"] logger.info(f'Pipeline has been loaded and is ready for inference. ') return task_pipeline(model=model, tokenizer=tokenizer)
ccd975889d639f3a642e820e4d7ce5e2ef583609
3,432
def put(url, **kwargs): """PUT to a URL.""" return session.put(url, **kwargs)
64618fc239164a73fa90f2348de8402c5a593394
3,433
def cod_records(mocker, cod_records_json): """Fixture for COD records metric instance.""" mocker.patch.object(RecordsMetric, 'collect', new=records_collect(cod_records_json)) return metrics.records('cod_records', 'http://www.google.com')
d4ac73421f3fcef9b175aa42c02354ff437581ad
3,434
def add_computed_document_features(input_dict):
    """
    Add a computed feature to an Annotated Document Corpus.

    :param adc: Annotated Document Corpus
    :param feature_name: the name of the new feature
    :param feature_computation: the new feature computation
    :param feature_spec: Comma separated list of names of old features used in the new feature computation.
    :return: new adc
    """
    adc = input_dict["adc"]
    compute_new_features(adc.documents, input_dict["feature_name"], input_dict["feature_computation"])
    return {"adc": adc}
2673eae356c3fc5717d7ba6aa735aa8f6f129731
3,435
def get_lite_addons(): """Load the lite addons file as a set.""" return set_from_file('validations/lite-addons.txt')
68f26084b5e7e13492f61fc65fe504d1b5d53384
3,436
def GetApexPlayerStatus_TRN(api_key, platform, playerName): """ Get the status of a player on Apex Legends. :param api_key: The API key to use. :param platform: The platform to use. :param playerName: The player name to use. """ platform = _fixplatform(platform) if _checkplatform(platform): url = f'https://public-api.tracker.gg/{API_VER}/apex/standard/profile/{platform}/{playerName}' try: res = get_request(url, {'TRN-Api-Key': api_key}) response = res[0] if response.status_code == 200: r = response.json() list_legends_data = [] my_append = list_legends_data.append for d in r['data']['segments']: if d["type"] == "overview": continue else: my_append(d) res = ApexTrackerPy.Apexclass.TRN_PlayerStatus( row_json=r, elapsed_time=res[1], platformUserId=r['data']['platformInfo']['platformUserId'], activelegend=r['data']['metadata']['activeLegend'], userlevel=r['data']['segments'][0]['stats']['level']['value'], totalkill=r['data']['segments'][0]['stats']['kills']['value'], totaldamage=r['data']['segments'][0]['stats']['damage']['value'], totalheadshots=r['data']['segments'][0]['stats']['headshots']['value'], CurrentRank=r['data']['segments'][0]['stats']['rankScore']['metadata']['rankName'], CurrentRankScore=r['data']['segments'][0]['stats']['rankScore']['value'], ArenaRankedName=r['data']['segments'][0]['stats']['arenaRankScore']['metadata']['rankName'], ArenaRankedScore=r['data']['segments'][0]['stats']['arenaRankScore']['value'], legends_json=list_legends_data, ) return res else: raise Exception('HttpError!:The API returned status code '+str(response.status_code)) except Exception as e: raise Exception('HttpError!:An error has occurred during the API call.\n'+str(e)) else: raise Exception('Invalid platform!')
296f9900e3e95afa24a0e643ed45563b57fb172a
3,437
def subFactoryGet(fixture, **kwargs): """ To be used in fixture definition (or in the kwargs of the fixture constructor) to reference a other fixture using the :meth:`.BaseFix.get` method. :param fixture: Desired fixture :param kwargs: *Optional:* key words to overwrite properties of this fixture :return: Proxy object for the desired fixture including the altered properties """ return SubFactory(fixture, METHOD_GET, **kwargs)
480db102897a3edd682acef6ee95a42b6f937b03
3,438
def hello(): """Return the dashboard homepage.""" return render_template('index.html')
adac182b3c8dd2ae0f17425205203c5493499f19
3,439
def rapid_ping(client, dst_ip):
    """Run a rapid 10-count ping to dst_ip over the given SSH client.

    :returns: True if any ping replies were seen, False otherwise
    """
    status = False
    # run ping command with count 10 rapidly
    command = 'exec cli ping ' + dst_ip + ' count 10 rapid'
    stdin, stdout, stderr = client.exec_command(command, get_pty=True)
    for line in iter(stdout.readline, ""):
        if "!!!!!!!!!" in line:
            status = True
    return status
9533995412eb10ee66b437c7e28b697dcb156b50
3,440
from pathlib import Path def test_data_dir(): """ Returns path of test datas like excel Used for test or notebook """ path = Path(__file__).parent.parent / 'testdata' return path
f410f26276797204dd100d884b162f893b5ce4aa
3,441
import sys def classify_audio(model, callback, labels_file=None, inference_overlap_ratio=0.1, buffer_size_secs=2.0, buffer_write_size_secs=0.1, audio_device_index=None): """ Continuously classifies audio samples from the microphone, yielding results to your own callback function. Your callback function receives the top classification result for every inference performed. Although the audio sample size is fixed based on the model input size, you can adjust the rate of inference with ``inference_overlap_ratio``. A larger overlap means the model runs inference more frequently but with larger amounts of sample data shared between inferences, which can result in duplicate results. Args: model (str): Path to a ``.tflite`` file. callback: A function that takes two arguments (in order): a string for the classification label, and a float for the prediction score. The function must return a boolean: True to continue running inference, or False to stop. labels_file (str): Path to a labels file (required only if the model does not include metadata labels). If provided, this overrides the labels file provided in the model metadata. inference_overlap_ratio (float): The amount of audio that should overlap between each sample used for inference. May be 0.0 up to (but not including) 1.0. For example, if set to 0.5 and the model takes a one-second sample as input, the model will run an inference every half second, or if set to 0, then there is no overlap and it will run once each second. buffer_size_secs (float): The length of audio to hold in the audio buffer. buffer_write_size_secs (float): The length of audio to capture into the buffer with each sampling from the microphone. audio_device_index (int): The audio input device index to use. """ if not model: raise ValueError('model must be specified') if buffer_size_secs <= 0.0: raise ValueError('buffer_size_secs must be positive') if buffer_write_size_secs <= 0.0: raise ValueError('buffer_write_size_secs must be positive') if inference_overlap_ratio < 0.0 or \ inference_overlap_ratio >= 1.0: raise ValueError('inference_overlap_ratio must be in [0.0 .. 
1.0)') sample_rate_hz, channels = model_audio_properties(model) if labels_file is not None: labels = dataset.read_label_file(labels_file) else: labels = utils.read_labels_from_metadata(model) print('Say one of the following:') for value in labels.values(): print(' %s' % value) interpreter = tflite.Interpreter(model_path=model) interpreter.allocate_tensors() # Input tensor input_details = interpreter.get_input_details() waveform_input_index = input_details[0]['index'] _, num_audio_frames = input_details[0]['shape'] waveform = np.zeros(num_audio_frames, dtype=np.float32) # Output tensor output_details = interpreter.get_output_details() scores_output_index = output_details[0]['index'] ring_buffer_size = int(buffer_size_secs * sample_rate_hz) frames_per_buffer = int(buffer_write_size_secs * sample_rate_hz) remove_size = int((1.0 - inference_overlap_ratio) * len(waveform)) rb = ring_buffer.ConcurrentRingBuffer( np.zeros(ring_buffer_size, dtype=np.float32)) def stream_callback(in_data, frame_count, time_info, status): try: rb.write(np.frombuffer(in_data, dtype=np.float32), block=False) except ring_buffer.Overflow: print('WARNING: Dropping input audio buffer', file=sys.stderr) return None, pyaudio.paContinue with pyaudio_stream(format=pyaudio.paFloat32, channels=channels, rate=sample_rate_hz, frames_per_buffer=frames_per_buffer, stream_callback=stream_callback, input=True, input_device_index=audio_device_index) as stream: keep_listening = True while keep_listening: rb.read(waveform, remove_size=remove_size) interpreter.set_tensor(waveform_input_index, [waveform]) interpreter.invoke() scores = interpreter.get_tensor(scores_output_index) scores = np.mean(scores, axis=0) prediction = np.argmax(scores) keep_listening = callback(labels[prediction], scores[prediction])
d8462b689b8e5e3ad24933265f4b6b740e71ace4
3,442
def is_leap_year(year: int) -> bool: """Returns whether the given year is a leap year""" if year % 100 == 0: return year % 400 == 0 else: return year % 4 == 0
fccaa3de6378e62b937748c671a21aa5427781e8
3,443
import os def find_manifests(pkgnames, verbose=True): """ return a dictionary keyed by pkgname with the found manifest's full path """ (abspath, dirname) = (os.path.abspath, os.path.dirname) (ret,stdout,stderr) = spawn("git rev-parse --show-toplevel") root = stdout[0] if ret == 0 else os.getcwd() jsonfiles = all_json_files(root) def ensure_json(pkgname): return pkgname if pkgname.endswith(".json") else "{}.json".format(pkgname) def match(pkg, jsonfile): return jsonfile.endswith(ensure_json(pkg)) and is_manifest(jsonfile, verbose) return {p:j for p in pkgnames for j in jsonfiles if match(p,j)}
a23fdab47c8c26a154e484036c52d77b6b4d3ed1
3,444
import numpy as np


def is_valid_distribution(qk: np.ndarray, axis: int) -> bool:
    """valid is e.g.: [], [1.0], [0.5, 0.5]
    not valid is e.g.: [-1.0], [0.6, 0.6], [np.nan], [np.nan, 0.6], [1.2]"""
    assert 0 <= axis < len(qk.shape)
    if qk.shape[axis] == 0:
        return True
    if np.any(qk < 0.0):
        return False
    if np.any(qk > 1.0):
        return False
    # tolerant comparison so float rounding does not reject valid distributions
    result = np.all(np.isclose(np.sum(qk, axis=axis), 1.0))
    return result
fdbb1ac82f2d5cf93843f3d8d1f4f4d02a3ab408
3,445
def srt(data, cube, **kwargs): """ Define Solar Rotational Tomography model with optional masking of data and map areas. Can also define priors. Parameters ---------- data: InfoArray data cube cube: FitsArray map cube obj_rmin: float Object minimal radius. Areas below obj_rmin are masked out. obj_rmax: float Object maximal radius. Areas above obj_rmax are masked out. data_rmin: float Data minimal radius. Areas below data_rmin are masked out. data_rmax: float Data maximal radius. Areas above data_rmax are masked out. mask_negative: boolean If true, negative values in the data are masked out. Returns ------- P : The projector with masking D : Smoothness priors obj_mask : object mask array data_mask : data mask array """ # Model : it is Solar rotational tomography, so obstacle="sun". data_mask = solar.define_data_mask(data, **kwargs) P = siddon_lo(data.header, cube.header, mask=data_mask, obstacle="sun") D = smoothness_prior(cube, kwargs.get("height_prior", False)) P, D, obj_mask = _apply_object_mask(P, D, cube, **kwargs) return P, D, obj_mask, data_mask
e0af1f5d0d00e8651c3668091165beaf0aaa6f55
3,446
def get(status_id): """Fetches a status of previously submitted PushFunds request. Returns a status of :func:`~pyvdp.visadirect.fundstransfer.MultiPushFundsTransactionsModel` request by transaction identifier, returned with 202 response. :param str status_id: **Required**. Transaction status identifier. :return: Dictionary with VDP API response. **Usage:** .. code:: python from pyvdp.visadirect.fundstransfer import multipushfundstransactions status_id = "1491819372_186_81_l73c003_VDP_ARM" result = pushfundstransactions.send(status_id) print(result) """ query_string = '/' + status_id c = VisaDirectDispatcher(resource='visadirect', api='fundstransfer', method='multipushfundstransactions', http_verb='GET', query_string=query_string) return c.send()
fb8951355f342405e93f44747e670afcaf094322
3,447
import os def getLocalDir(jobdir, dirname=''): """ Assemble destination directory for job results. Raises: TargetDirExistsError: Destination for job results already exists. """ if dirname: dstDir = os.path.join(dirname, jobdir) else: dstDir = os.path.join(os.getcwd(), jobdir) if not os.path.exists(dstDir): return dstDir else: raise TargetDirExistsError(dstDir)
a7bd503b86a60761f09abb139e696efadbf899b5
3,448
def eval_pop_thread(args): """ Evaluates solutions, returns a list of floats, between 0 and 1 (probabilities of survival and reproduction). """ m_solutions, m_state_hash_table, id_mi = args[0], args[1], args[2] step = int(N_POP/N_PROC) prob_surv = np.zeros(step) for index_sol in range(len(m_solutions)): print("Solution ", index_sol, " Id: ", id_mi) sol = m_solutions[index_sol] tmp_points = 0 max_sol = np.max(sol) for state_key in m_state_hash_table: state = m_state_hash_table[state_key] tmp_w = compute_heuristic(state_key, 'WHITE', sol) tmp_b = compute_heuristic(state_key, 'BLACK', sol) if tmp_w < 0 and state['value']['white'] / state['games'] > 0.5: tmp_points += 1 elif tmp_w > 0 and state['value']['black'] / state['games'] > 0.5: tmp_points += 1 elif 0+ERROR_ZERO * max_sol >= tmp_w >= 0-ERROR_ZERO * max_sol and \ state['value']['black'] / state['games'] < 0.5 and state['value']['white'] / state['games'] < 0.5: tmp_points += 1 if tmp_b < 0 and state['value']['black'] / state['games'] > 0.5: tmp_points += 1 elif tmp_b > 0 and state['value']['white'] / state['games'] > 0.5: tmp_points += 1 elif 0 + ERROR_ZERO * max_sol >= tmp_b >= 0-ERROR_ZERO * max_sol and \ state['value']['black'] / state['games'] < 0.5 and state['value']['white'] / state['games'] < 0.5: tmp_points += 1 tmp_points /= 2 prob_surv[index_sol] = tmp_points return prob_surv
8acdb0acae737a8bf48578ec48c3dcc1b66c7adb
3,449
def _mini_batch_convergence(model, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, context, verbose=0): """Helper function to encapsulate the early stopping logic""" # Normalize inertia to be able to compare values when # batch_size changes batch_inertia /= model.batch_size centers_squared_diff /= model.batch_size # Compute an Exponentially Weighted Average of the squared # diff to monitor the convergence while discarding # minibatch-local stochastic variability: # https://en.wikipedia.org/wiki/Moving_average ewa_diff = context.get('ewa_diff') ewa_inertia = context.get('ewa_inertia') if ewa_diff is None: ewa_diff = centers_squared_diff ewa_inertia = batch_inertia else: alpha = float(model.batch_size) * 2.0 / (n_samples + 1) alpha = 1.0 if alpha > 1.0 else alpha ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha # Log progress to be able to monitor convergence if verbose: progress_msg = ( 'Minibatch iteration %d/%d:' ' mean batch inertia: %f, ewa inertia: %f ' % ( iteration_idx + 1, n_iter, batch_inertia, ewa_inertia)) print(progress_msg) # Early stopping based on absolute tolerance on squared change of # centers position (using EWA smoothing) if tol > 0.0 and ewa_diff <= tol: if verbose: print('Converged (small centers change) at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # Early stopping heuristic due to lack of improvement on smoothed inertia ewa_inertia_min = context.get('ewa_inertia_min') no_improvement = context.get('no_improvement', 0) if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min: no_improvement = 0 ewa_inertia_min = ewa_inertia else: no_improvement += 1 if (model.max_no_improvement is not None and no_improvement >= model.max_no_improvement): if verbose: print('Converged (lack of improvement in inertia)' ' at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # update the convergence context to maintain state across successive calls: context['ewa_diff'] = ewa_diff context['ewa_inertia'] = ewa_inertia context['ewa_inertia_min'] = ewa_inertia_min context['no_improvement'] = no_improvement return False
701488e530913bfc2e5d382a544679315dc1f013
3,450
def rho_MC(delta, rhoeq=4.39e-38): """ returns the characteristic density of an axion minicluster in [solar masses/km^3] forming from an overdensity with overdensity parameter delta. rhoeq is the matter density at matter radiation equality in [solar masses/km^3] """ return 140 * (1 + delta) * delta**3 * rhoeq
f28e382cfcf661199728363b3ebe86f25e92760c
3,451
from typing import Dict from typing import Union from typing import List from typing import Optional def _parse_parameter_from_value( string: str, parameter_to_wordlist_mapping: Dict[Union[TimeResolution, PeriodType, Parameter], List[List[str]]] ) -> Optional[Union[TimeResolution, PeriodType, Parameter]]: """ Function to parse a parameter from a given string based on a list of parameter enumerations and corresponding list of words. Args: string: string containing the circa name of the parameter parameter_to_wordlist_mapping: mapping of parameter and list of words Returns: None or one of the found enumerations """ string_split = string.split("_") for parameter, wordlist in parameter_to_wordlist_mapping.items(): cond1 = len(wordlist) == len(string_split) cond2 = _find_any_one_word_from_wordlist(string_split, wordlist) if cond1 and cond2: return parameter return None
0fa1e2f7edf5e6be31e0e2ae514ecc22a512e8f7
3,452
def AffineMomentsF(I, returnShape=False): """ Input: - I: A 2D image Output: - Out: A (1x6) vector containing 6 moment features """ # ************************************************************************ # Modified for MRI feature extraction by the Department of Diagnostic # and Interventional Radiology, University Hospital of Tuebingen, Germany # and the Institute of Signal Processing and System Theory University of # Stuttgart, Germany. Last modified: November 2016 # # This implementation is part of ImFEATbox, a toolbox for image feature # extraction and analysis. Available online at: # https://github.com/annikaliebgott/ImFEATbox # # Contact: [email protected] # ************************************************************************ # # Implementation based on: Tomas Suk, Jan Flusser, "Combined Blur and # Affine Moment Invariants and their use in # Pattern Recognition", Pattern Recognition, # vol. 36, 2003. # # Implemented by: Asad Ali. Email: [email protected] if returnShape: return (6,1) # x,y = np.nonzero(I[:,:,1]) TODO: how to handle color image? x,y = np.nonzero(I) pixelValues = I[x,y] m00 = np.sum(pixelValues) x = x - np.sum(x*pixelValues)/m00 y = y - np.sum(y*pixelValues)/m00 ## calculate moments # second order central moments m20 = CentralMoments(x,y,2,0,pixelValues) m02 = CentralMoments(x,y,0,2,pixelValues) m11 = CentralMoments(x,y,1,1,pixelValues) # third order central moments m30 = CentralMoments(x,y,3,0,pixelValues) m03 = CentralMoments(x,y,0,3,pixelValues) m21 = CentralMoments(x,y,2,1,pixelValues) m12 = CentralMoments(x,y,1,2,pixelValues) # fouth order central moments m40 = CentralMoments(x,y,4,0,pixelValues) m04 = CentralMoments(x,y,0,4,pixelValues) m31 = CentralMoments(x,y,3,1,pixelValues) m13 = CentralMoments(x,y,1,3,pixelValues) m22 = CentralMoments(x,y,2,2,pixelValues) # fifth order central moments m50 = CentralMoments(x,y,5,0,pixelValues) m05 = CentralMoments(x,y,0,5,pixelValues) m41 = CentralMoments(x,y,4,1,pixelValues) m14 = CentralMoments(x,y,1,4,pixelValues) m32 = CentralMoments(x,y,3,2,pixelValues) m23 = CentralMoments(x,y,2,3,pixelValues) # seventh order central moments m70 = CentralMoments(x,y,7,0,pixelValues) m07 = CentralMoments(x,y,0,7,pixelValues) m16 = CentralMoments(x,y,1,6,pixelValues) m61 = CentralMoments(x,y,6,1,pixelValues) m52 = CentralMoments(x,y,5,2,pixelValues) m25 = CentralMoments(x,y,2,5,pixelValues) m43 = CentralMoments(x,y,4,3,pixelValues) m34 = CentralMoments(x,y,3,4,pixelValues) # for blur invariance we recompute certain values m50 = m50 - (10*m30*m20/m00) m41 = m41 - (2*(3*m21*m20 + 2*m30*m11)/m00) m32 = m32 - ((3*m12*m20 + m30*m02 + 6*m21*m11)/m00) m23 = m23 - ((3*m21*m02 + m03*m20 + 6*m12*m11)/m00) m14 = m14 - (2*(3*m12*m02 + 2*m03*m11)/m00) m05 = m05 - (10*m03*m02/m00) # for blur invariance seventh order moments recomputed m70 = m70 - 7 * (3*m50*m20 + 5*m30*m40)/m00 + (210*m30*m20**2 / m00**2) m61 = m61 - (6*m50*m11 + 15*m41*m20 + 15*m40*m21 + 20*m31*m30)/m00 + 30*(3*m21*m20**2 + 4*m30*m20*m11)/m00**2 m52 = m52 - (m50*m02 +10*m30*m22 + 10*m32*m20 + 20*m31*m21 +10*m41*m11 + 5*m40*m12)/m00 + 10* (3*m12*m20**2 + 2*m30*m20*m02 + 4*m30*m11**2 + 12*m21*m20*m11)/m00**2 m43 = m43 - (m40*m03 + 18*m21*m22 + 12*m31*m12 + 4*m30*m13 + 3*m41*m02 + 12*m32*m11 + 6*m23*m20)/m00 + 6*(m03*m20**2 + 4*m30*m11*m02 + 12*m21*m11**2 + 12*m12*m20*m11 + 6*m21*m02*m20) m34 = m34 - (m04*m30 + 18*m12*m22 + 12*m13*m21 + 4*m03*m31 + 3*m14*m20 + 12*m23*m11 + 6*m32*m02)/m00 + 6 *(m30*m02**2 + 4*m03*m11*m20 + 12*m12*m11**2 + 12*m21*m02*m11 + 6*m12*m20*m02)/m00**2 m25 = 
m25 - (m05*m20 + 10*m03*m22 + 10*m23*m02 + 20*m13*m12 + 10*m14*m11 + 5*m04*m21)/m00 + 10*(3*m21*m02**2 + 2*m03*m02*m20 +4*m03*m11**2 + 12*m12*m02*m11)/m00**2 m16 = m16 - (6*m05*m11 + 15*m14*m02 + 15*m04*m12 + 20*m13*m03)/m00 + 30*(3*m12*m02**2 + 4*m03*m02*m11)/m00**2 m07 = m07 - 7*(3*m05*m02 + 5*m03*m04)/m00 + (210*m03*m02**2 / m00**2) # first invariant computed from the determinant of the polynomial I1 = (m30**2*m03**2 - 6*m30*m21*m12*m03 + 4*m30*m12**3 + 4*m21**3*m03 - 3*m21**2*m12**2) / m00**10 I2 = (m50**2*m05**2 - 10*m50*m41*m14*m05 + 4*m50*m32*m23*m05 + 16*m50*m32*m14**2 - 12*m50*m23**2*m14 + 16*m41**2*m23*m05 + 9*m41**2*m14**2 - 12*m41*m32**2*m05 - 76*m41*m32*m23*m14 + 48*m41*m23**3 + 48*m32**3*m14 - 32*m32**2*m23**2)/m00**14 I3 = (m30**2*m12*m05 - m30**2*m03*m14 - m30*m21**2*m05 - 2*m30*m21*m12*m14 + 4*m30*m21*m03*m23 + 2*m30*m12**2*m23 - 4*m30*m12*m03*m32 + m30*m03**2*m41 + 3*m21**3*m14 - 6*m21**2*m12*m23 - 2*m21**2*m03*m32 + 6*m21*m12**2*m32 + 2*m21*m12*m03*m41 - m21*m03**2*m50 - 3*m12**3*m41 + m12**2*m03*m50) / m00**11 I4 = (2*m30*m12*m41*m05 - 8*m30*m12*m32*m14 + 6*m30*m12*m23**2 - m30*m03*m50*m05 + 3*m30*m03*m41*m14 - 2*m30*m03*m32*m23 - 2*m21**2*m41*m05 + 8*m21**2*m32*m14 - 6*m21**2*m23**2 + m21*m12*m50*m05 - 3*m21*m12*m41*m14 + 2*m21*m12*m32*m23 + 2*m21*m03*m50*m14 - 8*m21*m03*m41*m23 + 6*m21*m03*m32**2 - 2*m12**2*m50*m14 + 8*m12**2*m41*m23 - 6*m12**2*m32**2)/m00**12 I5 = (m30*m41*m23*m05 - m30*m41*m14**2 - m30*m32**2*m05 + 2*m30*m32*m23*m14 - m30*m23**3 - m21*m50*m23*m05 + m21*m50*m14**2 + m21*m41*m32*m05 - m21*m41*m23*m14 - m21*m32**2*m14 + m21*m32*m23**2 + m12*m50*m32*m05 - m12*m50*m23*m14 - m12*m41**2*m05 + m12*m41*m32*m14 + m12*m41*m23**2 - m12*m32**2*m23 - m03*m50*m32*m14 + m03*m50*m23**2 + m03*m41**2*m14 - 2*m03*m41*m32*m23 + m03*m32**3)/m00**13 I6 = (m70**2*m07**2 - 14*m70*m61*m16*m07 + 18*m70*m52*m25*m07 + 24*m70*m52*m16**2 - 10*m70*m43*m34*m07 - 60*m70*m43*m25*m16 + 40*m70*m34**2*m16 + 24*m61**2*m25*m07 + 25*m61**2*m16**2 - 60*m61*m52*m34*m07 - 234*m61*m52*m25*m16 + 40*m61*m43**2*m07 + 50*m61*m43*m34*m16 + 360*m61*m43*m25**2 - 240*m61*m34**2*m25 + 360*m52**2*m34*m16 + 81*m52**2*m25**2 - 240*m52*m43**2*m16 - 990*m52*m43*m34*m25 + 600*m52*m34**3 + 600*m43**3*m25 - 375*m43**2*m34**2)/m00**18 ## return feature vector Out = np.array([I1, I2, I3, I4, I5, I6]) return Out # Calculate Central Moments
cb275aeacb4c4350a738b424bae0a284f4d40043
3,453
def render(scene): """ :param scene: Scene description :return: [H, W, 3] image """ # Construct rays from the camera's eye position through the screen coordinates camera = scene['camera'] eye, ray_dir, H, W = generate_rays(camera) # Ray-object intersections scene_objects = scene['objects'] obj_intersections, ray_dist, normals, material_idx = ray_object_intersections(eye, ray_dir, scene_objects) # Valid distances pixel_dist = ray_dist valid_pixels = (camera['near'] <= ray_dist) & (ray_dist <= camera['far']) pixel_dist[~valid_pixels] = np.inf # Will have to use gather operation for TF and pytorch # Nearest object needs to be compared for valid regions only nearest_obj = np.argmin(pixel_dist, axis=0) C = np.arange(0, nearest_obj.size) # pixel idx # Create depth image for visualization # use nearest_obj for gather/select the pixel color im_depth = pixel_dist[nearest_obj, C].reshape(H, W) ############################## # Fragment processing ############################## # Lighting color_table = scene['colors'] light_pos = scene['lights']['pos'] light_clr_idx = scene['lights']['color_idx'] light_colors = color_table[light_clr_idx] # Generate the fragments """ Get the normal and material for the visible objects. """ frag_normals = normals[nearest_obj, C] frag_pos = obj_intersections[nearest_obj, C] frag_albedo = scene['materials']['albedo'][material_idx[nearest_obj]] # Fragment shading light_dir = light_pos[np.newaxis, :] - frag_pos[:, np.newaxis, :] light_dir_norm = np.sqrt(np.sum(light_dir ** 2, axis=-1))[..., np.newaxis] light_dir_norm[light_dir_norm <= 0 | np.isinf(light_dir_norm)] = 1 light_dir = ops.nonzero_divide(light_dir, light_dir_norm) im_color = np.sum(frag_normals[:, np.newaxis, :] * light_dir, axis=-1)[..., np.newaxis] * \ light_colors[np.newaxis, ...] * frag_albedo[:, np.newaxis, :] im = np.sum(im_color, axis=1).reshape(H, W, 3) im[(im_depth < camera['near']) | (im_depth > camera['far'])] = 0 # clip negative values im[im < 0] = 0 # Tonemapping if 'tonemap' in scene: im = tonemap(im, **scene['tonemap']) return {'image': im, 'depth': im_depth, 'ray_dist': ray_dist, 'obj_dist': pixel_dist, 'nearest': nearest_obj.reshape(H, W), 'ray_dir': ray_dir, 'valid_pixels': valid_pixels }
35f8cf34fea266034a76f3857213fcb83e334174
3,454
def state_fidelity(state1, state2): """Return the state fidelity between two quantum states. Either input may be a state vector, or a density matrix. The state fidelity (F) for two density matrices is defined as:: F(rho1, rho2) = Tr[sqrt(sqrt(rho1).rho2.sqrt(rho1))] ^ 2 For a pure state and mixed state the fidelity is given by:: F(|psi1>, rho2) = <psi1|rho2|psi1> For two pure states the fidelity is given by:: F(|psi1>, |psi2>) = |<psi1|psi2>|^2 Args: state1 (array_like): a quantum state vector or density matrix. state2 (array_like): a quantum state vector or density matrix. Returns: array_like: The state fidelity F(state1, state2). """ # convert input to numpy arrays s1 = np.array(state1) s2 = np.array(state2) # fidelity of two state vectors if s1.ndim == 1 and s2.ndim == 1: return np.abs(s2.conj().dot(s1)) ** 2 # fidelity of vector and density matrix elif s1.ndim == 1: # psi = s1, rho = s2 return np.abs(s1.conj().dot(s2).dot(s1)) elif s2.ndim == 1: # psi = s2, rho = s1 return np.abs(s2.conj().dot(s1).dot(s2)) # fidelity of two density matrices s1sq = _funm_svd(s1, np.sqrt) s2sq = _funm_svd(s2, np.sqrt) return np.linalg.norm(s1sq.dot(s2sq), ord='nuc') ** 2
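# Hedged usage sketch for state_fidelity above (not part of the original entry).
# The single-qubit state vectors are illustrative values; only the pure-state
# branch of the function is exercised, and numpy is assumed importable as np.
import numpy as np

plus = np.array([1.0, 1.0]) / np.sqrt(2)   # |+>
zero = np.array([1.0, 0.0])                # |0>
print(state_fidelity(plus, zero))          # ~0.5, i.e. |<0|+>|^2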
9df10584ce9376df5690ebaccaa07046778b097c
3,455
def process_state(request):
    """Process a GET or POST request that queries province data.

    If a parsing error occurs, an HTTP 400 response is returned.

    Args:
        request (flask.Request): Flask GET or POST request.

    Returns:
        flask.Response: HTTP response

    """
    return _process_entity(request, N.STATES, params.PARAMS_STATES, {
        N.ID: 'ids',
        N.NAME: 'name',
        N.INTERSECTION: 'geo_shape_ids',
        N.EXACT: 'exact',
        N.ORDER: 'order',
        N.FIELDS: 'fields',
        N.OFFSET: 'offset',
        N.MAX: 'size'
    })
8e748dd73845438f768ecd34730a94c2e8696387
3,456
def is_ascii(string):
    """Return True if the string contains only US-ASCII encoded characters."""
    def is_ascii_char(char):
        return 0 <= ord(char) <= 127
    return all(is_ascii_char(char) for char in string)
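# Quick usage sketch for is_ascii above; the sample strings are illustrative.
assert is_ascii("hello world")
assert not is_ascii("héllo")   # 'é' falls outside the 0-127 range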
cd3aeddcad7610de83af6ec5a67ecbac95f11fd8
3,457
from typing import Union from typing import Tuple from typing import Optional from typing import List def _get_predictions_from_data( model: Union[Model, SKLEARN_MODELS], data: Union[ tf.data.Dataset, Tuple[Inputs, Outputs], Tuple[Inputs, Outputs, Paths], ], batch_size: Optional[int], tensor_maps_in: Optional[List[TensorMap]], tensor_maps_out: Optional[List[TensorMap]], ) -> Tuple[Predictions, Outputs, Optional[Paths]]: """ Get model predictions, output data, and paths from data source. Data must not be infinite. :param model: Model :param data: finite tensorflow Dataset or tuple of inputs, outputs, and optionally paths :param batch_size: Number of samples to use in a batch, required if data is a tuple input and output numpy arrays :return: Tuple of predictions as a list of numpy arrays, a dictionary of output data, and optionally paths """ if isinstance(data, tuple): if len(data) == 2: input_data, output_data = data paths = None elif len(data) == 3: input_data, output_data, paths = data else: raise ValueError( f"Expected 2 or 3 elements to dataset tuple, got {len(data)}", ) if batch_size is None: raise ValueError( "When providing dataset as tuple of inputs and outputs, batch_size " "is required, got {batch_size}", ) y_predictions = model.predict(x=input_data, batch_size=batch_size) elif isinstance(data, tf.data.Dataset): y_prediction_batches = defaultdict(list) output_data_batches = defaultdict(list) id_batches = [] if isinstance(model, Model): for batch in data: output_data_batch = batch[BATCH_OUTPUT_INDEX] for output_name, output_tensor in output_data_batch.items(): output_data_batches[output_name].append(output_tensor.numpy()) batch_y_predictions = model.predict(batch[BATCH_INPUT_INDEX]) if not isinstance(batch_y_predictions, list): batch_y_predictions = [batch_y_predictions] for prediction_idx, batch_y_prediction in enumerate( batch_y_predictions, ): y_prediction_batches[prediction_idx].append(batch_y_prediction) if len(batch) == 3: id_batches.append(batch[BATCH_IDS_INDEX].numpy().astype(str)) y_predictions = [ np.concatenate(y_prediction_batches[prediction_idx]) for prediction_idx in sorted(y_prediction_batches) ] elif isinstance(model, SKLEARN_MODELS.__args__): data = get_dicts_of_arrays_from_dataset(dataset=data) assert all(tm.axes == 1 for tm in tensor_maps_in + tensor_maps_out) assert len(tensor_maps_out) == 1 # Isolate arrays from datasets for desired tensor maps X = get_array_from_dict_of_arrays( tensor_maps=tensor_maps_in, data=data[BATCH_INPUT_INDEX], drop_redundant_columns=False, ) y_predictions = model.predict_proba(X) for output_name, output_tensor in data[BATCH_OUTPUT_INDEX].items(): output_data_batches[output_name].append(output_tensor) if len(data) == 3: id_batches.append(data[BATCH_IDS_INDEX]) else: raise NotImplementedError( f"Cannot perform inference on model of type {type(model).__name}", ) # Iterate over batches and concatenate into dict of arrays output_data = { output_name: np.concatenate(output_data_batches[output_name]) for output_name in output_data_batches } paths = None if len(id_batches) == 0 else np.concatenate(id_batches).tolist() else: raise NotImplementedError( "Cannot get data for inference from data of type " "{type(data).__name__}: {data}", ) if not isinstance(y_predictions, list): y_predictions = [y_predictions] return y_predictions, output_data, paths
29a91481989d283ac1dddd831a9746ada5971a5a
3,458
import pickle def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height): """ Read and return the viewport data """ VIEW_PATH = '../../Viewport/' view_info = pickle.load(open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb'), encoding='latin1') if dataset == 1: max_frame = int(view_info[-1][0]*1.0*fps/milisec) for i in range(len(view_info)-1): frame = int(view_info[i][0]*1.0*fps/milisec) frame += int(offset*1.0*fps/milisec) frame_nos.append(frame) if(frame > max_frame): break X={} X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width) X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height) data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height))) elif dataset == 2: for k in range(len(view_info)-1): if view_info[k][0]<=offset+60 and view_info[k+1][0]>offset+60: max_frame = int(view_info[k][0]*1.0*fps/milisec) break for k in range(len(view_info)-1): if view_info[k][0]<=offset and view_info[k+1][0]>offset: min_index = k+1 break prev_frame = 0 for i in range(min_index,len(view_info)-1): frame = int((view_info[i][0])*1.0*fps/milisec) if frame == prev_frame: continue if(frame > max_frame): break frame_nos.append(frame) X={} X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width) X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height) data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height))) prev_frame = frame return data, frame_nos, max_frame
f78f3b7505b3ca5ab2cac67f2634b71cfa383707
3,459
import os import requests def getListGroups(config): """ Get list of groups """ print("Retrieve list of group") data = None grpList = None __grpList = gitlabGroupList() if (DUMMY_DATA): curDir = os.path.dirname(os.path.abspath(__file__)) testFile = getFullFilePath(GROUPS_TEST_FILE) with open (testFile, 'rt') as f: data = f.read() f.close() else: # retrieve data from server url = getApiUrl(config, "groups") logD("URL " + url) token = config.getToken() hdrs = {"PRIVATE-TOKEN":config.getToken()} __totalPage = 0 __page = 1 while True: logD("Page %d" % (__page)) params = {'page': __page} logD("header %s" % hdrs) resp = requests.get(url, headers=hdrs, params=params) logD("resp status_code %s" % resp.status_code) if (resp.status_code == 200): data = resp.content logD (resp.headers) if (len(resp.headers.get('X-Next-Page')) > 0): __page = int(resp.headers.get('X-Next-Page')) else: __page = 0 logD("next page %d" % (__page)) else: __page = 0 break if (data is not None) and (len(data) > 0): logD("data %s" % data) __grpList.parseData(data) __totalPage += 1 if (config.getMaxGroup() is not None) and (__grpList.getLen() >= config.getMaxGroup()): print("Reach max %s/%s" % (__grpList.getLen(), config.getMaxGroup())) break if (__page == 0): #ok, reach end, out break if (__totalPage > 500): # 500 pages? no way, something wrong, out print("SOMETHING WRONG, total is to big, out") break print("Total pages %d" % (__totalPage)) return __grpList
4c50964a5954d0132659297e0469119a150e20fd
3,460
import copy def call(args, version): """Converts callList into functionString.""" # Find keyword keywords = [i for i in args if i in Variables.keywords(version)] # Too many keywords is a syntax error. if len(keywords) > 1: raise UdebsSyntaxError("CallList contains to many keywords '{}'".format(args)) # No keywords creates a tuple object. elif len(keywords) == 0: return "(" + ",".join(formatS(i, version) for i in args) + ")" keyword = keywords[0] # Get and fix data for this keyword. data = copy.copy(Variables.default) data.update(Variables.keywords(version)[keyword]) # Create dict of values current = args.index(keyword) nodes = copy.copy(data["default"]) for index in range(len(args)): value = "$" if index >= current else "-$" value += str(abs(index - current)) if args[index] != keyword: nodes[value] = args[index] # Force strings into quoted arguments. for string in data["string"]: nodes[string] = "'" + str(nodes[string]).replace("'", "\\'") + "'" # Claim keyword arguments. kwargs = {} for key, value in data["kwargs"].items(): if value in nodes: new_value = nodes[value] del nodes[value] else: new_value = value kwargs[key] = formatS(new_value, version) arguments = [] # Insert positional arguments for key in data["args"]: if key in nodes: arguments.append(formatS(nodes[key], version)) del nodes[key] else: arguments.append(formatS(key, version)) # Insert ... arguments. if data["all"]: for key in sorted(nodes.keys(), key=lambda x: int(x.replace("$", ""))): arguments.append(formatS(nodes[key], version)) del nodes[key] if len(nodes) > 0: raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(" ".join(args))) # Insert keyword arguments. for key in sorted(kwargs.keys()): arguments.append(str(key) + "=" + str(kwargs[key])) return data["f"] + "(" + ",".join(arguments) + ")"
0f5be8582903973ec3ae4077e51a11e084bcc2f8
3,461
from typing import List from typing import Dict from typing import Any import ray def get_object_locations(obj_refs: List[ObjectRef], timeout_ms: int = -1 ) -> Dict[ObjectRef, Dict[str, Any]]: """Lookup the locations for a list of objects. It returns a dict maps from an object to its location. The dict excludes those objects whose location lookup failed. Args: object_refs (List[ObjectRef]): List of object refs. timeout_ms (int): The maximum amount of time in micro seconds to wait before returning. Wait infinitely if it's negative. Returns: A dict maps from an object to its location. The dict excludes those objects whose location lookup failed. The location is stored as a dict with following attributes: - node_ids (List[str]): The hex IDs of the nodes that have a copy of this object. - object_size (int): The size of data + metadata in bytes. Raises: RuntimeError: if the processes were not started by ray.init(). ray.exceptions.GetTimeoutError: if it couldn't finish the request in time. """ if not ray.is_initialized(): raise RuntimeError("Ray hasn't been initialized.") return ray.worker.global_worker.core_worker.get_object_locations( obj_refs, timeout_ms)
c7b4aa6761024853468e09f846af0ada8f7ebbba
3,462
from conf.hosts import getPlateformObject
from core.exceptions import EnvironmentDoesNotExist


def remove_host(plateform=None, name=None, environment=None):
    """
    Remove Host Object from Platform Object attribute hosts and return the updated Platform Object.

    :param: plateform: host's plateform (same as type yaml file) passed by user
    :param: name: host's name passed by user
    :param: environment: host's environment passed by user
    :type: plateform: list of one str
    :type: name: list of one str
    :type: environment: list of one str
    :return: Updated Plateform
    :rtype: Plateform Object

    .. seealso:: heimdall.conf.hosts.getPlateformObject(), heimdall.core.plateform.Plateform
    """
    p = getPlateformObject(plateform[0])
    try:
        if not p.check_environment(environment[0]):
            raise EnvironmentDoesNotExist('Environment %s in plateform %s does not exist!'
                                          % (environment[0], p.name), p.name)
    except EnvironmentDoesNotExist as ede:
        print(ede)
        exit(ede.code)
    if name[0] == -1:
        # remove all hosts in the environment
        p.environment[environment[0]] = []
    else:
        [p.remove_host(host) for host in p.environment[environment[0]] for n in name if host.name == n]
    return p
bc8e8681718f763c382230297087b9ce27a37e20
3,463
import argparse


def command_line_parsing():
    """
    Parse the command line arguments, set the global TESTING flag and return the
    current position as a (latitude, longitude) tuple (either the default or the
    one given on the command line).
    """
    global TESTING
    parser = argparse.ArgumentParser(description='Food Truck Finder.')
    parser.add_argument('latlong', metavar='latlong', type=str, nargs='?',
                        help='current location as latitude,longitude '
                             '(no spaces)')
    parser.add_argument('--test', dest='am_testing', action='store_const',
                        const=True, default=False,
                        help='testing mode with canned data')
    args = parser.parse_args()
    TESTING = args.am_testing
    if args.latlong is None:
        return DEFAULT_POSITION
    parts = args.latlong.split(',')
    return (float(parts[0]), float(parts[1]))
511fcd1893767fd93a73f112f7e6230d05ea2562
3,464
def read_samplesheet(config): """ read samplesheet """ sample_sheet = pd.read_csv(config["info_dict"]["flowcell_path"]+"/SampleSheet.csv", sep = ",", skiprows=[0]) # sample_sheet = sample_sheet.fillna("no_bc") sample_sheet['I7_Index_ID'] = sample_sheet['I7_Index_ID'].str.replace('No_index1','no_bc', regex = True) # TODO!! need to be applied on bc kit too! # assert(len(sample_sheet["barcode_kits"].unique())==1) # bc_kit = sample_sheet["barcode_kits"].unique()[0] if any(sample_sheet['I7_Index_ID'].str.contains('no_bc')): bc_kit = "no_bc" else: bc_kit = "SQK-PCB109" # TODO just for testing print(sample_sheet) data=dict() for index, row in sample_sheet.iterrows(): assert(row["Sample_ID"] not in data.keys()) data[row["Sample_ID"]] = dict({"Sample_Name": row["Sample_Name"], "Sample_Project": row["Sample_Project"], # "barcode_kits": row["barcode_kits"], TODO "barcode_kits": bc_kit, # TODO just for testing "index_id": row["I7_Index_ID"], "Sample_ID": row["Sample_ID"]}) print(bc_kit) return bc_kit, data
7bd47c5af471862600fd9c2522f018a463ddeac4
3,465
def convert_to_float_if_possible(x, elsevalue=MISSING):
    """
    Return the float version of value x, or elsevalue (MISSING or another
    specified value) if the conversion fails.
    """
    if isnonnumeric(x):
        return elsevalue
    else:
        return float(x)
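# Hedged usage sketch. MISSING and isnonnumeric are referenced but not defined in
# this entry (they exist elsewhere in the original module); the stand-ins below are
# hypothetical placeholders added only so the sketch runs.
MISSING = None

def isnonnumeric(x):
    try:
        float(x)
        return False
    except (TypeError, ValueError):
        return True

print(convert_to_float_if_possible("3.14"))  # -> 3.14
print(convert_to_float_if_possible("abc"))   # -> None (the MISSING stand-in)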
74b1ca5d4ed63758ef9d56fb2be94cbbdec00b56
3,466
from typing import Union import requests def resolve( names: Union[list, pd.Series, str], data_source_ids: list = None, resolve_once: bool = False, best_match_only: bool = False, with_context: bool = False, with_vernaculars: bool = False, with_canonical_ranks: bool = False ) -> pd.DataFrame: """ Receives a list of names and resolves each against the entire resolver database or against specific data sources using the Global Names Resolver (GNR) API. Underlying resolving and scoring algorithms are described at: http://resolver.globalnames.org/about Parameters ---------- names List of species names to resolve. data_source_ids List of specific data sources IDs to resolve against. A list of all the available data sources and their IDs can be found at: http://resolver.globalnames.org/data_sources. resolve_once Find the first available match instead of matches across all data sources with all possible renderings of a name. best_match_only Returns just one result with the highest score. with_context Reduce the likelihood of matches to taxonomic homonyms. When True, a common taxonomic context is calculated for all supplied names from matches in data sources that have classification tree paths. Names out of determined context are penalized during score calculation. with_vernaculars Return 'vernacular' field to present common names provided by a data source for a particular match. with_canonical_ranks Returns 'canonical_form' with infraspecific ranks, if they are present. Returns ------- pd.DataFrame DataFrame where rows are the result for each match. """ if isinstance(names, str): names = [names] if data_source_ids is None: data_source_ids = [] # Apparently, the GNR API does not accept Booleans so they need to be # converted to lowercase strings first. params = { "data": "\n".join(names), "data_source_ids": "|".join(data_source_ids), "resolve_once": str(resolve_once).lower(), "best_match_only": str(best_match_only).lower(), "with_context": str(with_context).lower(), "with_vernaculars": str(with_vernaculars).lower(), "with_canonical_ranks": str(with_canonical_ranks).lower() } try: response = requests.post(API_URL, json=params) response.raise_for_status() except requests.exceptions.HTTPError as err: raise Exception(f"Error calling Global Name Resolver API. {err}") data = response.json()["data"] # The pd.json_normalize() function does not work when record_path # is not found in every single item inside the list of elements # passed. In some cases, the GNR API returns items without this key, # so it needs to be added (including an empty dictionary) before # normalizing the result. for item in data: if "results" not in item: item["results"] = [{}] return pd.json_normalize(data, record_path="results", meta="supplied_name_string")
a25bd275e8222058e5926bf9a8b53de7a1cb3ccc
3,467
import numpy def polarisation_frame_from_wcs(wcs, shape) -> PolarisationFrame: """Convert wcs to polarisation_frame See FITS definition in Table 29 of https://fits.gsfc.nasa.gov/standard40/fits_standard40draft1.pdf or subsequent revision 1 I Standard Stokes unpolarized 2 Q Standard Stokes linear 3 U Standard Stokes linear 4 V Standard Stokes circular −1 RR Right-right circular −2 LL Left-left circular −3 RL Right-left cross-circular −4 LR Left-right cross-circular −5 XX X parallel linear −6 YY Y parallel linear −7 XY XY cross linear −8 YX YX cross linear stokesI [1] stokesIQUV [1,2,3,4] circular [-1,-2,-3,-4] linear [-5,-6,-7,-8] For example:: pol_frame = polarisation_frame_from_wcs(im.wcs, im.shape) :param wcs: World Coordinate System :param shape: Shape corresponding to wcs :returns: Polarisation_Frame object """ # The third axis should be stokes: polarisation_frame = None if len(shape) == 2: polarisation_frame = PolarisationFrame("stokesI") else: npol = shape[1] pol = wcs.sub(['stokes']).wcs_pix2world(range(npol), 0)[0] pol = numpy.array(pol, dtype='int') for key in PolarisationFrame.fits_codes.keys(): keypol = numpy.array(PolarisationFrame.fits_codes[key]) if numpy.array_equal(pol, keypol): polarisation_frame = PolarisationFrame(key) return polarisation_frame if polarisation_frame is None: raise ValueError("Cannot determine polarisation code") assert isinstance(polarisation_frame, PolarisationFrame) return polarisation_frame
a2ed057be23add9a6c2041a243286bf06519306f
3,468
import random import json import logging def _update_traffic_class(class_name, class_type, **kwargs): """ Perform a PUT call to version-up a traffic class. This is required whenever entries of a traffic class are changed in any way. :param class_name: Alphanumeric name of the traffic class :param class_type: Class type should be one of "ipv4," "ipv6," or "mac" :param kwargs: keyword s: requests.session object with loaded cookie jar keyword url: URL in main() function :return: True if successful, False otherwise """ traffic_class_data = _get_traffic_class(class_name, class_type, **kwargs) # # must remove these fields from the data since they can't be modified # traffic_class_data.pop('origin', None) # traffic_class_data.pop('name', None) # traffic_class_data.pop('type', None) traffic_class_data['cfg_version'] = random.randrange(9007199254740991) target_url = kwargs["url"] + "system/classes/%s,%s" % (class_name, class_type) put_data = json.dumps(traffic_class_data, sort_keys=True, indent=4) response = kwargs["s"].put(target_url, data=put_data, verify=False) if not common_ops._response_ok(response, "PUT"): logging.warning("FAIL: Updating %s traffic class '%s' failed with status code %d: %s" % (class_type, class_name, response.status_code, response.text)) return False else: logging.info("SUCCESS: Updating %s traffic class '%s' succeeded" % (class_type, class_name)) return True
8a19fedcce20a94a3e5c8f06f7fb1ee901dcc6dd
3,469
import numpy as np


def eff_w_error(n_before, n_after):
    """
    Compute a selection efficiency and its binomial error.

    n_before = entries before the selection
    n_after = entries after the selection
    """
    eff = n_after/n_before
    eff_error = np.sqrt(eff*(1-eff)/n_before)
    return (eff, eff_error)
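# Quick usage sketch for eff_w_error above; the event counts are made up.
eff, err = eff_w_error(n_before=1000, n_after=250)
print(eff, err)   # 0.25 and sqrt(0.25 * 0.75 / 1000) ~= 0.0137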
307945af0acc2eb04686b5453f2905be1111944a
3,470
import numpy as np
import scipy.signal


def hurst(x):
    """Estimate the Hurst exponent of one or more timeseries.

    The estimation is based on the second order discrete derivative.

    Parameters
    ----------
    x : 2D numpy array, shape (n_series, n_samples)
        The timeseries to estimate the Hurst exponent for, one per row.

    Returns
    -------
    h : 1D numpy array, shape (n_series,)
        The estimated Hurst exponent for each timeseries.
    """
    y = np.cumsum(np.diff(x, axis=1), axis=1)

    b1 = [1, -2, 1]
    b2 = [1, 0, -2, 0, 1]

    # second order derivative
    y1 = scipy.signal.lfilter(b1, 1, y, axis=1)
    y1 = y1[:, len(b1) - 1:-1]  # first values contain filter artifacts

    # wider second order derivative
    y2 = scipy.signal.lfilter(b2, 1, y, axis=1)
    y2 = y2[:, len(b2) - 1:-1]  # first values contain filter artifacts

    s1 = np.mean(y1 ** 2, axis=1)
    s2 = np.mean(y2 ** 2, axis=1)

    return 0.5 * np.log2(s2 / s1)
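# Hedged usage sketch: hurst above works row-wise (axis=1), so a single series is
# passed with shape (1, n_samples). An ordinary random walk should give an
# exponent near 0.5; the exact value varies with the random draw.
import numpy as np

rng = np.random.default_rng(0)
walk = np.cumsum(rng.standard_normal((1, 4096)), axis=1)
print(hurst(walk))   # roughly [0.5]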
0632f0e4c5912410568c25774c1da66c160ff78e
3,471
import yaml def explode_on_matched_columns(df, safe_columns, other_columns): """Given the name of multiple columns where each entry is a string encoding a list, and where for each row the lists in all columns are the same length, return a dataframe where the each row is transformed into len(list) rows, each of which contains one entry of the various lists and the remaining columns are identical. The columns are split into 'safe_columns', which must always contain strings that encode lists and 'other_columns' which can sometimes be np.nan. If a column from other_columns has a np.nan entry in some row, it will be replaced with a list of np.nan values, with the list the same length as the lists in safe_columns for that row. Lists from different rows need not have the same number of elements.""" stringlist_columns = safe_columns + other_columns copied_df = df.copy() # Only keep rows where at least one of the stringlist columns is present copied_df = copied_df.dropna(subset=stringlist_columns, how='all') # Map the safe columns from strings (strings encoding lists) to lists for stringlist_column in safe_columns: copied_df[stringlist_column] = copied_df[stringlist_column].map(yaml.safe_load) for column in other_columns: # Replace any nan values with an empty list, matching the list lengths # from one of the safe columns copied_df[column] = replace_nan_with_empty_list(column, safe_columns[0], copied_df) exploded = pd.DataFrame({ col:np.repeat(copied_df[col].values, copied_df[stringlist_columns[0]].str.len()) for col in copied_df.columns.drop(stringlist_columns)} ) exploded_with_col = exploded.assign(**{column_to_expand:np.concatenate(copied_df[column_to_expand].values) for column_to_expand in stringlist_columns})[df.columns] return exploded_with_col
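# Hedged usage sketch for explode_on_matched_columns above. The entry also relies
# on pd, np and a replace_nan_with_empty_list helper defined elsewhere in the
# original module; passing other_columns=[] sidesteps the missing helper, and the
# imports below are assumptions made only for this sketch.
import numpy as np
import pandas as pd

df = pd.DataFrame({"id": [1, 2], "names": ["[a, b]", "[c]"]})
print(explode_on_matched_columns(df, safe_columns=["names"], other_columns=[]))
#    id names
# 0   1     a
# 1   1     b
# 2   2     c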
4f38310e563c8081ee7297ec2af2211ca8084504
3,472
import networkx def plot_time_series_graph(val_matrix, var_names=None, fig_ax=None, figsize=None, sig_thres=None, link_matrix=None, link_colorbar_label='MCI', save_name=None, link_width=None, arrow_linewidth=20., vmin_edges=-1, vmax_edges=1., edge_ticks=.4, cmap_edges='RdBu_r', order=None, node_size=10, arrowhead_size=20, curved_radius=.2, label_fontsize=10, alpha=1., node_label_size=10, label_space_left=0.1, label_space_top=0., network_lower_bound=0.2, undirected_style='dashed' ): """Creates a time series graph. This is still in beta. The time series graph's links are colored by val_matrix. Parameters ---------- val_matrix : array_like Matrix of shape (N, N, tau_max+1) containing test statistic values. var_names : list, optional (default: None) List of variable names. If None, range(N) is used. fig_ax : tuple of figure and axis object, optional (default: None) Figure and axes instance. If None they are created. figsize : tuple Size of figure. sig_thres : array-like, optional (default: None) Matrix of significance thresholds. Must be of same shape as val_matrix. Either sig_thres or link_matrix has to be provided. link_matrix : bool array-like, optional (default: None) Matrix of significant links. Must be of same shape as val_matrix. Either sig_thres or link_matrix has to be provided. save_name : str, optional (default: None) Name of figure file to save figure. If None, figure is shown in window. link_colorbar_label : str, optional (default: 'MCI') Test statistic label. link_width : array-like, optional (default: None) Array of val_matrix.shape specifying relative link width with maximum given by arrow_linewidth. If None, all links have same width. order : list, optional (default: None) order of variables from top to bottom. arrow_linewidth : float, optional (default: 30) Linewidth. vmin_edges : float, optional (default: -1) Link colorbar scale lower bound. vmax_edges : float, optional (default: 1) Link colorbar scale upper bound. edge_ticks : float, optional (default: 0.4) Link tick mark interval. cmap_edges : str, optional (default: 'RdBu_r') Colormap for links. node_size : int, optional (default: 20) Node size. arrowhead_size : int, optional (default: 20) Size of link arrow head. Passed on to FancyArrowPatch object. curved_radius, float, optional (default: 0.2) Curvature of links. Passed on to FancyArrowPatch object. label_fontsize : int, optional (default: 10) Fontsize of colorbar labels. alpha : float, optional (default: 1.) Opacity. node_label_size : int, optional (default: 10) Fontsize of node labels. link_label_fontsize : int, optional (default: 6) Fontsize of link labels. label_space_left : float, optional (default: 0.1) Fraction of horizontal figure space to allocate left of plot for labels. label_space_top : float, optional (default: 0.) Fraction of vertical figure space to allocate top of plot for labels. network_lower_bound : float, optional (default: 0.2) Fraction of vertical space below graph plot. undirected_style : string, optional (default: 'dashed') Style of undirected contemporaneous links. 
""" if fig_ax is None: fig = pyplot.figure(figsize=figsize) ax = fig.add_subplot(111, frame_on=False) else: fig, ax = fig_ax if sig_thres is None and link_matrix is None: raise ValueError("Need to specify either sig_thres or link_matrix") elif sig_thres is not None and link_matrix is None: link_matrix = np.abs(val_matrix) >= sig_thres if link_width is not None and not np.all(link_width >= 0.): raise ValueError("link_width must be non-negative") N, N, dummy = val_matrix.shape tau_max = dummy - 1 max_lag = tau_max + 1 if var_names is None: var_names = range(N) if order is None: order = range(N) if set(order) != set(range(N)): raise ValueError("order must be a permutation of range(N)") def translate(row, lag): return row * max_lag + lag # Define graph links by absolute maximum (positive or negative like for # partial correlation) tsg = np.zeros((N * max_lag, N * max_lag)) tsg_attr = np.zeros((N * max_lag, N * max_lag)) for i, j, tau in np.column_stack(np.where(link_matrix)): # print '\n',i, j, tau # print np.where(nonmasked[:,j])[0] for t in range(max_lag): if (0 <= translate(i, t - tau) and translate(i, t - tau) % max_lag <= translate(j, t) % max_lag): # print translate(i, t-tau), translate(j, t), val_matrix[i,j,tau] tsg[translate(i, t - tau), translate(j, t) ] = val_matrix[i, j, tau] tsg_attr[translate(i, t - tau), translate(j, t) ] = val_matrix[i, j, tau] G = networkx.DiGraph(tsg) # node_color = np.zeros(N) # list of all strengths for color map all_strengths = [] # Add attributes, contemporaneous and directed links are handled separately for (u, v, dic) in G.edges(data=True): dic['directed_attribute'] = None if u != v: if u % max_lag == v % max_lag: dic['undirected'] = True dic['directed'] = False else: dic['undirected'] = False dic['directed'] = True dic['undirected_alpha'] = alpha dic['undirected_color'] = _get_absmax( np.array([[[tsg_attr[u, v], tsg_attr[v, u]]]]) ).squeeze() dic['undirected_width'] = arrow_linewidth all_strengths.append(dic['undirected_color']) dic['directed_alpha'] = alpha dic['directed_width'] = arrow_linewidth # value at argmax of average dic['directed_color'] = tsg_attr[u, v] all_strengths.append(dic['directed_color']) dic['label'] = None dic['directed_edge'] = False dic['directed_edgecolor'] = None dic['undirected_edge'] = False dic['undirected_edgecolor'] = None # If no links are present, set value to zero if len(all_strengths) == 0: all_strengths = [0.] posarray = np.zeros((N * max_lag, 2)) for i in range(N * max_lag): posarray[i] = np.array([(i % max_lag), (1. - i // max_lag)]) pos_tmp = {} for i in range(N * max_lag): # for n in range(N): # for tau in range(max_lag): # i = n*N + tau pos_tmp[i] = np.array([((i % max_lag) - posarray.min(axis=0)[0]) / (posarray.max(axis=0)[0] - posarray.min(axis=0)[0]), ((1. 
- i // max_lag) - posarray.min(axis=0)[1]) / (posarray.max(axis=0)[1] - posarray.min(axis=0)[1])]) pos = {} for n in range(N): for tau in range(max_lag): pos[n * max_lag + tau] = pos_tmp[order[n] * max_lag + tau] node_rings = {0: {'sizes': None, 'color_array': None, 'label': '', 'colorbar': False, } } # ] for v in range(max_lag)] node_labels = ['' for i in range(N * max_lag)] _draw_network_with_curved_edges( fig=fig, ax=ax, G=deepcopy(G), pos=pos, # dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array # or None, 'cmap':string, node_rings=node_rings, # 'vmin':float or None, 'vmax':float or None, 'label':string or None}} node_labels=node_labels, node_label_size=node_label_size, node_alpha=alpha, standard_size=node_size, standard_cmap='OrRd', standard_color='grey', log_sizes=False, cmap_links=cmap_edges, links_vmin=vmin_edges, links_vmax=vmax_edges, links_ticks=edge_ticks, cmap_links_edges='YlOrRd', links_edges_vmin=-1., links_edges_vmax=1., links_edges_ticks=.2, link_edge_colorbar_label='link_edge', arrowstyle='simple', arrowhead_size=arrowhead_size, curved_radius=curved_radius, label_fontsize=label_fontsize, label_fraction=.5, link_colorbar_label=link_colorbar_label, undirected_curved=True, network_lower_bound=network_lower_bound, undirected_style=undirected_style ) for i in range(N): trans = transforms.blended_transform_factory( fig.transFigure, ax.transData) ax.text(label_space_left, pos[order[i] * max_lag][1], '%s' % str(var_names[order[i]]), fontsize=label_fontsize, horizontalalignment='left', verticalalignment='center', transform=trans) for tau in np.arange(max_lag - 1, -1, -1): trans = transforms.blended_transform_factory( ax.transData, fig.transFigure) if tau == max_lag - 1: ax.text(pos[tau][0], 1.-label_space_top, r'$t$', fontsize=label_fontsize, horizontalalignment='center', verticalalignment='top', transform=trans) else: ax.text(pos[tau][0], 1.-label_space_top, r'$t-%s$' % str(max_lag - tau - 1), fontsize=label_fontsize, horizontalalignment='center', verticalalignment='top', transform=trans) # fig.subplots_adjust(left=0.1, right=.98, bottom=.25, top=.9) # savestring = os.path.expanduser(save_name) if save_name is not None: pyplot.savefig(save_name) else: pyplot.show()
e4acb78dbb8809f3b1604b4a44437c775c0cdfb7
3,473
def get_configuration_docname(doctype=None, txt=None, searchfield=None, start=None, page_len=None, filters=None): """get relevant fields of the configuration doctype""" return frappe.db.sql("""select soi.configuration_docname, so.name, so.customer from `tabSales Order Item` soi inner join `tabSales Order` so on soi.parent=so.name where soi.configuration_doctype = %(configuration_doctype)s and soi.configuration_docname is not null and (soi.configuration_docname like %(txt)s or so.name like %(txt)s)""", {'configuration_doctype':filters.get('configuration_doctype'), 'txt': "%%%s%%" % txt})
fb9494aacfbff6ec77f0e512daab35ffcd9c7fb9
3,474
import pickle def run_cnn_dist( X_bytes: bytes, ) -> bytes: """Run distributed CNN on bytes_in and return the calculated result.""" X = pickle.loads(X_bytes) # TODO: <He> Process the X data with the fancy neural network. result_data = X # MARK: Metadata could be added here to mark the processing status of the # data. bytes_out = pickle.dumps(result_data) return bytes_out
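# Quick usage sketch: run_cnn_dist above currently just round-trips the payload
# through pickle (the CNN step is still marked TODO in the source).
payload = pickle.dumps([1.0, 2.0, 3.0])
print(pickle.loads(run_cnn_dist(payload)))   # [1.0, 2.0, 3.0]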
4a1996ed0ddc0ae8be0543c1de016f845b99020e
3,475
import requests import re def skymapper_search(searchrad,waveband,targetra,targetdec): """ Search for stars within search radius of target in Skymapper catalogue """ # set up arrays and url star_ra = [] star_dec = [] star_mag = [] star_magerr = [] sky_ra = [] sky_dec = [] sky_u_petro = [] sky_u_petro_err = [] sky_u_psf = [] sky_u_psf_err = [] sky_v_petro = [] sky_v_petro_err = [] sky_v_psf = [] sky_v_psf_err = [] sky_g_petro = [] sky_g_petro_err = [] sky_g_psf = [] sky_g_psf_err = [] sky_r_petro = [] sky_r_petro_err = [] sky_r_psf = [] sky_r_psf_err = [] sky_i_petro = [] sky_i_petro_err = [] sky_i_psf = [] sky_i_psf_err = [] sky_z_petro = [] sky_z_petro_err = [] sky_z_psf = [] sky_z_psf_err = [] sr_deg = float(searchrad*0.0166667) sky_url = "http://skymapper.anu.edu.au/sm-cone/query?RA={0}&DEC={1}&SR={2}" sky_url = sky_url.format(targetra,targetdec,sr_deg) # Attempt to parse url to find stars within search radius of filter try: skytable = requests.get(sky_url,timeout=30).text sc = 0 for lines in skytable.split('<TR>'): sc += 1 if sc >= 2: columns = re.split("<TD>|</TD>|\n",lines) sky_ra.append(columns[5]) sky_dec.append(columns[7]) sky_u_petro.append(columns[33]) sky_u_petro_err.append(columns[35]) sky_u_psf.append(columns[29]) sky_u_psf_err.append(columns[31]) sky_v_petro.append(columns[41]) sky_v_petro_err.append(columns[43]) sky_v_psf.append(columns[37]) sky_v_psf_err.append(columns[39]) sky_g_petro.append(columns[49]) sky_g_petro_err.append(columns[51]) sky_g_psf.append(columns[45]) sky_g_psf_err.append(columns[47]) sky_r_petro.append(columns[57]) sky_r_petro_err.append(columns[59]) sky_r_psf.append(columns[53]) sky_r_psf_err.append(columns[55]) sky_i_petro.append(columns[65]) sky_i_petro_err.append(columns[67]) sky_i_psf.append(columns[61]) sky_i_psf_err.append(columns[63]) sky_z_petro.append(columns[73]) sky_z_petro_err.append(columns[75]) sky_z_psf.append(columns[69]) sky_z_psf_err.append(columns[71]) # Raise error if something goes wrong except requests.exceptions.RequestException as e: print ('\nException raised for Skymapper url!!') print (e) print ('') # Save parsed star properties for a given filter and remove extended # shaped sources for i in range(len(sky_ra)): if (sky_g_psf[i] != '' and sky_g_petro[i] != '' and sky_r_psf[i] != '' and sky_r_petro[i] != ''): if (np.abs(float(sky_g_psf[i]) - float(sky_g_petro[i])) < 0.25 and np.abs(float(sky_r_psf[i]) - float(sky_r_petro[i])) < 0.25): if waveband == 'V': V_mag = float(sky_g_psf[i])-0.0038 V_mag = (V_mag-0.5784*(float(sky_g_psf[i]) -float(sky_r_psf[i]))) gerr = float(sky_g_psf_err[i])**2 rerr = float(sky_r_psf_err[i])**2 V_magerr = np.sqrt((0.5784*rerr)**2+(0.4216*gerr)**2) star_mag.append(V_mag) star_magerr.append(V_magerr) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'B': B_mag = float(sky_g_psf[i])+0.2271 B_mag = (B_mag+0.3130*(float(sky_g_psf[i])- float(sky_r_psf[i]))) gerr = float(sky_g_psf_err[i])**2 rerr = float(sky_r_psf_err[i])**2 B_magerr = np.sqrt((0.3130*rerr)**2+(1.3130*gerr)**2) star_mag.append(B_mag) star_magerr.append(B_magerr) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'R': R_mag = float(sky_r_psf[i])-0.0971 R_mag = (R_mag-0.1837*(float(sky_g_psf[i])- float(sky_r_psf[i]))) gerr = float(sky_g_psf_err[i])**2 rerr = float(sky_r_psf_err[i])**2 R_magerr = np.sqrt((1.1837*rerr)**2+(0.1837*gerr)**2) star_mag.append(R_mag) star_magerr.append(R_magerr) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'u': if 
(sky_u_psf[i] != '' and sky_u_petro[i] != ''): if (np.abs(float(sky_u_psf[i]) - float(sky_u_petro[i]))<0.25): star_mag.append(float(sky_u_psf[i])) star_magerr.append(float(sky_u_psf_err[i])) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'g': if (sky_g_psf[i] != '' and sky_g_petro[i] != ''): if (np.abs(float(sky_g_psf[i]) - float(sky_g_petro[i]))<0.25): star_mag.append(float(sky_g_psf[i])) star_magerr.append(float(sky_g_psf_err[i])) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'r': if (sky_r_psf[i] != '' and sky_r_petro[i] != ''): if (np.abs(float(sky_r_psf[i]) - float(sky_r_petro[i]))<0.25): star_mag.append(float(sky_r_psf[i])) star_magerr.append(float(sky_r_psf_err[i])) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'i' : if (sky_i_psf[i] != '' and sky_i_petro[i] != ''): if (np.abs(float(sky_i_psf[i]) - float(sky_i_petro[i]))<0.25): star_mag.append(float(sky_i_psf[i])) star_magerr.append(float(sky_i_psf_err[i])) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) if waveband == 'z' : if (sky_z_psf[i] != '' and sky_z_petro[i] != ''): if (np.abs(float(sky_z_psf[i]) - float(sky_z_petro[i]))<0.25): star_mag.append(float(sky_z_psf[i])) star_magerr.append(float(sky_z_psf_err[i])) star_ra.append(float(sky_ra[i])) star_dec.append(float(sky_dec[i])) # Create list with catalogue name star_cat = ['SkyMapper'] * len(star_ra) return star_ra,star_dec,star_mag,star_magerr,star_cat
3ebed23f2ec73f6a8e859e645a2c3b5f936ac674
3,476
import random def Decimal_to_Hexadecimal(x : str) -> str: """ It Converts the Given Decimal Number into Hexadecimal Number System of Base `16` and takes input in `str` form Args: x `(str)` : It is the Positional Argument by order which stores the Decimal Input from User. Returns (str) : The Output `returned` is in the form of a `str` which is the Hexadecimal Converted Number. """ """ For Recognising the Dot """ list1 = list(x) left = [] right = [] flag = False for val in range(len(list1)): if list1[val] == "." or flag == True: if list1[val] != ".": right.append(list1[val]) else: flag = True continue else: num = int(list1[val]) left.append(num) """ For Shifting the left elements in list into a variable """ leftmost = 0 for val in left: leftmost = leftmost*10 + val """ For Shifting the right elements in list into a variable """ rightmost = '' for val in right: rightmost = rightmost + val dict = {10: "A", 11 : "B", 12 : "C", 13 : "D", 14 : "E", 15 : "F"} """ Calculation of the left part """ cur = 0 rem = 0 next = leftmost list_of_numbers = [] while next != 0: rem = next%16 if rem > 9: if rem in dict: rem = dict[rem] list_of_numbers.append(rem) else: pass else: list_of_numbers.append(rem) cur = next//16 next = cur list_of_numbers.reverse() numbers = '' for val in range(len(list_of_numbers)): string = str(list_of_numbers[val]) numbers = numbers + string """ Calculation of the right part """ zeros = '1' + len(rightmost)*'0' length = int(zeros) next = int(rightmost)/length list_of_numbers = [] length = 0 while length <= 20: if next * 16< 1: list_of_numbers.append(0) next = (next * 16) else: next = (next * 16) num2 = int(next) if num2 > 9: if num2 in dict: alter = dict[num2] list_of_numbers.append(alter) else: pass else: list_of_numbers.append(num2) num = int(next) next = next - num pass length += 1 numbers2 = '' for val in range(len(list_of_numbers)): number = str(list_of_numbers[val]) numbers2 = numbers2 + number # print(f"The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')}") color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN]) return f" {BOLD} {color} The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')} {RESET}"
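# Hedged usage sketch. The entry references ANSI colour constants (RED, GREEN,
# YELLOW, BLUE, MAGENTA, CYAN, BOLD, RESET) defined elsewhere in the original
# module; the empty stand-ins below are placeholders added only so the call runs.
RED = GREEN = YELLOW = BLUE = MAGENTA = CYAN = BOLD = RESET = ""
print(Decimal_to_Hexadecimal("26.5"))
# -> " ...The Decimal -> Hexadecimal Conversion is 1A.8 " (26.5 decimal == 1A.8 hex)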
ffe5050a834a9111a50f28c425f1bd21f60605ff
3,477
def hardcorenas_d(pretrained=False, **kwargs): """ hardcorenas_D """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) return model
ff9be560a0061101fd672bd115fbfd8920537177
3,478
import tqdm def refine(weights, trees, X, Y, epochs, lr, batch_size, optimizer, verbose): """Performs SGD using the MSE loss over the leaf nodes of the given trees on the given data. The weights of each tree are respected during optimization but not optimized. Args: weights (np.array): The weights of the trees. trees (list of Tree): The trees. X (2d np.array): The data. Y (np.array): The targe. epochs (int): The number of epochs SGD is performed. lr (float): The learning rate of SGD. batch_size (int): The batch size of SGD optimizer (str): The optimizer used for optimization. Can be {{"sgd", "adam"}}. verbose (bool): If True outputs the loss during optimization. Returns: list of trees: The refined trees. """ n_classes = trees[0].n_classes if batch_size > X.shape[0]: if verbose: print("WARNING: The batch size for SGD is larger than the dataset supplied: batch_size = {} > X.shape[0] = {}. Using batch_size = X.shape[0]".format(batch_size, X.shape[0])) batch_size = X.shape[0] # To make the following SGD somewhat efficient this code extracts all the leaf nodes and gathers them in an array. To do so it iterates over all trees and all nodes in the trees. Each leaf node is added to the leafs array and the corresponding node.id is stored in mappings. For scikit-learn trees this would be much simpler as they already offer a dedicated leaf field: # leafs = [] # for tree in trees: # tmp = tree.tree_.value / tree.tree_.value.sum(axis=(1,2))[:,np.newaxis,np.newaxis] # leafs.append(tmp.squeeze(1)) mappings = [] leafs = [] for t, w in zip(trees, weights): leaf_mapping = {} l = [] for i, n in enumerate(t.nodes): if n.prediction is not None: leaf_mapping[n.id] = len(l) # Normalize the values in the leaf nodes for SGD. This is usually a better initialization pred = np.array(n.prediction) / sum(n.prediction) l.append(pred) mappings.append(leaf_mapping) leafs.append(np.array(l)) if optimizer == "adam": m = [] v = [] t = 1 for l in leafs: m.append(np.zeros_like(l)) v.append(np.zeros_like(l)) for epoch in range(epochs): mini_batches = create_mini_batches(X, Y, batch_size, True) batch_cnt = 0 loss_sum = 0 accuracy_sum = 0 with tqdm(total=X.shape[0], ncols=150, disable = not verbose) as pbar: for x,y in mini_batches: # Prepare the target and apply all trees target_one_hot = np.array( [ [1.0 if yi == i else 0.0 for i in range(n_classes)] for yi in y] ) indices = [apply(t, m, x) for t,m in zip(trees, mappings)] pred = [] for i, idx, w in zip(range(len(trees)), indices, weights): pred.append(w * leafs[i][idx]) pred = np.array(pred) fbar = pred.sum(axis=0) # SGD if optimizer == "sgd": deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes #* 1.0 / len(trees) for i, idx in zip(range(len(trees)), indices): np.add.at(leafs[i], idx, - lr * deriv) else: # Adam deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes #* 1.0 / len(trees) beta1 = 0.9 beta2 = 0.999 for i, idx in zip(range(len(trees)), indices): grad = np.zeros_like(leafs[i]) np.add.at(grad, idx, deriv) m[i] = beta1 * m[i] + (1-beta1) * grad v[i] = beta2 * v[i] + (1-beta2) * (grad ** 2) m_corrected = m[i] / (1-beta1**t) v_corrected = v[i] / (1-beta2**t) leafs[i] += - lr * m_corrected / (np.sqrt(v_corrected) + 1e-8) t += 1 # compute some statistics loss_sum += ((fbar - target_one_hot)**2).mean() accuracy_sum += (fbar.argmax(axis=1) == y).mean() * 100.0 batch_cnt += 1 pbar.update(x.shape[0]) desc = '[{}/{}] loss {:2.4f} accuracy {:2.4f}'.format( epoch, epochs-1, loss_sum / batch_cnt, accuracy_sum / batch_cnt, ) 
pbar.set_description(desc) # Copy the optimized leafs back into the trees with the pre-computed mapping for t, m, l in zip(trees, mappings, leafs): for nid, i in m.items(): t.nodes[nid].prediction = l[i].tolist() return trees
6704e36b61ac9bda65ba0e118590aa2b627c8e2a
3,479
from typing import ClassVar from typing import Any from typing import Dict def fetch_db_object(cls: ClassVar, body: Any): """Fetch a database object via SQLAlchemy. :param cls: the class of object to fetch. :param body: the body of the object. If the body is None then None is returned (for the case where no object exists), if the body is already of type cls then the body is returned as the object and if the body is a dictionary with the key 'id' a query is made to fetch the given object. :return: the object. """ if body is None: item = None elif isinstance(body, cls): item = body elif isinstance(body, Dict): if "id" not in body: raise AttributeError(f"id not found in {body}") id = body["id"] item = session_.query(cls).filter(cls.id == id).one_or_none() if item is None: raise ValueError(f"{item} with id {id} not found") else: raise ValueError(f"Unknown item type {body}") return item
ae4a96ac9875d5b936df1d9c05f8a022a9a4b51e
3,480
def should_skip_cred_test(): """ Returns `True` if a test requiring credentials should be skipped. Otherwise returns `False` """ if username is None or password is None: return True return False
c5f45a20f7febc100a2f2eb950697c91837e0281
3,481
from typing import List from pathlib import Path def list_input_images(img_dir_or_csv: str, bucket_name: str = None, glob_patterns: List = None): """ Create list of images from given directory or csv file. :param img_dir_or_csv: (str) directory containing input images or csv with list of images :param bucket_name: (str, optional) name of aws s3 bucket :param glob_patterns: (list of str) if directory is given as input (not csv), these are the glob patterns that will be used to find desired images returns list of dictionaries where keys are "tif" and values are paths to found images. "meta" key is also added if input is csv and second column contains a metadata file. Then, value is path to metadata file. """ if bucket_name: s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) if img_dir_or_csv.endswith('.csv'): bucket.download_file(img_dir_or_csv, 'img_csv_file.csv') list_img = read_csv('img_csv_file.csv') else: raise NotImplementedError( 'Specify a csv file containing images for inference. Directory input not implemented yet') else: if img_dir_or_csv.endswith('.csv'): list_img = read_csv(img_dir_or_csv) elif is_url(img_dir_or_csv): list_img = [] img_path = Path(img_dir_or_csv) img = {} img['tif'] = img_path list_img.append(img) else: img_dir = Path(img_dir_or_csv) assert img_dir.is_dir() or img_dir.is_file(), f'Could not find directory/file "{img_dir_or_csv}"' list_img_paths = set() if img_dir.is_dir(): for glob_pattern in glob_patterns: assert isinstance(glob_pattern, str), f'Invalid glob pattern: "{glob_pattern}"' list_img_paths.update(sorted(img_dir.glob(glob_pattern))) else: list_img_paths.update(img_dir) list_img = [] for img_path in list_img_paths: img = {} img['tif'] = img_path list_img.append(img) assert len(list_img) >= 0, f'No .tif files found in {img_dir_or_csv}' return list_img
0dccd2d0356b8f89991a1ab1f8a621e696918ab5
3,482
def get_insta_links(L: Instaloader, url: str) -> tuple:
    """
    Resolve an Instagram post URL to its Post object.

    :param L: Instaloader instance
    :param url: post URL
    :return: (True, Post) on success, (False, []) on failure
    """
    try:
        shortcode = get_insta_shortcode(url)
        post = Post.from_shortcode(L.context, shortcode)
        return True, post
    except Exception as e:
        print(str(e))
        return False, []
6ee9eac712d4603d1b7cffedd11cf07e4345ec0a
3,483
import re
import os


def read_snli(data_dir, is_train):
    """Parse the SNLI dataset into premises, hypotheses and labels."""
    def extract_text(s):
        # Remove information we will not use
        s = re.sub('\\(', '', s)
        s = re.sub('\\)', '', s)
        # Replace two or more consecutive whitespace characters with a single space
        s = re.sub('\\s{2,}', ' ', s)
        return s.strip()
    label_set = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
    file_name = os.path.join(data_dir, 'snli_1.0_train.txt'
                             if is_train else 'snli_1.0_test.txt')
    with open(file_name, 'r') as f:
        rows = [row.split('\t') for row in f.readlines()[1:]]
    premises = [extract_text(row[1]) for row in rows if row[0] in label_set]
    hypotheses = [extract_text(row[2]) for row in rows if row[0] \
                in label_set]
    labels = [label_set[row[0]] for row in rows if row[0] in label_set]
    return premises, hypotheses, labels
96f552d900327a2c78c69f1a0b9aa9188852cf89
3,484
async def http_request_callback(_request: HttpRequest) -> HttpResponse: """A response handler which returns some text""" with open(__file__, 'rb') as file_pointer: buf = file_pointer.read() headers = [ (b'content-type', b'text/plain'), (b'content-length', str(len(buf)).encode('ascii')) ] return HttpResponse(200, headers, bytes_writer(buf, chunk_size=-1))
2c5bdf2e4617c7780fe9c8d0b4a65b363e05babc
3,485
import os def ensure_directory_exists(directory, domain=None, permissions=0o777): """Create a directory and give access rights to all Args: directory (str): Root directory domain (str): Domain. Basically a subdirectory to prevent things like overlapping signal filenames. rights (int): Directory permissions (default is 0o777) Returns: (str) a path to the directory """ if domain: directory = os.path.join(directory, domain) # Expand and normalize the path directory = os.path.normpath(directory) directory = os.path.expanduser(directory) if not os.path.isdir(directory): try: save = os.umask(0) os.makedirs(directory, permissions) except OSError: LOG.warning("Failed to create: " + directory) finally: os.umask(save) return directory
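# Hedged usage sketch; the temporary base directory and "signals" domain are
# illustrative. The module-level LOG used on failure is not shown in this entry.
import os
import tempfile

base = tempfile.mkdtemp()
path = ensure_directory_exists(base, domain="signals", permissions=0o755)
print(path, os.path.isdir(path))   # .../signals True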
0fe89ea6d23deffa67260bb9a465a3189fde6d0d
3,486
from typing import Tuple from typing import List from typing import Union def item_coverage( possible_users_items: Tuple[List[Union[int, str]], List[Union[int, str]]], recommendations: List[Tuple[Union[int, str], Union[int, str]]], ) -> float: """ Calculates the coverage value for items in possible_users_items[1] given the collection of recommendations. Recommendations over users/items not in possible_users_items are discarded. Args: possible_users_items (Tuple[List[Union[int, str]], List[Union[int, str]]]): contains exactly TWO sub-lists, first one with users, second with items recommendations (List[Tuple[Union[int, str], Union[int, str]]]): contains user-item recommendation tuples, e.g. [(user1, item1),(user2, item2),] Returns: item coverage (float): a metric showing the fraction of items which got recommended at least once. """ if len(possible_users_items) != 2: raise ValueError("possible_users_items must be of length 2: [users, items]") if np.any([len(x) == 0 for x in possible_users_items]): raise ValueError("possible_users_items cannot hold empty lists!") possible_items = set(possible_users_items[1]) items_with_recommendations = set([x[1] for x in recommendations]) items_without_recommendations = possible_items.difference(items_with_recommendations) item_cov = 1 - len(items_without_recommendations) / len(possible_items) return round(item_cov, 3)
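# Usage sketch for item_coverage above with made-up users, items and
# recommendations; numpy is assumed importable as np for the function's checks.
import numpy as np

possible = (["u1", "u2"], ["i1", "i2", "i3", "i4"])
recs = [("u1", "i1"), ("u2", "i1"), ("u1", "i3")]
print(item_coverage(possible, recs))   # 0.5 -> only i1 and i3 of 4 items recommended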
f3eb59e0146561c8a18f74c548539b8cc9dcbb5b
3,487
import cv2


def calc_area(img_it, contours, conv_sq, list_save):
    """
    Sum the (converted) areas of all contours in an image and append the result.

    Parameters
    ----------
    img_it : str or int
        Identifier of the image the contours belong to.
    contours : list
        Contours as returned by cv2.findContours.
    conv_sq : float
        Conversion factor from squared pixels to the desired area unit.
    list_save : list
        Running list of [image identifier, total area] records.

    Returns
    -------
    list
        list_save with one new [img_it, total area] entry appended.
    """
    # Calculate areas
    sum_file = 0
    for c in contours:
        M = cv2.moments(c)
        area = M['m00']
        area_conv = area * conv_sq
        sum_file = sum_file + area_conv
    # print(sum_file)
    list_save.append([img_it, sum_file])
    return list_save
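# Hedged usage sketch: the contours come from cv2.findContours on a synthetic mask,
# the image identifier is a made-up name, and conv_sq=1.0 is an arbitrary
# pixel-to-area conversion. The [-2] index keeps the sketch compatible with both
# OpenCV 3 and 4 return signatures.
import cv2
import numpy as np

mask = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(mask, (20, 20), (59, 59), 255, -1)   # filled 40x40 square
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
records = calc_area("frame_000", contours, conv_sq=1.0, list_save=[])
print(records)   # [['frame_000', ~1521.0]] -- roughly the square's area in pixels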
f3bdba8892041edfe5ba0497c927f846fd8110d9
3,488
def truth_seed_box(true_params, init_range, az_ind=4, zen_ind=5): """generate initial box limits from the true params Parameters ---------- true_params : np.ndarray init_range : np.ndarray Returns ------- np.ndarray shape is (n_params, 2); returned energy limits are in units of log energy """ n_params = len(true_params) true_params = np.copy(true_params[:, np.newaxis]) # clip true energies between 0.3 GeV and 1000 GeV true_params[-2:] = true_params[-2:].clip(0.3, 1000) limits = np.empty((n_params, 2), np.float32) limits[:-2] = true_params[:-2] + init_range[:-2] limits[-2:] = np.log10(true_params[-2:]) + init_range[-2:] limits[az_ind] = limits[az_ind].clip(0, 2 * np.pi) limits[zen_ind] = limits[zen_ind].clip(0, np.pi) return limits
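# Hedged usage sketch with an illustrative 8-parameter event (x, y, z, t, azimuth,
# zenith, track energy, cascade energy) and a made-up init_range; none of these
# values come from the source, and numpy is assumed importable as np.
import numpy as np

true_params = np.array([0.0, 10.0, -50.0, 100.0, 1.0, 2.0, 5.0, 20.0])
init_range = np.array([[-5, 5], [-5, 5], [-5, 5], [-100, 100],
                       [-0.5, 0.5], [-0.5, 0.5], [-0.3, 0.3], [-0.3, 0.3]])
limits = truth_seed_box(true_params, init_range)
print(limits.shape)   # (8, 2); the last two rows are limits on log10(energy)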
3c3087702be4b91589f7e75f5c7e2f18776a658a
3,489
from xia2.Driver.DriverFactory import DriverFactory from xia2.Handlers.Streams import Debug def Report(DriverType=None): """A factory for ReportWrapper classes.""" DriverInstance = DriverFactory.Driver(DriverType) class ReportWrapper(DriverInstance.__class__): def __init__(self): DriverInstance.__class__.__init__(self) self.set_executable("dials.report") self._experiments_filename = None self._reflections_filename = None self._html_filename = None def set_experiments_filename(self, experiments_filename): self._experiments_filename = experiments_filename def set_reflections_filename(self, reflections_filename): self._reflections_filename = reflections_filename def set_html_filename(self, html_filename): self._html_filename = html_filename def run(self, wait_for_completion=False): Debug.write("Running dials.report") self.clear_command_line() assert ( self._experiments_filename is not None or self._reflections_filename is not None ) if self._experiments_filename is not None: self.add_command_line(self._experiments_filename) if self._reflections_filename is not None: self.add_command_line(self._reflections_filename) if self._html_filename is not None: self.add_command_line("output.html=%s" % self._html_filename) self.start() if wait_for_completion: self.close_wait() else: self.close() self.check_for_errors() return ReportWrapper()
918ed6d0acad9d80fdd9c0a1ef6e33a19216c8c9
3,490
def summary(task): """Given an ImportTask, produce a short string identifying the object. """ if task.is_album: return u'{0} - {1}'.format(task.cur_artist, task.cur_album) else: return u'{0} - {1}'.format(task.item.artist, task.item.title)
87387c47e90998c270f6f8f2f63ceacebd4cdc78
3,491
def ds_tc_resnet_model_params(use_tf_fft=False): """Generate parameters for ds_tc_resnet model.""" # model parameters model_name = 'ds_tc_resnet' params = model_params.HOTWORD_MODEL_PARAMS[model_name] params.causal_data_frame_padding = 1 # causal padding on DataFrame params.clip_duration_ms = 160 params.use_tf_fft = use_tf_fft params.mel_non_zero_only = not use_tf_fft params.feature_type = 'mfcc_tf' params.window_size_ms = 5.0 params.window_stride_ms = 2.0 params.wanted_words = 'a,b,c' params.ds_padding = "'causal','causal','causal','causal'" params.ds_filters = '4,4,4,2' params.ds_repeat = '1,1,1,1' params.ds_residual = '0,1,1,1' # no residuals on strided layers params.ds_kernel_size = '3,3,3,1' params.ds_dilation = '1,1,1,1' params.ds_stride = '2,1,1,1' # streaming conv with stride params.ds_pool = '1,2,1,1' # streaming conv with pool params.ds_filter_separable = '1,1,1,1' # convert ms to samples and compute labels count params = model_flags.update_flags(params) # compute total stride pools = model_utils.parse(params.ds_pool) strides = model_utils.parse(params.ds_stride) time_stride = [1] for pool in pools: if pool > 1: time_stride.append(pool) for stride in strides: if stride > 1: time_stride.append(stride) total_stride = np.prod(time_stride) # override input data shape for streaming model with stride/pool params.data_stride = total_stride params.data_shape = (total_stride * params.window_stride_samples,) # set desired number of frames in model frames_number = 16 frames_per_call = total_stride frames_number = (frames_number // frames_per_call) * frames_per_call # number of input audio samples required to produce one output frame framing_stride = max( params.window_stride_samples, max(0, params.window_size_samples - params.window_stride_samples)) signal_size = framing_stride * frames_number # desired number of samples in the input data to train non streaming model params.desired_samples = signal_size params.batch_size = 1 return params
b018fa56efd67d8378496d5b4b1975580fc92f89
3,492
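A hedged usage sketch; it assumes Google's kws_streaming package (the source of model_params, model_flags and model_utils) is importable:

params = ds_tc_resnet_model_params(use_tf_fft=False)
print(params.data_shape)       # samples consumed per streaming inference call
print(params.desired_samples)  # total input samples for the non-streaming model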
from typing import Dict

import pandas as pd
from sklearn.metrics import mean_absolute_error

# EvalReturnType is assumed here to be a simple alias for the log dictionary this
# evaluator returns; in the original project it is imported from its types module.
EvalReturnType = Dict[str, float]


def expected_calibration_error_evaluator(test_data: pd.DataFrame,
                                         prediction_column: str = "prediction",
                                         target_column: str = "target",
                                         eval_name: str = None,
                                         n_bins: int = 100,
                                         bin_choice: str = "count") -> EvalReturnType:
    """
    Computes the expected calibration error (ECE), given true label and prediction scores.
    See "On Calibration of Modern Neural Networks" (https://arxiv.org/abs/1706.04599) for more information.

    The ECE is the distance between the observed (actual) frequencies and the predicted probabilities,
    for a given choice of bins. Perfect calibration results in a score of 0.

    For example, if for the bin [0, 0.1] we have the three data points:

    1. prediction: 0.1, actual: 0
    2. prediction: 0.05, actual: 1
    3. prediction: 0.0, actual: 0

    Then the predicted average is (0.1 + 0.05 + 0.00)/3 = 0.05, and the empirical frequency
    is (0 + 1 + 0)/3 = 1/3. Therefore, the distance for this bin is::

        |1/3 - 0.05| ~= 0.28

    Graphical intuition::

        Actuals (empirical frequency between 0 and 1)
        |     *
        |   *
        | *
         ______ Predictions (probabilities between 0 and 1)

    Parameters
    ----------
    test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.

    prediction_column : String
        The name of the column in `test_data` with the prediction scores.

    target_column : String
        The name of the column in `test_data` with the binary target.

    eval_name : String, optional (default=None)
        The name of the evaluator as it will appear in the logs.

    n_bins : Int (default=100)
        The number of bins. This is a trade-off between the number of points in each bin and
        the probability range they span. You want a small enough range that still contains a
        significant number of points for the distance to work.

    bin_choice : String (default="count")
        Two possibilities:
        "count" for equally populated bins (e.g. uses `pandas.qcut` for the bins)
        "prob" for equally spaced probabilities (e.g. uses `pandas.cut` for the bins),
        with distance weighed by the number of samples in each bin.

    Returns
    -------
    log: dict
        A log-like dictionary with the expected calibration error.
    """
    if eval_name is None:
        eval_name = "expected_calibration_error_evaluator__" + target_column

    if bin_choice == "count":
        bins = pd.qcut(test_data[prediction_column], q=n_bins)
    elif bin_choice == "prob":
        bins = pd.cut(test_data[prediction_column], bins=n_bins)
    else:
        raise AttributeError("Invalid bin_choice")

    metric_df = pd.DataFrame({"bins": bins,
                              "predictions": test_data[prediction_column],
                              "actuals": test_data[target_column]})

    agg_df = metric_df.groupby("bins").agg({"bins": "count", "predictions": "mean", "actuals": "mean"})

    sample_weight = None
    if bin_choice == "prob":
        sample_weight = agg_df["bins"].values

    distance = mean_absolute_error(agg_df["actuals"].values, agg_df["predictions"].values,
                                   sample_weight=sample_weight)

    return {eval_name: distance}
0f34a5c0883325324b11fd9b97a8a55250574392
3,493
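A small self-contained check of the evaluator above (pandas, numpy and scikit-learn assumed installed); the synthetic scores are calibrated by construction, so the reported ECE should be close to 0:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
scores = rng.uniform(0, 1, size=1000)
df = pd.DataFrame({
    "prediction": scores,
    "target": (rng.uniform(0, 1, size=1000) < scores).astype(int),
})
print(expected_calibration_error_evaluator(df, n_bins=10))
# {'expected_calibration_error_evaluator__target': <small value close to 0>}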
def format_bytes(size): """ Takes a byte size (int) and returns a formatted, human-interpretable string """ # 2**10 = 1024 power = 2 ** 10 n = 0 power_labels = {0: " bytes", 1: "KB", 2: "MB", 3: "GB", 4: "TB"} while size >= power: size /= power n += 1 return str(round(size, 2)) + power_labels[n]
332b9d43c044da92ef7a9b16e57cfa7d552de12f
3,494
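Two quick examples of format_bytes (note that the label table above puts a leading space only on " bytes"):

print(format_bytes(512))        # 512 bytes
print(format_bytes(5_242_880))  # 5.0MB  (5 * 1024**2 bytes)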
import pandas as pd

# mixcr_input is assumed to be a project helper that parses a MiXCR export file
# into a DataFrame with an 'AASeq' column; it is not defined in this snippet.


def load_input_data(filenames, Ag_class):
    """
    Load the files specified in filenames.

    Parameters
    ---
    filenames: a list of names that specify the files to be loaded.
    Ag_class: classification of sequences from the MiXCR txt file
        (i.e., antigen binder = 1, non-binder = 0)
    """

    # Combine the non-binding sequence data sets.
    # Non-binding data sets include Ab+ data and Ag-
    # sorted data for all 3 libraries
    l_data = []
    for file in filenames:
        l_data.append(
            mixcr_input('data/' + file, Ag_class, seq_len=15)
        )
    mHER_H3 = pd.concat(l_data)

    # Drop duplicate sequences
    mHER_H3 = mHER_H3.drop_duplicates(subset='AASeq')

    # Remove 'CAR/CSR' motif and last two amino acids
    mHER_H3['AASeq'] = [x[3:-2] for x in mHER_H3['AASeq']]

    # Shuffle sequences and reset index
    mHER_H3 = mHER_H3.sample(frac=1).reset_index(drop=True)

    return mHER_H3
9ae9cc814f150168ca1703b1a4af54bc440b4425
3,495
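A hedged usage sketch for load_input_data; the file names are hypothetical, and mixcr_input is assumed to read MiXCR exports from the data/ directory:

non_binder_files = ['mHER_H3_1_Ab.txt', 'mHER_H3_1_AgNeg.txt']  # hypothetical file names
mHER_neg = load_input_data(non_binder_files, Ag_class=0)
print(mHER_neg.shape)  # deduplicated, shuffled sequences, trimmed to 10 residues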
from typing import Optional def get_maximum_value( inclusive: Optional[Edge] = None, exclusive: Optional[Edge] = None, ignore_unlimited: bool = False, ) -> Result[Boundary, TestplatesError]: """ Gets maximum boundary. :param inclusive: inclusive boundary value or None :param exclusive: exclusive boundary value or None :param ignore_unlimited: indicates whether to ignore unlimited values or not """ return get_value_boundary( MAXIMUM_EXTREMUM, inclusive=inclusive, exclusive=exclusive, ignore_unlimited=ignore_unlimited, )
3ef7557ed3f7f353e0765a92fb008449409039a8
3,496
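A hypothetical call sketch, assuming the surrounding testplates internals (Edge, Boundary, Result, TestplatesError, MAXIMUM_EXTREMUM and get_value_boundary) are in scope:

inclusive_max = get_maximum_value(inclusive=128)   # value <= 128
exclusive_max = get_maximum_value(exclusive=128)   # value < 128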
from typing import List def build_graph(order: int, edges: List[List[int]]) -> List[List[int]]: """Builds an adjacency list from the edges of an undirected graph.""" adj = [[] for _ in range(order)] for u, v in edges: adj[u].append(v) adj[v].append(u) return adj
86bdd0d4314777ff59078b1c0f639e9439f0ac08
3,497
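A worked example of build_graph on a path graph with four vertices:

print(build_graph(4, [[0, 1], [1, 2], [2, 3]]))
# [[1], [0, 2], [1, 3], [2]]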
import math

import torch
from omegaconf import OmegaConf

# DATASET_SIZES (a dict mapping dataset name -> number of training samples) and
# ckconv (which provides LinearWarmUp_LRScheduler) are assumed to be available
# at module level; they are not defined in this snippet.


def construct_scheduler(
    optimizer,
    cfg: OmegaConf,
):
    """
    Creates a learning rate scheduler for a given model
    :param optimizer: the optimizer to be used
    :param cfg: an OmegaConf config holding cfg.train.scheduler and cfg.train.scheduler_params
    :return: scheduler
    """

    # Unpack values from cfg.train.scheduler_params
    scheduler_type = cfg.train.scheduler
    decay_factor = cfg.train.scheduler_params.decay_factor
    decay_steps = cfg.train.scheduler_params.decay_steps
    patience = cfg.train.scheduler_params.patience
    warmup_epochs = cfg.train.scheduler_params.warmup_epochs
    warmup = warmup_epochs != -1

    if scheduler_type == "multistep":
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=decay_steps,
            gamma=1.0 / decay_factor,
        )
    elif scheduler_type == "plateau":
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode="max",
            factor=1.0 / decay_factor,
            patience=patience,
            verbose=True,
            # threshold_mode="rel",
            # min_lr=2.5e-4,
        )
    elif scheduler_type == "exponential":
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
            optimizer,
            gamma=decay_factor,
            last_epoch=-1,
        )
    elif scheduler_type == "cosine":
        size_dataset = DATASET_SIZES[cfg.dataset]
        if warmup:
            # If warmup is used, then we need to subtract the warmup epochs from T_max.
            T_max = (cfg.train.epochs - warmup_epochs) * math.ceil(
                size_dataset / float(cfg.train.batch_size)
            )
        else:
            T_max = cfg.train.epochs * math.ceil(
                size_dataset / float(cfg.train.batch_size)
            )
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=T_max,
            eta_min=1e-6,
        )
    else:
        lr_scheduler = None
        print(
            f"WARNING! No scheduler will be used. cfg.train.scheduler = {scheduler_type}"
        )

    # Optionally wrap the scheduler with a linear warm-up phase
    if warmup and lr_scheduler is not None:
        size_dataset = DATASET_SIZES[cfg.dataset]
        lr_scheduler = ckconv.nn.LinearWarmUp_LRScheduler(
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            warmup_iterations=warmup_epochs
            * math.ceil(size_dataset / float(cfg.train.batch_size)),
        )

    return lr_scheduler
d0ed907aa3582978cb3adf5eada895366dc1282f
3,498
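A minimal sketch exercising the "multistep" branch of construct_scheduler, which avoids the DATASET_SIZES lookup and the ckconv warm-up wrapper; the config keys mirror the ones read above and the values are assumptions:

import torch
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "dataset": "dummy",
    "train": {
        "scheduler": "multistep",
        "epochs": 100,
        "batch_size": 64,
        "scheduler_params": {
            "decay_factor": 5,
            "decay_steps": [30, 60, 90],
            "patience": 10,
            "warmup_epochs": -1,   # -1 disables the warm-up wrapper
        },
    },
})
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = construct_scheduler(optimizer, cfg)  # MultiStepLR with gamma = 1/5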
import numpy def GenerateSerialGraph(num_samples, block_size): """ Generates a (consistent) serial graph. """ N = num_samples num_blocks = N // block_size if N % block_size != 0: err = "num_samples(%d) must be a multiple of block_size (%d)" % (num_samples, block_size) raise Exception(err) if num_blocks < 2: err = "the number of blocks %d should be at least 2 (%d/%d)" % (num_blocks, num_samples, block_size) raise Exception(err) node_weights = numpy.ones(N) * 2.0 node_weights[:block_size] = 1.0 node_weights[-block_size:] = 1.0 edge_weights = {} w = 1.0 for block in range(num_blocks - 1): for i in range(block_size): for j in range(block_size): edge_weights[(i + block * block_size, j + (block + 1) * block_size)] = w edge_weights[(j + (block + 1) * block_size, i + block * block_size)] = w # Loops are simply overwritten return node_weights, edge_weights
7348c08051aa0b7ec51f79f1f6f2097ab5857ef8
3,499
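A worked example of GenerateSerialGraph with three blocks of two samples: the boundary blocks get node weight 1, the middle block weight 2, and consecutive blocks are fully connected in both directions with edge weight 1.0:

node_weights, edge_weights = GenerateSerialGraph(num_samples=6, block_size=2)
print(node_weights)       # [1. 1. 2. 2. 1. 1.]
print(len(edge_weights))  # 16 directed edges: (0,2),(0,3),(1,2),(1,3),(2,4),... plus their reverses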