query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4 to 10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
Calculate the elbow score for a partition specified by labels
|
import numpy as np
from scipy.spatial.distance import cdist, pdist

def elbow_score(X, labels, metric='euclidean', type=('inner', 'standard')):
    if type == ('inner', 'standard'):
        score = 0
        for label in set(labels):
            sub_samples = np.atleast_2d(X[labels == label])
            dists = cdist(sub_samples, sub_samples, metric=metric)
            tmp_score = np.sum(dists) / (2.0 * sub_samples.shape[0])
            score += tmp_score
    elif type == ('inner', 'centroid'):
        # https://stackoverflow.com/questions/19197715/scikit-learn-k-means-elbow-criterion
        # formula-1 in (Goutte, Toft et al. 1999 - NeuroImage)
        sub_scores = []
        for label in set(labels):
            sub_samples = np.atleast_2d(X[labels == label])
            sub_samples_centroid = np.atleast_2d(np.mean(sub_samples, 0))
            tmp_scores = cdist(sub_samples_centroid, sub_samples, metric=metric)[0]
            sub_scores.extend(tmp_scores)
        score = np.mean(sub_scores)
    elif type == ('inner', 'pairwise'):
        sub_scores = []
        for label in set(labels):
            sub_samples = np.atleast_2d(X[labels == label])
            sub_scores.extend(pdist(sub_samples, metric=metric))
        score = np.mean(sub_scores)
    elif type == ('inter', 'centroid'):
        # adapted from formula-2 in (Goutte, Toft et al. 1999 - NeuroImage)
        sub_centroids = []
        for label in set(labels):
            sub_samples = np.atleast_2d(X[labels == label])
            sub_centroids.append(np.mean(sub_samples, 0))
        centroid = np.atleast_2d(np.mean(sub_centroids, 0))
        tmp_scores = cdist(centroid, np.array(sub_centroids), metric=metric)[0]
        score = np.mean(tmp_scores)
    elif type == ('inter', 'pairwise'):
        sub_centroids = []
        for label in set(labels):
            sub_samples = np.atleast_2d(X[labels == label])
            sub_centroids.append(np.mean(sub_samples, 0))
        sub_centroids = np.array(sub_centroids)
        if sub_centroids.shape[0] == 1:
            # pdist needs at least two rows; duplicate the single centroid
            sub_centroids = np.r_[sub_centroids, sub_centroids]
        score = np.mean(pdist(sub_centroids, metric=metric))
    else:
        raise TypeError('Type-{} is not supported at present.'.format(type))
    return score
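
A minimal usage sketch of the elbow method built on this function, assuming scikit-learn is available and that elbow_score (with its numpy/scipy imports) is in scope; the data and the range of k are hypothetical:

from sklearn.cluster import KMeans

X = np.random.rand(200, 5)                      # hypothetical data
inner_scores = []
for k in range(2, 11):
    labels = KMeans(n_clusters=k, n_init=10).fit_predict(X)
    inner_scores.append(elbow_score(X, labels, metric='euclidean',
                                    type=('inner', 'standard')))
# Plot inner_scores against k and pick the k where the within-cluster
# dispersion stops dropping sharply (the "elbow").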
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score",
"def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]",
"def h_score(x_mat, label, b_onehot = False):\n label = np.array(label)\n if b_onehot:\n label = np.array([label_c.argmax() for label_c in label])\n label_set = np.unique(label)\n x_mean = np.mean(x_mat, axis = 0) # E[X]\n cov_x = np.cov(x_mat.T) # Cov(X)\n pinv_cov_x = np.linalg.pinv(cov_x) # pinv(Cov(X))\n coe = 0\n \"\"\"\n Here we use this formula:\n H-score = E[(E[X|Y] - E[X]) * inv(Cov(X)) * (E[X|Y] - E[X])] / 2\n \"\"\"\n for label_i in label_set: # current label i\n x_i = x_mat[label == label_i, :] # get X where corresponding Y = i\n x_i_mean = np.mean(x_i, axis = 0) # E[X|Y = i]\n x_i_mean_c = x_i_mean - x_mean # E[X|Y = i] - E[X]\n pr_i = float(sum(label == label_i)) / len(label) # Pr(Y = i)\n if cov_x.size > 1:\n coe_i = np.dot(x_i_mean_c, np.matmul(pinv_cov_x, x_i_mean_c))\n # (E[X|Y = i] - E[X]) * inv(Cov(X)) * (E[X|Y = i] - E[X])\n else:\n coe_i = x_i_mean_c ** 2 / cov_x\n coe = coe + coe_i * pr_i\n coe = coe / 2\n return coe",
"def labels_to_scores(labels):\n device = sp.get_device(labels)\n xp = device.xp\n with device:\n num_classes = labels.max() + 1\n scores = xp.zeros([len(labels), num_classes], dtype=np.float32)\n scores[xp.arange(len(labels)), labels] = 1\n\n return scores",
"def evaluateClusters( features, labels):\r\n\r\n\treturn silhouette_score( features, labels)",
"def Bayes_prediction(X, y, fold_number=10):\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n cross_tab_all = []\n lamb_hat_all = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n pi_hat = y_train.mean()\n lamb_hat = np.zeros((2, D))\n \n for flag in range(2):\n for d in range(D):\n lamb_hat[flag][d] = (sum(X_train.iloc[i][d] * (y_train.iloc[i]==flag) for i in range(length))) / (sum(y_train.iloc[i]==flag for i in range(length)))\n\n y_pred = np.zeros(len(X_test))\n for i in range(len(X_test)):\n y_pred[i] = Bayes_classifier(pi_hat, X_test.iloc[i], lamb_hat)\n \n cross_tab = np.zeros((2, 2))\n for m in [0, 1]:\n for n in [0, 1]:\n cross_tab[m][n] = sum([(y_test.values[i]==m) & (y_pred[i]==n) for i in range(len(y_pred))]) \n \n cross_tab_all.append(cross_tab)\n lamb_hat_all.append(lamb_hat)\n \n cross_tab_all = sum(cross_tab_all)\n lamb_hat_all\n\n return lamb_hat_all, cross_tab_all",
"def calc_ac_score(labels_true, labels_pred):\n nclass = len(np.unique(labels_true))\n labels_size = len(labels_true)\n mat = labels_size * np.ones((nclass, nclass))\n \n idx = 0\n \n for i in range(labels_size):\n mat[labels_pred[i], labels_true[i]] -= 1.0\n \n munkres = Munkres()\n mapping = munkres.compute(mat)\n \n ac = 0.0\n\n for i in range(labels_size):\n val = mapping[labels_pred[i]][1]\n if val == labels_true[i]:\n ac += 1.0\n\n ac = ac / labels_size \n \n return ac",
"def compute_eer(labels, scores):\n fpr, tpr, thresholds = roc_curve(labels, scores, pos_label=1)\n eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)\n thresh = interp1d(fpr, thresholds)(eer)\n return eer, thresh",
"def segmentation_scores(label_trues, label_preds, n_class):\n hist = np.zeros((n_class, n_class))\n for lt, lp in zip(label_trues, label_preds):\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n\n return {'overall_acc': acc,\n 'mean_acc': acc_cls,\n 'freq_w_acc': fwavacc,\n 'mean_iou': mean_iu}",
"def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc",
"def calc_loss(predictions, labels):\n return np.mean(np.square(predictions - labels))",
"def label_accuracy_score(label_trues, label_preds, n_class):\n hist = np.zeros((n_class, n_class))\n # 一个batch里面可能有多个数据\n # 通过迭代器将一个个数据进行计算\n for lt, lp in zip(label_trues, label_preds):\n # numpy.ndarray.flatten将numpy对象拉成1维\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n\n # np.diag(a)假如a是一个二维矩阵,那么会输出矩阵的对角线元素\n # np.sum()可以计算出所有元素的和。如果axis=1,则表示按行相加\n \"\"\"\n acc是准确率 = 预测正确的像素点个数/总的像素点个数\n acc_cls是预测的每一类别的准确率(比如第0行是预测的类别为0的准确率),然后求平均\n iu是召回率Recall,公式上面给出了\n mean_iu就是对iu求了一个平均\n freq是每一类被预测到的频率\n fwavacc是频率乘以召回率,我也不知道这个指标代表什么\n \"\"\"\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n # nanmean会自动忽略nan的元素求平均\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc",
"def label_accuracies(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (num_correct / preds.size(0)) * 100.0",
"def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab",
"def _leaf_calculation(y, label, sample_weights=None):\n if sample_weights is None:\n sample_weights = np.ones(y.shape[0]) / y.shape[0]\n # YOUR CODE HERE\n # begin answer\n numerator=np.sum(y)\n denominator=np.sum((label-y)*(1-label+y))\n if numerator == 0 or abs(denominator) < 1e-150:\n return 0.0\n else:\n return numerator/denominator",
"def predict_boosting_example(x, h_ens):\r\n\r\n arr = []\r\n sum_alpha = 0\r\n\r\n for y in h_ens:\r\n # splitting hypothesis, weight pairs\r\n alpha, tree = h_ens[y]\r\n tst_pred = predict_example(x, tree)\r\n # appending prediction\r\n arr.append(tst_pred*alpha)\r\n sum_alpha += alpha\r\n predict_egz = np.sum(arr) / sum_alpha\r\n # weak learner\r\n if predict_egz >= 0.5:\r\n return 1\r\n else:\r\n return 0",
"def evaluate(labels, predictions):\n actual_positive = 0\n actual_negative = 0\n predicted_positive = 0\n predicted_negative = 0\n for i, j in zip(labels, predictions):\n if i == 1:\n actual_positive += i\n predicted_positive += j\n else:\n actual_negative += 1\n if j == 0:\n predicted_negative += 1\n return predicted_positive/actual_positive, predicted_negative/actual_negative",
"def computeMetrics(model, input, shard_count, delimiter, print_cutoff=False):\n print('compute metrics for %s' % model)\n data = np.loadtxt(input,\n delimiter=delimiter,\n dtype={\n 'names': ('model', 'weight', 'score', 'label'),\n 'formats': ('S16', 'f4', 'f4', 'i1')\n })\n dataSize = len(data)\n shardSize = int(dataSize / shard_count)\n\n rocPoints = [(0, 0)]\n prPoints = []\n corrPoints = []\n cutoff = []\n\n totalConditionPositive = 0.0\n totalConditionNegative = 0.0\n\n for record in data:\n modelId = record[0]\n weight = record[1]\n score = record[2]\n label = record[3]\n\n if label == 1:\n totalConditionPositive += weight\n elif label == 0:\n totalConditionNegative += weight\n else:\n assert False, 'label invalid: %d' % label\n\n truePositive = 0.0\n falsePositive = 0.0\n binTotalScore = 0.0\n binWeight = 0.0\n binPositive = 0.0\n overallTatalScore = 0.0\n\n partitionSize = 0\n for record in data:\n modelId = record[0]\n weight = record[1]\n score = record[2]\n label = record[3]\n\n partitionSize += 1\n binWeight += weight\n overallTatalScore += weight * score\n\n if label == 1:\n truePositive += weight\n binPositive += weight\n binTotalScore += score * weight\n elif label == 0:\n falsePositive += weight\n\n if partitionSize % shardSize == 0 or partitionSize == dataSize:\n recall = (truePositive / totalConditionPositive) if totalConditionPositive > 0 else 0.0\n fallout = (falsePositive / totalConditionNegative) if totalConditionPositive > 0 else 0.0\n precision = truePositive / (truePositive + falsePositive)\n\n meanPctr = binTotalScore / binWeight\n eCtr = binPositive / binWeight\n\n rocPoints += [(fallout, recall)]\n prPoints += [(recall, precision)]\n corrPoints += [(eCtr, meanPctr)]\n cutoff += [(score, recall, precision, fallout)]\n\n binWeight = 0.0\n binTotalScore = 0.0\n binPositive = 0.0\n\n rocPoints = sorted(rocPoints, key=lambda x: x[0])\n prPoints = sorted(prPoints, key=lambda x: x[0])\n corrPoints = sorted(corrPoints, key=lambda x: x[0])\n cutoff = sorted(cutoff, key=lambda x: x[0])\n\n def calculateAUC(rocPoints):\n AUC = 0.0\n lastPoint = (0, 0)\n for point in rocPoints:\n AUC += (point[1] + lastPoint[1]) * (point[0] - lastPoint[0]) / 2.0\n lastPoint = point\n return AUC\n\n AUC = calculateAUC(rocPoints)\n OER = truePositive / overallTatalScore #Observed Expected Ratio\n F1 = 2 * truePositive / (truePositive + falsePositive + totalConditionPositive)\n\n print('%s AUC: %f' % (model, AUC))\n print('%s F1: %f' % (model, F1))\n print('%s Observed/Expected Ratio: %f' % (model, OER))\n if print_cutoff:\n print('%s cutoff:' % model, cutoff)\n\n return model, {\n 'ROC': rocPoints,\n 'PR': prPoints,\n 'CORR': corrPoints,\n 'AUC': AUC,\n 'OER': OER,\n 'F1': F1,\n 'cutoff': cutoff\n }",
"def c_index(true_labels, predictions):\n\n true_labels = list(true_labels)\n predictions = list(predictions)\n\n n = 0\n h_sum = 0\n for i in range(len(true_labels)):\n t = true_labels[i]\n p = predictions[i]\n for j in range(i + 1, len(true_labels)):\n nt = true_labels[j]\n np = predictions[j]\n if t != nt:\n n += 1\n if (p < np and t < nt) or (p > np and t > nt):\n h_sum += 1\n elif p == np:\n h_sum += 0.5\n # To avoid 'ZeroDivisionError' exception\n if n == 0:\n return h_sum\n return h_sum / n",
"def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))",
"def evaluate(net, dmanager, partition): \n def accuracy(outputs, labels, phrases, dmanager):\n correct = 0\n total = 0\n misclassified = []\n for (i, output) in enumerate(outputs):\n total += 1\n if labels[i] == output.argmax():\n correct += 1\n else:\n misclassified.append((phrases[i], \n dmanager.tag(output.argmax().item()),\n dmanager.tag(labels[i].item())))\n return correct, total, misclassified\n val_loader = dmanager.batched_loader(partition, 128)\n total_val_loss = 0\n correct = 0\n total = 0\n misclassified = []\n loss = torch.nn.CrossEntropyLoss() \n for data in val_loader:\n raw_inputs = [dmanager.vectorize(p) for p in data['phrase']]\n raw_labels = [dmanager.tag_index(c) for c in data['tag']]\n inputs = []\n labels = []\n for (inp, label) in zip(raw_inputs, raw_labels):\n if inp is not None:\n inputs.append(inp)\n labels.append(label) \n inputs = torch.Tensor(inputs)\n labels = torch.LongTensor([dmanager.tag_index(c) for \n c in data['tag']])\n val_outputs = net(inputs) \n val_loss_size = loss(val_outputs, labels)\n correct_inc, total_inc, misclassified_inc = accuracy(val_outputs, \n labels, \n data['phrase'], \n dmanager)\n correct += correct_inc\n total += total_inc\n misclassified += misclassified_inc\n total_val_loss += val_loss_size.data.item()\n return correct/total, misclassified",
"def dice_score_average_left_right(label_gt, label_pred, n_class=2, connectivity=26):\n import cc3d # pip install connected-components-3d --no-binary :all: (https://pypi.org/project/connected-components-3d/)\n from copy import deepcopy\n\n epsilon = 1.0e-6\n assert np.all(label_gt.shape == label_gt.shape)\n dice_scores_av = np.zeros(n_class, dtype=np.float32)\n dice_scores_two_sides = np.zeros((n_class - 1, 2), dtype=np.float32) # Number of classes X left + right\n #print(n_class)\n for class_id in range(n_class):\n # Inputs per class\n img_A = np.array(label_gt == class_id, dtype=np.uint16)#.flatten()\n img_B = np.array(label_pred == class_id, dtype=np.uint16)#.flatten()\n\n if class_id != 0: # 0 is background\n # Divide into left and right using connected components\n img_A_cc = cc3d.connected_components(img_A, connectivity=connectivity) # 26-connected (default)\n img_B_cc = cc3d.connected_components(img_B, connectivity=connectivity) # 26-connected (default)\n\n # I found a bug in cc3d for img_B_cc - patient PD092\n n_class_for_two_sides = 3 # Background, left and right parts (0, 1, 2)\n if np.max(img_B_cc) > n_class_for_two_sides - 1:\n print('Found outlier value in cc3d, correcting...')\n img_B_cc[img_B_cc == np.max(img_B_cc)] = n_class_for_two_sides - 1\n\n # Make sure the CC are in the same order\n img_A_cc = check_cc_of_same_object(img_A_cc, img_B_cc, class_id)\n\n # Extract individual components - img A\n for segid in range(1, np.max(img_A_cc) + 1):\n extracted_image_A = img_A_cc * (img_A_cc == segid)\n extracted_image_A[extracted_image_A != 0] = 1\n extracted_image_A = extracted_image_A.astype(np.float32).flatten()\n\n extracted_image_B = img_B_cc * (img_B_cc == segid)\n extracted_image_B[extracted_image_B != 0] = 1\n extracted_image_B = extracted_image_B.astype(np.float32).flatten()\n\n score = 2.0 * np.sum(extracted_image_A * extracted_image_B) / (np.sum(extracted_image_A) + np.sum(extracted_image_B) + epsilon)\n #print(score)\n # Each row is a different structure, columns are left and right\n dice_scores_two_sides[class_id - 1, segid - 1] = score\n else:\n img_A = img_A.flatten()\n img_B = img_B.flatten()\n score = 2.0 * np.sum(img_A * img_B) / (np.sum(img_A) + np.sum(img_B) + epsilon)\n dice_scores_av[class_id] = score\n\n # Averaged DICE on both sides\n dice_scores_av[1:n_class] = np.mean(dice_scores_two_sides, axis=1)\n return dice_scores_av, dice_scores_two_sides",
"def _detection_scores(inputs, gt_boxes, gt_labels, model):\n model = check_model('model', model, BlackModel)\n boxes_and_confi, pred_labels = model.predict(*inputs)\n det_scores = []\n correct_labels_num = []\n # repeat gt_boxes and gt_labels for all particles cloned from the same sample in PSOAttack/GeneticAttack\n if gt_boxes.shape[0] == 1 and boxes_and_confi.shape[0] > 1:\n gt_boxes = np.repeat(gt_boxes, boxes_and_confi.shape[0], axis=0)\n gt_labels = np.repeat(gt_labels, boxes_and_confi.shape[0], axis=0)\n iou_thres = 0.5\n for boxes, labels, gt_box, gt_label in zip(boxes_and_confi, pred_labels, gt_boxes, gt_labels):\n gt_box_num = gt_box.shape[0]\n score = 0\n box_num = boxes.shape[0]\n correct_label_flag = np.zeros(gt_label.shape)\n for i in range(box_num):\n pred_box = boxes[i]\n max_iou_confi = 0\n for j in range(gt_box_num):\n iou = calculate_iou(pred_box[:4], gt_box[j][:4])\n if labels[i] == gt_label[j] and iou > iou_thres and correct_label_flag[j] == 0:\n max_iou_confi = max(max_iou_confi, pred_box[-1] + iou)\n correct_label_flag[j] = 1\n score += max_iou_confi\n det_scores.append(score)\n correct_labels_num.append(np.sum(correct_label_flag))\n return np.array(det_scores), np.array(correct_labels_num)",
"def evaluate(y_true, y_preds, labels=[0, 1, 2, 3, 4]):\n p_scores = []\n r_scroes = []\n for label in labels:\n p = (((y_true == label) * (y_preds == label)).sum()+0.001) / ((y_preds == label).sum() + 0.001)\n p_scores.append(p)\n r = (((y_true == label) * (y_preds == label)).sum()+0.001) / ((y_true == label).sum() + 0.001)\n r_scroes.append(r)\n p_scores = np.array(p_scores)\n r_scroes = np.array(r_scroes)\n f1 = 2 * (p_scores * r_scroes + 0.001) / (p_scores + r_scroes + 0.001)\n\n confmat = []\n for label in labels:\n conf = []\n for label2 in labels:\n conf.append(((y_preds == label) * (y_true == label2)).sum())\n confmat.append(conf)\n confmat = np.array(confmat)\n\n return p_scores, r_scroes, f1, confmat",
"def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict",
"def pkscore(labels, preds, k=10):\n # Remove padding from labels and preds\n mask = np.where(labels<=1, True, False)\n labels = labels[mask]\n preds = preds[mask]\n\n num_windows = len(labels) - k + 1\n assert num_windows>0, 'Choose a smaller k value'\n\n correct = 0\n for i in range(num_windows):\n # calculate index of window close\n j = i + k\n\n # Get number of segment splits in labels and preds\n label_diff = sum(labels[i:j])\n pred_diff = sum(preds[i:j])\n\n # Check for agreement between labels and preds\n if (label_diff and pred_diff) or (not label_diff and not pred_diff):\n correct += 1\n return 1-(correct/(num_windows))",
"def calculate_priors(trainingLabels):\r\n sum = 0\r\n priors = {}\r\n totalSamples = len(trainingLabels)\r\n classes = set(trainingLabels)\r\n for cls in classes:\r\n numCls = len(filter(lambda x: x == cls, trainingLabels))\r\n sum += numCls\r\n priors[cls] = float(numCls) / float(totalSamples)\r\n \r\n # Sanity check: valid partitioning\r\n assert(sum == totalSamples)\r\n\r\n return priors",
"def equalizing_odds(preds, labels, protect):\r\n counts = [[[0.0, 0.0] for i in range(len(np.unique(protect)))] for _ in range(len(np.unique(labels)))]\r\n for pred, label, subgroup in zip(preds, labels, protect):\r\n counts[int(label)][int(subgroup)][0] += int(pred == label)\r\n counts[int(label)][int(subgroup)][1] += 1\r\n return [[round(p[0] / p[1], 3) for p in l] for l in counts]",
"def map_clusters(labels, rows):\r\n counts = Counter(labels)\r\n mappings = {c + 1: ((counts[c] / rows) * 100) for c in sorted(counts)}\r\n\r\n return mappings",
"def get_error(scores, labels):\r\n bs = scores.size(0) # 'bs' stands for 'batch size'\r\n predicted_labels = scores.argmax(dim = 1) # Tensor with 'bs' entries\r\n indicator = (predicted_labels == labels) # Tensor containing 'True' for each success\r\n num_matches = indicator.sum().item()\r\n return 1 - (num_matches / bs)"
] |
[
"0.65524256",
"0.6286647",
"0.6196709",
"0.6085883",
"0.60324734",
"0.5888701",
"0.5882019",
"0.58427835",
"0.5783772",
"0.5759874",
"0.5710676",
"0.5702147",
"0.5696532",
"0.56714094",
"0.5647144",
"0.5626384",
"0.56258297",
"0.5609299",
"0.5585062",
"0.5583836",
"0.55611545",
"0.55585104",
"0.5554531",
"0.5553341",
"0.5550562",
"0.5541051",
"0.5537496",
"0.55331826",
"0.5527628",
"0.55275136"
] |
0.7236042
|
0
|
Ensure that a vertex will appear in at least one of the possible positions
|
def vertex_at_least_once(self, vertex):
    clauses = []
    for position in range(0, self.graph.num_vertices):
        clauses.append(ClauseVariable(False, position, vertex))
    return clauses
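
A minimal sketch of what this builds, assuming the returned list is read as a single CNF clause (as the companion vertices_at_least_once and write_minisat code in this dataset suggests); the three-position graph is hypothetical:

# vertex_at_least_once(v) -> one clause:
#     x(position=0, vertex=v) OR x(position=1, vertex=v) OR x(position=2, vertex=v)
# i.e. vertex v must appear at at least one of the possible positions.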
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()",
"def has_vertex(self, x, y):\n\n return min(x, y) > 0 and x <= self.width and y <= self.height",
"def is_vertex(self): \n return False",
"def contains_several_vertices(self, currentState):\n\t\treturn True if sum(currentState) > 3 else False",
"def hasvertices(self):\n if len(self.vertices) > 0:\n return True\n else:\n return False",
"def vertices_at_least_once(self):\n clauses = []\n for vertex in range(0,self.graph.num_vertices):\n clauses.append(self.vertex_at_least_once(vertex))\n return clauses",
"def _validateVertex(self, v):\n if v < 0 or v >= self._V:\n raise Exception(\"vertex {} is not between 0 and {}\".format(v, (self._V-1)))",
"def is_vertex(self):\n return False",
"def is_vertex(self):\n return True",
"def is_vertex(self):\n return True",
"def is_legal(vtx, color):\n for neighbor in vtx[\"adjacent\"]:\n if VERTICES[neighbor][\"color\"] is color:\n return False\n return True",
"def has_vertex(self, vertex) -> bool:\n return self._start is vertex or self._end is vertex",
"def is_valid_projection(proj: Projection):\n \"\"\"every square corner must appear in the edge list exactly once\"\"\"\n for square in proj.square_list:\n for corner in [square.zero, square.one, square.two, square.three]:\n contained_in_edge_list = False\n for edge in proj.inter_square_edges:\n if corner in edge:\n if contained_in_edge_list:\n raise ValueError(f\"corner: {corner} connects to more than one edge.\")\n else:\n contained_in_edge_list = True\n\n if not contained_in_edge_list:\n raise ValueError(f\"corner: {corner} does not connect to any other corners.\")\n print(f\"The projection with {proj.square_list} is checked. It does not violate obvious restrictions.\")",
"def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4",
"def has_vertex(t, tri, vertex):\n for i in range(3):\n if t[tri][i] == vertex:\n return True\n return False",
"def vertex_at_most_once(self,vertex):\n clauses = []\n for (p1,p2) in itertools.combinations(range(0,self.graph.num_vertices),2):\n clause = [ ClauseVariable(True,vertex,p1),\n ClauseVariable(True,vertex,p2)]\n clauses.append(clause)\n return clauses",
"def _vertex_constraint_violated(self, vertex, **kwargs):\n key_index = self._vconstraints.get(vertex.label, {})\n\n # first check the entity properties for constraint violations\n # Then check any additional properties for constraint violations.\n # Additional properties are for cases like `.set_property`\n for props in [vertex.properties, kwargs]:\n for key, value in props.items():\n if key not in key_index:\n continue\n\n for indexed_entity in key_index[key]:\n if indexed_entity != vertex:\n if indexed_entity.properties[key] == value:\n raise interfaces.ConstraintViolation(\n \"{!r} violated constraint {!r}\".format(\n vertex, key\n )\n )",
"def checkVertices(vertices, limits):\n isWithin = True\n for i,v in enumerate(vertices):\n x = v[0]\n y = v[1]\n z = v[2]\n if x < limits[0][0] or x > limits[0][1]:\n isWithin = False\n break\n if y < limits[1][0] or y > limits[1][1]:\n isWithin = False\n break\n if z < limits[2][0] or z > limits[2][1]:\n isWithin = False\n break\n return isWithin",
"def vertices_saturating(self, constraint):\n from sage.libs.ppl import C_Polyhedron, Poly_Con_Relation\n result = []\n for i,v in enumerate(self.minimized_generators()):\n v = C_Polyhedron(v)\n if v.relation_with(constraint).implies(Poly_Con_Relation.saturates()):\n result.append(self.vertices()[i])\n return tuple(result)",
"def valid(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Check for duplicate values in lines\n for line in range(9):\n seen = []\n for row in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in rows\n for row in range(9):\n seen = []\n for line in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in subgrids\n for (subgrid_line, subgrid_row) in [(subg_ln, subg_rw) for subg_ln in range(3) for subg_rw in range(3)]:\n seen = []\n for (line, row) in [(ln, rw) for ln in range(3) for rw in range(3)]:\n if self.grid[3*subgrid_line + line][3*subgrid_row + row] is None:\n pass\n elif self.grid[3*subgrid_line + line][3*subgrid_row + row] in seen:\n return False\n else:\n seen.append(self.grid[3*subgrid_line + line][3*subgrid_row + row])\n # No duplicates found\n return True",
"def hasNoDoubleVertices(self):\n assert all(self.vertices.count(v) == 1 for v in self.vertices)\n return (all(all(v1 == v2 or v1.dist(v2) > COMPARISON_EPSILON for v2 in self.vertices)\n for v1 in self.vertices) and\n all(self.vertices.count(v) == 1 for v in self.vertices))",
"def is_pos_valid(self, pos):\n if pos is None:\n pos = (0, 0)\n assert isinstance(pos, tuple)\n\n if self.grid_map[pos[0], pos[1]] in [self.empty_value, 0.7]:\n return True\n else:\n return False",
"def pertenece(self,v):\n return v in self.vertices.keys()",
"def __can_enter(self, position, traversed):\n row, col = position\n # Check index values\n if row < 0 or col < 0:\n return False\n if row >= self.__row_count or col >= self.__col_count:\n return False\n # Check if already traversed\n if traversed[row][col]:\n return False\n # Check if blocked\n if self.__grid[row][col].blocked:\n return False\n return True",
"def contains(self, vertex):\n raise NotImplementedError(\"Bound has not been implemented yet.\")",
"def is_position_allowed(new_x, new_y):\n\n return min_x <= new_x <= max_x and min_y <= new_y <= max_y",
"def is_connected(self, vertices_encountered = None, start_vertex=None):\n\n if vertices_encountered is None:\n vertices_encountered = set()\n gdict = self.__graph_dict\n vertices = list(gdict.keys()) # list is necessary in python 3\n # if empty list return\n if len(vertices) == 0 :\n return False\n if not start_vertex:\n # Choose a vertex vertex from graph as starting point\n start_vertex = vertices[0]\n vertices_encountered.add(start_vertex)\n if len(vertices_encountered) != len(vertices):\n for vertex in gdict[start_vertex]:\n if vertex not in vertices_encountered:\n if self.is_connected(vertices_encountered,vertex):\n return True\n else:\n return True\n return False",
"def check_empty(self, coord):\n x, y, z = coord\n if self.perlin_3d(x, y, z) <= 0:\n return True\n else:\n return False",
"def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True",
"def check_place(self, positions):\n return self.size == len(set(positions[i] + i for i in range(self.size))) == len(\n set(positions[i] - i for i in range(self.size)))"
] |
[
"0.72547096",
"0.65933186",
"0.65661526",
"0.64855134",
"0.6433209",
"0.6399874",
"0.6321705",
"0.62870926",
"0.626445",
"0.626445",
"0.623239",
"0.6083356",
"0.6008976",
"0.60075384",
"0.59953636",
"0.5975935",
"0.5967519",
"0.58793044",
"0.5859508",
"0.58269125",
"0.580967",
"0.5800707",
"0.57747495",
"0.57696706",
"0.5756806",
"0.5756665",
"0.5721894",
"0.57039523",
"0.5702076",
"0.56964517"
] |
0.6731959
|
1
|
Ensure that a vertex will appear at most once; this is done by considering all pairs of positions and ensuring that the vertex can only ever appear at one position in each pair.
|
def vertex_at_most_once(self, vertex):
    clauses = []
    for (p1, p2) in itertools.combinations(range(0, self.graph.num_vertices), 2):
        clause = [ClauseVariable(True, vertex, p1),
                  ClauseVariable(True, vertex, p2)]
        clauses.append(clause)
    return clauses
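
A small sketch of the clause pattern and clause count this pairwise at-most-one encoding yields; the number of positions n is a hypothetical value:

import math

n = 5                                   # hypothetical number of positions
# Each pair (p1, p2) contributes the binary clause
#     NOT x(vertex, p1) OR NOT x(vertex, p2)
# so a single vertex generates C(n, 2) clauses:
num_clauses_for_one_vertex = math.comb(n, 2)   # -> 10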
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def vertices_at_least_once(self):\n clauses = []\n for vertex in range(0,self.graph.num_vertices):\n clauses.append(self.vertex_at_least_once(vertex))\n return clauses",
"def vertex_at_least_once(self,vertex):\n clauses = []\n for position in range(0,self.graph.num_vertices):\n clauses.append(ClauseVariable(False,position,vertex))\n return clauses",
"def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()",
"def hasNoDoubleVertices(self):\n assert all(self.vertices.count(v) == 1 for v in self.vertices)\n return (all(all(v1 == v2 or v1.dist(v2) > COMPARISON_EPSILON for v2 in self.vertices)\n for v1 in self.vertices) and\n all(self.vertices.count(v) == 1 for v in self.vertices))",
"def pair_is_consistent(graph, u, v):\n relations = get_all_relations(graph, u, v)\n\n if 1 != len(relations):\n return False\n\n return list(relations)[0]",
"def contains_several_vertices(self, currentState):\n\t\treturn True if sum(currentState) > 3 else False",
"def valid(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Check for duplicate values in lines\n for line in range(9):\n seen = []\n for row in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in rows\n for row in range(9):\n seen = []\n for line in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in subgrids\n for (subgrid_line, subgrid_row) in [(subg_ln, subg_rw) for subg_ln in range(3) for subg_rw in range(3)]:\n seen = []\n for (line, row) in [(ln, rw) for ln in range(3) for rw in range(3)]:\n if self.grid[3*subgrid_line + line][3*subgrid_row + row] is None:\n pass\n elif self.grid[3*subgrid_line + line][3*subgrid_row + row] in seen:\n return False\n else:\n seen.append(self.grid[3*subgrid_line + line][3*subgrid_row + row])\n # No duplicates found\n return True",
"def random_vertex_set(self, k, E=None):\n if E is None:\n E = set()\n S = [None for _ in xrange(k)]\n E = list(E)\n for i in xrange(k):\n S[i] = ifilter(lambda x: x not in E, produce(self.random_vertex)).next()\n E.append(S[i][0])\n return tuple(S)",
"def vertices(size):\n return set(range(size))",
"def vertices_saturating(self, constraint):\n from sage.libs.ppl import C_Polyhedron, Poly_Con_Relation\n result = []\n for i,v in enumerate(self.minimized_generators()):\n v = C_Polyhedron(v)\n if v.relation_with(constraint).implies(Poly_Con_Relation.saturates()):\n result.append(self.vertices()[i])\n return tuple(result)",
"def _remove_single_vertex(self, x):\n if x not in self.vertices:\n return False\n del self.vertices[x]\n for vert in self.edges[x].keys():\n del self.edges[vert][x]\n del self.edges[x]",
"def candidate_map(self):\n candidates = [[set(range(1, 10)) for _dummy in range(9)] for _dummy in range(9)]\n vertex_value_unknown = [[True for _dummy in range(9)] for _dummy in range(9)]\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] in range(1, 10):\n candidates[line][row] = set([self.grid[line][row]])\n vertex_value_unknown[line][row] = False\n for i in range(9):\n if i != row:\n candidates[line][i].discard(self.grid[line][row])\n if i != line:\n candidates[i][row].discard(self.grid[line][row])\n if line - line%3 + i//3 != line or row - row%3 + i%3 != row:\n candidates[line - line%3 + i//3][row - row%3 + i%3].discard(self.grid[line][row])\n # Further reduce candidate map\n reduce_cadidate_map_further = True\n while reduce_cadidate_map_further:\n reduce_cadidate_map_further = False\n total_number_of_candidates = sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)])\n for number in range(1, 10):\n for i in range(9):\n # Check for single possible vertex for *number* in candidate map line *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[i][j]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[i][seen_in_j[0]]:\n candidates[i][seen_in_j[0]] = set([number])\n vertex_value_unknown[i][seen_in_j[0]] = False\n # Discard other candidates for *number* in corresponding row and subsquare\n for j in range(9):\n if j != i:\n candidates[j][seen_in_j[0]].discard(number)\n if i - i%3 + j//3 != i:\n candidates[i - i%3 + j//3][seen_in_j[0] - seen_in_j[0]%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(i//3) + j//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare//3) + j//3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map row *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[j][i]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[seen_in_j[0]][i]:\n candidates[seen_in_j[0]][i] = set([number])\n vertex_value_unknown[seen_in_j[0]][i] = False\n # Discard other candidates for *number* in corresponding line and subsquare\n for j in range(9):\n if j != i:\n candidates[seen_in_j[0]][j].discard(number)\n if i - i%3 + j%3 != i:\n candidates[seen_in_j[0] - seen_in_j[0]%3 + j//3][i - i%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(j//3) + i//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare%3) + j%3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map subsquare *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[3*(i//3) + j//3][3*(i%3) + j%3]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3]:\n candidates[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = set([number])\n vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = False\n # Discard other candidates for *number* in corresponding line and row\n for j in range(9):\n if j not in [3*(i%3), 3*(i%3) + 
1, 3*(i%3) + 2]:\n candidates[3*(i//3) + seen_in_j[0]//3][j].discard(number)\n if j not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]:\n candidates[j][3*(i%3) + seen_in_j[0]%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same line/row\n elif 1 < len(seen_in_j) < 4:\n lines = set()\n rows = set()\n for j in seen_in_j:\n lines.add(3*(i//3) + j//3)\n rows.add(3*(i%3) + j%3)\n if len(lines) == 1:\n line = lines.pop()\n for row in [rw for rw in range(9) if rw not in [3*(i%3), 3*(i%3) + 1, 3*(i%3) + 2]]:\n candidates[line][row].discard(number)\n elif len(rows) == 1:\n row = rows.pop()\n for line in [ln for ln in range(9) if ln not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]]:\n candidates[line][row].discard(number)\n if sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)]) < total_number_of_candidates:\n reduce_cadidate_map_further = True\n return candidates",
"def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4",
"def vertices(self):\n \n yielded = set()\n \n # Iterate over every tuple of edges, e.g. ..., (1, 2), (4, 3), ...\n for vertices in self.edges():\n # Iterate over every vertex in the tuple, e.g. ..., 1, 2, 4, 3, ...\n for vertex in vertices:\n # Yield if it has not been yielded already\n if vertex not in yielded:\n yield vertex",
"def MeshVtxAdjacentVtxs (strMesh, index, blnAbsolutConnections=False, blnCreate=False):\n \"\"\"custom function\"\"\"\n #-----------------------------------------------------------------------------------------------------------------------------------------\n def CullDuplicates(seq, idfun=None): \n # order preserving \n if idfun is None: \n def idfun(x): return x \n seen = {} \n result = [] \n for item in seq: \n marker = idfun(item) \n if marker in seen: continue \n seen[marker] = 1 \n result.append(item) \n return result\n #-----------------------------------------------------------------------------------------------------------------------------------------\n MeshVtxAdjacentVtxs = []\n if rs.IsMesh(strMesh)==False : \n print \"strMesh is not an mesh\"\n return None\n if type(index)==type(\"string\"):\n print \"index is not an integer\"\n return None\n if type(index)==type(0.1): index = int(index)\n\n arrVertices = rs.MeshVertices (strMesh)\n arrFaceVertices = rs.MeshFaceVertices(strMesh)\n\n intCount = 0\n arrAdjacentVtxs = []\n for arrFace in arrFaceVertices:\n blnIsAdjacent = False\n for arrVtxIndex in arrFace:\n if arrVtxIndex == index :\n blnIsAdjacent = True\n if blnIsAdjacent :\n if blnAbsolutConnections :\n if arrFace[2]==arrFace[3] :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex)\n else :\n if index == arrFace[0] :\n arrAdjacentVtxs.append( arrFace[3] )\n arrAdjacentVtxs.append( arrFace[1] )\n elif index == arrFace[1] :\n arrAdjacentVtxs.append( arrFace[0] )\n arrAdjacentVtxs.append( arrFace[2] )\n elif index == arrFace[2] :\n arrAdjacentVtxs.append( arrFace[1] )\n arrAdjacentVtxs.append( arrFace[3] )\n elif index == arrFace(3) :\n arrAdjacentVtxs.append( arrFace[2] )\n arrAdjacentVtxs.append( arrFace[0] )\n else :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex )\n if type(arrAdjacentVtxs) != type([]) : return None\n arrOrderAdjacentVtxs = CullDuplicates(arrAdjacentVtxs)\n if blnCreate :\n arrStrPts = []\n for arrVtxIndex in arrOrderAdjacentVtxs:\n rs.AddPoint ( arrVertices[arrVtxIndex] )\n arrStrPts.append( arrVertices[arrVtxIndex] )\n return arrStrPts\n else :\n return arrOrderAdjacentVtxs",
"def add_vertex(self, vertex_id): # O(1) time complexity\n self.vertices[vertex_id] = set() \n\n # additional options (class)\n '''\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = {}\n\n else:\n return \"Vertex is already in Graph\"\n '''",
"def vertex_no_simultaneos(self):\n clauses = []\n for position in range(0,self.graph.num_vertices):\n for (v1,v2) in itertools.combinations(range(0,self.graph.num_vertices),2):\n clauses.append([ClauseVariable(True,v1,position),\n ClauseVariable(True,v2,position)])\n return clauses",
"def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok",
"def _vertex_constraint_violated(self, vertex, **kwargs):\n key_index = self._vconstraints.get(vertex.label, {})\n\n # first check the entity properties for constraint violations\n # Then check any additional properties for constraint violations.\n # Additional properties are for cases like `.set_property`\n for props in [vertex.properties, kwargs]:\n for key, value in props.items():\n if key not in key_index:\n continue\n\n for indexed_entity in key_index[key]:\n if indexed_entity != vertex:\n if indexed_entity.properties[key] == value:\n raise interfaces.ConstraintViolation(\n \"{!r} violated constraint {!r}\".format(\n vertex, key\n )\n )",
"def hasvertices(self):\n if len(self.vertices) > 0:\n return True\n else:\n return False",
"def _ensure_order_consistent(self):\r\n if self.order_sum() != self.order_triangle() or \\\r\n self.force_reset_order is True:\r\n self._reset_order()\r\n self._have_reset_order = True\r\n else:\r\n self._have_reset_order = False\r\n return self._have_reset_order",
"def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"",
"def conflict_check() ->None:\r\n global conflict_space\r\n conflict_space = np.zeros(mShape)\r\n for x in range(shape):\r\n for y in range(shape):\r\n for z in range(y+1, shape):\r\n if example[x, y] == example[x, z]:\r\n conflict_space[x, y] = example[x, y]\r\n conflict_space[x, z] = example[x, z]\r\n if example[y, x] == example[z, x]:\r\n conflict_space[y, x] = example[y, x]\r\n conflict_space[z, x] = example[z, x]",
"def is_vertex(self): \n return False",
"def pairs_of_vertices(self):\n pairs_of_vertices = []\n for vertice in self.list_of_vertices:\n for edge in vertice.edges_list:\n if non_oriented:\n if (vertice, edge.linked[1]) and (edge.linked[1], vertice) not in pairs_of_vertices:\n pairs_of_vertices.append((vertice, edge.linked[1]))\n if not non_oriented:\n if (vertice, edge.linked[1]) not in pairs_of_vertices:\n pairs_of_vertices.append((vertice, edge.linked[1]))\n return pairs_of_vertices",
"def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in range(n):\n if (j == i or goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out",
"def check_quadruples(self, solver=None):\n if not self._has(\"p\"):\n self.pTable()\n r = self._.triple_solution = {}\n g = self._.triple_solution_generator = {}\n zero = {}\n done = {}\n for u in range(self._.d + 1):\n for v in range(u, self._.d + 1):\n for w in range(v, self._.d + 1):\n if self._.p[u, v, w] == 0:\n continue\n S = self.tripleEquations(u, v, w)\n g[u, v, w] = self.tripleSolution_generator(u, v, w, S=S,\n solver=solver)\n try:\n sol = sort_solution(next(g[u, v, w]))\n except StopIteration:\n raise InfeasibleError(\n \"no solution found for a triple of vertices \"\n \"at distances (%d, %d, %d)\" % (u, v, w))\n s = S.subs(sol)\n r[u, v, w] = {sol: s}\n zero[u, v, w] = {(h, i, j)\n for h in range(self._.d + 1)\n for i in range(self._.d + 1)\n for j in range(self._.d + 1)\n if s[h, i, j] == 0\n and self._check_zero(h, i, j, u, v, w)}\n done[u, v, w] = set()\n check = {t for t in g if len(zero[t]) > 0}\n while len(check) > 0:\n for t in list(check):\n if t not in check:\n continue\n check.discard(t)\n u, v, w = t\n for d in list(zero[t]):\n if d not in zero[t]:\n continue\n try:\n sol = sort_solution(\n next(g[t].send((True,\n self._.triple[t][d] >= 1))))\n if sol not in r[t]:\n s = r[t][sol] = self._.triple[t].subs(sol)\n zero[t] -= {z for z in zero[t] if s[z] != 0}\n except (StopIteration, KeyError):\n h, i, j = d\n seen = {(t, d)}\n for lt, ld in {((u, h, i), (v, w, j)),\n ((v, h, j), (u, w, i)),\n ((w, i, j), (u, v, h))}:\n st = tuple(sorted(lt))\n if st not in zero:\n continue\n for tp, dp in zip(TPERMS, DPERMS):\n if tuple(lt[k] for k in tp) != st:\n continue\n sd = tuple(ld[k] for k in dp)\n if (st, sd) in seen:\n continue\n seen.add((st, sd))\n l = len(r[st])\n delete = set()\n for sol, s in r[st].items():\n if s[sd] != 0:\n delete.add(sol)\n for sol in delete:\n del r[st][sol]\n try:\n g[st].send((False,\n self._.triple[st][sd] == 0))\n if len(r[st]) == 0:\n sol = sort_solution(next(g[st]))\n r[st][sol] = \\\n self._.triple[st].subs(sol)\n l += 1\n except StopIteration:\n del g[st]\n except KeyError:\n pass\n if len(r[st]) == 0:\n raise InfeasibleError(\n \"found forbidden quadruple \"\n \"wxyz with d(w, x) = %d, \"\n \"d(w, y) = %d, d(w, z) = %d, \"\n \"d(x, y) = %d, d(x, z) = %d, \"\n \"d(y, z) = %d\" % (sd + st))\n if len(r[st]) < l:\n zero[st] = {(sh, si, sj)\n for sh in range(self._.d + 1)\n for si in range(self._.d + 1)\n for sj in range(self._.d + 1)\n if\n (sh, si, sj) not in done[st]\n and self._check_zero(sh, si,\n sj, *st)\n and all(\n s[sh, si, sj] == 0\n for s in r[st].values())}\n if len(zero[st]) == 0:\n check.discard(st)\n else:\n check.add(st)\n zero[t].discard(d)\n done[t].add(d)",
"def consistent(h, phi):\n for n in h: # for each node in h\n if phi(n) in h and phi(n) not in h:\n return False\n return True",
"def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]",
"def _remove_unreferenced_vertices(self):\n # convert vertices to an array\n vertex_array = np.array(self.mesh_.vertices())\n num_v = vertex_array.shape[0]\n\n # fill in a 1 for each referenced vertex\n reffed_array = np.zeros([num_v, 1])\n for f in self.mesh_.triangles():\n if f[0] < num_v and f[1] < num_v and f[2] < num_v:\n reffed_array[f[0]] = 1\n reffed_array[f[1]] = 1\n reffed_array[f[2]] = 1\n\n # trim out vertices that are not referenced\n reffed_v_old_ind = np.where(reffed_array == 1)\n reffed_v_old_ind = reffed_v_old_ind[0]\n reffed_v_new_ind = np.cumsum(reffed_array).astype(np.int) - 1 # counts number of reffed v before each ind\n\n try:\n self.mesh_.set_vertices(vertex_array[reffed_v_old_ind, :].tolist())\n if self.mesh_.normals() is not None:\n normals_array = np.array(self.mesh_.normals())\n self.mesh_.set_normals(normals_array[reffed_v_old_ind, :].tolist())\n except IndexError:\n return False\n\n # create new face indices\n new_triangles = []\n for f in self.mesh_.triangles():\n new_triangles.append([reffed_v_new_ind[f[0]], reffed_v_new_ind[f[1]], reffed_v_new_ind[f[2]]] )\n self.mesh_.set_triangles(new_triangles)\n return True"
] |
[
"0.6299476",
"0.6132211",
"0.60620284",
"0.6046663",
"0.5724134",
"0.5719957",
"0.5699396",
"0.5691852",
"0.5682706",
"0.5675804",
"0.5652677",
"0.5579296",
"0.55651826",
"0.5511426",
"0.550808",
"0.55038774",
"0.5494495",
"0.54938024",
"0.5456598",
"0.543388",
"0.54337287",
"0.5425789",
"0.5378837",
"0.53663045",
"0.5294483",
"0.5292887",
"0.528029",
"0.52752614",
"0.5270314",
"0.5258299"
] |
0.63261926
|
0
|
Decodes a minisat output instance back into a clause variable
|
def minisat_decode(clause_str):
    factor = ClauseVariable.encoding_factor()
    int_value = int(clause_str)
    compliment = (int_value < 0)
    int_value = abs(int_value)
    position = (int_value % factor) - 1
    vertex = math.ceil(int_value / factor) - 1
    return ClauseVariable(compliment, vertex, position)
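
A worked example of the decoding arithmetic, assuming a hypothetical encoding factor of 10 (ClauseVariable.encoding_factor() is not shown in this row):

# MiniSat literal "23":  compliment = False
#   position = (23 % 10) - 1        -> 2
#   vertex   = ceil(23 / 10) - 1    -> 2
# MiniSat literal "-23": same variable (vertex 2, position 2), compliment = True.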
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def decode_result(found):\n ...",
"def decode_results(self, outputs):\n ...",
"def question_to_statement(self, question, answer):\n qid = question[1]\n pos_statement = self.statements[qid]\n if answer:\n return pos_statement\n else:\n utterance = pos_statement.__str__().replace('is', 'is not')\n return self.human_sensor.utterance_to_statement(utterance)",
"def get_decoding_op(self):\n return self._dual.get_op('output')",
"def convert(self, sm):\n return self.visit(sm)",
"def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):",
"def _decode_result(self, result):\n if isinstance(result, list):\n return [self._decode_result(r) for r in result]\n elif isinstance(result, SimpleString):\n return result.value\n elif isinstance(result, SimpleError):\n return self._decode_error(result)\n else:\n return result",
"def unparse(self):\n return self.format.unparse(self.unparse_struct())",
"def from_output(cls, output: bytes, method: Method):\n s = None\n if b\"=====ERROR=====\" in output:\n s = cls.ERROR\n elif b\"=====UNKNOWN=====\" in output:\n s = cls.UNKNOWN\n elif b\"=====UNSATISFIABLE=====\" in output:\n s = cls.UNSATISFIABLE\n elif (\n b\"=====UNSATorUNBOUNDED=====\" in output or b\"=====UNBOUNDED=====\" in output\n ):\n s = cls.UNBOUNDED\n elif method is Method.SATISFY:\n if b\"==========\" in output:\n s = cls.ALL_SOLUTIONS\n elif b\"----------\" in output:\n s = cls.SATISFIED\n else:\n if b\"==========\" in output:\n s = cls.OPTIMAL_SOLUTION\n elif b\"----------\" in output:\n s = cls.SATISFIED\n return s",
"def decode(self, x):\n return x",
"def _massage_raw_pg_output_vals(self):\n pass",
"def decode_output(self, to_decode):\n if to_decode is not None:\n # return to_decode.decode(self.decode_type)\n return str(to_decode, self.decode_type)\n return False",
"def decode(decode_format):\n return output_from_decode",
"def parse(self, line, output_ds):\r\n JUMP = len(output_ds)\r\n output_ds.append(\"@SP\")\r\n output_ds.append(\"A=M\")\r\n output_ds.append(\"A=A-1\")\r\n output_ds.append(\"D=M\")\r\n output_ds.append(\"A=A-1\")\r\n output_ds.append(\"D=M-D\")\r\n output_ds.append(\"@JUMP\" + str(JUMP))\r\n output_ds.append(\"D;\" + self.jump_condition)\r\n output_ds.append(\"D=-1\")\r\n output_ds.append(\"@END\" + str(JUMP))\r\n output_ds.append(\"1;JMP\")\r\n output_ds.append(\"(JUMP\" + str(JUMP) + \")\")\r\n output_ds.append(\"D=0\")\r\n output_ds.append(\"(END\" + str(JUMP) + \")\")\r\n output_ds.append(\"@SP\")\r\n output_ds.append(\"M=M-1\")\r\n output_ds.append(\"A=M-1\")\r\n output_ds.append(\"M=D\")",
"def get_output(self):\r\n x = self.query('OUTP?')\r\n if x == None: return None\r\n return int(x)",
"def make_get_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_TYPE_* ___madz_TYPE_get_out_struct(){{\n return &___madz_OUTPUT;\n}}\n\n\"\"\"\n return res",
"def decode(self, z):\n if self.switch:\n x = self.bijecter(z, inverse=True)\n return self.decode_(x)\n else:\n return self.decode_(z)",
"def query(mdx_stmt):",
"def decode(self, line):\n try:\n commands = self.tokenize(line)\n for command in commands:\n self.delegate.output(str(command))\n self.execute(command)\n except EmptyStackException as e:\n self.delegate.error(str(e))\n except SmyrkRuntimeError as e:\n self.delegate.error(str(e))\n except KeyError as e:\n self.delegate.error('{0} is not defined'.format(str(e)))",
"def parse_output(use_json, output):\n return json.loads(output[0]) if use_json else parse_info.construct_tree(output)",
"def parse (self, line):\n result = self.program.parseString (line)\n return TranQL_AST (result.asList (), self.backplane)",
"def decode(self, coded_set):",
"def _get_output_from_query_validators(output, query):\n\n ret_dict = {}\n # if it is a valid dq query than apply the query and return the output\n if Dq.query_validator(query):\n output = Dq.str_to_dq_query(output, query)\n ret_dict.update({\n 'action_output': output,\n 'query_type': 'dq_query',\n 'operation': None,\n 'expected_value': None\n })\n # if the query is itself a dictionary\n elif isinstance(query, (dict, list)):\n # output could of of type dict/QDict\n # NOTE: QDict is the type of the parser output\n if isinstance(output, QDict):\n output = dict(output)\n\n ret_dict.update({\n 'action_output': output,\n 'query_type': 'non_dq_query',\n 'operation': '',\n 'expected_value': query\n })\n else:\n # check for the string query\n output = _string_query_validator(output, query)\n action_output = output['right_hand_value']\n operation = output['operation']\n value = output['left_hand_value']\n ret_dict.update({\n 'action_output': action_output,\n 'query_type': 'non_dq_query',\n 'operation': operation,\n 'expected_value': value\n })\n\n return ret_dict",
"def _get_statement_object(db_stmt):\n return Statement._from_json(json.loads(db_stmt.json.decode('utf-8')))",
"def _dinamic_decode(self):\n raise NotImplementedError",
"def output(self):\n return self.expr.lhs",
"def from_statement(cls, statement):\r\n return cls('\\n'.join(textwrap.dedent(statement).splitlines()[1:]))",
"def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)",
"def decode_output(output, charset):\n return [decode_row(row, charset) for row in output]",
"def decode(self, value):\r\n return value"
] |
[
"0.5478467",
"0.5344261",
"0.52268684",
"0.5153329",
"0.5108831",
"0.5054543",
"0.50407106",
"0.5035657",
"0.4960097",
"0.49479723",
"0.48915848",
"0.48678803",
"0.48613107",
"0.48597178",
"0.485587",
"0.48152465",
"0.47930557",
"0.47837904",
"0.4771931",
"0.47243187",
"0.4716859",
"0.4706934",
"0.47023642",
"0.46921572",
"0.46851063",
"0.46724984",
"0.46647018",
"0.46591488",
"0.46130782",
"0.45901743"
] |
0.5379999
|
1
|
Write out minisat input in the format of the minisat command runner
|
def write_minisat(self):
    num_variables = len(self.label_encodings)
    num_clauses = self.num_clauses
    clauses = self.clauses
    outfile = MinisatRunner.temp_in
    out = open(outfile, "w")
    try:
        # DIMACS CNF header: "p cnf <num_variables> <num_clauses>"
        out.write("p cnf %3d %3d\n" % (num_variables, num_clauses))
        for clause in clauses:
            for clause_variable in clause:
                out.write(" %3d" % self.minisat_encode_label(clause_variable))
            # each DIMACS clause line is terminated by a 0
            out.write(" 0\n")
    finally:
        out.close()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def write_output(self):",
"def write_run(run):\n r=Run(run)\n r.write_all()",
"def _write_start():\n from ._common import header\n\n out = \"{:5}{}\\n\".format(\"----*\", header)\n return [out[:11] + \"MOP: 123456789*123456789*1234\" + out[40:]]",
"def makeCommands(f0,psi0,th,logFileName,mat,fnyld,fnhrd):\n from mk.library.lib import gen_tempfile\n stdoutFileName = gen_tempfile(\n prefix='stdout-mkrun')\n # stdoutFileName ='/tmp/dump'\n\n cmd = 'python main.py --fn %s -f %5.4f -p %+6.1f -t %+7.2f --fnyld %s --fnhrd %s '%(\n logFileName,f0,psi0,th,fnyld,fnhrd)\n\n if mat!=-1:\n cmd = cmd + ' --mat %i'%mat\n cmd = cmd + ' > %s'%stdoutFileName\n print 'cmd:',cmd\n return cmd",
"def sub_command(self):\n self.write(\"@SP\\nAM=M-1\\nD=M\\nA=A-1\\nM=M-D\\n\")",
"def _write_run_mineos(parameters:RunParameters, save_name:str,\n l_run:int, l_min:int):\n\n # Set filenames\n execfile = '{0}_{1}.run_mineos'.format(save_name, l_run)\n ascfile = '{0}_{1}.asc'.format(save_name, l_run)\n eigfile = '{0}_{1}.eig'.format(save_name, l_run)\n modefile = '{0}_{1}.mode'.format(save_name, l_run)\n logfile = '{0}.log'.format(save_name)\n cardfile = '{0}.card'.format(save_name)\n\n _write_modefile(modefile, parameters, l_min)\n\n if os.path.exists(execfile):\n os.remove(execfile)\n\n fid = open(execfile, 'w')\n fid.write('{}/mineos_nohang << ! > {}\\n'.format(\n parameters.bin_path, logfile))\n fid.write('{0}.card\\n{0}_{1}.asc\\n{0}_{1}.eig\\n{0}_{1}.mode\\n!'.format(\n save_name, l_run))\n fid.close()\n\n return execfile",
"def test_apply_command(self):\n from pystarlab.starlab import Story\n king_command = \"makeking -w 1.5 -s 1454677882 -n 5 -i\"\n mass_command = \"makemass -i -l 0.1 -u 20 -s 1454677882\"\n\n mass_output = \"mass.out\"\n\n mass_path = os.path.join(DATA_DIR, mass_output)\n with open(mass_path, 'r') as f:\n mass_str = f.read()\n\n king_story = Story.from_single_command(king_command)\n mass_story = king_story.apply_command(mass_command)\n\n for line in zip(mass_str.splitlines(),\n str(mass_story).splitlines()):\n string_with_date = re.match(\"^ ===>\",line[0])\n if not string_with_date:\n self.assertEquals(line[0], line[1])",
"def write(self, command):\n self.cmd_emiter.emit(str(command))\n self.meas.write(command)",
"def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)",
"def writeOutput(self, output):",
"def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()",
"def write(self, args, gen, out=sys.stdout):\n seqsum = sniff(gen)\n\n out.write(str(seqsum))",
"def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))",
"def write(self, command):\n self.meas.write(command)",
"def add_command(self):\n self.write(\"@SP\\nAM=M-1\\nD=M\\nA=A-1\\nM=M+D\\n\")",
"def writer():\n\twhile True:\n\t\tw = (yield)\t\n\t\tprint('>>', w)",
"def main(args):\n # Results: print to console and also write to output file\n pass",
"def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))",
"def write(self, command):\n self.meas.write(command)",
"def _jobfile(self):\n job = self.job.format(fnum=self.fnum)\n with open(job, 'w') as f:\n f.write('#!/bin/sh\\n' + self.phast_cmmd + self.cleanup_cmmd)",
"def write(self, out):\r\n out.write('# {0:<11} {1:<6} {2:<6} {3:<6} {4}\\n'\r\n .format('Time(s)', 'X(mm)', 'Y(mm)', 'Z(um)', 'Tile'))\r\n for i in self: out.write(self.format_pt(i))",
"def write(self):\n # build up all commands into a single request to increase network perf\n connection = self.connection\n commands = self.commands\n try:\n connection.send_packed_command(connection.pack_commands([c.args for c in commands]))\n except ConnectionError as e:\n for c in commands:\n c.result = e",
"def test_command_list(self):\n from pystarlab.starlab import Story\n commands = [\"makeking -w 1.5 -s 1454677882 -n 5 -i\",\n \"makemass -i -l 0.1 -u 20 -s 1454677882\"]\n\n mass_output = \"mass.out\"\n\n mass_path = os.path.join(DATA_DIR, mass_output)\n with open(mass_path, 'r') as f:\n mass_str = f.read()\n\n mass_story = Story.from_command_list(commands)\n for line in zip(mass_str.splitlines(),\n str(mass_story).splitlines()):\n string_with_date = re.match(\"^ ===>\",line[0])\n if not string_with_date:\n self.assertEquals(line[0], line[1])",
"def __build_cmd(self, infname, outdir):\n self._outdirname = os.path.join(outdir, \"trimmomatic_output\")\n cmd = [\"trimmomatic\",\n infname,\n \"-o\", self._outdirname]\n self._cmd = ' '.join(cmd)",
"def reports_cli():",
"def _writeOutput(self):\n head = \"Station\\tX\\tY\\tZ\\tUEast\\tUNorth\\tUUp\\tSigEast\\tSigNorth\\tSigUp\\n\"\n outFmt = \"%s\" + 9 * \"\\t%g\" + \"\\n\"\n\n f = open(self.outputFile, 'w')\n f.write(head)\n\n for stationNum in range(self.numStations):\n outLine = outFmt % (self.stations[stationNum],\n self.coords[stationNum, 0], self.coords[stationNum, 1],\n self.coords[stationNum, 2],\n self.dispNoise[stationNum, 0],\n self.dispNoise[stationNum, 1],\n self.dispNoise[stationNum, 2],\n self.sigmaEast, self.sigmaNorth, self.sigmaUp)\n f.write(outLine)\n\n f.close()\n\n return",
"def write(self):\n\n cmnd = ['lualatex', '--interaction=nonstopmode', self.config['name']]\n\n logging.debug('Command is: ' + ' '.join(cmnd))\n\n pipes = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n std_out, std_err = pipes.communicate()\n\n if pipes.returncode != 0:\n # An error happened!\n sys.stdout.buffer.write(std_out)\n sys.stdout.buffer.write(std_err)\n err_msg = \"Code: %s\" % pipes.returncode\n print(err_msg, file=sys.stderr)\n raise Exception('Error')",
"def logruncmd(self, cmd):\n self.logtxt(\"\\n[%s %s]\" % (datetime.datetime.now(), os.getcwd()), 'info')\n self.logtxt(\"%s\" % cmd, 'cmd')",
"def output_run(run_data, name):\n\n print(json.dumps(run_data, indent=4))\n ret = run_data.get('return', {})\n display_output(\n {name: ret}, \n\tout=run_data.get('out', 'nested'),\n\topts = salt.config.minion_config('/dev/null'))",
"def write_job_manifest(self):\n import time\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n for k, v in self.job.items():\n hout.write(';'.join([k, v]) + '\\n')"
] |
[
"0.5751969",
"0.57239246",
"0.56050336",
"0.55627775",
"0.54654676",
"0.54399246",
"0.53894407",
"0.5349983",
"0.5335354",
"0.5317311",
"0.5316816",
"0.529644",
"0.52800936",
"0.52418727",
"0.5218804",
"0.517004",
"0.51613575",
"0.5155607",
"0.5141282",
"0.51330376",
"0.5125186",
"0.51192105",
"0.5098051",
"0.5096218",
"0.5081919",
"0.50766844",
"0.5073717",
"0.506513",
"0.50609696",
"0.50576067"
] |
0.61422515
|
0
|
Choose the form of payment
|
def choice_payment(payment=None):
    if payment is None:
        return 3  # Alipay (支付宝)
    else:
        return payment
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def request_payment(self, payment_type=None):\n self.payment_type = payment_type\n\n # choose a card AI \n self.choose_card_to_discard()\n #self.make_payment(card)",
"def test_individual_ACH(self):\n form_data = self.form_data()\n form_data['payment_type'] = 'DirectDebit'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())",
"def payment(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\n payment_obj = request.registry.get('payment.acquirer')\n sale_order_obj = request.registry.get('sale.order')\n\n order = request.website.sale_get_order(context=context)\n order.write({'usersess': request.session['webcalc_session_id']})\n #order.env.cr.commit()\n redirection = self.checkout_redirection(order)\n if redirection:\n return redirection\n\n shipping_partner_id = False\n if order:\n if order.partner_shipping_id.id:\n shipping_partner_id = order.partner_shipping_id.id\n else:\n shipping_partner_id = order.partner_invoice_id.id\n\n values = {\n 'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context),\n 'usersess': request.session['webcalc_session_id']\n }\n values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)\n values.update(sale_order_obj._get_website_data(cr, uid, order, context))\n\n if not values['errors']:\n acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)\n values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))\n render_ctx = dict(context, submit_class='btn btn-primary', submit_txt=_('Завершить оформление'))\n for acquirer in values['acquirers']:\n acquirer.button = payment_obj.render(\n cr, SUPERUSER_ID, acquirer.id,\n '/',\n order.amount_total,\n order.pricelist_id.currency_id.id,\n partner_id=shipping_partner_id,\n tx_values={\n 'return_url': '/shop/payment/validate',\n },\n context=render_ctx)\n #vips_shop\n return request.website.render(\"vips_shop.payment\", values)",
"def test_submit_iso20022_payment_instruction(self):\n pass",
"def input_payment_details(self):\n pass",
"def get_form_class(self):\n if self.survey.get_requires_payment():\n return AuthorizenetSurveyPurchaseForm\n return super(AuthorizenetSurveyPurchaseCreate, self).get_form_class()",
"def awaiting_payment(self):",
"def getPayment(self):\n pass",
"def process_payment(self, form):\n # Let the default processor handle surveys that don't require payment\n if not self.survey.get_requires_payment():\n return super(AuthorizenetSurveyPurchaseCreate, self).process_payment(form)\n\n user = self.request.user\n try:\n charge = authorize.Transaction.sale({\n \"amount\": self.survey.cost,\n \"email\": user.email,\n \"credit_card\": {\n \"card_number\": str(form.cleaned_data[\"card_number\"]),\n \"card_code\": str(form.cleaned_data[\"card_ccv\"]),\n \"expiration_date\": str(form.cleaned_data[\"card_expiry\"]),\n },\n \"billing\": {\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n }\n })\n\n # Show any Authorize.net errors to the user\n except authorize.exceptions.AuthorizeError as exception:\n try:\n # Unpack exceptions with multiple error messages (AuthorizeInvalidError)\n errors = []\n for code, msg in exception.asdict().items():\n errors.append(forms.ValidationError(msg, code=code))\n raise forms.ValidationError(errors)\n except AttributeError:\n # Exception doesn't implement asdict() (AuthorizeError)\n raise forms.ValidationError(str(exception))\n\n # On success, save the transaction details to the form instance\n form.instance.amount = self.survey.cost\n form.instance.payment_method = \"Authorize.Net\"\n try:\n form.instance.transaction_id = charge[\"transaction_response\"][\"trans_id\"]\n except KeyError:\n form.instance.transaction_id = \"Unknown\"",
"def request_payment(self, payment_type=None):\n self.payment_type = payment_type\n self.disable_all_buttons()\n self.hand.enable()",
"def get_form_class():\n return RazorPaymentForm",
"def select_payment(self):\r\n return select_payment_by_id(self.__payment_id__)",
"def name(self) -> Text:\n\n return \"cc_payment_form\"",
"def fill_out_bill_payment_form(self, payee, account, amount):\n\n\t\t# 1 Payee Name:\n\t\tpayee_full_name = payee.FIRST_NAME + \" \" + payee.LAST_NAME\n\t\tself.type_payee_name(payee_full_name)\n\t\twith allure.step(\"Verify payee name\"):\n\t\t\tprint('payee name: {}'.format(payee_full_name))\n\t\t\tassert payee_full_name == self.payee_name()\n\n\t\t# 2 Address:\n\t\tself.type_address(payee.ADDRESS)\n\t\twith allure.step(\"Verify payee address\"):\n\t\t\tprint('payee address: {}'.format(payee.ADDRESS))\n\t\t\tassert payee.ADDRESS == self.address()\n\n\t\t# 3 City:\n\t\tself.type_city(payee.CITY)\n\t\twith allure.step(\"Verify payee city\"):\n\t\t\tprint('payee city: {}'.format(payee.CITY))\n\t\t\tassert payee.CITY == self.city()\n\n\t\t# 4 State:\n\t\tself.type_state(payee.STATE)\n\t\twith allure.step(\"Verify payee state\"):\n\t\t\tprint('payee state: {}'.format(payee.STATE))\n\t\t\tassert payee.STATE == self.state()\n\n\t\t# 5 Zip Code:\n\t\tself.type_zip_code(payee.ZIP_CODE)\n\t\twith allure.step(\"Verify payee zip code\"):\n\t\t\tprint('payee zip code: {}'.format(payee.ZIP_CODE))\n\t\t\tassert payee.ZIP_CODE == self.zip_code()\n\n\t\t# 6 Phone:\n\t\tself.type_phone(payee.PHONE)\n\t\twith allure.step(\"Verify payee phone\"):\n\t\t\tprint('payee phone: {}'.format(payee.PHONE))\n\t\t\tassert payee.PHONE == self.phone()\n\n\t\t# 7 Account:\n\t\tself.type_account(account)\n\t\twith allure.step(\"Verify payee account\"):\n\t\t\tprint('payee account: {}'.format(account))\n\t\t\tassert account == self.account()\n\n\t\t# 8 Verify Account:\n\t\tself.type_verify_account(account)\n\t\twith allure.step(\"Verify 'Verify Account'\"):\n\t\t\tassert account == self.verify_account()\n\n\t\t# 9 Amount\n\t\tself.type_amount(amount)\n\t\twith allure.step(\"Verify amount\"):\n\t\t\tprint('payment amount: {}'.format(amount))\n\t\t\tassert amount == self.amount()\n\n\t\treturn None",
"def proceed_to_checkout_and_payment(self):\r\n # 1- summary\r\n logger.info('starting wizard with summary')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.cart_navigation a.standard-checkout')))\r\n self.automation.driver.execute_script(\"document.querySelectorAll('.cart_navigation a.standard-checkout')[0]\"\r\n \".click()\")\r\n\r\n # 2-sign in & 3-address\r\n logger.info('2-sign in & 3-address')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'button[name=\"processAddress\"]')))\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=\"processAddress\"]').click()\r\n\r\n # 4- shipping\r\n logger.info('4- shipping')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#uniform-cgv span')))\r\n\r\n is_checked = self.automation.driver.find_element_by_css_selector('#uniform-cgv span').get_attribute('class')\r\n if not is_checked: # agree\r\n self.automation.driver.execute_script(\"document.querySelectorAll('#cgv')[0].click()\")\r\n\r\n self.automation.driver.find_element_by_css_selector('button[name=processCarrier]').click()\r\n logger.info('agree and confirmed')\r\n\r\n # pay by bank wire\r\n logger.info('pay by bank wire')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '.payment_module a')))\r\n\r\n self.automation.driver.find_element_by_css_selector('.payment_module a').click()\r\n\r\n # 5- payment and confirm\r\n logger.info('5- payment and confirm')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#cart_navigation button')))\r\n self.automation.driver.find_element_by_css_selector('#cart_navigation button').click()\r\n\r\n # back to orders\r\n logger.info('back to orders')\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, 'p.cart_navigation .button-exclusive.btn')))\r\n self.automation.driver.find_element_by_css_selector('p.cart_navigation .button-exclusive.btn').click()\r\n\r\n # how many items do you have\r\n time.sleep(1.5)\r\n self.automation.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#order-list tbody tr')))\r\n items = self.automation.driver.find_elements_by_css_selector('#order-list tbody tr')\r\n logger.info(f'You have \"{len(items)}\" at your order')",
"def payment_type(self) -> str:\n return pulumi.get(self, \"payment_type\")",
"def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()",
"def form_valid(self, form):\n auth_login(self.request, form.get_user())\n if self.request.session.get('payment'):\n Payment.objects.filter(id=self.request.session['payment']).update(\n user_id=self.request.user.revolvuserprofile, entrant_id=self.request.user.revolvuserprofile)\n payment = Payment.objects.get(id=self.request.session['payment'])\n Tip.objects.filter(id=payment.tip_id).update(user_id=self.request.user.revolvuserprofile)\n Project.objects.get(id=payment.project_id).donors.add(self.request.user.revolvuserprofile)\n AnonymousUserDonation.objects.filter(payment_id=self.request.session['payment']).delete()\n del self.request.session['payment']\n\n # messages.success(self.request, 'Logged in as ' + self.request.POST.get('username'))\n # return redirect(reverse('project:view', kwargs={'title':title})+'?amount='+amount+'&tip='+tip)\n messages.success(self.request, 'Logged in as ' + self.request.POST.get('username'))\n return redirect(self.next_url)",
"def test_get_payment_method(self):\n paymentMethod = PaymentMethod(self.client, 123)\n\n self.assertEqual(paymentMethod.id, 123)\n self.assertTrue(paymentMethod.is_default)\n self.assertEqual(paymentMethod.type, \"credit_card\")",
"def decider(wallet):\n found = False\n if len(wallet.get_cards()) == 0:\n return found\n elif len(wallet.get_cards()) == 1:\n card = wallet.get_cards()[0]\n found = list()\n found.append(card.get_issuer())\n found.append(card.get_card_name())\n found.append(-1)\n return found\n \"\"\"\n First, we need to check for any valid SUB. If so, if there's one,\n then that will be selected, otherwise, narrow the options to just \n those with active sign_up_bonus and do the usual process.\n \"\"\"\n sub_cards = list()\n subs = False\n for card in wallet.get_cards():\n sub = card.get_sign_up_bonus()\n if sub.check_active():\n sub_cards.append(card)\n subs = True\n if len(sub_cards) == 1:\n found = list()\n card = sub_cards[0]\n found.append(card.get_issuer())\n found.append(card.get_card_name())\n found.append(0)\n return found\n elif len(sub_cards) > 1:\n subs = True\n category = decider_menu()\n # PayPal is currently a quarterly category on several cards\n paypal = \"\"\n while paypal != \"N\" and paypal != \"Y\":\n paypal = input(\n \"Will you be purchasing through PayPal? (Y/N): \")\n if paypal == \"Y\":\n category = category + \"(PayPal)\"\n break\n elif paypal == \"N\":\n break\n else:\n print(\"Invalid input\")\n main_categories = wallet.get_generic_category_names()\n if category in main_categories:\n best_card = wallet.find_best_for_category(category)\n found = list()\n found.append(best_card.get_issuer())\n found.append(best_card.get_card_name())\n found.append(category)\n value = best_card.check_categories(category)\n if best_card.get_sign_up_bonus().check_active():\n value += best_card.get_sign_up_bonus().get_return_on_spend() * 100\n found.append(value)\n return found\n best = list()\n best.append(0)\n best.append(0)\n \"\"\"\n Here, depending on whether of not there are active sign-up bonuses, the\n function will go through each card in the wallet to find the best value.\n A future goal is implementing the Wallet class, in which I will have a \n dictionary attribute which will contain the best card mapped to its\n category i.e. {\"dining\":AMEX Gold}, and whenever new cards are added, it\n will check then so as to prevent algorithmic backups which occur now.\n \"\"\"\n card_list = wallet.get_cards()\n if subs:\n card_list = sub_cards\n for card in card_list:\n sub = card.get_sign_up_bonus()\n value = card.check_categories(category)\n if \"(\" in category:\n if \"PayPal\" in category:\n category = category[:len(category) - 8]\n if (card.check_categories(\"quarterly\") !=\n card.check_categories(\"else\")):\n value = card.check_categories(\"quarterly\")\n value += card.check_categories(category)\n if \"IHG\" in category:\n if value != 25 * .6:\n value = card.check_categories(\"travel\")\n if subs:\n value += sub.get_return_on_spend() * 100\n if value > best[0]:\n best[0] = value\n best[1] = card\n if subs:\n print(\"Note: This recommendation is made because\"\n \" of a sign-up bonus, not only multipliers!\")\n found = list()\n card = best[1]\n found.append(card.get_issuer())\n found.append(card.get_card_name())\n found.append(category)\n found.append(best[0])\n return found",
"def payment_mode(self):\n return self._payment_mode",
"def get(self, payment):\n return payment",
"def get_payment_type(self):\n payment_types = dict(settings.SUBSCRIPTION_PAYMENT_METHODS)\n return payment_types.get(self.payment_type, \"N/A\")",
"def check_payment(self):\n return self.payment",
"def printPayment(self):\n print self.output()",
"def make_payment():\n\n response = VoiceResponse()\n if 'caller_name' not in session:\n session['caller_name'] = request.args.get(\n 'caller_name') or \"Twilio Payment\"\n if 'payment_amount' not in session:\n session['payment_amount'] = request.args.get('amount') or \"5000\"\n if 'card_number' not in session:\n response.redirect('/get_card_number')\n elif 'expiry' not in session:\n response.redirect('/get_expiry')\n elif 'cvv' not in session:\n response.redirect('/get_cvv')\n else:\n call_sid = request.form.get('CallSid')\n session['call_sid'] = call_sid\n response.redirect('/process_payment')\n\n return str(response)",
"def proceed(request):\n if request.user.is_authenticated():\n return submit(request)\n agreement_form = forms.DevAgreementForm({'read_dev_agreement': True},\n instance=None, request=request)\n return render(request, 'submit/terms.html',\n {'step': 'terms', 'agreement_form': agreement_form,\n 'proceed': True})",
"def post(self, request, *args, **kwargs):\n try:\n form = self.get_form()\n except RedirectNeeded as exc:\n messages.add_message(request, messages.SUCCESS, \"Payment redirects to %s\" % exc.args[0])\n return HttpResponseRedirect(exc.args[0])\n #except Exception as exc:\n # return HttpResponseBadRequest(exc, content_type=\"text/plain\")\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment succeeded\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment failed\")\n return self.form_invalid(form)",
"def get_eeg_payment(self, type):\n\n assert type in ['chp', 'pv'], 'Type is invalid (must be PV or CHP)'\n\n if type == 'chp':\n eeg_pay = self._dict_eeg_self['chp']\n elif type == 'pv':\n eeg_pay = self._dict_eeg_self['pv']\n\n return eeg_pay",
"def test_retrieve_iso20022_payment_instruction(self):\n pass"
] |
[
"0.66832644",
"0.635746",
"0.6349613",
"0.6226105",
"0.619822",
"0.6092286",
"0.60875404",
"0.60554206",
"0.6008281",
"0.5872958",
"0.58315575",
"0.5797233",
"0.57806766",
"0.5778123",
"0.5763689",
"0.5734996",
"0.56877613",
"0.5645532",
"0.5631109",
"0.5627127",
"0.5595929",
"0.5572253",
"0.5542962",
"0.5515728",
"0.5503128",
"0.5495991",
"0.54852384",
"0.5476083",
"0.5474746",
"0.5466941"
] |
0.70399904
|
0
|
Returns lorentzian profile. (1/pi) * 0.5*gamma / ((0.5*gamma)^2 + x^2)
|
def lorentz(x, gamma):
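    # Lorentzian (Cauchy) profile with FWHM gamma, normalized to unit area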
    return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma)**2 + x**2)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def lorentz(x, x0, gamma): \n return (0.5/pi) * gamma / ((x-x0)**2 + 0.25 * gamma**2)",
"def profile_plaw(r_range, rho_x, r_x, gamma, **kwargs):\n profile = rho_x * (r_range / r_x)**(-gamma)\n profile[(r_range < r_x)] = 0.\n\n return profile",
"def s2profile(r,r0,A,B):\n x = r/r0\n res = A*4./(np.exp(x)+np.exp(-x))**2 + B\n return res",
"def lgamma(x):\n return 0.0",
"def gamma(x1, x2):\r\n gamma1 = math.exp(a / (1 + a * x1/(b * x2)) ** 2.0) \r\n gamma2 = math.exp(b / (1 + b * x2/(a * x1)) ** 2.0)\t\t\r\n return gamma1, gamma2",
"def gamma(x):\n return 0.0",
"def prob1():\n x, y = sy.symbols('x, y')\n return sy.Rational(2,5) * sy.exp(x**2 - y) * sy.cosh(x + y) + \\\n sy.Rational(3,7) * sy.log(x*y + 1)",
"def gprofile(r,sig,A,B):\n res = A*np.exp(-0.5*(r/sig)**2)+B\n return res",
"def grad_step2(self, x, u_prime):\n v = 0.25 * (x @ self.w)\n w = u_prime + v\n z = w.t() @ x\n if self.debug:\n print(f\"-> v={v}\")\n print(f\"-> w={w}\")\n print(f\"-> z={z}\\n\\n\")\n return w, z",
"def profile(x):\n return x",
"def gamma(x):\r\n gammax = ((x + 0.055) / 1.055) ** 2.4 if x > 0.04045 else x / 12.92\r\n return gammax",
"def convertGammaToLaguerre(self,y):\n return (y-self.low)*(self.beta)",
"def convertLaguerreToGamma(self,x):\n return x/self.beta+self.low",
"def LorentzFactor(self):\n # Use of abs() and x ** 0.5 provides a more stable calculation of lorentz\n # factor than math.sqrt() at high velocities.\n return 1 / abs( 1 - Particle.BetaVelocity(self) * Particle.BetaVelocity(self))**0.5",
"def y01(x):\r\n # return pini*((special.gamma(k1+p1))/(special.gamma(k1)*special.gamma(p1))*((x/l)**(k1-1))*(1-(x/l))**(p1-1))/7.3572\r\n return 1/100*x*epsilon*1/q*1e21\r\n # return 1e13*1/sigma*np.sqrt(2*np.pi) * np.exp(-np.power(x - u, 2.) / (2 * np.power(sigma, 2.)))-1e15*1/sigma\r",
"def phi2_coefficient(L):\r\n\r\n if 0 < L < 120:\r\n return L / 120\r\n if L >= 120:\r\n return 1",
"def profile_LLR_binom_ratio_err(k1,n1, k2,n2, alpha=0.05, EPS=1E-15, rang=10, nd=200,\n nd_interp=2000, r0_val=None, tol=1e-9, return_full=False):\n \n def logL(x):\n\n # Numerical protection\n phi = np.clip(x[0], EPS, 1-EPS)\n p1 = np.clip(x[1], EPS, 1-EPS)\n\n # Other terms do not contribute in the ratio, than these below\n ll1 = k1*np.log(p1) + (n1-k1)*np.log(1 - p1)\n ll2 = k2*np.log(p1/phi) + (n2-k2)*np.log(1 - p1/phi)\n return ll1 + ll2\n\n ### Find the numerical Maximum Likelihood\n \"\"\"\n x0 = np.array([(k1/n1) / (k2/n2), (k1/n1)])\n res = scipy.optimize.minimize(lambda x : -logL(x), x0=x0, method='Nelder-Mead', tol=tol)\n r_MLE = res.x[0]\n p1_MLE = res.x[1]\n \"\"\"\n # Closed form\n r_MLE = np.clip((k1/n1) / (k2/n2), EPS, 1-EPS)\n p1_MLE = np.clip((k1/n1), EPS, 1-EPS)\n \n # ------------------------------------------------------------------------\n # Profile likelihood on the ratio r = p1/p2\n\n if r0_val is None:\n r0_val = np.linspace(r_MLE / rang, r_MLE*rang, nd)\n\n LLR = np.zeros(len(r0_val))\n\n # Closed-form quadratic solution [negative branch]\n @numba.njit\n def p1star_closed_form(r0):\n return (k1+n2+k2*r0+n1*r0 - np.sqrt((-k1-n2-k2*r0-n1*r0)**2 - 4*(n1+n2)*(k1*r0+k2*r0))) / (2*(n1+n2))\n\n # Discretize over r0\n for i in range(len(r0_val)):\n r_0 = r0_val[i]\n def profileNegLL(p1):\n return -logL(np.array([r_0, p1]))\n\n # r_0 is our parameter of interest (non-nuisance), profile over the nuisance parameters\n #res = scipy.optimize.minimize(profileNegLL, x0=[p1_MLE], method='Nelder-Mead', tol=tol)\n #p1_star = res.x[0]\n p1_star = p1star_closed_form(r_0)\n\n # Profile log-likelihood ratio\n LLR[i] = 2 * ( logL(np.array([r_MLE, p1_MLE])) - logL(np.array([r_0, p1_star])) )\n \n LLR[np.isnan(LLR)] = 0\n LLR[np.isinf(LLR)] = 0\n\n # Interpolate values\n func_LLR = interp1d(r0_val, LLR, kind='cubic', fill_value='extrapolate')\n r0_dense = np.linspace(np.min(r0_val), np.max(r0_val), nd_interp)\n LLR_dense = func_LLR(r0_dense)\n\n # Find chi2 distribution limit (log-likelihood ratio\n # distributed asymptotically for H0 like chi2)\n chi2 = stats.chi2.ppf(1 - alpha, df=1)\n ind = (np.where(LLR_dense <= chi2))[0] # Note <= not <\n min_ind = ind[0]\n max_ind = ind[-1]\n\n if return_full:\n return np.array([r0_dense[min_ind], r0_dense[max_ind]]), r0_dense, LLR_dense, chi2\n else:\n return np.array([r0_dense[min_ind], r0_dense[max_ind]])",
"def FN2(lam):\n return 1.034 + 3.17 *1e-4 *lam**(-2)",
"def _logprobratio(prob1, prob2):\n return log(prob1) / log(prob2)",
"def fun_lorentzian(p,r):\n return p[1] / ((r/p[0])**2 + 1)",
"def r2(self) -> float:\n zx = (self.true - np.mean(self.true)) / np.std(self.true, ddof=1)\n zy = (self.predicted - np.mean(self.predicted)) / np.std(self.predicted, ddof=1)\n r = np.sum(zx * zy) / (len(self.true) - 1)\n return float(r ** 2)",
"def llr2_to_prob(llr):\n return 1 / (1 + math.pow(2, -llr))",
"def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]",
"def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')",
"def getK2(inp):\n\treturn 0.9/(1-math.exp(-getLambda(inp)))",
"def profileMain(self, M, z):\n c = self.c_M_z(M,z)\n r200 = self.r200_M(M)\n rho0 = self.rho0_c(c)\n Rs = r200/c\n return r200,rho0,c,Rs",
"def prob_to_llr2(prob):\n return math.log2((prob + EPSLONG) / (1 - prob + EPSLONG))",
"def hyp2f1_repro(a,b,c,z):\n return gamma(b - a)*gamma(c)/(gamma(b)*gamma(c - a)*(-z)**a)*hyp2f1(a, a - c + 1, a - b + 1, 1/z) + \\\n (gamma(a - b)*gamma(c))/(gamma(a)*gamma(c - b)*(-z)**b)*hyp2f1(b, b - c + 1, b - a + 1, 1/z)",
"def gamma(self, tl):\n\t return self.GAMMA_0*(1. + self.GAMMA_1*(tl - self.TO) + self.GAMMA_2*(tl - self.TO)**2.);",
"def nu(x, beta2):\n return 3 * (1 - beta2 - beta2*x) / beta2 / (1+x)"
] |
[
"0.67186457",
"0.61636627",
"0.6080515",
"0.6047759",
"0.5884529",
"0.58815324",
"0.58796155",
"0.58250004",
"0.577892",
"0.57624334",
"0.5681882",
"0.5681246",
"0.56588656",
"0.5637242",
"0.562519",
"0.56056446",
"0.5587748",
"0.55819565",
"0.5564628",
"0.5533224",
"0.5523688",
"0.5516393",
"0.55129546",
"0.55027217",
"0.549759",
"0.54926735",
"0.5487438",
"0.54849404",
"0.54748946",
"0.54641294"
] |
0.6791607
|
0
|
Returns gaussian profile. 1/(sqrt(2*pi)*gamma) * exp(-(x/gamma)^2 / 2)
|
def gauss(x, gamma):
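    # zero-mean normal distribution with standard deviation gamma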
return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))",
"def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)",
"def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))",
"def gamma(x):\r\n gammax = ((x + 0.055) / 1.055) ** 2.4 if x > 0.04045 else x / 12.92\r\n return gammax",
"def gaussian_likelihood(x, mu, log_std):\n prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))\n return tf.reduce_sum(prob, axis=1)",
"def gamma(x):\n return 0.0",
"def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var",
"def gaussianDist(self, x, mu, var):\n val = 1/(math.sqrt(2 * math.pi * var)) * math.exp(-1 * (x - mu)**2 / (2*var))\n return val",
"def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))",
"def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2",
"def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2",
"def gaussian(p, x):\n #2008-09-11 15:11 IJC: Created for LINEPROFILE\n # 2011-05-18 11:46 IJC: Moved to analysis.\n # 2013-04-11 12:03 IJMC: Tried to speed things up slightly via copy=False\n # 2013-05-06 21:42 IJMC: Tried to speed things up a little more.\n\n if not isinstance(x, np.ndarray):\n x = array(x, dtype=float, copy=False)\n\n if len(p)==3:\n p = array(p, copy=True)\n p = concatenate((p, [0]))\n #elif len(p)==4:\n # p = array(p, copy=False)\n\n return p[3] + p[0]/(p[1]*sqrt(2*pi)) * exp(-(x-p[2])**2 / (2*p[1]**2))",
"def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background",
"def pvalue_gaussian(self):\n \n pv = 2 * stats.norm.sf(abs(self.TS_prime_obs), loc=0, scale=1)\n return(pv)",
"def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)",
"def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)))",
"def calculateGaussian(x, mean, stdev):\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)",
"def gaussian(var):\n stddev = np.sqrt(var)\n return stats.norm(0, stddev)",
"def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))",
"def gaussian_likelihood(x, mu, log_std):\n std = tf.exp(log_std)\n pre_sum = tf.square((x - mu)/std) + 2*log_std + np.log(2*np.pi)\n return -0.5 * tf.reduce_sum(pre_sum, axis=1)",
"def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)",
"def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)",
"def gamma_pdf(x, shape, scale):\n gamma_x = x**(shape-1)*(np.exp(-x/scale) / (sps.gamma(shape)*scale**shape))\n return gamma_x",
"def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)",
"def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))",
"def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2",
"def gaussian_likelihood(input_, mu_, log_std):\n pre_sum = -0.5 * (((input_ - mu_) / (\n tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(\n 2 * np.pi))\n return tf.reduce_sum(pre_sum, axis=1)",
"def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r",
"def gaussian_cdf(x, _erf=erf):\n return (1 + _erf(x / math.sqrt(2))) / 2",
"def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')"
] |
[
"0.75206435",
"0.71392494",
"0.6998809",
"0.6952576",
"0.69341147",
"0.68784",
"0.679881",
"0.67568934",
"0.6733555",
"0.6725777",
"0.6718593",
"0.6709496",
"0.6703196",
"0.6626777",
"0.6625177",
"0.6618803",
"0.6614837",
"0.65624034",
"0.6553338",
"0.6513534",
"0.65014327",
"0.648445",
"0.6483952",
"0.64833796",
"0.64612675",
"0.64388967",
"0.64204794",
"0.63897866",
"0.63843864",
"0.63589984"
] |
0.7844662
|
0
|
callback for receiving TwistStamped message on /twist_cmd topic
|
def cb_twist_cmd(self,msg):
# log message
# rospy.logdebug('DBWNode::twist_cmd_cb %s',msg)
# store message
self.twist = msg.twist
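    # per ROS convention, linear.x carries the commanded forward speed and angular.z the commanded yaw rate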
self.velocity = msg.twist.linear.x
self.yaw = msg.twist.angular.z
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)",
"def on_tweet(self, tweet):\n pass",
"def twistCallback(msg):\n global robot\n\n\n # extract message components and scale\n fwdRev = (msg.linear.x)/FWD_REV_SCALING\n spin = (msg.angular.z)/SPIN_SCALING\n\n\n # Reduce cross-coupling of commands\n if (abs(spin)<1 and abs(fwdRev)>20): spin=0\n if (abs(fwdRev) < 5 and abs(spin)>5): fwdRev=0\n\n # Pass command to robot base\n execution_time = EXECUTION_TIME\n \n robot.base.set_vel(fwd_speed=fwdRev, \n turn_speed=spin, \n exe_time=execution_time)",
"def TwitterListener():\n l = StdOutListener()\n auth = OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n stream = Stream(auth, l)\n api = API(auth_handler=auth)\n config.HASHTAGS = [x['name'] for x in api.trends_place(id=44418)[0]['trends']]\n\n print(\"Stream listener is up and running\")\n stream.filter(track=config.HASHTAGS)",
"def on_data(self, _data):\n # There are times where the twitter stream fails and will send a NoneType object\n if not _data:\n print('**************************************************')\n print('**** Stream error, received empty data object ****')\n print('**************************************************')\n return\n\n data = json.loads(_data)\n\n # Only follow the created at tweets, ignoring replies, etc when trolling\n if CREATED_AT in data:\n if data[USER][ID_STRING] not in self.trolling_ids:\n return\n\n # Only troll raw tweets, not replies\n if data[REPLY_TO_STATUS_ID]:\n return\n\n print('New tweet, current count:', self.received_tweet_count)\n\n # If received enough than retweet it\n if self.received_tweet_count == TWEET_INTERVAL:\n raw_tweet_url = TWITTER_URL + data[USER][SCREEN_NAME] + STATUS_PATH + data[ID_STRING]\n print(\n '\\n== New Tweet Received @', datetime.now(),\n 'Tweet URL:', raw_tweet_url, '=='\n )\n\n time.sleep(RETWEET_WAIT_PERIOD)\n self.twitter_api.retweet(data[ID])\n\n print('\\n== Successfully retweeted! ==')\n\n self.received_tweet_count = 0\n\n else:\n self.received_tweet_count += 1",
"def on_status(self, data):\n print data\n print data.text\n if not data.text:\n return\n #features = trainer.get_feature_vector(data._json)\n if self.classifier and not self.classifier.predict(features):\n return\n\n print \"Tweet arriving\"\n\n hashtags, user_mentions, urls = join_entity(data._json)\n\n user = data.author\n\n # BUG in python (fixed in python 3.2): %z not supported in strptime\n statement = \"%s;%s\" % (self.insert_tweet_query, self.insert_user_query)\n\n try:\n cursor = self.connection.cursor()\n cursor.execute(statement, (\n data.id, user.id, data.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n data.in_reply_to_user_id_str, data.in_reply_to_status_id_str,\n data.retweeted, data.favorited, data.favorite_count,\n data.retweet_count, data.source, data.text, urls, hashtags,\n user_mentions, user.id, user.created_at, user.description,\n user.favourites_count, user.followers_count,\n user.friends_count, user.statuses_count,\n user.listed_count, user.time_zone,\n user.verified, user.geo_enabled,\n user.lang, user.location,\n user.screen_name))\n cursor.close()\n self.connection.commit()\n except Exception as inst:\n print type(inst) # the exception instance\n print inst.args # arguments stored in .args\n print inst # __str__ allows args to printed directly\n\n if not self.queue.empty():\n self.classifier = self.queue.get()\n return False # force a restart of the stream\n else:\n return True",
"def ServerSyncReceived(self,message):",
"def emit(self, tweet):\n raise NotImplementedError",
"def on_data(self, data):\n status = json.loads(data)\n # increase the counter\n self.counter += 1\n\n retweet, rt_user, tweet_text, created_time = organize_tweet(status) \n\n if status['user']['id_str'] in infos.twitterids:\n\n who = status['user']['id_str']\n\n try:\n replied_to = status['in_reply_to_screen_name']\n except:\n replied_to = 'NULL'\n \n else:\n \n who = status['user']['screen_name']\n \n try:\n replied_to = infos.twitterids[status['in_reply_to_user_id_str']]\n except:\n replied_to = 'NULL'\n \n tweet = {\n \n 'id': status['user']['id_str'], #status.user.id_str,\n 'who': who,\n 'replied_to': replied_to,\n 'retweeted': retweet, #status['retweeted'], #status.retweeted,\n 'retweeted_from': rt_user,\n 'text': tweet_text,\n 'timestamp' : created_time\n }\n\n #write to mongoDB here\n collection.insert_one(tweet)\n print(f'New tweet arrived: {tweet[\"text\"]}')\n\n\n # check if we have enough tweets collected\n if self.max_tweets == self.counter:\n # reset the counter\n self.counter=0\n # return False to stop the listener\n return False",
"def tweet_callback(status):\n if status[-3].endswith('}'):\n status = json.loads(status)\n if tweet_is_valid(status):\n if CLIENTS:\n status = geocode_status(status)\n broadcast(status)",
"async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)",
"def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()",
"def run(self):\n broker_channel = broker.init_broker_channel()\n\n print(\"Starting Twitter Stream Thread: %s\" % self.routing_key)\n while not self.shutdown_flag.is_set():\n tweet = 'Tweet {0}: {1}'.format(random.randint(1, 100), int(time.time()))\n lng = random.uniform(self.location['sw']['lng'], self.location['ne']['lng'])\n lat = random.uniform(self.location['sw']['lat'], self.location['ne']['lat'])\n\n cleaned_data = json.dumps({'tweet': tweet, 'lat': lat, 'lng': lng})\n\n broker_channel.basic_publish(exchange=broker.broker_exchange, routing_key=self.routing_key, body=cleaned_data)\n time.sleep(10+random.randint(0, 3))\n\n print(\"Exiting Twitter Stream Thread: %s\" % self.routing_key)",
"def tweet(api, message):\n status = api.PostUpdate(message)",
"def on_success(self, data):\n if 'text' not in data:\n logging.warning(\"Recieved tweet without text\")\n return\n\n # Save the name of the collection task alongside the tweet data\n data['collection'] = self.name\n\n # Calculate a timestamp object from the data\n ts_float = float(data['timestamp_ms'])\n data['timestamp_obj'] = datetime.utcfromtimestamp(ts_float/1000)\n\n # Insert the tweet into the database\n insertid = None\n if self.db is not None:\n insertid = self.db.insert_one(data).inserted_id\n\n # Call the callback functions if exists\n if self.callbacks is not None:\n for f in self.callbacks:\n f(self.name, data, insertid)",
"def cli_mdt_callback(request):\n telemetry_pb = proto.telemetry_bis_pb2.Telemetry()\n telemetry_pb.ParseFromString(request.data)\n logging.debug(__format_message(request, __dump_as_json))\n logging.info(__format_message(telemetry_pb, __dump_as_json))",
"def handle_recv(self,stream,msgs):\n pass",
"def on_publish(client, userdata, mid):\n print(\"Message Published.\")",
"def _on_message(self, client, userdata, msg):\n # print 'receiving message'\n epoch_time = self._get_epoch_time()\n time_string = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime())\n if not self.file.closed:\n self.file.write(str(epoch_time) + ',' + time_string + \",\" + msg.topic + \",\" + str(msg.payload) + '\\n')",
"def checkYT(event, context):\r\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\r\n print(pubsub_message)\r\n\r\n api_service_name = \"youtube\"\r\n api_version = \"v3\"\r\n youtube = build(api_service_name, api_version, developerKey=YOUTUBE_DEVELOPER_KEY, cache_discovery=False)\r\n\r\n # Setup Datastore client, key, and entity\r\n client = datastore.Client()\r\n kind = 'date'\r\n name = 'lastTime'\r\n key = client.key(kind, name)\r\n\r\n ent = client.get(key)\r\n lastTime = \"\"\r\n\r\n # Get the last time checkYT ran successfully\r\n if ent:\r\n lastTime = ent['lastTime']\r\n else:\r\n ent = datastore.Entity(key=key)\r\n lastTime = str(datetime.now() - timedelta(days=1))\r\n\r\n # Convert from string into RFC 3339 format\r\n lastTime = datetime.strptime(lastTime, '%Y-%m-%d %H:%M:%S.%f').isoformat()\r\n lastTime = lastTime[:19] + 'Z'\r\n\r\n # Get new YouTube uploads\r\n request = youtube.search().list(\r\n part=\"id,snippet\",\r\n channelId=YOUTUBE_CHANNEL_ID,\r\n order=\"date\",\r\n publishedAfter=lastTime\r\n )\r\n response = request.execute()\r\n\r\n # Twitter API\r\n api = twitter.Api(consumer_key=TWITTER_CONSUMER_KEY,\r\n consumer_secret=TWITTER_CONSUMER_SECRET,\r\n access_token_key=TWITTER_ACCESS_TOKEN_KEY,\r\n access_token_secret=TWITTER_ACCESS_TOKEN_SECRET)\r\n\r\n # Tweet the videos in chronological order\r\n videos = reversed(response[\"items\"])\r\n status = True\r\n\r\n for vid in videos:\r\n try:\r\n message = \"New Upload!\\n{0}\\nhttps://www.youtube.com/watch?v={1}\".format(vid['snippet']['title'][:70], vid['id']['videoId'])\r\n message = html.unescape(message)\r\n status = api.PostUpdate(message)\r\n print(\"{0} just posted: {1}\".format(status.user.name, status.text))\r\n status = True\r\n except Exception as inst:\r\n status = False\r\n print(inst, \"Unable to tweet.\")\r\n\r\n\r\n if status:\r\n # Update Datastore with last successful run of checkYT\r\n ent['lastTime'] = str(datetime.now())\r\n client.put(ent)\r\n return \"checkYT completed\"\r\n\r\n return \"checkYT failed\"",
"def _twist_callback(self, cmd):\n self.set_velocity(cmd.linear.x, cmd.angular.z)",
"def msg_callback(self, *args, **kwargs):\n log(*args, name=self.idf.name, **kwargs)",
"def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)",
"def command_tweet(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n tweet = ext.get_last_tweet(self.config['twitter'])\n\n for url in tweet.get('images', []):\n self.send_photo_url(bot, update, url)\n\n messages = [\n u'{text}',\n '[@{user[screen_name]}](https://twitter.com/{user[screen_name]}) '\n '- {ago}'\n ]\n\n for msg in messages:\n self.send_message(bot, update, msg.format(**tweet))",
"def handler(event,context):\n send_tweet(random.choice(potential_tweets))",
"def on_message(ws, msg):\n data = json.loads(msg)\n if \"results\" in data:\n # This prints out the current fragment that we are working on\n text = data['results'][0]['alternatives'][0]['transcript'].lower()\n print(text)\n # Pass it to the callback\n if CALLBACK(text):\n # If it recognized something, stop listening\n global RUNNING\n RUNNING = False",
"def on_data(self, data):\n\n t = json.loads(data)\n\n\n if 'extended_tweet' in t:\n text = t['extended_tweet']['full_text']\n else:\n text = t['text']\n\n\n is_tweet_reply = t['in_reply_to_status_id'] == None\n is_quote = t['is_quote_status'] == False\n\n if 'RT' not in t['text'] and is_tweet_reply and is_quote:\n\n tweet = {'text': text, 'username' : t['user']['screen_name'],\n 'number_of_followers' : t['user']['followers_count'],\n 'location' : t['user']['location'], 'number_of_friends' : t['user']['friends_count'], 'retweet_count' :\n t['retweet_count']}\n\n\n logging.critical('\\n\\n\\nNEW TWEET INCOMING: ' + tweet['text']) \n \n \n load_tweet_into_mongo(tweet)\n logging.critical('\\n\\n\\nSUCCESSFULLY DUMPED INTO MONGO!')",
"def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0",
"def tweet(msg):\r\n m = \"\\n{}\\n\".format(msg)\r\n arcpy.AddMessage(m)\r\n print(m)\r\n print(arcpy.GetMessages())"
] |
[
"0.63539106",
"0.6165861",
"0.5858024",
"0.5807404",
"0.57037586",
"0.5641173",
"0.5571492",
"0.55087745",
"0.55045485",
"0.5440439",
"0.5433192",
"0.54172784",
"0.5383471",
"0.5330142",
"0.53273475",
"0.5322685",
"0.53024226",
"0.528606",
"0.52760834",
"0.52526104",
"0.5234911",
"0.5232494",
"0.5232291",
"0.5225781",
"0.52217835",
"0.52167547",
"0.5208839",
"0.5182402",
"0.51713145",
"0.51708865"
] |
0.70640725
|
0
|
callback for receiving TwistStamped message on /current_velocity topic
|
def cb_current_velocity(self,msg):
# log message
# rospy.logdebug('DBWNode::velocity_cb %s',msg)
# store message
self.current_twist = msg.twist
self.current_velocity = msg.twist.linear.x
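A minimal sketch of how a callback like this is typically registered in a rospy node; the class name, node name, and queue size below are illustrative assumptions, and only the topic and message type come from the query above.

import rospy
from geometry_msgs.msg import TwistStamped

class DBWNode(object):
    def __init__(self):
        rospy.init_node('dbw_node')
        self.current_twist = None
        self.current_velocity = 0.0
        # Route every TwistStamped published on /current_velocity into the callback.
        rospy.Subscriber('/current_velocity', TwistStamped,
                         self.cb_current_velocity, queue_size=1)

    def cb_current_velocity(self, msg):
        # store message (same logic as the document snippet above)
        self.current_twist = msg.twist
        self.current_velocity = msg.twist.linear.x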
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rover_velocity_callback(self, msg):\n\t\t# print(\"Rover velocity callback message: {}\".format(msg))\n\t\tpass",
"def velocity_cmd_callback(self, data):\n with self.lock:\n self.twist = data",
"def cb_twist_cmd(self,msg):\n # log message\n # rospy.logdebug('DBWNode::twist_cmd_cb %s',msg)\n # store message\n self.twist = msg.twist\n self.velocity = msg.twist.linear.x\n self.yaw = msg.twist.angular.z",
"def velocity_callback(self, msg_velocity):\n if self.last_pose.header.stamp.to_sec() > 0: # skip first frame\n\n dt = (msg_velocity.header.stamp - self.last_pose.header.stamp).to_sec()\n\n # Integrate the relative movement between the last pose and the current\n theta_delta = self.last_theta_dot * dt\n # to ensure no division by zero for radius calculation:\n if np.abs(self.last_theta_dot) < 0.000001:\n # straight line\n x_delta = self.last_v * dt\n y_delta = 0\n else:\n # arc of circle\n radius = self.last_v / self.last_theta_dot\n x_delta = radius * np.sin(theta_delta)\n y_delta = radius * (1.0 - np.cos(theta_delta))\n\n # Add to the previous to get absolute pose relative to the starting position\n theta_res = self.last_pose.theta + theta_delta\n x_res = self.last_pose.x + x_delta * np.cos(self.last_pose.theta) - y_delta * np.sin(self.last_pose.theta)\n y_res = self.last_pose.y + y_delta * np.cos(self.last_pose.theta) + x_delta * np.sin(self.last_pose.theta)\n\n # Update the stored last pose\n self.last_pose.theta = theta_res\n self.last_pose.x = x_res\n self.last_pose.y = y_res\n\n # Stuff the new pose into a message and publish\n msg_pose = Pose2DStamped()\n msg_pose.header = msg_velocity.header\n msg_pose.header.frame_id = self.veh_name\n msg_pose.theta = theta_res\n msg_pose.x = x_res\n msg_pose.y = y_res\n self.pub_pose.publish(msg_pose)\n\n self.last_pose.header.stamp = msg_velocity.header.stamp\n self.last_theta_dot = msg_velocity.omega\n self.last_v = msg_velocity.v",
"def twistCallback(msg):\n global robot\n\n\n # extract message components and scale\n fwdRev = (msg.linear.x)/FWD_REV_SCALING\n spin = (msg.angular.z)/SPIN_SCALING\n\n\n # Reduce cross-coupling of commands\n if (abs(spin)<1 and abs(fwdRev)>20): spin=0\n if (abs(fwdRev) < 5 and abs(spin)>5): fwdRev=0\n\n # Pass command to robot base\n execution_time = EXECUTION_TIME\n \n robot.base.set_vel(fwd_speed=fwdRev, \n turn_speed=spin, \n exe_time=execution_time)",
"def dvl_callback(self, msg):\n self.mutex.acquire()\n\n self.ni[0] = msg.velocity_instrument.x\n self.ni[1] = msg.velocity_instrument.y\n self.ni[2] = msg.velocity_instrument.z\n\n self.mutex.release()\n rospy.loginfo(\"%s receive dvl\", self.node_name)",
"def _twist_callback(self, cmd):\n self.set_velocity(cmd.linear.x, cmd.angular.z)",
"def _cb_cmd_vel(self,msg):\r\n print \"Walker velocity command received: \",msg\r\n vx=msg.linear.x\r\n vy=msg.linear.y\r\n vt=msg.angular.z\r\n self.start()\r\n self.set_desired_velocity(vx,vy,vt)",
"def refSpeed_callback(self, msg):\n self.mutex.acquire()\n\n self.speed_ref[0] = msg.vx\n self.speed_ref[1] = msg.vy\n self.speed_ref[2] = msg.vz\n\n self.mutex.release()\n rospy.loginfo(\"%s receive speed reference\", self.node_name)",
"def cmd_vel_callback(self, msg):\n # Just store the desired velocity. The actual control runs on odometry callbacks\n v_l = msg.linear\n v_a = msg.angular\n self.v_linear_des = numpy.array([v_l.x, v_l.y, v_l.z])\n self.v_angular_des = numpy.array([v_a.x, v_a.y, v_a.z])",
"def callback_cmdvel(msg):\n global _stop\n rospy.logdebug(\"received cmd_vel: (%f,%f)\", msg.linear.x, msg.angular.z)\n msgtosend = msg\n if _stop and msg.linear.x > 0:\n msgtosend.linear.x = 0\n rospy.logdebug(\"reset cmd_vel(.linear.x)\")\n _pub_cmdvel.publish(msg)",
"def _vel_callback(self, msg):\n self.joints_vels = msg.data\n self.compute_torques('vel')",
"def cmd_vel_callback(self, msg):\n with self._cmd_vel_lock:\n self._x_linear_cmd = msg.linear.x\n self._z_angular_cmd = msg.angular.z\n self._last_cmd_vel_time = rospy.get_rostime()",
"def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def sendMessage(self):\n print(\"sendMessage\")\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def sendMessage(self):\n print('sendMessage')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))",
"def handle_current_temperature_received(msg: ReceiveMessage) -> None:\n self.handle_climate_attribute_received(\n msg, CONF_CURRENT_TEMP_TEMPLATE, \"_attr_current_temperature\"\n )",
"def sent_velocity(self,velocity):\n if self.mode == 3: # Profiled Velocity\n self.node.sdo[0x6040].bits[0] = 1\n self.node.sdo[0x6040].bits[1] = 1\n self.node.sdo[0x6040].bits[2] = 1\n self.node.sdo[0x6040].bits[3] = 1\n # self.node.sdo[0x6040].bits[7] = 0\n velocity = 10 * self._I85_msg_to_device(velocity)\n self.node.sdo.download(0x60ff, 0x0, self._decTohex_32(velocity)) # velocity",
"def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)",
"def _target_callback(self, msg):\n self.target_pose = np.asarray(msg.pos)[np.newaxis].T\n self.target_vel = np.asarray(msg.vel)[np.newaxis].T\n self.target_acc = np.asarray(msg.acc)[np.newaxis].T\n\n print(\"\\nGoing to:\")\n print(\"Pos: \\n\" + str(self.target_pose))\n print(\"Vel: \\n\" + str(self.target_vel))\n print(\"Acc: \\n\" + str(self.target_acc))",
"def get_velocity(self, message):\n #print('**************** vel ')\n self.velocity = message.data\n self.state[0:self.ndegres] = self.velocity[0:self.ndegres]",
"def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])",
"def _callback_meteo(self, msg):\n\n\t\tself.psi = self.north2east( msg.true_wind_direction )",
"def on_message(self, client, userdata, msg):\n\n data = json.loads(msg.payload.decode(\"utf-8\"))\n if debug: print(\"topic: \", msg.topic, \" payload:\", data)\n #print \"Received: \", data\n if msg.topic == self.subControls:\n self.controller.setpoint = int(data['temp'])\n status_old = self.controller.status\n if data['mode'] == \"auto\" or data['mode'] == \"cool1\" or data['mode'] == \"cool2\" or data['mode'] == \"cool3\":\n self.controller.status = 1\n elif data['mode'] == \"off\":\n self.controller.status = 0\n if status_old and self.controller.status: onoff = False\n elif status_old and not self.controller.status: onoff = True\n elif not status_old and self.controller.status: onoff = True\n else: onoff = False\n self.controller.updateControls(onoff = onoff, radio=False)\n\n elif msg.topic == self.subSettings :\n self.controller.temp_interval = int(data['temp-res'])\n self.controller.energy_interval = int(data['energy-res'])\n self.controller.updateIntervals()\n else:\n pass",
"def onPong(self, payload):",
"def getVelocity(self):\n return self.v",
"def listener():\n rospy.Subscriber(\"motion_plan\", FloatList, callback)\n rospy.spin()",
"def waypoints_cb(self, msg):\n rospy.loginfo(rospy.get_name() + ': waypoints received')\n self.base_waypoints = msg.waypoints",
"def on_message(self, client, userdata, message):\n\t\tself.message = message\n\t\tmqtt_msg = mqttJsonLoad(self.message.payload)\n\t\t\n\t\tprint (\"[{}] Message arrived:\\n\\t\\tTopic: {}\\n\\t\\tMessage: {}\".format(\n\t\t\tint(time.time()), \n\t\t\tmessage.topic, \n\t\t\tmessage.payload\n\t\t))\n\t\t\n\t\tif self.message.topic == \"measure/people\":\n\t\t\trpi.updatePendingJson(\"people_inside\", mqtt_msg, \"data\")\n\t\telif self.message.topic == \"system\":\n\t\t\tstart()",
"def on_message(self, client, userdata, msg):\n st = datetime.datetime.fromtimestamp(msg.timestamp).strftime('%Y-%m-%d %H:%M:%S.%f')\n# print st[:-3], \":\", msg.topic, \":\", msg.payload\n\n # Note: Update_display from this function does not work\n if msg.topic == self.mqtt_topic_electricity:\n self.my_gui.update_electricity(float(msg.payload)) # kWh\n\n elif self.mqtt_topic_electricity in msg.topic: # covers /1 /2 ... etc.\n index = int(msg.topic.split('/')[-1])\n self.my_gui.update_electricity_hour(index, float(msg.payload))\n\n # -----------------------------------------------------------------\n elif msg.topic == self.mqtt_topic_water:\n self.my_gui.update_water(int(msg.payload)) # Litter\n\n elif self.mqtt_topic_water in msg.topic: \n index = int(msg.topic.split('/')[-1])\n self.my_gui.update_water_hour(index, int(msg.payload))\n\n # -----------------------------------------------------------------\n elif msg.topic == self.mqtt_topic_gas:\n self.my_gui.update_gas(float(msg.payload)) # m3, 10 Litters/msg\n\n elif self.mqtt_topic_gas in msg.topic:\n index = int(msg.topic.split('/')[-1])\n self.my_gui.update_gas_hour(index, float(msg.payload))\n\n# elif self.mqtt_topic_status == msg.topic:\n# # TODO\n# if \"online\" in msg.payload:\n# print \"A is online\"\n# elif \"offline\" in msg.payload:\n# print \"A is offline\"\n# print st[:-3], \":\", msg.topic, \":\", msg.payload\n\n self.my_gui.update_eur_total()"
] |
[
"0.6958057",
"0.6950165",
"0.6463305",
"0.6463219",
"0.6407563",
"0.63438076",
"0.623812",
"0.62050366",
"0.6087701",
"0.5899115",
"0.58897585",
"0.5846776",
"0.5786905",
"0.57754254",
"0.5760638",
"0.575733",
"0.57383865",
"0.57151663",
"0.57124853",
"0.569276",
"0.5639724",
"0.5560339",
"0.54736954",
"0.5472428",
"0.5463607",
"0.5444785",
"0.54366887",
"0.54351646",
"0.5418878",
"0.5409294"
] |
0.76706904
|
0
|
callback for receiving Bool message on /vehicle/dbw_enabled topic
|
def cb_vehicle_dbw_enabled(self,msg):
# log message
# rospy.logdebug('DBWNode::dbw_enabled_cb %s',msg)
# store message
self.dbw = bool(msg.data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_enabled(self):",
"def Enabled(self) -> bool:",
"def isEnabled(self):",
"def isEnabled(self) -> bool:\n ...",
"def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)",
"def on_message(self, msg):\n self.enabled = (msg == \"ON\")\n self.log.info(\"%s received %s command for logic actuator\",\n self.name, \"enable\" if self.enabled else \"disable\")",
"def handle_onoff_mode_received(\n msg: ReceiveMessage, template_name: str, attr: str\n ) -> None:\n payload = self.render_template(msg, template_name)\n payload_on: str = self._config[CONF_PAYLOAD_ON]\n payload_off: str = self._config[CONF_PAYLOAD_OFF]\n\n if payload == \"True\":\n payload = payload_on\n elif payload == \"False\":\n payload = payload_off\n\n if payload == payload_on:\n setattr(self, attr, True)\n elif payload == payload_off:\n setattr(self, attr, False)\n else:\n _LOGGER.error(\"Invalid %s mode: %s\", attr, payload)\n\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)",
"def EnableBroadcast(self) -> bool:",
"def EnableBroadcast(self) -> bool:",
"def on_off_bool(value):\n return value == 'on'",
"def is_on(self):\n pass",
"def send_states(self):\n\n teleop_enabled_msg = Bool()\n teleop_enabled_msg.data = self.teleop_enabled\n\n assisted_driving_enabled_msg = Bool()\n assisted_driving_enabled_msg.data = self.assisted_driving_enabled\n\n self.teleop_enabled_pub.publish(teleop_enabled_msg)\n self.assisted_driving_enabled_pub.publish(assisted_driving_enabled_msg)",
"def __bool__(self) -> bool:\n return not self._disconnected",
"def ready_bool(ready):\r\n if ready.lower() == 'y':\r\n return True\r\n else:\r\n return False",
"def getvDbBool(self, db, *keys):\n return self.getDbBool(db, \".\".join(keys))",
"def check_device_state(self):",
"def enabled(self):\n return self._packet.get('enabled', True)",
"def affection_status_switch_on(self):\n self._affection_status_switch = False",
"def constrain_app_enabled(self, value: str) -> bool:\n\n # Disable callback if house state is in the disabled presence config\n if \"presence\" in self.disabled_states:\n presence_disable = [\n self.presence_app.HouseState[disabled_state].value\n for disabled_state in self.disabled_states[\"presence\"].split(\",\")\n ]\n if self.get_state(HOUSE[\"presence_state\"]) in presence_disable:\n return False\n\n # Disable callback if mode state is equal to state the disable modes config\n if \"modes\" in self.disabled_states:\n for mode, state in self.disabled_states[\"modes\"].items():\n if self.get_state(MODES[mode]) == state:\n return False\n\n # Disable callback if today is in the disable days config\n if \"days\" in self.disabled_states:\n disabled_days = self.disabled_states[\"days\"].split(\",\")\n if datetime.datetime.today().strftime(\"%A\") in disabled_days:\n return False\n\n if self.get_state(self.enable_input_boolean) == \"off\":\n return False\n\n return True",
"def _ison(self):\n return self.dp.state()==PyTango.DevState.ON",
"def is_on(self):\n return False",
"def affection_status_switch_on(self):\n self._affection_status_switch = True",
"def getDbBool(self, db, key):\n val = self.getDbStr(db, key)\n if val == \"yes\":\n return True\n elif val == \"no\":\n return False\n else:\n raise Exception(\"invalid value for \" + db + \".\" + key + \": \\\"\"\n + val + \"\\\", expected \\\"yes\\\" or \\\"no\\\"\")",
"def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)",
"def __self__(self, GPIO_LED):\n # GPIO.setup()\n # if error raise exception \"Device Not Ready\"\n self.status = false\n return self.status",
"def relayDispatch(self):\n\n if self.ui.relayDevice.currentText().startswith('Built-In'):\n self.app.message.emit('Relay enabled', 0)\n self.deviceStat['relay'] = True\n self.app.relay.startTimers()\n self.ui.relayDevice.setStyleSheet(self.BACK_GREEN)\n else:\n self.app.message.emit('Relay disabled', 0)\n self.deviceStat['relay'] = False\n self.app.relay.stopTimers()\n self.ui.relayDevice.setStyleSheet(self.BACK_NORM)\n\n return True",
"def toggled_comunication(self):\n if self.actionPC_Monitor.isChecked() and self.actionPC_Monitor.isEnabled():\n self.actionPC_Monitor.setEnabled(0)\n self.actionPC_Sensor_Actuador.setChecked(0)\n self.actionPC_Sensor_Actuador.setEnabled(1)\n self.monitor_environment()\n \n elif self.actionPC_Sensor_Actuador.isChecked() and self.actionPC_Sensor_Actuador.isEnabled():\n self.actionPC_Sensor_Actuador.setEnabled(0)\n self.actionPC_Monitor.setChecked(0)\n self.actionPC_Monitor.setEnabled(1)\n self.actuator_environment()",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def _isdisable(self):\n return self.dp.state()==PyTango.DevState.DISABLE",
"def datadog_dbm_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"datadog_dbm_enabled\")"
] |
[
"0.6191051",
"0.604945",
"0.60477203",
"0.5949311",
"0.5756972",
"0.5711706",
"0.5695784",
"0.56757003",
"0.56757003",
"0.5673843",
"0.56621176",
"0.56335765",
"0.56089544",
"0.56003773",
"0.5595799",
"0.5588316",
"0.5575657",
"0.556023",
"0.55508816",
"0.55404013",
"0.5534452",
"0.55192083",
"0.55166405",
"0.5496873",
"0.54956806",
"0.5480359",
"0.54763925",
"0.5430193",
"0.54099196",
"0.54062635"
] |
0.8801945
|
0
|
Plot an image grid of the dataset. If the dataset is labeled it will generate one row of images per class, and if it is unlabeled it will generate a bidimensional (almost square) grid of images sampled from the dataset.
|
def show_imagegrid_dataset(dataset,
num=10,
shuffle=True,
classes='auto',
figsize=None,
fontsize=20,
image_attr={'cmap': plt.cm.Greys_r}):
sample = dataset[0]
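    # Peek at one sample: an (image, label) tuple means the dataset is labeled.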
if isinstance(sample, tuple) and len(sample) == 2:
images_per_class = get_labeled_imagegrid(dataset,
num=num,
shuffle=shuffle,
classes=classes)
num = min(num, max(map(len, images_per_class.values())))
classes = list(images_per_class.keys())
if figsize is None:
figsize = (2 * num, 2 * len(classes))
fig, axs = plt.subplots(figsize=figsize, nrows=len(classes), ncols=num)
if len(classes) == 1:
axs = np.expand_dims(axs, 0)
if num == 1:
axs = np.expand_dims(axs, -1)
for i, (class_name, class_images) in enumerate(images_per_class.items()):
for j, img in enumerate(class_images):
show_image(img, axs[i][j], image_attr)
axs[i][0].set_ylabel(str(class_name), fontsize=fontsize)
elif isinstance(sample, (Image, torch.Tensor, np.ndarray)):
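        # Unlabeled dataset: sample images and lay them out on a near-square grid.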
image_list = get_imagegrid(dataset,
num=num,
shuffle=shuffle)
num = min(len(image_list), num)
nrows = math.ceil(math.sqrt(num))
ncols = math.ceil(num / nrows)
if figsize is None:
figsize = (2 * nrows, 2 * ncols)
fig, axs = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols)
        axs = np.atleast_1d(axs).flatten()  # handles the single-image case, where subplots returns a bare Axes
for i, img in enumerate(image_list):
show_image(img, axs[i], image_attr)
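A hedged usage sketch, assuming a torchvision-style dataset that yields (image, label) tuples and that the helpers referenced above (get_labeled_imagegrid, get_imagegrid, show_image) are importable from the same module; MNIST is purely an illustration.

import matplotlib.pyplot as plt
from torchvision import datasets, transforms

mnist = datasets.MNIST(root='.', download=True, transform=transforms.ToTensor())
# Labeled dataset: draws one row per digit class with up to five images each.
show_imagegrid_dataset(mnist, num=5, shuffle=True)
plt.show()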
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_images(images, labels, nrows, ncols, cls_true=None, cls_pred=None, grey=False):\n fig, axes = plt.subplots(nrows, ncols, figsize=(16, 2*nrows))\n\n for i, ax in enumerate(axes.flat): \n if grey:\n ax.imshow(images[i,:,:,0], cmap='binary')\n else:\n ax.imshow(images[i])\n\n ax.set_xticks([]); ax.set_yticks([])\n if labels:\n ax.set_title(labels[i])",
"def plot_images_grid(images, labels, title):\n images = images.cpu()\n labels = labels.cpu()\n \n assert type(images[0]) is torch.Tensor, 'Image to plot is not torch.Tensor'\n image_size = int(np.sqrt(images[0].shape[0]))\n \n fig = plt.figure(figsize=(10,4))\n for idx in range(10):\n ax = fig.add_subplot(2,10/2,idx+1, xticks=[], yticks=[])\n ax.imshow(images[idx].view(image_size, image_size), cmap = 'gray')\n label = labels[idx].item()\n ax.set_title(label)\n #end\n fig.suptitle(title, fontsize = 14)\n plt.show()\n plt.close('all')",
"def show_imgs(dataset, n_imgs, plot_size=(15, 15), cmap=None):\n n_cols = int(np.sqrt(n_imgs))\n n_rows = int(np.ceil(np.sqrt(n_imgs)))\n class_idx = dataset.class_to_idx\n idx_class = idx_to_class(class_idx)\n\n fig, axes = plt.subplots(n_rows, n_cols, figsize=plot_size)\n for i, ax in enumerate(axes.flatten()):\n ax.axis('off')\n title = f'Class : {idx_class[dataset.targets[i]]}'\n ax.imshow(dataset.data[i], cmap=cmap)\n ax.set_title(title)\n fig.tight_layout()",
"def view_images(dataset, size):\n images, labels = dataset\n assert images.shape[0] == labels.shape[0]\n\n num_images = images.shape[0]\n num_cols = 3\n num_rows = np.ceil(num_images / num_cols).astype(\"int\")\n plt.figure(figsize=size)\n for i in range(num_images):\n image = images[i]\n label = labels[i]\n ax = plt.subplot(num_rows, num_cols, i + 1)\n plt.imshow(np.array(image, dtype=\"float\"))\n plt.title(\"Number: \" + str(label))\n plt.axis(\"off\")",
"def plot_image_grid(epoch, generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/train_images/image_iteration_{:05d}.png'.format(epoch + 1))\n plt.close()",
"def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()",
"def display_images_in_grid(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(f\"Invalid imgs len:{len(imgs)} col:{row} row:{col}\")\n\n for i, img in enumerate(imgs):\n plot_num = i + 1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # remove x axis\n plt.tick_params(labelleft=False) # remove y axis\n plt.imshow(img)\n plt.show()",
"def check_dataset(dataset):\n loader = torch.utils.data.DataLoader(dataset, batch_size=16)\n dataiter = iter(loader)\n images, labels = dataiter.next()\n imgs_grid = make_grid(images, padding=0)\n np_grid = imgs_grid.numpy()\n plt.figure(figsize=(10, 7))\n plt.imshow(np.transpose(np_grid, (1, 2, 0)))\n for i in labels:\n print(dataset.classes[i.item()])\n plt.show()",
"def plotgrid(data,d=10,shape=(30,30)):\n ion()\n gray()\n clf()\n for i in range(min(d*d,len(data))):\n subplot(d,d,i+1)\n row = data[i]\n if shape is not None: row = row.reshape(shape)\n imshow(row)\n ginput(1,timeout=0.1)",
"def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()",
"def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(8, 6))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/DCGAN.png')\n plt.show()",
"def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/WGAN.png')\n plt.show()",
"def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def show_train_images(train_data, train_labels):\n plt.figure(1, figsize=(8, 8))\n n = 0\n\n for i in range(16):\n n += 1\n # each time random images are loaded\n # r = np.random.randint(0, train_data.shape[0], 1)\n plt.subplot(4, 4, n)\n plt.subplots_adjust(hspace=0.5, wspace=0.5)\n plt.imshow(train_data[i] / 255.)\n plt.title('{}'.format(train_labels[i]))\n plt.xticks([]), plt.yticks([])\n plt.show()",
"def plot_random_generated_images(self):\n dimensions=(10, 10)\n figsize=(10, 10)\n n_samples=100\n \n (X, _), _ = self.generate_generator_prediction_samples(n_samples)\n \n self.grid_plot(X, dimensions=dimensions, figsize=figsize)",
"def grid(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, 1)\n\n index = 0\n element = []\n for row in range(rows):\n for col in range(cols): \n if index < len(images):\n element.append(images[index])\n index += 1\n \n stack = np.hstack(tuple(element))\n ax[row].axis('off')\n ax[row].imshow(stack)\n element = []\n \n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0",
"def display_sample_images(self):\n if self.train_dataset is None:\n self.init_datasets()\n\n images, labels = next(self.train_dataset)\n plt.figure(figsize=(5,5))\n for n in range(min(25, images.shape[0])):\n ax = plt.subplot(5,5,n+1)\n plt.imshow(images[n])\n if len(labels.shape) == 1:\n plt.title(self.class_names[int(labels[n])].title())\n else:\n m = np.argmax(labels[n])\n plt.title(self.class_names[int(labels[n, m])].title())\n plt.axis('off')\n\n plt.tight_layout()\n plt.show()",
"def imshow_grid(images, shape=[2, 2], name='default', save=False):\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n img = images[i]\n if img.shape[0]==3:\n img = img.transpose(1, 2, 0)\n img = (img - img.min())/(img.max() - img.min())\n grid[i].imshow(img, vmin=-132, vmax = 164) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def generate_image_grid(sess, op):\n n = 10\n x_points = np.linspace(-20, 20, n)\n y_points = np.linspace(-20, 20, n)\n\n nx, ny = len(x_points), len(y_points)\n plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)\n\n for i, g in enumerate(gs):\n z = np.concatenate(([x_points[int(i / ny)]], [y_points[int(i % nx)]]))\n z = np.reshape(z, (1, 2))\n x = sess.run(op, feed_dict={decoder_input: z})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_aspect('auto')\n plt.show()",
"def plot_grid(im_list, grid_shape, scale=0.1, axes_pad=0.07):\r\n # https://gist.github.com/lebedov/7018889ba47668c64bcf96aee82caec0\r\n\r\n # Grid must be 2D:\r\n assert len(grid_shape) == 2\r\n\r\n # Make sure all images can fit in grid:\r\n assert np.prod(grid_shape) >= len(im_list)\r\n\r\n grid = ImageGrid(plt.gcf(), 111, grid_shape, axes_pad=axes_pad)\r\n for i, data in enumerate(im_list):\r\n\r\n # Scale image:\r\n im = PIL.Image.fromarray(data)\r\n thumb_shape = [int(scale*j) for j in im.size]\r\n im.thumbnail(thumb_shape, PIL.Image.ANTIALIAS)\r\n data_thumb = np.array(im)\r\n grid[i].plot_nnua(data_thumb)\r\n\r\n # Turn off axes:\r\n grid[i].axes.get_xaxis().set_visible(False)\r\n grid[i].axes.get_yaxis().set_visible(False)",
"def plot_images(X: np.ndarray,\n grids: Optional[Tuple[int, int]] = None,\n image_shape: Optional[Tuple[int, int]] = None,\n image_spacing: Optional[Tuple[int, int]] = None,\n ax: Optional['Axes'] = None,\n fontsize: int = 12,\n title: Optional[str] = None):\n if X.ndim == 3 or X.ndim == 2:\n cmap = plt.cm.Greys_r\n elif X.ndim == 4:\n cmap = None\n X = tile_raster_images(X,\n grids=grids,\n image_shape=image_shape,\n image_spacing=image_spacing)\n ax = to_axis2D(ax)\n ax.imshow(X, cmap=cmap)\n if title is not None:\n ax.set_title(str(title), fontsize=fontsize, fontweight='regular')\n ax.axis('off')\n return ax",
"def plot_image_grid(ax, images, n=20, m=None, img_rows=28, img_cols=28):\n if m is None:\n m = n\n \n grid = images[:n*m].reshape(n, m, img_rows, img_cols)\n\n return ax.imshow(np.vstack(np.dstack(grid)), cmap='gray')",
"def visualize_image(images, save_name):\n dim = images.shape[0]\n n_image_rows = int(np.ceil(np.sqrt(dim)))\n n_image_cols = int(np.ceil(dim * 1.0 / n_image_rows))\n gs = gridspec.GridSpec(n_image_rows, n_image_cols, top=1., bottom=0.,\n right=1., left=0., hspace=0., wspace=0.)\n\n for g, count in zip(gs, range(int(dim))):\n ax = plt.subplot(g)\n ax.imshow(images[count, :].astype(np.float32).reshape((28, 28)))\n ax.set_xticks([])\n ax.set_yticks([])\n plt.savefig(save_name + '_vis.png')",
"def plot_images(imgs, layout, img_sz = 0.7, suptitle = ''):\n\tnrows, ncols = layout \n\tfig, axes = plt.subplots(nrows, ncols, \n\t\tfigsize = (img_sz * ncols, img_sz * nrows))\n\taxes = axes.ravel()\n\tfig.subplots_adjust(hspace = 0, wspace = 0)\n\tfig.suptitle(suptitle)\n\tfor i, img in enumerate(imgs):\n\t\taxes[i].get_xaxis().set_visible(False)\n\t\taxes[i].get_yaxis().set_visible(False)\n\t\taxes[i].imshow(img)",
"def gridPlot6(img_stack):\r\n F = plt.figure(figsize = (20,20))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (2,3), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:6]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot16.png')\r\n if 'gplot16.png' in os.listdir():\r\n plt.savefig('gplot16_2.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return",
"def visulize_5(X):\n fig, axes1 = plt.subplots(5,5,figsize=(3,3))\n for j in range(5):\n for k in range(5):\n i = np.random.choice(range(len(X)))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(X[:,i].reshape(32, 32, 3))\n plt.show()",
"def plot_images_grid(x: torch.tensor, export_img, title: str = '', nrow=6, padding=2, normalize=False, pad_value=0, save = True, apply_transforms = False):\n logger = logging.getLogger()\n global global_counter\n #if apply_transforms:\n # #global_counter = 0\n \n # transform = transforms.Compose([\n # #transforms.ToPILImage(),transforms.Lambda(lambda x: transforms.functional.adjust_brightness(x, brightness_factor = 1.1)),\n # #transforms.Lambda(lambda x: normalize_to_zero_one_range(x)),\n # transforms.Lambda(lambda x: tensor_to_img(x)),\n # transforms.Lambda(lambda x: save_img_patch(x, prefix = 'original')),\n # transforms.Lambda(lambda x: generate_NCUT_segmented_image(x)),\n # transforms.Lambda(lambda x: save_img_patch(x, prefix = 'NCUT')),\n # #transforms.Lambda(lambda x: draw_feature_contours(x)),\n # transforms.ToTensor() \n # ])\n \n # for i in range(x.shape[0]): \n # try:\n # x[i] = transform(x[i])\n # except:\n # logger.error(\"Exception occurred while appliying transform {}\".format( sys.exc_info()[0]))\n # logger.error(\"Was processing image number {} with global counter {}\".format(str(i), global_counter))\n\n grid = make_grid(x, nrow=nrow, padding=padding, normalize=normalize, pad_value=pad_value)\n npgrid = grid.detach().cpu().numpy()\n\n plt.imshow(np.transpose(npgrid, (1, 2, 0)), interpolation='nearest')\n\n ax = plt.gca()\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n if not (title == ''):\n plt.title(title)\n if (save == True):\n plt.savefig(export_img, bbox_inches='tight', pad_inches=0.1)\n else:\n plt.show()\n plt.clf()",
"def show_batch(dataloader):\n bs = dataloader.batch_size\n num_samples = dataloader.dataset.data.shape[0]\n batches = num_samples // bs\n batch_id = np.random.choice(batches)\n one_batch = list(dataloader)[batch_id]\n batch_imgs, batch_labels = one_batch[0], one_batch[1]\n class_idx = dataloader.dataset.class_to_idx\n idx_class = idx_to_class(class_idx)\n n_rows = n_cols = int(np.sqrt(len(batch_imgs)))\n fig, axes = plt.subplots(n_rows, n_cols, figsize=(10, 10))\n if batch_imgs.shape[1] == 1:\n cmap = 'gray'\n else:\n cmap = None\n for i, ax in enumerate(axes.flatten()):\n ax.axis('off')\n title = f'Class : {idx_class[batch_labels[i].item()]}'\n single_img = np.clip(batch_imgs[i].squeeze().permute(1, 2, 0).numpy(), 0, 1)\n ax.imshow(single_img, cmap=cmap)\n ax.set_title(title)\n fig.tight_layout()",
"def gridPlot12(img_stack):\r\n F = plt.figure(figsize = (30,30))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (3,4), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:12]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot12.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return"
] |
[
"0.7369182",
"0.7340669",
"0.73215216",
"0.722925",
"0.7174287",
"0.7159908",
"0.7060308",
"0.7036506",
"0.7008614",
"0.70057887",
"0.70050985",
"0.69809043",
"0.6962196",
"0.6962196",
"0.6871114",
"0.6860198",
"0.6790377",
"0.6773561",
"0.67644835",
"0.67433405",
"0.670146",
"0.66196877",
"0.65494645",
"0.65441775",
"0.65329087",
"0.65273225",
"0.6523203",
"0.6510038",
"0.647565",
"0.64740777"
] |
0.7943561
|
0
|
Set up follow-through (jiggle planes) on each selected transform object. The jiggle deformers are returned so that users can animate the deformer attributes before baking.
|
def setup_follow_through():
# -- Grab the selected transforms
node_list = pm.selected(type='transform')
# -- Validate node list
if not node_list:
pm.warning(
'Select at least 1 transform object!'
)
return None
# -- Grab the current start and end frames
start_frame = pm.playbackOptions(q=True, min=True)
end_frame = pm.playbackOptions(q=True, max=True)
# -- Iterable variables for later
to_delete = []
plane_trans_list = []
# -- For each node
for node in node_list:
# -- Create a 10x10 poly plane
plane_trans = pm.polyPlane(
w=10,
h=10,
sx=1,
sy=1,
)[0]
plane_trans_list.append(plane_trans)
# -- Constrain the plane to the give node (delete later)
to_delete.append(
pm.parentConstraint(node, plane_trans, mo=False)
)
# -- Bake all planes in one go (translate and rotate).
# -- This is to sever any dependency on the selected transforms, and
# -- removing any potential cyclic issues.
pm.bakeResults(
plane_trans_list,
time=[
start_frame,
end_frame,
],
at=['t', 'r'],
sm=True,
)
# -- Delete plane constraints
pm.delete(to_delete)
# -- List of jiggle deformers to select & return
jiggle_list = []
# -- Go to the first frame
pm.currentTime(start_frame)
# -- Setup and connect each node to plane
for node, plane_trans in zip(node_list,
plane_trans_list):
# -- Create a jiggle deformer on the plane
pm.select(plane_trans)
pm.mel.CreateJiggleDeformer()
# -- Get the plane's shape
plane_shape = plane_trans.getShape()
# -- Get the Jiggle deformer
jiggle_deformer = plane_shape.inputs(type='jiggle')[0]
jiggle_list.append(jiggle_deformer)
# -- Set the default jiggle settings
for attr, value in JIGGLE_DEFAULTS.iteritems():
jiggle_deformer.attr(attr).set(value)
# -- Create and setup a follicle on the plane's shape
follicle = pm.createNode('follicle')
# -- Get the follicle's transform node (parent)
follicle_trans = follicle.getParent()
plane_shape.outMesh.connect(follicle.inputMesh)
plane_shape.worldMatrix[0].connect(follicle.inputWorldMatrix)
follicle.outRotate.connect(follicle_trans.rotate)
follicle.outTranslate.connect(follicle_trans.translate)
# -- Position the follicle in the center of the plane
follicle.parameterU.set(0.5)
follicle.parameterV.set(0.5)
# -- Lock the follicle_trans translate and rotate attributes
follicle_trans.translate.lock()
follicle_trans.rotate.lock()
# -- Constrain the original node to the follicle_trans
constraint = pm.parentConstraint(follicle_trans, node, mo=True)
# -- Connect the plane_trans to a custom attribute
# -- on the constraint for retrieval (when baking).
# -- Add it to the constraint as we clean it up
# -- anyway (no need to dirty the rig).
constraint.addAttr('ld_jiggle_node', at='message')
plane_trans.message.connect(constraint.ld_jiggle_node)
# -- Delete animation on the nodes (constrained
# -- by the planes by now anyway)
pm.cutKey(node_list, at=['t', 'r'], cl=True)
# -- Select the node list (allows the bake process to run
# -- immediately after).
# -- Would recommend selecting the jiggle_list to allow users
# -- to animate the jiggle deformer instead.
pm.select(node_list)
return jiggle_list
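A hedged usage sketch inside Maya, assuming the animated controls are already selected and the playback range spans the animation; stiffness and damping are standard attributes on Maya's jiggle deformer, and the values here are illustrative only.

# Build the follow-through setup on the current selection.
jiggle_list = setup_follow_through()
if jiggle_list:
    for jiggle in jiggle_list:
        # Tune each deformer before running the bake step.
        jiggle.stiffness.set(0.25)
        jiggle.damping.set(0.6)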
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bake_follow_through():\r\n # -- Find all selected transforms that have a jiggle setup\r\n node_list = [\r\n node\r\n for node in pm.selected(type='transform')\r\n if any(pc.hasAttr('ld_jiggle_node')\r\n for pc in node.getChildren(type='parentConstraint'))\r\n ]\r\n \r\n # -- Validate node list\r\n if not node_list:\r\n pm.warning(\r\n 'Select at least 1 jiggled object!'\r\n )\r\n return None\r\n \r\n plane_trans_list = []\r\n to_delete = set()\r\n \r\n # -- Find connected parent constraints (already validated)\r\n for node in node_list:\r\n constraint = [\r\n pc\r\n for pc in node.getChildren(type='parentConstraint')\r\n if pc.hasAttr('ld_jiggle_node')\r\n ][0]\r\n\r\n # -- Retrieve the plane (transform) from the constraint\r\n plane_trans = constraint.ld_jiggle_node.get()\r\n plane_trans_list.append(plane_trans)\r\n\r\n # -- We'll want to delete this during cleanup\r\n to_delete.add(plane_trans)\r\n\r\n # -- Grab the plane's follicle\r\n to_delete.update(\r\n plane_trans.getShape().outputs(type='follicle')\r\n )\r\n \r\n # -- Geo cache the planes to avoid jitter.\r\n # -- Enable if you get jitter issues - possibly caused\r\n # -- by V2.0 / playback speed etc.\r\n # -- As this creates a geometry cache file using current settings, users\r\n # -- may be prompt to replace existing cache (recommended action).\r\n # -- HINT: This can be avoided if you investigate the pm.mel.geometryCache() call.\r\n pm.select(plane_trans_list)\r\n pm.mel.geometryCache()\r\n\r\n # -- Grab the current start and end frames\r\n start_frame = pm.playbackOptions(q=True, min=True)\r\n end_frame = pm.playbackOptions(q=True, max=True)\r\n \r\n # -- Bake all transforms\r\n pm.bakeResults(\r\n node_list,\r\n time=[\r\n start_frame,\r\n end_frame,\r\n ],\r\n at=['t', 'r'],\r\n sm=True,\r\n )\r\n \r\n # -- Delete setup\r\n pm.delete(to_delete)\r\n\r\n # -- Select the nodes (it's polite to leave as you entered).\r\n pm.select(node_list)",
"def setup(self):\n # Create your sprites and sprite lists here\n self.wall_list = arcade.SpriteList()\n for x in range(128, SCREEN_WIDTH, 196):\n for y in range(128, SCREEN_HEIGHT, 196):\n wall = arcade.Sprite(\"building.png\",.3)\n wall.center_x = x\n wall.center_y = y\n # wall.angle = 45\n self.wall_list.append(wall)\n self.player_sprite = arcade.Sprite(\"taxi.png\")\n self.player_sprite.center_x = 50\n self.player_sprite.center_y = 50\n self.player_sprite.scale = .2\n self.player_list = arcade.SpriteList()\n self.player_list.append(self.player_sprite)\n\n #Spawns people and makes list\n self.person = arcade.Sprite(\"person.png\")\n self.person.scale = .2\n self.person.center_x = random.randrange(SCREEN_WIDTH)\n self.person.center_y = random.randrange(SCREEN_HEIGHT)\n #Spawns target\n self.target = arcade.Sprite(\"target.png\")\n self.target.scale = .5\n self.target.center_x = random.randrange(60,SCREEN_WIDTH)\n self.target.center_y = random.randrange(60,SCREEN_HEIGHT)\n color_list = [\"BLUE\",\"RED\"]",
"def setupForRigPose(self):\n\n # unlock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=False)\n\n # find the mover shapes and set their visibility\n movers = self.returnJointMovers\n globalMovers = movers[0]\n shapes = []\n\n for each in movers:\n for mover in each:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 0, lock=True)\n\n # show global movers\n shapes = []\n for mover in globalMovers:\n child = cmds.listRelatives(mover, children=True, shapes=True)\n if len(child) > 0:\n shapes.append(mover + \"|\" + child[0])\n\n for shape in shapes:\n cmds.setAttr(shape + \".v\", lock=False)\n cmds.setAttr(shape + \".v\", 1, lock=True)\n\n # unlock mover group for this module and make visible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", 1)\n\n # hide the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 0)\n cmds.lockNode(parent, lock=True)\n\n # get the joints created by this module\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.parentConstraint(joint + \"_mover_offset\", joint)\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.parentConstraint(self.name + \"_\" + jointBaseName + \"_mover_offset\", joint)\n\n # lock joint movers\n cmds.select(\"JointMover\", hi=True)\n jmNodes = cmds.ls(sl=True)\n for node in jmNodes:\n cmds.lockNode(node, lock=True)",
"def set_original_planes(self, display_opt):\n\n # get 4-chamber view\n four_ch_view_plane_normal = self.find_4ch_view(display_opt)\n\n # set rodriguez rotation around midline (apex to C)\n axis_of_rot = np.array(self.epi_apex_node - self.C)\n self.axis_of_rot_normalized = axis_of_rot/np.linalg.norm(axis_of_rot)\n\n # get 2-chamber view (90-counterclock rotation from 4ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized,\n math.radians(self.orig_view_angles[1])) # rodriguez rotation around midline\n two_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n # get 3-chamber view (additional 30-60 counterclock rotation from 3ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized, math.radians(self.orig_view_angles[2]))\n three_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n if display_opt:\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(two_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(three_ch_view_plane_normal, 'mesh')\n\n self.original_planes = np.vstack((four_ch_view_plane_normal,\n two_ch_view_plane_normal,\n three_ch_view_plane_normal))",
"def setup_3D( self ):\r\n # ~ Modes and Flags ~\r\n # Use 'GL_DEPTH_TEST' to ensure that OpenGL maintains a sensible drawing order for polygons no matter the viewing angle\r\n glEnable( GL_DEPTH_TEST ) # Do these setup functions really have to be run every single frame? # TODO: Try moving these to the '__init__' , see what happens\r\n # glEnable( GL_CULL_FACE ) # Uncomment to preform backface culling # This might erase arrowheads if they are away-facing!\r\n # ~ View Frustum Setup ~\r\n glMatrixMode( GL_PROJECTION )\r\n glLoadIdentity()\r\n gluPerspective( 70 , self.width / float( self.height ) , 0.1 , 200 )\r\n # ~ View Direction Setup ~\r\n glMatrixMode( GL_MODELVIEW )\r\n glLoadIdentity()\r\n gluLookAt( *self.camera )",
"def setup(self):\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n self.wall_list = arcade.SpriteList(use_spatial_hash=True,\n spatial_hash_cell_size=128)\n self.enemy_list = arcade.SpriteList()\n\n # Set up the player\n resource = \":resources:images/animated_characters/\" \\\n \"female_person/femalePerson_idle.png\"\n self.player = arcade.Sprite(resource, scale=SPRITE_SCALING)\n self.player.center_x = SPRITE_SIZE * 5\n self.player.center_y = SPRITE_SIZE * 1\n self.player_list.append(self.player)\n\n # Set enemies\n resource = \":resources:images/animated_characters/zombie/zombie_idle.png\"\n enemy = arcade.Sprite(resource, scale=SPRITE_SCALING)\n enemy.center_x = SPRITE_SIZE * 4\n enemy.center_y = SPRITE_SIZE * 7\n self.enemy_list.append(enemy)\n\n spacing = SPRITE_SIZE * 3\n for column in range(10):\n for row in range(15):\n sprite = arcade.Sprite(\":resources:images/tiles/grassCenter.png\",\n scale=SPRITE_SCALING)\n\n x = (column + 1) * spacing\n y = (row + 1) * sprite.height\n\n sprite.center_x = x\n sprite.center_y = y\n if random.randrange(100) > 30:\n self.wall_list.append(sprite)\n\n self.physics_engine = arcade.PhysicsEngineSimple(self.player,\n self.wall_list)\n\n # --- Path related\n # This variable holds the travel-path. We keep it as an attribute so\n # we can calculate it in on_update, and draw it in on_draw.\n self.path = None\n # Grid size for calculations. The smaller the grid, the longer the time\n # for calculations. Make sure the grid aligns with the sprite wall grid,\n # or some openings might be missed.\n grid_size = SPRITE_SIZE\n\n # Calculate the playing field size. We can't generate paths outside of\n # this.\n playing_field_left_boundary = -SPRITE_SIZE * 2\n playing_field_right_boundary = SPRITE_SIZE * 35\n playing_field_top_boundary = SPRITE_SIZE * 17\n playing_field_bottom_boundary = -SPRITE_SIZE * 2\n\n # This calculates a list of barriers. By calculating it here in the\n # init, we are assuming this list does not change. In this example,\n # our walls don't move, so that is ok. If we want moving barriers (such as\n # moving platforms or enemies) we need to recalculate. This can be an\n # time-intensive process depending on the playing field size and grid\n # resolution.\n\n # Note: If the enemy sprites are the same size, we only need to calculate\n # one of these. We do NOT need a different one for each enemy. The sprite\n # is just used for a size calculation.\n self.barrier_list = arcade.AStarBarrierList(enemy,\n self.wall_list,\n grid_size,\n playing_field_left_boundary,\n playing_field_right_boundary,\n playing_field_bottom_boundary,\n playing_field_top_boundary)",
"def animateTurntable(self, objects=[], startFrame=0, endFrame=100):\r\n if not objects:\r\n return\r\n objects = [o.nativePointer() for o in objects]\r\n cam = self.nativePointer()\r\n helper = mxs.cross3dhelper.turntableHelperBuilder(\r\n self.nativePointer(),\r\n startFrame,\r\n endFrame,\r\n )\r\n # Create an aggregate bounding box for all of our\r\n # objects so we know how \"big\" this stuff is, all\r\n # inclusive.\r\n from blur3d.lib import cartesian\r\n aggBBox = None\r\n for obj in objects:\r\n p1, p2 = mxs.nodeLocalBoundingBox(obj)\r\n oBBox = cartesian.BoundingBox(\r\n cartesian.Point.newFromMaxPoint(p1),\r\n cartesian.Point.newFromMaxPoint(p2),\r\n )\r\n if not aggBBox:\r\n aggBBox = oBBox\r\n else:\r\n aggBBox = cartesian.BoundingBox.union(\r\n aggBBox,\r\n oBBox,\r\n )\r\n # A bounding sphere conveniently gives us a center point.\r\n center, radius = aggBBox.boundingSphere()\r\n helper.pos = center.maxPoint()\r\n # Stick a target object at the center of the objects and\r\n # rotate it 360 degrees across our frame range, then link\r\n # the camera to that via a constraint.\r\n link = mxs.Link_Constraint()\r\n link.addTarget(helper, 0)\r\n cam.controller = link\r\n self.target().nativePointer().pos = center.maxPoint()\r\n cam.specify_fov = True\r\n cam.film_width = 36.0\r\n cam.fov = 40.0\r\n aspect = float(mxs.renderers.current.image_aspect)\r\n fovAngle = cartesian.radians(cam.fov)\r\n axisLength = aggBBox.length(aggBBox.maximumExtent())\r\n from math import sin, sqrt\r\n hypoLength = ((axisLength) / sin(fovAngle / 2.0))\r\n distance = sqrt(\r\n (hypoLength * hypoLength) - ((axisLength / 2.0) * (axisLength / 2.0)))\r\n cam.pos = (cam.pos + mxs.point3(0, -distance, 0))",
"def pickup_object_and_reorient_on_table(self):\n\n def set_position(t, pos):\n _, quat = transformUtils.poseFromTransform(t)\n return transformUtils.transformFromPose(pos, quat)\n\n speed = self.config[\"object_interaction\"][\"speed\"]\n pick_up_distance = self.config[\"object_interaction\"][\"pickup_distance\"]\n drop_distance_above_grasp = self.config[\"object_interaction\"][\"drop_distance_above_grasp\"]\n rotate_speed = self.config[\"object_interaction\"][\"rotate_speed\"]\n drop_location = self.config[\"object_interaction\"][\"drop_location\"] # z coordinate is overwritten later\n\n endEffectorFrame = self.tfBuffer.lookup_transform(self.config['base_frame_id'],\n self.config['end_effector_frame_id'], rospy.Time(0))\n\n grasp_ee_frame = spartanUtils.transformFromROSTransformMsg(endEffectorFrame.transform)\n\n # the frame of the end-effector after we have picked up the object\n pickup_ee_frame_vtk = transformUtils.copyFrame(grasp_ee_frame)\n pickup_ee_frame_vtk.PostMultiply()\n pickup_ee_frame_vtk.Translate(0, 0, pick_up_distance)\n\n vis.updateFrame(pickup_ee_frame_vtk, 'pickup frame', scale=0.15)\n\n self._cache['grasped_ee_frame'] = endEffectorFrame\n self._cache['pickup_ee_frame_vtk'] = pickup_ee_frame_vtk\n\n poseStamped = self.vtkFrameToPoseMsg(pickup_ee_frame_vtk)\n speed = 10 # joint degrees per second\n params = self.getParamsForCurrentLocation()\n above_table_pre_grasp = params['poses']['above_table_pre_grasp']\n pickup_ik_response = self.robotService.runIK(poseStamped, seedPose=above_table_pre_grasp,\n nominalPose=above_table_pre_grasp)\n\n # compute the drop frame location\n # This is done by rotating along the z-axis of the grasp frame by some random\n # amount in [-90, 90] and then just releasing\n\n\n rotate_x_angle = random.uniform(45, 90)\n # if random.random() < 0.5:\n # rotate_x_angle *= -1\n\n\n\n pre_drop_frame = transformUtils.copyFrame(pickup_ee_frame_vtk)\n pre_drop_frame.PreMultiply()\n pre_drop_frame.RotateX(rotate_x_angle)\n pre_drop_frame_pos, _ = transformUtils.poseFromTransform(pre_drop_frame)\n pre_drop_frame_pos[0:2] = drop_location[0:2]\n pre_drop_frame = set_position(pre_drop_frame, pre_drop_frame_pos)\n\n grasp_ee_height = grasp_ee_frame.GetPosition()[2]\n drop_frame_pos = copy.copy(pre_drop_frame_pos)\n drop_frame_pos[2] = grasp_ee_height + drop_distance_above_grasp\n\n print \"drop_frame_pos\", drop_frame_pos\n\n drop_frame = transformUtils.copyFrame(pre_drop_frame)\n drop_frame = set_position(drop_frame, drop_frame_pos)\n\n vis.updateFrame(pre_drop_frame, \"pre drop frame\", scale=0.15)\n vis.updateFrame(drop_frame, \"drop frame\", scale=0.15)\n\n # run IK\n pre_drop_frame_pose_stamped = self.vtkFrameToPoseMsg(pre_drop_frame)\n pre_drop_ik_response = self.robotService.runIK(pre_drop_frame_pose_stamped, seedPose=above_table_pre_grasp,\n nominalPose=above_table_pre_grasp)\n\n drop_frame_pose_stamped = self.vtkFrameToPoseMsg(drop_frame)\n drop_ik_response = self.robotService.runIK(drop_frame_pose_stamped, seedPose=above_table_pre_grasp,\n nominalPose=above_table_pre_grasp)\n\n if pickup_ik_response.success and pre_drop_ik_response.success and drop_ik_response.success:\n # pickup object\n self.robotService.moveToJointPosition(pickup_ik_response.joint_state.position,\n maxJointDegreesPerSecond=speed)\n\n # move to pre-drop\n self.robotService.moveToJointPosition(pre_drop_ik_response.joint_state.position,\n maxJointDegreesPerSecond=rotate_speed)\n\n # move to drop location\n self.robotService.moveToJointPosition(drop_ik_response.joint_state.position,\n 
maxJointDegreesPerSecond=speed)\n\n self.gripperDriver.send_open_gripper_set_distance_from_current()\n rospy.sleep(0.5)\n\n # move to pre-drop\n self.robotService.moveToJointPosition(pre_drop_ik_response.joint_state.position,\n maxJointDegreesPerSecond=rotate_speed)\n\n self.moveHome()\n\n else:\n print \"ik failed\"\n return False\n\n return True",
"def __init__(self, screen, maze_arrangement):\n pygame.sprite.Sprite.__init__(self)\n \n self.__walk_down = [pygame.image.load(\"./PlayerImages/stand_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_down.png\")]\n \n self.__walk_up = [pygame.image.load(\"./PlayerImages/stand_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_up.png\")] \n \n self.__walk_right = [pygame.image.load(\"./PlayerImages/stand_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_right.png\")]\n \n self.__walk_left = [pygame.image.load(\"./PlayerImages/stand_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_left.png\")]\n \n self.image = self.__walk_down[0]\n self.rect = self.image.get_rect()\n \n # Set direction, current frame index, animation state, and \n self.__direction = \"DOWN\"\n self.__frame_index = 0\n self.__animating = False\n self.__move_length = 0\n \n self.__maze_arrangement = maze_arrangement\n \n self.rect.x = 50\n self.rect.y = 50 \n \n self.__user_x = self.rect.x / 50\n self.__user_y = self.rect.y / 50 \n self.__x = self.rect.x\n self.__y = self.rect.y",
"def forward(self, anchors, pullers, pushers):\n\n x = self.features(anchors)\n y = self.features(pullers)\n z = self.features(pushers)\n\n return x, y, z",
"def __init__(self, gender=\"male\"):#, image, scale):\n\n # Call the parent init\n super().__init__() #image, scale)\n self.walk_direction = 1\n self.walk_from_x = 0\n self.walk_to_x = 0\n self.textures_walk_left = []\n self.textures_walk_right = []\n self.textures_idle_left = []\n self.textures_idle_right = []\n self.textures_attack_right = []\n self.textures_attack_left = []\n self.textures_dead_right = []\n self.textures_dead_left = []\n self.textures = []\n self.center_x = random.randrange(SCREEN_WIDTH)\n self.center_y = random.randrange(SCREEN_HEIGHT)\n attack_img = \"img/\" + gender + \"/Attack (%i).png\"\n walk_img = \"img/\" + gender + \"/Walk (%i).png\"\n idle_img = \"img/\" + gender + \"/Idle (%i).png\"\n dead_img = \"img/\" + gender + \"/Dead (%i).png\"\n self.direction = \"left\"\n for y in range(1, 9):\n self.textures_attack_right.append(arcade.load_texture(attack_img % y, scale=COIN_SCALE))\n self.textures_attack_left.append(arcade.load_texture(attack_img % y, mirrored=True, scale=COIN_SCALE))\n for y in range(1, 11):\n self.textures_walk_right.append(arcade.load_texture(walk_img % y, scale=COIN_SCALE))\n self.textures_walk_left.append(arcade.load_texture(walk_img % y, mirrored=True, scale=COIN_SCALE))\n for y in range(1, 16):\n self.textures_idle_left.append(arcade.load_texture(idle_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_idle_right.append(arcade.load_texture(idle_img % y, scale=COIN_SCALE))\n for y in range(1, 13):\n self.textures_dead_left.append(arcade.load_texture(dead_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_dead_right.append(arcade.load_texture(dead_img % y, scale=COIN_SCALE))\n\n self.textures = self.textures_walk_left\n self.cur_texture_index = random.randrange(len(self.textures))\n\n # Create a variable to hold our speed. 'angle' is created by the parent\n self.speed = 0",
"def setup_pipes(self):\n\n pipe1 = ground_step.Ground(393, 170, 35, 30)\n pipe2 = ground_step.Ground(560, 155, 35, 45)\n pipe3 = ground_step.Ground(1973, 366, 83, 170)\n pipe4 = ground_step.Ground(2445, 366, 83, 170)\n pipe5 = ground_step.Ground(6989, 452, 83, 82)\n pipe6 = ground_step.Ground(7675, 452, 83, 82)\n\n self.pipe_group = pygame.sprite.Group(pipe1, pipe2,\n pipe3, pipe4,\n pipe5, pipe6)",
"def __set_transform_matrices(self):\n self.tf_matrices_list = []\n\n transform_matrix = eye(4) # creates a unit matrix via passing argument.\n for i in range(self.joint_count):\n transform_matrix = transform_matrix * self.__create_tf_matrix(self.alpha[i], self.a[i], self.d[i], self.q[i]).subs(self.dh_params)\n self.tf_matrices_list.append(transform_matrix)",
"def lookThruAndFrame(obj):\n cmds.lookThru(obj)\n # Position the active camera to view the active objects\n pm.viewFit()\n\n # Position cameraShape-1 to view all objects\n pm.viewFit(obj, all=True)\n\n # Fill 50 percent of the active view with active objects\n pm.viewFit(f=0.5)\n pm.viewFit(all=True)",
"def __init__(self, teeth = 10, length = 0.3):\n self.teeth = teeth\n self.length = length\n # teeth are every other face\n spans = teeth * 2\n \n pipeObj = cmds.polyPipe(sa = spans)\n self.transform = pipeObj[0] + \"_gear\"\n self.constructor = pipeObj[1]\n \n # rename object\n cmds.rename(pipeObj[0], self.transform)\n\n # this is because the faces we want in Maya are numbered from [spans * 2, spans * 3)\n # *** if you run ls -sl in MEL, Maya gives you all the face names\n sideFaces = range(spans * 2, spans * 3, 2)\n\n # clear any selection you have\n cmds.select(clear = True)\n\n # iterate through every other side face\n for face in sideFaces:\n cmds.select(\"%s.f[%s]\" % (self.transform, face), add = True)\n\n # get the poly extrude face\n self.extrude = cmds.polyExtrudeFacet(ltz = length)[0]\n\n #clean up and return\n cmds.select(clear = True)",
"def setup(self, forward, feedback, character_id):\n self.background = arcade.load_texture(f\"{DATA_DIR}/14.png\")\n\n self.assigned_player = int(character_id) + 1\n\n # Sprite lists\n self.player_list = arcade.SpriteList()\n\n # jet sprites\n self.player1 = arcade.Sprite(f\"{DATA_DIR}/player1.png\", SPRITE_SCALING_PLAYER)\n self.player2 = arcade.Sprite(f\"{DATA_DIR}/player2.png\", SPRITE_SCALING_PLAYER)\n self.player3 = arcade.Sprite(f\"{DATA_DIR}/player3.png\", SPRITE_SCALING_PLAYER)\n\n self.player1.center_x = 100\n self.player1.center_y = 100\n\n self.player2.center_x = 500\n self.player2.center_y = 100\n\n self.player3.center_x = 1000\n self.player3.center_y = 100\n\n self.player_list.append(self.player1)\n self.player_list.append(self.player2)\n self.player_list.append(self.player3)\n\n build = Build(scale=0.1, image=f\"{DATA_DIR}/11.png\")\n build.lay((0, 1000, 15), \"x\", 10)\n self.wall_list = build.blocks\n\n self.physics_engine = arcade.PhysicsEnginePlatformer(\n getattr(self, f\"player{self.assigned_player}\"), self.wall_list, GRAVITY\n )\n\n self.forward = forward\n self.feedback = feedback\n if (\n os.getenv(\"SERVER\") == \"127.0.0.1\"\n or os.getenv(\"SERVER\") == socket.gethostname()\n ):\n arcade.schedule(self.stream, 0.2)",
"def __init__(\n self, im_width=512,\n im_height=424, fov=42.5,\n near_plane=0.1, far_plane=30.0,\n target_width=256, target_height=256,\n use_change_light=True, labels=None,\n save_dir='./', save_debug_image=False,\n gui=False, task_type='hanging', stop_per_data=False,\n random_texture_path=None):\n self.objects = []\n self.im_width = im_width\n self.im_height = im_height\n self.fov = fov\n self.near_plane = near_plane\n self.far_plane = far_plane\n self.target_width = target_width\n self.target_height = target_height\n self.save_dir = save_dir\n self.save_debug_image = save_debug_image\n self.task_type = task_type\n self.stop_per_data = stop_per_data\n\n if self.task_type == 'hanging':\n # direction of grabity\n self.translate_value = np.array([0, 0.005, 0])\n elif self.task_type == 'pouring':\n # direction opposite to gravity\n self.translate_value = np.array([-0.005, 0, 0])\n\n aspect = self.im_width / self.im_height\n self.camera_model \\\n = cameramodels.PinholeCameraModel.from_fov(\n fov, im_height, im_width)\n\n self.camera_model.target_size = (target_width, target_height)\n self.pm = pybullet.computeProjectionMatrixFOV(\n fov, aspect, near_plane, far_plane)\n\n self.camera_coords = coordinates.Coordinates(\n pos=np.array([0, 0, 0.5]),\n rot=coordinates.math.rotation_matrix_from_rpy([0, np.pi, 0]))\n\n self.annotation_img = np.zeros(\n (target_width, target_height), dtype=np.uint32)\n\n self.annotation_data = []\n self.rotation_map = RotationMap(target_width, target_height)\n self.rotations = None\n self.depth_map = DepthMap(target_width, target_height, circular=True)\n\n self.object_coords = coordinates.Coordinates(\n pos=np.array([0, 0, 0.1]),\n rot=coordinates.math.rotation_matrix_from_rpy([0, 0, 0]))\n\n self.labels = labels\n self.visible_labels = []\n\n if gui:\n self.cid = pybullet.connect(pybullet.GUI)\n pybullet.resetDebugVisualizerCamera(\n cameraDistance=1,\n cameraYaw=90,\n cameraPitch=0,\n cameraTargetPosition=[0, 0, 0.1])\n else:\n self.cid = pybullet.connect(pybullet.DIRECT)\n self.gui = gui\n self.no_visible_count = 0\n self.no_visible_skip_num = 300\n\n self.texture_paths = list(\n map(str, list(Path(random_texture_path).glob('**/*.jpg'))))\n current_dir = osp.dirname(osp.abspath(__file__))\n self.gray_texture = osp.join(current_dir, 'images', 'gray.jpg')\n\n pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())\n pybullet.setPhysicsEngineParameter(enableFileCaching=0)\n\n self.draw_camera_pos()\n self.lightDirection = [1, 1, 1]\n self.lightDistance = 1\n self.lightColor = [1, 1, 1]\n self.lightAmbientCoeff = 0.2\n self.lightDiffuseCoeff = 0.9\n self.lightSpecularCoeff = 0.9\n if use_change_light:\n self.change_light()\n self._rendered = None\n self._rendered_pos = None\n self._rendered_rot = None",
"def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()",
"def init_transforms(self):\n\t\t# Lifecycle of a frame\n\t\t# in dataset:\n\t\t# \tdset.tr_post_load_pre_cache\n\t\t# \tdset.tr_output\n\t\t# in experiment:\n\t\tpass",
"def startface(self):\n self.fan = (self.position.x,self.position.y,self.position.z)",
"def __init__(self, vertices, edges, surfaces):\n #self.target = target\n #self.support = support\n self.vertices = vertices\n self.edges = edges\n self.surfaces = surfaces",
"def map_face(self):\n #Array Order: U0,D1,R2,L3,F4,B5,\n \n cube_list = []\n cube_list = self.cube.definition()\n \n for index, cubit in enumerate(self.faces['Up']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index]])\n for index, cubit in enumerate(self.faces['Ri']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+9]])\n for index, cubit in enumerate(self.faces['Ft']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+18]])\n for index, cubit in enumerate(self.faces['Dn']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+27]])\n for index, cubit in enumerate(self.faces['Le']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+36]])\n for index, cubit in enumerate(self.faces['Bk']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+45]])",
"def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)",
"def add_cam(self, xpos, ypos, follower_len, bumps=[], horizontal=False, reverse_direction=False, axis_offset=0, axis=True, bump_height=3, slow_rise=False):\n attachment_body = self.groundBody\n offset = 0\n if not horizontal:\n offset = -0.25\n if reverse_direction:\n offset = 0.5\n for b in range(0,len(bumps)):\n bumps[b] = (bumps[b][0] + offset, bumps[b][1])\n radius = 30\n bump_height = radius+bump_height\n disc_fixture = fixtureDef(shape=circleShape(radius=radius, pos=(0,0)),density=1.0,filter=filters[0])\n bump_fixtures = []\n\n if slow_rise:\n ang=bumps[0][0]\n bump_points = [( radius*math.cos(-ang*math.pi*2), radius*math.sin(-ang*math.pi*2)) ]\n height = radius\n for point in range(0,14):\n ang += 0.015\n height += 0.3\n bump_points.append( ( height*math.cos(-ang*math.pi*2), height*math.sin(-ang*math.pi*2)) )\n bump_points.append(( radius*math.cos(-ang*math.pi*2), radius*math.sin(-ang*math.pi*2)))\n bump_fixtures.append(fixtureDef(shape=polygonShape(vertices=bump_points),density=0.0,filter=filters[0]))\n else:\n for (start, length) in bumps:\n ang = start\n bump_points = [( radius*math.cos(-ang*math.pi*2), radius*math.sin(-ang*math.pi*2)) ]\n points = 1\n # Max points in a polygon is 15 at the moment.\n while ang < (start+length) and points < 15:\n ang += 0.02\n bump_points.append( ( bump_height*math.cos(-ang*math.pi*2), bump_height*math.sin(-ang*math.pi*2)) )\n points += 1\n if points == 15:\n print(\"WARNING: Max points reached in cam bump; %2.2d%% of cam complete\"%(100*(ang-start)/length))\n\n bump_points.append(( radius*math.cos(-(ang+0.01)*math.pi*2), radius*math.sin(-(ang+0.01)*math.pi*2)))\n f = fixtureDef(shape=polygonShape(vertices=bump_points),density=0.0,filter=filters[0], userData=(0,0,255))\n bump_fixtures.append(f)\n cam_body = self.add_multifixture(bump_fixtures + [disc_fixture], xpos, ypos, (0,255,0))\n cam_driver = self.revolving_joint(attachment_body, cam_body, (xpos,ypos), motor=1, force=50)\n cam_driver.motorSpeed = 0\n follower_filter = filters[1]\n if axis:\n if horizontal:\n axle_y = ypos+radius\n if reverse_direction:\n axle_x = xpos-radius-axis_offset-5\n else:\n axle_x = xpos+radius+axis_offset+2.5\n follower_body = self.add_dynamic_polygon(box_polygon_shape(axle_x, axle_y-follower_len, 5, follower_len), 0, 0, filter=follower_filter)\n follower_wheel = self.add_dynamic_circle(axle_x+2.5, axle_y-radius, 5)\n self.revolving_joint(follower_wheel, follower_body, (axle_x+2.5,axle_y-radius), friction=False)\n else:\n axle_y = ypos+radius+axis_offset+2.5\n axle_x = xpos-radius\n follower_body = self.add_dynamic_polygon(box_polygon_shape(axle_x, axle_y, follower_len, 5), 0, 0, filter=follower_filter)\n follower_wheel = self.add_dynamic_circle(axle_x+radius, axle_y+2.5, 5)\n self.revolving_joint(follower_wheel, follower_body, (axle_x+radius,axle_y+2.5), friction=False)\n\n if horizontal:\n follower_body.attachment_point=(axle_x, axle_y-follower_len)\n else:\n follower_body.attachment_point=(axle_x+follower_len, axle_y)\n self.revolving_joint(attachment_body, follower_body, (axle_x+2.5,axle_y+2.5), friction=False)\n print(\"Creating cam: xpos= {}, ypos= {}, axle_x = {} ,axle_y= {}, follower_len={}\".format(xpos, ypos, axle_x, axle_y, follower_len))\n\n self.all_cam_drives.append(cam_driver)\n if axis:\n return follower_body\n else:\n return None",
"def attach_camera_floater(self):\r\n camera_behind = 8\r\n camera_above = 3\r\n self.camera_floater = NodePath(\"camera_floater\")\r\n self.camera_floater.reparent_to(self.vehicleNP)\r\n self.camera_floater.set_y(-camera_behind)\r\n self.camera_floater.set_z(camera_above)",
"def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n 
self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": 
\"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))",
"def pre_cache():\n for transform, _ in self._pairs:\n world_matrix = transform[\"worldMatrix\"][0].asMatrix()\n parent_matrix = transform[\"parentMatrix\"][0].asMatrix()\n matrix = transform[\"matrix\"].asMatrix()\n translate = transform[\"translate\"].as_vector()\n rotate = transform[\"rotate\"].as_euler()\n\n if \"jointOrient\" in transform:\n joint_orient = transform[\"jointOrient\"].as_quaternion()\n else:\n # Only joints have these\n joint_orient = cmdx.Quaternion()\n\n self._cache[(transform, \"worldMatrix\")] = world_matrix\n self._cache[(transform, \"parentMatrix\")] = parent_matrix\n self._cache[(transform, \"matrix\")] = matrix\n self._cache[(transform, \"translate\")] = translate\n self._cache[(transform, \"rotate\")] = rotate\n self._cache[(transform, \"jointOrient\")] = joint_orient",
"def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")",
"def __init__(self, game_object, animation_list):\n self.animation_list = animation_list\n self.current_playing_animation = None\n self.animation_idx = 0\n self.is_paused = False\n for animation in self.animation_list:\n animation.set_animator(self)\n super().__init__(game_object)",
"def setUp(self):\n self.precip_cube = setup_precipitation_cube()\n self.oe_cube = setup_orographic_enhancement_cube()\n self.vel_x = set_up_xy_velocity_cube(\"advection_velocity_x\")\n self.vel_y = set_up_xy_velocity_cube(\"advection_velocity_y\")\n for cube in [self.precip_cube, self.oe_cube]:\n cube.coord(\"projection_x_coordinate\").points = 600 * np.arange(3)\n cube.coord(\"projection_y_coordinate\").points = 600 * np.arange(4)"
] |
[
"0.7095333",
"0.56289697",
"0.55895954",
"0.54822636",
"0.5440794",
"0.5423058",
"0.540257",
"0.53933394",
"0.53719753",
"0.5346639",
"0.5332166",
"0.5310055",
"0.5305138",
"0.5296076",
"0.529135",
"0.52912456",
"0.52699417",
"0.5257043",
"0.5248384",
"0.5242137",
"0.5236481",
"0.5195456",
"0.5193994",
"0.5192185",
"0.5189491",
"0.51697695",
"0.5154525",
"0.5151709",
"0.5148792",
"0.51457256"
] |
0.7946856
|
0
|
Bake and clean follow through (jiggle planes) on selected transform objects.
|
def bake_follow_through():
    # -- Find all selected transforms that have a jiggle setup
    node_list = [
        node
        for node in pm.selected(type='transform')
        if any(pc.hasAttr('ld_jiggle_node')
               for pc in node.getChildren(type='parentConstraint'))
    ]

    # -- Validate node list
    if not node_list:
        pm.warning(
            'Select at least 1 jiggled object!'
        )
        return None

    plane_trans_list = []
    to_delete = set()

    # -- Find connected parent constraints (already validated)
    for node in node_list:
        constraint = [
            pc
            for pc in node.getChildren(type='parentConstraint')
            if pc.hasAttr('ld_jiggle_node')
        ][0]

        # -- Retrieve the plane (transform) from the constraint
        plane_trans = constraint.ld_jiggle_node.get()
        plane_trans_list.append(plane_trans)

        # -- We'll want to delete this during cleanup
        to_delete.add(plane_trans)

        # -- Grab the plane's follicle
        to_delete.update(
            plane_trans.getShape().outputs(type='follicle')
        )

    # -- Geo cache the planes to avoid jitter.
    # -- Enable if you get jitter issues - possibly caused
    # -- by V2.0 / playback speed etc.
    # -- As this creates a geometry cache file using the current settings, users
    # -- may be prompted to replace the existing cache (recommended action).
    # -- HINT: This can be avoided if you investigate the pm.mel.geometryCache() call.
    pm.select(plane_trans_list)
    pm.mel.geometryCache()

    # -- Grab the current start and end frames
    start_frame = pm.playbackOptions(q=True, min=True)
    end_frame = pm.playbackOptions(q=True, max=True)

    # -- Bake all transforms
    pm.bakeResults(
        node_list,
        time=[
            start_frame,
            end_frame,
        ],
        at=['t', 'r'],
        sm=True,
    )

    # -- Delete the temporary setup
    pm.delete(to_delete)

    # -- Select the nodes (it's polite to leave the scene as you entered it).
    pm.select(node_list)
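For context, a minimal usage sketch of the intended workflow. This is an illustration only, not part of the dataset entry: it assumes a running Maya session with pymel available, that the companion setup_follow_through() routine (listed among the negatives below) has already been used to build the jiggle planes, and the control names are hypothetical.

import pymel.core as pm

# Illustrative only: node names are hypothetical and require an existing rig.
pm.select(['tail_01_ctrl', 'tail_02_ctrl'])   # animated controls to add follow-through to
jiggle_nodes = setup_follow_through()         # builds planes, jiggle deformers and constraints

for jiggle in jiggle_nodes:                   # optionally tune the dynamics before baking
    jiggle.stiffness.set(0.2)
    jiggle.damping.set(0.6)

pm.select(['tail_01_ctrl', 'tail_02_ctrl'])   # reselect the jiggled controls
bake_follow_through()                         # bake the motion back and delete the temporary setup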
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setup_follow_through():\r\n # -- Grab the selected transforms\r\n node_list = pm.selected(type='transform')\r\n \r\n # -- Validate node list\r\n if not node_list:\r\n pm.warning(\r\n 'Select at least 1 transform object!'\r\n )\r\n return None\r\n\r\n # -- Grab the current start and end frames\r\n start_frame = pm.playbackOptions(q=True, min=True)\r\n end_frame = pm.playbackOptions(q=True, max=True)\r\n\r\n # -- Iterable variables for later\r\n to_delete = []\r\n plane_trans_list = []\r\n \r\n # -- For each node\r\n for node in node_list:\r\n \r\n # -- Create a 10x10 poly plane\r\n plane_trans = pm.polyPlane(\r\n w=10,\r\n h=10,\r\n sx=1,\r\n sy=1,\r\n )[0]\r\n plane_trans_list.append(plane_trans)\r\n \r\n # -- Constrain the plane to the give node (delete later)\r\n to_delete.append(\r\n pm.parentConstraint(node, plane_trans, mo=False)\r\n )\r\n\r\n # -- Bake all planes in one go (translate and rotate).\r\n # -- This is to sever any dependency on the selected transforms, and\r\n # -- removing any potential cyclic issues.\r\n pm.bakeResults(\r\n plane_trans_list,\r\n time=[\r\n start_frame,\r\n end_frame,\r\n ],\r\n at=['t', 'r'],\r\n sm=True,\r\n )\r\n \r\n # -- Delete plane constraints\r\n pm.delete(to_delete)\r\n\r\n # -- List of jiggle deformers to select & return\r\n jiggle_list = []\r\n \r\n # -- Go to the first frame\r\n pm.currentTime(start_frame)\r\n \r\n # -- Setup and connect each node to plane\r\n for node, plane_trans in zip(node_list,\r\n plane_trans_list):\r\n \r\n # -- Create a jiggle deformer on the plane\r\n pm.select(plane_trans)\r\n pm.mel.CreateJiggleDeformer()\r\n \r\n # -- Get the plane's shape\r\n plane_shape = plane_trans.getShape()\r\n \r\n # -- Get the Jiggle deformer\r\n jiggle_deformer = plane_shape.inputs(type='jiggle')[0]\r\n jiggle_list.append(jiggle_deformer)\r\n \r\n # -- Set the default jiggle settings\r\n for attr, value in JIGGLE_DEFAULTS.iteritems():\r\n jiggle_deformer.attr(attr).set(value)\r\n \r\n # -- Create and setup a follicle on the plane's shape\r\n follicle = pm.createNode('follicle')\r\n\r\n # -- Get the follicle's transform node (parent)\r\n follicle_trans = follicle.getParent()\r\n \r\n plane_shape.outMesh.connect(follicle.inputMesh)\r\n plane_shape.worldMatrix[0].connect(follicle.inputWorldMatrix)\r\n follicle.outRotate.connect(follicle_trans.rotate)\r\n follicle.outTranslate.connect(follicle_trans.translate)\r\n\r\n # -- Position the follicle in the center of the plane\r\n follicle.parameterU.set(0.5)\r\n follicle.parameterV.set(0.5)\r\n\r\n # -- Lock the follicle_trans translate and rotate attributes\r\n follicle_trans.translate.lock()\r\n follicle_trans.rotate.lock()\r\n \r\n # -- Constrain the original node to the follicle_trans\r\n constraint = pm.parentConstraint(follicle_trans, node, mo=True)\r\n \r\n # -- Connect the plane_trans to a custom attribute\r\n # -- on the constraint for retrieval (when baking).\r\n # -- Add it to the constraint as we clean it up\r\n # -- anyway (no need to dirty the rig).\r\n constraint.addAttr('ld_jiggle_node', at='message')\r\n plane_trans.message.connect(constraint.ld_jiggle_node)\r\n \r\n # -- Delete animation on the nodes (constrained\r\n # -- by the planes by now anyway)\r\n pm.cutKey(node_list, at=['t', 'r'], cl=True)\r\n \r\n # -- Select the node list (allows the bake process to run\r\n # -- immediately after).\r\n # -- Would recommend selecting the jiggle_list to allow users\r\n # -- to animate the jiggle deformer instead.\r\n pm.select(node_list)\r\n \r\n return jiggle_list",
"def freezeObjectsTransforms(self):\n\t\tmc.makeIdentity( self.objects, apply = True, t = 1, r = 1, s = 1, n = 0, pn = 1 )",
"def resetAllTransform(self):\n selectionList = mc.ls(selection=True, type='transform')\n if not selectionList:\n print \">> No Selection\"\n for obj in selectionList:\n mc.xform(obj, absolute=True, t=[0, 0, 0], ro=[0, 0, 0], s=[1, 1, 1])",
"def launch_test_mvbb_filtered(robotname, object_list, min_vertices = 0):\n\n world = WorldModel()\n world.loadElement(\"data/terrains/plane.env\")\n robot = make_moving_base_robot(robotname, world)\n xform = resource.get(\"default_initial_%s.xform\" % robotname, description=\"Initial hand transform\",\n default=se3.identity(), world=world, doedit=False)\n\n for object_name in object_list:\n obj = None\n for object_set, objects_in_set in objects.items():\n if object_name in objects_in_set:\n if world.numRigidObjects() > 0:\n world.remove(world.rigidObject(0))\n obj = make_object(object_set, object_name, world)\n if obj is None:\n print \"Could not find object\", object_name\n continue\n\n\n R,t = obj.getTransform()\n obj.setTransform(R, [0, 0, 0])\n object_vertices_or_none, tm_decimated = skip_decimate_or_return(obj, min_vertices, 2000)\n if object_vertices_or_none is None:\n print \"??????????????????????????????????????????????????\"\n print \"??????????????????????????????????????????????????\"\n print \"??????????????????????????????????????????????????\"\n print \"skipping object, too few vertices\", obj.getName()\n print \"??????????????????????????????????????????????????\"\n print \"??????????????????????????????????????????????????\"\n print \"??????????????????????????????????????????????????\"\n continue\n object_or_vertices = object_vertices_or_none\n\n print \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n print \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n print \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n print object_name\n print \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n print \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n print \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n print \"------Computing poses for object:\", object_name\n poses, poses_variations, boxes = compute_poses(object_or_vertices)\n\n w_T_o = np.array(se3.homogeneous((R,[0, 0, 0]))) # object is at origin\n\n p_T_h = np.array(se3.homogeneous(xform))\n\n poses_h = []\n poses_variations_h = []\n\n for i in range(len(poses)):\n poses_h.append(w_T_o.dot(poses[i]).dot(p_T_h))\n for i in range(len(poses_variations)):\n poses_variations_h.append(w_T_o.dot(poses_variations[i]).dot(p_T_h))\n\n print \"-------Filtering poses:\"\n filtered_poses = []\n for i in range(len(poses)):\n if not CollisionTestPose(world, robot, obj, poses_h[i]):\n filtered_poses.append(poses[i])\n filtered_poses_variations = []\n for i in range(len(poses_variations)):\n if not CollisionTestPose(world, robot, obj, poses_variations_h[i]):\n filtered_poses_variations.append(poses_variations[i])\n print \"Filtered from\", len(poses+poses_variations), \"to\", len(filtered_poses+filtered_poses_variations)\n if len(filtered_poses+filtered_poses_variations) == 0:\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n print \"Filtering returned 0 feasible poses\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n continue\n\n # create a hand emulator from the given robot name\n module = importlib.import_module('plugins.' 
+ robotname)\n # emulator takes the robot index (0), start link index (6), and start driver index (6)\n\n program = FilteredMVBBTesterVisualizer(filtered_poses,\n filtered_poses_variations,\n world,\n p_T_h,\n R,\n module)\n\n vis.setPlugin(None)\n vis.setPlugin(program)\n program.reshape(800, 600)\n\n vis.show()\n # this code manually updates the visualization\n while vis.shown():\n time.sleep(0.1)\n return",
"def clear_transforms(self): # -> None:\n ...",
"def objects_to_bmesh(objs, transform=True):\n\n # CAUTION: Removes/destroys custom layer props\n\n # Creates the mesh used to merge the entire scene\n bm_all = bmesh.new()\n\n # Adds the objects\" meshes to the bmesh\n for obj in objs:\n dprint(\"Preparing object {} for export...\".format(obj.name))\n # Creates a bmesh from the supplied object\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n\n # Makes sure all layers exist so values don't get lost while exporting\n uv_layer = bm.loops.layers.uv.get(\"UVMap\")\n tex_layer = bm.faces.layers.tex.get(\"UVMap\")\n vc_layer = (bm.loops.layers.color.get(\"Col\") or\n bm.loops.layers.color.new(\"Col\"))\n env_layer = (bm.loops.layers.color.get(\"Env\") or\n bm.loops.layers.color.new(\"Env\"))\n env_alpha_layer = (bm.faces.layers.float.get(\"EnvAlpha\") or\n bm.faces.layers.float.new(\"EnvAlpha\"))\n va_layer = (bm.loops.layers.color.get(\"Alpha\") or\n bm.loops.layers.color.new(\"Alpha\"))\n texnum_layer = bm.faces.layers.int.get(\"Texture Number\")\n type_layer = (bm.faces.layers.int.get(\"Type\") or\n bm.faces.layers.int.new(\"Type\"))\n material_layer = (bm.faces.layers.int.get(\"Material\") or\n bm.faces.layers.int.new(\"Material\"))\n\n # Removes the parent for exporting and applies transformation\n parent = obj.parent\n if parent:\n mat = obj.matrix_world.copy()\n old_mat = obj.matrix_basis.copy()\n obj.parent = None\n obj.matrix_world = mat\n\n spc = obj.matrix_basis\n bmesh.ops.scale(\n bm,\n vec=obj.scale,\n space=spc,\n verts=bm.verts\n )\n if transform:\n bmesh.ops.transform(\n bm,\n matrix=Matrix.Translation(obj.location),\n space=spc,\n verts=bm.verts\n )\n bmesh.ops.rotate(\n bm,\n cent=obj.location,\n matrix=obj.rotation_euler.to_matrix(),\n space=spc,\n verts=bm.verts\n )\n\n # Restores the parent relationship\n if parent and not obj.parent:\n obj.parent = parent\n obj.matrix_basis = old_mat\n\n # Converts the transformed bmesh to mesh\n new_mesh = bpy.data.meshes.new(\"ncp_export_temp\")\n bm.to_mesh(new_mesh)\n\n # Adds the transformed mesh to the big bmesh\n bm_all.from_mesh(new_mesh)\n\n # Removes unused meshes\n bpy.data.meshes.remove(new_mesh, do_unlink=True)\n bm.free()\n\n return bm_all",
"def remove_objects(self, objects):\n for sprite_group in self.sprite_level_blocks:\n sprite_group.remove(objects)",
"def clean_face_rig():\r\n #TODO: Eventually create a non-linear way to re-add the nodes for a pose if you want to edit it later.\r\n if DRYRUN:\r\n print('clean face rig function - DRY RUN ONLY')\r\n return False\r\n\r\n def analyze_face():\r\n print('# All Nodes: {}'.format(len(pm.ls('*'))))\r\n print('# MLT Nodes: {}'.format(len(pm.ls(type='multiplyDivide'))))\r\n print('# MAP Nodes: {}'.format(len(pm.ls(type='remapValue'))))\r\n print('# JNT Nodes: {}'.format(len(pm.ls(type='joint'))))\r\n print('# TRS Nodes: {}'.format(len(pm.ls(type='transform'))))\r\n print('# ADD Nodes: {}'.format(len(pm.ls(type='plusMinusAverage'))))\r\n\r\n if DEBUG:\r\n analyze_face()\r\n\r\n for oPos in pm.ls('*_POSE', type='transform'):\r\n poseMLT = set( oPos.outputs(type='multiplyDivide') )\r\n for each in poseMLT:\r\n # track back to the MLT input to separate out each pose translate, rotate and scale.\r\n # sum the abs() values to see if the pose delta is empty.\r\n poseDelta = sum([abs(x.get()) for x in each.inputs(type='transform', plugs=True)])\r\n if poseDelta < 0.001:\r\n pm.delete(each)\r\n\r\n if DEBUG:\r\n analyze_face()\r\n\r\n allZones = pm.ls('*_zone', type='objectSet')\r\n pm.delete(allZones)\r\n print('The rig has been cleaned. Unused MLT and MAP nodes have been removed. Zone sets have been deleted.')\r\n return True",
"def transformAndClip(clipping_planes, instance, transform):\n center = instance.bounds_center\n radius = instance.bounds_radius\n model = instance.model\n\n for plane in clipping_planes:\n distance_to_plane = plane.normal.dot(center) + plane.distance\n if distance_to_plane < -model.bounds_radius:\n return\n\n transformed_vertexes = []\n for vertex in model.vertexes:\n vertexH = transform.transform_vec3(vertex)\n transformed_vertexes.append(vertexH)\n\n for triangle in model.triangles:\n for plane in clipping_planes:\n clipTriangle(triangle, plane, transformed_vertexes)",
"def _cleanup():\n for (\n _,\n transformation,\n transformation_dict,\n _,\n _,\n increfed,\n _,\n ) in _queued_transformations:\n # For some reason, the logic here is different than for the async version\n # (see run_transformation_dict_async)\n if (\n increfed\n and bytes.fromhex(transformation) in transformation_cache.transformations\n ):\n transformation_cache.decref_transformation(transformation_dict, increfed)",
"def createAssetPreview(objects):\n if not type(objects) is list:\n objects = [objects]\n\n bpy.ops.object.select_all(action='DESELECT')\n if objects:\n # Return if there is no 3d view available.\n if not get3dView():\n return {'WARNING'}, \"No 3d view found\"\n\n result = setupRender()\n # If the resulting path contains an error no rendering is\n # performed.\n # Cleanup is not necessary in this case.\n if type(result) is tuple:\n return result\n\n renderError = renderPreviews(objects, result)\n cleanup(result)\n\n if renderError:\n return renderError\n\n # Restore the selection.\n for obj in objects:\n if isinstance(obj, bpy.types.Object):\n obj.select_set(True)\n bpy.context.view_layer.objects.active = obj\n elif isinstance(obj, bpy.types.Collection):\n # Deselect all collection meshes.\n for mesh in collectionMeshes(obj):\n mesh.select_set(False)\n else:\n pass",
"def bakeModel(objlist, modelname, posename=\"\", decimate_type='COLLAPSE', decimate_parameter=0.1):\n if bpy.context.scene.phobosexportsettings.relativePath:\n # CHECK careful with path consistency (Windows)\n outpath = securepath(\n os.path.expanduser(\n os.path.join(bpy.path.abspath(\"//\"), bpy.context.scene.phobosexportsettings.path)\n )\n )\n else:\n # CHECK careful with path consistency (Windows)\n outpath = securepath(os.path.expanduser(bpy.context.scene.phobosexportsettings.path))\n\n # TODO delete me?\n # bake_outpath = securepath(os.path.join(outpath, modelname) if savetosubfolder else outpath)\n bake_outpath = outpath\n\n if bpy.context.scene.phobosexportsettings.structureExport:\n securepath(os.path.join(bake_outpath, 'bakes'))\n bake_outpath = os.path.join(bake_outpath, 'bakes/')\n\n export_name = modelname + '_' + posename\n\n visuals = [o for o in objlist if (\"phobostype\" in o and o.phobostype == \"visual\")]\n if len(visuals) > 0:\n\n log(\"Baking model to \" + bake_outpath, \"INFO\")\n sUtils.selectObjects(visuals, active=0)\n log(\"Copying objects for joining...\", \"INFO\")\n bpy.ops.object.duplicate(linked=False, mode='TRANSLATION')\n log(\"Joining...\", \"INFO\")\n bpy.ops.object.join()\n obj = bpy.context.active_object\n log(\"Deleting vertices...\", \"INFO\")\n bpy.ops.object.editmode_toggle()\n bpy.ops.mesh.select_all(action='TOGGLE')\n bpy.ops.mesh.select_all(action='TOGGLE')\n bpy.ops.mesh.remove_doubles()\n bpy.ops.object.editmode_toggle()\n log(\"Adding modifier...\", \"INFO\")\n\n bpy.ops.object.modifier_add(type='DECIMATE')\n bpy.context.object.modifiers[\"Decimate\"].decimate_type = decimate_type\n if decimate_type == 'COLLAPSE':\n bpy.context.object.modifiers[\"Decimate\"].ratio = decimate_parameter\n elif decimate_type == 'UNSUBDIV':\n bpy.context.object.modifiers[\"Decimate\"].iterations = decimate_parameter\n elif decimate_type == 'DISSOLVE':\n bpy.context.object.modifiers[\"Decimate\"].angle_limit = decimate_parameter\n\n log(\"Applying modifier...\", \"INFO\")\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=\"Decimate\")\n obj.name = export_name + \".obj\"\n\n # TODO use_selection might cause bugs, depending on Blender version\n bpy.ops.export_scene.obj(filepath=os.path.join(bake_outpath, obj.name), use_selection=True)\n\n obj.hide_render = True\n previewfile = export_name\n bUtils.createPreview(\n visuals, export_path=bake_outpath, modelname=modelname, previewfile=previewfile\n )\n\n obj.select_set(True)\n\n bpy.ops.object.delete()\n log(\"Done baking...\", \"INFO\")\n\n else:\n log(\"No visuals to bake!\", \"WARNING\")",
"def remove_all_objs(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n # remove add objects\n for key in objs.keys():\n self.remove_obj(key)\n # remove attached objects\n for key in objs_attached.keys():\n self.unlink_obj(objs_attached[key].link_name, key)",
"def main():\n doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document\n keymod = GetKeyMod() # Get keymodifier\n selected = doc.GetActiveObjects(0) # Get selected objects\n cameras = [] # Collect cameras to an array\n doc.StartUndo() # Start recording undos\n # Collect cameras and do preparation operations\n for s in selected: # Iterate through objects\n if (s.GetType() == 5103) or (s.GetType() == 1057516): # If object is a camera object (standard C4D camera or Redshift camera)\n if s.GetType() == 5103: # If standard C4D camera\n dummyCam = DummyStandardCamera(s, doc) # Dummy camera\n dataVault = GetDataVault(5103) # Get corresponding data vault\n elif s.GetType() == 1057516: # If RS camera\n dummyCam = DummyRedshiftCamera(s, doc) # Dummy camera\n dataVault = GetDataVault(1057516) # Get corresponding data vault\n bakeCam = dummyCam.GetClone() # Bake camera\n name = s.GetName() # Get camera's name\n bakeCam.SetName(name+suffix) # Set baked camera's name\n doc.InsertObject(bakeCam) # Insert camera to document\n doc.AddUndo(c4d.UNDOTYPE_NEW, bakeCam) # Add undo command for creating a new object\n MoveToLast(bakeCam, doc) # Move object to last\n RemoveTags(bakeCam) # Remove tags of the object\n cameras.append([s, dummyCam, bakeCam, dataVault]) # Original camera, dummy camera, camera to bake\n\n doc.ExecutePasses(None, True, True, True, 0) # Animate the current frame of the document\n Bake(cameras) # Bake the camera (standard C4D camera)\n CleanKeys(cameras) # Clean keyframes\n\n # Remove dummy cameras\n for i in range(0, len(cameras)):\n cameras[i][1].Remove() # Delete Dummy camera(s)\n\n # Sort baked cameras\n for i in reversed(range(0, len(cameras))):\n MoveToFirst(cameras[i][2], doc) # Move camera to top of the hierarchy list\n\n #if keymod == \"Shift\":\n # CopyRendererTags(s, bakeCam) # Copies renderer tags from source camera to bake camera\n\n doc.EndUndo() # Stop recording undos\n c4d.EventAdd() # Refresh Cinema 4D\n c4d.StatusClear() # Clear status",
"def resetTransforms(self, translate, rotate, scale, name):\n\n cmds.select(name + \"_mover_grp\", hi=True)\n selection = cmds.ls(sl=True)\n\n globalMovers = []\n offsetMovers = []\n geoMovers = []\n\n for each in selection:\n if each.find(\"_mover\") != -1:\n if each.partition(\"_mover\")[2] == \"\":\n globalMovers.append(each)\n if each.find(\"_mover_offset\") != -1:\n if each.partition(\"_mover_offset\")[2] == \"\":\n offsetMovers.append(each)\n if each.find(\"_mover_geo\") != -1:\n if each.partition(\"_mover_geo\")[2] == \"\":\n geoMovers.append(each)\n\n cmds.select(clear=True)\n\n for moverList in [globalMovers, offsetMovers, geoMovers]:\n for each in moverList:\n if translate:\n for attr in [\".tx\", \".ty\", \".tz\"]:\n try:\n cmds.setAttr(each + attr, 0)\n except:\n pass\n if rotate:\n for attr in [\".rx\", \".ry\", \".rz\"]:\n try:\n cmds.setAttr(each + attr, 0)\n except:\n pass\n if scale:\n for attr in [\".sx\", \".sy\", \".sz\"]:\n try:\n cmds.setAttr(each + attr, 1)\n except:\n pass\n if cmds.window(\"ART_ResetXformsModeWin\", exists=True):\n cmds.deleteUI(\"ART_ResetXformsModeWin\", wnd=True)",
"def deleteAllModelsFromScene(self):\r\n # research\r\n self.deleteNeedleDetectionModelsFromScene()\r\n self.deleteNeedleValidationModelsFromScene()",
"def _publish_objects(self):\n\n for obj in self._cozmo.world.visible_objects:\n now = rospy.Time.now()\n x = obj.pose.position.x * 0.001\n y = obj.pose.position.y * 0.001\n z = obj.pose.position.z * 0.001\n q = (obj.pose.rotation.q1, obj.pose.rotation.q2, obj.pose.rotation.q3, obj.pose.rotation.q0)\n self._tfb.send_transform(\n (x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame\n )\n \n try:\n if obj.cube_id and self.target_cube != obj:\n self._tfb.send_transform((x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame)\n print(\"Found {}\".format(obj.cube_id))\n if not self.cube_found and self.robots_distance_to_object(self._cozmo, obj) < 400:\n self.target_cube = obj\n self.cube_found = True\n print(\"Locking on to {}\".format(obj.cube_id))\n else:\n if self.cube_found:\n print(\"Found that one already!\")\n else:\n print(\"Cube too far away!\")\n \n except:\n # print('OBJECT IS NOT A LIGHT CUBE')\n if(obj==self._cozmo.world.charger):\n return\n if(obj.object_type==CustomObjectTypes.CustomType00 and (self.front_wall_pose == None or not self.front_wall_pose.is_accurate)):\n self.front_wall_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Front', self._odom_frame)\n print('*** Comzmo has found the front wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType01 and (self.ramp_bottom_pose == None or not self.ramp_bottom_pose.is_accurate)):\n self.ramp_bottom_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Ramp', self._odom_frame)\n print('*** Comzmo has found the front wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType03 and (self.drop_spot_pose == None or not self.drop_spot_pose.is_accurate)):\n self.drop_spot_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Drop', self._odom_frame)\n print('*** Comzmo has found the drop Spot! ***')\n if(obj.object_type==CustomObjectTypes.CustomType04 and (self.back_wall_pose == None or not self.back_wall_pose.is_accurate)):\n self.back_wall_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Back', self._odom_frame)\n print('*** Comzmo has found the back wall! ***')\n if(obj.object_type==CustomObjectTypes.CustomType05 and (self.drop_target_pose == None or not self.drop_target_pose.is_accurate)):\n self.drop_target_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Target', self._odom_frame)\n print('*** Comzmo has found the Dropt Target! ***')\n if(obj.object_type==CustomObjectTypes.CustomType06 and (self.drop_clue_pose == None or not self.drop_clue_pose.is_accurate)):\n self.drop_clue_pose=obj.pose\n self._tfb.send_transform((x, y, z), q, now, 'Clue', self._odom_frame)\n print('*** Comzmo has found the Dropt Clue! ***')",
"def preprocess(self):\n for texgroup in self.textureGroups.itervalues():\n texgroup.dirty = True",
"def clear_points(self):\n print \"clearing each frame of selected points\"\n self.point_3d = None\n self.allUVs = []\n for iFrame in self.frames:\n iFrame.lastClick = None; \n self.displayImage(iFrame)",
"def dupeIt(*args):\n\n sel=cmds.ls(sl=True, type=\"transform\", l=True)\n inputs = cmds.radioButtonGrp(\"inputsRBG\", q=True, sl=True)\n if sel:\n base=sel[0]\n if len(sel)>1:\n objs=sel[1:]\n transforms = {}\n x=0\n\n for obj in objs:\n #get pos, rot, scale\n pos = cmds.xform(obj, ws=True, q=True, t=True)\n rot = cmds.xform(obj, ws=True, q=True, ro=True)\n scal = cmds.getAttr(\"%s.scale\"%obj)[0]\n transforms[x] = [pos, rot, scal]\n\n #delete the obj\n cmds.delete(obj)\n x=x+1\n\n for key in transforms.keys():\n if inputs == 1:\n dupe = cmds.duplicate(base)[0]\n elif inputs == 3:\n dupe = cmds.duplicate(base, un=True, rr=True)[0]\n elif inputs == 2:\n dupe = cmds.duplicate(base, ic=True)[0]\n print dupe\n cmds.xform(dupe, ws=True, t=transforms[key][0])\n cmds.xform(dupe, ws=True, ro=transforms[key][1])\n cmds.setAttr(\"%s.scale\"%dupe, transforms[key][2][0], transforms[key][2][1], transforms[key][2][2])\n\n#TODO - checkbox to copy inputs on orig objects to corresponding inputs on top level of duplicates\n\n else:\n cmds.warning(\"You need to select more than one object in order to swap!\")\n else:\n cmds.warning(\"Please select some transform nodes to dupe!\")",
"def b_transform_cube(b_obj):\n \n b_scale_object()\n b_scale_single_face(b_obj)",
"def run_mug_shelf_3D_pipeline(self):\n\n self.state.clear()\n self._clear_cache()\n\n # move home\n speed = self.graspingParams['speed']['fast']\n super_fast_speed = self.graspingParams['speed']['fast']\n # q = self._stored_poses_director[\"General\"][\"home\"]\n # q = self._stored_poses_director[\"mug\"][\"image_capture_for_mug_shelf\"]\n q = self._stored_poses_director[\"General\"][\"center_back\"]\n self.robotService.moveToJointPosition(q,\n maxJointDegreesPerSecond=super_fast_speed)\n\n self.run_keypoint_detection(wait_for_result=False, move_to_stored_pose=False, clear_state=False)\n\n # run keypoint detection\n # move to center back to capture another RGBD image\n q = self._stored_poses_director[\"General\"][\"home\"]\n self.robotService.moveToJointPosition(q,\n maxJointDegreesPerSecond=super_fast_speed)\n\n rgbd_with_pose = self.captureRgbdAndCameraTransform()\n self.state.cache['rgbd_with_pose_list'].append(rgbd_with_pose)\n\n self.wait_for_keypoint_detection_result()\n\n if not self.check_keypoint_detection_succeeded():\n self.state.set_status(\"FAILED\")\n return False\n\n # run category manip\n code = self.run_category_manipulation_goal_estimation(capture_rgbd=False)\n if not code:\n self.state.set_status(\"FAILED\")\n return False\n\n\n self.wait_for_category_manipulation_goal_result()\n if not self.check_category_goal_estimation_succeeded():\n self.state.set_status(\"PLANNING_FAILED\")\n return False\n\n # run the manipulation\n # need safety checks in there before running autonomously\n code = self.run_mug_shelf_manipulation()\n if not (code == True):\n self.state.set_status(\"FAILED\")\n return False\n\n # if the place was successful then retract\n self.retract_from_mug_shelf()\n\n if EXPERIMENT_MODE:\n output_dir = self.state.cache['keypoint_detection_result']['output_dir']\n print \"\\n\\n\", os.path.split(output_dir)[1]",
"def move_objects(self):\n\n def get_new_obj_pose(curr_pos, curr_quat):\n angular_disp = 0.0\n delta_alpha = np.random.uniform(-angular_disp, angular_disp)\n delta_rot = Quaternion(axis=(0.0, 0.0, 1.0), radians=delta_alpha)\n curr_quat = Quaternion(curr_quat)\n newquat = delta_rot * curr_quat\n\n pos_ok = False\n while not pos_ok:\n const_dist = True\n if const_dist:\n alpha = np.random.uniform(-np.pi, np.pi, 1)\n d = 0.25\n delta_pos = np.array([d * np.cos(alpha), d * np.sin(alpha), 0.])\n else:\n pos_disp = 0.1\n delta_pos = np.concatenate([np.random.uniform(-pos_disp, pos_disp, 2), np.zeros([1])])\n newpos = curr_pos + delta_pos\n lift_object = False\n if lift_object:\n newpos[2] = 0.15\n if np.any(newpos[:2] > high_bound[:2]) or np.any(newpos[:2] < low_bound[:2]):\n pos_ok = False\n else:\n pos_ok = True\n\n return newpos, newquat\n\n for i in range(self.num_objects):\n curr_pos = self.sim.data.qpos[self._n_joints + i * 7: self._n_joints + 3 + i * 7]\n curr_quat = self.sim.data.qpos[self._n_joints + 3 + i * 7: self._n_joints + 7 + i * 7]\n obji_xyz, obji_quat = get_new_obj_pose(curr_pos, curr_quat)\n self.sim.data.qpos[self._n_joints + i * 7: self._n_joints + 3 + i * 7] = obji_xyz\n self.sim.data.qpos[self._n_joints + 3 + i * 7: self._n_joints + 7 + i * 7] = obji_quat.elements\n\n sim_state = self.sim.get_state()\n # sim_state.qpos[:] = sim_state.qpos\n sim_state.qvel[:] = np.zeros_like(sim_state.qvel)\n self.sim.set_state(sim_state)\n self.sim.forward()",
"def generate(self, objects):\n\n 'Initialize/reset each target volume with zeros'\n self.classification_mask = np.zeros([self.num_classes, self.target_height, self.target_width])\n self.bbox_mask = np.zeros([self.num_coords, self.target_height, self.target_width])\n self.depth_mask = np.zeros([self.num_depth, self.target_height, self.target_width])\n\n 'Set entire mask as background until objects are processed'\n self.classification_mask[0, :, :] = 1\n\n for obj in objects:\n obj_class = self._get_class(obj)\n shrink_factor = self._get_shrink_factor(obj_class)\n mask_coords = self._get_mask_coords(obj, shrink_factor)\n\n self._update_classification_mask(obj_class, mask_coords)\n self._update_bbox_mask(obj.bounding_box, mask_coords)\n self._update_depth_mask(obj.bounding_box, mask_coords)\n\n \"Suppress background and dont care classes\"\n suppress_mask = self._suppress_bg_dc()\n\n return np.copy(self.classification_mask), np.copy(self.bbox_mask), \\\n np.copy(self.depth_mask), np.copy(suppress_mask)",
"def delete_transform_from_nodes(nodes):\n\n for node in nodes:\n try:\n shape = [x for x in cmds.listHistory(node, future=True)\n if x not in cmds.listHistory(node, future=True, pdo=True)]\n transform = cmds.listRelatives(shape, parent=True)\n cmds.delete(transform)\n except ValueError:\n return",
"def remesh_blocks():\n \n # Get the active object\n obj = bpy.context.active_object\n \n nameCopy = \"temp_copy\"\n\n # Switch in object mode \n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Remove all modifiers from the object\n obj.modifiers.clear()\n\n # Delete the existing copy \n for o in bpy.data.objects:\n if o.type == 'MESH' and o.name == nameCopy:\n # Delete the existing copy\n object_to_delete = bpy.data.objects[nameCopy]\n bpy.data.objects.remove(object_to_delete, do_unlink=True) \n \n \n # Make a copy of the object\n new_obj = obj.copy()\n new_obj.data = obj.data.copy()\n new_obj.animation_data_clear()\n bpy.context.collection.objects.link(new_obj)\n\n # Rename the copy\n new_obj.name = nameCopy\n\n # Hide the copy\n new_obj.hide_viewport = True\n\n # Remesh the faces of the object with blocks\n bpy.ops.object.modifier_add(type='REMESH')\n bpy.context.object.modifiers[\"Remesh\"].mode = 'BLOCKS'\n bpy.context.object.modifiers[\"Remesh\"].octree_depth = bpy.context.scene.level_blocks\n bpy.context.object.modifiers[\"Remesh\"].scale = 0.99\n bpy.context.object.modifiers[\"Remesh\"].use_remove_disconnected = False\n bpy.context.object.modifiers[\"Remesh\"].threshold = 1\n bpy.context.object.modifiers[\"Remesh\"].use_smooth_shade = False\n\n # Make intersection between the remesh object and the original\n bpy.ops.object.modifier_add(type='BOOLEAN')\n bpy.context.object.modifiers[\"Boolean\"].operation = 'INTERSECT'\n bpy.context.object.modifiers[\"Boolean\"].operand_type = 'OBJECT'\n bpy.context.object.modifiers[\"Boolean\"].object = bpy.data.objects[nameCopy]\n bpy.context.object.modifiers[\"Boolean\"].solver = 'FAST'\n bpy.context.object.modifiers[\"Boolean\"].double_threshold = 0",
"def kill_all_objects(self):\n # Iterate and call pygame.sprite.Sprite kill() function to remove from any pygame.sprite.Groups()\n for o in self.celest_objs:\n o.kill()\n \n # Clear all transients\n self.transient_objs.clear()\n\n print(f\"Killed all objects: Celestials: {len(self.celest_objs)}, Transients: {len(self.transient_objs)}\")",
"def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()",
"def _clean_objects(*args):\n\n for arg in args:\n attrs = dir(arg)\n # QuadContourSet\n if 'collections' in attrs:\n for item in arg.collections: item.remove()\n # Wind barbs and contour labels\n elif '__len__' in attrs:\n for item in arg:\n item.remove()\n # Text strings and everything else\n elif 'get_text' in attrs: arg.remove()",
"def resetTransformations():\n dislin.trfres()"
] |
[
"0.65062296",
"0.61384195",
"0.61042535",
"0.6094468",
"0.58585864",
"0.5814782",
"0.5727799",
"0.572588",
"0.5693036",
"0.56779474",
"0.5572873",
"0.55572915",
"0.55426896",
"0.5536677",
"0.5533747",
"0.552068",
"0.55170244",
"0.54940814",
"0.5483527",
"0.54787827",
"0.5472806",
"0.54317546",
"0.5426946",
"0.5410326",
"0.53812236",
"0.5375295",
"0.53726125",
"0.53347063",
"0.53143126",
"0.5292721"
] |
0.74857163
|
0
|
DSSAT model water uptake
|
def water_uptake_dssat(self, soil):
    CONV1 = 1e-4  # convert m root / m3 soil to cm root / cm3 soil
    CONV2 = 100   # convert m to cm
    CONV3 = 10    # convert cm to mm
    daily_ref_evap_transp = soil.daily_ref_evap_transp
    transp_pot = daily_ref_evap_transp * self.light_intercpt
    root_dens = self.root_dens * CONV1  # cm root / cm3 soil
    CONST1 = 1.3e-3
    CONST2 = np.zeros(soil.total_layers)
    CONST3 = 7.01
    layer_thickness = soil.layer_thickness * CONV2
    water_uptake = np.zeros(soil.total_layers)
    # Constant 2
    for lyr in soil.layers:
        CONST2[lyr] = 120 - 250 * soil.perm_wilt_point[lyr]
        if soil.perm_wilt_point[lyr] > 0.3:
            CONST2[lyr] = 45
    # Water uptake per unit root length
    for lyr in soil.layers:
        if root_dens[lyr] <= 0.00001 or (soil.water_content[lyr] <=
                                         soil.perm_wilt_point[lyr]):
            water_uptake[lyr] = 0
        else:
            water_uptake[lyr] = (CONST1 * math.exp(min((CONST2[lyr] *
                                 (soil.water_content[lyr] -
                                  soil.perm_wilt_point[lyr])), 40)) /
                                 (CONST3 - math.log(root_dens[lyr])))
            water_uptake[lyr] = min(water_uptake[lyr],
                                    self.dssat_max_water_uptake)
        # Water uptake in [cm/d] volume
        water_uptake[lyr] = (water_uptake[lyr] * layer_thickness[lyr] *
                             root_dens[lyr])
        # Water uptake in [mm/d] volume
        water_uptake[lyr] = water_uptake[lyr] * CONV3
    # Total water uptake [mm/d]
    crop_transp = water_uptake.sum()
    min_transp = min(transp_pot, crop_transp)
    # Update crop arrays
    for lyr in soil.layers:
        if min_transp > 0:
            self.water_uptake[lyr] = (water_uptake[lyr] *
                                      (min_transp / crop_transp))
        else:
            self.water_uptake[lyr] = 0
    self.att_transp = self.water_uptake.sum()  # mm/day
    self.cum_transp += self.att_transp  # mm
    self.transp_ratio = self.att_transp / transp_pot
    self.expect_transp = transp_pot
    self.cum_pot_transp += self.expect_transp
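For readers of the document field above: the core of the DSSAT scheme is a per-layer potential uptake per unit root length, capped at a maximum value and then scaled by layer thickness and root density to a depth of water. The standalone sketch below reproduces only that per-layer term; the soil numbers and the max_uptake default are illustrative assumptions (the original reads self.dssat_max_water_uptake from the crop object), not values taken from this dataset.

# Minimal sketch of the per-layer DSSAT uptake term used above.
# The inputs below are made-up illustrative numbers.
import math

def dssat_layer_uptake(theta, theta_wp, root_dens_cm, thickness_cm,
                       max_uptake=0.03):
    """Potential water uptake of one soil layer in mm/day."""
    if root_dens_cm <= 1e-5 or theta <= theta_wp:
        return 0.0
    const2 = 45.0 if theta_wp > 0.3 else 120.0 - 250.0 * theta_wp
    # uptake per unit root length (cm3 water / cm root / day), exponent capped at 40
    per_root = (1.3e-3 * math.exp(min(const2 * (theta - theta_wp), 40.0))
                / (7.01 - math.log(root_dens_cm)))
    per_root = min(per_root, max_uptake)
    # scale to a depth of water over the layer and convert cm/day -> mm/day
    return per_root * thickness_cm * root_dens_cm * 10.0

print(dssat_layer_uptake(theta=0.25, theta_wp=0.10,
                         root_dens_cm=0.5, thickness_cm=15.0))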
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
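The metadata block above only declares that each row is meant to be consumed as a (query, document, negatives) triplet; it does not prescribe a loss. As a hedged illustration only, assuming PyTorch and some external embed() step that is not part of this dataset, such a row could feed a margin-based triplet objective along these lines:

# Hypothetical usage sketch; embed(), the margin value, and tensor shapes
# are assumptions for illustration, not part of this dataset.
import torch
import torch.nn.functional as F

def triplet_loss(q_emb, pos_emb, neg_embs, margin=0.2):
    # q_emb, pos_emb: (d,) embeddings; neg_embs: (n, d) embeddings
    pos_sim = F.cosine_similarity(q_emb, pos_emb, dim=-1)
    neg_sim = F.cosine_similarity(q_emb.unsqueeze(0), neg_embs, dim=-1)
    return torch.clamp(margin - pos_sim + neg_sim, min=0).mean()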
|
[
"def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each layer [mm]\r\n for lyr in soil.layers:\r\n soil_wat_avail[lyr] = ((soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr]) *\r\n soil.layer_thickness[lyr] *\r\n soil.WATER_DENSITY)\r\n # Water supply\r\n for lyr in soil.layers:\r\n soil_wat_supply[lyr] = (soil_wat_avail[lyr] * soil.kl[lyr])\r\n\r\n # Water uptake (no supply or demand)\r\n if (soil_wat_supply.sum() <= 0) or (transp_pot <= 0):\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = 0\r\n else:\r\n # Water uptake (water is not limiting)\r\n if transp_pot < soil_wat_supply.sum():\r\n # distribute demand proportionately to the water supply\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (soil_wat_supply[lyr] /\r\n soil_wat_supply.sum() *\r\n transp_pot)\r\n else:\r\n # Water uptake (water is limiting)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = soil_wat_supply[lyr]\r\n\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += transp_pot",
"def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 = TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n #p_run.prune_layer_no_retraining(layer, thresholds[i])",
"def water_uptake_feddes(self, soil):\r\n\r\n # Value of the pressure head, below which roots extract water at the\r\n # maximum possible rate\r\n P1 = soil.field_capacity_water_potential.mean()#-25 # J/kg\r\n P3 = soil.perm_wilt_point_pot.mean()#-8000 # J/kg wilting point\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n for lyr in soil.layers:\r\n stress_fact = feddes_stress_factor(transp_pot,\r\n soil.water_potential[lyr],\r\n self.P0, P1, self.P2L, self.P2H,\r\n P3, self.R2H, self.R2L)\r\n self.water_uptake[lyr] = (stress_fact * self.root_fraction[lyr] *\r\n transp_pot)\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp\r\n self.transp_ratio = self.att_transp / transp_pot",
"def optim_optuna(modelname=\"model_dl.1_lstm.py\", \n pars= {}, \n df = None,\n optim_method=\"normal/prune\",\n save_folder=\"/mymodel/\", log_folder=\"\",ntrials=2) :\n \n module = module_load(modelname) \n\n def objective(trial):\n param_dict = module.get_params(choice=\"test\", ncol_input=df.shape[1], ncol_output=df.shape[1])\n for t,p in pars.items():\n pres = None\n #p = pars[t]\n x = p['type']\n \n if x=='log_uniform':\n pres = trial.suggest_loguniform(t,p['range'][0], p['range'][1])\n \n elif x=='int':\n pres = trial.suggest_int(t,p['range'][0], p['range'][1])\n \n elif x=='categorical':\n pres = trial.suggest_categorical(t,p['value'])\n \n elif x=='discrete_uniform':\n pres = trial.suggest_discrete_uniform(t, p['init'],p['range'][0],p['range'][1])\n \n elif x=='uniform':\n pres = trial.suggest_uniform(t,p['range'][0], p['range'][1])\n \n else:\n raise Exception('Not supported type {}'.format(p['type']))\n\n param_dict[t] = pres\n \n model = module.Model(**param_dict)\n sess = module.fit(model,df)\n stats = model.stats[\"loss\"]\n del sess\n del model\n tf.reset_default_graph()\n return stats\n \n if optim_method=='prune':\n study = optuna.create_study(pruner=optuna.pruners.MedianPruner())\n else:\n study = optuna.create_study() # Create a new study.\n \n \"\"\"\n optuna create-study --study-name \"distributed-example\" --storage \"sqlite:///example.db\"\n \n https://optuna.readthedocs.io/en/latest/tutorial/distributed.html\n if __name__ == '__main__':\n study = optuna.load_study(study_name='distributed-example', storage='sqlite:///example.db')\n study.optimize(objective, n_trials=100)\n \n \n \n \"\"\"\n study.optimize(objective, n_trials=ntrials) # Invoke optimization of the objective function.\n param_dict = study.best_params\n param_dict.update(module.get_params(choice=\"test\", ncol_input=df.shape[1], \n ncol_output=df.shape[1]))\n \n ### Run best model\n model = module.Model(**param_dict)\n sess = module.fit(model,df)\n \n #### Saving \n modelname = modelname.replace(\".\", \"_\") # this is the module name which contains .\n save_folder = save_folder + \"/\" + modelname\n if not(os.path.isdir(save_folder)):\n os.makedirs(save_folder)\n file_path = os.path.join(save_folder,modelname+'.ckpt')\n\n save(sess,file_path)\n\n\n ### Update with Best values\n study_trials = study.trials_dataframe()\n study_trials.to_csv(os.path.join(save_folder,modelname+'_study.csv'))\n \n param_dict[\"best_value\"] = study.best_value\n param_dict[\"file_path\"] = file_path \n json.dump( param_dict, os.path.join(save_folder, modelname+'_params.csv') )\n \n return param_dict",
"def main():\n\n\n #parse arguments (measuring data or not, name of text file, input pins, smoothing out, arduino board, usb port)\n parser = argparse.ArgumentParser(description = \"Train your Arduino.\")\n parser.add_argument('-m', dest='measuring', default='True', nargs='?',\n help = 'measuring new data True/False (default: True)')\n parser.add_argument('-n', dest = 'file_name', default = 'trainingData.txt', nargs='?',\n help = 'name of .txt file for saving or reading data (default: trainingData.txt)')\n parser.add_argument('-p', dest='pins', nargs='+', default=['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7'],\n help='input pins (default: A1 A2 A3 A4 A5 A6 A7)')\n parser.add_argument('-s', dest='smoothing', default=0, nargs='?',\n help='maximum value to smooth out to zero in measured data (default: 0)')\n parser.add_argument('-b', dest='board', default=\"arduino:mbed:nano33ble\", nargs='?',\n help='arduino board (default: arduino:mbed:nano33ble)')\n parser.add_argument('-usb', dest='port', default=\"COM22\", nargs='?',\n help='usb port connected with board (default: COM22')\n\n\n parse_args = parser.parse_args()\n file_name = parse_args.file_name\n #make sure it will be a text file\n if file_name[-4:] != '.txt':\n file_name = file_name + '.txt'\n\n\n pins = parse_args.pins\n board = parse_args.board\n port = parse_args.port\n smoothing = int(parse_args.smoothing)\n\n #potentially measure new data, or skip\n measuring = parse_args.measuring\n if measuring.lower() != 'false':\n measure_success = measure.main(file_name, port, board, pins)\n else:\n measure_success = True\n\n if measure_success is True:\n\n\n #compute centroids\n ordered_centroids, class_labels = centroid.main(file_name, pins, smoothing)\n\n\n #write ino script\n create_classification_script(ordered_centroids, list(class_labels), pins, smoothing)\n\n #automatically upload classification script to board\n try:\n subprocess.run([\"arduino-cli\", \"compile\", \"--fqbn\", board, \"arduino_scripts/predict\"], check=True)\n except subprocess.CalledProcessError:\n print(\"Can't compile predict file\")\n\n try:\n subprocess.run([\"arduino-cli\", \"upload\", \"-p\", port, \"--fqbn\", board, \"arduino_scripts/predict\"], check=True)\n print(\"Wait for port\")\n time.sleep(3)\n print(\"Done\")\n except subprocess.CalledProcessError:\n print(\"Can't upload sketch\")\n\n\n\n\n return None",
"def train():\n pass",
"def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ###### Temperature in Kelvin\n model.T_kelvin = model.zero_celsius + model.T_celsius*kelvin\n \n ##### Potentials\n # Resting potential (calculated with Goldman equation)\n model.V_res = (model.R*model.T_kelvin)/model.F * np.log((model.P_K*model.n_init**2*model.K_e + model.P_Na*model.h_init*model.m_init**3*model.Na_e)/\\\n (model.P_K*model.n_init**2*model.K_i + model.P_Na*model.h_init*model.m_init**3*model.Na_i))\n \n # Nerst potential for leakage current; leakage chanels were excluded but could be added by using: g_L*(E_L-(v-V_res)) \n model.E_L = (-1/model.g_L)*(model.P_Na*model.m_init**3*model.h_init*(model.V_res*model.F**2)/(model.R*model.T_kelvin) * \\\n (model.Na_e-model.Na_i*exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))) + \\\n model.P_K*model.n_init**2*(model.V_res*model.F**2)/(model.R*model.T_kelvin) *\\\n (model.K_e-model.K_i*np.exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))))\n \n \n ##### structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5)\n model.structure = np.array(list(np.tile([2,1],model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### Compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(model.structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # dendrite\n model.fiber_inner_diameter = 0.7* model.fiber_outer_diameter\n model.compartment_diameters[:] = model.fiber_inner_diameter\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacitivites\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # internodes\n model.c_m[np.where(model.structure == 1)] = 0*uF/cm**2\n # nodes\n model.c_m[np.where(model.structure == 2)] = model.c_m_layer\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2 \n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Noise term\n model.P_Na_vector = np.zeros(model.nof_comps)*um/second\n model.P_Na_vector[model.structure == 2] = model.P_Na\n model.noise_term = 
np.sqrt(model.A_surface*model.P_Na_vector)\n \n ##### Compartments to plot\n model.comps_to_plot = range(1,model.nof_comps)\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.n = model.n_init\n neuron.h = model.h_init\n \n ##### Set parameter values of differential equations\n # conductances active compartments\n neuron.g_Na = model.g_Na\n neuron.g_K = model.g_K\n \n # conductances internodes\n neuron.g_Na[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n neuron.g_K[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.E_L = model.E_L\n neuron.g_L = model.g_L \n \n return neuron, model",
"def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)",
"def setup_pwn(name,pwndata,phase, free_radius=5, tempdir=None, emin=1.0e2, emax=1.0e5,maxroi=10,model=None,**kwargs):\n sources=yaml.load(open(pwndata))\n\n catalog_name=sources[name]['catalog']\n ltcube=sources[name]['ltcube']\n pulsar_position=SkyDir(*sources[name]['dir'])\n ft2=sources[name]['ft2']\n ft1=sources[name]['ft1']\n\n # in case no list was passed\n if len(phase)==2 and isinstance(phase[0],numbers.Real) and \\\n isinstance(phase[1],numbers.Real):\n\n # write in case phase wraps around.\n if phase[0]>phase[1]:\n phase=[[phase[0],1.0],[0.0,phase[1]]]\n else:\n phase = [phase] \n\n phase_factor=get_phase_factor(phase)\n print \"phase\"\n print phase\n print \"phase_factor=%.2f\"%phase_factor\n\n catalog=FermiCatalog(e(\"$FERMI/catalogs/gll_psc_v02.fit\"),free_radius=free_radius)\n catalog_source=[i for i in catalog.get_sources(SkyDir(),180) if i.name==catalog_name][0]\n\n center=catalog_source.skydir\n\n if tempdir is None: tempdir=mkdtemp(prefix='/scratch/')\n\n binfile=j(tempdir,'binned_phased.fits')\n\n # apply phase cut to ft1 file\n phased_ft1 = j(tempdir,'ft1_phased.fits')\n phasetools.phase_cut(ft1,phased_ft1,phaseranges=phase)\n\n # create a temporary ltcube scaled by the phase factor\n# phased_ltcube=j(tempdir,'phased_ltcube.fits')\n# phase_ltcube(ltcube,phased_ltcube, phase=phase)\n phased_ltcube=ltcube\n from uw.like.pointspec import DataSpecification\n data_specification = DataSpecification(\n ft1files = phased_ft1,\n ft2files = ft2,\n ltcube = phased_ltcube,\n binfile = binfile)\n\n spectral_analysis = SpectralAnalysis(data_specification,\n binsperdec = 4,\n emin = 100,\n emax = 100000,\n irf = \"P6_V3_DIFFUSE\",\n roi_dir = center,\n maxROI = maxroi,\n minROI = maxroi)\n\n if model == None :\n roi=spectral_analysis.roi(\n roi_dir=center,\n diffuse_sources=get_default_diffuse(diffdir=e(\"$FERMI/diffuse\"),\n gfile=\"gll_iem_v02.fit\",\n ifile=\"isotropic_iem_v02.txt\"),\n catalogs = catalog,\n phase_factor = 1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n else :\n roi=spectral_analysis.roi(\n roi_dir=center,\n xmlfile = model,\n phase_factor =1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n\n print \"---------------------Energy range--------------------\"\n \n print \"emin=\"+str(roi.bands[0].emin)+\"\\n\"\n print \"emax=\"+str(roi.bands[len(roi.bands)-1].emax)+\"\\n\"\n \n\n # keep overall flux of catalog source,\n # but change the starting index to 2.\n roi.modify(which=catalog_name, name=name, index=2, \n keep_old_flux=True)\n\n return roi",
"def main():\n tpd_file_name = get_nonexisting_file(\"Enter name of new tpd file: \")\n tpd = TrainPredictData(tpd_file_name)\n\n print \"You can now enter the file paths of the the newly created tpd file.\"\n print \"If you want to skip a data set, just press enter without typing anything.\"\n\n train_raw_path = get_existing_file(\"Enter training raw path: \", skip=True)\n if train_raw_path is not None:\n train_raw_key = extract_h5_key(train_raw_path, \"Enter training raw h5 key: \")\n tpd.set_train_raw(train_raw_path, train_raw_key)\n\n train_gt_path = get_existing_file(\"Enter training gt path: \", skip=True)\n if train_gt_path is not None:\n train_gt_key = extract_h5_key(train_gt_path, \"Enter training gt h5 key: \")\n tpd.set_train_gt(train_gt_path, train_gt_key)\n\n train_pred_path = get_existing_file(\"Enter training pred path: \", skip=True)\n if train_pred_path is not None:\n train_pred_key = extract_h5_key(train_pred_path, \"Enter training pred h5 key: \")\n tpd.set_train_pred(train_pred_path, train_pred_key)\n\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n while train_feat_path is not None:\n train_feat_key = extract_h5_key(train_feat_path, \"Enter training feature path: \")\n tpd.add_train_feature(train_feat_path, train_feat_key)\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n\n test_raw_path = get_existing_file(\"Enter test raw path: \", skip=True)\n if test_raw_path is not None:\n test_raw_key = extract_h5_key(test_raw_path, \"Enter test raw h5 key: \")\n tpd.set_test_raw(test_raw_path, test_raw_key)\n\n test_gt_path = get_existing_file(\"Enter test gt path: \", skip=True)\n if test_gt_path is not None:\n test_gt_key = extract_h5_key(test_gt_path, \"Enter test gt h5 key: \")\n tpd.set_test_gt(test_gt_path, test_gt_key)\n\n test_pred_path = get_existing_file(\"Enter test pred path: \", skip=True)\n if test_pred_path is not None:\n test_pred_key = extract_h5_key(test_pred_path, \"Enter test pred h5 key: \")\n tpd.set_test_pred(test_pred_path, test_pred_key)\n\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n while test_feat_path is not None:\n test_feat_key = extract_h5_key(test_feat_path, \"Enter test feature path: \")\n tpd.add_test_feature(test_feat_path, test_feat_key)\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n\n return 0",
"def setup_model(outdir, record_dir, outname, params, dust_file, wav_range, aperture,\n tsc=True, idl=False, plot=False, low_res=True, max_rCell=100,\n scale=1, radmc=False, mono_wave=None, norecord=False,\n dstar=200., dyn_cav=False, fix_params=None,\n power=2, mc_photons=1e6, im_photons=1e6, ellipsoid=False,\n TSC_dir='~/programs/misc/TSC/',\n IDL_path='/Applications/exelis/idl83/bin/idl', auto_disk=0.25,\n fast_plot=False, image_only=False, ulrich=False):\n import numpy as np\n import astropy.constants as const\n import scipy as sci\n # to avoid X server error\n import matplotlib as mpl\n mpl.use('Agg')\n #\n import matplotlib.pyplot as plt\n import os\n from matplotlib.colors import LogNorm\n from scipy.integrate import nquad\n from hyperion.model import Model\n from record_hyperion import record_hyperion\n from pprint import pprint\n\n # Constants setup\n c = const.c.cgs.value\n AU = const.au.cgs.value # Astronomical Unit [cm]\n pc = const.pc.cgs.value # Parsec [cm]\n MS = const.M_sun.cgs.value # Solar mass [g]\n LS = const.L_sun.cgs.value # Solar luminosity [erg/s]\n RS = const.R_sun.cgs.value # Solar radius [cm]\n G = const.G.cgs.value # Gravitational constant [cm3/g/s^2]\n yr = 60*60*24*365 # Years in seconds\n PI = np.pi # PI constant\n sigma = const.sigma_sb.cgs.value # Stefan-Boltzmann constant\n mh = const.m_p.cgs.value + const.m_e.cgs.value\n g2d = 100.\n mmw = 2.37 # Kauffmann 2008\n\n m = Model()\n\n # min and max wavelength to compute (need to define them first for checking dust properties)\n wav_min, wav_max, wav_num = wav_range\n\n # Create dust properties\n # Hyperion needs nu, albedo, chi, g, p_lin_max\n from hyperion.dust import HenyeyGreensteinDust\n dust = dict()\n [dust['nu'], dust['albedo'], dust['chi'], dust['g']] = np.genfromtxt(dust_file).T\n d = HenyeyGreensteinDust(dust['nu'], dust['albedo'], dust['chi'], dust['g'], dust['g']*0)\n # dust sublimation option\n # dust sublimation temperture specified here\n T_sub = 1600.0\n d.set_sublimation_temperature('slow', temperature=T_sub)\n d.set_lte_emissivities(n_temp=3000,\n temp_min=0.1,\n temp_max=2000.)\n # if the min and/or max wavelength fall out of range\n if c/wav_min/1e-4 > dust['nu'].max():\n d.optical_properties.extrapolate_nu(dust['nu'].min(), c/wav_min/1e-4)\n print('minimum wavelength is out of dust model. The dust model is extrapolated.')\n if c/wav_max/1e-4 < dust['nu'].min():\n d.optical_properties.extrapolate_nu(c/wav_max/1e-4, dust['nu'].max())\n print('maximum wavelength is out of dust model. The dust model is extrapolated.')\n\n # try to solve the freq. 
problem\n d.optical_properties.extrapolate_nu(3.28e15, 5e15)\n #\n d.write(outdir+os.path.basename(dust_file).split('.')[0]+'.hdf5')\n d.plot(outdir+os.path.basename(dust_file).split('.')[0]+'.png')\n plt.clf()\n\n # Grids and Density\n\n # Grid Parameters\n nx = 300L\n if low_res == True:\n nx = 100L\n ny = 400L\n nz = 50L\n [nx, ny, nz] = [int(scale*nx), int(scale*ny), int(scale*nz)]\n\n # TSC model input setting\n dict_params = params\n # TSC model parameter\n cs = dict_params['Cs']*1e5\n t = dict_params['age'] # year\n omega = dict_params['Omega0']\n # calculate related parameters\n M_env_dot = 0.975*cs**3/G\n mstar = M_env_dot * t * yr\n R_cen = omega**2 * G**3 * mstar**3 /(16*cs**8)\n R_inf = cs * t * yr\n # protostar parameter\n tstar = dict_params['tstar']\n R_env_max = dict_params['R_env_max']*AU\n theta_cav = dict_params['theta_cav']\n rho_cav_center = dict_params['rho_cav_center']\n rho_cav_edge = dict_params['rho_cav_edge']*AU\n rstar = dict_params['rstar']*RS\n # Mostly fixed parameter\n M_disk = dict_params['M_disk']*MS\n beta = dict_params['beta']\n h100 = dict_params['h100']*AU\n rho_cav = dict_params['rho_cav']\n # make M_disk varies with mstar, which is the mass of star+disk\n if auto_disk != None:\n if M_disk != 0:\n print('M_disk is reset to %4f of mstar (star+disk)' % auto_disk)\n M_disk = mstar * auto_disk\n else:\n print('M_disk = 0 is found. M_disk is set to 0.')\n\n # ellipsoid cavity parameter\n if ellipsoid == True:\n print('Use ellipsoid cavity (experimental)')\n # the numbers are given in arcsec\n a_out = 130 * dstar * AU\n b_out = 50 * dstar * AU\n z_out = a_out\n a_in = dict_params['a_in'] * dstar * AU\n b_in = a_in/a_out*b_out\n z_in = a_in\n rho_cav_out = dict_params['rho_cav_out'] * mh\n rho_cav_in = dict_params['rho_cav_in'] * mh\n\n # Calculate the dust sublimation radius\n # dust sublimation temperature specified when setting up the dust properties\n # realistic dust\n # a = 1 # in micron\n # d_sub = 2.9388e7*(a/0.1)**-0.2 * (4*np.pi*rstar**2*sigma*tstar**4/LS)**0.5 / T_sub**3 *AU\n # black body dust\n d_sub = (LS/16./np.pi/sigma/AU**2*(4*np.pi*rstar**2*sigma*tstar**4/LS)/T_sub**4)**0.5 *AU\n # use the dust sublimation radius as the inner radius of disk and envelope\n R_disk_min = d_sub\n R_env_min = d_sub\n rin = rstar\n rout = R_env_max\n R_disk_max = R_cen\n\n # print the variables\n print('Dust sublimation radius %6f AU' % (d_sub/AU))\n print('M_star %4f Solar mass' % (mstar/MS))\n print('Infall radius %4f AU' % (R_inf / AU))\n\n # if there is any parameter found in fix_params, then fix them\n if fix_params != None:\n if 'R_min' in fix_params.keys():\n R_disk_min = fix_params['R_min']*AU\n R_env_min = fix_params['R_min']*AU\n\n # Make the Coordinates\n #\n ri = rin * (rout/rin)**(np.arange(nx+1).astype(dtype='float')/float(nx))\n ri = np.hstack((0.0, ri))\n thetai = PI*np.arange(ny+1).astype(dtype='float')/float(ny)\n phii = PI*2.0*np.arange(nz+1).astype(dtype='float')/float(nz)\n\n # Keep the constant cell size in r-direction at large radii\n #\n if max_rCell != None:\n ri_cellsize = ri[1:-1]-ri[0:-2]\n ind = np.where(ri_cellsize/AU > max_rCell)[0][0] # The largest cell size is 100 AU\n ri = np.hstack((ri[0:ind],\n ri[ind]+np.arange(np.ceil((rout-ri[ind])/max_rCell/AU))*max_rCell*AU))\n nxx = nx\n nx = len(ri)-1\n # Assign the coordinates of the center of cell as its coordinates.\n #\n rc = 0.5*( ri[0:nx] + ri[1:nx+1] )\n thetac = 0.5*( thetai[0:ny] + thetai[1:ny+1] )\n phic = 0.5*( phii[0:nz] + phii[1:nz+1] )\n\n # for non-TSC model\n if ulrich:\n 
import hyperion as hp\n from hyperion.model import AnalyticalYSOModel\n\n non_tsc = AnalyticalYSOModel()\n\n # Define the luminsoity source\n nt_source = non_tsc.add_spherical_source()\n nt_source.luminosity = (4*PI*rstar**2)*sigma*(tstar**4) # [ergs/s]\n nt_source.radius = rstar # [cm]\n nt_source.temperature = tstar # [K]\n nt_source.position = (0., 0., 0.)\n nt_source.mass = mstar\n\n # Envelope structure\n #\n nt_envelope = non_tsc.add_ulrich_envelope()\n nt_envelope.mdot = M_env_dot # Infall rate\n nt_envelope.rmin = rin # Inner radius\n nt_envelope.rc = R_cen # Centrifugal radius\n nt_envelope.rmax = R_env_max # Outer radius\n nt_envelope.star = nt_source\n\n nt_grid = hp.grid.SphericalPolarGrid(ri, thetai, phii)\n\n rho_env_ulrich = nt_envelope.density(nt_grid).T\n rho_env_ulrich2d = np.sum(rho_env_ulrich**2, axis=2)/np.sum(rho_env_ulrich, axis=2)\n\n # Make the dust density model\n #\n # total mass counter\n total_mass = 0\n\n # normalization constant for cavity shape\n if theta_cav != 0:\n # using R = 10000 AU as the reference point\n c0 = (10000.*AU)**(-0.5)*\\\n np.sqrt(1/np.sin(np.radians(theta_cav))**3-1/np.sin(np.radians(theta_cav)))\n else:\n c0 = 0\n\n # empty density grid to be filled later\n rho = np.zeros([len(rc), len(thetac), len(phic)])\n\n # Normalization for the total disk mass\n def f(w, z, beta, rstar, h100):\n f = 2*PI*w*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/(w**beta*h100/100**beta))**2)\n return f\n rho_0 = M_disk/(nquad(f,[[R_disk_min,R_disk_max],[-R_env_max,R_env_max]], args=(beta,rstar,h100)))[0]\n\n # TODO: review\n if dyn_cav == True:\n if not tsc:\n print('WARNING: Calculation of interdependent cavity property has not implemented in infall-only solution!')\n else:\n from outflow_inner_edge import outflow_inner_edge\n # typical no used. 
Just an approach I tried to make the size of the\n # constant desnity region self-consistent with the outflow cavity.\n print 'Calculate the cavity properties using the criteria that swept-up mass = outflowed mass'\n # using swept-up mass = flow mass to derive the edge of the extended flat density region\n v_outflow = 1e2 * 1e5\n rho_cav_edge = outflow_inner_edge(np.copy(rho_env), (ri,thetai,phii),M_env_dot,v_outflow,theta_cav, R_env_min)\n dict_params['rho_cav_edge'] = rho_cav_edge\n # assume gas-to-dust ratio = 100\n rho_cav_center = 0.01 * 0.1*M_env_dot*rho_cav_edge/v_outflow/2 / (2*np.pi/3*rho_cav_edge**3*(1-np.cos(np.radians(theta_cav))))\n dict_params['rho_cav_center'] = rho_cav_center\n print 'inner edge is %5f AU and density is %e g/cm3' % (rho_cav_edge/AU, rho_cav_center)\n\n\n # default setting for the density profile in cavity\n if 'rho_cav_center' in locals() == False:\n rho_cav_center = 5e-19\n print('Use 5e-19 as the default value for cavity center')\n if 'rho_cav_edge' in locals() == False:\n rho_cav_edge = 40*AU\n print('Use 40 AU as the default value for size of the inner region')\n # discontinuity factor inside and outside of cavity inner edge\n discont = 1\n # determine the edge of constant region in the cavity\n if rho_cav_edge == 0:\n rho_cav_edge = R_env_min\n\n\n if not tsc:\n print('Calculating the dust density profile with infall solution...')\n\n for ir in range(0,len(rc)):\n for itheta in range(0,len(thetac)):\n for iphi in range(0,len(phic)):\n if rc[ir] > R_env_min:\n # related coordinates\n w = abs(rc[ir]*np.cos(np.pi/2 - thetac[itheta]))\n z = rc[ir]*np.sin(np.pi/2 - thetac[itheta])\n\n # Disk profile or envelope/cavity\n if ((w >= R_disk_min) and (w <= R_disk_max)):\n h = ((w/(100*AU))**beta)*h100\n rho_dum = rho_0*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/h)**2)\n else:\n # determine whether the current cell is in the cavity\n if ellipsoid == False:\n z_cav = c0*abs(w)**1.5\n if z_cav == 0:\n z_cav = R_env_max\n cav_con = abs(z) > abs(z_cav)\n if theta_cav == 90:\n cav_con = True\n else:\n # condition for the outer ellipsoid\n cav_con = (2*(w/b_out)**2 + ((abs(z)-z_out)/a_out)**2) < 1\n\n # cavity density\n if cav_con:\n # open cavity\n if ellipsoid == False:\n if (rc[ir] <= rho_cav_edge) & (rc[ir] >= R_env_min):\n rho_dum = g2d * rho_cav_center\n else:\n rho_dum = g2d * rho_cav_center*discont*(rho_cav_edge/rc[ir])**power\n else:\n # condition for the inner ellipsoid\n if (2*(w/b_in)**2 + ((abs(z)-z_in)/a_in)**2) > 1:\n rho_dum = rho_cav_out\n else:\n rho_dum = rho_cav_in\n # envelope density\n else:\n mu = abs(np.cos(thetac[itheta]))\n # Implement new root finding algorithm\n roots = np.roots(np.array([1.0, 0.0, rc[ir]/R_cen-1.0, -mu*rc[ir]/R_cen]))\n if len(roots[roots.imag == 0]) == 1:\n if (abs(roots[roots.imag == 0]) - 1.0) <= 0.0:\n mu_o_dum = roots[roots.imag == 0]\n else:\n mu_o_dum = -0.5\n print('Problem with cubic solving, cos(theta) = ', mu_o_dum)\n print('parameters are ', np.array([1.0, 0.0, rc[ir]/R_cen-1.0, -mu*rc[ir]/R_cen]))\n else:\n mu_o_dum = -0.5\n for imu in range(0, len(roots)):\n if roots[imu]*mu >= 0.0:\n if (abs((abs(roots[imu]) - 1.0)) <= 1e-5):\n mu_o_dum = 1.0 * np.sign(mu)\n else:\n mu_o_dum = roots[imu]\n if mu_o_dum == -0.5:\n print('Problem with cubic solving, roots are: ', roots)\n mu_o = mu_o_dum.real\n rho_dum = M_env_dot/(4*PI*(G*mstar*R_cen**3)**0.5)*(rc[ir]/R_cen)**(-3./2)*(1+mu/mu_o)**(-0.5)*(mu/mu_o+2*mu_o**2*R_cen/rc[ir])**(-1)\n rho[ir,itheta,iphi] = rho_dum\n else:\n rho[ir,itheta,iphi] = 1e-30\n # 
add the dust mass into the total count\n cell_mass = rho[ir, itheta, iphi] * (1/3.)*(ri[ir+1]**3 - ri[ir]**3) * (phii[iphi+1]-phii[iphi]) * -(np.cos(thetai[itheta+1])-np.cos(thetai[itheta]))\n total_mass = total_mass + cell_mass\n\n # TSC model\n else:\n print('Calculating the dust density profile with TSC solution...')\n\n # If needed, calculate the TSC model via IDL\n #\n if idl == True:\n print('Using IDL to calculate the TSC model. Make sure you are running this on mechine with IDL.')\n import pidly\n idl = pidly.IDL(IDL_path)\n idl('.r '+TSC_dir+'tsc.pro')\n idl('.r '+TSC_dir+'tsc_run.pro')\n #\n # only run TSC calculation within infall radius\n # modify the rc array\n ind_infall = np.where(rc >= R_inf)[0][0]\n if max(ri) > R_inf:\n rc_idl = rc[0:ind_infall+1]\n else:\n rc_idl = rc[rc < max(ri)]\n idl.pro('tsc_run', indir=TSC_dir, outdir=outdir, rc=rc_idl, thetac=thetac, time=t,\n c_s=cs, omega=omega, renv_min=R_env_min)\n file_idl = 'rhoenv.dat'\n else:\n print('Read the pre-computed TSC model.')\n ind_infall = np.where(rc >= R_inf)[0][0]\n if max(ri) > R_inf:\n rc_idl = rc[0:ind_infall+1]\n else:\n rc_idl = rc[rc < max(ri)]\n if idl != False:\n file_idl = idl\n\n # read in the exist file\n rho_env_tsc_idl = np.genfromtxt(outdir+file_idl).T\n # because only region within infall radius is calculated by IDL program,\n # need to project it to the original grid\n rho_env_tsc = np.zeros([len(rc), len(thetac)])\n for irc in range(len(rc)):\n if rc[irc] in rc_idl:\n rho_env_tsc[irc,:] = rho_env_tsc_idl[np.squeeze(np.where(rc_idl == rc[irc])),:]\n\n # extrapolate for the NaN values at the outer radius, usually at radius beyond the infall radius\n # using r^-2 profile at radius greater than infall radius\n # and map the 2d strcuture onto 3-D grid\n # map TSC solution from IDL to actual 2-D grid\n rho_env_tsc2d = np.empty((nx,ny))\n if max(ri) > R_inf:\n for i in range(0, len(rc)):\n if i <= ind_infall:\n rho_env_tsc2d[i,:] = rho_env_tsc[i,:]\n else:\n rho_env_tsc2d[i,:] = 10**(np.log10(rho_env_tsc[ind_infall,:]) - 2*(np.log10(rc[i]/rc[ind_infall])))\n else:\n rho_env_tsc2d = rho_env_tsc\n\n # map it to 3-D grid\n rho_env = np.repeat(rho_env_tsc2d[:,:,np.newaxis], nz, axis=2)\n\n for ir in range(0,len(rc)):\n for itheta in range(0,len(thetac)):\n for iphi in range(0,len(phic)):\n if rc[ir] > R_env_min:\n # related coordinates\n w = abs(rc[ir]*np.cos(np.pi/2 - thetac[itheta]))\n z = rc[ir]*np.sin(np.pi/2 - thetac[itheta])\n\n # initialize dummer rho for disk and cavity\n rho_dum = 0\n # Disk profile\n if ((w >= R_disk_min) and (w <= R_disk_max)) == True:\n h = ((w/(100*AU))**beta)*h100\n rho_dum = rho_0*(1-np.sqrt(rstar/w))*(rstar/w)**(beta+1)*np.exp(-0.5*(z/h)**2)\n else:\n # determine whether the current cell is in the cavity\n if ellipsoid == False:\n z_cav = c0*abs(w)**1.5\n if z_cav == 0:\n z_cav = R_env_max\n cav_con = abs(z) > abs(z_cav)\n else:\n # condition for the outer ellipsoid\n cav_con = (2*(w/b_out)**2 + ((abs(z)-z_out)/a_out)**2) < 1\n\n if cav_con:\n # open cavity\n if ellipsoid == False:\n if (rc[ir] <= rho_cav_edge) & (rc[ir] >= R_env_min):\n rho_dum = g2d * rho_cav_center\n else:\n rho_dum = g2d * rho_cav_center*discont*(rho_cav_edge/rc[ir])**power\n else:\n # condition for the inner ellipsoid\n if (2*(w/b_in)**2 + ((abs(z)-z_in)/a_in)**2) > 1:\n rho_dum = rho_cav_out\n else:\n rho_dum = rho_cav_in\n\n rho[ir, itheta, iphi] = rho_env[ir, itheta, iphi] + rho_dum\n\n else:\n rho[ir,itheta,iphi] = 1e-40\n\n # add the dust mass into the total count\n cell_mass = rho[ir, itheta, 
iphi] * (1/3.)*(ri[ir+1]**3 - ri[ir]**3) * (phii[iphi+1]-phii[iphi]) * -(np.cos(thetai[itheta+1])-np.cos(thetai[itheta]))\n total_mass = total_mass + cell_mass\n # apply gas-to-dust ratio of 100\n rho_dust = rho/g2d\n total_mass_dust = total_mass/MS/g2d\n print('Total dust mass = %f Solar mass' % total_mass_dust)\n\n # Insert the calculated grid and dust density profile into hyperion\n m.set_spherical_polar_grid(ri, thetai, phii)\n m.add_density_grid(rho_dust.T, d)\n\n # Define the luminsoity source\n source = m.add_spherical_source()\n source.luminosity = (4*PI*rstar**2)*sigma*(tstar**4) # [ergs/s]\n source.radius = rstar # [cm]\n source.temperature = tstar # [K]\n source.position = (0., 0., 0.)\n print('L_center = % 5.2f L_sun' % ((4*PI*rstar**2)*sigma*(tstar**4)/LS))\n\n # radiative transfer settigs\n m.set_raytracing(True)\n\n # determine the number of photons for imaging\n # the case of monochromatic\n if mono_wave != None:\n if (type(mono_wave) == int) or (type(mono_wave) == float) or (type(mono_wave) == str):\n mono_wave = float(mono_wave)\n mono_wave = [mono_wave]\n\n # Monochromatic radiative transfer setting\n m.set_monochromatic(True, wavelengths=mono_wave)\n m.set_n_photons(initial=mc_photons, imaging_sources=im_photon,\n imaging_dust=im_photon, raytracing_sources=im_photon,\n raytracing_dust=im_photon)\n # regular SED\n else:\n m.set_n_photons(initial=mc_photons, imaging=im_photon * wav_num,\n raytracing_sources=im_photon,\n raytracing_dust=im_photon)\n # number of iteration to compute dust specific energy (temperature)\n m.set_n_initial_iterations(20)\n m.set_convergence(True, percentile=dict_params['percentile'],\n absolute=dict_params['absolute'],\n relative=dict_params['relative'])\n m.set_mrw(True) # Gamma = 1 by default\n\n # Setting up images and SEDs\n if not image_only:\n # SED setting\n # Infinite aperture\n syn_inf = m.add_peeled_images(image=False)\n # use the index of wavelength array used by the monochromatic radiative transfer\n if mono_wave == None:\n syn_inf.set_wavelength_range(wav_num, wav_min, wav_max)\n syn_inf.set_viewing_angles([dict_params['view_angle']], [0.0])\n syn_inf.set_uncertainties(True)\n syn_inf.set_output_bytes(8)\n\n # aperture\n # 7.2 in 10 um scaled by lambda / 10\n # flatten beyond 20 um\n # default aperture (should always specify a set of apertures)\n\n # assign wl_aper and aper from dictionary of aperture\n wl_aper = aperture['wave']\n aper = aperture['aperture']\n # create the non-repetitive aperture list and index array\n aper_reduced = sorted(list(set(aper)))\n index_reduced = np.arange(1, len(aper_reduced)+1)\n\n dict_peel_sed = {}\n for i in range(0, len(aper_reduced)):\n aper_dum = aper_reduced[i]/2 * (1/3600.*np.pi/180.)*dstar*pc\n dict_peel_sed[str(index_reduced[i])] = m.add_peeled_images(image=False)\n # use the index of wavelength array used by the monochromatic radiative transfer\n if mono == False:\n dict_peel_sed[str(index_reduced[i])].set_wavelength_range(wav_num, wav_min, wav_max)\n dict_peel_sed[str(index_reduced[i])].set_viewing_angles([dict_params['view_angle']], [0.0])\n # aperture should be given in cm and its the radius of the aperture\n dict_peel_sed[str(index_reduced[i])].set_aperture_range(1, aper_dum, aper_dum)\n dict_peel_sed[str(index_reduced[i])].set_uncertainties(True)\n dict_peel_sed[str(index_reduced[i])].set_output_bytes(8)\n\n # image setting\n syn_im = m.add_peeled_images(sed=False)\n # use the index of wavelength array used by the monochromatic radiative transfer\n if mono_wave == None:\n 
syn_im.set_wavelength_range(wav_num, wav_min, wav_max)\n pix_num = 300\n else:\n pix_num = 8000\n #\n syn_im.set_image_size(pix_num, pix_num)\n syn_im.set_image_limits(-R_env_max, R_env_max, -R_env_max, R_env_max)\n syn_im.set_viewing_angles([dict_params['view_angle']], [0.0])\n syn_im.set_uncertainties(True)\n syn_im.set_output_bytes(8)\n\n # Output setting\n # Density\n m.conf.output.output_density = 'last'\n # Density difference (shows where dust was destroyed)\n m.conf.output.output_density_diff = 'none'\n # Energy absorbed (using pathlengths)\n m.conf.output.output_specific_energy = 'last'\n # Number of unique photons that passed through the cell\n m.conf.output.output_n_photons = 'last'\n m.write(outdir+outname+'.rtin')\n\n if plot:\n # rho2d is the 2-D projection of gas density\n # take the weighted average\n rho2d = np.sum(rho**2, axis=2)/np.sum(rho, axis=2)\n\n if fast_plot == False:\n # Plot the azimuthal averaged density\n fig = plt.figure(figsize=(8,6))\n ax_env = fig.add_subplot(111, projection='polar')\n\n # zmin = 1e-22/mmw/mh\n zmin = 1e-1\n cmap = plt.cm.CMRmap\n rho2d_exp = np.hstack((rho2d, rho2d, rho2d[:,0:1]))\n thetac_exp = np.hstack((thetac-PI/2, thetac+PI/2, thetac[0]-PI/2))\n # plot the gas density\n img_env = ax_env.pcolormesh(thetac_exp, rc/AU, rho2d_exp/mmw/mh,\n cmap=cmap,\n norm=LogNorm(vmin=zmin,vmax=1e6))\n\n ax_env.set_xlabel(r'$\\rm{Polar\\,angle\\,(Degree)}$',fontsize=20)\n ax_env.set_ylabel('', fontsize=20, labelpad=-140)\n ax_env.tick_params(labelsize=18)\n ax_env.set_yticks(np.hstack((np.arange(0,(int(R_env_max/AU/10000.)+1)*10000, 10000),R_env_max/AU)))\n ax_env.set_xticklabels([r'$\\rm{90^{\\circ}}$',r'$\\rm{45^{\\circ}}$',r'$\\rm{0^{\\circ}}$',r'$\\rm{-45^{\\circ}}$',\\\n r'$\\rm{-90^{\\circ}}$',r'$\\rm{-135^{\\circ}}$',r'$\\rm{180^{\\circ}}$',r'$\\rm{135^{\\circ}}$'])\n ax_env.set_yticklabels([])\n # fix the tick label font\n ticks_font = mpl.font_manager.FontProperties(family='STIXGeneral',size=20)\n for label in ax_env.get_yticklabels():\n label.set_fontproperties(ticks_font)\n\n ax_env.grid(True, color='LightGray', linewidth=1.5)\n cb = fig.colorbar(img_env, pad=0.1)\n cb.ax.set_ylabel(r'$\\rm{Averaged\\,Gas\\,Density\\,(cm^{-3})}$',fontsize=20)\n cb.set_ticks([1e-1,1e0,1e1,1e2,1e3,1e4,1e5,1e6])\n cb.set_ticklabels([r'$\\rm{10^{-1}}$',r'$\\rm{10^{0}}$',r'$\\rm{10^{1}}$',r'$\\rm{10^{2}}$',r'$\\rm{10^{3}}$',\n r'$\\rm{10^{4}}$',r'$\\rm{10^{5}}$',r'$\\rm{\\geq 10^{6}}$'])\n\n cb_obj = plt.getp(cb.ax.axes, 'yticklabels')\n plt.setp(cb_obj, fontsize=20)\n fig.savefig(outdir+outname+'_gas_density.png', format='png', dpi=300, bbox_inches='tight')\n fig.clf()\n\n # Plot the radial density profile\n fig = plt.figure(figsize=(12,9))\n ax = fig.add_subplot(111)\n\n plot_grid = [0, 49, 99, 149, 199]\n color_grid = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00']\n label = [r'$\\rm{\\theta='+str(int(np.degrees(thetai[plot_grid[0]])))+'^{\\circ}}$',\n r'$\\rm{\\theta='+str(int(np.degrees(thetai[plot_grid[1]])))+'^{\\circ}}$',\n r'$\\rm{\\theta='+str(1+int(np.degrees(thetai[plot_grid[2]])))+'^{\\circ}}$',\n r'$\\rm{\\theta='+str(int(np.degrees(thetai[plot_grid[3]])))+'^{\\circ}}$',\n r'$\\rm{\\theta='+str(1+int(np.degrees(thetai[plot_grid[4]])))+'^{\\circ}}$']\n alpha = np.linspace(0.3, 1.0, len(plot_grid))\n for i in plot_grid:\n ax.plot(np.log10(rc[rc > 0.14*AU]/AU), np.log10(rho2d[rc > 0.14*AU,i]/g2d/mmw/mh)+plot_grid[::-1].index(i)*-0.2,'-',color=color_grid[plot_grid.index(i)],mec='None',linewidth=2.5, \\\n markersize=3, 
label=label[plot_grid.index(i)])\n ax.axvline(np.log10(R_inf/AU), linestyle='--', color='k', linewidth=1.5, label=r'$\\rm{infall\\,radius}$')\n ax.axvline(np.log10(R_cen/AU), linestyle=':', color='k', linewidth=1.5, label=r'$\\rm{centrifugal\\,radius}$')\n\n lg = plt.legend(fontsize=20, numpoints=1, ncol=2, framealpha=0.7, loc='upper right')\n\n ax.set_xlabel(r'$\\rm{log(Radius)\\,(AU)}$', fontsize=20)\n ax.set_ylabel(r'$\\rm{log(Dust\\,Density)\\,(cm^{-3})}$', fontsize=20)\n [ax.spines[axis].set_linewidth(1.5) for axis in ['top','bottom','left','right']]\n ax.minorticks_on()\n ax.tick_params('both', labelsize=18, width=1.5, which='major', pad=15, length=5)\n ax.tick_params('both', labelsize=18, width=1.5, which='minor', pad=15, length=2.5)\n\n # fix the tick label font\n ticks_font = mpl.font_manager.FontProperties(family='STIXGeneral',size=18)\n for label in ax.get_xticklabels():\n label.set_fontproperties(ticks_font)\n for label in ax.get_yticklabels():\n label.set_fontproperties(ticks_font)\n\n ax.set_ylim([0,11])\n fig.gca().set_xlim(left=np.log10(0.05))\n fig.savefig(outdir+outname+'_gas_radial.pdf',format='pdf',dpi=300,bbox_inches='tight')\n fig.clf()\n\n # Record the input and calculated parameters\n if not norecord == True:\n params = dict_params.copy()\n params.update({'d_sub': d_sub/AU,\n 'M_env_dot': M_env_dot/MS*yr,\n 'R_inf': R_inf/AU,\n 'R_cen': R_cen/AU,\n 'mstar': mstar/MS,\n 'M_tot_gas': total_mass/MS})\n record_hyperion(params,record_dir)\n\n\n return m",
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def skel_model(action, install_path_mp, install_path_zfs, jname):\n # init vars\n # mp - mount point, zfs - zfs point\n skel_path_mp = '%s-SKELETON' % install_path_mp\n skel_path_zfs = '%s-SKELETON' % install_path_zfs\n rw_path_mp = '%s-RW' % install_path_mp\n rw_path_zfs = '%s-RW' % install_path_zfs\n \n if action == 'init':\n# create SKELETON MODEL\n# http://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails-application.html\n log(\" INFO: Init BASE-SKELETON zfs START\")\n# Create a skeleton for the read-write portion of the system\n os.system('zfs create %s' % skel_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (skel_path_mp, skel_path_zfs))\n os.system('zfs create %s' % rw_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (rw_path_mp, rw_path_zfs))\n\n os.system('mkdir -p %s/home %s/usr-X11R6 %s/distfiles %s/usr-share-keys/pkg' % (skel_path_mp, skel_path_mp, skel_path_mp, skel_path_mp))\n os.system('mv %s/etc %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/usr/local %s/usr-local' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/tmp %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/var %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/root %s' % (install_path_mp, skel_path_mp ))\n# mergemaster to install missing configuration files. Then, remove the the extra directories that mergemaster creates:\n# os.system('mergemaster -t %s/var/tmp/temproot -D %s -i' % (skel_path, skel_path))\n# os.system('rm -R %(key)s/bin %(key)s/boot %(key)s/lib %(key)s/libexec %(key)s/mnt %(key)s/proc %(key)s/rescue %(key)s/sbin %(key)s/sys %(key)s/usr %(key)s/dev' % {'key': skel_path})\n# Now, symlink the read-write file system to the read-only file system. Ensure that the symlinks are created in the correct s/ locations as the creation of directories in the wrong locations will cause the installation to fail.\n os.chdir('%s' % install_path_mp)\n os.system('mkdir SROOT')\n os.system('ln -s SROOT/etc etc')\n os.system('ln -s SROOT/home home')\n os.system('ln -s SROOT/root root')\n os.system('ln -s /SROOT/usr-local usr/local')\n os.system('ln -s /SROOT/usr-share-keys usr/share/keys')\n os.system('ln -s /SROOT/usr-X11R6 usr/X11R6')\n os.system('ln -s /SROOT/distfiles usr/ports/distfiles')\n os.system('ln -s SROOT/tmp tmp')\n os.system('ln -s SROOT/var var')\n# Create a generic /home/j/skel/etc/make.conf containing this line\n os.system('echo \\\"WRKDIRPREFIX?= /SROOT/portbuild\\\" > %s/etc/make.conf' % skel_path_mp )\n# Create zfs BASE-SKELETON snapshot which will be used for installation \n os.system('zfs snapshot %s@install' % skel_path_zfs)\n log(\" INFO: Init BASE-SKELETON zfs FINISH\")\n \n# install SKELETON jail \n if action == 'install':\n# install RW fs for jail\n os.system('zfs send %s/BASE-SKELETON@install | zfs receive -F %s/BASE-RW/%s' % (jzfs, jzfs, jname))\n# remove receive snapshot \n os.system('zfs destroy %s/BASE-RW/%s@install' % (jzfs, jname))\n# create jail local config - mount skel model for jail hosme dir\n if jname == 'BASE-update':\n os.system('echo \\\"%sBASE %s%s nullfs rw 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n else:\n os.system('echo \\\"%sBASE %s%s nullfs ro 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n \n os.system('echo \\\"%sBASE-RW/%s %s%s/SROOT nullfs rw 0 0\\\" >> %sBASE-RW/%s/etc/fstab' % (jpath, jname, jpath, jname, jpath, jname))\n temp_add_cfg = ['### BASE mount settings ###', 'mount.fstab=\"%sBASE-RW/%s/etc/fstab\";' % (jpath, jname), 'mount.devfs;']\n return 
temp_add_cfg",
"def usped(self):\n\n # assign variables\n ls_factor = 'ls_factor'\n slope = 'slope'\n aspect = 'aspect'\n flowacc = 'flowacc'\n qsx = 'qsx'\n qsxdx = 'qsxdx'\n qsy = 'qsy'\n qsydy = 'qsydy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_qsxdx = 'grow_qsxdx'\n grow_qsydy = 'grow_qsydy'\n erdep = 'erdep' # kg/m^2s\n sedflow = 'sedflow'\n\n # parse, advance, and stamp time\n (evolved_elevation, time, depth, sediment_flux, erosion_deposition,\n difference) = self.parse_time()\n\n # compute event-based erosivity (R) factor (MJ mm ha^-1 hr^-1 yr^-1)\n r_factor = self.event_based_r_factor()\n\n # compute slope and aspect\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n aspect=aspect,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=aspect,\n value=grow_aspect,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{aspect}={grow_aspect}\".format(\n aspect=aspect,\n grow_aspect=grow_aspect),\n overwrite=True)\n\n # compute flow accumulation\n gscript.run_command(\n 'r.watershed',\n elevation=self.elevation,\n accumulation=flowacc,\n flags=\"a\",\n overwrite=True)\n region = gscript.parse_command(\n 'g.region', flags='g')\n res = region['nsres']\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{depth}\"\n \"=({flowacc}*{res})\".format(\n depth=depth,\n flowacc=flowacc,\n res=res),\n overwrite=True)\n # add depression parameter to r.watershed\n # derive from landcover class\n\n\n # compute dimensionless topographic factor\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{ls_factor}\"\n \"=({flowacc}^{m})*(sin({slope})^{n})\".format(\n ls_factor=ls_factor,\n m=self.m,\n flowacc=depth,\n slope=slope,\n n=self.n),\n overwrite=True)\n\n # compute sediment flow at sediment transport capacity\n \"\"\"\n T = R * K * C * P * LST\n where\n T is sediment flow at transport capacity\n R is rainfall factor\n K is soil erodibility factor\n C is a dimensionless land cover factor\n P is a dimensionless prevention measures factor\n LST is the topographic component of sediment transport capacity\n of overland flow\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sedflow}\"\n \"={r_factor}\"\n \"*{k_factor}\"\n \"*{c_factor}\"\n \"*{ls_factor}\".format(\n r_factor=r_factor,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n ls_factor=ls_factor,\n sedflow=sedflow),\n overwrite=True)\n\n # convert sediment flow from tons/ha/yr to kg/m^2s\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{converted_sedflow}\"\n \"={sedflow}\"\n \"*{ton_to_kg}\"\n \"/{ha_to_m2}\"\n \"/{yr_to_s}\".format(\n converted_sedflow=sediment_flux,\n sedflow=sedflow,\n ton_to_kg=1000.,\n ha_to_m2=10000.,\n yr_to_s=31557600.),\n overwrite=True)\n\n # compute sediment flow rate in x direction (m^2/s)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsx}={sedflow}*cos({aspect})\".format(\n sedflow=sediment_flux,\n aspect=aspect, qsx=qsx),\n overwrite=True)\n\n # compute sediment flow rate in y direction (m^2/s)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsy}={sedflow}*sin({aspect})\".format(\n sedflow=sediment_flux,\n aspect=aspect,\n qsy=qsy),\n overwrite=True)\n\n # compute change in sediment flow in x 
direction\n # as partial derivative of sediment flow field\n gscript.run_command(\n 'r.slope.aspect',\n elevation=qsx,\n dx=qsxdx,\n overwrite=True)\n\n # compute change in sediment flow in y direction\n # as partial derivative of sediment flow field\n gscript.run_command(\n 'r.slope.aspect',\n elevation=qsy,\n dy=qsydy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=qsxdx,\n value=grow_qsxdx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsxdx}={grow_qsxdx}\".format(\n qsxdx=qsxdx,\n grow_qsxdx=grow_qsxdx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=qsydy,\n value=grow_qsydy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsydy}={grow_qsydy}\".format(\n qsydy=qsydy,\n grow_qsydy=grow_qsydy),\n overwrite=True)\n\n # compute net erosion-deposition (kg/m^2s)\n # as divergence of sediment flow\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erdep} = {qsxdx} + {qsydy}\".format(\n erdep=erdep,\n qsxdx=qsxdx,\n qsydy=qsydy),\n overwrite=True)\n\n # filter outliers\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosion_deposition}\"\n \"=if({erdep}<{erdepmin},\"\n \"{erdepmin},\"\n \"if({erdep}>{erdepmax},{erdepmax},{erdep}))\".format(\n erosion_deposition=erosion_deposition,\n erdep=erdep,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax),\n overwrite=True)\n\n # set color table\n gscript.write_command(\n 'r.colors',\n map=erosion_deposition,\n rules='-',\n stdin=erosion_colors)\n\n # evolve landscape\n \"\"\"\n change in elevation (m)\n = change in time (s)\n * net erosion-deposition (kg/m^2s)\n / sediment mass density (kg/m^3)\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{evolved_elevation}\"\n \"={elevation}\"\n \"+({rain_interval}*60\"\n \"*{erosion_deposition}\"\n \"/{density})\".format(\n evolved_elevation=evolved_elevation,\n elevation=self.elevation,\n rain_interval=self.rain_interval,\n erosion_deposition=erosion_deposition,\n density=self.density),\n overwrite=True)\n\n # gravitational diffusion\n evolved_elevation = self.gravitational_diffusion(evolved_elevation)\n\n # compute elevation change\n difference = self.compute_difference(evolved_elevation, difference)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['slope',\n 'aspect',\n 'flowacc',\n 'qsx',\n 'qsy',\n 'qsxdx',\n 'qsydy',\n 'grow_slope',\n 'grow_aspect',\n 'grow_qsxdx',\n 'grow_qsydy',\n 'erdep',\n 'sedflow',\n 'r_factor',\n 'ls_factor'],\n flags='f')\n\n return (evolved_elevation, time, depth, erosion_deposition, difference)",
"def ssd_synthia_car_fine_tune():\n merged_annotation = '/home/public/synthia/ssd_car_fine_tune/SYNTHIA-SEQS-01-TRAIN_MERGED-shuffle.json'\n if False:\n print('we combine the training and validation here')\n annotations_url_1 = '/home/public/synthia/SYNTHIA-SEQS-01-TRAIN-shuffle.json'\n annotations_url_2 = '/home/public/synthia/SYNTHIA-SEQS-01-VALIDATE-shuffle.json'\n combine_gt(annotations_url_1, annotations_url_2, merged_annotation)\n\n if False:\n print('collect front and rear cars')\n annotations_url_1 = '/home/public/synthia/SYNTHIA-SEQS-01-TRAIN-shuffle.json'\n annotations_url_2 = '/home/public/synthia/SYNTHIA-SEQS-01-VALIDATE-shuffle.json'\n save_dir = '/home/stevenwudi/PycharmProjects/autonomous_driving/Experiments/SEQ_01_SEQ_06_cars'\n collect_front_and_rear_gt(annotations_url_1, annotations_url_2, save_dir, image_interval=50)\n\n gt_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_fine_tune_gt-shuffle.pkl'\n if False:\n print('Training annotation conversion')\n converting_gt(merged_annotation, gt_file, POR=1e-3)\n # POR: 1e-3 Finish converting, total annotated car number is 22332 in total image of 8814.\n # POR: 5e-4: Finish converting, total annotated fish number is 26800 in total image of 8814.\n\n model_checkpoint = '/home/public/synthia/ssd_car_fine_tune/weights_512.54-0.19.hdf5'\n if False:\n print('Start DDS 512 training')\n train_ssd512(gt_file, model_checkpoint=model_checkpoint, base_lr=1e-5)\n\n test_gt_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_test_gt-shuffle.pkl'\n if False:\n print('Converting testing GT')\n annotations_url = '/home/public/synthia/SYNTHIA-SEQS-01-TEST-shuffle.json'\n converting_gt(annotations_url, test_gt_file)\n if False:\n # Examine test data\n examine_ssd512(test_gt_file, model_checkpoint)\n\n test_json_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_test-shuffle_nms_'+str(nms_thresh)+'.json'\n if False:\n test_ssd512(test_gt_file, model_checkpoint, test_json_file)\n # A separate file for accepting gt file and predicted json fil\n if True:\n calculate_iou(test_gt_file, test_json_file, POR=2e-3, draw=False)\n\n test_gt_file = '/home/public/synthia/ssd_car_fine_tune/ssd_car_test_gt-shuffle.pkl'\n test_json_file = '/home/public/synthia/ssd_car_test_faster-shuffle.json'\n\n calculate_iou(test_gt_file, test_json_file, POR=2e-3, draw=False)\n \"\"\"\n<<<<<<< HEAD\n This is the network results train by SSD512 (with 0.05% POR trained)\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n \n ### POR=1e-3\n Total GT: 2327. \n Total prediction: [ 2016. 1986. 1951. 1918. 1888. 1852. 1815. 1774. 1711. 1641.]\n Precision: [ 0.913 0.908 0.9 0.895 0.886 0.879 0.867 0.853 0.83 0.804]\n Recall: [ 0.791 0.787 0.78 0.775 0.768 0.761 0.751 0.739 0.719 0.697]\n F score: [[ 0.847 0.843 0.837 0.825 0.816 0.79 0.748 0.666 0.449 0.106]\n [ 0.843 0.839 0.834 0.823 0.813 0.788 0.748 0.665 0.449 0.106]\n [ 0.835 0.833 0.829 0.818 0.809 0.786 0.747 0.665 0.449 0.106]\n [ 0.831 0.829 0.826 0.815 0.807 0.784 0.746 0.664 0.449 0.105]\n [ 0.823 0.822 0.82 0.81 0.802 0.781 0.744 0.663 0.447 0.105]\n [ 0.816 0.816 0.815 0.806 0.799 0.778 0.741 0.661 0.446 0.105]\n [ 0.805 0.805 0.806 0.797 0.792 0.772 0.738 0.66 0.445 0.105]\n [ 0.792 0.793 0.793 0.789 0.784 0.766 0.734 0.658 0.445 0.105]\n [ 0.77 0.773 0.775 0.771 0.768 0.753 0.725 0.652 0.441 0.104]\n [ 0.746 0.749 0.751 0.751 0.75 0.741 0.716 0.645 0.439 0.103]]\n<<<<<<< HEAD\n\n ### faster RCNN detection result POR= 1e-3\n Total GT: 2327.\n Total prediction: [ 2120. 2084. 2062. 2037. 2003. 1971. 
1947. 1901. 1862. 1785.]\n Precision: [ 0.83 0.826 0.825 0.823 0.82 0.818 0.817 0.807 0.801 0.787]\n Recall: [ 0.756 0.752 0.751 0.75 0.747 0.745 0.744 0.735 0.73 0.717]\n F score: [[ 0.791 0.758 0.72 0.669 0.59 0.485 0.332 0.174 0.054 0.006]\n [ 0.787 0.756 0.719 0.669 0.59 0.485 0.332 0.174 0.054 0.006]\n [ 0.786 0.755 0.718 0.668 0.589 0.485 0.332 0.174 0.054 0.006]\n [ 0.785 0.754 0.718 0.668 0.589 0.484 0.332 0.174 0.054 0.006]\n [ 0.782 0.752 0.716 0.666 0.588 0.484 0.331 0.174 0.054 0.006]\n [ 0.78 0.75 0.715 0.666 0.588 0.484 0.331 0.174 0.054 0.006]\n [ 0.779 0.749 0.714 0.665 0.588 0.484 0.331 0.174 0.054 0.006]\n [ 0.77 0.745 0.711 0.663 0.586 0.483 0.331 0.174 0.054 0.006]\n [ 0.764 0.74 0.709 0.66 0.584 0.482 0.331 0.173 0.054 0.006]\n [ 0.75 0.73 0.703 0.657 0.582 0.481 0.33 0.173 0.054 0.006]]\n\n=======\n \n ### POR shuffle\n Total GT: 2563. \n Total prediction: [ 2185. 2137. 2081. 2047. 2010. 1976. 1937. 1893. 1831. 1713.]\n Precision: [ 0.843 0.836 0.826 0.819 0.813 0.805 0.795 0.784 0.766 0.726]\n Recall: [ 0.719 0.712 0.704 0.698 0.693 0.686 0.678 0.669 0.653 0.619]\n F score: [[ 0.776 0.698 0.646 0.619 0.597 0.571 0.525 0.447 0.31 0.106]\n [ 0.769 0.693 0.642 0.616 0.595 0.57 0.524 0.447 0.31 0.106]\n [ 0.76 0.686 0.637 0.612 0.591 0.568 0.523 0.445 0.31 0.106]\n [ 0.754 0.681 0.632 0.608 0.588 0.565 0.521 0.443 0.31 0.106]\n [ 0.749 0.677 0.629 0.605 0.586 0.563 0.52 0.442 0.309 0.106]\n [ 0.741 0.671 0.624 0.602 0.583 0.561 0.518 0.441 0.309 0.106]\n [ 0.732 0.664 0.619 0.597 0.579 0.558 0.516 0.44 0.308 0.106]\n [ 0.722 0.656 0.612 0.59 0.573 0.553 0.515 0.439 0.308 0.106]\n [ 0.705 0.642 0.6 0.58 0.564 0.545 0.508 0.435 0.306 0.105]\n [ 0.668 0.614 0.578 0.561 0.548 0.531 0.496 0.426 0.302 0.105]]\n \n POR = 2e-3\n=======\n ############################# SSD512 NMS 0.6 ###########################\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n>>>>>>> 419d28c4e27b82c4cfef6a3aa01425cf29929973\n Total GT: 1433. \n Total prediction: [ 1617. 1605. 1593. 1579. 1568. 1551. 1537. 1517. 1498. 1465.]\n Precision: [ 0.821 0.819 0.818 0.818 0.816 0.811 0.809 0.804 0.8 0.792]\n Recall: [ 0.927 0.925 0.923 0.923 0.92 0.915 0.913 0.907 0.902 0.894]\n F score: [[ 0.871 0.868 0.859 0.852 0.836 0.81 0.742 0.636 0.48 0.161]\n [ 0.869 0.866 0.857 0.85 0.834 0.808 0.741 0.635 0.48 0.161]\n [ 0.867 0.864 0.857 0.849 0.834 0.808 0.741 0.635 0.48 0.161]\n [ 0.867 0.864 0.857 0.849 0.834 0.808 0.741 0.635 0.48 0.161]\n [ 0.865 0.862 0.855 0.848 0.832 0.807 0.741 0.635 0.48 0.161]\n [ 0.86 0.857 0.851 0.844 0.828 0.804 0.739 0.633 0.479 0.161]\n [ 0.858 0.855 0.849 0.843 0.827 0.802 0.738 0.633 0.478 0.161]\n [ 0.852 0.85 0.844 0.839 0.823 0.8 0.736 0.631 0.477 0.16 ]\n [ 0.848 0.846 0.841 0.836 0.82 0.797 0.734 0.631 0.477 0.16 ]\n [ 0.84 0.838 0.834 0.829 0.814 0.792 0.729 0.629 0.476 0.16 ]]\n \n ############################# SSD512 NMS 0.45 ###########################\n Total GT: 1433. \n Total prediction: [ 1438. 1428. 1421. 1413. 1403. 1398. 1384. 1374. 1348. 
1305.]\n Precision: [ 0.92 0.918 0.917 0.914 0.911 0.91 0.905 0.902 0.894 0.874]\n Recall: [ 0.923 0.921 0.92 0.918 0.914 0.913 0.908 0.905 0.897 0.877]\n F score: [[ 0.922 0.918 0.91 0.901 0.883 0.851 0.785 0.673 0.476 0.164]\n [ 0.92 0.916 0.908 0.9 0.882 0.851 0.785 0.673 0.476 0.164]\n [ 0.918 0.915 0.907 0.899 0.882 0.851 0.785 0.673 0.476 0.164]\n [ 0.916 0.913 0.905 0.897 0.88 0.849 0.785 0.673 0.476 0.164]\n [ 0.913 0.91 0.903 0.895 0.878 0.847 0.783 0.671 0.475 0.164]\n [ 0.911 0.909 0.901 0.894 0.877 0.846 0.782 0.671 0.475 0.164]\n [ 0.906 0.905 0.898 0.89 0.874 0.843 0.781 0.67 0.475 0.164]\n [ 0.904 0.901 0.895 0.888 0.871 0.84 0.78 0.67 0.475 0.164]\n [ 0.896 0.894 0.888 0.882 0.866 0.836 0.776 0.668 0.474 0.164]\n [ 0.876 0.876 0.872 0.869 0.855 0.829 0.77 0.663 0.471 0.164]]\n \n \n<<<<<<< HEAD\n>>>>>>> bb5caf05d4bc41e182e41686b8b5e497053f9ca5\n ### POR = None (consider all testing examples)\n Total GT: 2696. \n Total prediction: [ 2273. 2221. 2166. 2111. 2055. 2000. 1945. 1875. 1786. 1684.]\n Precision: [ 0.822 0.818 0.81 0.805 0.796 0.789 0.777 0.763 0.741 0.716]\n Recall: [ 0.693 0.69 0.683 0.678 0.671 0.665 0.655 0.643 0.625 0.604]\n F score: [[ 0.752 0.746 0.74 0.729 0.724 0.703 0.668 0.597 0.405 0.096]\n [ 0.748 0.744 0.737 0.727 0.722 0.702 0.668 0.597 0.405 0.096]\n [ 0.741 0.737 0.733 0.723 0.718 0.699 0.667 0.596 0.405 0.096]\n [ 0.736 0.733 0.729 0.72 0.716 0.698 0.666 0.596 0.404 0.095]\n [ 0.728 0.726 0.724 0.716 0.712 0.695 0.664 0.595 0.403 0.095]\n [ 0.722 0.721 0.72 0.712 0.708 0.692 0.661 0.593 0.402 0.095]\n [ 0.711 0.711 0.711 0.704 0.703 0.687 0.659 0.592 0.401 0.095]\n [ 0.698 0.7 0.7 0.696 0.696 0.682 0.655 0.59 0.401 0.095]\n [ 0.678 0.682 0.684 0.681 0.681 0.67 0.647 0.585 0.398 0.095]\n [ 0.655 0.659 0.662 0.663 0.665 0.659 0.639 0.578 0.395 0.094]]\n\n POR = 2e-3 fastercnn result\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n Total GT: 1433.\n Total prediction: [ 1614. 1601. 1593. 1585. 1576. 1561. 1557. 1544. 1532. 1507.]\n=======\n ############################# Faster-RCNN ###########################\n Conf: [ 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95]\n Total GT: 1433. \n Total prediction: [ 1614. 1601. 1593. 1585. 1576. 1561. 1557. 1544. 1532. 1507.]\n>>>>>>> 419d28c4e27b82c4cfef6a3aa01425cf29929973\n Precision: [ 0.812 0.812 0.811 0.811 0.811 0.81 0.81 0.807 0.805 0.8 ]\n Recall: [ 0.914 0.914 0.913 0.913 0.913 0.913 0.913 0.909 0.907 0.902]\n F score: [[ 0.86 0.843 0.808 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.86 0.843 0.808 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.859 0.842 0.807 0.75 0.666 0.577 0.459 0.311 0.14 0.016]\n [ 0.855 0.839 0.804 0.748 0.665 0.576 0.458 0.311 0.14 0.016]\n [ 0.853 0.838 0.804 0.747 0.664 0.575 0.458 0.311 0.14 0.016]\n [ 0.848 0.834 0.8 0.745 0.662 0.574 0.457 0.31 0.139 0.016]]\n \"\"\"",
"def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ##### Structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5\n model.structure = np.array(list(np.tile([2] + np.tile([1],model.nof_segments_internodes).tolist(),model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes / model.nof_segments_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # same diameter for whole fiber\n model.compartment_diameters[:] = model.diameter_fiber\n \n ##### conductivity of leakage channels\n model.g_L = model.g_L_node/model.surface_aria_node\n\n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacities\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # nodes\n model.c_m[model.structure == 2] = model.c_m_node/model.surface_aria_node\n # internodes\n model.c_m[structure == 1] = model.c_m_layer/(1+model.nof_myelin_layers)\n \n ##### Condactivities internodes\n # initialize\n model.g_m = np.zeros_like(model.structure)*msiemens/cm**2\n # internodes\n model.g_m[model.structure == 1] = model.g_m_layer/(1+model.nof_myelin_layers)\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2\n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Noise term\n model.gamma_Na_vector = np.zeros(model.nof_comps)*psiemens\n model.gamma_Na_vector[model.structure == 2] = model.gamma_Na\n model.noise_term = np.sqrt(model.A_surface*model.gamma_Na_vector*model.rho_Na)\n \n ##### Compartments to plot\n # get indexes of all compartments that are not segmented\n model.indexes_comps = np.where(model.structure == 2)[0]\n # calculate middle compartments of internodes\n model.middle_comps_internodes = np.ceil(model.indexes_comps[:-1] + model.nof_segments_internodes/2).astype(int)\n # create array with all compartments to plot\n model.comps_to_plot = np.sort(np.append(model.indexes_comps, model.middle_comps_internodes))\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n 
diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.h = model.h_init\n neuron.n = model.n_init\n \n ##### Set parameter values of differential equations\n # conductances nodes\n neuron.gamma_Na = model.gamma_Na\n neuron.gamma_K = model.gamma_K\n neuron.g_L = model.g_L\n \n # conductances internodes\n neuron.g_myelin = model.g_m\n neuron.gamma_Na[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.gamma_K[np.asarray(np.where(model.structure == 1))] = 0*psiemens\n neuron.g_L[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # conductances peripheral terminal\n neuron.gamma_Na[np.where(model.structure == 0)[0]] = model.gamma_Na_terminal\n neuron.gamma_K[np.where(model.structure == 0)[0]] = model.gamma_K_terminal\n neuron.g_L[np.where(model.structure == 0)[0]] = model.g_L_terminal\n \n # conductances soma\n neuron.gamma_Na[index_soma] = 0*psiemens\n neuron.gamma_K[index_soma] = 0*psiemens\n neuron.g_L[index_soma] = 0*msiemens/cm**2\n \n # Nernst potential for leakage current\n neuron.E_Leak = model.E_L\n neuron.E_Leak[np.where(model.structure == 0)[0]] = E_L_terminal\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.rho_Na = model.rho_Na\n neuron.rho_K = model.rho_K\n \n return neuron, model",
"def E_step_precompute(self, model_params, my_suff_stat, my_data):",
"def run_the_training(args, clus=None, expert=None):\n # load data\n cfg, lbl = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n print('We are playing with %s' % uid)\n data = npload(cfg['file_path'], uid)\n data_feed = {'x': data[cfg['x_name']], 'y': data[cfg['y_name']]}\n dimx = data_feed['x'].shape[1]\n dimy = data_feed['y'].shape[1]\n\n # create gate and expert\n if clus is None:\n n_model = 5\n clus = GaoNet([dimx, 100, n_model])\n expert = Experts([[dimx, 60, dimy]] * n_model)\n # cuda it\n clus.cuda()\n expert.cuda()\n\n # set data loader\n xname, yname = 'x', 'y'\n factory = KeyFactory(data_feed, xname, yname, scalex=True, scaley=True)\n factory.shuffle(None)\n\n draw_clus_region(clus, data_feed['x'], factory)\n\n # create two sets\n trainsize = 0.8\n trainSet = SubFactory(factory, 0.0, trainsize)\n testSet = SubFactory(factory, trainsize, 1.0)\n batch_size = 32\n test_batch_size = -1\n trainLder = DataLoader(trainSet, batch_size=batch_size, shuffle=False)\n testLder = DataLoader(testSet, batch_size=test_batch_size, shuffle=False)\n\n # set up file output\n outname = 'gate_expert_model.pt'\n outdir = 'models/pen/gate_expert'\n if KLLOSS:\n outname = 'gate_expert_kldiv_model.pt'\n if args.warm:\n outname = outname.replace('.pt', '_warm.pt')\n\n # set optimizer\n lr = 1e-3\n opt_G = torch.optim.Adam(clus.parameters(), lr=lr)\n opt_E = torch.optim.Adam(expert.parameters(), lr=lr)\n\n # set other training stuff\n n_epoch = 500\n back_check_epoch = 8\n best_test_loss = np.inf\n best_test_loss_expert = np.inf\n best_test_epoch = 0\n\n def get_mean_error(g_y, exp_y, feedy):\n \"\"\"Calculate two loss\"\"\"\n error_traj = torch.mean((exp_y - feedy.expand_as(exp_y)) ** 2, dim=2).t()\n g = f.softmax(g_y)\n log_g = f.log_softmax(g_y)\n posterior = g * torch.exp(-0.5 * error_traj) # b by r probability, not scaled to 1\n traj_prob = torch.mean(-torch.log(torch.sum(posterior, dim=1)))\n if KLLOSS:\n posterior_scale = Variable((posterior / torch.sum(posterior, dim=1, keepdim=True)).data) # do not use gradient of it\n div_error = f.kl_div(log_g, posterior_scale)\n return traj_prob, div_error\n else:\n Og = torch.sum(exp_y * g.t().unsqueeze(2), dim=0)\n traj_error = f.smooth_l1_loss(Og, feedy)\n return traj_prob, traj_error\n\n # start training\n for epoch in range(n_epoch):\n sum_train_loss = 0\n sum_train_loss_prob = 0\n for idx, batch_data in enumerate(trainLder):\n feedy = Variable(batch_data[yname], requires_grad=False).cuda()\n feedx = Variable(batch_data[xname], requires_grad=False).cuda()\n # train experts\n opt_E.zero_grad()\n opt_G.zero_grad()\n exp_y = expert(feedx)\n g_y = clus(feedx)\n g = f.softmax(g_y) # this is prior\n log_g = f.log_softmax(g_y)\n error_traj = torch.mean((exp_y - feedy.expand_as(exp_y)) ** 2, dim=2).t()\n posterior = g * torch.exp(-0.5 * error_traj) # b by r probability, not scaled to 1\n posterior_scale = Variable((posterior / torch.sum(posterior, dim=1, keepdim=True)).data) # do not use gradient of it\n lossi = torch.mean(-torch.log(torch.sum(posterior, dim=1)))\n lossi.backward(retain_graph=True)\n sum_train_loss_prob += lossi.cpu().data.numpy() * feedx.size()[0]\n opt_E.step()\n # update h by regression error\n all_pred = exp_y\n if KLLOSS:\n error = f.kl_div(log_g, posterior_scale)\n else:\n Og_before = all_pred * g.t().unsqueeze(2)\n Og = torch.sum(Og_before, dim=0)\n error = f.smooth_l1_loss(Og, feedy)\n sum_train_loss += error.cpu().data.numpy() * feedx.size()[0]\n error.backward()\n opt_G.step()\n # val = clus.printWeights(3)\n mean_train_loss = 
sum_train_loss / trainLder.getNumData()\n mean_train_loss_prob = sum_train_loss_prob / trainLder.getNumData()\n\n # evaluate on test data\n sum_test_loss_gate = 0\n sum_test_loss_expert = 0\n n_test_data = testLder.getNumData()\n for idx, batch_data in enumerate(testLder):\n feedy = Variable(batch_data[yname], volatile=True).cuda()\n feedx = Variable(batch_data[xname], volatile=True).cuda()\n exp_y = expert(feedx)\n g_y = clus(feedx)\n traj_prob, div_error = get_mean_error(g_y, exp_y, feedy)\n sum_test_loss_gate += div_error.cpu().data.numpy() * feedx.size()[0]\n sum_test_loss_expert += traj_prob.cpu().data.numpy() * feedx.size()[0]\n mean_test_loss_gate = sum_test_loss_gate / n_test_data\n mean_test_loss_expert = sum_test_loss_expert / n_test_data\n print('epoch %d gate loss %f expert loss %f test gate loss %f expert loss %f' \\\n % (epoch, mean_train_loss, mean_train_loss_prob, mean_test_loss_gate, mean_test_loss_expert))\n if mean_test_loss_gate < best_test_loss:\n best_test_loss = mean_test_loss_gate\n best_test_epoch = epoch\n if mean_test_loss_expert < best_test_loss_expert:\n best_test_loss_expert = mean_test_loss_expert\n best_test_epoch = epoch\n if epoch > best_test_epoch + back_check_epoch:\n break\n print('Save model now')\n\n # draw region for classifier\n draw_clus_region(clus, data_feed['x'], factory)\n\n clus.cpu()\n expert.cpu()\n model = {'gate': clus, 'expert': expert, 'xScale': [trainLder.xmean, trainLder.xstd], 'yScale': [trainLder.ymean, trainLder.ystd]}\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n torch.save(model, os.path.join(outdir, outname))",
"def get_problem():\n\n #User Defined Terrain Elevation\n #def terr( x_pos, y_pos ):\n #Defines terrain elevation [m] as a function of x and y positions [m]\n # elev=100.0*(np.sin(0.5*(x_pos/1000.0)))**2.0 #User defined elevation map\n # return elev\n\n #User Defined Tunnel Cost\n #def tunnel(depth):\n #Defines additional cost for placing a 1 meter length of track a non-zero\n #depth below the ground.\n # TunnelCost=(50e3)/(1+np.exp(-(depth-5))) #Tunneling Cost (2016 USD)\n # return TunnelCost\n\n #def bridge(height):\n #Defines additional cost for placing a 1 meter length of track a non-zero\n #heigh above the ground.\n # BridgeCost=10e3*(height/10)**2 #Bridge Cost (2016 USD)\n # return BridgeCost\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('surftest_noinc')\n\n #Define independent variables\n problem.independent('t', 's')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n .state('V','amax*sin(thrA) + eps*(cos(thrA)+cos(hdgA))','m/s') \\\n .state('hdg','cmax/V*sin(hdgA)','rad')\n\n # Define controls\n #problem.control('thrA','rad') \\\n # .control('hdgA','rad')\n problem.control('hdgA','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('1','s')\n\n #problem.cost['path'] = Expression('TimeToUSD+trk*V', 'USD')\n\n #+ \\\n #'(50e3)/(1.0+exp(-1.0*(z-0.0*(sin(0.5*(x/1000.0)))**2.0-5.0)))+'+ \\\n #'10e3*((0.0*(sin(0.5*(x/1000.0)))**2.0-z)/10.0)**2.0','USD')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .initial('V-V_0','m/s') \\\n .initial('hdg-hdg_0','rad') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n #.terminal('V-V_f','m/s')\n #.initial('hdg-hdg_0','rad') \\\n\n #Define constants\n problem.constant('g',9.81,'m/s^2') #Acceleration due to gravity\n problem.constant('trk',1,'USD/m') #Basic cost of 1 m of track on ground (10k per m)\n problem.constant('amax',1.0,'m/s^2') #Maximum thrust acceleration of vehicle\n problem.constant('cmax',1.0,'m/s^2') #Maximum allowed centripetal acceleration\n problem.constant('eps',10,'m/s^2') #Error constant\n problem.constant('TimeToUSD',1,'USD/s') #Time is Money!!\n problem.constant('thrA',0,'rad')\n\n #Unit scaling\n problem.scale.unit('m','x') \\\n .unit('s','x/V') \\\n .unit('rad',1) \\\n .unit('USD',1)\n\n #Configure solver\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=2)\n\n #Initial Guess\n problem.guess.setup('auto',start=[0.0,0.0,1.0,pi/4-0.2], costate_guess=-0.1) #City A\n\n #Add Continuation Steps\n problem.steps.add_step().num_cases(10) \\\n .terminal('x', 10) \\\n .terminal('y', 0)\n\n problem.steps.add_step().num_cases(10) \\\n .const('eps', 0.2)\n\n #problem.steps.add_step().num_cases(10) \\\n # .terminal('y', 2*pi*1000) \\\n # .terminal('z', 0.0) \\\n # .terminal('inc', 0.0)\n #^ City B\n return problem",
"def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('usrtrack', type=str, help='ustsuw binary output')\n parser.add_argument('root', type=str, nargs='?', help='output ROOT file name', default=\"\")\n parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose', help='print what is being done')\n \n args = parser.parse_args()\n\n if not path.isfile(args.usrtrack):\n print(\"ustsuw2root: File %s does not exist.\" % args.usrtrack, file=sys.stderr)\n return 1\n\n if args.root == \"\":\n rootFileName = \"%s%s\" % (args.usrtrack,\".root\")\n else:\n rootFileName = args.root\n \n b = Usrtrack()\n b.readHeader(args.usrtrack)\n\n ND = len(b.detector)\n \n if args.verbose:\n #b.sayHeader()\n for i in range(ND):\n b.printHeader(i)\n print(\"\")\n\n fout = ROOT.TFile(rootFileName, \"recreate\")\n for i in range(ND):\n val = Data.unpackArray(b.readData(i,b.detector[i].lowneu))\n err = Data.unpackArray(b.readStat(i,b.detector[i].lowneu))\n\n det = b.detector[i]\n\n h = hist(det)\n hn = histN(det) # filled only if det.lowneu\n \n n = h.GetNbinsX()\n print(n, len(val), det.ne, val)\n\n for i in range(det.ne):\n h.SetBinContent(i+1, val[i])\n\n for i in range(det.ne):\n h.SetBinError(i+1, err[n-i-1]*val[i])\n\n h.SetEntries(b.weight)\n h.Write()\n if det.lowneu:\n hn.Write()\n\n fout.Close()",
"def unsup_train_exp(model, criterions, optimizer, scheduler, dataloaders,unsup_path,margin,num_epochs=10, vis=None):\n print('>> Fine-tune a Model.')\n best_roc = 0.0\n \n iters = 0\n plot_data = {'X': [], 'Y': [], 'legend': ['Sup. Loss', 'Unsup. Loss', 'Tot. Loss']}\n\n for epoch in range(num_epochs):\n scheduler.step()\n # Training\n model.train()\n for i, sup_data in enumerate(dataloaders['sup_train']):\n unsup_data = dataloaders['unsup_train'][i % len(dataloaders['unsup_train'])]\n sup_inputs = sup_data[0]\n sup_labels = sup_data[1].cuda()\n unsup_inputs = unsup_data[0]\n data_inputs=torch.cat((sup_inputs,unsup_inputs),axis=0).cuda()\n \n # unsup_labels = unsup_data[1].cuda()\n iters += 1\n\n optimizer.zero_grad()\n out_1, out_2 = model(data_inputs)\n sup_out_1,sup_out_2=out_1[:sup_inputs.shape[0]],out_2[:sup_inputs.shape[0]]\n unsup_out_1,unsup_out_2=out_1[sup_inputs.shape[0]:],out_2[sup_inputs.shape[0]:]\n\n loss_sup = criterions['sup'](sup_out_1, sup_labels) + criterions['sup'](sup_out_2, sup_labels) # Step A\n\n loss_unsup = criterions['unsup'](unsup_out_1, unsup_out_2,margin) # Step B\n loss = loss_unsup + loss_sup\n loss.backward()\n optimizer.step()\n\n # visualize\n if (iters % 10 == 0) and (vis != None) and (plot_data != None):\n plot_data['X'].append(iters)\n plot_data['Y'].append([\n loss_sup.item(),\n loss_unsup.item(),\n loss.item()\n ])\n vis.line(\n X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),\n Y=np.array(plot_data['Y']),\n opts={\n 'title': 'Loss over Time',\n 'legend': plot_data['legend'],\n 'xlabel': 'Iterations',\n 'ylabel': 'Loss',\n 'width': 1200,\n 'height': 390,\n },\n win=2\n )\n \"\"\"\n path_dir='/'.join(unsup_path.split('/')[:-1])\n if not os.path.exists(path_dir):\n os.makedirs(path_dir)\n torch.save(model.state_dict(),'{}'.format(unsup_path))\n print('Model saved.')\n \"\"\"\n # Validate\n model.eval()\n labels = torch.zeros((0, )).cuda() # initialize\n discs = torch.zeros((0, )).cuda() \n with torch.no_grad():\n for i, (input, label,cls) in enumerate(dataloaders['unsup_val']):\n inputs = input.cuda()\n label = label.cuda()\n\n out_1, out_2 = model(inputs)\n score_1 = F.softmax(out_1, dim=1)\n score_2 = F.softmax(out_2, dim=1)\n disc = torch.sum(torch.abs(score_1 - score_2), dim=1).reshape((label.shape[0], ))\n\n discs=torch.cat((discs,disc))\n label=label.reshape((label.shape[0], )).float()\n labels=torch.cat((labels,label))\n\n #labels = 1 - labels\n\n labels = labels.cpu()\n discs = discs.cpu()\n roc = evaluate(labels, discs, metric='roc',save_to='./pic.png')\n print('Epoch{} AUROC: {:.3f}'.format(epoch, roc))\n if best_roc < roc:\n best_roc = roc\n path_dir='/'.join(unsup_path.split('/')[:-1])\n if not os.path.exists(path_dir):\n os.makedirs(path_dir)\n torch.save(model.state_dict(),'{}'.format(unsup_path))\n print('Model saved.')\n print('>> Finished.')",
"def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. 
+ (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) ...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, 
ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd",
"def __init__(self, encut, ldaul, Uparam, Jparam, name=\"DFTU_settings\"):\n\n dftu_settings = {\"LDAU\": \".TRUE.\" , \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LADAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)",
"def dynstall_oye_dxdt(t,fs,u,p):\n alpha = u['alpha'](t)\n f_st = p['F_st'](alpha)\n return 1/p['tau'] * (f_st - fs)",
"def main():\n\n # Load settings from file\n settings_file = 'pypet_settings.pkl'\n settings = load_obj(settings_file)\n # Print settings dictionary\n print('\\nSettings dictionary:')\n for key, value in settings.items():\n print(key, ' : ', value)\n print('\\nParameters to explore:')\n for key, value in settings.items():\n if isinstance(value, list):\n print(key, ' : ', value)\n\n # Create new folder to store results\n traj_dir = os.getcwd()\n # Read output path (if provided)\n if len(sys.argv) > 1:\n # Add trailing slash if missing\n dir_provided = os.path.join(sys.argv[1], '')\n # Check if provided directory exists\n if os.path.isdir(dir_provided):\n # Convert to full path\n traj_dir = os.path.abspath(dir_provided)\n else:\n print('WARNING: Output path not found, current directory will be used instead')\n else:\n print('WARNING: Output path not provided, current directory will be used instead')\n # Add time stamp (the final '' is to make sure there is a trailing slash)\n traj_dir = os.path.join(traj_dir, datetime.now().strftime(\"%Y_%m_%d_%Hh%Mm%Ss\"), '')\n # Create directory with time stamp\n os.makedirs(traj_dir)\n # Change current directory to the one containing the trajectory files\n os.chdir(traj_dir)\n print('Trajectory and results will be stored in: {0}'.format(traj_dir))\n\n # Create new pypet Trajectory object\n traj_filename = 'traj.hdf5'\n traj_fullpath = os.path.join(traj_dir, traj_filename)\n traj = Trajectory(filename=traj_fullpath)\n\n # -------------------------------------------------------------------\n # Add config parameters (those that DO NOT influence the final result of the experiment)\n traj.f_add_config('debug', False, comment='Activate debug mode')\n# #traj.f_add_config('max_mem_frac', 0.7, comment='Fraction of global GPU memory to use')\n\n # Set up trajectory parameters\n param_to_explore = {}\n for key, val in settings.items():\n if isinstance(val, list):\n param_to_explore[key] = val\n traj.f_add_parameter(key, val[0])\n else:\n traj.f_add_parameter(key, val)\n\n # Define parameter combinations to explore (a trajectory in\n # the parameter space). The second argument, the tuple, specifies the order\n # of the cartesian product.\n # The variable on the right most side changes fastest and defines the\n # 'inner for-loop' of the cartesian product\n explore_dict = cartesian_product(\n param_to_explore,\n tuple(param_to_explore.keys()))\n\n print(explore_dict)\n traj.f_explore(explore_dict)\n\n # Store trajectory parameters to disk\n pypet_utils.print_traj_leaves(\n traj,\n 'parameters',\n file=os.path.join(traj_dir, 'traj_parameters.txt'))\n\n # Store trajectory\n traj.f_store()\n\n # Define PBS script\n bash_lines = '\\n'.join([\n '#! 
/bin/bash',\n '#PBS -P InfoDynFuncStruct',\n '#PBS -l select=1:ncpus=1:mem=1GB',\n #'#PBS -l select=1:ncpus=1:ngpus=1:mem=1GB',\n '#PBS -M [email protected]',\n '#PBS -m abe',\n 'module load java',\n 'module load python/3.5.1',\n 'module load cuda/8.0.44',\n 'source /project/RDS-FEI-InfoDynFuncStruct-RW/Leo/idtxl_env/bin/activate',\n 'cd ${traj_dir}',\n 'python ${python_script_path} ${traj_dir} ${traj_filename} ${file_prefix} $PBS_ARRAY_INDEX'\n ])\n\n # Save PBS script file (automatically generated)\n bash_script_name = 'run_python_script.pbs'\n job_script_path = os.path.join(traj_dir, bash_script_name)\n with open(job_script_path, 'w', newline='\\n') as bash_file:\n bash_file.writelines(bash_lines)\n\n # Run job array\n job_walltime_hours = 0\n job_walltime_minutes = 5\n #after_job_array_ends = 1573895\n job_settings = {\n 'N': 'run_traj',\n 'l': 'walltime={0}:{1}:00'.format(job_walltime_hours, job_walltime_minutes),\n #'W': 'depend=afteranyarray:{0}[]'.format(after_job_array_ends),\n 'q': 'defaultQ'\n }\n if len(traj.f_get_run_names()) > 1:\n job_settings['J'] = '{0}-{1}'.format(0, len(traj.f_get_run_names()) - 1)\n\n job_args = {\n 'python_script_path': '/project/RDS-FEI-InfoDynFuncStruct-RW/Leo/inference/hpc_pypet_single_run.py',\n 'traj_dir': traj_dir,\n 'traj_filename': traj_filename,\n 'file_prefix': 'none'\n }\n run_job_array(job_script_path, job_settings, job_args)",
"def main():\n\t#first check args and file paths\n\tcheckArgs(args)\n\t\n\tdata = args.dataset_file\n\tf_name = data.split(\".\")\n\tprint \"\\n[AP]\\t\"+\"######## \"+f_name[0] + '.' + f_name[1]+\" ########\"\n\tprint \"\\n[AP]\\tChecked inputs, now acquiring data\"\n\n\thost = \"localhost\"\n\tuser = \"readonly\"\n\tpasswd = \"readonlypswd\"\n\tdb = args.db_schema\n\tdb_table = args.db_table\n\n\tnameFile = data[0:-20]\n\tdataset = queryDataset(host,user,passwd,db,db_table,\"tmpFile.txt\",nameFile)\n\tif dataset is not None:\n\t\tdataset = dataset.rstrip('\\n')\n\t\tdataset = dataset.replace(\"/\",\"-\")\n\n\t\tlocations_list, length_list = generateDataset(data)\n\n\t\tif len(locations_list) < 2:\n\t\t\tprint \"\\n[SKIP]\\t{dataset} has only one unique line! Can't estimate anything.\\n\\tSKIP THIS FILE!\\n\".format(dataset=str(dataset))\n\t\t\treturn 0\n\n\t\t# Alias for estAbund calling\n\t\testAbund = sonicLength.estAbund\n\n\t\t# Call estAbund and store returned object in results\n\t\tresults = estAbund(robjects.StrVector(locations_list), robjects.FloatVector(length_list))\n\n\t\t# Put estimation for theta in estimations_theta and associated locations in locations_theta; then organize data in dic_of_theta\n\t\ttheta = results.rx2(\"theta\")\n\t\testimations_theta = tuple(theta)\n\t\tlocations_theta = tuple(theta.names)\n\t\t# dic_of_theta\n\t\tdic_of_theta = {}\n\t\tfor i in range(len(locations_theta)):\n\t\t\tdic_of_theta.update({locations_theta[i]:estimations_theta[i]})\n\n\t\t# Put different fragment lengths in length_phi and associated frequencies in freq_phi\n\t\tphi = results.rx2(\"phi\")\n\t\tfreq_phi = tuple(phi)\n\t\tlength_phi = tuple(phi.names)\n\n\t\tlength_phi_numbers = fragmentsLengthPlot(length_phi,freq_phi,length_list,nameFile,dataset)\n\n\t\tprintThetaInfo(estimations_theta,locations_theta,nameFile)\n\n\t\t# Retrieving redundant reads data\n\t\tdic_of_redundant_reads_count, sequence_count_list = redundant_reads_count(from_file_to_list(data,'.tsv'))\n\n\t\t# Box Plot\n\t\tsequence_count = []\n\t\tfor v in sequence_count_list:\n\t\t\tsequence_count.append(int(v))\n\t\tbox_plot(sequence_count, estimations_theta, nameFile,dataset)\n\n\t\t# Plot: unique lengths retrieved for a genomic location VS expected number of parent fragment for the same location\n\t\tphi_VS_theta(length_phi, freq_phi, nameFile, dataset)\n\n\n\t\t#######################################################################################################\n\t\t# Produce .tsv output about measured redundant reads count, abundance-corrected redundant reads count # \n\t\t# and some descriptive of unique fragments lengths #\n\t\t#######################################################################################################\n\n\t\t# Retrieving data\n\t\tdic_of_relative_abundance, dic_of_corrected_reads_count, dic_of_percentage_difference = corrected_reads_count(dic_of_redundant_reads_count, dic_of_theta)\n\t\tdic_of_unique_lengths, dic_of_unique_lengths_number, dic_of_median_of_unique_lengths, dic_of_MAD = fragment_lengths_statistics(data)\n\t\tdic_of_lengths = lengths_explicit_list(from_file_to_list(data,'.txt'))\n\n\t\t# Writing File\n\t\tcorrected_file = open(dataset + \".\" + nameFile+\".outcomes\"+\".tsv\", 
'w')\n\t\tcorrected_file.write(\"Chromosome\\tIntegration_locus\\tStrand\\tSequence_Count\\tEstimated_Relative_Abundance\\tCorrected_Sequence_Count\\tPercentage_Variation\\tNumber_of_fragments_of_unique_lengths\\tLength_Min\\tLength_Max\\tLenght_Median\\tRounded_Lenght_Median\\tMAD\\tUnique_Lengths_List\\tUnique_Lengths_Amount\\tCEM_region_?\") ## ! NB ! ## \\tCEM_region_?\" has to remain the last!!!\n\t\tgenome_locations = dic_of_redundant_reads_count.keys()\n\t\tgenome_locations.sort()\n\t\tfor key in genome_locations:\n\t\t\tsplitted_location = key.split(' ')\n\t\t\tcorrected_file.write(\"\\n\" + splitted_location[0] + \"\\t\" + splitted_location[1] + \"\\t\" + splitted_location[2] + \"\\t\" + str(dic_of_redundant_reads_count[key]) + \"\\t\" + str(round(dic_of_relative_abundance[key],5)) + \"\\t\" + str(round(dic_of_corrected_reads_count[key],0)) + \"\\t\" + str(dic_of_percentage_difference[key]) + \"\\t\" + str(dic_of_unique_lengths_number[key]) + \"\\t\" + str(min(dic_of_unique_lengths[key])) + \"\\t\" + str(max(dic_of_unique_lengths[key])) + \"\\t\" + str(dic_of_median_of_unique_lengths[key]) + \"\\t\" + str(math.ceil(dic_of_median_of_unique_lengths[key]))+ \"\\t\" + str(dic_of_MAD[key]) + \"\\t\" + str(dic_of_unique_lengths[key]) + \"\\t\" + str(dic_of_lengths[key]))\n\t\t\tresponse, cem_symbol, cem_coordinates = is_CEM(key)\n\t\t\tif (response == True):\n\t\t\t\tcorrected_file.write(\"\\t\" + cem_symbol)\n\n\t\t# Write database file - Like corrected_file with more field appended in the end\n\t\tdb_file = open(dataset + \".\" + nameFile+\".db_file\"+\".tsv\", 'w')\n\t\tgenome_locations = dic_of_redundant_reads_count.keys()\n\t\tgenome_locations.sort()\n\t\tdataset_split = dataset.split('.')\n\t\tdataset_label = '_'.join(dataset_split)\n\t\tfor key in genome_locations:\n\t\t\tsplitted_location = key.split(' ')\n\t\t\tdb_file.write(splitted_location[0] + \"\\t\" + splitted_location[1] + \"\\t\" + splitted_location[2] + \"\\t\" + str(dic_of_redundant_reads_count[key]) + \"\\t\" + str(round(dic_of_relative_abundance[key],5)) + \"\\t\" + str(round(dic_of_corrected_reads_count[key],0)) + \"\\t\" + str(dic_of_percentage_difference[key]) + \"\\t\" + str(dic_of_unique_lengths_number[key]) + \"\\t\" + str(min(dic_of_unique_lengths[key])) + \"\\t\" + str(max(dic_of_unique_lengths[key])) + \"\\t\" + str(dic_of_median_of_unique_lengths[key]) + \"\\t\" + str(math.ceil(dic_of_median_of_unique_lengths[key]))+ \"\\t\" + str(dic_of_MAD[key]) + \"\\t\" + str(dic_of_unique_lengths[key])[1:-1] + \"\\t\" + str(dic_of_lengths[key])[1:-1] + \"\\t\")\n\t\t\tdb_file.write(\"\\t\".join(dataset_split) + \"\\t\" + dataset_label + \"\\t\")\n\t\t\tresponse, cem_symbol, cem_coordinates = is_CEM(key)\n\t\t\tif (response == True):\n\t\t\t\tdb_file.write(cem_symbol + \"\\t\" + cem_coordinates)\n\t\t\telse:\n\t\t\t\tdb_file.write(\"\\t\")\n\t\t\tdb_file.write(\"\\n\")\n\n\t\tdb_file.close()\n\n\t\t#######################################################################################################\n\n\t\t# Last print for user\n\t\tprint \"\\n[AP]\\tTask Finished, closing.\\n\"\n\telse:\n\t\tprint \"\\n[AP]\\tThe dataset is not in the reference DB. Skipped.\\n\"\n\n\treturn 0",
"def train(self, hyps):\n\n # Print Hyperparameters To Screen\n items = list(hyps.items())\n for k, v in sorted(items):\n print(k+\":\", v)\n\n # Make Save Files\n if \"save_folder\" in hyps:\n save_folder = hyps['save_folder']\n else:\n save_folder = \"./saved_data/\"\n\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n base_name = save_folder + hyps['exp_name']\n net_save_file = base_name+\"_net.p\"\n best_net_file = base_name+\"_best.p\"\n optim_save_file = base_name+\"_optim.p\"\n log_file = base_name+\"_log.txt\"\n if hyps['resume']: log = open(log_file, 'a')\n else: log = open(log_file, 'w')\n for k, v in sorted(items):\n log.write(k+\":\"+str(v)+\"\\n\")\n\n # Miscellaneous Variable Prep\n logger = Logger()\n shared_len = hyps['n_tsteps']*hyps['n_rollouts']\n env = gym.make(hyps['env_type'])\n obs = env.reset()\n prepped = hyps['preprocess'](obs)\n hyps['state_shape'] = [hyps['n_frame_stack']] + [*prepped.shape[1:]]\n if hyps['env_type'] == \"Pong-v0\":\n action_size = 3\n else:\n action_size = env.action_space.n*(hyps['env_type']!=\"Pong-v0\")\n hyps['action_shift'] = (4-action_size)*(hyps['env_type']==\"Pong-v0\") \n print(\"Obs Shape:,\",obs.shape)\n print(\"Prep Shape:,\",prepped.shape)\n print(\"State Shape:,\",hyps['state_shape'])\n print(\"Num Samples Per Update:\", shared_len)\n print(\"Samples Wasted in Update:\", shared_len % hyps['batch_size'])\n del env\n\n # Make Network\n net = hyps['model'](hyps['state_shape'],action_size,h_size=hyps['h_size'],bnorm=hyps['use_bnorm'])\n if hyps['resume']:\n net.load_state_dict(torch.load(net_save_file))\n base_net = copy.deepcopy(net)\n net = cuda_if(net)\n net.share_memory()\n base_net = cuda_if(base_net)\n\n # Prepare Shared Variables\n shared_data = {'states': cuda_if(torch.zeros(shared_len, *hyps['state_shape']).share_memory_()),\n 'rewards': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'deltas': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'dones': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'actions': torch.zeros(shared_len).long().share_memory_()}\n if net.is_recurrent:\n shared_data['h_states'] = cuda_if(torch.zeros(shared_len, hyps['h_size']).share_memory_())\n n_rollouts = hyps['n_rollouts']\n gate_q = mp.Queue(n_rollouts)\n stop_q = mp.Queue(n_rollouts)\n reward_q = mp.Queue(1)\n reward_q.put(-1)\n\n # Make Runners\n runners = []\n for i in range(hyps['n_envs']):\n runner = Runner(shared_data, hyps, gate_q, stop_q, reward_q)\n runners.append(runner)\n\n # Start Data Collection\n print(\"Making New Processes\")\n procs = []\n for i in range(len(runners)):\n proc = mp.Process(target=runners[i].run, args=(net,))\n procs.append(proc)\n proc.start()\n print(i, \"/\", len(runners), end='\\r')\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Make Updater\n updater = Updater(base_net, hyps)\n if hyps['resume']:\n updater.optim.load_state_dict(torch.load(optim_save_file))\n updater.optim.zero_grad()\n updater.net.train(mode=True)\n updater.net.req_grads(True)\n\n # Prepare Decay Precursors\n entr_coef_diff = hyps['entr_coef'] - hyps['entr_coef_low']\n epsilon_diff = hyps['epsilon'] - hyps['epsilon_low']\n lr_diff = hyps['lr'] - hyps['lr_low']\n\n # Training Loop\n past_rews = deque([0]*hyps['n_past_rews'])\n last_avg_rew = 0\n best_rew_diff = 0\n best_avg_rew = -1000\n epoch = 0\n T = 0\n while T < hyps['max_tsteps']:\n basetime = time.time()\n epoch += 1\n\n # Collect data\n for i in range(n_rollouts):\n stop_q.get()\n collection_time = time.time() - 
col_start_time\n\n T += shared_len\n\n # Reward Stats\n avg_reward = reward_q.get()\n reward_q.put(avg_reward)\n last_avg_rew = avg_reward\n if avg_reward > best_avg_rew:\n best_avg_rew = avg_reward\n updater.save_model(best_net_file, None)\n\n # Calculate the Loss and Update nets\n start_time = time.time()\n updater.update_model(shared_data)\n update_time = time.time() - start_time\n net.load_state_dict(updater.net.state_dict()) # update all collector nets\n \n # Resume Data Collection\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Decay HyperParameters\n if hyps['decay_eps']:\n updater.epsilon = (1-T/(hyps['max_tsteps']))*epsilon_diff + hyps['epsilon_low']\n print(\"New Eps:\", updater.epsilon)\n if hyps['decay_lr']:\n new_lr = (1-T/(hyps['max_tsteps']))*lr_diff + hyps['lr_low']\n updater.new_lr(new_lr)\n print(\"New lr:\", new_lr)\n if hyps['decay_entr']:\n updater.entr_coef = entr_coef_diff*(1-T/(hyps['max_tsteps']))+hyps['entr_coef_low']\n print(\"New Entr:\", updater.entr_coef)\n\n # Periodically save model\n if epoch % 10 == 0:\n updater.save_model(net_save_file, optim_save_file)\n\n # Print Epoch Data\n past_rews.popleft()\n past_rews.append(avg_reward)\n max_rew, min_rew = deque_maxmin(past_rews)\n updater.print_statistics()\n avg_action = shared_data['actions'].float().mean().item()\n print(\"Epoch\", epoch, \"– T =\", T)\n print(\"Grad Norm:\",float(updater.norm),\"– Avg Action:\",avg_action,\"– Best AvgRew:\",best_avg_rew)\n print(\"Avg Rew:\", avg_reward, \"– High:\", max_rew, \"– Low:\", min_rew, end='\\n')\n updater.log_statistics(log, T, avg_reward, avg_action, best_avg_rew)\n updater.info['AvgRew'] = avg_reward\n logger.append(updater.info, x_val=T)\n\n # Check for memory leaks\n gc.collect()\n max_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(\"Time:\", time.time()-basetime, \"– Collection:\", collection_time, \"– Update:\", update_time)\n if 'hyp_search_count' in hyps and hyps['hyp_search_count'] > 0 and hyps['search_id'] != None:\n print(\"Search:\", hyps['search_id'], \"/\", hyps['hyp_search_count'])\n print(\"Memory Used: {:.2f} memory\\n\".format(max_mem_used / 1024))\n\n logger.make_plots(base_name)\n log.write(\"\\nBestRew:\"+str(best_avg_rew))\n log.close()\n # Close processes\n for p in procs:\n p.terminate()\n return best_avg_rew",
"def run_model():\n\n # Read in boundary from ordered sts file\n event_sts = anuga.create_sts_boundary(project.event_sts)\n\n # Reading the landward defined points, this incorporates the original\n # clipping polygon minus the 100m contour\n landward_boundary = anuga.read_polygon(project.landward_boundary_file)\n\n # Combine sts polyline with landward points\n bounding_polygon_sts = event_sts + landward_boundary\n\n # Number of boundary segments\n num_ocean_segments = len(event_sts) - 1\n # Number of landward_boundary points\n num_land_points = anuga.file_length(project.landward_boundary_file)\n\n # Boundary tags refer to project.landward_boundary_file\n # 4 points equals 5 segments start at N\n boundary_tags={'back': range(num_ocean_segments+1,\n num_ocean_segments+num_land_points),\n 'side': [num_ocean_segments,\n num_ocean_segments+num_land_points],\n 'ocean': range(num_ocean_segments)}\n\n # Build mesh and domain\n log.debug('bounding_polygon_sts=%s' % str(bounding_polygon_sts))\n log.debug('boundary_tags=%s' % str(boundary_tags))\n log.debug('project.bounding_maxarea=%s' % str(project.bounding_maxarea))\n log.debug('project.interior_regions=%s' % str(project.interior_regions))\n log.debug('project.mesh_file=%s' % str(project.mesh_file))\n\n domain = anuga.create_domain_from_regions(bounding_polygon_sts,\n boundary_tags=boundary_tags,\n maximum_triangle_area=project.bounding_maxarea,\n interior_regions=project.interior_regions,\n mesh_filename=project.mesh_file,\n use_cache=False,\n verbose=False)\n\n domain.geo_reference.zone = project.zone_number\n log.info('\\n%s' % domain.statistics())\n\n domain.set_name(project.scenario)\n domain.set_datadir(project.output_folder)\n domain.set_minimum_storable_height(0.01) # Don't store depth less than 1cm\n\n # set friction in interior regions, if any defined\n friction_list = []\n for (irtype, filename, friction) in project.interior_regions_list:\n if irtype.lower() == 'friction':\n friction_list.append([filename, friction])\n if friction_list:\n log.debug('friction_list=%s' % str(friction_list))\n poly_friction = []\n for (fname, friction) in friction_list:\n full_fname = os.path.join(project.polygons_folder, fname)\n log.debug('Reading friction polygon: %s' % full_fname)\n poly = anuga.read_polygon(full_fname)\n poly_friction.append((poly, friction))\n log.debug('poly=%s' % str(poly))\n domain.set_quantity('friction',\n anuga.Polygon_function(poly_friction,\n default=project.friction,\n geo_reference=domain.geo_reference))\n\n # Set the initial stage in the offcoast region only\n if project.land_initial_conditions:\n IC = anuga.Polygon_function(project.land_initial_conditions,\n default=project.initial_tide,\n geo_reference=domain.geo_reference)\n else:\n IC = project.initial_tide\n\n domain.set_quantity('stage', IC, use_cache=True, verbose=False)\n domain.set_quantity('friction', project.friction)\n domain.set_quantity('elevation',\n filename=project.combined_elevation_file,\n use_cache=True, verbose=False, alpha=project.alpha)\n\n # Setup boundary conditions\n log.debug('Set boundary - available tags: %s' % domain.get_boundary_tags())\n\n Br = anuga.Reflective_boundary(domain)\n Bt = anuga.Transmissive_stage_zero_momentum_boundary(domain)\n Bd = anuga.Dirichlet_boundary([project.initial_tide, 0, 0])\n Bf = anuga.Field_boundary(project.event_sts+'.sts',\n domain, mean_stage=project.initial_tide, time_thinning=1,\n default_boundary=anuga.Dirichlet_boundary([0, 0, 0]),\n boundary_polygon=bounding_polygon_sts,\n use_cache=True, 
verbose=False)\n\n domain.set_boundary({'back': Br,\n 'side': Bt,\n 'ocean': Bf})\n\n # Evolve system through time\n t0 = time.time()\n for t in domain.evolve(yieldstep=project.yieldstep,\n finaltime=project.finaltime,\n skip_initial_step=False):\n if Logger:\n Logger(domain.timestepping_statistics())\n log.info('\\n%s' % domain.timestepping_statistics())\n log.info('\\n%s' % domain.boundary_statistics(tags='ocean'))\n\n log.info('Simulation took %.2f seconds' % (time.time()-t0))",
"def train(self)->None:",
"def set_shunt_model(self, model):\r\n print('\\nSet shunt model')\r\n\r\n self.shunt_model = model\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))"
] |
[
"0.63875747",
"0.5779599",
"0.56435364",
"0.5643399",
"0.5585187",
"0.55371416",
"0.55039304",
"0.55021054",
"0.55002487",
"0.5457778",
"0.54443103",
"0.5432549",
"0.54273343",
"0.5426112",
"0.54110813",
"0.54018486",
"0.53810084",
"0.53796864",
"0.5377646",
"0.535681",
"0.5354632",
"0.53513867",
"0.53509617",
"0.5330933",
"0.5325307",
"0.5319753",
"0.53162175",
"0.53140575",
"0.5302485",
"0.5299011"
] |
0.69317263
|
0
|
APSIM model water uptake
|
def water_uptake_apsim(self, soil):
soil_wat_avail = np.zeros(soil.total_layers)
soil_wat_supply = np.zeros(soil.total_layers)
daily_ref_evap_transp = soil.daily_ref_evap_transp
transp_pot = daily_ref_evap_transp * self.light_intercpt
# Water available in each layer [mm]
for lyr in soil.layers:
soil_wat_avail[lyr] = ((soil.water_content[lyr] -
soil.perm_wilt_point[lyr]) *
soil.layer_thickness[lyr] *
soil.WATER_DENSITY)
# Water supply
for lyr in soil.layers:
soil_wat_supply[lyr] = (soil_wat_avail[lyr] * soil.kl[lyr])
# Water uptake (no supply or demand)
if (soil_wat_supply.sum() <= 0) or (transp_pot <= 0):
for lyr in soil.layers:
self.water_uptake[lyr] = 0
else:
# Water uptake (water is not limiting)
if transp_pot < soil_wat_supply.sum():
# distribute demand proportionately to the water supply
for lyr in soil.layers:
self.water_uptake[lyr] = (soil_wat_supply[lyr] /
soil_wat_supply.sum() *
transp_pot)
else:
# Water uptake (water is limiting)
for lyr in soil.layers:
self.water_uptake[lyr] = soil_wat_supply[lyr]
self.att_transp = self.water_uptake.sum() # mm/day
self.cum_transp += self.att_transp # mm
self.transp_ratio = self.att_transp / transp_pot
self.expect_transp = transp_pot
self.cum_pot_transp += transp_pot
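A minimal, runnable sketch of the same supply/demand arithmetic follows, handy for checking the proportional split by hand. MockSoil, MockCrop and every numeric value in them are illustrative assumptions, not part of the model above; only the formulas mirror the method.

import numpy as np

# Hypothetical stand-ins for the soil/crop objects the method expects (all values are assumptions).
class MockSoil:
    total_layers = 3
    water_content = np.array([0.30, 0.25, 0.20])    # volumetric water content [m3/m3]
    perm_wilt_point = np.array([0.10, 0.10, 0.10])  # permanent wilting point [m3/m3]
    layer_thickness = np.array([0.2, 0.3, 0.5])     # [m]
    WATER_DENSITY = 1000.0                          # doubles as the m-to-mm conversion in this arithmetic
    kl = np.array([0.08, 0.06, 0.04])               # APSIM-style uptake coefficient per layer
    daily_ref_evap_transp = 5.0                     # reference evapotranspiration [mm/day]

class MockCrop:
    light_intercpt = 0.6                            # fraction of light intercepted by the canopy

soil, crop = MockSoil(), MockCrop()

transp_pot = soil.daily_ref_evap_transp * crop.light_intercpt   # potential transpiration [mm/day]
avail = (soil.water_content - soil.perm_wilt_point) * soil.layer_thickness * soil.WATER_DENSITY
supply = avail * soil.kl                                        # water each layer can deliver [mm/day]

if supply.sum() <= 0 or transp_pot <= 0:
    uptake = np.zeros(soil.total_layers)          # nothing demanded or nothing available
elif transp_pot < supply.sum():
    uptake = supply / supply.sum() * transp_pot   # demand-limited: split demand in proportion to supply
else:
    uptake = supply.copy()                        # supply-limited: each layer gives all it can

print(transp_pot, supply, uptake, uptake.sum())
# transp_pot = 3.0 mm/day, per-layer supply = [3.2, 2.7, 2.0] mm/day, uptake sums back to 3.0 mm/day

Note that the method above divides att_transp by transp_pot unconditionally when setting transp_ratio, so callers are expected to keep transp_pot positive (as in this sketch) or handle the resulting division by zero themselves.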
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
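The objective block above declares that each record is consumed as (query, document, negatives) triplets. The snippet below is an illustrative guess at that pairing, using only the field names present in this dump; the record_to_triplets helper and the truncated example strings are hypothetical, not part of the dataset.

# Hypothetical expansion of the "triplet": [["query", "document", "negatives"]] objective:
# each negative is paired with the record's query (anchor) and document (positive).
def record_to_triplets(record):
    """Turn one dump record into (anchor, positive, negative) training triples."""
    return [(record["query"], record["document"], neg) for neg in record["negatives"]]

example = {
    "query": "APSIM model water uptake",
    "document": "def water_uptake_apsim(self, soil): ...",  # the code shown above, truncated here
    "negatives": ["def water_uptake_dssat(self, soil): ...", "def train(self)->None:"],
}
for anchor, positive, negative in record_to_triplets(example):
    print(anchor, "->", positive[:30], "vs", negative[:30])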
|
[
"def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 = TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n #p_run.prune_layer_no_retraining(layer, thresholds[i])",
"def water_uptake_dssat(self, soil):\r\n CONV1 = 1e-4 # convert m/m3 to cm/cm3\r\n CONV2 = 100 # convert m to cm\r\n CONV3 = 10 # convert cm to mm\r\n\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n root_dens = self.root_dens * CONV1 #cm root / cm3 soil\r\n CONST1 = 1.3e-3\r\n CONST2 = np.zeros(soil.total_layers)\r\n CONST3 = 7.01\r\n layer_thickness = soil.layer_thickness * CONV2\r\n water_uptake = np.zeros(soil.total_layers)\r\n # Constant 2\r\n for lyr in soil.layers:\r\n CONST2[lyr] = 120 - 250 * soil.perm_wilt_point[lyr]\r\n if soil.perm_wilt_point[lyr] > 0.3:\r\n CONST2[lyr] = 45\r\n # Water uptake per unit root length\r\n for lyr in soil.layers:\r\n if root_dens[lyr] <= 0.00001 or (soil.water_content[lyr] <=\r\n soil.perm_wilt_point[lyr]):\r\n water_uptake[lyr] = 0\r\n else:\r\n water_uptake[lyr] = (CONST1 * math.exp(min((CONST2[lyr] *\r\n (soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr])), 40)) /\r\n (CONST3 - math.log(root_dens[lyr])))\r\n water_uptake[lyr] = min(water_uptake[lyr],\r\n self.dssat_max_water_uptake)\r\n # Water uptake in [cm/d] volume\r\n water_uptake[lyr] = (water_uptake[lyr] * layer_thickness[lyr] *\r\n root_dens[lyr])\r\n # Water uptake in [mm/d] volume\r\n water_uptake[lyr] = water_uptake[lyr] * CONV3\r\n # Total water uptake [mm/d]\r\n crop_transp = water_uptake.sum()\r\n min_transp = min(transp_pot, crop_transp)\r\n # Update crop arrays\r\n for lyr in soil.layers:\r\n if min_transp > 0:\r\n self.water_uptake[lyr] = (water_uptake[lyr] *\r\n (min_transp / crop_transp))\r\n else:\r\n self.water_uptake[lyr] = 0\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp",
"def main():\n\n\n #parse arguments (measuring data or not, name of text file, input pins, smoothing out, arduino board, usb port)\n parser = argparse.ArgumentParser(description = \"Train your Arduino.\")\n parser.add_argument('-m', dest='measuring', default='True', nargs='?',\n help = 'measuring new data True/False (default: True)')\n parser.add_argument('-n', dest = 'file_name', default = 'trainingData.txt', nargs='?',\n help = 'name of .txt file for saving or reading data (default: trainingData.txt)')\n parser.add_argument('-p', dest='pins', nargs='+', default=['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7'],\n help='input pins (default: A1 A2 A3 A4 A5 A6 A7)')\n parser.add_argument('-s', dest='smoothing', default=0, nargs='?',\n help='maximum value to smooth out to zero in measured data (default: 0)')\n parser.add_argument('-b', dest='board', default=\"arduino:mbed:nano33ble\", nargs='?',\n help='arduino board (default: arduino:mbed:nano33ble)')\n parser.add_argument('-usb', dest='port', default=\"COM22\", nargs='?',\n help='usb port connected with board (default: COM22')\n\n\n parse_args = parser.parse_args()\n file_name = parse_args.file_name\n #make sure it will be a text file\n if file_name[-4:] != '.txt':\n file_name = file_name + '.txt'\n\n\n pins = parse_args.pins\n board = parse_args.board\n port = parse_args.port\n smoothing = int(parse_args.smoothing)\n\n #potentially measure new data, or skip\n measuring = parse_args.measuring\n if measuring.lower() != 'false':\n measure_success = measure.main(file_name, port, board, pins)\n else:\n measure_success = True\n\n if measure_success is True:\n\n\n #compute centroids\n ordered_centroids, class_labels = centroid.main(file_name, pins, smoothing)\n\n\n #write ino script\n create_classification_script(ordered_centroids, list(class_labels), pins, smoothing)\n\n #automatically upload classification script to board\n try:\n subprocess.run([\"arduino-cli\", \"compile\", \"--fqbn\", board, \"arduino_scripts/predict\"], check=True)\n except subprocess.CalledProcessError:\n print(\"Can't compile predict file\")\n\n try:\n subprocess.run([\"arduino-cli\", \"upload\", \"-p\", port, \"--fqbn\", board, \"arduino_scripts/predict\"], check=True)\n print(\"Wait for port\")\n time.sleep(3)\n print(\"Done\")\n except subprocess.CalledProcessError:\n print(\"Can't upload sketch\")\n\n\n\n\n return None",
"def __init__(self, travel_model_dir_name, mode='full', years_to_run=None, procedure_file=\"opus.par\"):\n\n\ttravel_model_configuration = {}\n\t\n\ttravel_model_configuration.update( {'visum_version_number': 10} )\n\t\n\t### mapping from visum matrice name to urbansim travel_data variable name\n\t## dict key is used as matrix number for VisumPy.helpers.GetODMatrix and VisumPy.helpers.GetSkimMatrix\n\t## dict value is used as attribute name for urbansim travel_data table\n\ttm_to_urbansim_variables = {\n\t'od':{\n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1:'transit_trips', #'transit (PuT - public transport) trips',\n\t2:'auto_trips', #'auto trips',\n\t}, \n\t'skim':{ \n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1: 'auto_travel_time', #'auto assigned travel time (ttc)',\n\t2: 'transit_in_vehicle_time' #'PuT in-vehicle time (ivt)',\n\t} \n\t}\n \n\t### TAZ attributes to be transferred from urbansim to visum\n\turbansim_to_tm_variables = [\n\t 'TAZ=(zone.zone_id).astype(int16)',\n\t 'retail_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_retail)', \n\t ## the employment groups below need to be defined in employment_adhoc_sector_groups and \n\t ## employment_adhoc_sector_group_definitions before they can be used\n\t #'fires_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_fires)',\n\t #'gov_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_gov)',\n\t #\"educ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_educ)\",\n\t #\"wtcu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_wtcu)\",\n\t #\"manu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_manu)\",\n\t #\"univ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_univ)\",\n\t ## need to change income categories to 4 instead of 3\n\t \"low_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_low_income_households)\",\n\t \"mid_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_mid_income_households)\",\n\t #\"upper_mid_income_hh_by_taz=?\",\n\t \"upper_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_high_income_households)\",\n\t ## need variable specification\n\t #\"pctmf=?\",\n\t #\"gqi=?\",\n\t #\"gqn=?\",\n\t #\"fteuniv=?\",\n\t #\"density=?\"\n ]\n \n\ttravel_model_configuration.update( {\n\t \"tm_to_urbansim_variables\":tm_to_urbansim_variables,\n\t \"urbansim_to_tm_variables\":urbansim_to_tm_variables,\n\t} )\n\t\n\tself.__add_models(travel_model_configuration, mode)\n\tself.__add_years(travel_model_configuration, travel_model_dir_name, years_to_run, procedure_file)\n\n\tself.merge(travel_model_configuration)",
"def train():\n pass",
"def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return",
"def train(self, hyps):\n\n # Print Hyperparameters To Screen\n items = list(hyps.items())\n for k, v in sorted(items):\n print(k+\":\", v)\n\n # Make Save Files\n if \"save_folder\" in hyps:\n save_folder = hyps['save_folder']\n else:\n save_folder = \"./saved_data/\"\n\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n base_name = save_folder + hyps['exp_name']\n net_save_file = base_name+\"_net.p\"\n best_net_file = base_name+\"_best.p\"\n optim_save_file = base_name+\"_optim.p\"\n log_file = base_name+\"_log.txt\"\n if hyps['resume']: log = open(log_file, 'a')\n else: log = open(log_file, 'w')\n for k, v in sorted(items):\n log.write(k+\":\"+str(v)+\"\\n\")\n\n # Miscellaneous Variable Prep\n logger = Logger()\n shared_len = hyps['n_tsteps']*hyps['n_rollouts']\n env = gym.make(hyps['env_type'])\n obs = env.reset()\n prepped = hyps['preprocess'](obs)\n hyps['state_shape'] = [hyps['n_frame_stack']] + [*prepped.shape[1:]]\n if hyps['env_type'] == \"Pong-v0\":\n action_size = 3\n else:\n action_size = env.action_space.n*(hyps['env_type']!=\"Pong-v0\")\n hyps['action_shift'] = (4-action_size)*(hyps['env_type']==\"Pong-v0\") \n print(\"Obs Shape:,\",obs.shape)\n print(\"Prep Shape:,\",prepped.shape)\n print(\"State Shape:,\",hyps['state_shape'])\n print(\"Num Samples Per Update:\", shared_len)\n print(\"Samples Wasted in Update:\", shared_len % hyps['batch_size'])\n del env\n\n # Make Network\n net = hyps['model'](hyps['state_shape'],action_size,h_size=hyps['h_size'],bnorm=hyps['use_bnorm'])\n if hyps['resume']:\n net.load_state_dict(torch.load(net_save_file))\n base_net = copy.deepcopy(net)\n net = cuda_if(net)\n net.share_memory()\n base_net = cuda_if(base_net)\n\n # Prepare Shared Variables\n shared_data = {'states': cuda_if(torch.zeros(shared_len, *hyps['state_shape']).share_memory_()),\n 'rewards': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'deltas': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'dones': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'actions': torch.zeros(shared_len).long().share_memory_()}\n if net.is_recurrent:\n shared_data['h_states'] = cuda_if(torch.zeros(shared_len, hyps['h_size']).share_memory_())\n n_rollouts = hyps['n_rollouts']\n gate_q = mp.Queue(n_rollouts)\n stop_q = mp.Queue(n_rollouts)\n reward_q = mp.Queue(1)\n reward_q.put(-1)\n\n # Make Runners\n runners = []\n for i in range(hyps['n_envs']):\n runner = Runner(shared_data, hyps, gate_q, stop_q, reward_q)\n runners.append(runner)\n\n # Start Data Collection\n print(\"Making New Processes\")\n procs = []\n for i in range(len(runners)):\n proc = mp.Process(target=runners[i].run, args=(net,))\n procs.append(proc)\n proc.start()\n print(i, \"/\", len(runners), end='\\r')\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Make Updater\n updater = Updater(base_net, hyps)\n if hyps['resume']:\n updater.optim.load_state_dict(torch.load(optim_save_file))\n updater.optim.zero_grad()\n updater.net.train(mode=True)\n updater.net.req_grads(True)\n\n # Prepare Decay Precursors\n entr_coef_diff = hyps['entr_coef'] - hyps['entr_coef_low']\n epsilon_diff = hyps['epsilon'] - hyps['epsilon_low']\n lr_diff = hyps['lr'] - hyps['lr_low']\n\n # Training Loop\n past_rews = deque([0]*hyps['n_past_rews'])\n last_avg_rew = 0\n best_rew_diff = 0\n best_avg_rew = -1000\n epoch = 0\n T = 0\n while T < hyps['max_tsteps']:\n basetime = time.time()\n epoch += 1\n\n # Collect data\n for i in range(n_rollouts):\n stop_q.get()\n collection_time = time.time() - 
col_start_time\n\n T += shared_len\n\n # Reward Stats\n avg_reward = reward_q.get()\n reward_q.put(avg_reward)\n last_avg_rew = avg_reward\n if avg_reward > best_avg_rew:\n best_avg_rew = avg_reward\n updater.save_model(best_net_file, None)\n\n # Calculate the Loss and Update nets\n start_time = time.time()\n updater.update_model(shared_data)\n update_time = time.time() - start_time\n net.load_state_dict(updater.net.state_dict()) # update all collector nets\n \n # Resume Data Collection\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Decay HyperParameters\n if hyps['decay_eps']:\n updater.epsilon = (1-T/(hyps['max_tsteps']))*epsilon_diff + hyps['epsilon_low']\n print(\"New Eps:\", updater.epsilon)\n if hyps['decay_lr']:\n new_lr = (1-T/(hyps['max_tsteps']))*lr_diff + hyps['lr_low']\n updater.new_lr(new_lr)\n print(\"New lr:\", new_lr)\n if hyps['decay_entr']:\n updater.entr_coef = entr_coef_diff*(1-T/(hyps['max_tsteps']))+hyps['entr_coef_low']\n print(\"New Entr:\", updater.entr_coef)\n\n # Periodically save model\n if epoch % 10 == 0:\n updater.save_model(net_save_file, optim_save_file)\n\n # Print Epoch Data\n past_rews.popleft()\n past_rews.append(avg_reward)\n max_rew, min_rew = deque_maxmin(past_rews)\n updater.print_statistics()\n avg_action = shared_data['actions'].float().mean().item()\n print(\"Epoch\", epoch, \"– T =\", T)\n print(\"Grad Norm:\",float(updater.norm),\"– Avg Action:\",avg_action,\"– Best AvgRew:\",best_avg_rew)\n print(\"Avg Rew:\", avg_reward, \"– High:\", max_rew, \"– Low:\", min_rew, end='\\n')\n updater.log_statistics(log, T, avg_reward, avg_action, best_avg_rew)\n updater.info['AvgRew'] = avg_reward\n logger.append(updater.info, x_val=T)\n\n # Check for memory leaks\n gc.collect()\n max_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(\"Time:\", time.time()-basetime, \"– Collection:\", collection_time, \"– Update:\", update_time)\n if 'hyp_search_count' in hyps and hyps['hyp_search_count'] > 0 and hyps['search_id'] != None:\n print(\"Search:\", hyps['search_id'], \"/\", hyps['hyp_search_count'])\n print(\"Memory Used: {:.2f} memory\\n\".format(max_mem_used / 1024))\n\n logger.make_plots(base_name)\n log.write(\"\\nBestRew:\"+str(best_avg_rew))\n log.close()\n # Close processes\n for p in procs:\n p.terminate()\n return best_avg_rew",
"def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)",
"def optim_optuna(modelname=\"model_dl.1_lstm.py\", \n pars= {}, \n df = None,\n optim_method=\"normal/prune\",\n save_folder=\"/mymodel/\", log_folder=\"\",ntrials=2) :\n \n module = module_load(modelname) \n\n def objective(trial):\n param_dict = module.get_params(choice=\"test\", ncol_input=df.shape[1], ncol_output=df.shape[1])\n for t,p in pars.items():\n pres = None\n #p = pars[t]\n x = p['type']\n \n if x=='log_uniform':\n pres = trial.suggest_loguniform(t,p['range'][0], p['range'][1])\n \n elif x=='int':\n pres = trial.suggest_int(t,p['range'][0], p['range'][1])\n \n elif x=='categorical':\n pres = trial.suggest_categorical(t,p['value'])\n \n elif x=='discrete_uniform':\n pres = trial.suggest_discrete_uniform(t, p['init'],p['range'][0],p['range'][1])\n \n elif x=='uniform':\n pres = trial.suggest_uniform(t,p['range'][0], p['range'][1])\n \n else:\n raise Exception('Not supported type {}'.format(p['type']))\n\n param_dict[t] = pres\n \n model = module.Model(**param_dict)\n sess = module.fit(model,df)\n stats = model.stats[\"loss\"]\n del sess\n del model\n tf.reset_default_graph()\n return stats\n \n if optim_method=='prune':\n study = optuna.create_study(pruner=optuna.pruners.MedianPruner())\n else:\n study = optuna.create_study() # Create a new study.\n \n \"\"\"\n optuna create-study --study-name \"distributed-example\" --storage \"sqlite:///example.db\"\n \n https://optuna.readthedocs.io/en/latest/tutorial/distributed.html\n if __name__ == '__main__':\n study = optuna.load_study(study_name='distributed-example', storage='sqlite:///example.db')\n study.optimize(objective, n_trials=100)\n \n \n \n \"\"\"\n study.optimize(objective, n_trials=ntrials) # Invoke optimization of the objective function.\n param_dict = study.best_params\n param_dict.update(module.get_params(choice=\"test\", ncol_input=df.shape[1], \n ncol_output=df.shape[1]))\n \n ### Run best model\n model = module.Model(**param_dict)\n sess = module.fit(model,df)\n \n #### Saving \n modelname = modelname.replace(\".\", \"_\") # this is the module name which contains .\n save_folder = save_folder + \"/\" + modelname\n if not(os.path.isdir(save_folder)):\n os.makedirs(save_folder)\n file_path = os.path.join(save_folder,modelname+'.ckpt')\n\n save(sess,file_path)\n\n\n ### Update with Best values\n study_trials = study.trials_dataframe()\n study_trials.to_csv(os.path.join(save_folder,modelname+'_study.csv'))\n \n param_dict[\"best_value\"] = study.best_value\n param_dict[\"file_path\"] = file_path \n json.dump( param_dict, os.path.join(save_folder, modelname+'_params.csv') )\n \n return param_dict",
"def uav_example():\n env = holodeck.make(\"UrbanCity-MaxDistance\")\n\n # This line can be used to change the control scheme for an agent\n # env.agents[\"uav0\"].set_control_scheme(ControlSchemes.UAV_ROLL_PITCH_YAW_RATE_ALT)\n\n for i in range(10):\n env.reset()\n\n # This command tells the UAV to not roll or pitch, but to constantly yaw left at 10m altitude.\n command = np.array([0, 0, 2, 1000])\n for _ in range(1000):\n state, reward, terminal, _ = env.step(command)\n # To access specific sensor data:\n pixels = state[\"RGBCamera\"]\n velocity = state[\"VelocitySensor\"]\n # For a full list of sensors the UAV has, consult the configuration file \"InfiniteForest-MaxDistance.json\"\n\n # You can control the AgentFollower camera (what you see) by pressing V to toggle spectator\n # mode. This detaches the camera and allows you to move freely about the world.\n # You can also press C to snap to the location of the camera to see the world from the perspective of the\n # agent. See the Controls section of the ReadMe for more details.",
"def unsup_train_exp(model, criterions, optimizer, scheduler, dataloaders,unsup_path,margin,num_epochs=10, vis=None):\n print('>> Fine-tune a Model.')\n best_roc = 0.0\n \n iters = 0\n plot_data = {'X': [], 'Y': [], 'legend': ['Sup. Loss', 'Unsup. Loss', 'Tot. Loss']}\n\n for epoch in range(num_epochs):\n scheduler.step()\n # Training\n model.train()\n for i, sup_data in enumerate(dataloaders['sup_train']):\n unsup_data = dataloaders['unsup_train'][i % len(dataloaders['unsup_train'])]\n sup_inputs = sup_data[0]\n sup_labels = sup_data[1].cuda()\n unsup_inputs = unsup_data[0]\n data_inputs=torch.cat((sup_inputs,unsup_inputs),axis=0).cuda()\n \n # unsup_labels = unsup_data[1].cuda()\n iters += 1\n\n optimizer.zero_grad()\n out_1, out_2 = model(data_inputs)\n sup_out_1,sup_out_2=out_1[:sup_inputs.shape[0]],out_2[:sup_inputs.shape[0]]\n unsup_out_1,unsup_out_2=out_1[sup_inputs.shape[0]:],out_2[sup_inputs.shape[0]:]\n\n loss_sup = criterions['sup'](sup_out_1, sup_labels) + criterions['sup'](sup_out_2, sup_labels) # Step A\n\n loss_unsup = criterions['unsup'](unsup_out_1, unsup_out_2,margin) # Step B\n loss = loss_unsup + loss_sup\n loss.backward()\n optimizer.step()\n\n # visualize\n if (iters % 10 == 0) and (vis != None) and (plot_data != None):\n plot_data['X'].append(iters)\n plot_data['Y'].append([\n loss_sup.item(),\n loss_unsup.item(),\n loss.item()\n ])\n vis.line(\n X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),\n Y=np.array(plot_data['Y']),\n opts={\n 'title': 'Loss over Time',\n 'legend': plot_data['legend'],\n 'xlabel': 'Iterations',\n 'ylabel': 'Loss',\n 'width': 1200,\n 'height': 390,\n },\n win=2\n )\n \"\"\"\n path_dir='/'.join(unsup_path.split('/')[:-1])\n if not os.path.exists(path_dir):\n os.makedirs(path_dir)\n torch.save(model.state_dict(),'{}'.format(unsup_path))\n print('Model saved.')\n \"\"\"\n # Validate\n model.eval()\n labels = torch.zeros((0, )).cuda() # initialize\n discs = torch.zeros((0, )).cuda() \n with torch.no_grad():\n for i, (input, label,cls) in enumerate(dataloaders['unsup_val']):\n inputs = input.cuda()\n label = label.cuda()\n\n out_1, out_2 = model(inputs)\n score_1 = F.softmax(out_1, dim=1)\n score_2 = F.softmax(out_2, dim=1)\n disc = torch.sum(torch.abs(score_1 - score_2), dim=1).reshape((label.shape[0], ))\n\n discs=torch.cat((discs,disc))\n label=label.reshape((label.shape[0], )).float()\n labels=torch.cat((labels,label))\n\n #labels = 1 - labels\n\n labels = labels.cpu()\n discs = discs.cpu()\n roc = evaluate(labels, discs, metric='roc',save_to='./pic.png')\n print('Epoch{} AUROC: {:.3f}'.format(epoch, roc))\n if best_roc < roc:\n best_roc = roc\n path_dir='/'.join(unsup_path.split('/')[:-1])\n if not os.path.exists(path_dir):\n os.makedirs(path_dir)\n torch.save(model.state_dict(),'{}'.format(unsup_path))\n print('Model saved.')\n print('>> Finished.')",
"def train(self)->None:",
"def train():\n # YOUR TRAINING CODE GOES HERE",
"def create_umap(name):\n\tglobal dir\n\tdirec = dir + \"/\" + name + \"/\"\n\tos.chdir(direc + \"representations/\")\n\t\n\t# Palette size of 2x50 required. 1-49 for labeled nat data, 51-100 for labeled syn data, 50 for unlabeled nat data\n\tpalette = sns.color_palette(\"Blues_d\", 30)# Syn data in blue\n\tpalette.extend(sns.dark_palette(\"purple\", 20)) # Unimportant, just a filler\n\tpalette.extend(sns.color_palette(\"Reds_d\", 30))# Nat data in red\n\tpalette.extend(sns.dark_palette(\"purple\", 20))# Unimportant, just a filler\n\tpalette[49]=\"#50B689\"# Unlabeled nat data in green\n\t# print(\"size of palette \" + str(len(palette)))\n\t\n\tfor file in glob.glob(\"*.pt\"):\n\t\t\trepresentation = torch.load(file)\n\t\t\ttarfile = file[:-3] # Removes the .pt ending\n\t\t\ttarfile = \"tar\" + tarfile[4:] + \".log\"\n\t\t\tall_targets = []\n\t\t\twith open(tarfile, \"r\") as f:\n\t\t\t\tfor tar in f:\n\t\t\t\t\tall_targets.append(float(tar.strip()))\n\n\t\t\tsns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t\treducer = umap.UMAP()\n\t\t\tembedding = reducer.fit_transform(representation.cpu())\n\t\t\t\n\t\t\tprint(\"scattering\")\n\t\t\t# print(all_targets)\n\t\t\tplt.scatter(embedding[:, 0], embedding[:, 1], c=[palette[int(y-1)] for y in all_targets], alpha=0.8)\n\t\t\tplt.gca().set_aspect('equal', 'datalim')\n\t\t\tplt.title('UMAP projection of cell data', fontsize=24);\n\t\t\tplt.savefig(\"./umap_\" + str(file[4:-3]) + \".png\")\n\t\t\tplt.clf()\n\tos.chdir(\"../../../../\")",
"def eval_model(t,lat,lon,head,pitch,tide=0,temp=None,press=None):\n #get the sun positions for each timestamp, at our known lat,lon\n #sun_head, sun_zen = sunpos_mag(t,lat,lon,tide,temp,press,radians=True)\n sun_head = sunpos_mag(t, lat, lon, tide, temp, press, radians=True)\n sun_zen = sun_head[...,1]\n sun_head = sun_head[...,0]\n\n #TODO: input and output argument mismatch\n #get the ocean model aop values for each camera position\n aop = oceanaop(sun_head,sun_zen,head,pitch,1.33)\n return sun_zen,sun_head,aop",
"def main():\n tpd_file_name = get_nonexisting_file(\"Enter name of new tpd file: \")\n tpd = TrainPredictData(tpd_file_name)\n\n print \"You can now enter the file paths of the the newly created tpd file.\"\n print \"If you want to skip a data set, just press enter without typing anything.\"\n\n train_raw_path = get_existing_file(\"Enter training raw path: \", skip=True)\n if train_raw_path is not None:\n train_raw_key = extract_h5_key(train_raw_path, \"Enter training raw h5 key: \")\n tpd.set_train_raw(train_raw_path, train_raw_key)\n\n train_gt_path = get_existing_file(\"Enter training gt path: \", skip=True)\n if train_gt_path is not None:\n train_gt_key = extract_h5_key(train_gt_path, \"Enter training gt h5 key: \")\n tpd.set_train_gt(train_gt_path, train_gt_key)\n\n train_pred_path = get_existing_file(\"Enter training pred path: \", skip=True)\n if train_pred_path is not None:\n train_pred_key = extract_h5_key(train_pred_path, \"Enter training pred h5 key: \")\n tpd.set_train_pred(train_pred_path, train_pred_key)\n\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n while train_feat_path is not None:\n train_feat_key = extract_h5_key(train_feat_path, \"Enter training feature path: \")\n tpd.add_train_feature(train_feat_path, train_feat_key)\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n\n test_raw_path = get_existing_file(\"Enter test raw path: \", skip=True)\n if test_raw_path is not None:\n test_raw_key = extract_h5_key(test_raw_path, \"Enter test raw h5 key: \")\n tpd.set_test_raw(test_raw_path, test_raw_key)\n\n test_gt_path = get_existing_file(\"Enter test gt path: \", skip=True)\n if test_gt_path is not None:\n test_gt_key = extract_h5_key(test_gt_path, \"Enter test gt h5 key: \")\n tpd.set_test_gt(test_gt_path, test_gt_key)\n\n test_pred_path = get_existing_file(\"Enter test pred path: \", skip=True)\n if test_pred_path is not None:\n test_pred_key = extract_h5_key(test_pred_path, \"Enter test pred h5 key: \")\n tpd.set_test_pred(test_pred_path, test_pred_key)\n\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n while test_feat_path is not None:\n test_feat_key = extract_h5_key(test_feat_path, \"Enter test feature path: \")\n tpd.add_test_feature(test_feat_path, test_feat_key)\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n\n return 0",
"def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n 
cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )",
"def finetuning_single(phase,token2id_dict,id2embedding_dict,inference,dataloaders,model,optimizer,device,weighted_sampling,criterion,classification,auxiliary_loss=False,attn_loss=False,epoch_count=None,new_task_epochs=None,trial=None,goal='IC',save_path_dir=None): #b/c it is single, models_list contains one model only\n running_loss = 0.0\n \n# outputs_list = []\n# representations_list = []\n# labels_list = []\n# modality_list = []\n# indices_list = []\n# task_names_list = []\n# attn_coefs_list = []\n# sentence_lens_list = []\n# class_labels_list = []\n# class_predictions_list = []\n \n \"\"\" Initialize Dictionaries to Store Results \"\"\" \n outputs_dict = dict()\n representations_dict = dict()\n attn_coefs_dict = dict()\n labels_dict = dict()\n sentence_lens_dict = dict()\n class_labels_dict = dict()\n class_predictions_dict = dict()\n epoch_bleu = dict()\n epoch_rouge = dict()\n epoch_meteor = dict()\n\n for dest_lang in token2id_dict.keys():\n outputs_dict[dest_lang] = list()\n attn_coefs_dict[dest_lang] = list()\n representations_dict[dest_lang] = list()\n labels_dict[dest_lang] = list()\n sentence_lens_dict[dest_lang] = list()\n class_labels_dict[dest_lang] = list()\n class_predictions_dict[dest_lang] = list()\n epoch_bleu[dest_lang] = 0\n epoch_rouge[dest_lang] = 0\n epoch_meteor[dest_lang] = 0\n\n batch_num = 0\n batch = 0\n #class label is that in IC setting, but class label is answer in VQA setting\n for inputs, text_indices, sentence_lens, class_labels, languages, document_level_text_indices, document_level_sentence_lens in tqdm(dataloaders[phase]):\n \"\"\" Weaning Off of Teacher Forcing in a Linear Manner \"\"\"\n #sampling_prob = (0.4/30000)*(batch+1)*(epoch_count+1)\n #uniform_value = np.random.uniform(0,1)\n #sampling = True if uniform_value < sampling_prob else False\n sampling = False\n batch += 1\n \"\"\" Send Data to Device \"\"\"\n inputs = inputs.to(device)\n class_labels = class_labels.to(device)\n #print(text_indices)\n with torch.set_grad_enabled('train1' in phase):# and inference == False): #('train' in phase and inference == False)\n \"\"\" Image Captioning Path \"\"\"\n if goal == 'IC':\n \"\"\" Perform Forward Pass i.e. 
Encoder and Decoder \"\"\"\n current_labels_dict = dict() #text\n# current_class_labels_dict = dict()\n# current_class_predictions_dict = dict()\n current_outputs_dict = dict()\n# current_attn_coefs_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, representations = model(inputs,current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n #current_text_indices = current_text_indices[:,1:] # B x (S-1)\n if phase == 'train1':\n attn_coefs = 5\n class_predictions = 6\n loss = calculate_IC_loss(criterion,outputs,current_text_indices[:,1:],class_predictions,class_labels,attn_coefs,auxiliary_loss,attn_loss)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n current_labels_dict[dest_lang] = current_text_indices[:,1:].cpu().detach().numpy()\n# current_class_labels_dict[dest_lang] = class_labels\n# current_class_predictions_dict[dest_lang] = class_predictions\n current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n# current_attn_coefs_dict[dest_lang] = attn_coefs\n# current_representations_dict[dest_lang] = representations\n #\"\"\" Detach Outputs and Attn Coefs To Avoid Memory Leakage \"\"\"\n #outputs = outputs.detach()\n #attn_coefs = attn_coefs.detach()\n current_text_indices.detach()\n elif goal == 'VQA':\n \"\"\" Perform Forward Pass and Get Answers \"\"\"\n outputs, representations, attn_coefs, class_predictions = model(inputs,text_indices,sentence_lens,id2embedding_dict,phase,device)\n \"\"\" Calculate MSE Loss \"\"\"\n #criterion = nn.MSELoss()\n #class_labels = class_labels.type(torch.float)\n \"\"\" Calculate CrossEntropyLoss \"\"\"\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n #print(outputs,outputs.shape)\n loss = criterion(outputs,class_labels)\n elif goal == 'Supervised': #encoder supervised pre-training\n h, representations, class_predictions = model(inputs)#,text_indices,sentence_lens,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n loss = criterion(class_predictions,class_labels)\n elif goal == 'Text_Supervised':\n #h, class_predictions = model.supervised_forward(text_indices,sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n class_predictions = model.supervised_forward(current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],phase,device)\n loss = criterion(class_predictions,class_labels)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == 
list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n\n current_class_labels_dict[dest_lang] = class_labels.cpu().detach().numpy()\n current_class_predictions_dict[dest_lang] = class_predictions.cpu().detach().numpy()\n# current_representations_dict[dest_lang] = h\n #loss = criterion(class_predictions,class_labels)\n #print(loss)\n elif goal == 'Language_Change_Detection':\n criterion = nn.BCEWithLogitsLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_change_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.float) #needed for BCELoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n loss = 0\n for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n current_loss = criterion(replacement_prediction,replacement_label)\n loss = loss + current_loss\n if i == len(replacement_predictions)-1:\n loss = loss / len(replacement_predictions)\n #loss = torch.mean(torch.tensor([criterion(replacement_prediction,replacement_label) for replacement_prediction,replacement_label in zip(replacement_predictions,replacement_labels)]))\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h \n elif goal == 'Language_Detection':\n criterion = nn.CrossEntropyLoss(ignore_index=0)\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.long) #needed for CrossEntropyLoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n# loss = 0\n# for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n# replacement_label = replacement_label.type(torch.long)\n# current_loss = criterion(replacement_prediction,replacement_label)\n# loss = loss + current_loss\n# if i == len(replacement_predictions)-1:\n# loss = loss / len(replacement_predictions)\n #print(replacement_predictions.shape,replacement_labels.shape)\n loss = criterion(replacement_predictions.permute(0,2,1),replacement_labels)\n #print(loss)\n total_loss = total_loss + loss\n #print(dest_lang,total_loss)\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if 
dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h\n elif goal == 'MLM':\n criterion = nn.CrossEntropyLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, replacement_predictions = model.MLM_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Obtain Applicable Loss Locations (i.e., Where Token Was Masked) \"\"\"\n token_loss_mask = torch.where(replacement_predictions == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n #print(outputs.shape)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n token_loss = criterion(outputs.permute(0,2,1),current_text_indices)\n \"\"\" Retrieve Only Relevant Losses (Masked) \"\"\"\n loss = torch.mean(token_loss.masked_select(token_loss_mask))\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n del current_text_indices\n del token_loss\n del token_loss_mask\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n elif goal == 'ELECTRA':\n generator_criterion = nn.CrossEntropyLoss(reduction='none')\n discriminator_criterion = nn.BCEWithLogitsLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Perform Forward Pass Through ELECTRA \"\"\"\n generator_outputs, generator_labels, discriminator_outputs, discriminator_labels = model.ELECTRA_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Generator Loss Mask (i.e., Only Consider Originally Masked Tokens ) \"\"\"\n generator_token_loss_mask = torch.where(generator_labels == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n \"\"\" Discrimiantor Loss Mask (i.e., Do Not Consider Padded Regions ) \"\"\"\n discriminator_labels = discriminator_labels.view_as(discriminator_outputs) 
\n discriminator_token_loss_mask = torch.ones_like(discriminator_labels)\n for i,sentence_len in zip(range(discriminator_token_loss_mask.shape[0]),current_sentence_lens):\n discriminator_token_loss_mask[i,sentence_len:] = 0\n \n #if phase == 'train1':\n \"\"\" Obtain Each Generator Token's Loss \"\"\"\n generator_token_loss = generator_criterion(generator_outputs.permute(0,2,1),current_text_indices) # B x S\n #print(generator_token_loss.shape,generator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n generator_loss = torch.mean(generator_token_loss.masked_select(generator_token_loss_mask)) #scalar\n \n \"\"\" Obtain Each Discriminator Token's Loss \"\"\" \n discriminator_token_loss = discriminator_criterion(discriminator_outputs,discriminator_labels) # B x S\n #print(discriminator_token_loss.shape,discriminator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n discriminator_loss = torch.mean(discriminator_token_loss.masked_select(discriminator_token_loss_mask.type(torch.bool))) #scalar\n \n #print(generator_loss,discriminator_loss)\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + generator_loss + discriminator_loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = discriminator_labels.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = discriminator_outputs.cpu().detach().numpy() #text\n elif goal == 'MARGE':\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n #total_loss = 0\n #for (dest_lang,current_text_indices),current_sentence_lens,current_languages in zip(text_indices.items(),sentence_lens.values(),languages.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Randomly Choose Target Lang for This Mini-Batch \"\"\"\n #lang_list = list(text_indices.keys())\n #target_lang = random.sample(lang_list,1).item()\n #target_lang = 'de' #option to change based on dataset (MUST CHANGE IN PAD COLLATE)\n outputs, target_lang = model(text_indices,sentence_lens,languages,document_level_text_indices,document_level_sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = text_indices[target_lang].to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n loss = criterion(outputs.permute(0,2,1),current_text_indices)\n #print(loss)\n #\"\"\" Aggregate Loss Across Languages \"\"\"\n #total_loss = total_loss + loss\n #\"\"\" Average Loss if This is Final Loss Collected \"\"\"\n #if dest_lang == list(text_indices.keys())[-1]:\n # loss = total_loss / len(text_indices)\n# print(loss)\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[target_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[target_lang] = outputs.cpu().detach().numpy() #text\n \n\n \"\"\" Backpropagation and Update Step \"\"\"\n if phase == 'train1': #only perform backprop for train1 phase \n loss.backward()\n \n \"\"\" Network Parameters \"\"\"\n if isinstance(optimizer,tuple):\n optimizer[0].step()\n \"\"\" Task-Instance Parameters \"\"\"\n optimizer[1].step() \n optimizer[0].zero_grad()\n optimizer[1].zero_grad()\n else:\n optimizer.step()\n 
optimizer.zero_grad()\n \n \"\"\" Calculate Metrics \"\"\"\n if goal == 'IC':\n if phase == 'train1':\n running_loss += loss.item() * inputs.shape[0]\n elif goal == 'VQA':\n running_loss += loss.item() * inputs.shape[0] \n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n running_loss += loss.item() * inputs.shape[0] \n \n# \"\"\" These Need to be Language Specific \"\"\"\n \n if goal in ['IC']:\n batch_bleu = calculate_bleu_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_rouge = calculate_rouge_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_meteor = calculate_meteor_score(current_outputs_dict,current_labels_dict,token2id_dict) \n \n for dest_lang in batch_bleu.keys():\n epoch_bleu[dest_lang] = epoch_bleu[dest_lang] + (1/batch)*(batch_bleu[dest_lang] - epoch_bleu[dest_lang])\n epoch_rouge[dest_lang] = epoch_rouge[dest_lang] + (1/batch)*(batch_rouge[dest_lang] - epoch_rouge[dest_lang])\n epoch_meteor[dest_lang] = epoch_meteor[dest_lang] + (1/batch)*(batch_meteor[dest_lang] - epoch_meteor[dest_lang])\n \n if phase in ['val']:\n for dest_lang in text_indices.keys():\n predicted_sentences = convert_predicted_ids_to_sentences(current_outputs_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n target_sentences = convert_target_ids_to_sentences(current_labels_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n outputs_dict[dest_lang].extend(predicted_sentences)\n labels_dict[dest_lang].extend(target_sentences)\n \n elif goal in ['Language_Change_Detection','Language_Detection']:\n for dest_lang in text_indices.keys():\n if goal in ['Language_Change_Detection','Language_Detection']:\n \"\"\" Store Batch Data in The Dictionaries \"\"\"\n class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n \n# elif goal in ['Text_Supervised']:\n## current_class_labels = current_class_labels_dict[dest_lang]\n## current_class_predictions = current_class_predictions_dict[dest_lang]\n## current_class_labels = current_class_labels.cpu().detach().numpy()\n## current_class_predictions = current_class_predictions.cpu().detach().numpy()\n# \n# \"\"\" Store Batch Data in The Dictionaries \"\"\"\n# #sentence_lens_dict[dest_lang].extend(current_sentence_lens)\n# class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n#\n# elif goal in ['MARGE']:\n# labels_dict[target_lang].extend(current_labels_dict[target_lang]) #.cpu().detach().numpy())\n# outputs_dict[target_lang].extend(current_outputs_dict[target_lang]) #.cpu().detach().numpy())\n# break # because only one target language per minibatch \n# if goal not in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n## if current_labels_dict[dest_lang].data.dtype != torch.long:\n## current_labels_dict[dest_lang].data = current_labels_dict[dest_lang].data.type(torch.long)\n# \n## current_text_indices = current_labels_dict[dest_lang]\n## current_outputs = current_outputs_dict[dest_lang]\n## current_attn_coefs = current_attn_coefs_dict[dest_lang]\n## current_representations = current_representations_dict[dest_lang]\n# \"\"\" Store Batch Data in The Dictionaries \"\"\" \n# 
labels_dict[dest_lang].extend(current_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# outputs_dict[dest_lang].extend(current_outputs_dict[dest_lang]) #.cpu().detach().numpy())\n## attn_coefs_dict[dest_lang].extend(current_attn_coefs.cpu().detach().numpy())\n## representations_dict[dest_lang].extend(current_representations.cpu().detach().numpy())\n## elif goal in ['Text_Supervised']:\n## current_representations = current_representations_dict[dest_lang]\n## representations_dict[dest_lang].extend(current_representations.squeeze().cpu().detach().numpy()) \n## else:\n## current_representations = current_representations_dict[dest_lang]\n## if goal in ['Language_Change_Detection','Language_Detection']:\n## current_representations = [representations.cpu().detach().numpy() for representations in current_representations]\n## else:\n## current_representations = current_representations.cpu().detach().numpy()\n## representations_dict[dest_lang].extend(current_representations) \n# \n## modality_list.append(modality)\n## indices_list.append(indices)\n## task_names_list.append(task_names)\n \n batch_num += 1\n #if batch_num == 2:\n # break\n \n #outputs_list, labels_list, modality_list, indices_list, task_names_list, pids_list = flatten_arrays(outputs_list,labels_list,modality_list,indices_list,task_names_list,pids_list)\n if goal == 'IC':\n if phase == 'train1':\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n else:\n epoch_loss = 0 #filler\n elif goal in ['VQA','Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n epoch_loss = running_loss / len(dataloaders[phase].dataset) \n \n \"\"\" Removed Recently \"\"\"\n #representations_list = np.concatenate(representations_list)\n \n if goal == 'IC':\n \"\"\" BLEU Score Evaluation \"\"\"\n# epoch_bleu = calculate_bleu_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_rouge = calculate_rouge_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_meteor = calculate_meteor_score(outputs_dict,labels_dict,token2id_dict) \n return epoch_loss, epoch_bleu, epoch_rouge, epoch_meteor, outputs_dict, labels_dict #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal == 'VQA':\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(outputs_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n if goal in ['Language_Change_Detection','Language_Detection']:\n epoch_acc = calculate_language_detection_accuracy(class_predictions_dict,class_labels_dict,goal)\n else:\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(class_predictions_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['MLM','ELECTRA','MARGE']:\n return epoch_loss#, outputs_dict, labels_dict #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list",
"def main():\n print(\"Getting Urban Observatory data...\")\n config = get_config()\n save_dir = config[\"save_dir\"]\n lad20cd = lad20nm_to_lad20cd(config[\"la\"])\n uo_dir = config[\"urb_obs\"][\"save_dir\"]\n uo_name = config[\"urb_obs\"][\"filename\"]\n save_path = Path(save_dir, lad20cd, uo_dir, uo_name)\n os.makedirs(save_path.parent, exist_ok=True)\n\n uo_sensors = save_uo_sensors(lad20cd, save_path)\n add_n_uo_sensors_to_config(uo_sensors, config)",
"def E_step_precompute(self, model_params, my_suff_stat, my_data):",
"def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('usrtrack', type=str, help='ustsuw binary output')\n parser.add_argument('root', type=str, nargs='?', help='output ROOT file name', default=\"\")\n parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose', help='print what is being done')\n \n args = parser.parse_args()\n\n if not path.isfile(args.usrtrack):\n print(\"ustsuw2root: File %s does not exist.\" % args.usrtrack, file=sys.stderr)\n return 1\n\n if args.root == \"\":\n rootFileName = \"%s%s\" % (args.usrtrack,\".root\")\n else:\n rootFileName = args.root\n \n b = Usrtrack()\n b.readHeader(args.usrtrack)\n\n ND = len(b.detector)\n \n if args.verbose:\n #b.sayHeader()\n for i in range(ND):\n b.printHeader(i)\n print(\"\")\n\n fout = ROOT.TFile(rootFileName, \"recreate\")\n for i in range(ND):\n val = Data.unpackArray(b.readData(i,b.detector[i].lowneu))\n err = Data.unpackArray(b.readStat(i,b.detector[i].lowneu))\n\n det = b.detector[i]\n\n h = hist(det)\n hn = histN(det) # filled only if det.lowneu\n \n n = h.GetNbinsX()\n print(n, len(val), det.ne, val)\n\n for i in range(det.ne):\n h.SetBinContent(i+1, val[i])\n\n for i in range(det.ne):\n h.SetBinError(i+1, err[n-i-1]*val[i])\n\n h.SetEntries(b.weight)\n h.Write()\n if det.lowneu:\n hn.Write()\n\n fout.Close()",
"def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. 
Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0",
"def skel_model(action, install_path_mp, install_path_zfs, jname):\n # init vars\n # mp - mount point, zfs - zfs point\n skel_path_mp = '%s-SKELETON' % install_path_mp\n skel_path_zfs = '%s-SKELETON' % install_path_zfs\n rw_path_mp = '%s-RW' % install_path_mp\n rw_path_zfs = '%s-RW' % install_path_zfs\n \n if action == 'init':\n# create SKELETON MODEL\n# http://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails-application.html\n log(\" INFO: Init BASE-SKELETON zfs START\")\n# Create a skeleton for the read-write portion of the system\n os.system('zfs create %s' % skel_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (skel_path_mp, skel_path_zfs))\n os.system('zfs create %s' % rw_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (rw_path_mp, rw_path_zfs))\n\n os.system('mkdir -p %s/home %s/usr-X11R6 %s/distfiles %s/usr-share-keys/pkg' % (skel_path_mp, skel_path_mp, skel_path_mp, skel_path_mp))\n os.system('mv %s/etc %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/usr/local %s/usr-local' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/tmp %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/var %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/root %s' % (install_path_mp, skel_path_mp ))\n# mergemaster to install missing configuration files. Then, remove the the extra directories that mergemaster creates:\n# os.system('mergemaster -t %s/var/tmp/temproot -D %s -i' % (skel_path, skel_path))\n# os.system('rm -R %(key)s/bin %(key)s/boot %(key)s/lib %(key)s/libexec %(key)s/mnt %(key)s/proc %(key)s/rescue %(key)s/sbin %(key)s/sys %(key)s/usr %(key)s/dev' % {'key': skel_path})\n# Now, symlink the read-write file system to the read-only file system. Ensure that the symlinks are created in the correct s/ locations as the creation of directories in the wrong locations will cause the installation to fail.\n os.chdir('%s' % install_path_mp)\n os.system('mkdir SROOT')\n os.system('ln -s SROOT/etc etc')\n os.system('ln -s SROOT/home home')\n os.system('ln -s SROOT/root root')\n os.system('ln -s /SROOT/usr-local usr/local')\n os.system('ln -s /SROOT/usr-share-keys usr/share/keys')\n os.system('ln -s /SROOT/usr-X11R6 usr/X11R6')\n os.system('ln -s /SROOT/distfiles usr/ports/distfiles')\n os.system('ln -s SROOT/tmp tmp')\n os.system('ln -s SROOT/var var')\n# Create a generic /home/j/skel/etc/make.conf containing this line\n os.system('echo \\\"WRKDIRPREFIX?= /SROOT/portbuild\\\" > %s/etc/make.conf' % skel_path_mp )\n# Create zfs BASE-SKELETON snapshot which will be used for installation \n os.system('zfs snapshot %s@install' % skel_path_zfs)\n log(\" INFO: Init BASE-SKELETON zfs FINISH\")\n \n# install SKELETON jail \n if action == 'install':\n# install RW fs for jail\n os.system('zfs send %s/BASE-SKELETON@install | zfs receive -F %s/BASE-RW/%s' % (jzfs, jzfs, jname))\n# remove receive snapshot \n os.system('zfs destroy %s/BASE-RW/%s@install' % (jzfs, jname))\n# create jail local config - mount skel model for jail hosme dir\n if jname == 'BASE-update':\n os.system('echo \\\"%sBASE %s%s nullfs rw 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n else:\n os.system('echo \\\"%sBASE %s%s nullfs ro 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n \n os.system('echo \\\"%sBASE-RW/%s %s%s/SROOT nullfs rw 0 0\\\" >> %sBASE-RW/%s/etc/fstab' % (jpath, jname, jpath, jname, jpath, jname))\n temp_add_cfg = ['### BASE mount settings ###', 'mount.fstab=\"%sBASE-RW/%s/etc/fstab\";' % (jpath, jname), 'mount.devfs;']\n return 
temp_add_cfg",
"def main(u_net_settings):\n model = build_u_net(*u_net_settings)\n print(model.summary())",
"def main():\n # Model setup\n source = np.array([1500, 8, 10, 5]) # assume source concentration and 3D coordinates\n u, pg_stability = 2, 'F' # setup environment\n sample_path = r\"data/ObservedData.csv\"\n # Build model object\n func = GaussianPlumeEAAI(lower=(10, -500, -500, 0), upper=(5000, 500, 500, 10), u=u,\n pg_stability=pg_stability, sample_path=sample_path)\n # Generate sample observed data\n func.generate_observed_data(source[0], source[1], source[2], source[3])\n\n # Reverse search source use observed data and PSO (assume unknown the source)\n pso_search_with_recommended_param(func)\n pso_search_with_optimized_param(func)",
"def run(self,\n altitude: float,\n day_of_year: float,\n local_time: float,\n latitude: float,\n longitude: float,\n f107: float,\n f107m: float,\n kp1: float,\n kp2: float,\n get_uncertainty: bool = False\n ):\n\n output_file = tempfile.NamedTemporaryFile(\n delete=False, suffix=\".out\", prefix=\"swami_\", mode=\"r+\")\n\n data_dtm = str(self.path_to_data)\n data_dtm = data_dtm + \"/\" if data_dtm[-1] != \"/\" else data_dtm\n data_um = str(os.path.join(self.path_to_data, \"um\"))\n data_um = data_um + \"/\" if data_um[-1] != \"/\" else data_um\n\n is_mcm = True if self.model is _AtmModel.MCM else False\n is_dtm = True if self.model is _AtmModel.DTM2020 else False\n is_um = True if self.model is _AtmModel.UM else False\n\n input_dict = {\n \"altitude\": float(altitude),\n \"day_of_year\": float(day_of_year),\n \"local_time\": float(local_time),\n \"latitude\": float(latitude),\n \"longitude\": float(longitude),\n \"f107\": float(f107),\n \"f107m\": float(f107m),\n \"kp1\": float(kp1),\n \"kp2\": float(kp2),\n \"bMCM\": is_mcm,\n \"bDTM\": is_dtm,\n \"bUM\": is_um,\n \"bUMstd\": bool(get_uncertainty), # and is_um,\n \"bDTMunc\": bool(get_uncertainty), # and is_dtm,\n \"data_dtm\": data_dtm,\n \"data_um\": data_um,\n \"output_file\": str(output_file.name)\n }\n\n input_file = self._generate_nml_from_dict(input_dict)\n\n cmd = [str(self.path_to_bin), input_file]\n\n proc = subprocess.run(cmd, check=True)\n\n out = self._read_output_file(output_file.name)\n out[\"_input\"] = input_dict\n\n os.unlink(input_file)\n os.unlink(output_file.name)\n\n return out",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):"
] |
[
"0.5799173",
"0.56623626",
"0.5652759",
"0.5641658",
"0.5605927",
"0.5587602",
"0.55677724",
"0.5556802",
"0.55124027",
"0.55102175",
"0.5488853",
"0.5479511",
"0.5422332",
"0.5416626",
"0.54125524",
"0.5384084",
"0.53825665",
"0.53712916",
"0.5370684",
"0.5368271",
"0.5364906",
"0.5339671",
"0.5317651",
"0.53130746",
"0.5311721",
"0.53023577",
"0.53018427",
"0.53018427",
"0.53018427",
"0.53018427"
] |
0.6915624
|
0
|
WOFOST model water uptake
|
def water_uptake_wofost(self, soil):
daily_ref_evap_transp = soil.daily_ref_evap_transp
transp_pot = daily_ref_evap_transp * self.light_intercpt
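        # Editor note (assumption): DROUGHT_CAT appears to be the WOFOST crop
        # drought-sensitivity group (groups run 1-5) passed to p_wofost() to
        # pick the soil-water depletion fraction p; group 4 is assumed here.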
DROUGHT_CAT = 4
p_value = p_wofost(transp_pot, DROUGHT_CAT)
# WOFOST does not account for different layers
for lyr in soil.layers:
            # Root fraction values are overwritten so the profile behaves as if
            # there were only one soil layer
self.root_fraction[lyr] = soil.layer_thickness[lyr]
crit_soil_moist = (
(1 - p_value) * (soil.field_capacity[lyr] -
soil.perm_wilt_point[lyr]) +
soil.perm_wilt_point[lyr])
stress_fact = (
(soil.water_content[lyr] - soil.perm_wilt_point[lyr]) /
(crit_soil_moist - soil.perm_wilt_point[lyr]))
if stress_fact > 1:
stress_fact = 1
if stress_fact < 0:
stress_fact = 0
self.water_uptake[lyr] = (stress_fact * self.root_fraction[lyr] *
transp_pot)
self.att_transp = self.water_uptake.sum() # mm/day
self.cum_transp += self.att_transp
self.expect_transp = transp_pot
self.cum_pot_transp += self.expect_transp
self.transp_ratio = self.att_transp / transp_pot
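
# --- Illustrative sketch (editor addition, not part of the original row) ---
# A minimal, standalone version of the WOFOST-style soil-moisture stress
# factor computed in the loop above, assuming p_value is the depletion
# fraction already returned by a helper such as p_wofost(). The function
# name and argument names are hypothetical; only the formula mirrors the
# code above.
def wofost_stress_factor(water_content, field_capacity, perm_wilt_point, p_value):
    # critical soil moisture below which transpiration starts to be reduced
    crit_soil_moist = ((1 - p_value) * (field_capacity - perm_wilt_point)
                       + perm_wilt_point)
    # linear ramp: 0 at the wilting point, 1 at (or above) the critical moisture
    stress_fact = ((water_content - perm_wilt_point) /
                   (crit_soil_moist - perm_wilt_point))
    return min(max(stress_fact, 0.0), 1.0)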
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each layer [mm]\r\n for lyr in soil.layers:\r\n soil_wat_avail[lyr] = ((soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr]) *\r\n soil.layer_thickness[lyr] *\r\n soil.WATER_DENSITY)\r\n # Water supply\r\n for lyr in soil.layers:\r\n soil_wat_supply[lyr] = (soil_wat_avail[lyr] * soil.kl[lyr])\r\n\r\n # Water uptake (no supply or demand)\r\n if (soil_wat_supply.sum() <= 0) or (transp_pot <= 0):\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = 0\r\n else:\r\n # Water uptake (water is not limiting)\r\n if transp_pot < soil_wat_supply.sum():\r\n # distribute demand proportionately to the water supply\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (soil_wat_supply[lyr] /\r\n soil_wat_supply.sum() *\r\n transp_pot)\r\n else:\r\n # Water uptake (water is limiting)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = soil_wat_supply[lyr]\r\n\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += transp_pot",
"def water_uptake_dssat(self, soil):\r\n CONV1 = 1e-4 # convert m/m3 to cm/cm3\r\n CONV2 = 100 # convert m to cm\r\n CONV3 = 10 # convert cm to mm\r\n\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n root_dens = self.root_dens * CONV1 #cm root / cm3 soil\r\n CONST1 = 1.3e-3\r\n CONST2 = np.zeros(soil.total_layers)\r\n CONST3 = 7.01\r\n layer_thickness = soil.layer_thickness * CONV2\r\n water_uptake = np.zeros(soil.total_layers)\r\n # Constant 2\r\n for lyr in soil.layers:\r\n CONST2[lyr] = 120 - 250 * soil.perm_wilt_point[lyr]\r\n if soil.perm_wilt_point[lyr] > 0.3:\r\n CONST2[lyr] = 45\r\n # Water uptake per unit root length\r\n for lyr in soil.layers:\r\n if root_dens[lyr] <= 0.00001 or (soil.water_content[lyr] <=\r\n soil.perm_wilt_point[lyr]):\r\n water_uptake[lyr] = 0\r\n else:\r\n water_uptake[lyr] = (CONST1 * math.exp(min((CONST2[lyr] *\r\n (soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr])), 40)) /\r\n (CONST3 - math.log(root_dens[lyr])))\r\n water_uptake[lyr] = min(water_uptake[lyr],\r\n self.dssat_max_water_uptake)\r\n # Water uptake in [cm/d] volume\r\n water_uptake[lyr] = (water_uptake[lyr] * layer_thickness[lyr] *\r\n root_dens[lyr])\r\n # Water uptake in [mm/d] volume\r\n water_uptake[lyr] = water_uptake[lyr] * CONV3\r\n # Total water uptake [mm/d]\r\n crop_transp = water_uptake.sum()\r\n min_transp = min(transp_pot, crop_transp)\r\n # Update crop arrays\r\n for lyr in soil.layers:\r\n if min_transp > 0:\r\n self.water_uptake[lyr] = (water_uptake[lyr] *\r\n (min_transp / crop_transp))\r\n else:\r\n self.water_uptake[lyr] = 0\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp",
"def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 = TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n #p_run.prune_layer_no_retraining(layer, thresholds[i])",
"def water_uptake_feddes(self, soil):\r\n\r\n # Value of the pressure head, below which roots extract water at the\r\n # maximum possible rate\r\n P1 = soil.field_capacity_water_potential.mean()#-25 # J/kg\r\n P3 = soil.perm_wilt_point_pot.mean()#-8000 # J/kg wilting point\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n for lyr in soil.layers:\r\n stress_fact = feddes_stress_factor(transp_pot,\r\n soil.water_potential[lyr],\r\n self.P0, P1, self.P2L, self.P2H,\r\n P3, self.R2H, self.R2L)\r\n self.water_uptake[lyr] = (stress_fact * self.root_fraction[lyr] *\r\n transp_pot)\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp\r\n self.transp_ratio = self.att_transp / transp_pot",
"def train(self)->None:",
"def o_wo_per_head(self):\n assert self.ff % self.heads == 0\n # fuse ff->e and projection layer of self-attention\n return (self.ff // (self.heads-self.padded_heads)) + self.qkv",
"def set_shunt_model(self, model):\r\n print('\\nSet shunt model')\r\n\r\n self.shunt_model = model\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))",
"def setup_pwn(name,pwndata,phase, free_radius=5, tempdir=None, emin=1.0e2, emax=1.0e5,maxroi=10,model=None,**kwargs):\n sources=yaml.load(open(pwndata))\n\n catalog_name=sources[name]['catalog']\n ltcube=sources[name]['ltcube']\n pulsar_position=SkyDir(*sources[name]['dir'])\n ft2=sources[name]['ft2']\n ft1=sources[name]['ft1']\n\n # in case no list was passed\n if len(phase)==2 and isinstance(phase[0],numbers.Real) and \\\n isinstance(phase[1],numbers.Real):\n\n # write in case phase wraps around.\n if phase[0]>phase[1]:\n phase=[[phase[0],1.0],[0.0,phase[1]]]\n else:\n phase = [phase] \n\n phase_factor=get_phase_factor(phase)\n print \"phase\"\n print phase\n print \"phase_factor=%.2f\"%phase_factor\n\n catalog=FermiCatalog(e(\"$FERMI/catalogs/gll_psc_v02.fit\"),free_radius=free_radius)\n catalog_source=[i for i in catalog.get_sources(SkyDir(),180) if i.name==catalog_name][0]\n\n center=catalog_source.skydir\n\n if tempdir is None: tempdir=mkdtemp(prefix='/scratch/')\n\n binfile=j(tempdir,'binned_phased.fits')\n\n # apply phase cut to ft1 file\n phased_ft1 = j(tempdir,'ft1_phased.fits')\n phasetools.phase_cut(ft1,phased_ft1,phaseranges=phase)\n\n # create a temporary ltcube scaled by the phase factor\n# phased_ltcube=j(tempdir,'phased_ltcube.fits')\n# phase_ltcube(ltcube,phased_ltcube, phase=phase)\n phased_ltcube=ltcube\n from uw.like.pointspec import DataSpecification\n data_specification = DataSpecification(\n ft1files = phased_ft1,\n ft2files = ft2,\n ltcube = phased_ltcube,\n binfile = binfile)\n\n spectral_analysis = SpectralAnalysis(data_specification,\n binsperdec = 4,\n emin = 100,\n emax = 100000,\n irf = \"P6_V3_DIFFUSE\",\n roi_dir = center,\n maxROI = maxroi,\n minROI = maxroi)\n\n if model == None :\n roi=spectral_analysis.roi(\n roi_dir=center,\n diffuse_sources=get_default_diffuse(diffdir=e(\"$FERMI/diffuse\"),\n gfile=\"gll_iem_v02.fit\",\n ifile=\"isotropic_iem_v02.txt\"),\n catalogs = catalog,\n phase_factor = 1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n else :\n roi=spectral_analysis.roi(\n roi_dir=center,\n xmlfile = model,\n phase_factor =1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n\n print \"---------------------Energy range--------------------\"\n \n print \"emin=\"+str(roi.bands[0].emin)+\"\\n\"\n print \"emax=\"+str(roi.bands[len(roi.bands)-1].emax)+\"\\n\"\n \n\n # keep overall flux of catalog source,\n # but change the starting index to 2.\n roi.modify(which=catalog_name, name=name, index=2, \n keep_old_flux=True)\n\n return roi",
"def train():\n pass",
"def WaterVaporTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/828.5e-9,lp.c/828e-9]),sim_nu=np.array([])):\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n ext_wv = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(lp.mH2O*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True).T\n T_wv = np.exp(-np.cumsum(n_wv[np.newaxis,:]*ext_wv,axis=1)*dr)\n \n return T_wv,sim_nu",
"def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))",
"def setgeo(rundata):\n#-------------------\n\n try:\n geodata = rundata.geodata\n except:\n print \"*** Error, this rundata has no geodata attribute\"\n raise AttributeError(\"Missing geodata attribute\")\n\n # == setgeo.data values ==\n geodata.variable_dt_refinement_ratios = True ## Overrides clawdata.inratt, above\n\n geodata.igravity = 1\n geodata.gravity = 9.81\n geodata.icoordsys = 2\n geodata.Rearth = 6367.5e3\n geodata.icoriolis = 0\n\n # == settsunami.data values ==\n geodata.sealevel = 0.\n geodata.drytolerance = 1.e-2\n geodata.wavetolerance = 1.e-1 ##\n geodata.depthdeep = 1.e6 ## Definition of \"deep\" water\n geodata.maxleveldeep = 10 ## Restriction on the number of deep water levels\n geodata.ifriction = 1 ## Friction switch. 0=off, 1=on\n # geodata.coeffmanning =0.0\n geodata.coeffmanning =.025\n geodata.frictiondepth = 10.\n\n #okushiri_dir = '/Users/FrankGonzalez/daily/modeling/tsunami-benchmarks/github/' \\\n #+ 'FrankGonzalez/geoclaw-group/benchmarks/bp09' ##\n okushiri_dir = '..' ## this directory\n \n # == settopo.data values ==\n geodata.topofiles = []\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/OK24.tt1']) ## 24-s, ~550-740 m Entire Domain (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n okushiri_dir + '/OK08.tt1']) ## 8-s, ~184-247 m Okushiri (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n okushiri_dir + '/OK03.tt1']) ## 2.67 s (8/3s), ~61-82 m Okushiri (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0., 1.e10, \\\n okushiri_dir + '/AO15.tt1']) ## 0.53-0.89 s, ~16.5-20.4 m, Aonae (Dmitry's version of Kansai U.)\n # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/MO01.tt1']) ## 0.89 s, ~20-27 m, Monai (Dmitry's version of Kansai U.)\n # geodata.topofiles.append([1, 1, 1, 0., 1.e10, \\\n # okushiri_dir + '/MB05.tt1']) ## 0.13-0.18 s, ~4 m Monai (Dmitry's version of Kansai U.)\n\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth40_138.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth40_140.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth42_138.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth42_140.txt']) ## JODC 500 m\n \n # == setdtopo.data values ==\n geodata.dtopofiles = []\n # for moving topography, append lines of the form: (<= 1 allowed for now!)\n # [topotype, minlevel,maxlevel,fname]\n geodata.dtopofiles.append([1,2,3, okushiri_dir + '/HNO1993.txyz']) ## Dmitry N.'s version of Kansai U.\n\n # == setqinit.data values ==\n geodata.iqinit = 0\n geodata.qinitfiles = []\n # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)\n # [minlev, maxlev, fname]\n #geodata.qinitfiles.append([1, 1, 'hump.xyz'])\n\n # == setregions.data values ==\n geodata.regions = []\n # to specify regions of refinement append lines of the form\n # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]\n # Note: Level 1 = 24 s & Levels [2,3,4,5] = RF [3,3,3,8] => Res of 8 sec to 8/3 sec to 8/9 to 1/9 sec/cell\n # Grid Limits\n # Name x1 x2 y1 y2\n # OK24 137.53666670 141.53000000 39.53666670 44.26333330\n # HNO 138.50000000 140.55000000 40.51666670 43.30000000\n # OK08 138.50111110 140.55222220 40.52111110 43.29888890\n # OK03 139.38925930 139.66407410 41.99592590 
42.27074070\n # AO15 139.43419750 139.49987650 42.03118520 42.07251850\n # MO01 139.41123460 139.43320990 42.07790120 42.14580250\n # MB05 139.41385190 139.42639510 42.09458550 42.10343920\n \n #geodata.regions.append([1, 1, 0., 1e9, 0.0, 360.0, -90.0, 90.0]) ## OK24: 24-s, ~550-740 m Entire Domain\n geodata.regions.append([1, 2, 0., 1e9, 138.5, 139.7, 41.4, 43.3]) ## OK08: 8-s, ~184-247 m Okushiri \n geodata.regions.append([1, 3, 0., 1e9, 139.39, 139.6, 42.0, 42.25]) ## OK03: 2.67 s (8/3s), ~61-82 m Okushiri \n # geodata.regions.append([1, 4, 0., 1e9, 139.42, 139.57, 42.03, 42.23]) ## AO15: 0.53-8/9 s, ~16.5-20.4 m, Aonae \n #geodata.regions.append([1, 4, 0., 1e9, 139.40, 139.46, 42.03, 42.22]) ## West coast Okushiri\n geodata.regions.append([4, 4, 90., 1e9, 139.42, 139.431, 42.07, 42.12])\n \n\n # == setgauges.data values ==\n geodata.gauges = []\n # for gauges append lines of the form [gaugeno, x, y, t1, t2]\n \n # geodata.gauges.append([1,139.429211710298,42.188181491811,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([3,139.411185686023,42.162762869034,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([5,139.418261206409,42.137404393442,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([6,139.428035766149,42.093012384481,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([7,139.426244998662,42.116554785296,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([8,139.423714744650,42.100414145210,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([9,139.428901803617,42.076636582137,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([10,139.427853421935,42.065461519438,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([11,139.451539852594,42.044696547058,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([12,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([13,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs\n # \n # == setfixedgrids.data values ==\n\n geodata.fixedgrids = []\n \n for g in geodata.gauges:\n xg = g[1]\n yg = g[2]\n xg1 = xg - 0.001\n xg2 = xg + 0.002\n yg1 = yg - 0.001\n yg2 = yg + 0.002\n nx = 31\n ny = 31\n gaugeno = g[0]\n if gaugeno == 9:\n xg2 = xg + 0.003\n nx = 41\n if gaugeno == 8:\n xg1 = xg - 0.002\n xg2 = xg + 0.001\n yg1 = yg - 0.002\n yg2 = yg + 0.001\n \n geodata.fixedgrids.append([210.0,360.0,11,xg1,xg2,yg1,yg2,nx,ny,0,1])\n geodata.regions.append([5, 5, 180., 1e9, xg1,xg2,yg1,yg2])\n \n \n return rundata\n\n # end of function setgeo\n # ----------------------",
"def define_ufl_stress_work(self):\n\n if hasattr(self, 'ufl_stress_work'):\n return None\n\n # THIS NEEDS TO BE GENERALIZED.\n if self.config['material']['type'] == 'elastic':\n # stress_tensor = self._material.stress_tensor(self.displacement,\n # self.pressure)\n stress_tensor = self._material.stress_tensor(self.deformationGradient,\n self.jacobian,\n self.pressure)\n else:\n stress_tensor = self._material.stress_tensor(self.velocityGradient,\n self.pressure)\n\n xi = self.test_vector\n self.ufl_stress_work = dlf.inner(dlf.grad(xi), stress_tensor)*dlf.dx\n if self.config['formulation']['time']['unsteady']:\n if self.config['material']['type'] == 'elastic':\n stress_tensor0 = self._material.stress_tensor(self.deformationGradient0,\n self.jacobian0,\n self.pressure0)\n else:\n stress_tensor0 = self._material.stress_tensor(self.velocityGradient,\n self.pressure)\n self.ufl_stress_work0 = dlf.inner(dlf.grad(xi), stress_tensor0)*dlf.dx\n else:\n self.ufl_stress_work0 = 0\n\n return None",
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec",
"def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('usrtrack', type=str, help='ustsuw binary output')\n parser.add_argument('root', type=str, nargs='?', help='output ROOT file name', default=\"\")\n parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose', help='print what is being done')\n \n args = parser.parse_args()\n\n if not path.isfile(args.usrtrack):\n print(\"ustsuw2root: File %s does not exist.\" % args.usrtrack, file=sys.stderr)\n return 1\n\n if args.root == \"\":\n rootFileName = \"%s%s\" % (args.usrtrack,\".root\")\n else:\n rootFileName = args.root\n \n b = Usrtrack()\n b.readHeader(args.usrtrack)\n\n ND = len(b.detector)\n \n if args.verbose:\n #b.sayHeader()\n for i in range(ND):\n b.printHeader(i)\n print(\"\")\n\n fout = ROOT.TFile(rootFileName, \"recreate\")\n for i in range(ND):\n val = Data.unpackArray(b.readData(i,b.detector[i].lowneu))\n err = Data.unpackArray(b.readStat(i,b.detector[i].lowneu))\n\n det = b.detector[i]\n\n h = hist(det)\n hn = histN(det) # filled only if det.lowneu\n \n n = h.GetNbinsX()\n print(n, len(val), det.ne, val)\n\n for i in range(det.ne):\n h.SetBinContent(i+1, val[i])\n\n for i in range(det.ne):\n h.SetBinError(i+1, err[n-i-1]*val[i])\n\n h.SetEntries(b.weight)\n h.Write()\n if det.lowneu:\n hn.Write()\n\n fout.Close()",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def use_w(args):\n try:\n bounddata = Table.read(\n f'./Input/UseWv/WaveRegions_{args.WRegion}_{args.band}.csv',\n format='csv')\n except IOError:\n sys.exit(\n f'WaveRegions FILE \"./Input/UseWv/WaveRegions'\n '_{args.WRegion}_{args.band}.csv\" NOT FOUND!')\n\n wavesols = pd.read_csv(f'./Input/UseWv/WaveSolns_{args.band}.csv')\n#-------------------------------------------------------------------------------\n XRegion_dir = f'./Input/UseWv/XRegions_{args.WRegion}_{args.band}.csv'\n with open(XRegion_dir,'w') as filew:\n filew.write('order, start, end, masks\\n')\n\n m_order = np.array(bounddata['order'])\n starts = np.array(bounddata['start'])\n ends = np.array(bounddata['end'])\n ords = list( sorted(OrderDictCla().orderdict[args.band].keys()) )\n\n Ostarts = [OrderDictCla().orderdict[args.band][k][0] for k in ords]\n Oends = [OrderDictCla().orderdict[args.band][k][1] for k in ords]\n labels = []\n\n m_orders_unique = np.unique(m_order)\n\n # For each order specified, find what pixel numbers correspond to the\n # wavelength bounds presented.\n # If multiple wavelength bounds given for a single order, output a\n # pixel mask between the two, as well.\n for o in range(len(m_orders_unique)):\n\n # if len(m_orders_unique) == 9:\n # filew.write('9, 150, 1950, []\\n')\n # continue\n\n pixs = []\n mini = np.where(m_order == m_orders_unique[o])[0]\n for j in range(len(mini)):\n i = mini[j]\n\n wavebounds = [starts[i],ends[i]]\n wO = wavesols['w'+str(m_orders_unique[o])]\n pixO = wavesols['x'+str(m_orders_unique[o])]\n pix = [pixO[(np.argmin(abs(wO-wavebounds[k])))] for k in [0,1]]\n pixs = pixs + pix\n\n pixsS = list(sorted(pixs))\n q = pixsS[1:-1]\n if len(pixsS) == 2:\n filew.write('{}, {}, {},[]\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1])\n )\n else:\n filew.write('{}, {}, {},\"{}\"\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1],\n [[first,second] for first, second in zip(q[0::2], q[1::2])]\n ))",
"def calculate_wake(self, no_wake=False):\n\n # define the center of rotation with reference to 270 deg\n center_of_rotation = Vec3(0, 0, 0)\n\n # Rotate the turbines such that they are now in the frame of reference\n # of the wind direction simpifying computing the wakes and wake overlap\n rotated_map = self.turbine_map.rotated(\n self.wind_direction, center_of_rotation)\n\n # rotate the discrete grid and turbine map\n rotated_x, rotated_y, rotated_z = self._rotated_dir(\n self.wind_direction, center_of_rotation, rotated_map)\n\n # sort the turbine map\n sorted_map = rotated_map.sorted_in_x_as_list()\n\n # calculate the velocity deficit and wake deflection on the mesh\n u_wake = np.zeros(np.shape(self.u))\n v_wake = np.zeros(np.shape(self.u))\n w_wake = np.zeros(np.shape(self.u))\n for coord, turbine in sorted_map:\n\n # update the turbine based on the velocity at its hub\n turbine.update_velocities(\n u_wake, coord, self, rotated_x, rotated_y, rotated_z)\n\n # get the wake deflecton field\n deflection = self._compute_turbine_wake_deflection(\n rotated_x, rotated_y, turbine, coord, self)\n\n # get the velocity deficit accounting for the deflection\n turb_u_wake, turb_v_wake, turb_w_wake = self._compute_turbine_velocity_deficit(\n rotated_x, rotated_y, rotated_z, turbine, coord, deflection, self.wake, self)\n\n # include turbulence model for the gaussian wake model from Porte-Agel\n if self.wake.velocity_model.model_string == 'gauss':\n\n # compute area overlap of wake on other turbines and update downstream turbine turbulence intensities\n for coord_ti, turbine_ti in sorted_map:\n\n if coord_ti.x1 > coord.x1 and np.abs(coord.x2 - coord_ti.x2) < 2*turbine.rotor_diameter:\n # only assess the effects of the current wake\n\n freestream_velocities = turbine_ti.calculate_swept_area_velocities(\n self.wind_direction,\n self.u_initial,\n coord_ti,\n rotated_x,\n rotated_y,\n rotated_z)\n\n wake_velocities = turbine_ti.calculate_swept_area_velocities(\n self.wind_direction,\n self.u_initial - turb_u_wake,\n coord_ti,\n rotated_x,\n rotated_y,\n rotated_z)\n\n area_overlap = self._calculate_area_overlap(\n wake_velocities, freestream_velocities, turbine)\n if area_overlap > 0.0:\n turbine_ti.turbulence_intensity = turbine_ti.calculate_turbulence_intensity(\n self.turbulence_intensity,\n self.wake.velocity_model,\n coord_ti,\n coord,\n turbine\n )\n\n # combine this turbine's wake into the full wake field\n if not no_wake:\n # TODO: why not use the wake combination scheme in every component?\n u_wake = self.wake.combination_function(u_wake, turb_u_wake)\n v_wake = (v_wake + turb_v_wake)\n w_wake = (w_wake + turb_w_wake)\n\n # apply the velocity deficit field to the freestream\n if not no_wake:\n # TODO: are these signs correct?\n self.u = self.u_initial - u_wake\n self.v = self.v_initial + v_wake\n self.w = self.w_initial + w_wake\n\n # rotate the grid if it is curl\n if self.wake.velocity_model.model_string == 'curl':\n self.x, self.y, self.z = self._rotated_grid(\n -1 * self.wind_direction, center_of_rotation)",
"def sketch_und_part(self):\n if (self.dimension == '3D'):\n #Sketch Wuerfel zeichnen\n self.sketch_Wuerfel = self.model.ConstrainedSketch(\n name='Seitenansicht_Wuerfel',\n sheetSize=200.0)\n self.sketch_Wuerfel.rectangle(\n point1=(-self.laenge_x/2.0, -self.laenge_y/2.0),\n point2=(self.laenge_x/2.0, self.laenge_y/2.0))\n #Part Wuerfel generieren\n self.part_Wuerfel = self.model.Part(\n name=self.name+'_Wuerfel',\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY)\n self.part_Wuerfel.BaseSolidExtrude(\n sketch=self.sketch_Wuerfel,\n depth=self.laenge_z/2.0) #z-Symmetrie\n #Sketch Pore zeichnen (fuer Quader und Zylinder)\n self.sketch_Pore = self.model.ConstrainedSketch(\n name='Seitenansicht_Pore',\n sheetSize=200.0)\n if (self.typ_Pore == 'Quader'):\n self.sketch_Pore.rectangle(\n point1=(-self.porenparameter_x/2.0, -self.porenparameter_y/2.0),\n point2=(self.porenparameter_x/2.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Zylinder'):\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Ellipsoid' ):\n matlab.ellipsoidIgesOut(\n self.porenparameter_x,\n self.porenparameter_y,\n self.porenparameter_z,\n 'Ellipsoid')\n # if (self.porenparameter_x == self.porenparameter_z):\n # self.sketch_Pore.ConstructionLine(\n # point1=(0.0, -100.0),\n # point2=(0.0, 100.0))\n # self.sketch_Pore.EllipseByCenterPerimeter(\n # center=(0.0, 0.0),\n # axisPoint1=(self.porenparameter_x/2.0, 0.0),\n # axisPoint2=(0.0, self.porenparameter_y/2.0))\n # self.sketch_Pore.autoTrimCurve(\n # curve1=self.sketch_Pore.geometry[3],\n # point1=(-self.porenparameter_x/2.0, 0.0))\n # self.sketch_Pore.Line(\n # point1=(0.0, self.porenparameter_y/2.0),\n # point2=(0.0, -self.porenparameter_y/2.0))\n else:\n print('typ_Pore Error!')\n #Part Pore generieren\n if (self.typ_Pore == 'Ellipsoid' ):\n # if (self.porenparameter_x == self.porenparameter_z):\n # self.part_Pore.BaseSolidRevolve(\n # sketch=self.sketch_Pore,\n # angle=360.0,\n # flipRevolveDirection=OFF)\n self.iges_Datei = mdb.openIges(\n 'Ellipsoid.igs',\n msbo=False,\n trimCurve=DEFAULT,\n scaleFromFile=OFF)\n self.model.PartFromGeometryFile(\n name=self.name+'_Pore',\n geometryFile=self.iges_Datei,\n combine=False,\n stitchTolerance=1.0,\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY,\n convertToAnalytical=1,\n stitchEdges=1,\n scale=1) # Skalierung\n self.part_Pore = self.model.parts[self.name+'_Pore']\n self.part_Pore.AddCells(\n faceList = self.part_Pore.faces,\n flipped=False)\n del self.iges_Datei\n os.remove('abaqus_read_iges0.log') #Arbeitsordner aufraeumen\n os.remove('temp-Ellipsoid-new.sat')\n os.remove('Ellipsoid.igs')\n elif (self.typ_Pore == 'Quader' or 'Zylinder'):\n self.part_Pore = self.model.Part(\n name=self.name+'_Pore',\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY)\n self.part_Pore.BaseSolidExtrude(\n sketch=self.sketch_Pore,\n depth=self.porenparameter_z)\n #Assemble\n self.assembly = self.model.rootAssembly\n self.assembly.DatumCsysByDefault(CARTESIAN)\n self.assembly.Instance(\n name=self.name+'_Pore',\n part=self.part_Pore,\n dependent=ON)\n self.assembly.Instance(\n name=self.name+'_Wuerfel',\n part=self.part_Wuerfel,\n dependent=ON)\n #Translation\n self.assembly.translate(\n instanceList=(self.name+'_Wuerfel', ),\n vector=(0.0, 0.0, -self.laenge_z/2.0))\n if (self.typ_Pore == 'Ellipsoid'):\n self.assembly.translate(\n instanceList=(self.name+'_Pore', ),\n vector=(0.0, 0.0, 0.0))\n elif 
(self.typ_Pore == 'Quader' or 'Zylinder'):\n self.assembly.translate(\n instanceList=(self.name+'_Pore', ),\n vector=(0.0, 0.0, -self.porenparameter_z/2.0))\n #Rotation\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(1.0, 0.0, 0.0),\n angle=self.porenparameter_rx)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(0.0, 1.0, 0.0),\n angle=self.porenparameter_ry)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(0.0, 0.0,1.0),\n angle=self.porenparameter_rz)\n #Schneiden\n self.assembly.InstanceFromBooleanCut(\n name='RVE',\n instanceToBeCut=self.assembly.instances[self.name+'_Wuerfel'],\n cuttingInstances=(self.assembly.instances[self.name+'_Pore'], ),\n originalInstances=SUPPRESS)\n self.assembly.deleteFeatures((self.name+'_Wuerfel', self.name+'_Pore', ))\n # del self.model.parts[self.name+'_Wuerfel']\n # del self.model.parts[self.name+'_Pore']\n self.part_RVE = self.model.parts[self.name]\n elif (self.dimension == '2D'):\n #Sketch Wuerfel zeichnen\n self.sketch_Wuerfel = self.model.ConstrainedSketch(\n name='Seitenansicht_Wuerfel',\n sheetSize=200.0)\n self.sketch_Wuerfel.rectangle(\n point1=(0.0, 0.0),\n point2=(self.laenge_x/2.0, self.laenge_y/2.0)) #x- und y-Symmetrie\n #Part Wuerfel generieren\n self.part_Wuerfel = self.model.Part(\n name=self.name+'_Wuerfel',\n dimensionality=TWO_D_PLANAR,\n type=DEFORMABLE_BODY)\n self.part_Wuerfel.BaseShell(sketch=self.sketch_Wuerfel)\n #Sketch Pore zeichnen\n self.sketch_Pore = self.model.ConstrainedSketch(\n name='Seitenansicht_Pore',\n sheetSize=200.0)\n if (self.typ_Pore == 'Ellipsoid'):\n self.sketch_Pore.ConstructionLine(\n point1=(0.0, -100.0),\n point2=(0.0, 100.0))\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n self.sketch_Pore.autoTrimCurve(\n curve1=self.sketch_Pore.geometry[3],\n point1=(-self.porenparameter_x/2.0, 0.0))\n self.sketch_Pore.Line(\n point1=(0.0, self.porenparameter_y/2.0),\n point2=(0.0, -self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Quader'):\n self.sketch_Pore.rectangle(\n point1=(-self.porenparameter_x/2.0, -self.porenparameter_y/2.0),\n point2=(self.porenparameter_x/2.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Zylinder'):\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n else:\n print('typ_Pore Error!')\n #Part Pore generieren\n self.part_Pore = self.model.Part(\n name=self.name+'_Pore',\n dimensionality=TWO_D_PLANAR,\n type=DEFORMABLE_BODY)\n self.part_Pore.BaseShell(sketch=self.sketch_Pore)\n #Assemble\n self.assembly = self.model.rootAssembly\n self.assembly.DatumCsysByDefault(CARTESIAN)\n self.assembly.Instance(\n name=self.name+'_Wuerfel',\n part=self.part_Wuerfel,\n dependent=ON)\n self.assembly.Instance(\n name=self.name+'_Pore',\n part=self.part_Pore,\n dependent=ON)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, self.laenge_z/2.0),\n axisDirection=(0.0, 0.0, self.laenge_z/2.0+1),\n angle=self.porenparameter_rz)\n self.assembly.InstanceFromBooleanCut(\n name='RVE',\n instanceToBeCut=self.assembly.instances[self.name+'_Wuerfel'],\n cuttingInstances=(self.assembly.instances[self.name+'_Pore'], ),\n originalInstances=SUPPRESS)\n 
self.assembly.deleteFeatures((self.name+'_Wuerfel', self.name+'_Pore', ))\n del self.model.parts[self.name+'_Wuerfel']\n #del self.model.parts[self.name+'_Pore']\n self.part_RVE = self.model.parts[self.name]\n else:\n print('dimension Error!')",
"def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n\n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n print 'Using vanadium mass: ',van_mass\n print ' sample mass: ',samp_mass \n print ' sample_rmm : ',samp_rmm \n # check if mono-vanadium is provided as multiple files list or just put in brackets ocasionally\n if isinstance(mono_van,list):\n if len(mono_van)>1:\n raise IOError(' Can currently work only with single monovan file but list supplied')\n else:\n mono_van = mono_van[0];\n\n \n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', 
kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n map_file = \"\"\n print 'one2one selected'\n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file = map_file+'.map'\n reducer.map_file = map_file;\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integration range to: ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n\n \n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n if (kwargs.get('hardmaskOnly')): \n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n else:\n specs=\"\"\n \n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking =mtd['mask_wksp']\n else:\n print '########### Run diagnose for sample run ##############'\n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking) \n print 'first Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n fail_list=get_failed_spectra_list(masking) \n else:\n print '########### Run diagnose for monochromatic vanadium run ##############'\n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2 \n reducer.spectra_masks=total_mask \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(total_mask)\n #fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n \n \n #Run the conversion first on the sample\n deltaE_wkspace_sample = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n\n \n if kwargs.has_key('mono_correction_factor'):\n absnorm_factor=kwargs.get('mono_correction_factor')\n print 'Using supplied correction factor for absolute units'\n else:\n print '##### Evaluate the integral from the monovan run and calculate the correction factor ######'\n print ' Using absolute units vanadion integration range : ', reducer.monovan_integr_range \n #now on the mono_vanadium run swap the mapping file\n reducer.map_file = monovan_mapfile \n deltaE_wkspace_monovan = reducer.convert_to_energy(mono_van, ei_guess, wb_mono)\n \n (absnorm_factorL,absnorm_factorSS,absnorm_factorP,absnorm_factTGP) = getAbsNormalizationFactor(deltaE_wkspace_monovan.getName(),str(reducer.monovan_integr_range[0]),str(reducer.monovan_integr_range[1])) \n \n print 'Absolute correction factor S^2 =',absnorm_factorSS,' Libisis: ',absnorm_factorL,' Puasonian: ',absnorm_factorP, ' TGP : ',absnorm_factTGP\n 
CreateSingleValuedWorkspace(OutputWorkspace='AbsFactor',DataValue=absnorm_factTGP)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace_sample.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found for sample run ',ei,' meV'\n print 'Incident energy found for mono vanadium run ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace_sample,OutputWorkspace=results_name)\n if results_name != wksp_out:\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n Divide(LHSWorkspace=wksp_out,RHSWorkspace='AbsFactor',OutputWorkspace=wksp_out)\n DeleteWorkspace(Workspace='AbsFactor')\n return mtd[wksp_out]",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):",
"def preberi_pot(ukazi):"
] |
[
"0.6477119",
"0.59873176",
"0.5760386",
"0.5455309",
"0.5430531",
"0.5388874",
"0.53816193",
"0.53550756",
"0.5325988",
"0.5319237",
"0.53131473",
"0.52663887",
"0.5250491",
"0.5246521",
"0.51907617",
"0.518856",
"0.5131376",
"0.5131376",
"0.5131376",
"0.5131376",
"0.5131376",
"0.51300293",
"0.51096445",
"0.51054066",
"0.509993",
"0.5095386",
"0.5095386",
"0.5095386",
"0.5095386",
"0.5095386"
] |
0.6451454
|
1
|
CropSyst/Campbell model daily water uptake
|
def water_uptake_campbell(self, soil):
daily_ref_evap_transp = soil.daily_ref_evap_transp
root_hydr_cond = np.zeros(soil.total_layers)
shoot_hydr_cond = np.zeros(soil.total_layers)
plant_hydr_cond = np.zeros(soil.total_layers)
root_activity = np.zeros(soil.total_layers)
root_cond_adj = np.zeros(soil.total_layers)
tot_root_cond_adj = 0
salinity_factor = np.zeros(soil.total_layers)
soil_water_pot_avg = 0
WAT_POT_FIELD_CAP = -33
# Transpiration
self.pot_transp = daily_ref_evap_transp * self.light_intercpt
self.max_pot_transp = (self.campbell_max_daily_transp *
self.light_intercpt)
self.expect_transp = min(self.pot_transp, self.max_pot_transp) # mm/day
# Plant hydraulic conductance (kg s m-4)
tot_plant_hydr_cond = (self.max_pot_transp /
(WAT_POT_FIELD_CAP -
self.leaf_water_pot_stress_onset))
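        # The whole-plant conductance is sized so that the maximum potential
        # transpiration is just delivered when the soil sits at field capacity
        # (WAT_POT_FIELD_CAP, -33 J/kg, i.e. about -33 kPa) and the leaf is at
        # the water potential where stomatal closure begins.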
        # roots are assumed to account for ~65% (roughly 2/3) of the total plant hydraulic resistance
tot_root_hydr_cond = tot_plant_hydr_cond / 0.65
        # shoots are assumed to account for the remaining ~35% (roughly 1/3) of the resistance
tot_shoot_hydr_cond = tot_plant_hydr_cond / 0.35
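        # Consistency check on the split: with k_root = k_plant / 0.65 and
        # k_shoot = k_plant / 0.35, the series combination
        # 1/k_root + 1/k_shoot = (0.65 + 0.35) / k_plant = 1/k_plant,
        # so the root/shoot partition preserves the total plant conductance.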
for lyr in soil.layers:
root_activity[lyr] = 1
salinity_factor[lyr] = 1
root_cond_adj[lyr] = (root_activity[lyr] * self.root_fraction[lyr]
* salinity_factor[lyr])
root_hydr_cond[lyr] = tot_root_hydr_cond * root_cond_adj[lyr]
tot_root_cond_adj += root_cond_adj[lyr]
# Root, shoot and plant hydraulic conductance(kg s m-4)
for lyr in soil.layers:
if root_cond_adj[lyr] > 0:
shoot_hydr_cond[lyr] = (tot_shoot_hydr_cond *
root_cond_adj[lyr] / tot_root_cond_adj)
plant_hydr_cond[lyr] = (root_hydr_cond[lyr] *
shoot_hydr_cond[lyr] /
(root_hydr_cond[lyr] +
shoot_hydr_cond[lyr]))
else:
plant_hydr_cond[lyr] = 0
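        # Rescale the total root conductance by the summed root activity /
        # salinity adjustment, then recombine root and shoot conductances in
        # series to obtain the effective whole-plant conductance used below.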
tot_root_hydr_cond *= tot_root_cond_adj
tot_plant_hydr_cond = ((tot_root_hydr_cond * tot_shoot_hydr_cond) /
(tot_root_hydr_cond + tot_shoot_hydr_cond))
if tot_plant_hydr_cond > 0:
for lyr in soil.layers:
soil_water_pot_avg += (soil.water_potential[lyr] *
root_cond_adj[lyr])
leaf_water_pot = (soil_water_pot_avg - self.expect_transp /
tot_plant_hydr_cond)
if leaf_water_pot < self.leaf_water_pot_stress_onset:
leaf_water_pot = ((tot_plant_hydr_cond * soil_water_pot_avg *
(self.leaf_water_pot_stress_onset -
self.leaf_water_pot_wilt_point) +
self.leaf_water_pot_wilt_point *
self.expect_transp)
/ (tot_plant_hydr_cond *
(self.leaf_water_pot_stress_onset -
self.leaf_water_pot_wilt_point) +
self.expect_transp))
if leaf_water_pot < self.leaf_water_pot_wilt_point:
leaf_water_pot = self.leaf_water_pot_wilt_point
self.att_transp = 0
transp_ratio = self.att_transp / self.expect_transp
elif leaf_water_pot < self.leaf_water_pot_stress_onset:
self.att_transp = (self.expect_transp * (leaf_water_pot -
self.leaf_water_pot_wilt_point) / (
self.leaf_water_pot_stress_onset -
self.leaf_water_pot_wilt_point))
transp_ratio = self.att_transp / self.expect_transp
else:
self.att_transp = self.expect_transp
transp_ratio = 1
        else:
            # No active root conductance in any layer (tot_root_cond_adj == 0):
            # uptake cannot occur, so fall back to the wilting-point leaf water
            # potential and zero transpiration so the uptake loop below stays defined.
            leaf_water_pot = self.leaf_water_pot_wilt_point
            self.att_transp = 0
            transp_ratio = 0
        # crop water uptake (kg/m2/d = mm/d)
for lyr in soil.layers:
self.water_uptake[lyr] = (plant_hydr_cond[lyr] *
(soil.water_potential[lyr] -
leaf_water_pot) * transp_ratio)
if self.water_uptake[lyr] < 0:
self.water_uptake[lyr] = 0
self.crop_transp = self.water_uptake.sum() # mm/day
self.cum_transp += self.crop_transp
self.cum_pot_transp += self.expect_transp
self.transp_ratio = self.crop_transp / self.expect_transp
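
A minimal, self-contained sketch of the supply-demand balance solved above for the leaf water potential. Every number in it is an illustrative assumption (not taken from this record); it only mirrors the stressed-branch algebra of water_uptake_campbell so it can be checked by hand.

# Illustrative check of the Campbell supply-demand balance; all values are assumptions.
expect_transp = 4.0                     # mm/day, demand after light interception (also the max here)
psi_soil = -80.0                        # J/kg, conductance-weighted soil water potential
psi_onset, psi_wilt = -1100.0, -2000.0  # J/kg, hypothetical stress-onset / wilting thresholds
k_plant = expect_transp / (-33.0 - psi_onset)   # whole-plant conductance from (field cap - onset)

psi_leaf = psi_soil - expect_transp / k_plant
if psi_leaf < psi_onset:
    # demand exceeds unstressed supply: solve for the leaf potential at which
    # supply k_plant*(psi_soil - psi_leaf) equals the linearly reduced demand
    psi_leaf = ((k_plant * psi_soil * (psi_onset - psi_wilt) + psi_wilt * expect_transp)
                / (k_plant * (psi_onset - psi_wilt) + expect_transp))
att_transp = expect_transp * min(max((psi_leaf - psi_wilt) / (psi_onset - psi_wilt), 0.0), 1.0)
print(round(psi_leaf, 1), round(att_transp, 2))  # approx. -1121.5 J/kg and 3.9 mm/day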
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def water_uptake_dssat(self, soil):\r\n CONV1 = 1e-4 # convert m/m3 to cm/cm3\r\n CONV2 = 100 # convert m to cm\r\n CONV3 = 10 # convert cm to mm\r\n\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n root_dens = self.root_dens * CONV1 #cm root / cm3 soil\r\n CONST1 = 1.3e-3\r\n CONST2 = np.zeros(soil.total_layers)\r\n CONST3 = 7.01\r\n layer_thickness = soil.layer_thickness * CONV2\r\n water_uptake = np.zeros(soil.total_layers)\r\n # Constant 2\r\n for lyr in soil.layers:\r\n CONST2[lyr] = 120 - 250 * soil.perm_wilt_point[lyr]\r\n if soil.perm_wilt_point[lyr] > 0.3:\r\n CONST2[lyr] = 45\r\n # Water uptake per unit root length\r\n for lyr in soil.layers:\r\n if root_dens[lyr] <= 0.00001 or (soil.water_content[lyr] <=\r\n soil.perm_wilt_point[lyr]):\r\n water_uptake[lyr] = 0\r\n else:\r\n water_uptake[lyr] = (CONST1 * math.exp(min((CONST2[lyr] *\r\n (soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr])), 40)) /\r\n (CONST3 - math.log(root_dens[lyr])))\r\n water_uptake[lyr] = min(water_uptake[lyr],\r\n self.dssat_max_water_uptake)\r\n # Water uptake in [cm/d] volume\r\n water_uptake[lyr] = (water_uptake[lyr] * layer_thickness[lyr] *\r\n root_dens[lyr])\r\n # Water uptake in [mm/d] volume\r\n water_uptake[lyr] = water_uptake[lyr] * CONV3\r\n # Total water uptake [mm/d]\r\n crop_transp = water_uptake.sum()\r\n min_transp = min(transp_pot, crop_transp)\r\n # Update crop arrays\r\n for lyr in soil.layers:\r\n if min_transp > 0:\r\n self.water_uptake[lyr] = (water_uptake[lyr] *\r\n (min_transp / crop_transp))\r\n else:\r\n self.water_uptake[lyr] = 0\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp",
"def cover_crop_added(self):\n\n ## getting input parameter\n crop_input = self.soil_inputs.crop_cover.values[0]\n if pd.isnull(crop_input):\n crop_input = \"nan\"\n #climate_input = self.soil_inputs.climate.values[0]\n years_cropcover_tech = self.soil_inputs.time_using_crop_cover.values[0]\n\n if np.isnan(years_cropcover_tech):\n years_cropcover_tech = 10\n\n if self.language == \"spanish\":\n #climate_options = [i.lower() for i in tl.climate_options[0]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[0]]\n else:\n #climate_options = [i.lower() for i in tl.climate_options[1]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[1]]\n\n if crop_input.lower() in cover_crop_options:\n\n cc_eng_input = tl.cover_crop_options[1][cover_crop_options.index(crop_input.lower())]\n self._cc_eng_input = cc_eng_input\n #cl_eng_input = tl.climate_options[1][climate_options.index(self._cl_eng_input.lower())]\n\n covercropfilter = ef.cover_cropping_factors.Change.str.lower() == cc_eng_input.lower()\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == self._cl_eng_input.lower()\n\n if climatefilter.sum() == 0:\n cl_eng_input = tl.world_climate_bouwman[1][tl.world_climate_bouwman[0].index(self._cl_eng_input)]\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == cl_eng_input.lower()\n\n filter_conditions = climatefilter & covercropfilter\n if np.array(filter_conditions).sum() != 0:\n factor_change_20years = ef.cover_cropping_factors.Factor.loc[filter_conditions].values[0]\n else:\n factor_change_20years = 1\n\n self.cover_crop_soc_change = cumulative_socemissions_for_20years(years_cropcover_tech,\n factor_change_20years,\n self.soil_c_stock)\n else:\n self.cover_crop_soc_change = [0]",
"def __init__(self, crop_no, sim_length, book, soil):\r\n sheet_inputs = book.sheet_by_name('inputs')\r\n # Campbell max canopy transpiration, mm/d:\r\n self.campbell_max_daily_transp = sheet_inputs.cell(6, crop_no).value\r\n # DSSAT max water uptake, cm3water/cm3root:\r\n self.dssat_max_water_uptake = sheet_inputs.cell(7, crop_no).value\r\n # Feddes stress threshold water potential for low T demand, J/kg\r\n self.P2L = sheet_inputs.cell(8, crop_no).value\r\n # Feddes stress threshold water potential for high T demand, J/kg\r\n self.P2H = sheet_inputs.cell(9, crop_no).value\r\n # Feddes high transpiration demand, mm/day\r\n self.R2H = sheet_inputs.cell(10, crop_no).value\r\n # Feddes low transpiration demand, mm/day\r\n self.R2L = sheet_inputs.cell(11, crop_no).value\r\n # Feddes alue of the pressure head, below which roots start to extract\r\n # water from the soil\r\n self.P0 = sheet_inputs.cell(12, crop_no).value\r\n #Campbell leaf water potential at onset of stomatal closure [J/kg]:\r\n self.leaf_water_pot_stress_onset = sheet_inputs.cell(13, crop_no).value\r\n #Campbell leaf water potential at wilting point [J/kg]:\r\n self.leaf_water_pot_wilt_point = sheet_inputs.cell(14, crop_no).value\r\n # EPIC water extraction distribution\r\n self.water_extraction_dist = sheet_inputs.cell(15, crop_no).value\r\n\r\n self.leaf_water_pot = 0 #J/kg\r\n self.sim_length = sim_length #d\r\n self.conductance = np.ones(soil.total_layers)\r\n self.water_uptake = np.zeros(soil.total_layers)\r\n self.leaf_water_potential = np.zeros(soil.total_layers)\r\n self.soil_water_pot_avg = 0\r\n self.transp_ratio = 0 # to quantify crop water stress\r\n self.crop_transp = 0\r\n self.pot_transp = 0\r\n self.att_transp = 0\r\n self.expect_transp = 0\r\n self.cum_transp = 0\r\n self.cum_pot_transp = 0\r\n self.root_dens = np.zeros(soil.total_layers) # m root / m3 soil\r\n self.root_fraction = np.zeros(soil.total_layers) #m root / m soil\r\n sheet_soil = book.sheet_by_name('soil')\r\n for lyr in soil.layers:\r\n self.root_dens[lyr] = sheet_soil.cell(9+lyr, 9).value\r\n self.root_fraction[lyr] = sheet_soil.cell(9+lyr, 10).value\r\n self.root_depth = sheet_soil.cell(3, 4).value",
"def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 = TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n #p_run.prune_layer_no_retraining(layer, thresholds[i])",
"def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each layer [mm]\r\n for lyr in soil.layers:\r\n soil_wat_avail[lyr] = ((soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr]) *\r\n soil.layer_thickness[lyr] *\r\n soil.WATER_DENSITY)\r\n # Water supply\r\n for lyr in soil.layers:\r\n soil_wat_supply[lyr] = (soil_wat_avail[lyr] * soil.kl[lyr])\r\n\r\n # Water uptake (no supply or demand)\r\n if (soil_wat_supply.sum() <= 0) or (transp_pot <= 0):\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = 0\r\n else:\r\n # Water uptake (water is not limiting)\r\n if transp_pot < soil_wat_supply.sum():\r\n # distribute demand proportionately to the water supply\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (soil_wat_supply[lyr] /\r\n soil_wat_supply.sum() *\r\n transp_pot)\r\n else:\r\n # Water uptake (water is limiting)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = soil_wat_supply[lyr]\r\n\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += transp_pot",
"def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)",
"def workflow(save_dir):\n year = 2016\n month_series = range(1, 13)\n total_potential_biomass_multiplier = 48.8\n total_standing_biomass_multiplier = 45.25\n biomass_jitter = 3.\n diet_sufficiency_multiplier = 0.28\n diet_sufficiency_jitter = 0.01\n avg_animal_density = 0.0175\n animal_density_jitter = 0.005\n\n # twelve months of precipitation rasters covering the study area\n precip_basename_list = [\n 'chirps-v2.0.{}.{:02d}.tif'.format(year, month) for month in\n month_series]\n\n # reclassify 0 to NoData in CHIRPS rasters\n output_precip_dir = os.path.join(save_dir, 'precip')\n if not os.path.exists(output_precip_dir):\n os.makedirs(output_precip_dir)\n for bn in precip_basename_list:\n base_raster = os.path.join(PRECIP_DIR, bn)\n target_raster = os.path.join(output_precip_dir, bn)\n pygeoprocessing.raster_calculator(\n [(base_raster, 1)], zero_to_nodata, target_raster,\n gdal.GDT_Float32, _IC_NODATA)\n\n # generate outputs\n for month in month_series:\n precip_raster = os.path.join(\n output_precip_dir, 'chirps-v2.0.{}.{:02d}.tif'.format(year, month))\n\n total_potential_biomass_path = os.path.join(\n save_dir, 'potential_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_potential_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_potential_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n total_standing_biomass_path = os.path.join(\n save_dir, 'standing_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_standing_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_standing_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n diet_sufficiency_path = os.path.join(\n save_dir, 'diet_sufficiency_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n diet_sufficiency_multiplier,\n diet_sufficiency_jitter]],\n precip_to_correlated_output, diet_sufficiency_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n animal_density_path = os.path.join(\n save_dir, 'animal_density_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n avg_animal_density,\n animal_density_jitter]],\n precip_to_animal_density, animal_density_path,\n gdal.GDT_Float32, _IC_NODATA)",
"def water_uptake_wofost(self, soil):\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n DROUGHT_CAT = 4\r\n p_value = p_wofost(transp_pot, DROUGHT_CAT)\r\n # WOFOST does not account for different layers\r\n for lyr in soil.layers:\r\n # Root fraction values over-written to simulate as there is only\r\n # one soil layer\r\n self.root_fraction[lyr] = soil.layer_thickness[lyr]\r\n crit_soil_moist = (\r\n (1 - p_value) * (soil.field_capacity[lyr] -\r\n soil.perm_wilt_point[lyr]) +\r\n soil.perm_wilt_point[lyr])\r\n stress_fact = (\r\n (soil.water_content[lyr] - soil.perm_wilt_point[lyr]) /\r\n (crit_soil_moist - soil.perm_wilt_point[lyr]))\r\n if stress_fact > 1:\r\n stress_fact = 1\r\n if stress_fact < 0:\r\n stress_fact = 0\r\n self.water_uptake[lyr] = (stress_fact * self.root_fraction[lyr] *\r\n transp_pot)\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp\r\n self.transp_ratio = self.att_transp / transp_pot",
"def _run_water_bal(self, i, today, interception, whc, field_capacity, saturation,\n rf_coeff, k_factor, ndvi_factor, water_factor, bias_corr, alfa_factor, watermask, outdir,\n yest_snwpck=None, yest_swf=None, geoproperties_file=None, daily_mode=True):\n\n #dynamic inputs to the model\n self.ndvi = self.pmanager.get_dynamic_data(today, self.ndvi_settings)\n self.pet = self.pmanager.get_dynamic_data(today, self.pet_settings)\n self.ppt = self.pmanager.get_dynamic_data(today, self.precip_settings)\n self.tavg = self.pmanager.get_dynamic_data(today, self.tavg_settings)\n self.tmin = self.pmanager.get_dynamic_data(today, self.tmin_settings)\n self.tmax = self.pmanager.get_dynamic_data(today, self.tmax_settings)\n\n # Call Raster Manager function to standardize all the input dataset.\n dynamic_inpts = [self.ndvi, self.pet, self.ppt, self.tavg, self.tmin, self.tmax]\n\n # All the variables are now Numpy Arrays!\n self.ndvi, self.pet, self.ppt, self.tavg, self.tmin, self.tmax = \\\n self.rmanager.normalize_to_std_grid(inputs=dynamic_inpts, resamplemethod='nearest')\n\n # ====== Call the functions ======\n # output SWi and SNWpk\n SWi, SNWpk, RAIN, SWE, snow_melt = self._soil_water(i, self.ppt, interception, self.tmin, self.tmax, self.tavg,\n self.melt_factor, self.rf_high_thresh_temp, self.rf_low_thresh_temp,\n yest_swf, yest_snwpck)\n DOY, year = self._day_of_year(today=today)\n\n SWiout = f'swi_{year}{DOY}.tif'\n print('swout', SWiout)\n SNWpkout = f'snwpk_{year}{DOY}.tif'\n RAINout = f'rain_{year}{DOY}.tif'\n SWEout = f'swe_{year}{DOY}.tif'\n snow_meltout = f'snowmelt_{year}{DOY}.tif'\n\n if daily_mode:\n self.rmanager.output_rasters(SWi, self.outdir, outname=SWiout)\n self.rmanager.output_rasters(SNWpk, self.outdir, outname=SNWpkout)\n self.rmanager.output_rasters(RAIN, self.outdir, outname=RAINout)\n self.rmanager.output_rasters(SWE, self.outdir, outname=SWEout)\n self.rmanager.output_rasters(snow_melt, self.outdir, outname=snow_meltout)\n\n # output DDRAIN and SRf\n DDrain, SRf = self._surface_runoff(SWi, saturation=self.saturation, field_capacity=self.field_capacity,\n whc=self.whc, rf_coeff=self.rf_coeff)\n DDrainout = f'dd_{year}{DOY}.tif'\n SRfout = f'srf_{year}{DOY}.tif'\n if daily_mode:\n self.rmanager.output_rasters(DDrain, self.outdir, outname=DDrainout)\n self.rmanager.output_rasters(SRf, self.outdir, outname=SRfout)\n\n # output eta and SWf\n etasw, SWf, etasw5 = self._veg_et(k_factor, ndvi_factor, water_factor, bias_corr, alfa_factor, watermask,\n self.pet, self.ndvi, SWi)\n etaswout = f'etasw_{year}{DOY}.tif'\n SWfout = f'swf_{year}{DOY}.tif'\n etasw5out = f'etasw5_{year}{DOY}.tif'\n if daily_mode:\n self.rmanager.output_rasters(etasw, outdir, outname=etaswout)\n self.rmanager.output_rasters(SWf, outdir, outname=SWfout)\n self.rmanager.output_rasters(etasw5, outdir, outname=etasw5out)\n\n return SWf, SNWpk, etasw, DDrain, SRf",
"def _crop_data(cfg, raw, subject):\n if subject != 'emptyroom' and cfg.crop_runs is not None:\n raw.crop(*crop_runs)",
"def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return",
"def main():\n\n\t# =========== Skim file & output file ===========\n\tskimLoc = \"$MJDDATADIR/surfmjd/analysis/skim/DS1/20160621_265313037/*.root\"\n\t# skimLoc = \"/Users/wisecg/datasets/ds1/*.root\"\n\t# wsOut = \"./output/waveSkim-1550-1650.root\"\n\twsOut = \"./output/waveSkim-1500-2000-mH-2.root\"\n\n\t# =========== Skim file cuts ===========\n\tburstCut = \"!(time_s > 2192e3 && time_s < 2195e3) && !(time_s > 7370e3 && time_s < 7371e3) && !(time_s > 7840e3 && time_s < 7860e3) && !(time_s > 8384e3 && time_s < 8387e3) && !(time_s > 8984e3 && time_s < 8985e3) && !(time_s > 9002e3 && time_s < 9005e3) && run != 13075 && run != 13093 && run != 13116\"\n\n\t# low-energy noisy runs cut - need to research & refine\n\t# runCut = \"run!=13312 && run!=13121 && run!=13004 && run!=12766 && run!=12735 && run!=12445 && run!=11175 && run!=12723 && run!=12746 && run!=12767 && run!=13071 && run!=13073 && run!=13074 && run!=13120 && run!=13205 && run!=13306 && run!=13307 && run!=9857 && run!=9862 && run!=9863\"\n\n\t# bigCut = \"channel%2==0 && mH==1 && (trapENFCal>1550 && trapENFCal<1650) && !wfDCBits && !muVeto && !isLNFill &&\" + burstCut\n\n\tbigCut = \"channel%2==0 && mH>1 && sumEH>1500 && !wfDCBits && isGood && \" + burstCut\n\n\t# =========== Ready? Go! ===========\n\tskimmer(bigCut, skimLoc, wsOut)\n\t# skimChecker(wsOut)",
"def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))",
"def main():\n # Verbosity: 1=Selection Results, >1 is various debugging information\n verbose = 0\n print \"build_all.py running with verbose=%s\"%(str(verbose))\n if verbose:\n print \"Fiducial Cut: \",fid_cut_hex,\"(apethum, z_min, z_max)\"\n print \"Max Drift Distance = %.4f us\"%(max_drift_time)\n\n tree = get_data_tree(list='All') # Golden All\n \n # We use the EXOFitting processed tree to get high-level physical quantities\n # like the anticorrelated energy, etc. \n #ptree_file = ROOT.TFile(preprocessed_tree)\n #ptree = ROOT.Get(\"dataTree\")\n #if verbose: print \"Indexing EXOFitting PreProcessed tree\"\n #ptree.BuildIndex(\"runNum\", \"eventNum\")\n #if verbose: print \" ...done\"\n\n cuts = \"\"\n\n #There must be at least 1 scintillation cluster:\n #cuts = \"@fScintClusters.size()>=1\"\n #cuts = \"(fScintClusters.GetCountsOnAPDPlane(0)+fScintClusters.GetCountsOnAPDPlane(1))>20000\"\n\n # The minimum scintinlation counts must be > 20000 and <70000\n # I observe that three peaks presumable alphas are at 38500, 42200, and 55000\n # So Rn222=5.4MeV, Po218=6MeV, Po214=7.7MeV\n # calibrate:: y=mx+b, m=6167, b=5198\n #cuts = \"fScintClusters.fRawEnergy>20000 && fScintClusters.fRawEnergy<70000\"\n #cuts += \"&& fScintClusters.fRawEnergy>22000 && fScintClusters.fRawEnergy<80000\"\n #cuts += \" && Sum$(fAPDs.fRawCounts) > 8000\"\n\n # Ignore Noise and Muon tagged events\n cuts +=\"fEventHeader.fTaggedAsNoise==0 && fEventHeader.fTaggedAsMuon==0\" \n\n # That's the last of the cuts, lets show the user what the cut looks like\n print \"Applying Cuts to data: \\n%s\"%cuts\n\n #Draw is the fastest method to apply cuts, in the end what we want is a reduced data list\n # to perform a more targeted analysis...\n tree.Draw(\">>+elist_alpha_canidates\",cuts,\"goff\")\n elist_alpha_canidates = ROOT.gDirectory.Get(\"elist_alpha_canidates\")\n print \"There are %d events passing the initial cuts\"%elist_alpha_canidates.GetN()\n\n #Now we have to look at events passing the cuts individually\n tf = ROOT.TFile(\"all.root\",\"RECREATE\")\n Rntree = tree.CloneTree(0)\n \n for i in range(elist_alpha_canidates.GetN()):\n # Print Progress\n if i%int(elist_alpha_canidates.GetN()/20) == 0:\n print \"%d of %d\"%(i,elist_alpha_canidates.GetN())\n\n #Grab the event data\n tree.GetEntry(elist_alpha_canidates.GetEntry(i))\n #ed = tree.EventBranch\n #if verbose>1: print_event_data(ed,verbose)\n\n #is_alphaish = check_alpha_like(ed,verbose)\n \n #is the event a fully reconstructed BiPo?\n #is_bipo = check_full_BiPo(ed,verbose)\n\n # Case1 (position matched Bi-Po)\n #is_case1 = check_case1(ed,verbose)\n #print \"BiPo=%s, Case1=%s\"%(is_bipo, is_case1) \n #raw_input('<hit any key to continue>')\n #if is_bipo or is_alphaish:\n # Write the EventData of events which pass any of our selection criteria\n # to ROOT file\n Rntree.Fill()\n\n Rntree.AutoSave()",
"def upload_model_data(_slices):\n # new directory path\n _dir = os.getcwd() + '/database/Model-Data/128'\n\n \"\"\" load MCGILL data \"\"\"\n # x=data, y=label, c=clinical, z=contour\n (train_x, train_y, train_c, train_z), (test_x, test_y, test_c, test_z) = dp.load_data_MCGILL(_slices)\n # check data shape\n print(\"Train shape:\", np.array(train_x).shape)\n print(\"Test shape:\", np.array(test_x).shape)\n print(\"Label shapes:\", np.array(train_y).shape, np.array(test_y).shape)\n # create new directory\n path = _dir + '/MCGILL'\n Path(path).mkdir(parents=True, exist_ok=True)\n os.chdir(path)\n # write data to directory\n write_file(train_x, train_y, train_c, train_z, 'train')\n write_file(test_x, test_y, test_c, test_z, 'test')\n\n \"\"\" load MAASTRO data \"\"\"\n # x=data, y=label, c=clinical, z=contour\n (train_x, train_y, train_c, train_z), (test_x, test_y, test_c, test_z) = dp.load_data_MAASTRO(_slices)\n # check data shape\n print(\"Train shape:\", np.array(train_x).shape)\n print(\"Test shape:\", np.array(test_x).shape)\n print(\"Label shapes:\", np.array(train_y).shape, np.array(test_y).shape)\n # create new directory\n path = _dir + '/MAASTRO'\n Path(path).mkdir(parents=True, exist_ok=True)\n os.chdir(path)\n # write data to directory\n write_file(train_x, train_y, train_c, train_z, 'train')\n write_file(test_x, test_y, test_c, test_z, 'test')",
"def central_cropping_experiment():\n model, history = train.train(BATCH_SIZE, EPOCHS, print_model_summary=True,\n central_cropping=True)\n evaluate_both(model)\n plotting.plot_metrics(history)",
"def crop_data(vol):\n\n thres = 250\n\n num_x = vol.shape[0]\n num_y = vol.shape[1]\n num_z = vol.shape[2]\n\n \n # set up starting positions\n starts = []\n\n # front and back\n for i in range(num_x):\n for j in range(num_z):\n starts.append( (i, 0, j) )\n starts.append( (i, num_y-1, j) )\n\n # left and right\n for i in range(num_y):\n for j in range(num_z):\n starts.append( (0, i, j) )\n starts.append( (num_x-1, i, j) )\n\n # DFS\n seenpositions = set()\n currentpositions = set(starts)\n\n while currentpositions:\n nextpositions = set()\n for p in currentpositions:\n seenpositions.add(p)\n succ = possiblesuccessors(vol, p, thres)\n for np in succ:\n if np in seenpositions: continue\n nextpositions.add(np)\n\n currentpositions = nextpositions\n\n print \"cropping %i (%i addional) voxels\" % (len(seenpositions), len(seenpositions) - len(starts))\n\n # crop visited voxels\n for pos in seenpositions:\n vol[pos[0], pos[1], pos[2]] = 0.0\n\n return vol",
"def crop_recommendation():\n ## extract user input information\n if request.method == \"POST\":\n re = request.get_json()\n city = re[\"city\"]\n state = re[\"state\"]\n ## convert into lower case\n state = state.lower()\n model_ph = re[\"ph\"]\n model_n = re[\"n\"]\n model_p = re[\"p\"]\n model_k = re[\"k\"]\n\n ##Api key for the call\n try:\n user_api = \"6c4043b2272bb3cf1e7330517937f690\"\n ## extract the weather data such as temp,humidity as per user given location\n complete_api_link = \"https://pro.openweathermap.org/data/2.5/forecast/climate?q=\" + \\\n city + \"&appid=\" + user_api\n api_link = requests.get(complete_api_link)\n ## response of api in api_data\n api_data = api_link.json()\n ## lets get the average of the temp and humidity for monthly\n humidity_sum = 0\n temp_sum = 0\n ll = api_data[\"list\"]\n for i in range(30):\n temp = ll[i]['temp']\n temp_avg = (temp['day'] + temp['min'] + temp[\"max\"] + temp[\"night\"] + temp[\"eve\"]) / 5\n temp_sum = (temp_avg - 273) + temp_sum\n humidity = ll[i]['humidity']\n humidity_sum = humidity + humidity_sum\n ## store the of avg of humidity and temperature in the varibale which we pass to model\n model_humidity = humidity_sum / 30\n model_temp = temp_sum / 30\n except:\n model_humidity = 60\n model_temp = 25\n\n ## lets get rainfall of the location\n today = date.today()\n current_month = today.month\n current_date = today.day\n current_year = today.year\n\n ## harvesting time get from database\n harvesting_time = 4\n ## create an list for the month number for which we have to get rainfall\n temp_month = current_month\n rain_fall_month_list = []\n for i in range(harvesting_time):\n if temp_month > 12:\n temp_month = 1\n rain_fall_month_list.append(temp_month)\n temp_month = temp_month + 1\n\n # month_list = [\"january\",\"february\",\"march\",\"april\",\n # \"may\",\"june\",\"july\",\"august\",\"september\",\n # \"october\",\"november\",\"december\"]\n\n total_rainfall = 0\n try:\n rain_fall = rain_info.query.filter_by(state=state).first()\n ## get the rainfall from the database for the given month\n if 1 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.january\n if 2 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.february\n if 3 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.march\n if 4 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.april\n if 5 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.may\n if 6 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.june\n if 7 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.july\n if 8 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.august\n if 9 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.september\n if 10 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.october\n if 11 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.november\n if 12 in rain_fall_month_list:\n total_rainfall = total_rainfall + rain_fall.december\n except:\n annual_rainfall = [29, 21, 37.5, 30.7, 52.6, 150, 299, 251.7, 179.2, 70.5, 39.8, 10.9]\n for month in rain_fall_month_list:\n total_rainfall = total_rainfall + annual_rainfall[month - 1]\n\n total_rainfall = total_rainfall / len(rain_fall_month_list)\n ## assign the rainfall\n model_rainfall = total_rainfall\n ## get the dataset and append in list for model\n model_para = [model_n, model_p, model_k, model_temp, 
model_humidity, model_ph, model_rainfall]\n\n ## model testing and get output\n # NOTE: you must manually set API_KEY below using information retrieved from your IBM Cloud account.\n API_KEY = \"6Pe2pNaBxpPB0eN7oyIPBQgDZ6d_ujIp8h4W1ik-pyk5\"\n token_response = requests.post('https://iam.cloud.ibm.com/identity/token',\n data={\"apikey\": API_KEY, \"grant_type\": 'urn:ibm:params:oauth:grant-type:apikey'})\n mltoken = token_response.json()[\"access_token\"]\n\n header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}\n\n # NOTE: manually define and pass the array(s) of values to be scored in the next line\n payload_scoring = {\"input_data\": [\n {\"field\": ['N', 'P', 'K', 'temperature', 'humidity', 'ph', 'rainfall', 'label'], \"values\": [model_para]}]}\n\n response_scoring = requests.post(\n 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/91bf6a6b-7d60-4e50-b75b-bc99fd76d42a/predictions?version=2021-07-08',\n json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})\n\n response_score = response_scoring.json()\n\n ## response _score containing the info of the output\n\n ## created a list in which order the probablity of each crop is returned by model\n list_of_ordered_crops = [\"apple\", \"banana\", \"blackgram\", \"chickpea\", \"coconut\",\n \"coffee\", \"cotton\", \"grapes\", \"jute\", \"kidneybeans\",\n \"lentil\", \"maize\", \"mango\", \"mothbeans\", \"mungbean\",\n \"muskmelon\", \"orange\", \"papaya\", \"pigeonpeas\",\n \"pomegranate\", \"rice\", \"watermelon\"]\n\n ##PIE CHART\n ## gather the info of the other crops for pie chart\n predicted_prob = response_score[\"predictions\"][0][\"values\"][0][1]\n new_list = list(zip(list_of_ordered_crops, predicted_prob))\n sorted_list = sorted(new_list, key=lambda l: l[1], reverse=True)\n ## create an list in which the probablity is not zero\n chart_list = []\n for i in sorted_list:\n if i[1] == 0:\n pass\n else:\n chart_list.append(i)\n ## code for top5 or possible top but less than 5\n pie_chart_labels = []\n pie_chart_values = []\n temp_values = []\n for i in range(0, 5):\n try:\n pie_chart_labels.append(chart_list[i][0])\n temp_values.append(chart_list[i][1])\n except:\n break\n\n ## code for the value of of 1\n for val in temp_values:\n temp = val / sum(temp_values)\n temp = round(temp, 2)\n pie_chart_values.append(temp)\n pie_chart_values = [round(item * 100, 2) for item in pie_chart_values]\n\n ## if by chance during round sum become > 1\n if sum(pie_chart_values) > 100:\n pie_chart_values[0] = pie_chart_values[0] - (sum(pie_chart_values) - 100)\n if sum(pie_chart_values) < 100:\n pie_chart_values[0] = pie_chart_values[0] + (100 - sum(pie_chart_values))\n\n ## BAR CHART\n weather_list = [\"Temperature\", \"Humidity\", \"Rainfall\"]\n soil_list = [\"Nitroegen\", \"Phosphorus\", \"Potassium\", \"Ph\"]\n final_list = list()\n count = 0\n for crop_name in pie_chart_labels:\n crop_name = crop_name.lower()\n actual_crop_need = crop_details.query.filter_by(crop=crop_name).first()\n image_path = crop_name + \".jpg\"\n success_chance = pie_chart_values[count]\n weather_info = [round(actual_crop_need.temperature, 2),\n round(actual_crop_need.humidity, 2),\n round(actual_crop_need.rainfall, 2)]\n soil_info = [round(actual_crop_need.n, 2), round(actual_crop_need.p, 2),\n round(actual_crop_need.k, 2), round(actual_crop_need.ph, 2)]\n prodcution_data = crop_name_info.query.filter_by(recommendation_name=crop_name).first()\n prodcution_name = prodcution_data.production_name\n final_list.append(\n {\n 
\"cropName\" : crop_name,\n \"imagePath\": image_path,\n \"successChance\": success_chance,\n \"weatherInfo\": weather_info,\n \"soilInfo\": soil_info,\n \"productionName\": prodcution_name\n })\n count = count + 1\n\n ##User Crop weather info\n user_weather_list = [round(model_temp, 2), round(model_humidity, 2), round(model_rainfall, 2)]\n user_soil_list = [model_n, model_p, model_k, model_ph]\n static_dict = dict()\n static_dict\n\n ## create an response dict\n response_dict = {\n \"Top5CropInfo\": final_list,\n \"static_info\": {\n \"pieChartOfSuccessPercentageLabel\": pie_chart_labels,\n \"pieChartOfSuccessPercentageValue\": pie_chart_values,\n \"weatherBarChartLabel\": weather_list,\n \"soilBarChartLabel\": soil_list,\n \"weatherBarChartUserValue\": user_weather_list,\n \"soilBarChartUserValue\": user_soil_list\n }\n }\n return jsonify(response_dict)",
"def main(planckfile, dustfile, tomofile, colnames, names, pol, res,\\\n part='all', distcut=None):\n if (pol == 'P') or (pol == 'Q') or (pol == 'U'):\n polarisation = True\n elif (pol == 'p') or (pol == 'q') or (pol == 'u') or (pol == 'qu'):\n polarisation = True\n else:\n polarisation = False\n\n print(pol, polarisation)\n\n if distcut is None:\n distcut = 900\n\n if (polarisation is True):\n # read smoothed planck maps.\n print('load planck 353GHz data')\n # read_smooth_maps(filename, name, shape)\n IQU_smaps = smooth.read_smooth_maps(planckfile, names[0], 3)\n dust_smap = smooth.read_smooth_maps(dustfile, names[1], 1)[0]\n T_smap = IQU_smaps[0]\n Q_smap = IQU_smaps[1]\n U_smap = IQU_smaps[2]\n \n Nside = hp.get_nside(T_smap)\n print('Using Nside={}'.format(Nside))\n print(planckfile)\n band = planckfile.split('_')[2]\n if len(band) > 3:\n band = band[:3]\n if band == '15a':\n band = '353'\n print(band)\n\n if int(band) < 353:\n # load cmb intensity and subtract form polarization maps\n cmbfile = 'Data/IQU_Nside{}_CMB_10arcmin.h5'.format(Nside)\n cmbmaps = tools.Read_H5(cmbfile, 'IQU')*1e6\n Q_cmb = cmbmaps[1,:]\n U_cmb = cmbmaps[1,:]\n Q_smap = Q_smap - Q_cmb\n U_smap = U_smap - U_cmb\n \n print(np.mean(Q_smap), np.mean(U_smap))\n #sys.exit()\n # load tomography data:\n data = load.load_tomographydata(tomofile, colnames)\n print('Data loaded, using Nside={}'.format(Nside))\n\n p_map, q_map, u_map, sigma, r_map, pix =\\\n load.tomo_map(data, Nside, part=part, distcut=distcut)\n u_map = -u_map # to Healpix convention\n mask = np.unique(pix)\n print(len(mask))\n u_smap = smooth.smooth_tomo_map(u_map, mask, Nside, res)\n q_smap = smooth.smooth_tomo_map(q_map, mask, Nside, res)\n p_smap = smooth.smooth_tomo_map(p_map, mask, Nside, res)\n print('Tomography maps smoothed')\n print(np.mean(q_smap[mask]), np.mean(dust_smap[mask]), np.mean(Q_smap[mask]))\n dPsi = np.full(len(u_map), hp.UNSEEN)\n #sys.exit()\n\n l, b = tools.convert2galactic(data[:,0], data[:,1])\n theta, phi = hp.pix2ang(Nside, pix) \n lon = np.mean(phi)*180/np.pi\n lat = 90 - np.mean(theta)*180/np.pi\n print(lon, lat)\n\n x = 0.5*np.arctan2(U_smap[mask], Q_smap[mask])\n #x[x<0.] 
+= np.pi\n #x[x>=np.pi] -= np.pi\n\n x_v = 0.5*np.arctan2(u_smap[mask], q_smap[mask])\n #psi_v[psi_v<0] += np.pi\n #psi_v[psi_v>=np.pi] -= np.pi \n print('Polarization angles of planck (mean, min, max) [deg]:')\n print(np.mean(x)*180/np.pi,np.min(x)*180/np.pi, np.max(x)*180/np.pi)\n print(np.mean(x_v)*180/np.pi,np.min(x_v)*180/np.pi,np.max(x_v)*180/np.pi)\n #print(np.mean(x+np.pi/2-psi_v))\n if (pol == 'P') or (pol == 'p'):\n print('-- P polarisation --')\n\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask],\\\n U_smap[mask],u_smap[mask])\\\n #, plot=True, name='smooth2')\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tot_res, frac_res, dust = tools.map_analysis_function(p_smap, T_smap,\\\n dust_smap, mask, Nside)\n\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif (pol == 'Q') or (pol == 'q'):\n print('-- Q polarisation --')\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask], U_smap[mask],\\\n u_smap[mask], plot=True)\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tot_res, frac_res, dust = tools.map_analysis_function(q_smap, Q_smap,\\\n dust_smap, mask, Nside)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif (pol == 'U') or (pol == 'u'):\n print('-- U polarisation --')\n print(len(u_smap))\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask],\\\n U_smap[mask],u_smap[mask], plot=True)\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tot_res, frac_res, dust = tools.map_analysis_function(u_smap, U_smap,\\\n dust_smap, mask, Nside)\n\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif (pol == 'QU') or (pol == 'qu'):\n print('-- Q,U polarisation --')\n print('Return: tomo, planck, dust, mask, dpsi, fullIQU, [lon,lat], r')\n psi, psi_v, psi_s = tools.delta_psi(Q_smap[mask], q_smap[mask],\\\n U_smap[mask], u_smap[mask])\n #, plot=True, name=Nside)\n\n dPsi[mask] = psi\n full_IQU = [T_smap, Q_smap, U_smap]\n tomo = [q_smap, u_smap, p_smap, sigma[1], sigma[2], sigma[0]]\n planck = [Q_smap, U_smap]\n coord = [lon, lat]\n angles = [dPsi[mask], psi_v, psi_s, sigma[3]]\n return(tomo, planck, dust_smap, coord, full_IQU, mask, r_map, angles)\n\n\n else:\n # use unsmoothe maps\n print('Use non smoothed maps')\n # load planck\n print('load planck 353GHz data')\n\n #T, P, Q, U = load.load_planck_map(planckfile, p=True)\n data = load.load_planck_map(planckfile, p=True)\n d353 = load.load_planck_map(dustfile)\n sys.exit()\n dust353 = tools.Krj2Kcmb(d353) * 1e6\n T = T*1e6\n P = P*1e6\n Q = Q*1e6\n U = U*1e6\n Nside = hp.get_nside(T_smap)\n\n data = load.load_tomographydata(tomofile, colnames)\n p_map, q_map, u_map, sigma, r_map, pix = load.tomo_map(data, Nside)\n u_map = -u_map # to Healpix convention\n mask = np.unique(pix)\n\n l, b = tools.convert2galactic(data[:,0], data[:,1])\n lon = np.mean(l)\n lat = np.mean(b)\n\n dPsi = np.full(len(u_map), hp.UNSEEN)\n\n if Ppol == True:\n print('-- P polarisation --')\n psi = tools.delta_psi(Q[mask], q_map[mask], U[mask],\\\n u_map[mask], plot=True)\n dPsi[mask] = psi\n full_IQU = [T, Q, U]\n tot_res, frac_res, dust = tools.map_analysis_function(p_map, T,\\\n dust353, mask)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n elif Qpol == True:\n print('-- Q polarisation --')\n psi = tools.delta_psi(Q[mask], q_map[mask], U[mask],\\\n u_map[mask], plot=True)\n dPsi[mask] = psi\n full_IQU = [T, Q, U]\n tot_res, frac_res, dust = tools.map_analysis_function(q_map, 
Q,\\\n dust353, mask)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)\n\n if Upol == True:\n print('-- U polarisation --')\n psi = tools.delta_psi(Q[mask], q_map[mask], U[mask],\\\n u_map[mask], plot=True)\n dPsi[mask] = psi\n full_IQU = [T, Q, U]\n tot_res, frac_res, dust = tools.map_analysis_function(u_map, U,\\\n dust353, mask)\n return(tot_res, frac_res, dust, [lon, lat], full_IQU, mask, r_map, dPsi)",
"def setup_pwn(name,pwndata,phase, free_radius=5, tempdir=None, emin=1.0e2, emax=1.0e5,maxroi=10,model=None,**kwargs):\n sources=yaml.load(open(pwndata))\n\n catalog_name=sources[name]['catalog']\n ltcube=sources[name]['ltcube']\n pulsar_position=SkyDir(*sources[name]['dir'])\n ft2=sources[name]['ft2']\n ft1=sources[name]['ft1']\n\n # in case no list was passed\n if len(phase)==2 and isinstance(phase[0],numbers.Real) and \\\n isinstance(phase[1],numbers.Real):\n\n # write in case phase wraps around.\n if phase[0]>phase[1]:\n phase=[[phase[0],1.0],[0.0,phase[1]]]\n else:\n phase = [phase] \n\n phase_factor=get_phase_factor(phase)\n print \"phase\"\n print phase\n print \"phase_factor=%.2f\"%phase_factor\n\n catalog=FermiCatalog(e(\"$FERMI/catalogs/gll_psc_v02.fit\"),free_radius=free_radius)\n catalog_source=[i for i in catalog.get_sources(SkyDir(),180) if i.name==catalog_name][0]\n\n center=catalog_source.skydir\n\n if tempdir is None: tempdir=mkdtemp(prefix='/scratch/')\n\n binfile=j(tempdir,'binned_phased.fits')\n\n # apply phase cut to ft1 file\n phased_ft1 = j(tempdir,'ft1_phased.fits')\n phasetools.phase_cut(ft1,phased_ft1,phaseranges=phase)\n\n # create a temporary ltcube scaled by the phase factor\n# phased_ltcube=j(tempdir,'phased_ltcube.fits')\n# phase_ltcube(ltcube,phased_ltcube, phase=phase)\n phased_ltcube=ltcube\n from uw.like.pointspec import DataSpecification\n data_specification = DataSpecification(\n ft1files = phased_ft1,\n ft2files = ft2,\n ltcube = phased_ltcube,\n binfile = binfile)\n\n spectral_analysis = SpectralAnalysis(data_specification,\n binsperdec = 4,\n emin = 100,\n emax = 100000,\n irf = \"P6_V3_DIFFUSE\",\n roi_dir = center,\n maxROI = maxroi,\n minROI = maxroi)\n\n if model == None :\n roi=spectral_analysis.roi(\n roi_dir=center,\n diffuse_sources=get_default_diffuse(diffdir=e(\"$FERMI/diffuse\"),\n gfile=\"gll_iem_v02.fit\",\n ifile=\"isotropic_iem_v02.txt\"),\n catalogs = catalog,\n phase_factor = 1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n else :\n roi=spectral_analysis.roi(\n roi_dir=center,\n xmlfile = model,\n phase_factor =1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n\n print \"---------------------Energy range--------------------\"\n \n print \"emin=\"+str(roi.bands[0].emin)+\"\\n\"\n print \"emax=\"+str(roi.bands[len(roi.bands)-1].emax)+\"\\n\"\n \n\n # keep overall flux of catalog source,\n # but change the starting index to 2.\n roi.modify(which=catalog_name, name=name, index=2, \n keep_old_flux=True)\n\n return roi",
"def water_delay(block_size):\n\n\tdirectory = \"/local/scratch/sam5g13/Sam_5th-yr_Project/test_data\"\n\tfile_name = \"{}/tip4p2005_50_TOTEST.npy\".format(directory)\n\tgnuplot = r'/usr/bin/gnuplot'\n\n\n\tfile_data = np.load(file_name, mmap_mode='r')\n\n\t_, _, _, gamma, _ = file_data \n\n\tgamma_sample = blocksav(gamma, block_size)\n\n\tgamma_file = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\twith open(gamma_file, 'w') as outfile:\n\t\tnp.savetxt(outfile, gamma_sample)\n\n\tgamma_file_name = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\n\tcorrelations = subprocess.check_output([\"corr\", gamma_file_name])\n\t\n\tmutual_information = subprocess.check_output([\"mutual\", gamma_file_name])\n\n\tcorrelation_array = np.array(correlations.split()[5:], dtype=float)\n\tmutual_information_array = np.array(mutual_information.split()[2:], dtype=float)\n\n\tidx_odd = range(1,199,2)\n\tidx_even = range(0,200,2)\n\n\tidx_odd1 = range(1,43,2)\n\tidx_even1 = range(0,44,2)\n\n\t#correlation_values = correlation_array[idx_odd]\n\tmutual_information_values = mutual_information_array[idx_odd1]\n\tprint 'LOOK HERE...........................................', mutual_information_array[idx_odd1], len(mutual_information_array[idx_odd1])\n\n\t\"\"\"\n\tdelay_length = 0\n\n\tfor o in range(len(correlation_values) - 1):\n\t\tprint o, correlation_values[o], correlation_values[o+1]\n\t\tif correlation_values[o] > correlation_values[o+1]:\n\t\t\tdelay_length = o \n\t\telse: break\n\t\n\tdelay_length = delay_length + 1\n\n\tprint \"The delay length is\", delay_length\n\t\"\"\"\n\n\tmutual_info_length = 0\n\n\tfor o in range(len(mutual_information_values) - 1):\n\t\t#print o, correlation_values[o], correlation_values[o+1]\n\t\tif mutual_information_values[o] > mutual_information_values[o+1]:\n\t\t\tmutual_info_length = o \n\t\telse: break\n\t\n\tmutual_info_length = mutual_info_length + 1\n\t\n\tprint \"The mutual info length is\", mutual_info_length\n\n\t#assert \tdelay_length == mutual_info_length, \"The minimums of the mutual information and the correlations are not equal! %d %d\" % (delay_length, mutual_info_length)\n\t\n\tproduce_delays = subprocess.check_output([\"delay\", gamma_file_name, \"-d\" + str(mutual_info_length)])\n\n\t\n\tdelay_file = \"{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt\".format(directory, block_size, mutual_info_length)\n\tf = open(delay_file, 'w')\n\tf.write(produce_delays)\n\tf.close()\n\n\t\"\"\"\n\n\tprint produce_delays\n\tprint len(produce_delays), len(mutual_information_values)\n\tplt.figure(\"produce_delays vs mutual information\")\n\tplt.xlabel(\"produce_delays\")\n\tplt.ylabel(\"Mutual information\")\n\tplt.plot(produce_delays, mutual_information_values)\n\tplt.show()\n\t\n\t\"\"\"\n\t\n\tembedding = subprocess.check_output([\"false_nearest\", gamma_file_name])\n\n\tembedding_dimension = int(raw_input(\"What embedding dimension would you like to use? 
\"))\n\t\n\trun_calc = subprocess.check_output(['gnuplot', '-e', \"filename='{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt';ofilename='tip4p2005_50_blocksize_{}_gamma_delay_{}_graph.png'\".format(directory, block_size, mutual_info_length, block_size, mutual_info_length ),\"plot.gnu\"])\n\n\n\t\"\"\"Imports the time series and specifies each aspect used in building the recurrence matrix\"\"\"\n\n\tsettings = Settings(time_series = gamma_sample, embedding_dimension = embedding_dimension, time_delay = mutual_info_length, similarity_measure = EuclideanMetric, neighbourhood = FixedRadius(radius = 13), min_diagonal_line_length = 2, min_vertical_line_length = 2)\n\n\t\"\"\"Performs the computation and prints out all the results\"\"\"\n\n\trqacomputation = RQAComputation.create(settings, verbose = True)\n\n\trqaresult = rqacomputation.run()\n\n\tprint rqaresult\n\n\t\"\"\"Creates the Recurrence matrix for viewing\"\"\"\n\n\trpcomputation = RecurrencePlotComputation.create(settings)\n\n\trpresult = rpcomputation.run()\n\n\tImageGenerator.save_recurrence_plot(rpresult.recurrence_matrix, 'recurrence_plot.png')",
"def prepare_and_save(path):\n \n raw, timestamp = ur.MNE_Read_EDF(path)\n \n #Use the time columns to create MNE events structure\n events_log, event_id = clean_log(path)\n event_sample_indexes = ur.parse_events(events_log, timestamp)\n events = ur.events_for_MNE(event_sample_indexes, event_id)\n \n #Add response correct/incorrect to events\n new_events, new_event_id = expand_events(path, events, event_id)\n #Crop the data to include only the time between start and stop of the experiment - many artifacts outside this interval \n raw_cropped = raw.copy().crop(tmin = events[0,0]/raw.info['sfreq'], tmax = events[-1,0]/raw.info['sfreq'])\n #Since the raw was cropped to the time of the first event its' new time is now 0. All following events are shifted.\n new_events[:,0] = new_events[:,0] - new_events[0,0]\n \n #Delete bad channels, ears and visually identified channels\n ears = [ch for ch in raw_cropped.ch_names if 'A' in ch]\n raw_cropped = raw_cropped.drop_channels(ears)\n \n subject_bads = {'Adrianna': ['T4'], 'BartekB' : ['Pz'], 'JeremiaszW' : [], 'KonradW' : ['T3'], 'Lucja' : ['T4', 'F8'], 'MaciekG':[], 'MariuszZ' : [], 'OlaS' :['P4'], 'Patrycja' :[]}\n bads = subject_bads[path.split('\\\\')[-3]]\n if len(bads) != 0:\n raw_cropped = raw_cropped.drop_channels(bads)\n \n #Apply average re-reference\n raw_cropped.save('raw_cropped/' + path.split('\\\\')[-3] +'_raw_cropped.fif', overwrite = True)\n return raw_cropped, new_events, new_event_id",
"def usped(self):\n\n # assign variables\n ls_factor = 'ls_factor'\n slope = 'slope'\n aspect = 'aspect'\n flowacc = 'flowacc'\n qsx = 'qsx'\n qsxdx = 'qsxdx'\n qsy = 'qsy'\n qsydy = 'qsydy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_qsxdx = 'grow_qsxdx'\n grow_qsydy = 'grow_qsydy'\n erdep = 'erdep' # kg/m^2s\n sedflow = 'sedflow'\n\n # parse, advance, and stamp time\n (evolved_elevation, time, depth, sediment_flux, erosion_deposition,\n difference) = self.parse_time()\n\n # compute event-based erosivity (R) factor (MJ mm ha^-1 hr^-1 yr^-1)\n r_factor = self.event_based_r_factor()\n\n # compute slope and aspect\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n aspect=aspect,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=aspect,\n value=grow_aspect,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{aspect}={grow_aspect}\".format(\n aspect=aspect,\n grow_aspect=grow_aspect),\n overwrite=True)\n\n # compute flow accumulation\n gscript.run_command(\n 'r.watershed',\n elevation=self.elevation,\n accumulation=flowacc,\n flags=\"a\",\n overwrite=True)\n region = gscript.parse_command(\n 'g.region', flags='g')\n res = region['nsres']\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{depth}\"\n \"=({flowacc}*{res})\".format(\n depth=depth,\n flowacc=flowacc,\n res=res),\n overwrite=True)\n # add depression parameter to r.watershed\n # derive from landcover class\n\n\n # compute dimensionless topographic factor\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{ls_factor}\"\n \"=({flowacc}^{m})*(sin({slope})^{n})\".format(\n ls_factor=ls_factor,\n m=self.m,\n flowacc=depth,\n slope=slope,\n n=self.n),\n overwrite=True)\n\n # compute sediment flow at sediment transport capacity\n \"\"\"\n T = R * K * C * P * LST\n where\n T is sediment flow at transport capacity\n R is rainfall factor\n K is soil erodibility factor\n C is a dimensionless land cover factor\n P is a dimensionless prevention measures factor\n LST is the topographic component of sediment transport capacity\n of overland flow\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sedflow}\"\n \"={r_factor}\"\n \"*{k_factor}\"\n \"*{c_factor}\"\n \"*{ls_factor}\".format(\n r_factor=r_factor,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n ls_factor=ls_factor,\n sedflow=sedflow),\n overwrite=True)\n\n # convert sediment flow from tons/ha/yr to kg/m^2s\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{converted_sedflow}\"\n \"={sedflow}\"\n \"*{ton_to_kg}\"\n \"/{ha_to_m2}\"\n \"/{yr_to_s}\".format(\n converted_sedflow=sediment_flux,\n sedflow=sedflow,\n ton_to_kg=1000.,\n ha_to_m2=10000.,\n yr_to_s=31557600.),\n overwrite=True)\n\n # compute sediment flow rate in x direction (m^2/s)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsx}={sedflow}*cos({aspect})\".format(\n sedflow=sediment_flux,\n aspect=aspect, qsx=qsx),\n overwrite=True)\n\n # compute sediment flow rate in y direction (m^2/s)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsy}={sedflow}*sin({aspect})\".format(\n sedflow=sediment_flux,\n aspect=aspect,\n qsy=qsy),\n overwrite=True)\n\n # compute change in sediment flow in x 
direction\n # as partial derivative of sediment flow field\n gscript.run_command(\n 'r.slope.aspect',\n elevation=qsx,\n dx=qsxdx,\n overwrite=True)\n\n # compute change in sediment flow in y direction\n # as partial derivative of sediment flow field\n gscript.run_command(\n 'r.slope.aspect',\n elevation=qsy,\n dy=qsydy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=qsxdx,\n value=grow_qsxdx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsxdx}={grow_qsxdx}\".format(\n qsxdx=qsxdx,\n grow_qsxdx=grow_qsxdx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=qsydy,\n value=grow_qsydy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsydy}={grow_qsydy}\".format(\n qsydy=qsydy,\n grow_qsydy=grow_qsydy),\n overwrite=True)\n\n # compute net erosion-deposition (kg/m^2s)\n # as divergence of sediment flow\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erdep} = {qsxdx} + {qsydy}\".format(\n erdep=erdep,\n qsxdx=qsxdx,\n qsydy=qsydy),\n overwrite=True)\n\n # filter outliers\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosion_deposition}\"\n \"=if({erdep}<{erdepmin},\"\n \"{erdepmin},\"\n \"if({erdep}>{erdepmax},{erdepmax},{erdep}))\".format(\n erosion_deposition=erosion_deposition,\n erdep=erdep,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax),\n overwrite=True)\n\n # set color table\n gscript.write_command(\n 'r.colors',\n map=erosion_deposition,\n rules='-',\n stdin=erosion_colors)\n\n # evolve landscape\n \"\"\"\n change in elevation (m)\n = change in time (s)\n * net erosion-deposition (kg/m^2s)\n / sediment mass density (kg/m^3)\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{evolved_elevation}\"\n \"={elevation}\"\n \"+({rain_interval}*60\"\n \"*{erosion_deposition}\"\n \"/{density})\".format(\n evolved_elevation=evolved_elevation,\n elevation=self.elevation,\n rain_interval=self.rain_interval,\n erosion_deposition=erosion_deposition,\n density=self.density),\n overwrite=True)\n\n # gravitational diffusion\n evolved_elevation = self.gravitational_diffusion(evolved_elevation)\n\n # compute elevation change\n difference = self.compute_difference(evolved_elevation, difference)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['slope',\n 'aspect',\n 'flowacc',\n 'qsx',\n 'qsy',\n 'qsxdx',\n 'qsydy',\n 'grow_slope',\n 'grow_aspect',\n 'grow_qsxdx',\n 'grow_qsydy',\n 'erdep',\n 'sedflow',\n 'r_factor',\n 'ls_factor'],\n flags='f')\n\n return (evolved_elevation, time, depth, erosion_deposition, difference)",
"def water_uptake_feddes(self, soil):\r\n\r\n # Value of the pressure head, below which roots extract water at the\r\n # maximum possible rate\r\n P1 = soil.field_capacity_water_potential.mean()#-25 # J/kg\r\n P3 = soil.perm_wilt_point_pot.mean()#-8000 # J/kg wilting point\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n for lyr in soil.layers:\r\n stress_fact = feddes_stress_factor(transp_pot,\r\n soil.water_potential[lyr],\r\n self.P0, P1, self.P2L, self.P2H,\r\n P3, self.R2H, self.R2L)\r\n self.water_uptake[lyr] = (stress_fact * self.root_fraction[lyr] *\r\n transp_pot)\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += self.expect_transp\r\n self.transp_ratio = self.att_transp / transp_pot",
"def farm(cps):\n # Head of unit\n mask = cps['tc8_p'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(13. + 1. * rand)\n new_vals = np.where(new_vals < 25000., 25000., new_vals)\n cps.loc[mask, 'filp'] = new_vals\n # spouse of unit\n mask = cps['tc8_s'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(13. + 1. * rand)\n new_vals = np.where(new_vals < 25000., 25000., new_vals)\n cps.loc[mask, 'fils'] = new_vals",
"def runGood():\n forwardModelJointFit(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'),\n out='J600nm54k', wavelength='600nm') #kernel around 0.3, 0.33\n forwardModelJointFit(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'),\n out='J700nm52k', wavelength='700nm') #around 0.3, 0.31\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[2],], out='G800nm',\n wavelength='l800') #around 0.305/315 and 0.295/0.3\n forwardModelJointFit(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'),\n out='J800nm', wavelength='800nm') #around 0.3, 0.3\n forwardModelJointFit(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'),\n out='J890nm50k', wavelength='890nm') #around 0.285, 0.29",
"def runse(self):\n\n # check for se catalog\n\n \n\n t = self.image.split('.fits')\n froot = t[0]\n # check for se catalog\n secat = froot+'.cat'\n\n os.system('ln -s ' +self.astrodir + '/default.* .') \n if self.instrument == 'h':\n defaultcat = 'default.sex.HDI'\n elif self.instrument == 'i':\n defaultcat = 'default.sex.INT'\n self.keepsection=[1000,5000,0,4000]\n elif self.instrument == 'm':\n defaultcat = 'default.sex.HDI'\n elif self.instrument == 'b':\n print(\"hey Rose - \")\n print(\"using default.sex.BOK!!!\")\n print()\n defaultcat = 'default.sex.BOK.getzp'\n header = fits.getheader(self.image)\n try:\n expt = header['EXPTIME']\n except KeyError:\n expt = 1.\n ADUlimit = 40000.\n if self.instrument == 'i':\n if (self.filter == 'r'):\n ADUlimit = 400000./60#/float(expt)\n elif self.filter == 'ha':\n ADUlimit = 40000./180.\n #print('saturation limit in ADU/s {:.1f}'.format(ADUlimit))\n if self.fwhm is None:\n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)\n #t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '\n if self.verbose:\n print('running SE first time to get estimate of FWHM')\n print(t)\n os.system(t)\n\n # clean up SE files\n # skipping for now in case the following command accidentally deletes user files\n # os.system('rm default.* .')\n\n\n ###################################\n # Read in Source Extractor catalog\n ###################################\n if self.verbose:\n print('reading in SE catalog from first pass')\n secat_filename = froot+'.cat'\n self.secat = fits.getdata(secat_filename,2)\n self.secat0 = self.secat\n # get median fwhm of image\n # for some images, this comes back as zero, and I don't know why\n fwhm = np.median(self.secat['FWHM_IMAGE'])*self.pixelscale\n \n \n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)+' -SEEING_FWHM '+str(fwhm)\n if float(fwhm) == 0:\n print('WARNING: measured FWHM is zero!')\n if self.verbose:\n print('running SE again with new FWHM to get better estimate of CLASS_STAR')\n else:\n t = 'sex ' + self.image + ' -c '+defaultcat+' -CATALOG_NAME ' + froot + '.cat -MAG_ZEROPOINT 0 -SATUR_LEVEL '+str(ADUlimit)+' -SEEING_FWHM '+str(self.fwhm)\n if self.verbose:\n print(t)\n print('running SE w/user input for FWHM to get better estimate of CLASS_STAR') \n #############################################################\n # rerun Source Extractor catalog with updated SEEING_FWHM\n #############################################################\n\n #print(t)\n os.system(t)\n self.read_se_cat()",
"def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target",
"def __stage_du_to_pj(self, pilotdata, pilotjob):\n pass",
"def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n\n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n print 'Using vanadium mass: ',van_mass\n print ' sample mass: ',samp_mass \n print ' sample_rmm : ',samp_rmm \n # check if mono-vanadium is provided as multiple files list or just put in brackets ocasionally\n if isinstance(mono_van,list):\n if len(mono_van)>1:\n raise IOError(' Can currently work only with single monovan file but list supplied')\n else:\n mono_van = mono_van[0];\n\n \n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', 
kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n map_file = \"\"\n print 'one2one selected'\n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file = map_file+'.map'\n reducer.map_file = map_file;\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integration range to: ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n\n \n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n if (kwargs.get('hardmaskOnly')): \n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n else:\n specs=\"\"\n \n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking =mtd['mask_wksp']\n else:\n print '########### Run diagnose for sample run ##############'\n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking) \n print 'first Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n fail_list=get_failed_spectra_list(masking) \n else:\n print '########### Run diagnose for monochromatic vanadium run ##############'\n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2 \n reducer.spectra_masks=total_mask \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(total_mask)\n #fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n \n \n #Run the conversion first on the sample\n deltaE_wkspace_sample = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n\n \n if kwargs.has_key('mono_correction_factor'):\n absnorm_factor=kwargs.get('mono_correction_factor')\n print 'Using supplied correction factor for absolute units'\n else:\n print '##### Evaluate the integral from the monovan run and calculate the correction factor ######'\n print ' Using absolute units vanadion integration range : ', reducer.monovan_integr_range \n #now on the mono_vanadium run swap the mapping file\n reducer.map_file = monovan_mapfile \n deltaE_wkspace_monovan = reducer.convert_to_energy(mono_van, ei_guess, wb_mono)\n \n (absnorm_factorL,absnorm_factorSS,absnorm_factorP,absnorm_factTGP) = getAbsNormalizationFactor(deltaE_wkspace_monovan.getName(),str(reducer.monovan_integr_range[0]),str(reducer.monovan_integr_range[1])) \n \n print 'Absolute correction factor S^2 =',absnorm_factorSS,' Libisis: ',absnorm_factorL,' Puasonian: ',absnorm_factorP, ' TGP : ',absnorm_factTGP\n 
CreateSingleValuedWorkspace(OutputWorkspace='AbsFactor',DataValue=absnorm_factTGP)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace_sample.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found for sample run ',ei,' meV'\n print 'Incident energy found for mono vanadium run ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace_sample,OutputWorkspace=results_name)\n if results_name != wksp_out:\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n Divide(LHSWorkspace=wksp_out,RHSWorkspace='AbsFactor',OutputWorkspace=wksp_out)\n DeleteWorkspace(Workspace='AbsFactor')\n return mtd[wksp_out]"
] |
[
"0.63158184",
"0.6163963",
"0.60120946",
"0.594974",
"0.5815151",
"0.5648626",
"0.56108695",
"0.54560167",
"0.54519814",
"0.5440802",
"0.53856397",
"0.53609353",
"0.53162545",
"0.52887416",
"0.5240997",
"0.5221457",
"0.5217004",
"0.5190389",
"0.5163781",
"0.5143684",
"0.5138386",
"0.5120784",
"0.51185304",
"0.51156133",
"0.5110635",
"0.50986314",
"0.5094582",
"0.5088088",
"0.50824106",
"0.5082086"
] |
0.6273377
|
1
|
Given a list of pysam.cbcf.VariantRecord objects, split it into multiple lists, one for each chromosome copy
|
def split_copies(region, vl):
    """Split a list of pysam VariantRecords into one list of variants per chromosome copy."""
    # Sniff out the ploidy from the genotype (GT) of the first record
if len(vl) == 0:
logger.warning('Empty region ({}), assuming diploid'.format(region))
ploidy = 2
else:
ploidy = len(vl[0].samples[0]['GT'])
logger.debug('Region: {}, ploidy: {}'.format(region, ploidy))
    # (A commented-out alternative here returned 'v' as a dict keyed by the
    #  genotype string of each copy, e.g. '0|1', rather than as a list per copy.)
return {
'region': region,
'v': [
list(filter(None, (parse(v, cpy=cpy) for v in vl)))
for cpy in range(ploidy)
]
}
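
# Usage sketch (illustrative, not part of the original function): assuming
# `vcf_fp` is an open pysam.VariantFile already subset to a single sample and
# `regions` is an iterable of (contig, start, stop) tuples, e.g. read from a
# BED file, split_copies() can be driven once per region like this:
def split_all_regions(vcf_fp, regions):
    # One split_copies() result per region, in the order the regions are given.
    return [
        split_copies(region,
                     list(vcf_fp.fetch(contig=region[0], start=region[1], stop=region[2])))
        for region in regions
    ]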
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unsplit(self, variant_groups):\n for vargroup in variant_groups:\n self.variant_list.extend(vargroup.variant_list)\n self.pos = min([var.start for var in self.variant_list])\n self.end = max([var.end for var in self.variant_list])",
"def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list",
"def split_and_trim(self):\n indeces_grouped = []\n rejected_variants = []\n groups = self._get_subgroups()\n variant_groups = []\n for group in groups:\n variant_list = []\n for i in group:\n indeces_grouped.append(i)\n variant_list.append(self.variant_list[i])\n var_id = '{}_{}'.format(variant_list[0].CHROM, min([var.start for var in variant_list]))\n variant_groups.append(VariantGroup(var_id, variant_list))\n for v, variant in enumerate(self.variant_list):\n if v not in indeces_grouped:\n rejected_variants.append(variant)\n return variant_groups, rejected_variants",
"def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n flag += 1\n flag %= 10",
"def variant_export_lines(store, case_obj, variants_query):\n\n export_variants = []\n\n for variant in variants_query:\n variant_line = []\n position = variant['position']\n change = variant['reference']+'>'+variant['alternative']\n variant_line.append(variant['rank_score'])\n variant_line.append(variant['chromosome'])\n variant_line.append(position)\n variant_line.append(change)\n variant_line.append('_'.join([str(position), change]))\n\n # gather gene info:\n gene_list = variant.get('genes') #this is a list of gene objects\n gene_ids = []\n gene_names = []\n hgvs_c = []\n\n # if variant is in genes\n if len(gene_list) > 0:\n for gene_obj in gene_list:\n hgnc_id = gene_obj['hgnc_id']\n gene_name = gene(store, hgnc_id)['symbol']\n\n gene_ids.append(hgnc_id)\n gene_names.append(gene_name)\n\n hgvs_nucleotide = '-'\n # gather HGVS info from gene transcripts\n transcripts_list = gene_obj.get('transcripts')\n for transcript_obj in transcripts_list:\n if transcript_obj.get('is_canonical') and transcript_obj.get('is_canonical') is True:\n hgvs_nucleotide = str(transcript_obj.get('coding_sequence_name'))\n hgvs_c.append(hgvs_nucleotide)\n\n variant_line.append(';'.join( str(x) for x in gene_ids))\n variant_line.append(';'.join( str(x) for x in gene_names))\n variant_line.append(';'.join( str(x) for x in hgvs_c))\n else:\n while i < 4:\n variant_line.append('-') # instead of gene ids\n i = i+1\n\n variant_gts = variant['samples'] # list of coverage and gt calls for case samples\n for individual in case_obj['individuals']:\n for variant_gt in variant_gts:\n if individual['individual_id'] == variant_gt['sample_id']:\n # gather coverage info\n variant_line.append(variant_gt['allele_depths'][0]) # AD reference\n variant_line.append(variant_gt['allele_depths'][1]) # AD alternate\n # gather genotype quality info\n variant_line.append(variant_gt['genotype_quality'])\n\n variant_line = [str(i) for i in variant_line]\n export_variants.append(\",\".join(variant_line))\n\n return export_variants",
"def getSubsampleList(vcfname, ss_count):\n\n vcf_o = pysam.VariantFile(vcfname)\n rec = next(vcf_o)\n vcf_o.close()\n lst = []\n for samp in rec.samples:\n lst.append(samp)\n return lst[:int(ss_count)]",
"def removeDuplicates(self,covariateList,bands):\n\t\t\n\t\treturn [elem for elem in covariateList if elem not in bands]",
"def get_merged_variants(self, variants, key):\n # type: (List[vcfio.Variant], str) -> List[vcfio.Variant]\n raise NotImplementedError",
"def break_list_to_sub_list(self, full_list, chunk_size = 45):\n if chunk_size < 1:\n chunk_size = 1\n return [full_list[i:i + chunk_size] for i in range(0, len(full_list), chunk_size)]",
"def load_variant_file(fname, sample, bed_fname):\n mode = 'rb' if fname.endswith('bcf') else 'r'\n vcf_fp = pysam.VariantFile(fname, mode)\n vcf_fp.subset_samples([sample])\n return [\n split_copies(region, [v for v in vcf_fp.fetch(contig=region[0], start=region[1], stop=region[2])])\n for region in read_bed(bed_fname)\n ]",
"def make_slices(big_scriptlist):\n num_cores = multiprocessing.cpu_count()\n list_of_scriptlists = [] # This will be our output.\n incrementlist = range(0,len(big_scriptlist),num_cores) # How we increment.\n for i in incrementlist:\n list_of_scriptlists.append(big_scriptlist[i:i+num_cores])\n return list_of_scriptlists",
"def variant_case(store, case_obj, variant_obj):\n case_obj['bam_files'] = []\n case_obj['mt_bams'] = []\n case_obj['bai_files'] = []\n case_obj['mt_bais'] = []\n case_obj['sample_names'] = []\n for individual in case_obj['individuals']:\n bam_path = individual.get('bam_file')\n mt_bam = individual.get('mt_bam')\n case_obj['sample_names'].append(individual.get('display_name'))\n if bam_path and os.path.exists(bam_path):\n case_obj['bam_files'].append(individual['bam_file'])\n case_obj['bai_files'].append(find_bai_file(individual['bam_file']))\n if mt_bam and os.path.exists(mt_bam):\n case_obj['mt_bams'].append(individual['mt_bam'])\n case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))\n\n else:\n LOG.debug(\"%s: no bam file found\", individual['individual_id'])\n\n try:\n genes = variant_obj.get('genes', [])\n if len(genes) == 1:\n hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])\n if hgnc_gene_obj:\n vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)\n case_obj['region_vcf_file'] = vcf_path\n else:\n case_obj['region_vcf_file'] = None\n elif len(genes) > 1:\n chrom = variant_obj['genes'][0]['common']['chromosome']\n start = min(gene['common']['start'] for gene in variant_obj['genes'])\n end = max(gene['common']['end'] for gene in variant_obj['genes'])\n # Create a reduced VCF with variants in the region\n vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)\n case_obj['region_vcf_file'] = vcf_path\n except (SyntaxError, Exception):\n LOG.warning(\"skip VCF region for alignment view\")",
"def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]",
"def vcf2bambino(record):\n chr = \"chr\" + record[0]\n pos = int(record[1])\n ref = record[3]\n\n # multiallelic contains multiple alt\n alts = record[4].split(\",\")\n\n parsed = []\n for alt in alts:\n n = count_padding_bases(ref, alt)\n # insertion\n if len(ref) < len(alt):\n idl_type = 1\n idl_seq = alt[n:]\n idl = IndelSnpFeatures(chr, pos + n, idl_type, idl_seq)\n parsed.append(idl)\n # deletion\n elif len(ref) > len(alt):\n idl_type = 0\n idl_seq = ref[n:]\n idl = IndelSnpFeatures(chr, pos + n, idl_type, idl_seq)\n parsed.append(idl)\n else:\n pass\n\n return parsed",
"def ShardList(list_to_shard, total_shards, shard_idx):\n length = len(list_to_shard)\n split_lists = []\n for i in range(total_shards):\n start_idx = i * length // total_shards\n end_idx = (i + 1) * length // total_shards\n split_lists.append(list_to_shard[start_idx: end_idx])\n\n return split_lists[shard_idx]",
"def mel_ncRNA_list(list):\n\tncRNA = [] #initiates list\n\tfor i in list:\n\t\tif i[2] == 'ncRNA':\n\t\t\tpreidRNA = i[8].split(';')[0]\n\t\t\t#[ID=FBgn0031208];Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'\n\t\t\tncRNA.append(preidRNA)\n\treturn ncRNA\n\t#['ID=FBtr0309810', 'ID=FBtr0347585', 'ID=FBtr0345732', 'ID=FBtr0345733', 'ID=FBtr0344052', 'ID=FBtr0344053', 'ID=FBtr0344032', 'ID=FBtr0336836', 'ID=FBtr0336837', 'ID=FBtr0336984', 'ID=FBtr0336985', 'ID=FBtr0336986', 'ID=FBtr0336987', 'ID=FBtr0336988', 'ID=FBtr0347594', 'ID=FBtr0347595']",
"def extractBlockPairs( mafLineList, hplList, options, data ):\n for i in xrange( 0, len( mafLineList )):\n if mafLineList[ i ].genome not in data.genomesDict:\n continue\n for j in xrange( i + 1, len( mafLineList )):\n if mafLineList[ j ].genome not in data.genomesDict:\n continue\n if mafLineList[ i ].genome == mafLineList[ j ].genome:\n continue\n createMafBlockFromPair( mafLineList[ i ], mafLineList[ j ], hplList, options, data )",
"def split_list_into_chunks(lines):\n qas = []\n qa = []\n for line in lines:\n if line == '\\n':\n qas.append(qa)\n qa = []\n continue\n qa.append(line[:-1]) # remove '\\n' at the end of each line\n return qas",
"def split_shards(original_list, split_fractions):\n\n assert np.isclose(\n sum(split_fractions), 1.0\n ), f\"Split fractions do not sum to 1: {sum(split_fractions)}\"\n\n original_list = [str(x) for x in sorted(original_list)]\n\n sublists = []\n prev_index = 0\n for weight in split_fractions:\n next_index = prev_index + int(round((len(original_list) * weight), 0))\n sublists.append(original_list[prev_index:next_index])\n prev_index = next_index\n\n assert sum([len(x) for x in sublists]) == len(original_list), \"Split size mismatch\"\n\n if not all(len(x) > 0 for x in sublists):\n logger.warning(\"Unexpected shard distribution encountered - trying to fix this\")\n if len(split_fractions) == 3:\n if len(sublists[0]) > 2:\n sublists[0] = original_list[:-2]\n sublists[1] = original_list[-2:-1]\n sublists[2] = original_list[-1:]\n else:\n raise ValueError(\n f\"Not enough shards (#{len(original_list)}) for new distribution\"\n )\n\n elif len(split_fractions) == 2:\n sublists[0] = original_list[:-1]\n sublists[1] = original_list[-1:]\n else:\n raise ValueError\n logger.warning(f\"New shard split: {sublists}\")\n\n if len(sublists) != 3:\n logger.warning(\"No test shards specified\")\n sublists.append(None)\n\n return sublists",
"def __init__(self, chromosome_list):\n\n self.chromosome_list = [make_chromosome(chromosome) for chromosome in chromosome_list]\n self.mating_pool = []\n self.next_population = []",
"def combine_list_allformats(\n reference_genome_list, new_ref_genome_label, project):\n rg_dataset_list = []\n for ref_genome in reference_genome_list:\n rg_dataset_tup = None\n for dataset_type in [\n Dataset.TYPE.REFERENCE_GENOME_GENBANK,\n Dataset.TYPE.REFERENCE_GENOME_FASTA]:\n filter_result = ref_genome.dataset_set.filter(type=dataset_type)\n if len(filter_result):\n rg_dataset_tup = (ref_genome, filter_result[0])\n break\n if (not rg_dataset_tup or\n not os.path.exists(rg_dataset_tup[1].get_absolute_location())):\n return {\n 'is_success': False,\n 'error_msg': 'All reference genomes must have an associated \\\n FASTA or Genbank dataset'\n }\n else:\n rg_dataset_list.append(rg_dataset_tup)\n assert len(rg_dataset_list) == len(reference_genome_list)\n\n # Read the datasets into Biopython SeqRecord objects.\n rg_seqrecord_list = []\n seqrecord_ids = []\n seqrecord_descriptions = []\n for rg, dataset in rg_dataset_list:\n with open(dataset.get_absolute_location()) as input_fh:\n for record in SeqIO.parse(\n input_fh, DATASET_TO_SEQIO_FORMAT[dataset.type]):\n rg_seqrecord_list.append((rg, record))\n seqrecord_ids.append('_'.join([\n remove_whitespace(rg.label)[:7],\n remove_whitespace(record.id)[:8]]))\n seqrecord_descriptions.append(record.description)\n\n # Create a new ReferenceGenome.\n new_ref_genome = ReferenceGenome.objects.create(\n project=project,\n label=new_ref_genome_label)\n\n # If ReferenceGenome label and Chromosome id are the same, there will be\n # duplicate seqrecord_ids: resolve by including numeric prefix in id\n seq_record_list = []\n MAX_LOCUS_NAME_LEN = 16\n unique_id_len = len(str(len(seqrecord_ids)))\n label_len = (MAX_LOCUS_NAME_LEN - 2 - unique_id_len) / 2\n for i, seqrecord_id in enumerate(seqrecord_ids):\n rg, seqrecord = rg_seqrecord_list[i]\n\n if seqrecord_ids.count(seqrecord_id) == 1:\n unique_seqrecord_id = seqrecord_id\n else:\n unique_seqrecord_id = '_'.join([\n str(i),\n remove_whitespace(rg.label)[:label_len],\n remove_whitespace(seqrecord.id)[:label_len]])\n\n seqrecord.seq.alphabet = ambiguous_dna\n seqrecord.name = unique_seqrecord_id\n seqrecord.id = unique_seqrecord_id\n\n if seqrecord_descriptions.count(seqrecord.description) > 1:\n seqrecord.description = ' '.join([\n seqrecord.description,\n 'from Reference Genome:', rg.label])\n\n seq_record_list.append(seqrecord)\n Chromosome.objects.create(\n reference_genome=new_ref_genome,\n label=seqrecord.id,\n seqrecord_id=seqrecord.id,\n num_bases=len(seqrecord))\n\n # Generate a filename from the label with non-alphanumeric characters\n # replaced by underscores.\n filename_prefix = generate_safe_filename_prefix_from_label(\n new_ref_genome_label)\n does_list_include_genbank = (\n Dataset.TYPE.REFERENCE_GENOME_GENBANK in\n [rg_dataset_tup[1].type for rg_dataset_tup in rg_dataset_list])\n\n if does_list_include_genbank:\n filename = filename_prefix + '.gb'\n else:\n filename = filename_prefix + '.fa'\n new_file_dest = os.path.join(new_ref_genome.get_model_data_dir(), filename)\n\n # Write the result.\n if does_list_include_genbank:\n ref_genome_dataset_type = Dataset.TYPE.REFERENCE_GENOME_GENBANK\n else:\n ref_genome_dataset_type = Dataset.TYPE.REFERENCE_GENOME_FASTA\n output_file_format = DATASET_TO_SEQIO_FORMAT[ref_genome_dataset_type]\n\n with open(new_file_dest, 'w') as output_fh:\n SeqIO.write(seq_record_list, output_fh, output_file_format)\n\n # Create a dataset which will point to the file. 
This step must happen\n # after writing the file because a signal will be triggered which requires\n # the Genbank to exist already.\n add_dataset_to_entity(\n new_ref_genome, ref_genome_dataset_type, ref_genome_dataset_type,\n new_file_dest)\n\n return {\n 'is_success': True,\n 'new_reference_genome': new_ref_genome\n }",
"def separate_by_list(self, criterium, reshape=False):\n\n separated_seqs = {}\n\n for s in self.unstructured():\n key = criterium(s)\n if key in separated_seqs:\n separated_seqs[key].append(s)\n else:\n separated_seqs[key] = [s]\n\n for key, seqs in separated_seqs.items():\n if reshape:\n separated_seqs[key] = DataArray(separated_seqs[key]).reshape(self.shape)\n else:\n separated_seqs[key] = DataArray(separated_seqs[key])\n\n return separated_seqs",
"def ghetto_split(list_, chunk_size=100):\n logging.debug(f\"Splitting list of {len(list_)} length, chunk size = {chunk_size}\")\n split_lists = []\n for i in range(0,len(list_),chunk_size):\n split_lists.append(list_[i:i+chunk_size])\n logging.debug(f\"List has been split into {len(split_lists)} lists. Total num of elements in split lists is {sum([len(i) for i in split_lists])}\")\n return split_lists",
"def load_variants_from_vcf( vcf_file ):\n\t\n\tsnps_per_chr = {}\n\tindels_per_chr = {}\n\t\n\ttri_counter = 0\n\t\n\twith open( vcf_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] != '#':\n\t\t\t\tparts = line.strip().split('\\t')\n\t\t\t\tif not \",\" in parts[4]:\t#only biallelic variants\n\t\t\t\t\tif len( parts[3] ) == len( parts[4] ) and len( parts[3] ) == 1:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsnps_per_chr[ parts[0] ].append( parts[1] )\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tsnps_per_chr.update( { parts[0]: [ parts[1] ] } )\n\t\t\t\t\t\t\n\t\t\t\t\telif len( parts[3] ) != len( parts[4] ):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tindels_per_chr[ parts[0] ].append( parts[1] )\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tindels_per_chr.update( { parts[0]: [ parts[1] ] } )\n\t\t\t\telse:\t#count triallelic variants\n\t\t\t\t\ttri_counter += 1\n\t\t\t\t\t\t\n\t\t\tline = f.readline()\n\tprint \"number of triallelic variants: \" + str( tri_counter )\n\t\n\treturn snps_per_chr, indels_per_chr",
"def process_data_to_chromatograms(self,data):\n chromatograms = []\n for block in data:\n chrom = self.extract_data_from_block(block)\n chromatograms.append(chrom)\n return chromatograms",
"def load_data() -> list:\n # trans_dict is used for changing the given names into standardized names.\n trans_dict = {\"chr1\": \"1\", \"chr2\": \"2\", \"chr3\": \"3\", \"chr4\": \"4\", \"chr5\": \"5\", \"chr6\": \"6\", \"chr7\": \"7\",\n \"chr8\": \"8\", \"chr9\": \"9\", \"chr10\": \"10\", \"chr11\": \"11\", \"chr12\": \"12\", \"chr13\": \"13\", \"chr14\": \"14\",\n \"chr15\": \"15\", \"chr16\": \"16\", \"chr17\": \"17\", \"chr18\": \"18\", \"chr19\": \"19\", \"chrx\": \"x\", \"chry\": \"y\"}\n # This try statement catches user error.\n try:\n with open(sys.argv[1]) as bed_file, open(sys.argv[2]) as fasta_file:\n fasta_records = []\n # Opens the bed file and splits into lists\n bed_file = list(csv.reader(bed_file, delimiter='\\t'))\n # Changes the names of the chromosomes in bed file, does some light rearranging and formatting.\n bed_file = [[trans_dict[record[0].lower()], record[1], record[3][0:record[3].index(\n '\\'')]] for record in bed_file]\n # Sorts the desired indices by chromosome, then by index in the chromosome.\n bed_file = sorted(bed_file, key=itemgetter(1))\n bed_file = sorted(bed_file, key=itemgetter(0))\n # This stores the desired indexes for each chromosome.\n indexable_bed_records = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': [],\n '10': [], '11': [], '12': [], '13': [], '14': [], '15': [], '16': [], '17': [],\n '18': [], '19': [], 'x': [], 'y': []}\n # Put each desired index into it's appropriate chromosome list.\n for record in bed_file:\n indexable_bed_records[record[0]].append([record[2], record[1]])\n # Loops over fasta records in the supplied fasta file\n for fasta_record in fasta_iter(fasta_file):\n # grabs the chromosome id\n chrom_id = fasta_record[\"header\"][:fasta_record[\"header\"].index(' ')].lower()\n # Some chromosomes are not desired, skip them.\n if chrom_id not in indexable_bed_records.keys():\n continue\n # Grabs the indexes we want to extract from the chromosome.\n indexes = indexable_bed_records[chrom_id]\n # Grabs each index+/-10 from the sequence\n for index in indexes:\n fasta_records.append([index[0], fasta_record[\"seq\"][int(index[1]) - 10:int(index[1]) + 10]])\n # Returns a list of lists of format [5'/3',splice site sequence]\n return fasta_records\n # Catches user error.\n except (FileNotFoundError, IndexError) as e:\n if type(e) is IndexError:\n sys.stderr.write(\"Usage: {} bed_file fasta_file\\n\\tbed_file: The appropriate bed file. \\n\\t\"\n \"fasta_file: The appropriate fasta file.\\n\".format(os.path.basename(__file__)))\n elif type(e) is FileNotFoundError:\n sys.stderr.write(\"One of the specified files was not found.\\n\")\n sys.exit(1)",
"def _chunk_list(list_to_chunk, chunk_length):\n return [list_to_chunk[i:i+max(1, chunk_length)] for i in range(0, len(list_to_chunk), max(1, chunk_length))]",
"def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]",
"def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]",
"def createChromosomes(self) -> ChromList:\n raise NotImplementedError"
] |
[
"0.5714213",
"0.5681728",
"0.5427925",
"0.5411116",
"0.538021",
"0.53091216",
"0.5299388",
"0.52366877",
"0.52330506",
"0.5231694",
"0.52247053",
"0.5207851",
"0.5207769",
"0.5184043",
"0.5158705",
"0.5153309",
"0.5130597",
"0.5109795",
"0.5106975",
"0.5104809",
"0.5101138",
"0.50946903",
"0.50769466",
"0.50394183",
"0.5025317",
"0.5008644",
"0.50010604",
"0.49959576",
"0.4992204",
"0.49887002"
] |
0.5779796
|
0
|
Take a pysam.cbcf.VariantRecord and convert it into a Variant object
|
def parse(v, cpy):
if v.samples[0]['GT'][cpy] == 0: # Not present in this copy
return None
alt = v.samples[0].alleles[cpy]
l_r, l_a = len(v.ref), len(alt)
    if l_r == 1:
        if l_a == 1:
            op, op_len = 'X', 0          # SNP: single-base substitution
        else:
            op, op_len = 'I', l_a - l_r  # insertion relative to the reference
    elif l_a == 1:
        op, op_len = 'D', l_r - l_a      # deletion relative to the reference
    else:
        raise ValueError("Complex variants present in VCF. Please filter or refactor these.")
    return Variant(v.pos, v.ref, alt, op, op_len)  # alt == v.samples[0].alleles[cpy]
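
# Note (assumption, not shown in this snippet): Variant is expected to be a
# simple record with positional fields (pos, ref, alt, op, op_len). A minimal
# stand-in that satisfies the constructor call above would be:
from collections import namedtuple
Variant = namedtuple('Variant', ['pos', 'ref', 'alt', 'op', 'op_len'])
# For example, a SNP A>G at POS 100 present on the given copy parses to
# Variant(pos=100, ref='A', alt='G', op='X', op_len=0).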
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def variant_from_example(example):\n features = example.features.feature\n var_string = features['variant/encoded'].bytes_list.value[0]\n return variants_pb2.Variant.FromString(var_string)",
"def variant_case(store, case_obj, variant_obj):\n case_obj['bam_files'] = []\n case_obj['mt_bams'] = []\n case_obj['bai_files'] = []\n case_obj['mt_bais'] = []\n case_obj['sample_names'] = []\n for individual in case_obj['individuals']:\n bam_path = individual.get('bam_file')\n mt_bam = individual.get('mt_bam')\n case_obj['sample_names'].append(individual.get('display_name'))\n if bam_path and os.path.exists(bam_path):\n case_obj['bam_files'].append(individual['bam_file'])\n case_obj['bai_files'].append(find_bai_file(individual['bam_file']))\n if mt_bam and os.path.exists(mt_bam):\n case_obj['mt_bams'].append(individual['mt_bam'])\n case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))\n\n else:\n LOG.debug(\"%s: no bam file found\", individual['individual_id'])\n\n try:\n genes = variant_obj.get('genes', [])\n if len(genes) == 1:\n hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])\n if hgnc_gene_obj:\n vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)\n case_obj['region_vcf_file'] = vcf_path\n else:\n case_obj['region_vcf_file'] = None\n elif len(genes) > 1:\n chrom = variant_obj['genes'][0]['common']['chromosome']\n start = min(gene['common']['start'] for gene in variant_obj['genes'])\n end = max(gene['common']['end'] for gene in variant_obj['genes'])\n # Create a reduced VCF with variants in the region\n vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)\n case_obj['region_vcf_file'] = vcf_path\n except (SyntaxError, Exception):\n LOG.warning(\"skip VCF region for alignment view\")",
"def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37',\n get_compounds = True):\n has_changed = False\n compounds = variant_obj.get('compounds', [])\n if compounds and get_compounds:\n # Check if we need to add compound information\n # If it is the first time the case is viewed we fill in some compound information\n if 'not_loaded' not in compounds[0]:\n new_compounds = store.update_variant_compounds(variant_obj)\n variant_obj['compounds'] = new_compounds\n has_changed = True\n\n # sort compounds on combined rank score\n variant_obj['compounds'] = sorted(variant_obj['compounds'],\n key=lambda compound: -compound['combined_score'])\n\n # Update the hgnc symbols if they are incorrect\n variant_genes = variant_obj.get('genes')\n if variant_genes is not None:\n for gene_obj in variant_genes:\n # If there is no hgnc id there is nothin we can do\n if not gene_obj['hgnc_id']:\n continue\n # Else we collect the gene object and check the id\n if gene_obj.get('hgnc_symbol') is None:\n hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build)\n if not hgnc_gene:\n continue\n has_changed = True\n gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']\n\n # We update the variant if some information was missing from loading\n # Or if symbold in reference genes have changed\n if update and has_changed:\n variant_obj = store.update_variant(variant_obj)\n\n variant_obj['comments'] = store.events(institute_obj, case=case_obj,\n variant_id=variant_obj['variant_id'], comments=True)\n\n if variant_genes:\n variant_obj.update(get_predictions(variant_genes))\n if variant_obj.get('category') == 'cancer':\n variant_obj.update(get_variant_info(variant_genes))\n\n for compound_obj in compounds:\n compound_obj.update(get_predictions(compound_obj.get('genes', [])))\n\n if isinstance(variant_obj.get('acmg_classification'), int):\n acmg_code = ACMG_MAP[variant_obj['acmg_classification']]\n variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code]\n\n\n # convert length for SV variants\n variant_length = variant_obj.get('length')\n variant_obj['length'] = {100000000000: 'inf', -1: 'n.d.'}.get(variant_length, variant_length)\n if not 'end_chrom' in variant_obj:\n variant_obj['end_chrom'] = variant_obj['chromosome']\n\n return variant_obj",
"def VtVariant(list):\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_VARIANT, list)",
"def serialize_to_bytes(self, record):\n result = self._quickavro_encoder.write(record)\n return result",
"def decode_record(record):\n return json.loads(record, object_hook=decode_dict)",
"def test_loqusdb_variant(mocker, loqus_extension):\n # GIVEN a return value from loqusdb\n return_value = (\n b'{\"homozygote\": 0, \"hemizygote\": 0, \"observations\": 1, \"chrom\": \"1\", \"start\": '\n b'235918688, \"end\": 235918693, \"ref\": \"CAAAAG\", \"alt\": \"C\", \"families\": [\"643594\"],'\n b' \"total\": 3}'\n )\n mocker.patch.object(subprocess, \"check_output\")\n subprocess.check_output.return_value = return_value\n # WHEN fetching the variant info\n var_info = loqus_extension.get_variant({\"_id\": \"a variant\"})\n\n # THEN assert the info was parsed correct\n assert var_info[\"total\"] == 3",
"def load_variant(self, variant_obj):\n # LOG.debug(\"Loading variant %s\", variant_obj['_id'])\n try:\n result = self.variant_collection.insert_one(variant_obj)\n except DuplicateKeyError as err:\n raise IntegrityError(\"Variant %s already exists in database\", variant_obj[\"_id\"])\n return result",
"async def transform_record(db_pool, record):\n\n # Before creating the dict, we want to get the stable_id frm the DB\n async with db_pool.acquire(timeout=180) as connection:\n try: \n query = f\"\"\"SELECT stable_id, access_type\n FROM beacon_dataset\n WHERE id={dict(record).pop(\"dataset_id\")};\n \"\"\"\n statement = await connection.prepare(query)\n extra_record = await statement.fetchrow()\n except Exception as e:\n raise BeaconServerError(f'Query metadata (stableID) DB error: {e}') \n\n response = dict(record)\n\n response.pop(\"id\")\n response[\"datasetId\"] = dict(extra_record).pop(\"stable_id\") \n response[\"internalId\"] = response.pop(\"dataset_id\")\n response[\"exists\"] = True\n response[\"variantCount\"] = response.pop(\"variant_cnt\") \n response[\"callCount\"] = response.pop(\"call_cnt\") \n response[\"sampleCount\"] = response.pop(\"sample_cnt\") \n response[\"frequency\"] = 0 if response.get(\"frequency\") is None else float(response.pop(\"frequency\"))\n response[\"numVariants\"] = 0 if response.get(\"num_variants\") is None else response.pop(\"num_variants\")\n response[\"info\"] = {\"access_type\": dict(extra_record).pop(\"access_type\")} \n \n return response",
"def load_variant_file(fname, sample, bed_fname):\n mode = 'rb' if fname.endswith('bcf') else 'r'\n vcf_fp = pysam.VariantFile(fname, mode)\n vcf_fp.subset_samples([sample])\n return [\n split_copies(region, [v for v in vcf_fp.fetch(contig=region[0], start=region[1], stop=region[2])])\n for region in read_bed(bed_fname)\n ]",
"async def dump_variant(obj, elem, elem_type=None, params=None, field_archiver=None):\n field_archiver = field_archiver if field_archiver else dump_field\n if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE:\n return {\n elem.variant_elem: await field_archiver(None, getattr(elem, elem.variant_elem), elem.variant_elem_type)\n }\n\n else:\n fdef = elem_type.find_fdef(elem_type.f_specs(), elem)\n return {\n fdef[0]: await field_archiver(None, elem, fdef[1])\n }",
"def variant(store, institute_obj, case_obj, variant_id=None, variant_obj=None, add_case=True,\n add_other=True, get_overlapping=True):\n # If the variant is already collected we skip this part\n if not variant_obj:\n default_panels = []\n # Add default panel information to variant\n for panel in case_obj['panels']:\n if not panel.get('is_default'):\n continue\n panel_obj = store.gene_panel(panel['panel_name'], panel.get('version'))\n if not panel:\n LOG.warning(\"Panel {0} version {1} could not be found\".format(\n panel['panel_name'], panel.get('version')))\n continue\n default_panels.append(panel_obj)\n\n # NOTE this will query with variant_id == document_id, not the variant_id.\n variant_obj = store.variant(variant_id, gene_panels=default_panels)\n\n\n genome_build = case_obj.get('genome_build', '37')\n if genome_build not in ['37','38']:\n genome_build = '37'\n\n if variant_obj is None:\n return None\n # Add information to case_obj\n if add_case:\n variant_case(store, case_obj, variant_obj)\n\n # Collect all the events for the variant\n events = list(store.events(institute_obj, case=case_obj, variant_id=variant_obj['variant_id']))\n for event in events:\n event['verb'] = VERBS_MAP[event['verb']]\n\n other_causatives = []\n # Adds information about other causative variants\n if add_other:\n for other_variant in store.other_causatives(case_obj, variant_obj):\n # This should work with old and new ids\n case_id = other_variant['case_id']\n other_case = store.case(case_id)\n if not other_case:\n continue\n other_variant['case_display_name'] = other_case.get('display_name', case_id)\n other_causatives.append(other_variant)\n\n variant_obj = parse_variant(store, institute_obj, case_obj, variant_obj, genome_build=genome_build)\n\n variant_obj['end_position'] = end_position(variant_obj)\n variant_obj['frequency'] = frequency(variant_obj)\n variant_obj['clinsig_human'] = (clinsig_human(variant_obj) if variant_obj.get('clnsig')\n else None)\n variant_obj['thousandg_link'] = thousandg_link(variant_obj, genome_build)\n variant_obj['exac_link'] = exac_link(variant_obj)\n variant_obj['gnomad_link'] = gnomad_link(variant_obj)\n variant_obj['swegen_link'] = swegen_link(variant_obj)\n variant_obj['cosmic_link'] = cosmic_link(variant_obj)\n variant_obj['beacon_link'] = beacon_link(variant_obj, genome_build)\n variant_obj['ucsc_link'] = ucsc_link(variant_obj, genome_build)\n variant_obj['alamut_link'] = alamut_link(variant_obj)\n variant_obj['spidex_human'] = spidex_human(variant_obj)\n variant_obj['expected_inheritance'] = expected_inheritance(variant_obj)\n variant_obj['callers'] = callers(variant_obj, category='snv')\n\n individuals = {individual['individual_id']: individual for individual in\n case_obj['individuals']}\n for sample_obj in variant_obj['samples']:\n individual = individuals[sample_obj.get('sample_id')]\n if not individual:\n return None\n sample_obj['is_affected'] = True if individual['phenotype'] == 2 else False\n\n gene_models = set()\n variant_obj['disease_associated_transcripts'] = []\n\n # Parse the gene models, both from panels and genes\n for gene_obj in variant_obj.get('genes', []):\n # Adds gene level links\n parse_gene(gene_obj, genome_build)\n omim_models = set()\n for disease_term in gene_obj.get('disease_terms', []):\n omim_models.update(disease_term.get('inheritance', []))\n gene_obj['omim_inheritance'] = list(omim_models)\n\n # Build strings for the disease associated transcripts from gene panel\n for refseq_id in gene_obj.get('disease_associated_transcripts', []):\n 
hgnc_symbol = (gene_obj['common']['hgnc_symbol'] if gene_obj.get('common') else\n gene_obj['hgnc_id'])\n transcript_str = \"{}:{}\".format(hgnc_symbol, refseq_id)\n variant_obj['disease_associated_transcripts'].append(transcript_str)\n\n gene_models = gene_models | omim_models\n\n if variant_obj.get('genetic_models'):\n variant_models = set(model.split('_', 1)[0] for model in variant_obj['genetic_models'])\n variant_obj['is_matching_inheritance'] = variant_models & gene_models\n\n evaluations = []\n for evaluation_obj in store.get_evaluations(variant_obj):\n evaluation(store, evaluation_obj)\n evaluations.append(evaluation_obj)\n\n case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))\n if variant_id in case_clinvars:\n variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig']\n\n svs = []\n if get_overlapping:\n svs = (parse_variant(store, institute_obj, case_obj, variant_obj) for\n variant_obj in store.overlapping(variant_obj))\n\n return {\n 'variant': variant_obj,\n 'causatives': other_causatives,\n 'events': events,\n 'overlapping_svs': svs,\n 'manual_rank_options': MANUAL_RANK_OPTIONS,\n 'dismiss_variant_options': DISMISS_VARIANT_OPTIONS,\n 'mosaic_variant_options': MOSAICISM_OPTIONS,\n 'ACMG_OPTIONS': ACMG_OPTIONS,\n 'evaluations': evaluations,\n }",
"def get_variant_info(self, fields=\"dbsnp\", pandas=True):\n mv = myvariant.MyVariantInfo()\n return mv.getvariants(self.hvgs_ids, fields=fields,\n as_dataframe=pandas, df_index=True)",
"def create_variant (self):\n return self.create_name().create_variant('Variant',\n [self.create_topic()])",
"def test_variant_case(adapter, case_obj, variant_obj):\n # GIVEN a variant WITH gene info\n variant_obj[\"genes\"] = [\n {\"hgnc_id\": 1},\n {\"hgnc_id\": 2, \"common\": {\"chromosome\": \"1\", \"start\": \"10\", \"end\": \"100\"}},\n ]\n # GIVEN a variant without gene info\n assert case_obj.get(\"region_vcf_file\") is None\n variant_case(adapter, case_obj, variant_obj)\n # THEN assert that the region VCF was created\n assert case_obj.get(\"region_vcf_file\") is not None",
"def variant(\n store,\n institute_id,\n case_name,\n variant_id=None,\n variant_obj=None,\n add_case=True,\n add_other=True,\n get_overlapping=True,\n add_compounds=True,\n variant_category=None,\n variant_type=None,\n case_obj=None,\n institute_obj=None,\n):\n if not (institute_obj and case_obj):\n institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n # If the variant is already collected we skip this part\n if not variant_obj:\n # NOTE this will query with variant_id == document_id, not the variant_id.\n variant_obj = store.variant(variant_id)\n\n if variant_obj is None:\n return None\n\n variant_type = variant_type or variant_obj.get(\"variant_type\", \"clinical\")\n\n # request category specific variant display\n variant_category = variant_obj.get(\"category\", \"snv\")\n LOG.debug(\"Variant category {}\".format(variant_category))\n\n variant_id = variant_obj[\"variant_id\"]\n\n genome_build = str(case_obj.get(\"genome_build\", \"37\"))\n if genome_build not in [\"37\", \"38\"]:\n genome_build = \"37\"\n\n panels = default_panels(store, case_obj)\n variant_obj = add_gene_info(store, variant_obj, gene_panels=panels, genome_build=genome_build)\n # Add information about bam files and create a region vcf\n if add_case:\n variant_case(store, case_obj, variant_obj)\n\n # Collect all the events for the variant\n events = store.events(institute_obj, case=case_obj, variant_id=variant_id)\n for event in events:\n event[\"verb\"] = VERBS_MAP[event[\"verb\"]]\n\n # Comments are not on case level so these needs to be fetched on their own\n variant_obj[\"comments\"] = store.events(\n institute_obj, case=case_obj, variant_id=variant_id, comments=True\n )\n\n # Adds information about other causative variants\n other_causatives = []\n if add_other:\n other_causatives = [\n causative for causative in store.other_causatives(case_obj, variant_obj)\n ]\n\n # Gather display information for the genes\n variant_obj.update(predictions(variant_obj.get(\"genes\", [])))\n\n # Prepare classification information for visualisation\n classification = variant_obj.get(\"acmg_classification\")\n if isinstance(classification, int):\n acmg_code = ACMG_MAP[variant_obj[\"acmg_classification\"]]\n variant_obj[\"acmg_classification\"] = ACMG_COMPLETE_MAP[acmg_code]\n\n # sort compounds on combined rank score\n compounds = variant_obj.get(\"compounds\", [])\n if compounds:\n # Gather display information for the compounds\n for compound_obj in compounds:\n compound_obj.update(predictions(compound_obj.get(\"genes\", [])))\n\n variant_obj[\"compounds\"] = sorted(\n variant_obj[\"compounds\"], key=lambda compound: -compound[\"combined_score\"]\n )\n\n variant_obj[\"end_position\"] = end_position(variant_obj)\n\n # Add general variant links\n variant_obj.update(get_variant_links(variant_obj, int(genome_build)))\n variant_obj[\"frequencies\"] = frequencies(variant_obj)\n if variant_category in [\"snv\", \"cancer\"]:\n # This is to convert a summary of frequencies to a string\n variant_obj[\"frequency\"] = frequency(variant_obj)\n # Format clinvar information\n variant_obj[\"clinsig_human\"] = clinsig_human(variant_obj) if variant_obj.get(\"clnsig\") else None\n\n # Add display information about callers\n variant_obj[\"callers\"] = callers(variant_obj, category=variant_category)\n\n # Convert affection status to strings for the template\n is_affected(variant_obj, case_obj)\n\n if variant_obj.get(\"genetic_models\"):\n variant_models = set(model.split(\"_\", 1)[0] for model in 
variant_obj[\"genetic_models\"])\n all_models = variant_obj.get(\"all_models\", set())\n variant_obj[\"is_matching_inheritance\"] = set.intersection(variant_models, all_models)\n\n # Prepare classification information for visualisation\n classification = variant_obj.get(\"acmg_classification\")\n if isinstance(classification, int):\n acmg_code = ACMG_MAP[variant_obj[\"acmg_classification\"]]\n variant_obj[\"acmg_classification\"] = ACMG_COMPLETE_MAP[acmg_code]\n\n evaluations = []\n for evaluation_obj in store.get_evaluations(variant_obj):\n evaluation(store, evaluation_obj)\n evaluations.append(evaluation_obj)\n\n case_clinvars = store.case_to_clinVars(case_obj.get(\"display_name\"))\n\n if variant_id in case_clinvars:\n variant_obj[\"clinvar_clinsig\"] = case_clinvars.get(variant_id)[\"clinsig\"]\n\n overlapping_vars = []\n if get_overlapping:\n for var in store.overlapping(variant_obj):\n var.update(predictions(var.get(\"genes\", [])))\n overlapping_vars.append(var)\n variant_obj[\"end_chrom\"] = variant_obj.get(\"end_chrom\", variant_obj[\"chromosome\"])\n\n dismiss_options = DISMISS_VARIANT_OPTIONS\n if case_obj.get(\"track\") == \"cancer\":\n dismiss_options = {\n **DISMISS_VARIANT_OPTIONS,\n **CANCER_SPECIFIC_VARIANT_DISMISS_OPTIONS,\n }\n\n return {\n \"institute\": institute_obj,\n \"case\": case_obj,\n \"variant\": variant_obj,\n variant_category: True,\n \"causatives\": other_causatives,\n \"events\": events,\n \"overlapping_vars\": overlapping_vars,\n \"manual_rank_options\": MANUAL_RANK_OPTIONS,\n \"cancer_tier_options\": CANCER_TIER_OPTIONS,\n \"dismiss_variant_options\": dismiss_options,\n \"mosaic_variant_options\": MOSAICISM_OPTIONS,\n \"ACMG_OPTIONS\": ACMG_OPTIONS,\n \"evaluations\": evaluations,\n }",
"def sigFromPy(pobj):\n sig = getattr(pobj, 'dbusSignature', None)\n\n if sig is not None:\n return sig\n\n elif isinstance(pobj, bool):\n return 'b'\n elif isinstance(pobj, int):\n return 'i'\n elif isinstance(pobj, int):\n return 'x'\n elif isinstance(pobj, float):\n return 'd'\n elif isinstance(pobj, str):\n return 's'\n elif isinstance(pobj, bytearray):\n return 'ay'\n\n elif isinstance(pobj, list):\n vtype = type(pobj[0])\n same = True\n for v in pobj[1:]:\n if not isinstance(v, vtype):\n same = False\n if same:\n return 'a' + sigFromPy(pobj[0])\n else:\n return 'av'\n\n elif isinstance(pobj, tuple):\n return '(' + ''.join(sigFromPy(e) for e in pobj) + ')'\n\n elif isinstance(pobj, dict):\n same = True\n vtype = None\n for k, v in pobj.items():\n if vtype is None:\n vtype = type(v)\n elif not isinstance(v, vtype):\n same = False\n if same:\n return 'a{' + sigFromPy(k) + sigFromPy(v) + '}'\n else:\n return 'a{' + sigFromPy(k) + 'v}'\n\n else:\n raise MarshallingError(\n 'Invalid Python type for variant: '\n + repr(pobj)\n )",
"async def load_variant(obj, elem, elem_type=None, params=None, field_archiver=None, wrapped=None):\n field_archiver = field_archiver if field_archiver else load_field\n is_wrapped = elem_type.WRAPS_VALUE if wrapped is None else wrapped\n\n if is_wrapped:\n elem = elem_type() if elem is None else elem\n\n fname = list(obj.keys())[0]\n for field in elem_type.f_specs():\n if field[0] != fname:\n continue\n\n fvalue = await field_archiver(obj[fname], field[1], field[2:], elem if not is_wrapped else None)\n if is_wrapped:\n elem.set_variant(field[0], fvalue)\n\n return elem if is_wrapped else fvalue\n raise ValueError('Unknown tag: %s' % fname)",
"def update_variant(self, variant_obj):\n LOG.debug(\"Updating variant %s\", variant_obj.get(\"simple_id\"))\n\n new_variant = self.variant_collection.find_one_and_replace(\n {\"_id\": variant_obj[\"_id\"]},\n variant_obj,\n return_document=pymongo.ReturnDocument.AFTER,\n )\n return new_variant",
"def load_vcf(vcf_fn, asm_dat, state=\"UNK\", sample='unknown'):\n logging.info(\"Loading Variants from %s\", vcf_fn)\n asm_header, asms = asm_dat\n ret_header = build_var_header()\n ret_header.update(asm_header)\n asmheadidx = list(asm_header.keys())\n ret = []\n with pysam.VariantFile(vcf_fn) as fh:\n for var in fh:\n cur_data = [sample]\n cur_data.append(var.chrom)\n cur_data.append(var.start)\n cur_data.append(var.stop)\n\n var_type, var_len = get_type_lens(var)\n \n cur_data.append(var_type)\n cur_data.append(state)\n \n baid = None # best aid\n blen = 0 # best aid length\n best = None # pulled data\n num_asms = 0\n for aid in var.info[\"AID\"]:\n if aid not in asms:\n continue\n num_asms += 1\n dat = asms[aid]\n alen = dat[asmheadidx.index(\"ASMLEN\")]\n if alen > blen:\n baid = aid\n blen = alen\n best = dat\n \n cur_data.append(var.info[\"POP\"])\n cur_data.append(var_len)\n cur_data.append(num_asms)\n\n cur_data.extend(parse_format(var.samples[0]))\n \n if baid is not None:\n cur_data.extend(best)\n else:\n cur_data.extend(([0] * (len(asmheadidx) - 1)) + [\"\"])\n\n\n ret.append(cur_data)\n logging.info(\"Loaded %d variants\", len(ret))\n return pd.DataFrame(ret, columns = ret_header.keys())",
"def _read_record(self, stream):\n header = stream.read(4)\n if len(header) < 4:\n return None\n size, rec_type = struct.unpack('>HH', header)\n data_type = (rec_type & 0x00ff)\n rec_type = rec_type // 256\n data = None\n if size > 4:\n if data_type == 0x01:\n data = numpy.array(\n struct.unpack('>{0}H'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='uint')\n elif data_type == 0x02:\n data = numpy.array(\n struct.unpack('>{0}h'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x03:\n data = numpy.array(\n struct.unpack('>{0}l'.format((size - 4) // 4),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x05:\n data = numpy.array([\n _eight_byte_real_to_float(stream.read(8))\n for _ in range((size - 4) // 8)\n ])\n else:\n data = stream.read(size - 4)\n if str is not bytes:\n if data[-1] == 0:\n data = data[:-1].decode('ascii')\n else:\n data = data.decode('ascii')\n elif data[-1] == '\\0':\n data = data[:-1]\n return [rec_type, data]",
"def simple_vcf_reader(fh):\n\n for line in fh:\n if line.startswith('#'):\n continue\n ls = line.rstrip().split('\\t')\n # 8 fixed fields per record\n assert len(ls)>=8, (\n \"Number of retrieved fields in vcf file too small\")\n # ignoring the rest\n (chrom, pos, id, ref, alt, qual, filter, info) = ls[:8]\n pos = int(pos)-1\n try:\n qual = int(qual)\n except:\n qual = \".\"\n info_d = dict()\n for field in info.split(';'):\n kv = field.split('=')\n # boolean entries get True as value\n if len(kv)==1:\n info_d[kv[0]] = True\n else:\n info_d[kv[0]] = kv[1]\n #try:\n # info = dict([field.split('=') for field in info.split(';')])\n #except ValueError:\n # import pdb; pdb.set_trace()\n yield Variant(chrom, pos, id, ref, alt, qual, filter, info_d)",
"def parse_record(self, record):\n raise NotImplementedError()",
"def record_to_tuple(record):\n return float(record[\"base_volume\"]), float(record[\"counter_volume\"]), int(record[\"trade_count\"])",
"def str_variant(store, institute_id, case_name, variant_id):\n\n institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n variant_obj = store.variant(variant_id)\n\n # fill in information for pilup view\n variant_case(store, case_obj, variant_obj)\n\n variant_obj['callers'] = callers(variant_obj, category='str')\n\n # variant_obj['str_ru']\n # variant_obj['str_repid']\n # variant_obj['str_ref']\n\n variant_obj['comments'] = store.events(institute_obj, case=case_obj,\n variant_id=variant_obj['variant_id'], comments=True)\n\n return {\n 'institute': institute_obj,\n 'case': case_obj,\n 'variant': variant_obj,\n 'overlapping_snvs': overlapping_snvs,\n 'manual_rank_options': MANUAL_RANK_OPTIONS,\n 'dismiss_variant_options': DISMISS_VARIANT_OPTIONS\n }",
"def _decode_union_old(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Union member has no associated value\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n # Union member has value\n if len(obj) != 1:\n raise bv.ValidationError('expected 1 key, got %s' % len(obj))\n tag = list(obj)[0]\n raw_val = obj[tag]\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if isinstance(val_data_type, bv.Nullable) and raw_val is None:\n val = None\n elif isinstance(val_data_type, bv.Void):\n if raw_val is None or not strict:\n # If raw_val is None, then this is the more verbose\n # representation of a void union member. If raw_val isn't\n # None, then maybe the spec has changed, so check if we're\n # in strict mode.\n val = None\n else:\n raise bv.ValidationError('expected null, got %s' %\n bv.generic_type_name(raw_val))\n else:\n try:\n val = _json_compat_obj_decode_helper(\n val_data_type, raw_val, alias_validators, strict, True,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(tag)\n raise\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)",
"def parseDigitalSValRecord(self, f):\n try:\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n except IndexError:\n newsize = len(self.digitalsvalrecords) + DEFNDIGITALSVALRECORDS\n self.digitalsvalrecords.resize(newsize, refcheck=False)\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n #junk, junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('<hiqhhi', f.read(22))\n #junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('qqhhi', f.read(24))\n junk, r['TimeStamp'], r['SVal'], junk, junk = unpackdsvalrec(f.read(24))\n self.ndigitalsvalrecords += 1",
"def __convert( source ):\n # Just in case things get this far but we don't know about the record\n if source['recordType'] not in definitions.RECORDS:\n return {\n 'rec_type': source['recordType']\n }\n\n # Create a flat wrapper\n record = estreamer.common.Flatdict( source )\n\n # Transform\n output = __selectWithNewKeys( record )\n\n return output",
"def _val(obj):\n if isinstance(obj, ctypes._SimpleCData):\n return obj.value\n else:\n return obj",
"def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,\n get_overlapping=True):\n institute_obj, case_obj = institute_and_case(store, institute_id, case_name)\n\n if not variant_obj:\n variant_obj = store.variant(variant_id)\n\n if add_case:\n # fill in information for pilup view\n variant_case(store, case_obj, variant_obj)\n\n # frequencies\n variant_obj['frequencies'] = [\n ('1000G', variant_obj.get('thousand_genomes_frequency')),\n ('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')),\n ('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')),\n ('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')),\n ('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')),\n ('ClinGen NGI', variant_obj.get('clingen_ngi')),\n ('SweGen', variant_obj.get('swegen')),\n ('Decipher', variant_obj.get('decipher')),\n ]\n\n variant_obj['callers'] = callers(variant_obj, category='sv')\n\n overlapping_snvs = []\n if get_overlapping:\n overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in\n store.overlapping(variant_obj))\n\n # parse_gene function is not called for SVs, but a link to ensembl gene is required\n for gene_obj in variant_obj['genes']:\n if gene_obj.get('common'):\n ensembl_id = gene_obj['common']['ensembl_id']\n try:\n build = int(gene_obj['common'].get('build','37'))\n except Exception:\n build = 37\n gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build)\n\n variant_obj['comments'] = store.events(institute_obj, case=case_obj,\n variant_id=variant_obj['variant_id'], comments=True)\n\n case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))\n if variant_id in case_clinvars:\n variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig']\n\n if not 'end_chrom' in variant_obj:\n variant_obj['end_chrom'] = variant_obj['chromosome']\n\n return {\n 'institute': institute_obj,\n 'case': case_obj,\n 'variant': variant_obj,\n 'overlapping_snvs': overlapping_snvs,\n 'manual_rank_options': MANUAL_RANK_OPTIONS,\n 'dismiss_variant_options': DISMISS_VARIANT_OPTIONS\n }"
] |
[
"0.5806947",
"0.5430533",
"0.54252964",
"0.5344559",
"0.53102547",
"0.52931106",
"0.5232737",
"0.52294105",
"0.517921",
"0.50639087",
"0.5034603",
"0.50143707",
"0.49965522",
"0.49807188",
"0.4961065",
"0.49577075",
"0.4936524",
"0.4915479",
"0.49094373",
"0.48896736",
"0.48889184",
"0.48813766",
"0.48598847",
"0.48584315",
"0.484115",
"0.48281214",
"0.479101",
"0.47897884",
"0.47754374",
"0.47712478"
] |
0.54504484
|
1
|
Prepare a variant file with only the given sample, complex variant calls filtered out, and restricted to the given bed file
|
import logging
import time

import pysam

# 'read_bed' is assumed to be a module-level helper that yields (contig, start, stop) tuples;
# the logger is defined here so the debug calls below resolve.
logger = logging.getLogger(__name__)


def prepare_variant_file(fname_in, sample, bed_fname, fname_out, write_mode='w'):
    def _complex_variant(_v):
        # Flag records where both REF and a called ALT allele span more than one base.
        var = next(iter(_v.samples.values()))  # the single remaining sample after subset_samples()
        for alt in var.alleles:
            # alt can be None for uncalled genotypes; guard before taking len()
            if _v.rlen > 1 and alt is not None and len(alt) > 1:
                if _v.ref != alt:
                    logger.debug('Filtered out {}:{} {} -> {}'.format(_v.contig, _v.pos, _v.ref, var.alleles))
                    return True
        return False

    logger.debug('Starting filtering ...')
    t0 = time.time()
    mode = 'rb' if fname_in.endswith('bcf') else 'r'
    vcf_in = pysam.VariantFile(fname_in, mode)
    vcf_in.subset_samples([sample])
    vcf_out = pysam.VariantFile(fname_out, mode=write_mode, header=vcf_in.header)
    v_cnt, fltr_cnt = 0, 0
    for region in read_bed(bed_fname):
        logger.debug('Filtering {}'.format(region))
        n = 0
        # enumerate from 1 so that n equals the number of records seen in this region
        for n, v in enumerate(vcf_in.fetch(contig=region[0], start=region[1], stop=region[2]), start=1):
            if _complex_variant(v):
                fltr_cnt += 1
                continue
            vcf_out.write(v)
        v_cnt += n
    logger.debug('Processed {} variants'.format(v_cnt))
    logger.debug('Filtered out {} complex variants'.format(fltr_cnt))
    t1 = time.time()
    logger.debug('Took {} s'.format(t1 - t0))
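
# A minimal usage sketch, kept as comments so the stored code stays importable.
# All paths and the sample name below are hypothetical; it assumes the input
# VCF/BCF is indexed for random access (required by fetch()) and that read_bed
# yields (contig, start, stop) tuples for the regions of interest:
#
#     prepare_variant_file('calls.bcf', 'SAMPLE1', 'targets.bed', 'filtered.vcf')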
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def variant_case(store, case_obj, variant_obj):\n case_obj['bam_files'] = []\n case_obj['mt_bams'] = []\n case_obj['bai_files'] = []\n case_obj['mt_bais'] = []\n case_obj['sample_names'] = []\n for individual in case_obj['individuals']:\n bam_path = individual.get('bam_file')\n mt_bam = individual.get('mt_bam')\n case_obj['sample_names'].append(individual.get('display_name'))\n if bam_path and os.path.exists(bam_path):\n case_obj['bam_files'].append(individual['bam_file'])\n case_obj['bai_files'].append(find_bai_file(individual['bam_file']))\n if mt_bam and os.path.exists(mt_bam):\n case_obj['mt_bams'].append(individual['mt_bam'])\n case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))\n\n else:\n LOG.debug(\"%s: no bam file found\", individual['individual_id'])\n\n try:\n genes = variant_obj.get('genes', [])\n if len(genes) == 1:\n hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])\n if hgnc_gene_obj:\n vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)\n case_obj['region_vcf_file'] = vcf_path\n else:\n case_obj['region_vcf_file'] = None\n elif len(genes) > 1:\n chrom = variant_obj['genes'][0]['common']['chromosome']\n start = min(gene['common']['start'] for gene in variant_obj['genes'])\n end = max(gene['common']['end'] for gene in variant_obj['genes'])\n # Create a reduced VCF with variants in the region\n vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)\n case_obj['region_vcf_file'] = vcf_path\n except (SyntaxError, Exception):\n LOG.warning(\"skip VCF region for alignment view\")",
"def load_variant_file(fname, sample, bed_fname):\n mode = 'rb' if fname.endswith('bcf') else 'r'\n vcf_fp = pysam.VariantFile(fname, mode)\n vcf_fp.subset_samples([sample])\n return [\n split_copies(region, [v for v in vcf_fp.fetch(contig=region[0], start=region[1], stop=region[2])])\n for region in read_bed(bed_fname)\n ]",
"def get_BedTool_from_variant_file(variant_file: str):\r\n # TODO: create checks for file format\r\n os.system(f\"grep -v '#' {variant_file} \" + \"| awk -F '\\t' '{print substr($2, 4), $3, $4, $5, $6, $7, $8, $9}' \" +\r\n f\" >{variant_file}_tmp\")\r\n pybedtools.BedTool(variant_file + \"_tmp\").saveas(variant_file + \".bed_tmp\")\r\n os.system(f\"perl -p -i -e 's/ /\\t/g' {variant_file}.bed_tmp\")\r\n os.system(f\"sed '1d' {variant_file}.bed_tmp > {variant_file}.bed\")\r\n os.remove(f\"{variant_file}.bed_tmp\")\r\n return",
"def prepare_bed_file(bed_file, output, ouf=False, save_rejected=None, only_chrom=None):\n new_lines = [] # keep updated lines\n rejected = [] # keep IDs of skipped transcripts + the reason why\n names = Counter() # we need to make sure that all names are unique\n allowed_re = re.compile(ALLOWED_CHARSET_RE).search\n broken_names = []\n\n f = open(bed_file, \"r\")\n for num, line in enumerate(f, 1):\n # parse bed file according to specification\n line_data = line.rstrip().split(\"\\t\")\n\n if len(line_data) != 12:\n f.close() # this is for sure an error\n # it is possible only if something except a bed12 was provided\n die(\n \"Error! Bed 12 file is required! Got a file with {len(line_data)} fields instead\"\n )\n\n chrom = line_data[0]\n if only_chrom and chrom != only_chrom:\n # TOGA allows to perform the analysis on a specific chromosome only\n # is so, we can skip all transcripts that located on other chromosomes\n continue\n chromStart = int(line_data[1])\n chromEnd = int(line_data[2])\n name = line_data[3] # gene_name usually\n corr_name = not bool(allowed_re(name))\n if corr_name is False:\n broken_names.append(name)\n # TODO: check weird characters in the transcript name\n # bed_score = int(line_data[4]) # never used\n # strand = line_data[5] # otherwise:\n # strand = True if line_data[5] == '+' else False\n thickStart = int(line_data[6])\n thickEnd = int(line_data[7])\n # itemRgb = line_data[8] # never used\n blockCount = int(line_data[9])\n blockSizes = [int(x) for x in line_data[10].split(\",\") if x != \"\"]\n blockStarts = [int(x) for x in line_data[11].split(\",\") if x != \"\"]\n blockEnds = [blockStarts[i] + blockSizes[i] for i in range(blockCount)]\n blockAbsStarts = [blockStarts[i] + chromStart for i in range(blockCount)]\n blockAbsEnds = [blockEnds[i] + chromStart for i in range(blockCount)]\n blockNewStarts, blockNewEnds = [], []\n names[name] += 1\n\n if thickStart > thickEnd:\n f.close() # according to bed12 specification this should never happen\n sys.stderr.write(f\"Problem occurred at line {num}, gene {name}\\n\")\n die(\"Error! Bed file is corrupted, thickEnd MUST be >= thickStart\")\n elif thickStart == thickEnd:\n # this means that this is a non-coding transcript\n # TOGA cannot process them: we can skip it\n rejected.append((name, \"No CDS\"))\n continue\n\n if thickStart < chromStart or thickEnd > chromEnd:\n # a very strange (but still possible) case\n f.close() # for sure an error with input data\n sys.stderr.write(f\"Problem occurred at line {num}, gene {name}\\n\")\n die(\"Error! 
Bed file is corrupted, thickRange is outside chromRange!\")\n\n # now select CDS only\n # we keep UTRs in the filtered file\n # however, we need CDS to check whether it's correct (% 3 == 0)\n for block_num in range(blockCount):\n blockStart = blockAbsStarts[block_num]\n blockEnd = blockAbsEnds[block_num]\n\n # skip the block if it is entirely UTR\n if blockEnd <= thickStart:\n continue\n elif blockStart >= thickEnd:\n continue\n\n # if we are here: this is not an entirely UTR exon\n # it might intersect the CDS border or to be in the CDS entirely\n # remove UTRs: block start must be >= CDS_start (thickStart)\n # block end must be <= CDS_end (thickEnd)\n blockNewStart = blockStart if blockStart >= thickStart else thickStart\n blockNewEnd = blockEnd if blockEnd <= thickEnd else thickEnd\n blockNewStarts.append(blockNewStart - thickStart)\n blockNewEnds.append(blockNewEnd - thickStart)\n\n if len(blockNewStarts) == 0:\n # even it thickStart != thickEnd this transcript still can be non-coding\n # but if there are no blocks in the CDS -> we can catch this\n rejected.append((name, \"No CDS\"))\n continue\n\n block_new_count = len(blockNewStarts)\n blockNewSizes = [\n blockNewEnds[i] - blockNewStarts[i] for i in range(block_new_count)\n ]\n\n if sum(blockNewSizes) % 3 != 0 and not ouf:\n # this is an out-of-frame (or incomplete transcript)\n # ideally CDS length should be divisible by 3\n # not ouf means that we like to keep such transcripts for some reason\n rejected.append((name, \"Out-of-frame gene\"))\n continue\n\n # we keep this transcript: add in to the list\n new_line = \"\\t\".join([str(x) for x in line_data])\n new_lines.append(new_line)\n f.close()\n\n # if not allowed characters in transcript names: list them\n if len(broken_names) > 0:\n eprint(\"Error! Some transcript names contain not allowed characters\")\n for t in broken_names:\n eprint(t)\n die(f\"Allowed characters are: {ALLOWED_CHARSET}\")\n # if there are non-unique transcript IDs: die\n # I kill it there, not earlier to show them altogether\n if any(v > 1 for v in names.values()):\n eprint(\"Error! There are non-uniq transcript IDs:\")\n duplicates = [k for k, v in names.items() if v > 1]\n for d in duplicates:\n eprint(d)\n die(\"Abort\")\n\n if len(new_lines) == 0:\n # no transcripts pass the filter: probably an input data mistake\n sys.exit(\n f\"Error! No reference annotation tracks left after filtering procedure! Abort\"\n )\n\n # write transcripts that passed the filter to the output file\n f = open(output, \"w\") if output != \"stdout\" else sys.stdout\n f.write(\"\\n\".join(new_lines) + \"\\n\")\n f.close() if output != \"stdout\" else None\n\n if save_rejected:\n # save transcripts that didn't pass the filter + reason why\n f = open(save_rejected, \"w\")\n for elem in rejected:\n f.write(f\"{elem[0]}\\t{elem[1]}\\n\")\n f.close()",
"def filter_variants_dna(file, normal_coverage, tumor_coverage, tumor_var_depth,\n tumor_var_freq, normal_var_freq, t2n_ratio, num_callers,\n num_callers_indel, ensembl_version):\n\n ens_data = EnsemblRelease(int(ensembl_version))\n variants = list()\n reader = vcfpy.Reader.from_path(file)\n for record in reader:\n for info in record.INFO['CSQ']:\n record_INFO = Record_INFO(*info.split('|'))\n funcensGene = record_INFO.Consequence\n has_func_ens = 'missense' in funcensGene or 'frame' in funcensGene\n avsnp150 = record_INFO.Existing_variation.split('&')[0] if 'rs' in record_INFO.Existing_variation else 'NA'\n gnomad_AF = record_INFO.gnomAD_AF if record_INFO.gnomAD_AF != '' else 'NA'\n cosm_count = record_INFO.Existing_variation.count('COSV')\n cosmic70 = ';'.join(record_INFO.Existing_variation.split('&')[-cosm_count::]) if cosm_count > 0 else 'NA'\n gene = record_INFO.SYMBOL\n\n if has_func_ens:\n called = {x.sample: x.data for x in record.calls if x.called}\n filtered = dict()\n pass_snp = 0\n pass_indel = 0\n try:\n if 'NORMAL.mutect' in called and 'TUMOR.mutect' in called and 'PASS' in record.FILTER:\n normal_DP = int(called['NORMAL.mutect']['DP'])\n token = called['NORMAL.mutect']['AD']\n normal_AD = int(token[1]) if type(token) is list else int(token)\n token = called['NORMAL.mutect']['AF']\n value = token[0] if type(token) is list else token\n normal_VAF = np.around(float(value) * 100,\n 3) if normal_DP > 0.0 else 0.0\n tumor_DP = int(called['TUMOR.mutect']['DP'])\n token = called['TUMOR.mutect']['AD']\n tumor_AD = int(token[1]) if type(token) is list else int(token)\n token = called['TUMOR.mutect']['AF']\n value = token[0] if type(token) is list else token\n tumor_VAF = np.around(float(value) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n pass_snp += 1\n filtered['mutect'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD,\n normal_VAF,\n tumor_DP,\n tumor_AD,\n tumor_VAF)\n if 'NORMAL.somaticsniper' in called and 'TUMOR.somaticsniper' in called:\n normal_DP = int(called['NORMAL.somaticsniper']['DP'])\n normal_AD = sum(called['NORMAL.somaticsniper']['DP4'][2:])\n normal_VAF = np.around((normal_AD / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0\n tumor_DP = int(called['TUMOR.somaticsniper']['DP'])\n tumor_AD = sum(called['TUMOR.somaticsniper']['DP4'][2:])\n tumor_VAF = np.around((tumor_AD / float(tumor_DP)) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n is_somatic = int(called['TUMOR.somaticsniper']['SS']) == 2\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio and is_somatic:\n pass_snp += 1\n if is_somatic:\n filtered['somaticsniper'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD,\n normal_VAF,\n tumor_DP,\n tumor_AD,\n tumor_VAF)\n\n if ('NORMAL.varscan' in called and 'TUMOR.varscan' in called) \\\n or ('NORMAL.varscan_indel' in called and 'TUMOR.varscan_indel' in called) \\\n and 'PASS' in record.FILTER and 'SOMATIC' in record.INFO:\n label_index = 'varscan' if 'NORMAL.varscan' in called else 'varscan_indel'\n normal_DP = int(called['NORMAL.{}'.format(label_index)]['DP'])\n normal_AD = 
sum(called['NORMAL.{}'.format(label_index)]['DP4'][2:])\n token = called['NORMAL.{}'.format(label_index)]['FREQ']\n value = token[0] if type(token) is list else token\n normal_VAF = float(value.replace('%', ''))\n tumor_DP = int(called['TUMOR.{}'.format(label_index)]['DP'])\n tumor_AD = sum(called['TUMOR.{}'.format(label_index)]['DP4'][2:])\n token = called['TUMOR.{}'.format(label_index)]['FREQ']\n value = token[0] if type(token) is list else token\n tumor_VAF = float(value.replace('%', ''))\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n if 'indel' in label_index:\n pass_indel += 1\n else:\n pass_snp += 1\n filtered[label_index] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD,\n normal_VAF,\n tumor_DP,\n tumor_AD,\n tumor_VAF)\n if 'NORMAL.strelka' in called and 'TUMOR.strelka' in called and 'PASS' in record.FILTER:\n ref_index = record.REF + 'U'\n alt_index = str(record.ALT[0].serialize()) + 'U'\n # normal_DP = int(called['NORMAL.strelka']['DP'])\n token = called['NORMAL.strelka'][ref_index]\n normal_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['NORMAL.strelka'][alt_index]\n normal_AD2 = int(token[0]) if type(token) is list else int(token)\n normal_DP = normal_AD1 + normal_AD2\n normal_VAF = np.around((normal_AD2 / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0\n # tumor_DP = int(called['TUMOR.strelka']['DP'])\n token = called['TUMOR.strelka'][ref_index]\n tumor_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['TUMOR.strelka'][alt_index]\n tumor_AD2 = int(token[0]) if type(token) is list else int(token)\n tumor_DP = tumor_AD1 + tumor_AD2\n tumor_VAF = np.around((tumor_AD2 / float(tumor_DP)) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD2 >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and tumor_normal_ratio >= t2n_ratio:\n pass_snp += 1\n filtered['strelka'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD2,\n normal_VAF,\n tumor_DP,\n tumor_AD2,\n tumor_VAF)\n if 'NORMAL.strelka_indel' in called and 'TUMOR.strelka_indel' in called and 'PASS' in record.FILTER:\n # normal_DP = int(called['NORMAL.strelka_indel']['DP'])\n token = called['NORMAL.strelka_indel']['TAR']\n normal_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['NORMAL.strelka_indel']['TIR']\n normal_AD2 = int(token[0]) if type(token) is list else int(token)\n normal_DP = normal_AD1 + normal_AD2\n normal_VAF = np.around((normal_AD2 / float(normal_DP)) * 100, 3) if normal_DP > 0.0 else 0.0\n # tumor_DP = int(called['TUMOR.strelka_indel']['DP'])\n token = called['TUMOR.strelka_indel']['TAR']\n tumor_AD1 = int(token[0]) if type(token) is list else int(token)\n token = called['TUMOR.strelka_indel']['TIR']\n tumor_AD2 = int(token[0]) if type(token) is list else int(token)\n tumor_DP = tumor_AD1 + tumor_AD2\n tumor_VAF = np.around((tumor_AD2 / float(tumor_DP)) * 100, 3)\n tumor_normal_ratio = tumor_VAF / normal_VAF if normal_VAF != 0 else t2n_ratio\n if normal_DP >= normal_coverage and tumor_DP >= tumor_coverage \\\n and tumor_VAF >= tumor_var_freq and tumor_AD2 >= tumor_var_depth \\\n and normal_VAF <= normal_var_freq and 
tumor_normal_ratio >= t2n_ratio:\n pass_indel += 1\n filtered['strelka_indel'] = '{};{};{};{};{};{}'.format(normal_DP,\n normal_AD2,\n normal_VAF,\n tumor_DP,\n tumor_AD2,\n tumor_VAF)\n except KeyError:\n continue\n\n variant_epitopes = epitopes(record, record_INFO, ens_data)\n variant = Variant()\n variant.chrom = record.CHROM\n variant.start = record.POS\n variant.ref = record.REF\n variant.alt = record.ALT[0].serialize()\n variant.callers = '|'.join(['{}:{}'.format(key, value) for key, value in filtered.items()])\n variant.num_callers = len(filtered)\n variant.status = pass_snp >= num_callers or pass_indel >= num_callers_indel\n variant.epitopes = variant_epitopes\n variant.dbsnp = avsnp150\n variant.gnomad = gnomad_AF\n variant.cosmic = cosmic70\n variant.type = 'dna'\n variant.gene = gene\n variants.append(variant)\n\n return variants",
"def sample_refine(self, **kwargs):",
"def write_to_bed(input_sequence, variant):\n # inputSequence ID used as the filename\n filename = variant.chromosome + '-' + str(variant.start) + '.bed'\n targets = input_sequence.target_regions\n #with open('/srv/primer_design/s_drive/designs/' + filename, 'w') as csvfile:\n with open('/media/sf_S_DRIVE/genomic_resources/primer_design/designs/' + filename, 'w') as csvfile:\n f = csv.writer(csvfile, delimiter='\\t',\n quotechar=';', quoting=csv.QUOTE_MINIMAL)\n f.writerow(['track name=\"' + filename + '\" description=' + '\"Primers designed for' + filename +\n '\" visibility=2 itemRgb=\"On\"'])\n for target in targets:\n f.writerow([input_sequence.chrom_number, target.seq_start + target.overhang,# - 30,\n target.seq_stop - target.overhang , target.target_id, 0, input_sequence.strand,\n target.seq_start + target.overhang, target.seq_stop - target.overhang,# + 30,\n '255,0,0'])\n for target in targets:\n for primer in target.primers:\n f.writerow([input_sequence.chrom_number, primer.forward_genomic_coords[0] - 1,\n primer.reverse_genomic_coords[1], target.target_id, 0, input_sequence.strand,\n primer.forward_genomic_coords[1], primer.reverse_genomic_coords[0] - 1, '0,0,255'])",
"def prepare(params, samples):\r\n return",
"def main(var_file, rd_file, segment_file, patient_names, vaf_threshold=0.05, filterSegments = False):\n patient = patient_names\n\n patient_varcount = pd.read_csv(var_file, low_memory=False, delimiter=\"\\t\")\n patient_readdepth = pd.read_csv(rd_file, low_memory=False, delimiter=\"\\t\")\n\n # Sanity check to see if the columns are identical\n unmatch = patient_varcount.loc[patient_varcount.loc[:, 'Chromosome'] != patient_readdepth.loc[:, 'Chromosome']]\n if (unmatch.empty != True):\n print(\"Something wrong with sample, columns order do not match!\")\n\n tumor_sample = patient_varcount.columns[4:]\n info_col = patient_varcount.columns[:4]\n\n # Can use this to remove indels\n # patient_readdepth = patient_readdepth.loc[patient_readdepth['Change'].str.contains('-')!=True]\n # patient_varcount = patient_varcount.loc[patient_varcount['Change'].str.contains('-')!=True]\n\n # Make sure there's no zero read depth position for any sector, as Pyclone\n # will assume that the mutatation has identical VAF at that sector\n tmp = (patient_readdepth.loc[:, tumor_sample] == 0)\n patient_readdepth = patient_readdepth.loc[tmp.any(axis=1)==False]\n patient_varcount = patient_varcount.loc[tmp.any(axis=1)==False]\n\n # Transform RD to ref count which is just the difference between RD and varcount\n patient_readdepth.iloc[:, 4:] = patient_readdepth.iloc[:, 4:] - patient_varcount.iloc[:, 4:]\n # Get VAF and filter out those with < 0.05 VAF called in any sector.\n patient_VAF = patient_varcount.iloc[:, 4:] / patient_readdepth.iloc[:, 4:]\n patient_VAF = (patient_VAF < vaf_threshold)\n\n # Remove the mutations where the condition is true for ALL segments, i.e. it has to be below\n # 0.05 for all sectors. If it's above 0.05 in any sector, keep the mutations. This will keep most\n # of the private mutations.\n filter_VAF_index = (patient_VAF.all(axis=1) == False)\n num_filtered = filter_VAF_index.loc[filter_VAF_index == False, ]\n print(\"Patient {} has {} mutations with average VAF < {} removed\".format(patient, num_filtered.shape[0], vaf_threshold))\n # Filter out the variants\n patient_readdepth = patient_readdepth.loc[filter_VAF_index, ]\n patient_varcount = patient_varcount.loc[filter_VAF_index, ]\n\n all_segments = pd.read_csv(segment_file, low_memory=False, delimiter='\\t')\n\n if not os.path.exists(\"{}_mutations_withCN\".format(patient)):\n os.makedirs(\"{}_mutations_withCN\".format(patient))\n if not os.path.exists(\"{}_pyclone_input\".format(patient)):\n os.makedirs(\"{}_pyclone_input\".format(patient))\n\n for sample in tumor_sample:\n # The treeomics input has this weird problem of not accepting dash\n # in the name, so the output from my script in preparing treeomics\n # input has underscore instead. 
Change it back here.\n samplename = re.sub(r'_', r'-', sample)\n print(samplename)\n col_to_get = list(info_col)\n col_to_get.extend([sample])\n var_pat = patient_varcount.loc[:, col_to_get]\n var_pat.rename(columns={sample:\"var_counts\"}, inplace=True)\n ref_pat = patient_readdepth.loc[:, col_to_get]\n ref_pat.rename(columns={sample:\"ref_counts\"}, inplace=True)\n merge_sample_mut = var_pat.merge(ref_pat, how=\"left\")\n merge_sample_mut.loc[:, 'normal_cn'] = 2\n merge_sample_mut.loc[:, 'mutation_id'] = merge_sample_mut.loc[:, 'Gene'].map(str) + \"_\" + merge_sample_mut.loc[:, 'Chromosome'].map(str) + \":\" + merge_sample_mut.loc[:, 'Position'].map(str)\n sample_segments = all_segments[all_segments['Tumor_Sample_Barcode'] == samplename]\n\n seg_dict = segments_to_dict(sample_segments)\n\n overlap_seg = pd.DataFrame()\n filtered_seg = pd.DataFrame()\n for _, mut_row in merge_sample_mut.iterrows():\n # Skip X and Y chromosome\n if (mut_row['Chromosome'] == \"X\" or mut_row['Chromosome'] == \"Y\"):\n continue\n\n # Search for the segment\n buf = search_overlap(mut_row, seg_dict)\n # Skip if no overlapping segments\n if (buf.empty):\n continue\n # Filter segments with unreliable calls. This is according to Canopy's guideline. However, I set CNt to 8 instead of 6 since\n # LUAD tends to have higher ploidy than the other cancer types.\n elif filterSegments:\n print(\"--filterSegments specified. Will filter segments of low quality.\")\n if (buf.iloc[0]['numMarker'] < 100) or (buf.iloc[0]['end.pos'] - buf.iloc[0]['start.pos'] < 5000000) or (buf.iloc[0]['CNt'] >= 8):\n if (filtered_seg.empty):\n filtered_seg = buf.iloc[0].to_frame()\n else:\n filtered_seg = pd.concat([filtered_seg, buf.iloc[0]], axis=1)\n else:\n # Get copy number for mutations\n assigned_row = mut_row.copy(deep=True)\n assigned_row['CNt'] = buf.iloc[0]['CNt']\n assigned_row['major_cn'] = buf.iloc[0]['A']\n assigned_row['minor_cn'] = buf.iloc[0]['B']\n # Initialize dataframe for merging.\n if (overlap_seg.empty):\n overlap_seg = assigned_row.to_frame()\n else:\n overlap_seg = pd.concat([overlap_seg, assigned_row], axis=1)\n\n overlap_seg = overlap_seg.transpose()\n overlap_seg.to_csv(\"./{}_mutations_withCN/{}_SNV_withCN.maf\".format(patient, samplename),sep=\"\\t\", index=False)\n\n filtered_seg = filtered_seg.transpose()\n print(\"Sample {} has {} segments with marker<100 or smaller than 5 Mb or >= 8 copy number (Canopy guideline)\".format(sample, filtered_seg.shape[0]))\n filtered_seg.to_csv(\"./{}_mutations_withCN/{}_filtered_seg.maf\".format(patient, samplename),sep=\"\\t\", index=False)\n\n towrite = overlap_seg.loc[:, ['mutation_id', 'ref_counts', 'var_counts', 'normal_cn', 'minor_cn', 'major_cn']]\n # Remove those with major CN = 0. Most likely false positive. Note this will however remove the mutations across all\n # sectors when Pyclone run analysis\n weird_mut = towrite.loc[towrite.loc[:, 'major_cn'] == 0]\n print(\"{} mutations for sample {} are located in regions with major_cn 0!\".format(weird_mut.shape[0], samplename))\n towrite = towrite.loc[towrite.loc[:, 'major_cn'] != 0]\n towrite['ref_counts'] = towrite['ref_counts'].map(int)\n towrite['var_counts'] = towrite['var_counts'].map(int)\n\n towrite.to_csv(\"./{}_pyclone_input/{}.tsv\".format(patient, samplename), sep='\\t', index=False)",
"def __init__(\n self,\n sample_path,\n sample_name=None,\n sample_index=None,\n lane_no=None,\n read_no=None,\n is_index=None,\n checksum=None):\n super(SampleFile, self).__init__(sample_path, file_checksum=checksum)\n self.sample_name = sample_name\n self.sample_index = sample_index\n self.lane_no = lane_no\n self.read_no = read_no\n self.is_index = is_index",
"def _write_variants_bedfile(h_o, p_f, p_t, byc):\n\n local_paths = byc.get(\"local_paths\")\n if not local_paths:\n return False\n tmp_path = Path( path.join( *local_paths[ \"server_tmp_dir_loc\" ]) )\n if not tmp_path.is_dir():\n return False\n\n v_ret = 0\n v_max = 1000\n\n if len( h_o[\"target_values\"] ) < 1:\n return()\n if not h_o[\"target_collection\"] == \"variants\":\n return()\n \n ds_id = h_o[\"source_db\"]\n accessid = h_o[\"id\"]\n l = \"\"\n if p_t > 0:\n l = \"_{}-{}\".format(p_f + 1, p_t)\n else:\n p_t = v_max # only for the non-paginated ...\n\n bed_file_name = f'{accessid}{l}.bed'\n bed_file = Path( path.join( tmp_path, bed_file_name ) )\n\n vs = { \"DUP\": [ ], \"DEL\": [ ], \"LOH\": [ ], \"SNV\": [ ]}\n\n data_client = MongoClient(host=environ.get(\"BYCON_MONGO_HOST\", \"localhost\"))\n data_db = data_client[ ds_id ]\n data_coll = data_db[ h_o[\"target_collection\"] ]\n\n for v in data_coll.find( { h_o[\"target_key\"]: { '$in': h_o[\"target_values\"] } }).limit(p_t):\n\n v_ret += 1\n\n if p_t > 0:\n if v_ret > p_t:\n continue\n if v_ret <= p_f:\n continue\n \n # TODO: Just make this from the standard variant format\n pv = ByconVariant(byc).byconVariant(v)\n\n if \"DUP\" in pv[\"variant_type\"]:\n vs[\"DUP\"].append(pv)\n elif \"DEL\" in pv[\"variant_type\"]:\n vs[\"DEL\"].append(pv)\n elif \"LOH\" in pv[\"variant_type\"]:\n vs[\"LOH\"].append(pv)\n elif \"SNV\" in pv[\"variant_type\"]:\n vs[\"SNV\"].append(pv)\n else:\n continue\n\n b_f = open( bed_file, 'w' )\n pos = set()\n\n ucsc_chr = \"\"\n\n for vt in vs.keys():\n if len( vs[vt] ) > 0:\n try:\n vs[vt] = sorted(vs[vt], key=lambda k: k['variant_length'], reverse=True)\n except:\n pass\n col_key = \"plot_{}_color\".format(vt.lower())\n col_hex = byc[\"plot_defaults\"].get(col_key, \"#666666\")\n col_rgb = hex_2_rgb(col_hex)\n b_f.write(\"track name={} visibility=squish description=\\\"{} variants matching the query with {} overall returned\\\" color={},{},{}\\n\".format(vt, vt, v_ret, col_rgb[0], col_rgb[1], col_rgb[2] ) )\n b_f.write(\"#chrom\\tchromStart\\tchromEnd\\tbiosampleId\\n\")\n for v in vs[vt]:\n ucsc_chr = \"chr\"+v[\"reference_name\"]\n ucsc_min = int( v[\"start\"] + 1 )\n ucsc_max = int( v[\"end\"] )\n l = \"{}\\t{}\\t{}\\t{}\\n\".format( ucsc_chr, ucsc_min, ucsc_max, v[\"biosample_id\"] )\n pos.add(ucsc_min)\n pos.add(ucsc_max)\n b_f.write( l )\n \n b_f.close()\n ucsc_range = sorted(pos)\n ucsc_pos = \"{}:{}-{}\".format(ucsc_chr, ucsc_range[0], ucsc_range[-1])\n\n return [bed_file_name, ucsc_pos]",
"def test_variants(capsys):\n\n wd_path = os.path.join(packagedir, \"sample_data\", \"special_wd_variants\")\n init_wd(wd_path, os.path.join(packagedir, \"sample_data\", \"sample_reads_in\"), remove_analysis=False)\n\n # clean up possible old results\n for file in ['config_used.yaml', 'variants_raw/variants_merged.csv']:\n if os.path.exists(os.path.join(wd_path, 'analysis', file)):\n os.unlink(os.path.join(wd_path, 'analysis', file))\n\n # now update file modification time to pretend we called variants\n for file in ['targets.bed', 'targets_merged.bed', 'versions/gatk.txt', 'variants_raw/S1.vcf']:\n pathlib.Path(os.path.join(wd_path, 'analysis', file)).touch()\n\n rules_manual = [\n '--resume',\n os.path.join('analysis', 'variants_raw', 'variants_summary.csv'), \n ]\n\n # just run the variants rule, we can't run from scratch since we won't have a caller\n captured = check_run(capsys, wd_path, rules = rules_manual, run=False)\n # make sure we are not trying to rerun everything\n # NOTE: this will contain output from above, so we can't fail on align_pe\n assert not 'call_variants_raw' in captured.out.strip()\n # make sure we want to reannotate\n assert 'variants_merge_unannotated' in captured.out.strip()\n # now actually run\n captured = check_run(capsys, wd_path, rules = rules_manual)\n\n # check variant files\n variants_merged = pd.read_csv(os.path.join(wd_path, 'analysis', 'variants_raw', 'variants_merged.csv'), index_col=['Chr', 'Start'])\n assert len(variants_merged) == 5\n assert len(variants_merged.loc['U00096.3', 35]) == 1\n assert len(variants_merged.loc['U00096.3', 36]) == 1\n assert len(variants_merged.loc['U00096.3', 37]) == 1\n assert len(variants_merged.loc['U00096.3', 45]) == 2\n\n variants_summary = pd.read_csv(os.path.join(wd_path, 'analysis', 'variants_raw', 'variants_summary.csv'), index_col=['Chr', 'Start', 'Alt'])\n assert len(variants_summary) == 5\n\n assert variants_summary.loc['U00096.3', 35, 'C']['Ref'] == 'T'\n assert variants_summary.loc['U00096.3', 36, 'A']['Ref'] == 'C'\n assert variants_summary.loc['U00096.3', 37, 'T']['Ref'] == 'TGTG'\n assert variants_summary.loc['U00096.3', 45, 'G']['Ref'] == 'T'\n assert variants_summary.loc['U00096.3', 45, 'C']['Ref'] == 'T'\n\n assert variants_summary.loc['U00096.3', 35, 'C']['Var_Zygosity'] == 'Het'\n assert variants_summary.loc['U00096.3', 36, 'A']['Var_Zygosity'] == 'HOM'\n assert variants_summary.loc['U00096.3', 37, 'T']['Var_Zygosity'] == 'Het'\n assert variants_summary.loc['U00096.3', 45, 'G']['Var_Zygosity'] == 'REF'\n assert variants_summary.loc['U00096.3', 45, 'C']['Var_Zygosity'] == 'Het'\n\n assert variants_summary.loc['U00096.3', 35, 'C']['Target'] == 'target_0'\n assert variants_summary.loc['U00096.3', 36, 'A']['Target'] == 'target_0'\n assert variants_summary.loc['U00096.3', 37, 'T']['Target'] == 'target_0'\n assert pd.isna(variants_summary.loc['U00096.3', 45, 'G']['Target'])\n assert pd.isna(variants_summary.loc['U00096.3', 45, 'C']['Target'])\n\n assert variants_summary.loc['U00096.3', 35, 'C']['Var_FailedFilters'] == 'badReads'\n assert variants_summary.loc['U00096.3', 45, 'G']['Var_FailedFilters'] == 'badReads'\n assert variants_summary.loc['U00096.3', 45, 'C']['Var_FailedFilters'] == 'badReads'\n assert variants_summary['Var_FailedFilters'].isnull().sum() == 2",
"def _prepare_samples(args):\n if args.galaxy:\n system_config = args.galaxy\n else:\n system_config = os.path.join(_get_data_dir(), \"galaxy\", \"bcbio_system.yaml\")\n config = yaml.load(open(system_config))\n config['algorithm'] = {}\n data = []\n vcf_files = [fn for fn in args.files if fn.endswith('vcf')]\n bam_files = [fn for fn in args.files if fn.endswith('bam')]\n fastq_files = [fn for fn in args.files if is_fastq(fn)]\n if not fastq_files:\n fastq_files = vcf_files\n for sample in fastq_files:\n dt = {}\n dt['name'] = splitext_plus(op.basename(sample))[0]\n dt['config'] = config\n dt['fastq'] = op.abspath(sample)\n if bam_files:\n dt['bam'] = _find_bam(bam_files, sample)\n data.append([dt])\n return data",
"def fixture_sample_single() -> dict:\n _sample = {\n \"fastq\": \"<( zcat read_R1.fastq.gz )\",\n \"single_end\": True,\n \"sample_id\": \"single\",\n }\n return _sample",
"def _generate_sample(self, sample):\n # handle paths or pd.Series as input for `sample`\n if type(sample) == str or issubclass(type(sample), Path):\n sample = AudioSample(sample) # initialize with source = file path\n else:\n assert isinstance(sample, AudioSample), (\n \"sample must be AudioSample OR file path (str or pathlib.Path), \"\n f\"was {type(sample)}\"\n )\n\n # add attributes to the sample that might be needed by actions in the pipeline\n sample.preprocessor = self\n sample.target_duration = self.sample_duration\n\n return sample",
"def addSample(self, sample_name, filename):\n tax_levels = None\n if len(self.abundance_df.columns) == 0:\n self.abundance_df = pd.read_csv(filename, header=0, sep='\\t') #krona (no header, no index)\n cols = list(self.abundance_df.columns)\n self.abundance_df = self.abundance_df[cols[0:2] + cols[:1:-1]]\n self.tax_levels = self.abundance_df.columns.tolist()[2:]\n self.abundance_df = self.abundance_df[self.abundance_df.columns.tolist()[0:2] + self.tax_levels]\n self.abundance_df.rename(columns={self.abundance_df.columns[0]:sample_name}, inplace=True)\n self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'\n self.abundance_df.index.name = None \n\n self.abundance_raw_df = self.abundance_df.loc[:,[self.abundance_df.columns[1]] + self.tax_levels]\n self.abundance_raw_df.rename(columns={self.abundance_raw_df.columns[0]:sample_name}, inplace=True)\n self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'\n self.abundance_raw_df.index.name = None \n self.abundance_df = self.abundance_df.loc[:,[self.abundance_df.columns[0]] + self.tax_levels]\n else:\n sample_df = pd.read_csv(filename, header=0, sep='\\t')\n sample_raw_df = sample_df.loc[:,[sample_df.columns[1]]+self.tax_levels]\n sample_raw_df.rename(columns={sample_raw_df.columns[0]:sample_name}, inplace=True) \n sample_raw_df.index = sample_raw_df[self.tax_levels[0]]+'_'\n sample_raw_df.index.name = None\n sample_df.rename(columns={sample_df.columns[0]:sample_name}, inplace=True) \n sample_df.index = sample_df[self.tax_levels[0]]+'_'\n sample_df.index.name = None \n self.abundance_df = pd.merge(self.abundance_df, sample_df, how='outer', on=self.tax_levels)\n self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'\n self.abundance_df.index.name = None\n self.abundance_df.fillna(value=0, inplace=True) \n self.abundance_raw_df = pd.merge(self.abundance_raw_df, sample_raw_df, how='outer', on=self.tax_levels)\n self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'\n self.abundance_raw_df.index.name = None \n self.abundance_raw_df.fillna(value=0, inplace=True)\n self.abundance_df[sample_name] = self.abundance_df[sample_name].astype(float)\n self.abundance_raw_df[sample_name] = self.abundance_raw_df[sample_name].astype(float)\n \n self.sample_names.append(sample_name.strip())\n self.abundance_df = self.abundance_df[self.sample_names + self.tax_levels]\n self.abundance_raw_df = self.abundance_raw_df[self.sample_names + self.tax_levels]\n myindex = list(self.abundance_df.index)\n newlist = sorted(set([i for i in myindex if myindex.count(i)>1]))\n #problems with the ncbi taxonomy (typos?)\n for i in newlist:\n self.abundance_df.loc[i,self.sample_names] = self.abundance_df.loc[i].sum(numeric_only=True)\n self.abundance_df.drop(i, inplace=True)\n self.abundance_raw_df.loc[i,self.sample_names] = self.abundance_raw_df.loc[i].sum(numeric_only=True)\n self.abundance_raw_df.drop(i, inplace=True)\n return self.tax_levels",
"def test_variant_case(adapter, case_obj, variant_obj):\n # GIVEN a variant WITH gene info\n variant_obj[\"genes\"] = [\n {\"hgnc_id\": 1},\n {\"hgnc_id\": 2, \"common\": {\"chromosome\": \"1\", \"start\": \"10\", \"end\": \"100\"}},\n ]\n # GIVEN a variant without gene info\n assert case_obj.get(\"region_vcf_file\") is None\n variant_case(adapter, case_obj, variant_obj)\n # THEN assert that the region VCF was created\n assert case_obj.get(\"region_vcf_file\") is not None",
"def prepare_sample(self, sample_raw):\n\n text, mention, candidate = sample_raw\n self._logger.debug(f'Preprocessing sample (\"{mention}\" vs. \"{candidate}\")')\n\n sample = {}\n sample['text'] = text\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = mention\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n sample['item_id'] = candidate\n sample['item_pbg'] = self._pbg.get_item_embedding(candidate)\n sample['item_glove'] = np.empty((1, 900)) # TODO\n return sample",
"def _sample_variants(self, p, file_path=None):\n output_path = '/tmp/subsample_variant.list'\n if file_path is None:\n file_path = '/tmp/variant.list'\n if os.path.isfile(file_path):\n self._variants = pd.read_table(file_path,\n header=None, sep=' ',\n names=['Chrom', 'Pos', 'ID', 'AF'])\n else:\n self.get_allele_freq(file_path)\n self._variants = pd.read_table(file_path,\n header=None, sep=' ',\n names=['Chrom', 'Pos', 'ID', 'AF'])\n else:\n self._variants = pd.read_table(file_path, header=None, sep=' ')\n\n self._sampled = self._variants.sample(n=p)\n self._sampled.to_csv(output_path, sep=' ', index=False, header=False)\n return output_path",
"def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file",
"def make_subsample(whole_file, subsample_file):\n line_counter = 0\n with open(whole_file, 'r') as rf, open(subsample_file, 'w') as wf:\n for line_txt in rf:\n try:\n uid = json.loads(line_txt)['attributed_to']\n if uid[-1] == '0' and uid[-2] == '0': # 1/100\n wf.write(line_txt)\n except:\n print('Error parsing line_txt:', line_txt)\n line_counter += 1\n if line_counter % 10 ** 6 == 0:\n print('read %dM lines' % (line_counter // 10 ** 6))",
"def simple_vcf_reader(fh):\n\n for line in fh:\n if line.startswith('#'):\n continue\n ls = line.rstrip().split('\\t')\n # 8 fixed fields per record\n assert len(ls)>=8, (\n \"Number of retrieved fields in vcf file too small\")\n # ignoring the rest\n (chrom, pos, id, ref, alt, qual, filter, info) = ls[:8]\n pos = int(pos)-1\n try:\n qual = int(qual)\n except:\n qual = \".\"\n info_d = dict()\n for field in info.split(';'):\n kv = field.split('=')\n # boolean entries get True as value\n if len(kv)==1:\n info_d[kv[0]] = True\n else:\n info_d[kv[0]] = kv[1]\n #try:\n # info = dict([field.split('=') for field in info.split(';')])\n #except ValueError:\n # import pdb; pdb.set_trace()\n yield Variant(chrom, pos, id, ref, alt, qual, filter, info_d)",
"def parse(v, cpy):\n if v.samples[0]['GT'][cpy] == 0: # Not present in this copy\n return None\n alt = v.samples[0].alleles[cpy]\n l_r, l_a = len(v.ref), len(alt)\n if l_r == 1:\n if l_a == 1:\n op, op_len = 'X', 0\n else:\n op, op_len = 'I', l_a - l_r\n elif l_a == 1:\n op, op_len = 'D', l_r - l_a\n else:\n raise ValueError(\"Complex variants present in VCF. Please filter or refactor these.\")\n\n return Variant(v.pos, v.ref, v.samples[0].alleles[cpy], op, op_len)",
"def prepare_runs(args):\n output_directory = _prepare_output_dir(args.output_directory)\n z_score_dir = args.z_score_dir\n region_list = args.region_list \n if args.region_list is None:\n try:\n flanking_region = int(args.flanking_region)\n except ValueError:\n logging.error('Flanking region argument needs to be an integer')\n sys.exit(COMMAND_LINE_ERROR)\n build = args.build\n bed_directory = args.bed_directory\n # Create the SNPList\n try:\n min_maf = float(args.maf)\n except:\n logging.error(\"Min Maf -m or --min-maf needs to be an floating point number\")\n sys.exit(COMMAND_LINE_ERROR)\n if args.region_list is not None:\n region_list = {}\n snp_list = []\n with open(args.region_list) as input_file:\n # When using no flaking region SNP must be valid, but it doesn't actually matter what it is, need to ensure that is actually the case.\n for i, line in enumerate(input_file):\n rsid = str(i)+ \"_\" + ''.join(line.strip().split(\"\\t\"))\n chromosome = line.strip().split(\":\")[0] \n snp = Snp(chromosome,\"1\",rsid)\n snp_list.append(snp)\n region_list[snp.rsid] = line.strip()\n else:\n snp_list = SnpList(args.snp_list, build)\n logging.info(snp_list)\n # Locus to process\n # population_to_extract_vcf\n if not args.annotation_only:\n no_flanking = args.flanking_units\n if no_flanking:\n raise NotImplementedError(\"Using a number of flanking SNPs instead of a region is not supported\")\n populations= args.populations.split(',')\n logging.info(\"Populations to process: {0}\".format(populations))\n loci = []\n gemini_databases = []\n output_vcfs = []\n for snp in snp_list:\n logging.info('Preparing output files for SNP {0}'.format(snp.rsid))\n locus = snp.rsid\n loci.append(locus)\n logging.info(\"Obtaining VCF file from the 1000 genomes project\")\n if region_list is not None:\n vcf = get_vcf_file(snp, string_region=region_list[locus])\n else: \n vcf = get_vcf_file(snp, flanking_region=flanking_region)\n for population in populations:\n tmp_vcf = extract_population_from_1000_genomes(vcf=vcf, super_population=population)\n z_score_file = get_relevant_zscore(snp.chrom, population, z_score_dir)\n pos_list_zscore = create_pos_hash_table(z_score_file)\n output_vcf = generate_zscore_and_vcf_output(output_directory=output_directory, zscore_hash=pos_list_zscore, vcf=tmp_vcf, locus=locus,population=population, multiply_rsquare=args.multiply_rsquare)\n if bed_directory is None:\n logging.info(\"Creating gemini database\")\n # TODO: Fix broxen gemini referenec\n gemini_databases.append(create_gemini_database(vcf=output_vcf))\n vcf_to_plink(locus, output_directory=output_directory, vcf=output_vcf, population=population)\n plink_to_ld_matrix(locus, output_directory=output_directory, population=population)\n logging.info(\"Generate transancestrals matrices\")\n generate_transancestral_output(loci, populations, output_directory)\n if bed_directory is None:\n logging.info(\"Generating annotation matrices to be used with Paintor\")\n logging.info(gemini_databases)\n generate_and_write_encode_annotations(databases=gemini_databases, output_directory=output_directory, loci=snp_list)\n else:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n # So finally we need to fix the LD matrices for inputting into PAINTOR. 
\n\n with open(os.path.join(output_directory, 'input.files'), 'w') as out_f:\n for snp in snp_list:\n out_f.write(snp.rsid +'\\n')\n # Remove .tbi files\n for file in os.listdir('.'):\n if fnmatch.fnmatch(file, '*.tbi'):\n try:\n os.remove(file)\n except OSError:\n logging.warning(\"Could not remove a .tbi file from the 1000 genomes tabix run\")\n else: \n loci = []\n for snp in snp_list:\n loci.append(snp.rsid)\n if bed_directory is not None:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n logging.info(\"Finemapping file preparation complete\")",
"def prepare(self):\r\n if self.varSegment.get() == \"binary\":\r\n self.calculate(\"\")\r\n else:\r\n files = selectFile(multiple=True)\r\n for file in files:\r\n self.calculate(file)",
"def export_verified_variants(aggregate_variants, unique_callers):\n document_lines = []\n for variant in aggregate_variants:\n # get genotype and allele depth for each sample\n samples = []\n for sample in variant[\"samples\"]:\n line = (\n []\n ) # line elements corespond to contants.variants_export.VERIFIED_VARIANTS_HEADER\n line.append(variant[\"institute\"])\n line.append(variant[\"_id\"]) # variant database ID\n line.append(variant[\"category\"])\n line.append(variant[\"variant_type\"])\n line.append(variant[\"display_name\"][:30]) # variant display name\n # Build local link to variant:\n case_name = variant[\"case_obj\"][\"display_name\"] # case display name\n local_link = \"/\".join([\"\", variant[\"institute\"], case_name, variant[\"_id\"]])\n line.append(local_link)\n line.append(variant.get(\"validation\"))\n line.append(case_name)\n case_individual = next(\n ind\n for ind in variant[\"case_obj\"][\"individuals\"]\n if ind[\"individual_id\"] == sample[\"sample_id\"]\n )\n if case_individual[\"phenotype\"] == 2:\n line.append(\n \" \".join([sample.get(\"display_name\"), \"(A)\"])\n ) # label sample as affected\n else:\n line.append(sample.get(\"display_name\"))\n line.append(\n \"\".join([\"chr\", variant[\"chromosome\"], \":\", str(variant[\"position\"])])\n ) # position\n line.append(\n \">\".join([variant.get(\"reference\")[:10], variant.get(\"alternative\")[:10]])\n ) # change\n genes = []\n prot_effect = []\n funct_anno = []\n for gene in variant.get(\n \"genes\", []\n ): # this will be a unique long field in the document\n genes.append(gene.get(\"hgnc_symbol\", \"\"))\n if gene.get(\"functional_annotation\"):\n funct_anno.append(gene.get(\"functional_annotation\"))\n for transcript in gene.get(\"transcripts\", []):\n if transcript.get(\"is_canonical\") and transcript.get(\"protein_sequence_name\"):\n prot_effect.append(\n urllib.parse.unquote(transcript.get(\"protein_sequence_name\"))\n )\n line.append(\",\".join(prot_effect))\n line.append(\",\".join(funct_anno))\n line.append(\",\".join(genes))\n line.append(variant.get(\"rank_score\"))\n line.append(variant.get(\"cadd_score\"))\n line.append(sample.get(\"genotype_call\"))\n line.append(sample[\"allele_depths\"][0])\n line.append(sample[\"allele_depths\"][1])\n line.append(sample[\"genotype_quality\"])\n\n # Set callers values. One cell per caller, leave blank if not applicable\n for caller in unique_callers:\n if variant.get(caller):\n line.append(variant.get(caller))\n else:\n line.append(\"-\")\n document_lines.append(line)\n return document_lines",
"def preparehspiceidvgGEO4(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,Ach_UFCMparam,Cins_UFCMparam,W_UFCMparam,NBODYparam,NFINparam):\n#L=Lparam Ach_UFCM=Ach_UFCMparam Cins_UFCM=Cins_UFCMparam W_UFCM=W_UFCMparam NBODY=NBODYparam NFIN=NFINparam\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Ach_UFCMparam',Ach_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Cins_UFCMparam', Cins_UFCMparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'W_UFCMparam',W_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)",
"def test_reading_parse_nosample(tmpdir, nosample_vcf_file):\n # Perform record-wise copying, saving results in records\n path_out = tmpdir.mkdir(\"output\").join(\"output.vcf\")\n with vcfpy.Reader.from_path(nosample_vcf_file) as reader:\n header = reader.header.copy()\n header.samples = vcfpy.SamplesInfos([\"NA00001\", \"NA00002\", \"NA00003\"])\n with vcfpy.Writer.from_path(str(path_out), header) as writer:\n for record in reader:\n record.update_calls(\n [vcfpy.Call(sample, {}) for sample in (\"NA00001\", \"NA00002\", \"NA00003\")]\n )\n record.add_format(\"GT\", \"./.\")\n writer.write_record(record)\n\n expected = textwrap.dedent(\n \"\"\"\n ##fileformat=VCFv4.3\n ##contig=<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,species=\"Homo sapiens\",taxonomy=x>\n ##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n #CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tNA00001\tNA00002\tNA00003\n 20\t14370\t.\tG\tA\t29\t.\t.\tGT\t.\t.\t.\n 20\t17330\t.\tT\tA\t3\t.\t.\tGT\t.\t.\t.\n 20\t1110696\t.\tA\tG,T\t67\t.\t.\tGT\t.\t.\t.\n 20\t1230237\t.\tT\t.\t47\t.\t.\tGT\t.\t.\t.\n 20\t1234567\t.\tGTC\tG,GTCT\t50\t.\t.\tGT\t.\t.\t.\n \"\"\"\n ).lstrip()\n\n assert path_out.open(\"rt\").read() == expected",
"def test_process_stereo_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/stereo.wav'\n self.default_kwargs['input_file'] = test_path\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()",
"def main_SS(maf_file, segment_file, vaf_threshold = 1.05, filterSegments = False):\n all_mutations = pd.read_csv(maf_file, low_memory=False, delimiter='\\t')\n all_segments = pd.read_csv(segment_file, low_memory=False, delimiter='\\t')\n\n if not os.path.exists(\"./sample_mutations_withCN\"):\n os.makedirs(\"./sample_mutations_withCN\")\n if not os.path.exists(\"./pyclone_input\"):\n os.makedirs(\"./pyclone_input\")\n\n for i, sample in enumerate(all_mutations.Tumor_Sample_Barcode.unique()):\n print(\"Processing sample {}: {}\".format(i+1, sample))\n\n # Subset the mutations and segments to those belonging to the patient\n sample_mutations = all_mutations[all_mutations['Tumor_Sample_Barcode'] == sample]\n sample_segments = all_segments[all_segments['Tumor_Sample_Barcode'] == sample]\n\n patient_VAF = sample_mutations.loc[:, 'VAF']\n filter_VAF_index = (patient_VAF > vaf_threshold)\n\n # Remove the mutations where the condition is true for ALL segments, i.e. it has to be below\n # 0.05 for all sectors. If it's above 0.05 in any sector, keep the mutations. This will keep most\n # of the private mutations.\n num_filtered = filter_VAF_index.loc[filter_VAF_index == False, ]\n print(\"Patient {} has {} mutations with average VAF < {} removed\".format(sample, num_filtered.shape[0], vaf_threshold))\n # Filter out the variants\n sample_mutations = sample_mutations.loc[filter_VAF_index, ]\n # Get the segments dictionary for the patient.\n seg_dict = segments_to_dict(sample_segments)\n\n overlap_seg = pd.DataFrame()\n filtered_seg = pd.DataFrame()\n for _, mut_row in sample_mutations.iterrows():\n # Skip X and Y chromosome\n if (mut_row['Chromosome'] == \"X\" or mut_row['Chromosome'] == \"Y\"):\n continue\n\n # Search for the segment\n buf = search_overlap_singleSample(mut_row, seg_dict)\n # Skip if no overlapping segments\n if (buf.empty):\n continue\n elif filterSegments:\n print(\"--filterSegments specified. 
Will filter segments of low quality.\")\n if (buf.iloc[0]['numMarker'] < 100) or (buf.iloc[0]['end.pos'] - buf.iloc[0]['start.pos'] < 5000000) or (buf.iloc[0]['CNt'] >= 8):\n if (filtered_seg.empty):\n filtered_seg = buf.iloc[0].to_frame()\n else:\n filtered_seg = pd.concat([filtered_seg, buf.iloc[0]], axis=1)\n else:\n # Get copy number for mutations\n assigned_row = mut_row.copy(deep=True)\n assigned_row['CNt'] = buf.iloc[0]['CNt']\n assigned_row['Major_CN'] = buf.iloc[0]['A']\n assigned_row['Minor_CN'] = buf.iloc[0]['B']\n assigned_row['adjustedCN'] = buf.iloc[0]['adjustedCN']\n # Initialize dataframe for merging.\n if (overlap_seg.empty):\n overlap_seg = assigned_row.to_frame()\n else:\n overlap_seg = pd.concat([overlap_seg, assigned_row], axis=1)\n\n overlap_seg = overlap_seg.transpose()\n overlap_seg.to_csv(\"./sample_mutations_withCN/{}_SNV_withCN.maf\".format(sample),sep=\"\\t\", index=False)\n\n filtered_seg = filtered_seg.transpose()\n print(\"Sample {} has {} segments with marker<100 or smaller than 5 Mb or >= 8 copy number (Canopy guideline)\".format(sample, filtered_seg.shape[0]))\n filtered_seg.to_csv(\"./sample_mutations_withCN/{}_filtered_seg.maf\".format(sample),sep=\"\\t\", index=False)\n\n pyclone_input = overlap_seg.loc[:, ['Hugo_Symbol', 'Chromosome',\n 'Start_position', 'ref_count', 'alt_count', 'VAF', 'Major_CN',\n 'Minor_CN']]\n pyclone_input['mutation_id'] = pyclone_input['Hugo_Symbol'].map(str) + \"_\" + pyclone_input['Chromosome'].map(str) + \":\" + pyclone_input['Start_position'].map(str)\n pyclone_input['normal_cn'] = 2\n towrite = pyclone_input.loc[:, ['mutation_id', 'ref_count', 'alt_count', 'normal_cn', 'Minor_CN', 'Major_CN']]\n towrite.columns = ['mutation_id', 'ref_counts', 'var_counts', 'normal_cn', 'minor_cn', 'major_cn']\n towrite['ref_counts'] = towrite['ref_counts'].map(int)\n towrite['var_counts'] = towrite['var_counts'].map(int)\n towrite.to_csv(\"./pyclone_input/{}_mutations.tsv\".format(sample), sep='\\t', index=False)"
] |
[
"0.5877613",
"0.58462787",
"0.5581498",
"0.5447532",
"0.54402614",
"0.53205305",
"0.5293866",
"0.52790785",
"0.5278006",
"0.5259671",
"0.52136654",
"0.5201567",
"0.51559687",
"0.51083004",
"0.50691414",
"0.50301903",
"0.50298095",
"0.5001772",
"0.49689683",
"0.49355268",
"0.49343267",
"0.49206868",
"0.48912776",
"0.48877555",
"0.48863444",
"0.4884797",
"0.48663712",
"0.48403132",
"0.4839839",
"0.48360625"
] |
0.73524356
|
0
|
Registers a file format from a recipe dictionary
|
def register_format(recipe):
afr = AFMFormatRecipe(recipe)
formats_available.append(afr)
# suffix
if afr.suffix not in formats_by_suffix:
formats_by_suffix[afr.suffix] = []
formats_by_suffix[afr.suffix].append(afr)
# mode
if afr.mode not in formats_by_mode:
formats_by_mode[afr.mode] = []
formats_by_mode[afr.mode].append(afr)
# supported extensions
    if afr.suffix not in supported_extensions: # avoid duplicates
supported_extensions.append(afr.suffix)
supported_extensions.sort()
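
# Illustrative usage sketch (an assumption, not part of the original source):
# the recipe schema below is hypothetical; it only needs to produce an
# AFMFormatRecipe whose `suffix` and `mode` attributes are set, because those
# are the two keys the function above indexes on.

def _load_example(path):
    # hypothetical loader stub, included only so the recipe dict is complete
    return []

example_recipe = {
    "loader": _load_example,   # assumed key name
    "suffix": ".example",      # extension to register
    "mode": "force-distance",  # assumed mode label
}

# register_format(example_recipe)
# After the call, ".example" would appear exactly once in supported_extensions,
# and the recipe would be listed under formats_by_suffix[".example"] and
# formats_by_mode["force-distance"].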
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def register(file_format, extensions, reader, writer=None):\n register_format(\n fmt=file_format,\n ext_to_fmt=_extension_to_filetype,\n reader_map=_reader_map,\n writer_map=_writer_map,\n extensions=extensions,\n reader=reader,\n writer=writer,\n )",
"def register(file_format, extensions, reader, writer=None):\n register_format(\n fmt=file_format,\n ext_to_fmt=_extension_to_filetype,\n reader_map=_reader_map,\n writer_map=_writer_map,\n extensions=extensions,\n reader=reader,\n writer=writer,\n )",
"def register_filename_format(format_name,parser):\n if format_name == \"ALL\":\n raise ValueError(\"filename format code ALL is reserved\")\n\n filename_format_parser[format_name] = parser",
"def register_data_format(format_name,parser):\n\n data_format_parser[format_name] = parser",
"def registerFileType(self, extensions, classname):\r\n\r\n # keep track of all reader classes\r\n self._all_readers.append(classname)\r\n\r\n # iterate over all extensions\r\n for e in extensions:\r\n e_lower = e.lower()\r\n if e_lower not in self._extension_map:\r\n self._extension_map[e_lower] = []\r\n\r\n self._extension_map[e_lower].append(\r\n (extensions[e] + ' file', classname))",
"def add_format(vcf_file, nid, num, ntype, desc):\n # pylint: disable=protected-access\n vcf_file.formats[nid] = vcf.parser._Format(id=nid, num=num, type=ntype, desc=desc)",
"def __call__(self, format, filename):\n # turn the filename into something suitable for use in #define's\n prettyname = filename.replace(\".\", \"_\").upper()\n prettyname = prettyname.replace(\"/\", \"__\")\n prettyname = prettyname.replace(\":\", \"__\")\n prettyname = prettyname.replace(\"-\", \"__\")\n\n # try and open the file\n with open(filename, \"w\") as output:\n self.writeFuncsLut[format]( output, prettyname )",
"def get_spec_file(*, path:str, format:str) -> dict:\n ext ={'json': json,\n 'yaml': yaml}\n try:\n with open(path, 'r') as f:\n return ext[format].load(f)\n except Exception as e:\n logger.error(f'file could not be loaded {path}')\n raise",
"def _file_format_adapter(self):\n raise NotImplementedError",
"def RegisterRecipe(self, recipe: Recipe) -> None:\n recipe_name = recipe.name.lower()\n if recipe_name in self._recipes and not self.ALLOW_RECIPE_OVERRIDE:\n raise KeyError('Recipe already set for name: {0:s}.'.format(recipe.name))\n\n self._recipes[recipe_name] = recipe",
"def __init__(self, language=\"en\",\n lowercasing=False,\n path=\"helpers/generic_files/file_extensions.vocab\",\n resource=\"file\"):\n dir_path = os.path.dirname(os.path.realpath(__file__)).strip(\"dicts\")\n full_path=os.path.join(dir_path, path)\n super(FileDictionary, self).__init__(language=language,\n lowercasing=lowercasing,\n path=full_path,\n resource=resource)",
"def __init__(self, filename, registry):\n self.filename = filename\n self.registry = registry",
"def test_format_map():\n template_filelist = listdir(RTEMPLATE_PATH)\n\n R_files = []\n json_files = []\n for file in template_filelist:\n if '.r' in file:\n file = file.replace('.r', '')\n R_files.append(file)\n elif '.json' in file:\n file = file.replace('.json', '')\n json_files.append(file)\n\n\n for template in R_files:\n template_filepath = path.join(RTEMPLATE_PATH, template + '.r')\n metadata_filepath = path.join(RTEMPLATE_PATH, template + '.json')\n\n with open(template_filepath, 'r') as t_fp:\n r_text = t_fp.read()\n\n try:\n with open(metadata_filepath, 'r') as m_fp:\n metadata = json.load(m_fp)\n except Exception as err:\n print(metadata_filepath)\n raise err\n\n format_dict = {}\n for key in metadata['required_args']:\n format_dict[key] = 'TEST'\n\n ## Actual test: apply `format_map` to r_text#\n try:\n r_text = r_text.format_map(format_dict)\n except Exception as err:\n print(template_filepath)\n raise err",
"def map_file_format_info(file_format_event, file_validation_event):\n event_info = {}\n if not file_format_event:\n return\n try:\n event_info.update(\n {\n \"dct:FileFormat\": file_format_event.event_outcome_detail,\n \"prov:softwareAgent\": file_format_event.event_detail.split(\";\")[0],\n \"premis:version\": file_format_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_format_event.event_detail,\n )\n if file_validation_event:\n event_info.update(\n {\n \"dct:FileFormat\": file_validation_event.event_outcome_detail,\n }\n )\n return event_info",
"def Register(self, name, fn):\n name = normalizeStr(name)\n Logger.Debug(\"Registering Formatter:\", name)\n self._formatters[name] = fn",
"def setup(self, formats):\n for f in formats:\n try:\n self.counters[f] += 1\n except KeyError:\n self.counters[f] = 1\n self.vim.command(f\"set efm+={f}\")",
"def json2register(self):\n try:\n with open('registered.json', 'r') as file:\n self.final_dicc = json.load(file)\n except (FileNotFoundError, ValueError, json.decoder.JSONDecodeError):\n pass",
"def readdataformat(self,filename_,format_,compress_):\n if isinstance(filename_,unicode):\n filename_ = filename_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_readdataformat(self.__nativep,filename_,format_,compress_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def load_registry(self, fname):\n with contextlib.ExitStack() as stack:\n if hasattr(fname, \"read\"):\n # It's a file object\n fin = fname\n else:\n # It's a file path\n fin = stack.enter_context(open(fname))\n\n for linenum, line in enumerate(fin):\n if isinstance(line, bytes):\n line = line.decode(\"utf-8\")\n\n line = line.strip()\n # skip line comments\n if line.startswith(\"#\"):\n continue\n\n elements = shlex.split(line)\n if not len(elements) in [0, 2, 3]:\n raise OSError(\n f\"Invalid entry in Pooch registry file '{fname}': \"\n f\"expected 2 or 3 elements in line {linenum + 1} but got \"\n f\"{len(elements)}. Offending entry: '{line}'\"\n )\n if elements:\n file_name = elements[0]\n file_checksum = elements[1]\n if len(elements) == 3:\n file_url = elements[2]\n self.urls[file_name] = file_url\n self.registry[file_name] = file_checksum.lower()",
"def add_recipe(self, recipe): \n\t\tfor key, val in self.recipes_list.items():\n\t\t\tif key == recipe.recipe_type:\n\t\t\t\tself.recipes_list[key][recipe.name] = recipe",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]",
"def register_code_name(code_name,format_name):\n if format_name not in data_format_parser:\n raise ValueError(\"unknown format_name: {:s}\".format(format_name))\n code_name_map[code_name] = format_name",
"def register(self, what, obj):\n # print(\"Registering pattern\", name, pattern)\n name = obj.name\n version = obj.version\n enable = obj.enable\n if enable == 'n':\n return\n\n key = Key(name, version)\n self.plugins[what][key] = obj",
"def setDictionary(self, path, token_pattern=\"\\S+\", read_as=ReadAs.TEXT, options={\"format\": \"text\"}):\n self.dictionary_path = path\n opts = options.copy()\n if \"tokenPattern\" not in opts:\n opts[\"tokenPattern\"] = token_pattern\n return self._set(dictionary=ExternalResource(path, read_as, opts))",
"def setDictionary(self, path, token_pattern=\"\\S+\", read_as=ReadAs.TEXT, options={\"format\": \"text\"}):\n self.dictionary_path = path\n opts = options.copy()\n if \"tokenPattern\" not in opts:\n opts[\"tokenPattern\"] = token_pattern\n return self._set(dictionary=ExternalResource(path, read_as, opts))",
"def load_dictionary(cls, args, filename, source=True):\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol(\"<mask>\")\n return dictionary",
"def setFormat(format='screen'):\n formatdict = {'screen':'XWIN', 'postscript':'POST', 'tiff':'TIFF',\n 'color postscript':'PSCL', 'cgm':'CGM','window1':'XWI1',\n 'wmf':'WMF', 'png':'PNG','console':'CONS', 'pdf':'PDF', 'virtual':'VIRT'}\n dislin.metafl(formatdict[format])",
"def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()",
"def add_form_file(self, filename, **kwargs):\n Console.ok(f\"ADD FROM FILE: Using {Registry.PROTOCOL_NAME} Protocol\")\n return self.protocol.add_form_file(filename, **kwargs)",
"def register_loader(key, module):\n register(key, module, loader_dict)"
] |
[
"0.68596166",
"0.68596166",
"0.64035183",
"0.6160948",
"0.57899994",
"0.5767842",
"0.5499242",
"0.5429027",
"0.52561927",
"0.5200761",
"0.51057297",
"0.5096522",
"0.50700337",
"0.5037854",
"0.50345564",
"0.50142324",
"0.5000288",
"0.49848488",
"0.4981142",
"0.49784878",
"0.49532962",
"0.4930926",
"0.49230835",
"0.48927167",
"0.48927167",
"0.4859616",
"0.4840916",
"0.47948068",
"0.47897896",
"0.4779663"
] |
0.730984
|
0
|
Shortcut method for getting random start and end points in a file
|
def get_random_start_and_end_points_in_file(self, file_data):
start_point = random.randint(2500, len(file_data))
end_point = start_point + random.randint(0, len(file_data) - start_point)
return start_point, end_point
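
# Illustrative note and usage sketch (not part of the original source): the
# hard-coded lower bound means the method assumes file_data is at least 2500
# characters long; for shorter inputs random.randint(2500, len(file_data))
# raises ValueError. The returned pair always satisfies
# 2500 <= start_point <= end_point <= len(file_data).
#
# start, end = glitcher.get_random_start_and_end_points_in_file(file_data)
#     # `glitcher` is a hypothetical instance of the class this method lives on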
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def createRandomRange(self, start, end) :\n\t\ttime = random.randint(1, end-start)\n\t\treturn (start, start+time)",
"def random_line(filename):\n return random.choice(list(open(filename)))",
"def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos + size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end",
"def randrange(start: int, stop: int, step: int) -> int:\n ...",
"def generate_index(file_name):\n count = num_lines(file_name)\n index = random.randint(0, count - 1)\n return index",
"def getRandomLine(filename):\n lines = open(filename).read().splitlines()\n return random.choice(lines)",
"def random_line(insult_file=None):\n if insult_file is None:\n insult_file = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), \"data\", \"insult.txt\"\n )\n with open(insult_file) as file_used:\n return random.choice(list(file_used))",
"def random_read(infile, nbytes, start_offset, num_samples): \n\tseq = \"\"\n\twhile len(seq) < num_samples:\n\t\t# Assuming 1 byte chars.\n\t\tstartidx = random.randint(start_offset, nbytes - num_samples)\n\t\tinfile.seek(startidx)\n\n\t\tseq = \"\"\n\t\tfor line in infile:\n\t\t\tfor ch in line:\n\t\t\t\tif is_valid_char(ch):\n\t\t\t\t\tseq = seq + ch\n\t\t\tif len(seq) >= num_samples:\n\t\t\t\tbreak\n\n\treturn seq[:num_samples].upper()+'\\n'",
"def get_offset(limit=12):\n return random.randrange(0, limit)",
"def random_gps_gen_from_range(s_lat,n_lat, e_lon, w_lon):\n #print(s_lat, n_lat, e_lon, w_lon)\n latitude = random.uniform(s_lat, n_lat)\n longitude = random.uniform(e_lon, w_lon)\n return latitude, longitude",
"def random_int(start: int = 0, end: int = 100) -> int:\n return random.randint(start, end)",
"def r(w,rangestart,rangeend):\r\n if w == 'r':\r\n print(random.random(rangestart , rangeend))\r\n if w == 'ri':\r\n print(random.randint(rangestart,rangeend))",
"def single_introduction(end):\n return [random.randint(0,end)]",
"def sample_int(self, start, end):\n self.minimum = start\n self.maximum = end\n return np.random.randint(start, end, size=self._sample_size)",
"def _coord(xend, yend):\n x = np.random.randint(0, xend)\n y = np.random.randint(0, yend)\n return x, y",
"def _create_random_offsets(self, block_locations):\n\n min_x, max_x, min_y, _ = self._find_min_and_max_coords(block_locations)\n x_offset = randrange(10 - (max_x - min_x)) - min_x\n y_offset = 0 - min_y\n return [x_offset, y_offset]",
"def getFileSampleLines(dirPath, percen, delim=\",\"):\n\tlines = list()\n\tfor li in fileRecGen(dirPath, delim):\n\t\tif randint(0, 100) < percen:\n\t\t\tlines.append(li)\t\t\n\treturn lines",
"def get_random_coords(width, height):\n return randrange(1, width-2), randrange(1, height-2)",
"def test_line_substring():\n for _x in range(100):\n l_str = random_str(50, 100)\n line = Line(l_str, random_str(10, 20), randint(1, 10000))\n # Try a single charater\n c_idx = randint(0, len(l_str)-1)\n sub_line = line[c_idx]\n assert sub_line == l_str[c_idx]\n assert isinstance(sub_line, Line)\n assert sub_line.file == line.file\n assert sub_line.number == line.number\n # Try a range\n s_idx = randint(0, (len(l_str) // 2) - 1)\n e_idx = randint(len(l_str) // 2, len(l_str) - 1)\n sub_line = line[s_idx:e_idx]\n assert sub_line == l_str[s_idx:e_idx]\n assert sub_line.file == line.file\n assert sub_line.number == line.number",
"def myrandint(begin, end):\n if begin == end:\n return begin\n else:\n return randint(begin, end)",
"def benchmarkRandomFragment( fasta, size ):\n\n contig, strand, start, end = fasta.getRandomCoordinates( size )\n s = fasta.getSequence( contig, strand, start, end )\n return s",
"def generate_starting_point() -> str:\n starter = ''\n for i in range(len(wf.ANSWER) // wf.SECTION_LENGTH):\n section = list(wf.ANSWER[wf.SECTION_LENGTH * i:wf.SECTION_LENGTH * (i + 1)])\n random.shuffle(section)\n starter = starter + ''.join(section)\n return starter",
"def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)",
"def get_random_time_segment(segment_ms):\n \n segment_start = np.random.randint(low=0, high=10000-segment_ms) # Make sure segment doesn't run past the 10sec background \n segment_end = segment_start + segment_ms - 1\n \n return (segment_start, segment_end)",
"def random_num(range_start,range_end):\r\n return random.randint(range_start,range_end)",
"def random_sample(grid_size):\r\n g = grid_size\r\n x_range = g[1] - g[0]\r\n\r\n y_range = g[3] - g[2]\r\n\r\n x_off = g[0]\r\n y_off = g[2]\r\n (x,y) = (x_range*np.random.ranf()+x_off,y_range*np.random.ranf()+y_off) \r\n return (x,y)",
"def random_position():\n path = (\n os.path.dirname(__file__)\n + os.sep\n + \"templates\"\n + os.sep\n + \"data\"\n + os.sep\n + \"taxi_stations.json\"\n )\n with open(path) as f:\n stations = json.load(f)[\"features\"]\n pos = random.choice(stations)\n coords = [pos[\"geometry\"][\"coordinates\"][1], pos[\"geometry\"][\"coordinates\"][0]]\n lat = float(\"{0:.6f}\".format(coords[0]))\n lng = float(\"{0:.6f}\".format(coords[1]))\n return [lat, lng]",
"def rseq(start=0.0, stop=1.0, N=10, randomness=0.5):\n\n return (randomness * sort(start + (stop - start) * rand(N))\n + (1 - randomness) * frange(start, stop, npts=N))",
"def sample(f, n):\n entries = list(SeqIO.parse(f, 'fasta'))\n for seqnum in range(n):\n loc = round(random.uniform(0, len(entries) - 1))\n entry = entries[loc] # get index of randomly-selected FASTA entry\n header = '>' + str(seqnum + 1) + '-' + entry.description # header\n print(header + '\\n' + str(entry.seq)) # print-out entire entry",
"def genInsertPosition(self):\n insize = np.random.normal(self.insertSize, self.insertStdev)\n while True:\n start = random.randint(self.fpstart, self.fpend)\n end = start + insize\n if end < self.fpend and self.isValid(start, end):\n return (start, end - self.readlen)"
] |
[
"0.62949705",
"0.6134966",
"0.60954434",
"0.5919487",
"0.59007275",
"0.5863535",
"0.585577",
"0.58109426",
"0.5758283",
"0.5748873",
"0.5682779",
"0.56414944",
"0.5631739",
"0.5604333",
"0.56005794",
"0.55697596",
"0.5545279",
"0.55299604",
"0.55119026",
"0.5484949",
"0.5425943",
"0.54107964",
"0.5405807",
"0.54049844",
"0.54043114",
"0.5403534",
"0.53681004",
"0.5339725",
"0.53311723",
"0.532758"
] |
0.79144514
|
0
|
Splice a chunk in a file. Picks out a random chunk of the file, duplicates it several times, and then inserts that chunk at some other random position in the file.
|
def splice_a_chunk_in_a_file(self, file_data, glitch_num):
start_point, end_point = self.get_random_start_and_end_points_in_file(file_data)
section = file_data[start_point:end_point]
repeated = ''
for i in range(1, glitch_num):
repeated += section
new_start_point, new_end_point = self.get_random_start_and_end_points_in_file(file_data)
file_data = file_data[:new_start_point] + repeated + file_data[new_end_point:]
return file_data
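
# Illustrative note (not part of the original source): because the loop runs
# over range(1, glitch_num), the selected section is concatenated
# glitch_num - 1 times (and not at all when glitch_num <= 1). The repeated
# block then replaces a second, independently chosen random span rather than
# being inserted between existing bytes, so the output length generally
# differs from the input length.
#
# glitched = glitcher.splice_a_chunk_in_a_file(file_data, glitch_num=4)
#     # with glitch_num=4 the chunk is repeated three times before splicing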
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def unshift(self, chunk):\n if chunk:\n self._pos -= len(chunk)\n self._unconsumed.append(chunk)",
"def truncate(data_path: str, n_chunks: int, idx=0):\n lines = open(data_path, 'r').readlines()\n n = len(lines)\n print('{} lines in original dataset'.format(n))\n chunk_size = round(len(lines) / int(n_chunks))\n print('{} lines in truncated dataset'.format(chunk_size))\n # get to idx block (so idx * x)\n start_id = idx * chunk_size\n output = lines[start_id:start_id+chunk_size]\n # write the next x lines in the output file (done)\n with open('trunc{}p_{}'.format(n_chunks, data_path), 'w') as out:\n for l in output:\n out.write(l)",
"def random_sample(input_name):\n\t#Count number of lines in original file\n\twith open(input_name) as f:\n\t\told_size = len(f.readlines())\n\t#Determine number of lines for new file\n\tnew_size=int(round(sum(1 for row in open(input_name))* args.rnd_sample))\n\t#Create name for sub-sampled file\n\tSampledFileName, SampledExten = os.path.splitext(input_name)\n\tSampledName = '%s_smpld%s' % (SampledFileName,SampledExten)\n\t#Randomly select the desired number of lines and print to new file\n\twith open(SampledName,\"wb\") as sink:\n\t\tfor i in random.sample(range(0, old_size), new_size):\n\t\t\tsink.write(linecache.getline(input_name, i))\n\tlinecache.clearcache()",
"def split_lines(input, seed, output1, output2):\n\n f = open(output1, \"w\")\n f2 = open(output2, \"w\")\n random.seed(seed)\n for line in open(input, 'r').readlines():\n\n if random.randint(1, 100) > 50:\n f2.write(line)\n else:\n f.write(line)",
"def get_random_sample(filename, sample_size, outfile):\n with open(filename, 'rb') as infile:\n header = infile.readline();\n rows = []\n for row in infile:\n rows.append(row)\n random_sample = random.sample(rows, sample_size)\n with open(outfile, 'wb') as ofile:\n ofile.writelines([header] + random_sample)",
"def splice_audio(file_path, start, end):\n audio = AudioSegment.from_mp3(file_path)\n\n # Pull thumbnail\n tags = ID3(file_path)\n thumbnail = tags.get(\"APIC:\").data\n\n # Pull any other tags from og audio file\n tags = mediainfo(file_path).get('TAG', {})\n\n # Get start and and end paramters\n # to pull the audio splice of interest\n start = timestamp_to_milliseconds(start)\n end = timestamp_to_milliseconds(end)\n\n spliced = audio[start:end]\n spliced.export(\n file_path,\n format=\"mp3\",\n tags=tags\n )\n\n audiofile = eyed3.load(file_path)\n audiofile.tag.images.set(3, thumbnail, 'image/jpeg')\n audiofile.tag.save()",
"def swap(self, old_chunks, new_chunk):\n indexes = [self.index(chunk) for chunk in old_chunks]\n del self[indexes[0]:indexes[-1] + 1]\n self.insert(indexes[0], new_chunk)",
"def trim_sample_section(file: str,\r\n sampling_rate: Union[float, int, str]) -> str:\r\n sampling_rate = float(sampling_rate)\r\n temp = temporary_copy(file)\r\n\r\n clip_length = int((duration(file) * sampling_rate * 0.01))\r\n start = random.randint(1, int(duration(file) - clip_length))\r\n end = start + clip_length\r\n trim_video(temp, file, start, end)\r\n return temp",
"def copy_chunks(chunk, token):\n # Open a thread to write this chunk\n thread = copy(chunk, token)\n thread.start()\n thread.join()",
"def remove_point(mutated_genome,index):\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n del mutated_genome[index][2][point_index]",
"def _move_chunk(self, args: MigrationArgs) -> None:\n def move_command():\n self._mongo_client.admin.command(\"moveChunk\", args.collection, find={SHARD_KEY: args.shard_key},\n to=args.shard, _secondaryThrottle=False, _waitForDelete=True)\n self._try_until_done(move_command)\n self._chunks[args.collection][args.shard_key] = args.shard\n logging.info(f\"MongoAgent: Moved chunk {args.shard_key} of collection {args.collection} to {args.shard}\")",
"def put_original_chunk(\n self, dimension: Dimension, cx: int, cz: int, chunk: Optional[Chunk]\n ):\n # If the chunk does not exist in the chunk history then add it\n\n # This code is only called when loading a chunk from the database.\n # If this code is called and the chunk history already exists then the\n # chunk must have been unloaded from the World class and reloaded\n # only chunks that are unchanged or have been saved can be unloaded so\n # the latest chunk here should be the same as the one on disk\n\n key = (dimension, cx, cz)\n if key not in self._chunk_history:\n if chunk is not None:\n assert cx == chunk.cx and cz == chunk.cz\n self._chunk_index[key] = (0, 0)\n self._chunk_history[key] = [self._serialise_chunk(chunk, dimension, 0)]\n self._chunk_cache[key] = chunk",
"def delete_chunk(self, dimension: Dimension, cx: int, cz: int):\n self._chunk_cache[(dimension, cx, cz)] = None",
"def pop(self):\n collect()\n if self.files:\n f = self.files.pop()\n logging.info(' in ChunkReader.pop() : trying to open file {0}'.format(f))\n with gzip.open(join(self.fpath, f)) as fp:\n item = pickle.load(fp)\n logging.info(' in ChunkReader.pop() : file {0} loaded'.format(f))\n return item",
"def create_gaps(path):\n random_file = random.choice(os.listdir(path)) # randomly selecting a file\n send2trash.send2trash(os.path.join(path, random_file)) # creating a gap in the list of files\n print(f\"{random_file} is removed from the '{path}'\")",
"def chunkify(self, size=1024*1024*5):\n with open(self.file_name_raw, 'rb') as file:\n chunk_end = file.tell()\n while True:\n chunk_start = chunk_end\n file.seek(size, 1)\n file.readline()\n chunk_end = file.tell()\n\n if chunk_end > self.file_end:\n chunk_end = self.file_end\n yield chunk_start, chunk_end - chunk_start\n break\n else:\n yield chunk_start, chunk_end - chunk_start",
"def big_dedup_file(in_fname, out_fname, n_bins):\n filehandles = []\n for i in range(n_bins):\n filehandles.append(open(f'temp{i}.txt', 'w'))\n handle_iter = itertools.cycle(filehandles)\n with open(in_fname, 'r') as in_file:\n for line in in_file:\n next(handle_iter).write(line)\n for filehandle in filehandles:\n filehandle.close()\n\n with open(out_fname, 'w') as out_file:\n for i in range(n_bins):\n with open(f'temp{i}.txt', 'r') as tempfile:\n # deduplicate\n lines = list(set(tempfile.read().split('\\n')))\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'pseudodeduplicated {in_fname}, {out_fname} is also pseudorandomized')",
"def chunk(f, n, data):\n\n\t# Chunk ID\n\tf.write(number(2, n))\n\t# Chunk length\n\tf.write(number(4, len(data)))\n\t# Data\n\tf.write(data)",
"def dedup_file(in_fname, out_fname):\n with open(in_fname, 'r') as in_file, open(out_fname, 'w') as out_file:\n lines, n_lines, n_duplicates = get_lines(in_file)\n lines = list(lines)\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'deduplicated {in_fname}, removed {n_duplicates} duplicates out of {n_lines} lines')\n return n_lines, n_duplicates",
"def add_chunk(self, chunk):\n self.chunkbuffer.appendleft(chunk)",
"def example_deletion_with_block_lowering(self):\n i = 0\n while i < len(self.shrink_target.blocks):\n if not self.is_shrinking_block(i):\n i += 1\n continue\n\n u, v = self.blocks[i].bounds\n\n j = 0\n while j < len(self.shrink_target.examples):\n n = int_from_bytes(self.shrink_target.buffer[u:v])\n if n == 0:\n break\n ex = self.shrink_target.examples[j]\n if ex.start < v or ex.length == 0:\n j += 1\n continue\n\n buf = bytearray(self.shrink_target.buffer)\n buf[u:v] = int_to_bytes(n - 1, v - u)\n del buf[ex.start : ex.end]\n if not self.incorporate_new_buffer(buf):\n j += 1\n\n i += 1",
"def shift(self, chunk_or_token):\n # TODO if we make a new chunk, we should try to merge with previous chunk\n self.append(chunk_or_token)\n self.leaves.extend(chunk_or_token.leaves) # SPEED: do we need to keep track of leaves?\n # should probability include btp like chunkability?\n self.probability *= self.graph.ftp(self[-2], self[-1]) # markov assumption",
"def sample(self, sample_size: int):\n self.data = random.sample(self.data, sample_size)",
"def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]",
"def augment_data(lines, rep, random_idx):\n\tout = lines.copy()\n\n\tout = np.concatenate([out] * rep, axis=0)\n\t\n\tif not random_idx:\n\t\trem = [lines[idx] for idx in random_idx] # add remainder\n\t\tnp.concatenate((out, rem), axis=0)\n\treturn out",
"def sample_sentences_from_file(file, fraction):\n with open(file, 'r') as f:\n lines = f.readlines()\n new_file_size = ceil(fraction*len(lines))\n rand_lines = sample(lines, new_file_size)\n new_file = file+\"_sampled-\"+str(new_file_size)+\".txt\"\n with open(new_file, 'w') as f:\n f.writelines(rand_lines)\n return new_file",
"def putchunk(self, *args, **kwargs):\n return _image.image_putchunk(self, *args, **kwargs)",
"def write_chunk(chunk, token):\n dest = rem_dir('grab')\n # input(dest)\n file_name = '{}_{}'.format('cpf_temp', token)\n dest_file_name = os.path.join(os.path.abspath(dest), file_name)\n # input(dest_file_name)\n WRITE_STREAM = open(dest_file_name, 'wb')\n WRITE_STREAM.write(chunk)\n WRITE_STREAM.close()\n\n return True",
"def change_chunks(\n self,\n before: typing.Union[typing.Tuple[int, int], None],\n after: typing.Union[typing.Tuple[int, int], None],\n generate_chunks=True,\n load_immediate=True,\n dimension=None,\n ):\n if shared.IS_CLIENT and self.get_active_dimension() is None:\n return\n\n if dimension is None:\n dimension = self.get_active_dimension()\n\n before_set = set()\n after_set = set()\n pad = 4\n for dx in range(-pad, pad + 1):\n for dz in range(-pad, pad + 1):\n if before is not None:\n x, z = before\n if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:\n before_set.add((x + dx, z + dz))\n if after is not None:\n x, z = after\n if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:\n after_set.add((x + dx, z + dz))\n\n # show = after_set - before_set\n hide = before_set - after_set\n for chunk in hide:\n # todo: fix this, this was previously hiding chunks randomly....\n pyglet.clock.schedule_once(wrap_method(dimension.hide_chunk, chunk), 0.1)\n c = dimension.get_chunk(*chunk, generate=False, create=False)\n\n if c and c.is_loaded() and not shared.IS_NETWORKING:\n shared.tick_handler.schedule_once(\n shared.world.save_file.dump_async(\n None,\n \"minecraft:chunk\",\n dimension=self.active_dimension,\n chunk=chunk,\n )\n )\n\n for chunk in after_set:\n c = dimension.get_chunk(*chunk, generate=False, create=False)\n\n if c and c.is_visible():\n continue\n\n c = dimension.get_chunk(*chunk, generate=False)\n pyglet.clock.schedule_once(wrap_method(dimension.show_chunk, c), 0.1)\n\n if not shared.IS_NETWORKING and shared.world.save_file:\n if not load_immediate:\n pyglet.clock.schedule_once(\n lambda _: shared.world.save_file.read(\n \"minecraft:chunk\",\n dimension=self.active_dimension,\n chunk=chunk,\n ),\n 0.1,\n )\n else:\n shared.world.save_file.read(\n \"minecraft:chunk\", dimension=self.active_dimension, chunk=chunk\n )\n else:\n dimension.get_chunk(*chunk, generate=False)\n\n if not after or shared.IS_NETWORKING:\n return\n\n for dx in range(-pad, pad + 1):\n for dz in range(-pad, pad + 1):\n if (\n generate_chunks\n and abs(dx) <= mcpython.common.config.CHUNK_GENERATION_RANGE\n and abs(dz) <= mcpython.common.config.CHUNK_GENERATION_RANGE\n and self.config[\"enable_auto_gen\"]\n ):\n chunk = dimension.get_chunk(\n dx + after[0], dz + after[1], generate=False\n )\n if not chunk.is_generated():\n shared.world_generation_handler.add_chunk_to_generation_list(\n chunk\n )",
"def split(aDeck,location):\r\n newDeck=[]\r\n for x in range(location):\r\n newDeck.append(aDeck.pop())\r\n return (newDeck)"
] |
[
"0.5526482",
"0.53792644",
"0.5207515",
"0.5165682",
"0.5116689",
"0.50714445",
"0.50279456",
"0.501086",
"0.49219203",
"0.48947325",
"0.48261306",
"0.47752142",
"0.47166112",
"0.46978334",
"0.46897793",
"0.46859783",
"0.4681981",
"0.46798566",
"0.46583572",
"0.4646029",
"0.46347997",
"0.46325037",
"0.46034747",
"0.46001673",
"0.45971432",
"0.4584275",
"0.4578406",
"0.4552707",
"0.45461684",
"0.45461372"
] |
0.728253
|
0
|
Glitch! Opens the original image file, reads its contents and stores them as 'file_data'. Calls 'splice_a_chunk_in_a_file()' method on the data a random number of times between 1 and 5. Writes the new glitched image out to a file.
|
def glitch_an_image(self, local_image):
file_handler = open(local_image, 'r')
file_data = file_handler.read()
file_handler.close()
##YOUR NEW NUMBER##
search_num = tumblr.top_hit_num
# divide by 5,000,000 to normalize the range from [0,5000000] to [0,1]
# multiply by 5 to increase the range from [0,5]
#glitch_num = (int)(((search_num / float(1000000)) * 5.0)+1)
glitch_num = 4
#Image pre-processing via PIL # TODO
#local_image = self.additional_image_processing(local_image)
#print local_image
for i in range(1, glitch_num):
file_data = self.splice_a_chunk_in_a_file(file_data, glitch_num)
outputfile = self.append_random_number_to_filename(local_image)
#print outputfile
file_handler = open(local_image, 'w')
file_handler.write(file_data)
    file_handler.close()
return local_image
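
# Illustrative note (not part of the original source): with the hard-coded
# glitch_num = 4, the loop above applies splice_a_chunk_in_a_file three times
# (range(1, 4)), and each pass again repeats its chunk glitch_num - 1 times.
# The result is written back over local_image itself; the `outputfile` name
# computed from append_random_number_to_filename() is not used for the write.
# The commented-out search_num scaling suggests glitch_num was originally
# meant to vary with the Tumblr search result count.
#
# glitched_path = glitcher.glitch_an_image("example.jpg")   # hypothetical call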
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def splice_a_chunk_in_a_file(self, file_data, glitch_num):\n start_point, end_point = self.get_random_start_and_end_points_in_file(file_data)\n section = file_data[start_point:end_point]\n repeated = ''\n\n for i in range(1, glitch_num):\n repeated += section\n\n new_start_point, new_end_point = self.get_random_start_and_end_points_in_file(file_data)\n file_data = file_data[:new_start_point] + repeated + file_data[new_end_point:]\n return file_data",
"def unstitch_image(self, file_path):\n new_height = 320\n new_width = 256\n stitched_image = cv2.imread(file_path, cv2.IMREAD_ANYDEPTH)\n\n stitched_image_one = stitched_image[0:new_height, :new_width]\n stitched_image_two = stitched_image[0:new_height, new_width:new_width * 2]\n stitched_image_three = stitched_image[0:new_height, new_width * 2:new_width * 3]\n stitched_image_four = stitched_image[0:new_height, new_width * 3:new_width * 4]\n\n stitched_image_five = stitched_image[new_height:new_height * 2, :new_width]\n stitched_image_six = stitched_image[new_height:new_height * 2, new_width:new_width * 2]\n stitched_image_seven = stitched_image[new_height:new_height * 2, new_width * 2:new_width * 3]\n stitched_image_eight = stitched_image[new_height:new_height * 2, new_width * 3:new_width * 4]\n\n cv2.imwrite(file_path[:-4]+ 'one.png', stitched_image_one)\n cv2.imwrite(file_path[:-4] + 'two.png', stitched_image_two)\n cv2.imwrite(file_path[:-4] + 'three.png', stitched_image_three)\n cv2.imwrite(file_path[:-4] + 'four.png', stitched_image_four)\n\n cv2.imwrite(file_path[:-4] + 'five.png', stitched_image_five)\n cv2.imwrite(file_path[:-4] + 'six.png', stitched_image_six)\n cv2.imwrite(file_path[:-4] + 'seven.png', stitched_image_seven)\n cv2.imwrite(file_path[:-4] + 'eight.png', stitched_image_eight)\n\n print('DONE')",
"def gifsicle(fname1, /, *, chunksize = 1048576, debug = False, timeout = 60.0):\n\n # Import standard modules ...\n import os\n import shutil\n import subprocess\n import tempfile\n\n # Import sub-functions ...\n from ..sha512 import sha512\n\n # Check that \"gifsicle\" is installed ...\n if shutil.which(\"gifsicle\") is None:\n raise Exception(\"\\\"gifsicle\\\" is not installed\") from None\n\n # Check that the image exists ...\n if not os.path.exists(fname1):\n raise Exception(f\"\\\"{fname1}\\\" does not exist\") from None\n\n # Create temporary directory ...\n with tempfile.TemporaryDirectory(prefix = \"gifsicle.\") as tname:\n # Create temporary name ...\n fname2 = f\"{tname}/image.gif\"\n\n # Optimise GIF ...\n subprocess.run(\n [\n \"gifsicle\",\n \"--unoptimize\",\n \"--optimize=3\",\n \"--output\", fname2,\n fname1\n ],\n check = True,\n encoding = \"utf-8\",\n stderr = subprocess.DEVNULL,\n stdout = subprocess.DEVNULL,\n timeout = timeout,\n )\n\n # Find the two sizes and don't replace the original if the new one is\n # larger, or equal ...\n if os.path.getsize(fname2) >= os.path.getsize(fname1):\n if debug:\n print(f\"INFO: Skipping because \\\"{fname2}\\\" is larger than, or equal to, \\\"{fname1}\\\"\")\n return\n\n # Find the two hashes and don't replace the original if the new one is\n # the same ...\n if sha512(fname1, chunksize = chunksize) == sha512(fname2, chunksize = chunksize):\n if debug:\n print(f\"INFO: Skipping because \\\"{fname2}\\\" is the same as \\\"{fname1}\\\"\")\n return\n\n # Replace the original ...\n shutil.move(fname2, fname1)",
"def imageProcessing(filepath):\n imagedata = []\n for img in glob.glob(filepath):\n edit_image = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\n edit_image = cv2.resize(255-edit_image, (28, 28))\n\n cv2.imwrite(img, edit_image)\n (thresh, edit_image) = cv2.threshold(edit_image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n while np.sum(edit_image[0]) == 0:\n edit_image = edit_image[1:]\n\n while np.sum(edit_image[:,0]) == 0:\n edit_image = np.delete(edit_image,0,1)\n\n while np.sum(edit_image[-1]) == 0:\n edit_image = edit_image[:-1]\n\n while np.sum(edit_image[:,-1]) == 0:\n edit_image = np.delete(edit_image,-1,1)\n\n rows,cols = edit_image.shape\n if rows > cols:\n factor = 20.0/rows\n rows = 20\n cols = int(round(cols*factor))\n edit_image = cv2.resize(edit_image, (cols,rows))\n else:\n factor = 20.0/cols\n cols = 20\n rows = int(round(rows*factor))\n edit_image = cv2.resize(edit_image, (cols, rows))\n\n colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))\n rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))\n edit_image = np.lib.pad(edit_image,(rowsPadding,colsPadding),'constant')\n shiftx,shifty = getBestShift(edit_image)\n shifted = shift(edit_image,shiftx,shifty)\n edit_image = shifted\n edit_image = edit_image.flatten()\n imagedata.append(edit_image)\n return imagedata",
"def truncate(data_path: str, n_chunks: int, idx=0):\n lines = open(data_path, 'r').readlines()\n n = len(lines)\n print('{} lines in original dataset'.format(n))\n chunk_size = round(len(lines) / int(n_chunks))\n print('{} lines in truncated dataset'.format(chunk_size))\n # get to idx block (so idx * x)\n start_id = idx * chunk_size\n output = lines[start_id:start_id+chunk_size]\n # write the next x lines in the output file (done)\n with open('trunc{}p_{}'.format(n_chunks, data_path), 'w') as out:\n for l in output:\n out.write(l)",
"def disintegrate(base_image_fp: Union[str, BytesIO]) -> str:\n if isinstance(base_image_fp, BytesIO):\n # https://stackoverflow.com/questions/46624449/load-bytesio-image-with-opencv\n file_bytes = np.asarray(bytearray(base_image_fp.read()), dtype=np.uint8)\n cv2_img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n else:\n cv2_img = cv2.imread(base_image_fp)\n\n # In order to make sure it works pretty consistently across the board, we'll upscale smaller images to at least\n # be 200x\n\n # TODO surely a cleaner way to do this\n\n h, w, _ = cv2_img.shape\n\n if h < 200:\n h_upscale_ratio = 200 / h\n else:\n h_upscale_ratio = 1\n\n if w < 200:\n w_upscale_ratio = 200 / w\n else:\n w_upscale_ratio = 1\n\n scaling_ratio = min(h_upscale_ratio, w_upscale_ratio)\n\n if scaling_ratio != 1:\n cv2_img = cv2.resize(cv2_img, (0, 0), fx=scaling_ratio, fy=scaling_ratio)\n h, w, _ = cv2_img.shape\n\n cv2_image_tmp = np.copy(cv2_img)\n\n chunk_amount = 90 # 75 is sweet spot\n chunk_size = max(int((w + h) // 2 // chunk_amount // 2), 3)\n\n # This value adjusts the base probability needed for a spot to trigger.\n # The lower the value, the higher\n chunk_move_threshold = 0.5\n\n # 1 / this value represents the point in the image where it'll start dissolving\n starting_fraction = 5\n\n # Force a chunk to trigger\n force_chunk_trigger = False\n\n # The starting value is basically 1/3 of the way into the image\n # This matches more with the general idea of the dusting happening partway through the image,\n # and also saves us time in iterations, cutting off 1/3 of the processing time.\n left_threshold = (w - chunk_size) // starting_fraction if starting_fraction != 0 else 0\n for r in range(h - chunk_size, chunk_size - 1, -chunk_size):\n for c in range(w - chunk_size,\n left_threshold,\n -chunk_size):\n # One chunk is chunk_size square\n\n # Randomly determine if we want to even do anything with this chunk\n # Make it random but biased towards values with a greater x\n\n adjusted_c = left_threshold\n if (random() + (adjusted_c / w) > chunk_move_threshold) or force_chunk_trigger:\n # if True:\n t = 1 / (random() * (c / w) * 0.5)\n\n # This offset represents\n offset_amount = int((w / c))\n # print(\"offset amount\", offset_amount)\n # The amount that the chunk has moved\n vert_offset = r - (chunk_size * offset_amount)\n horiz_offset = c + (chunk_size * offset_amount)\n\n if vert_offset == 0:\n continue\n\n # This bit adds a little bit of extra randomness to the offsets of each chunk.\n # If this isn't added, all chunks tend to fall along the chunk grid.\n # It should just need to be a tiny nudge.\n\n try:\n\n # Beyond just shifting pixels, this part tries to prettify the resulting image slightly.\n # TODO Consider using an alpha channel or just replacing changed pixels with white\n # or an average of the background color or smth.\n\n target_chunk_offset = int(random() * adjusted_c / 3)\n # Subtract because we want it to go up, negative y\n vert_offset -= target_chunk_offset\n horiz_offset += target_chunk_offset\n\n if vert_offset < 0:\n continue\n\n # Swap a target region on the temp image with the target chunk location on the original\n\n if (c / w) > 0.5:\n # Add a slight offset to where the chunks get swapped into.\n # Otherwise, it gets very crowded.\n\n tmp_r = r + int(random() * adjusted_c / 3)\n tmp_c = c + int(random() * adjusted_c / 3)\n\n # set a chunk in the new image to the pixels at the target chunk.\n cv2_image_tmp[tmp_r: tmp_r + chunk_size - 1, tmp_c:tmp_c + chunk_size - 1, :] = \\\n 
cv2_img[vert_offset:vert_offset + chunk_size - 1,\n horiz_offset:horiz_offset + chunk_size - 1,\n :] # correct\n\n except ValueError:\n # Tends to happen around threshold values which we don't care about anyway\n # TODO Removing this try/except might be a point to optimize\n pass\n\n try:\n target_chunk_offset = int(random() * c / 3)\n # Subtract because we want it to go up, negative y\n\n # Swap the target back first\n vert_offset -= target_chunk_offset\n horiz_offset += target_chunk_offset\n\n cv2_image_tmp[vert_offset:vert_offset + chunk_size - 1,\n horiz_offset:horiz_offset + chunk_size - 1, :] = \\\n cv2_img[r:r + chunk_size - 1, c:c + chunk_size - 1, :] # correct\n # Try to replace the new point with the value at the old one\n\n # Create a new offset value so we don't run into it looking too griddy\n except ValueError:\n pass\n\n # Ideally this would be written out to a BytesIO object but it's hard to get that to work with openCV so\n # this'll have to do\n cv2.imwrite(\"test_out.png\", cv2_image_tmp)\n\n # cv2.imshow(\"test_out\", cv2_image_tmp)\n # cv2.waitKey(0)\n\n return \"test_out.png\"",
"def make_jpeg_chunks(info, scale_index,\n raw_chunks_dir,\n jpeg_quality=95, slicing_plane=\"xy\"):\n\n dtype = np.dtype(info[\"data_type\"])\n if dtype.byteorder != \"|\":\n dtype.byteorder = \"<\"\n num_channels = info[\"num_channels\"]\n if dtype != np.uint8:\n raise ValueError(\"JPEG compression is only possible for uint8 type\")\n if num_channels != 1 and num_channels !=3:\n raise ValueError(\"JPEG compression is only possible for\"\n \" images with 1 or 3 channels\")\n scale_info = info[\"scales\"][scale_index]\n key = scale_info[\"key\"]\n size = scale_info[\"size\"]\n\n for chunk_size in scale_info[\"chunk_sizes\"]:\n for x_idx in range((size[0] - 1) // chunk_size[0] + 1):\n for y_idx in range((size[1] - 1) // chunk_size[1] + 1):\n for z_idx in range((size[2] - 1) // chunk_size[2] + 1):\n xmin = chunk_size[0] * x_idx\n xmax = min(chunk_size[0] * (x_idx + 1), size[0])\n ymin = chunk_size[1] * y_idx\n ymax = min(chunk_size[1] * (y_idx + 1), size[1])\n zmin = chunk_size[2] * z_idx\n zmax = min(chunk_size[2] * (z_idx + 1), size[2])\n shape = (num_channels,\n zmax - zmin, ymax - ymin, xmax - xmin)\n\n raw_chunk_filename = os.path.join(\n raw_chunks_dir, CHUNK_PATTERN.format(\n xmin, xmax, ymin, ymax, zmin, zmax, key=key))\n try:\n f = open(raw_chunk_filename, \"rb\")\n except OSError:\n f = gzip.open(raw_chunk_filename + \".gz\", \"rb\")\n with f:\n chunk = (np.frombuffer(f.read(), dtype=dtype)\n .reshape(shape))\n\n if slicing_plane == \"xy\":\n reshaped_chunk = chunk.reshape(\n shape[0], shape[1] * shape[2], shape[3])\n elif slicing_plane == \"xz\":\n reshaped_chunk = chunk.reshape(\n shape[0], shape[1], shape[2] * shape[3])\n else:\n raise RuntimeError()\n\n if num_channels == 1:\n reshaped_chunk = np.squeeze(reshaped_chunk, 0)\n else:\n # Channels (RGB) need to be along the last axis for PIL\n reshaped_chunk = np.swapaxes(reshaped_chunk, 0, 3)\n\n jpeg_chunk_filename = CHUNK_PATTERN.format(\n xmin, xmax, ymin, ymax, zmin, zmax, key=key)\n img = PIL.Image.fromarray(reshaped_chunk)\n print(\"Writing\", jpeg_chunk_filename)\n os.makedirs(os.path.dirname(jpeg_chunk_filename),\n exist_ok=True)\n img.save(jpeg_chunk_filename,\n format=\"jpeg\",\n quality=jpeg_quality,\n optimize=True,\n progressive=True)",
"def putchunk(self, *args, **kwargs):\n return _image.image_putchunk(self, *args, **kwargs)",
"def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None,\n offset=0):\n with progress_bar('images', maxval=num_expected) as pb:\n if shuffle_seed is None:\n index_gen = iter(xrange(num_expected))\n else:\n rng = numpy.random.RandomState(shuffle_seed)\n index_gen = iter(rng.permutation(num_expected))\n for i, num in enumerate(index_gen):\n image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE)\n image_data = numpy.fromstring(socket.recv(), dtype='uint8')\n _write_to_hdf5(hdf5_file, num + offset, image_filename,\n image_data, class_index)\n pb.update(i + 1)",
"def decompose(self, file_name):\n print(\"[+] Decompose started...\")\n with open(file_name, \"rb\") as image_file:\n\n # We check if the directory chunks doesn't exist, then, we create it\n if not path.exists(\"./chunks/\"):\n makedirs(\"chunks\")\n \n to_print = b64.b64encode(image_file.read()).decode('utf-8')\n size = len(to_print)\n re_size = self.verify_size_content(self.divide(size))\n content = \"\"\n i = 0\n\n print(\"[+] FILENAME: \" + str(file_name))\n print(\"[+] \" + str(re_size))\n print(\"[+] SIZE: \" + str(size))\n \n while to_print:\n content = to_print[:re_size['chunck']]\n title = md5(content[:300].encode()).hexdigest()\n self.map[i] = title\n self.chunk_array.append({title: content})\n print(\"> chunck: \" + title)\n\n system(\"mkdir ../chunks/\")\n # Optionnal, to saved the chunks\n with open(\"../chunks/\" + title, \"w+\") as file:\n file.write(content)\n # Optionnal, to saved the chunks\n to_print = to_print[re_size['chunck']:]\n i += 1\n print(\"[+] Decompose done.\")\n print(\"-------\")",
"def stitch(dir_path, in_canels=1, choice=0):\n directory = dir_path\n array = [] # array used to create matrix\n\n p = re.compile(tiles_xy_re)\n q = re.compile(original_img_xy_re)\n\n sum_of_files = len(os.listdir(directory))\n tiles_horizontal_num = 0\n\n first = os.listdir(directory)[0] # we take a sample to extract\n # original image information such as height, width, type\n\n original = q.match(first)\n Original_width, Original_height = int(original.group(1)), int(\n original.group(2))\n im = Image.open(dir_path + '\\\\' + first)\n\n tile_h = np.array(im).shape[0]\n tile_w= np.array(im).shape[1]\n file_type = first.split(\".\")[-1]\n\n # creating array to merge all tiles to\n if choice == 2: # if we choose and\n output_array = np.ones((Original_height, Original_width, in_canels))\n else:\n output_array = np.zeros((Original_height, Original_width, in_canels))\n\n for filename in os.listdir(directory):\n\n xy = p.match(filename)\n x, y = int(xy.group(1)), int(xy.group(2)) # extracting x,y relative\n # to original img\n\n im = Image.open(dir_path + '\\\\' + filename)\n if choice == 0:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n elif choice == 1:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_or(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n elif choice == 2:\n output_array[y:y + tile_h, x:x + tile_w, :] = np.logical_and(\n output_array[y:y + tile_h, x:x + tile_w, :], np.array(im))\n\n output_array[y:y + tile_h, x:x + tile_w, :] = np.array(im)\n\n array.append([x, y])\n\n if int(xy.group(1)) == 0:\n tiles_horizontal_num = tiles_horizontal_num + 1\n\n # converting array to image and saving image\n output_im = Image.fromarray(output_array.astype(np.uint8))\n file_name = \"original.\" + file_type\n path = dir_path + '\\\\' + file_name\n output_im.save(path)\n\n # array = sorted(array, key=lambda k: [k[0], k[1]])\n # numpy_array = np.array(array)\n # matrix = numpy_array.reshape(sum_of_files // tiles_horizontal_num,\n # tiles_horizontal_num, 2)",
"def long_slice(image_path, out_name, outdir, slice_size, net):\n img = Image.open(image_path)\n imgout = Image.open(image_path)\n orw, orh = img.size\n width, height = img.size\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n orw, orh = imgout.size\n width, height = img.size\n print(img.size)\n r = 1\n draw = ImageDraw.Draw(imgout)\n\n flag_continue = True\n while flag_continue:\n if os.path.exists(\"./testsliceimage/list.txt\"):\n os.remove(\"./testsliceimage/list.txt\")\n file = open(\"./testsliceimage/list.txt\", \"w+\")\n for sliceh in range(slicesh*step):\n for slicew in range(slicesw*step):\n #set the bounding box! The important bit\n bbox = (int(slicew*slice_size/step), int(sliceh*slice_size/step), int(slicew*slice_size/step)+slice_size, int(sliceh*slice_size/step)+slice_size)\n working_slice = img.crop(bbox)\n\n working_slice.save(os.path.join(outdir, \"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\"))\n file.write(\"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\\n\")\n\n if sliceh == 16 and slicew == 27 and width == 450 :\n print (int(slicew*slice_size/step), int(sliceh*slice_size/step),int(slicew*slice_size/step)+slice_size,int(sliceh*slice_size/step)+slice_size)\n\n file.close()\n transform_test = tf.Compose([tf.Grayscale(), tf.ToTensor(), tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n testset = UnknownDataset(\"./testsliceimage/\", \"./testsliceimage/list.txt\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=WORKERS)\n\n with torch.no_grad():\n N = 0\n for data in testloader:\n images, img_names = data['image'], data['image_name']\n outputs = net(images.float())\n _, predicted = torch.max(outputs.data, 1)\n # print(predicted)\n if max(predicted) == 1 :\n ite = -1\n for predic in predicted :\n ite += 1\n if predic == 1 and outputs[ite][1]-outputs[ite][0] > CONFIDENCE:\n print(img_names[ite])\n # print(outputs)\n N += 1\n #dessiner carre sur image\n slh = int(img_names[ite].split('_')[4])\n slw = int(img_names[ite].split('_')[5][:-4])\n x1 = int(slh * slice_size / step)\n x2 = x1 + slice_size\n y1 = int(slw * slice_size / step)\n y2 = y1 + slice_size\n\n if slh == 16 and slw == 27 and width ==450 :\n print (x1, y1, x2, y2)\n\n print(r)\n rh = orh / height\n rw = orw / width\n x1 = x1 * rh\n x2 = x2 * rh\n y1 = y1 * rw\n y2 = y2 * rw\n\n draw.rectangle(((y1, x1), (y2, x2)), outline=\"red\")\n # draw.text((y2,x2), img_names[0])\n copyfile(\"./testsliceimage/\"+img_names[ite], \"./goodimage/\"+ img_names[ite])\n\n if width <= 200 or height <= 200:\n flag_continue = False\n else:\n r = r * scale\n width, height = int(width/scale), int(height/scale)\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n width, height = img.size\n\n # imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout.save(\"./rectangle/out\", \"PNG\")",
"def image_generator_not_random(list_of_files, crop_size=320, scale=1):\n while True:\n text_region = []\n for jpgname in list_of_files:\n print jpgname\n # jpgname = np.random.choice(list_of_files)\n img = cv2.imread(jpgname)\n pattern = re.compile('jpg')\n txtname = pattern.sub('txt', jpgname)\n if not os.path.isfile(txtname):\n continue\n cropped_image = img\n with open(txtname, 'r') as f:\n for line in f:\n line_split = line.strip().split(',')\n print line_split\n # clockwise\n (x1, y1, x2, y2) = line_split[0:4]\n (x3, y3, x4, y4) = line_split[4:8]\n text_region.append([string.atof(x1), string.atof(y1), string.atof(x2), string.atof(y2),\n string.atof(x3), string.atof(y3), string.atof(x4), string.atof(y4)])\n if cropped_image is None or text_region is None or \\\n cropped_image.shape[0] != crop_size or cropped_image.shape[1] != crop_size:\n continue\n yield [scale * cropped_image, text_region]",
"def chop_tensors(fpath, window, overlap, chop_fields=None, save_path=None):\n h5file = h5py.File(fpath, 'r')\n if chop_fields is None:\n chop_fields = sorted(list(h5file.keys()))\n if save_path is None:\n save_path = fpath[::-1].split('.', 1)[-1][::-1] + '_chopped.h5'\n elif not save_path.endswith('.h5'):\n save_path += '.h5'\n data_list = []\n for field in chop_fields:\n arr = h5file[field][()]\n if len(arr.shape) < 3:\n arr = arr[:, :, None] if len(arr.shape) == 2 else arr[:, None, None]\n data_list.append(arr)\n h5file.close()\n splits = np.cumsum([arr.shape[2] for arr in data_list[:-1]])\n data = np.dstack(data_list)\n chop_list = []\n if (data.shape[1] - window) % (window - overlap) != 0:\n logger.warning(\"With the specified window and overlap, \" \\\n f\"{(data.shape[1] - window) % (window - overlap)} samples will be discarded per trial.\")\n for i in range(data.shape[0]):\n seg = data[i, :, :]\n chop_list.append(chop_data(seg, overlap, window))\n chopped_data = np.vstack(chop_list)\n chopped_data = np.split(chopped_data, splits, axis=2)\n data_dict = {chop_fields[i]: chopped_data[i] for i in range(len(chop_fields))}\n save_to_h5(data_dict, save_path, overwrite=True)",
"def fileResizeObscure(new_filepath):\n # Resize\n img1 = Image.open(new_filepath)\n img2=image_reduce(img1)\n *** Stopped working here\n newpath=\"toupload\\\\%s\" % new_filepath\n # Block ID\n width=img2.size[0]\n height=img2.size[1]\n # Obscuring params were decided by trial and error using fraction of width and height\n x1=int(0.16*width)\n x2=int(0.28*width)\n y1=int(0.94*height)\n y2=int(0.98*height) \n # Faster but easier to snoop? should not be since it changes the pixels\n draw = ImageDraw.Draw(img2)\n draw.rectangle([(x1,y1),(x2,y2)],fill=\"white\")\n del draw\n \n img2.save(newpath,optimize=True,quality=95)",
"def stitch_images(self):\n stitched_folder_name = self.parent_folder + 'stitched'\n print(\"Stitching images in:\")\n print(self.folder_list)\n print(\"Storing in: \" + str(stitched_folder_name))\n\n try:\n print(\"Making dir \" + str(stitched_folder_name) + \" for stitching\")\n os.mkdir(stitched_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this stitching??\")\n return\n\n photo_list = self.get_photo_list(self.parent_folder + '/' + self.folder_list[0])\n # get photo sizes\n print(self.parent_folder + '/' + self.folder_list[0] + '/' + photo_list[0])\n size_photo = cv2.imread(self.parent_folder + '/' + self.folder_list[0] +\n '/' + photo_list[0], cv2.IMREAD_ANYDEPTH)\n photo_height, photo_width = np.shape(size_photo)\n stitched_height = photo_height * 2\n stitched_width = photo_width * 4\n\n for photo in photo_list:\n stitched_photo = np.full((stitched_height, stitched_width), 0)\n\n for i, folder in enumerate(self.folder_list):\n print(i)\n print(folder)\n print(self.parent_folder + folder + '/' + photo)\n\n stitched_photo[(int((float(i) / 4.0)) * photo_height):(int(((float(i) / 4.0) + 1)) * photo_height),\n (int(i % 4) * photo_width):((int((i % 4) + 1)) * photo_width)] \\\n = cv2.imread(self.parent_folder + '/' + folder + '/' + photo, cv2.IMREAD_ANYDEPTH)\n\n stitched_photo = stitched_photo.astype(np.uint16)\n cv2.imwrite(stitched_folder_name + '/' + photo, stitched_photo, [cv2.IMWRITE_PNG_COMPRESSION, 0])\n\n return stitched_folder_name",
"def augment_img(img):\n img = random_hflip_img(img)\n img = cutout_img(img, size=12)\n img = zero_pad_and_crop_img(img)\n return img",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def _flush_write_buffer(self):\n if self._buffer_file_size:\n self._write_counter += 1\n self.file.seek(0)\n self._multipart.upload_part_from_file(\n self.file,\n self._write_counter,\n headers=self._storage.headers\n )\n self.file.close()\n self.file = None",
"def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()",
"def set_image_data(self, data_file):\n # TODO: support other file formats, like hd5 and maybe raw binary?\n import scipy.io\n self.image_data = np.atleast_3d(scipy.io.loadmat(data_file).values()[0])\n if self.image_data.ndim == 3:\n self.image_data = self.image_data.reshape(self.image_data.shape + (1,))\n # TODO: confirm that this voxel reordering is necessary. Maybe lean on the recon\n # folks to standardize thier voxle order? Might also look at\n self.image_data = self.image_data.transpose((1,0,2,3))[::-1,:,::-1,:]\n\n if self.image_data.shape[0] != self.size_x or self.image_data.shape[1] != self.size_y:\n msg = 'Image matrix discrepancy. Fixing the header, assuming image_data is correct...'\n self.log and self.log.warning(msg) or print(msg)\n self.size_x = self.image_data.shape[0]\n self.size_y = self.image_data.shape[1]\n self.mm_per_vox[0] = float(self.fov[0] / self.size_x)\n self.mm_per_vox[1] = float(self.fov[1] / self.size_y)\n if self.image_data.shape[2] != self.num_slices:\n msg = 'Image slice count discrepancy. Fixing the header, assuming image_data is correct...'\n self.log and self.log.warning(msg) or print(msg)\n self.num_slices = self.image_data.shape[2]\n if self.image_data.shape[3] != self.num_timepoints:\n msg = 'Image time frame discrepancy (header=%d, array=%d). Fixing the header, assuming image_data is correct...' % (self.num_timepoints, self.image_data.shape[3])\n self.log and self.log.warning(msg) or print(msg)\n self.num_timepoints = self.image_data.shape[3]",
"def test_image(filename, x_size=def_x_size, y_size=def_y_size):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)",
"def wsi_patch_splitting(wsi_path, patch_dir, patch_size=299, save_size=299,\n wsi_ext=\"tiff\", save_ext=\"png\",\n pyramid_flag=True, overlap_flag=True, level=0):\n\n if pyramid_flag == False:\n try:\n img = io.imread(wsi_path)\n if img.dtype == \"uint16\":\n img = (img / 256.0).astype(np.uint8)\n elif img.dtype == \"uint8\":\n pass\n else:\n raise Exception(\"Unknow imge data type\")\n except:\n print(\"Cannot handle {}\".format(wsi_path))\n else:\n wsi_header = openslide.OpenSlide(wsi_path)\n img = wsi_header.read_region(location=(0, 0), level=level,\n size=wsi_header.level_dimensions[level])\n img = np.asarray(img)[:,:,:-1]\n\n coors_arr = wsi_coor_splitting(wsi_h=img.shape[0], wsi_w=img.shape[1],\n length=patch_size, overlap_flag=overlap_flag)\n filename = os.path.splitext(os.path.basename(wsi_path))[0]\n for coor in coors_arr:\n h_start, w_start = coor[0], coor[1]\n cur_patch = img[h_start:h_start+patch_size, w_start:w_start+patch_size, :]\n if patch_size != save_size:\n save_patch = transform.resize(cur_patch, (save_size, save_size))\n save_patch = (save_patch * 255.0).astype(np.uint8)\n else:\n save_patch = cur_patch\n\n patch_name = \"{}_{}.{}\".format(filename, str(uuid.uuid4())[:8], save_ext)\n patch_filepath = os.path.join(patch_dir, patch_name)\n io.imsave(patch_filepath, save_patch)",
"def write_data(infbfile,begin_N,dur_N,outfbfile):\n infbfile.seek_to_sample(begin_N)\n for i in range(begin_N,(begin_N+dur_N)):\n data = infbfile.read_sample()\n data.tofile(outfbfile)",
"def write_image_to_file_incrementally(image):\r\n i = 0\r\n while os.path.exists(\"sample%s.jpeg\" % i):\r\n i += 1\r\n with open(\"sample%s.jpeg\" % i, \"wb\") as f:\r\n f.write(image)",
"def load_and_mangle_image(fname):\n I, meta = load_image(fname)\n\n try:\n voxel_size = tuple(map(float, os.getenv('ZYX_IMAGE_GRID').split(\",\")))\n print(\"ZYX_IMAGE_GRID environment forces image grid of %s micron.\" % (voxel_size,))\n assert len(voxel_size) == 3\n except:\n try:\n voxel_size = I.micron_spacing\n print(\"Using detected %s micron image grid.\" % (voxel_size,))\n except AttributeError:\n print(\"ERROR: could not determine image grid spacing. Use ZYX_IMAGE_GRID=Z,Y,X to override.\")\n raise\n\n meta = ImageMetadata(voxel_size[2], voxel_size[1], voxel_size[0], I.axes)\n setattr(I, 'micron_spacing', voxel_size)\n\n # temporary pre-processing hacks to investigate XY-correlated sensor artifacts...\n try:\n ntile = int(os.getenv('ZNOISE_PERCENTILE'))\n I = I.force().astype(np.float32)\n zerofield = np.percentile(I, ntile, axis=1)\n print('Image %d percentile value over Z-axis ranges [%f,%f]' % (ntile, zerofield.min(), zerofield.max()))\n I -= zerofield\n print('Image offset by %d percentile XY value to new range [%f,%f]' % (ntile, I.min(), I.max()))\n zero = float(os.getenv('ZNOISE_ZERO_LEVEL', 0))\n I = I * (I >= 0.)\n print('Image clamped to range [%f,%f]' % (I.min(), I.max()))\n except:\n pass\n\n I = I.transpose(1,2,3,0)\n\n # allow user to select a bounding box region of interest\n bbox = os.getenv('ZYX_SLICE')\n slice_origin = (0, 0, 0)\n if bbox:\n bbox = bbox.split(\",\")\n assert len(bbox) == 3, \"ZYX_SLICE must have comma-separated slices for 3 axes Z,Y,X\"\n\n def parse_axis(slc_s, axis_len):\n bounds = slc_s.split(\":\")\n assert len(bounds) == 2, \"ZYX_SLICE must have colon-separated START:STOP pairs for each axis\"\n\n if bounds[0] != '':\n assert int(bounds[0]) >= 0, \"ZYX_SLICE START values must be 0 or greater or empty string\"\n assert int(bounds[0]) < (axis_len-2), \"ZYX_SLICE START values must be less than axis length - 2\"\n bounds[0] = int(bounds[0])\n else:\n bounds[0] = 0\n\n if bounds[1] != '':\n assert int(bounds[1]) >= bounds[0], \"ZYX_SLICE STOP values must be greater than START or empty string\"\n bounds[1] = int(bounds[1])\n else:\n bounds[1] = axis_len\n\n return slice(bounds[0], bounds[1])\n\n bbox = tuple([\n parse_axis(bbox[d], I.shape[d])\n for d in range(3)\n ]) + (slice(None),)\n I = I.lazyget(bbox)\n slice_origin = tuple([\n slc.start or 0\n for slc in bbox[0:3]\n ])\n\n if I.shape[2] % 16:\n # trim for 16-pixel row alignment\n slc = tuple([\n slice(None),\n slice(None),\n slice(0,I.shape[2]//16*16),\n slice(None)\n ])\n if hasattr(I, 'lazyget'):\n I = I.lazyget(slc)\n else:\n I = I[slc]\n\n if isinstance(I, np.ndarray):\n # temporarily maintain micron_spacing after munging above...\n I2 = wrapper(shape=I.shape, dtype=I.dtype)\n I2[:,:,:,:] = I[:,:,:,:]\n I = I2\n setattr(I, 'micron_spacing', voxel_size)\n\n return I, meta, slice_origin",
"def get_patches(rimage, gimage, mimage, num_patches=48, patch_size=80, patch_stride=80):\n num_FSpatches = 16\n num_RApatches = 32\n rpatches = []\n gpatches = []\n mpatches = []\n #R_imgs = ((rimage+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'rainy.jpg', R_imgs[0,:,:,:])\n for i in range(int(math.sqrt(num_FSpatches))):\n for j in range(int(math.sqrt(num_FSpatches))):\n point_x = patch_stride*i\n point_y = patch_stride*j\n rpatch = rimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n #print(point_x)\n #print(point_y)\n #print(point_y+patch_size)\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d_%d.jpg'%(i,j), P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n mpatches.append(mpatch)\n\n for k in range(num_RApatches):\n point1 = random.randint(0,240) # 116 comes from the image source size (320) - the patch dimension (80)\n point2 = random.randint(0,240)\n #rpatch = tf.image.crop_to_bounding_box(rimage, point1, point2, patch_size, patch_size)\n rpatch = rimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d.jpg'%i, P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n mpatches.append(mpatch)\n\n rpatches = np.array(rpatches)\n rpatches = np.squeeze(rpatches)\n #print(rpatches.shape)\n gpatches = np.array(gpatches)\n gpatches = np.squeeze(gpatches)\n mpatches = np.array(mpatches)\n mpatches = np.squeeze(mpatches)\n #assert rpatches.get_shape().dims == [num_patches, patch_size, patch_size, 3]\n assert rpatches.shape == (num_patches, patch_size, patch_size, 3)\n return rpatches, gpatches, mpatches",
"def convertCluttered(original_images, finalImgSize, initImgSize=28, number_patches=4, clutter_size=8, batch_size=None):\n\n images, imgCoord = convertTranslated(original_images, batch_size=batch_size, initImgSize=initImgSize, finalImgSize=finalImgSize)\n if batch_size is None:\n batch_size = len(images)\n size_diff = finalImgSize - clutter_size\n clutter_size_diff = initImgSize - clutter_size/2\n cluttered_images = np.zeros([batch_size, finalImgSize*finalImgSize])\n\n for k in range(batch_size):\n image = images[k, :]\n image = np.reshape(image, (finalImgSize, finalImgSize))\n cluttered_image = image\n for l in range(number_patches):\n\n original_image = original_images[random.randint(0, batch_size-1), :]\n original_image = np.reshape(original_image, (initImgSize, initImgSize))\n\n # generate and save random coordinates\n clutterX = random.randint(clutter_size/2, clutter_size_diff)\n clutterY = random.randint(clutter_size/2, clutter_size_diff)\n diff = np.int(clutter_size/2)\n clutter = original_image[clutterX-diff: clutterX+diff, clutterY-diff: clutterY+diff]\n # generate and save random coordinates\n randX = random.randint(0, size_diff)\n randY = random.randint(0, size_diff)\n # padding\n clutter = np.lib.pad(clutter, ((randX, size_diff - randX), (randY, size_diff - randY)), 'constant', constant_values = (0))\n cluttered_image = np.clip(cluttered_image + clutter, a_min=0, a_max=1)\n cluttered_images[k, :] = np.reshape(cluttered_image, (finalImgSize*finalImgSize))\n\n return cluttered_images, imgCoord"
] |
[
"0.73525065",
"0.5891271",
"0.5596695",
"0.55924076",
"0.5514683",
"0.5442865",
"0.535485",
"0.532658",
"0.52328813",
"0.5146267",
"0.5136913",
"0.5126074",
"0.50800765",
"0.50558335",
"0.5039814",
"0.50165755",
"0.5009086",
"0.49895605",
"0.49895605",
"0.49895605",
"0.49886838",
"0.49843845",
"0.49834427",
"0.49743462",
"0.49553964",
"0.4944366",
"0.4943192",
"0.4937052",
"0.4931747",
"0.4922544"
] |
0.6877133
|
1
|
Returns the list of items generated by expanding nonterminal B, where B is the "next" symbol, i.e. the terminal/nonterminal immediately to the right of the dot. Assumes the dot is not at the end of the item, so B exists and is a symbol. Caches the result, as it does not change over time.
|
def items_generated_by_next(self):
def lookup(rule):
return self.grammar.rules[rule.content] if rule.is_symbol_name() else rule
if self.the_items_generated_by_next is None:
self.the_items_generated_by_next = []
rhs = lookup(self.grammar.rules[self.the_next.content])
rhs = [rhs] if rhs.is_terminal() else rhs
# iterate over the alternatives of a Choice
for production in rhs:
if production.is_empty():
# Avoid creating useless productions that have no right-hand-side
# They can only lead to redundant reductions, and sometimes useless
# conflicts.
continue
new_item = self.grammar.MakeItem(self.the_next,production,0)
self.the_items_generated_by_next.append(new_item)
return self.the_items_generated_by_next
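
For concreteness, the sketch below shows what the generated items look like on a toy grammar. The dict-of-alternatives encoding and the (lhs, rhs, dot_position) tuples are simplifications invented for this illustration; they stand in for the Grammar/MakeItem machinery used above.

# Toy illustration only: a hypothetical grammar encoded as a dict of
# alternatives, with items represented as (lhs, rhs, dot_position) tuples.
TOY_GRAMMAR = {
    "expr": [["term", "+", "expr"], ["term"]],
    "term": [["NUM"]],
}

def toy_items_generated_by(nonterminal):
    # One new item per alternative of the nonterminal, dot at position 0.
    return [(nonterminal, tuple(rhs), 0) for rhs in TOY_GRAMMAR[nonterminal]]

# Expanding "expr" yields:
#   ('expr', ('term', '+', 'expr'), 0) and ('expr', ('term',), 0)
print(toy_items_generated_by("expr"))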
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def go_to(self, state, symbol):\n C = []\n # in state search for LR(0) item that has dot in front of symbol\n for production in state:\n dot_index = production[1].index('.')\n alpha = production[1][:dot_index]\n xbeta = production[1][dot_index + 1:]\n if len(xbeta) == 0:\n continue\n X, beta = xbeta[0], xbeta[1:]\n if X == symbol:\n # move the dot after the symbol\n res = alpha + [X] + ['.'] + beta\n result_prod = (production[0], res)\n C += [result_prod]\n # call closure on this new item\n return self.closure(C)",
"def complete_dot(self):\n\n symbol_chain = self.split_completion_object(self.get_word_before())\n\n if len(symbol_chain) > 2:\n logging.error(\"Can't complete complex chain object yet!\")\n return []\n else:\n new_context = self.get_context(self.module, symbol_chain[0])\n if not new_context:\n logging.error(f\"Can't complete without valid context. {symbol_chain[0]} doesn't point to a valid object in {self.module.name} context.\")\n return []\n\n new_context.complete\n logging.info(\"Context found!\")",
"def items(ruleSet, terminals, nonTerminals):\n symbols = nonTerminals + terminals\n #start with closure of [ [S' -> S, $] ]\n C = [closure([startItem], ruleSet, terminals)]\n added = 1\n while added:\n added = 0\n for I in C:\n for X in symbols:\n g = goto(I, X, ruleSet, terminals)\n if g and not fullIn(C, g):# not in C:\n C.append(g)\n added = 1\n return C",
"def nonterminals(items):\n return [Nonterminal(item) for item in items]",
"def closure(items, ruleSet, terminals):\n I = copy.deepcopy(items)\n\n added = 1\n while added:\n added = 0\n\n #for each item [A -> alpha . B Beta, a] in I (result)\n for item in I:\n if item.pointAtEnd(): continue\n A = item.lhs\n alpha = item.rhs[:item.dotPos]\n B = item.rhs[item.dotPos]\n Beta = item.rhs[item.dotPos + 1:]\n a = item.lookaheads\n\n #for each production B -> gamma in G'\n for prod in getProductions(ruleSet, B):\n #and each terminal b in FIRST(Beta a)\n b = FIRSTS(Beta, a.items(), ruleSet, terminals)\n newItem = prod.getItem()\n newItem.lookaheads.addList(b)\n #such that [B -> . gamma, b] not in I\n if newItem not in I:\n #add [B -> . gamma, b] to I\n I.append(newItem)\n added = 1 \n else: #newItem is in I, but are the lookaheads all there?\n i = I.index(newItem)\n #if they aren't add them and say we've added something\n if (newItem.lookaheads != I[i].lookaheads\n and not I[i].lookaheads.contains(newItem.lookaheads)):\n added = 1\n I[i].lookaheads.addSet(newItem.lookaheads)\n return I",
"def expand(dumm: Symbol):\n tilde = TildeOf(dumm)\n jtilde = JOf(tilde)\n return [\n (form_tilde(dumm), tilde_range, tilde),\n (form_m(dumm), m_range[-jtilde, jtilde + 1], MOf(dumm))\n ]",
"def gentree(self, symbol): \n ### YOUR CODE HERE\n tree = \"(\" + symbol + \" \"\n expansion = self.random_expansion(symbol)\n for s in expansion:\n if self.is_terminal(s):\n tree += \" \" + s\n else:\n tree += \" \" + self.gentree(s)\n tree += \")\"\n ### END YOUR CODE\n return tree",
"def expand(self):\n data, end = \\\n self.pat.traverse(lambda obj, *args: args,\n self.begin, self.data)\n return data",
"def expand(self, node : NodeInGraph) -> list:\n expand_list = []\n\n for move in self.moves:\n boat_missionaries = move[0]\n boat_cannibals = move[1]\n\n if node.state[1] < boat_missionaries or node.state[2] < boat_cannibals:\n continue\n\n x0 = node.state[1] - boat_missionaries\n x1 = TOTAL_NO_MISSIONARIES - x0\n y0 = node.state[2] - boat_cannibals\n y1 = TOTAL_NO_CANNIBALS - y0\n\n if (x0 < y0 and x0 > 0) or (x1 < y1 and x1 > 0):\n continue\n\n new_state = (1 - node.state[0], x1, y1)\n next_node = NodeInGraph(new_state, node)\n expand_list.append(next_node)\n\n return expand_list",
"def process_name(self, stack):\n dot_op = self._toks(stack)\n toks = [t.value for t in Stack.flatten(dot_op)]\n # always remove the final dot\n assert toks[-1] == \".\"\n expr = \"\".join(toks[:-1])\n yield from self.dot.complete(expr)",
"def split_and_extend(items):\n if not items:\n return items\n\n output = set()\n\n for item in items:\n current = []\n\n for split_item in item.split(\".\"):\n current = current + [split_item]\n output.add(\".\".join(current))\n\n return output",
"def __items(self, partial_ngram):\n #An n-gram is constructed element by element and passed on to each of the child nodes.\n #When the current node is a terminating node, it will yield the complete n-gram which was passed to it by its parents paired with the value of this node.\n \n #If this is a terminating node then yield the n-gram constructed so far together with this node's value.\n if self.end_of_ngram:\n yield (partial_ngram, self.value)\n #For each next element, construct the new partial n-gram and pass it to that element's child node, yielding every n-gram/value pair it yields.\n for ele in self.children:\n for item in self.children[ele].__items(partial_ngram+(ele,)):\n yield item",
"def linear_extensions(self):\n for ext in self._poset.linear_extensions():\n yield Permutation(ext)",
"def gotos_internal(self,grammar,by_index_memo=None):\n\n # Partition items according to the next symbol to be consumed, X,\n # i.e. the symbol immediately to the right of the dot.\n changed_initial = False\n if self.goto is None:\n self.goto = dict()\n # Create the initial set of edges, copying lookaheads\n for item_id, item in self.id_to_item.items():\n if item.at_end():\n continue\n X = item.next()\n if X.is_end_of_text():\n continue\n xid = X.reg_info.index\n if xid not in self.goto:\n self.goto[xid] = self.GotoEdge(X)\n edge = self.goto[xid]\n next_item = grammar.MakeItem(item.lhs, item.rule, item.position+1)\n edge.add(item,next_item,LookaheadSet(self.id_to_lookahead[item_id]))\n changed_initial = True\n\n # The first time around, construct the destination item sets for each edge.\n # On subsequent rounds, propagate lookaheads from our own ItemSet to next item sets.\n goto_list = []\n changed = changed_initial\n for edge in self.goto.values():\n (created, next_item_set) = edge.NextItemSet(grammar,by_index_memo=by_index_memo)\n if created:\n next_item_set.close(grammar)\n else:\n # Propagate lookaheads\n for src_item_id, (dest_item,stale_lookahead) in edge.next.items():\n src_lookahead = self.id_to_lookahead[src_item_id]\n dest_lookahead = next_item_set.id_to_lookahead[dest_item.reg_info.index]\n changed = changed | dest_lookahead.merge(src_lookahead)\n # Propagate to non-kernel items\n next_item_set.close(grammar)\n\n changed = changed | created\n goto_list.append((edge.x, next_item_set))\n\n return (changed,goto_list)",
"def expand(self): #uninformed\n children = []\n index = self._find0()\n if index >= self.size: return children\n for change in range(1, self.size + 1):\n child = Permutation(self.size)\n elements = self.getElements()\n elements[index] = change\n child.setElements(elements)\n children.append(child)\n return children",
"def generate_items(self, token_parse_list):\r\n # Will return/recurse over this enriched list of parses\r\n enriched_parse_list = []\r\n # Will watch the things we generate -- If they contain 'generate'\r\n # attributes themselves, we will recurse\r\n generate_again = False\r\n\r\n for token_parse in token_parse_list:\r\n # Loop through the SOs of this parse, generating new entries where\r\n # specified\r\n enriched_parse = []\r\n for idx in range(len(token_parse)):\r\n # Check the 'generate' attribute of each SO in this parse\r\n if len(token_parse[idx].generate) > 0:\r\n # Queue up things to be generated to the left and the right\r\n generate_left = []\r\n generate_right = []\r\n for generate_params in token_parse[idx].generate:\r\n if generate_params[0] == \"left\":\r\n generate_left.append(generate_params[1])\r\n elif generate_params[0] == \"right\":\r\n generate_right.append(generate_params[1])\r\n\r\n # Will we need to recurse?\r\n if len(generate_params[1].generate) > 0:\r\n generate_again = True\r\n\r\n # Done queueing up generated elements. Fill in the SOs for\r\n # this parse by extending the left/right queues in\r\n enriched_parse += generate_left\r\n\r\n token_parse[idx].generate = []\r\n enriched_parse.append(token_parse[idx])\r\n\r\n enriched_parse += generate_right\r\n else:\r\n enriched_parse.append(token_parse[idx])\r\n\r\n # Put it on the final parse list\r\n enriched_parse_list.append(enriched_parse)\r\n\r\n # Do we need to recurse?\r\n if generate_again:\r\n return self.generate_items(enriched_parse_list)\r\n else:\r\n return enriched_parse_list",
"def search_productions(citem, chart):\n if len(chart[citem]) == 0:\n return [] \n if citem == \"START\":\n return [{\"START\":child[0]} for child in chart[citem]]\n \n prodlist = list(chart[citem])\n lefts = set(x[0] for x in prodlist)\n lengths = set(len(x) for x in prodlist)\n assert len(lengths) == 1\n split_len = lengths.pop()\n \n # figure out all items that could have been used to complete this nonterminal \n result = [] \n if prodlist[0][0].target == Item.NONTERMINAL:\n assert split_len == 2\n symbol = prodlist[0][0].outside_symbol, prodlist[0][0].outside_index\n for child in prodlist: \n assert child[1].target == Item.ROOT\n other_nts = search_productions(child[0], chart) \n if other_nts:\n for option in other_nts:\n d = dict(option)\n d[symbol] = child[1]\n result.append(d)\n else:\n result.append(dict([(symbol, child[1])]))\n return result\n \n elif prodlist[0][0].target == Item.BINARY:\n assert split_len == 2\n for child in prodlist: \n assert len(child) == 2\n r1 = search_productions(child[0], chart)\n r2 = search_productions(child[1], chart)\n if r1 and r2: #possibilities: all combinations of assignments to NTs in the subtree\n other_iterator = itertools.product(r1,r2)\n for p1, p2 in other_iterator:\n nts = dict(p1)\n nts.update(p2)\n result.append(nts)\n else: # Only one of the subtrees has nonterminals. \n result.extend(r1)\n result.extend(r2)\n return result \n\n elif prodlist[0][0].target == Item.TERMINAL:\n for child in prodlist: \n assert len(child) == 1\n other_nts = search_productions(child[0], chart)\n if other_nts:\n result.extend(other_nts)\n return result",
"def item(key, value, maxdepth, separator):\n keyans = cprint2(key, maxdepth)\n valans = cprint2(value, maxdepth)\n if isinstance(keyans, list):\n keyans[-1] += separator\n if isinstance(valans, list):\n keyans[-1] += valans[0]\n keyans.extend(valans[1:])\n else:\n keyans[-1] += valans\n return keyans\n if isinstance(valans, list):\n valans[0] = keyans + separator + valans[0]\n return valans\n return keyans + separator + valans",
"def _expand_node(self, node, dependency_tree, is_verb_node=False):\n expanded_node = [(node[\"address\"], node[\"word\"])]\n\n for dependency in node[\"deps\"]:\n if dependency == \"rel\":\n continue\n\n # Ignore noun and object phrases\n if is_verb_node and dependency in (\"nsubj\", \"dobj\"):\n continue\n\n for address in node[\"deps\"][dependency]:\n expanded_node.extend(self._expand_node(dependency_tree[\"nodes\"][address], dependency_tree, is_verb_node))\n\n return expanded_node",
"def items(self):\n current = self.first\n output = []\n\n while current is not None:\n output.append(current.item)\n current = current.next_node\n\n return output",
"def GetNextExpanded(self, item): \r\n\r\n return self.GetNext(item, False)",
"def get_paths_from(self, symbol):\n to_return = []\n visitation_queue = [self.head]\n while len(visitation_queue) != 0:\n visiting = visitation_queue.pop(0)\n for elem in visiting.children:\n visitation_queue.append(elem)\n if symbol in visiting.inputs:\n v = visiting\n model_trail = []\n while v.parent is not None:\n model_trail.append(v.m)\n v = v.parent\n to_return.append(SymbolPath(visiting.inputs, model_trail))\n return to_return",
"def __str__(self):\n c = self\n ans = \"[\"\n while c:\n ans += \".\"\n c = c.next\n return ans + \"]\"",
"def expand(self, ignore: Iterable = None) -> list:\n if self.debug: print(f\"StateNode.expand({ignore})\")\n if not self._expanded:\n if ignore is not None:\n if self.debug: print(f\"\\tExpanding without {ignore}\")\n x: D\n self._children = [x for x in self._generate_children() if x.state not in ignore]\n else:\n if self.debug: print(f\"\\tExpanding...\")\n self._children = self._generate_children()\n self._expanded = True\n return self._children",
"def expand(node):\n if node.isTerminal():\n return node\n\n # Get the next unexplored state\n nextState = node.exploreChildNode()\n\n # If all states are already explored, recurse\n if nextState is not None:\n return nextState\n else:\n return expand(node.UCB1())",
"def items(self, _prec=\"\"):\n if self.isLeaf:\n yield (_prec + self.ch, self.value)\n\n for chld in self.children.values():\n yield from chld.items(_prec + self.ch)",
"def generate_greedy(lists):\n \n def greedy_step(lists, base=[]):\n \"\"\"Add a single item from the list of strings to the base list.\"\"\"\n lists_copy = lists[:]\n if base == []:\n # Start with any string\n s = lists_copy.pop()\n else:\n l = find_match(lists_copy, base)\n s = add_string(l, base)\n lists_copy.remove(l)\n return lists_copy, s\n\n # This is probably nicer if it's recursive?\n base = []\n while lists:\n lists, base = greedy_step(lists, base)\n return base",
"def test_expand_degeneracies(self):\r\n # No expansion.\r\n self.assertEqual(expand_degeneracies(['ACG']), ['ACG'])\r\n\r\n # Expansion, single sequence.\r\n self.assertEqual(sorted(expand_degeneracies(['RGY'])),\r\n ['AGC', 'AGT', 'GGC', 'GGT'])\r\n\r\n # Multiple sequences.\r\n self.assertEqual(sorted(expand_degeneracies(['ACGW', 'KAT'])),\r\n ['ACGA', 'ACGT', 'GAT', 'TAT'])",
"def permutation_expand(block, table: List) -> List[chr]:\n return [block[x - 1] for x in table]",
"def expand(env, word, dir):\n ans = [item.evaluate(env) for item in word]\n if STAR not in ans and STARSTAR not in ans:\n return [''.join(ans)]\n\n output = flatten(explode(item.evaluate(env)) for item in word)\n if output[:2] == [\"\", SLASH]:\n dir = \"/\"\n\n result = start(dir)\n bits = []\n rec = False\n for item in output:\n if isinstance(item, str):\n bits.append(item)\n elif item is STAR:\n bits.append(item)\n elif item is STARSTAR:\n bits.append(STAR)\n rec = True\n elif item is SLASH:\n result = _bits(result, bits, rec)\n bits = []\n rec = False\n else:\n if len(bits) > 0:\n result = _bits(result, bits, rec)\n\n return [str(item) for item in result]"
] |
[
"0.60782635",
"0.5367849",
"0.52921903",
"0.5243204",
"0.5075236",
"0.50247693",
"0.49688762",
"0.49165177",
"0.4869679",
"0.48509085",
"0.48409092",
"0.48298466",
"0.48118484",
"0.48070458",
"0.47778127",
"0.4761203",
"0.4753579",
"0.47348222",
"0.47059587",
"0.46668696",
"0.4660061",
"0.46371415",
"0.46348265",
"0.46245742",
"0.4609828",
"0.4605916",
"0.45880452",
"0.45735568",
"0.45678902",
"0.45607945"
] |
0.5718888
|
1
|
Returns True when this item represents having accepted a valid sentence in the language
|
def is_accepting(self):
return (self.position == 1) and (self.lhs.content == LANGUAGE)
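
As context for how this predicate is consumed: an item-set-level check (one appears verbatim in the candidate pool further down) accepts only when an accepting item is paired with an end-of-text lookahead. The sketch below is a self-contained toy version of that pairing; LANGUAGE, the end-of-text marker, and the (lhs, position) item tuples are stand-ins, not the classes used above.

# Toy sketch: items are (lhs, position) pairs, lookaheads are sets of terminals.
LANGUAGE = "language"
END_OF_TEXT = "$"

def item_is_accepting(item):
    lhs, position = item
    return position == 1 and lhs == LANGUAGE

def item_set_is_accepting(items_with_lookaheads):
    # Accept only when an accepting item can be followed by end of input.
    return any(item_is_accepting(item) and END_OF_TEXT in lookahead
               for item, lookahead in items_with_lookaheads)

print(item_set_is_accepting([(("language", 1), {"$"})]))  # True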
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_valid(self, sentence):\n return not self.rules.is_invalid(sentence)",
"def hasConstantForm(self, sentence):",
"def is_sentence_valid(w_input_sentence_warning, sentence):\n to_predict = True\n\n # show warning if the sentence shorter than 3 words\n if 0 < len(sentence.split()) <= 2:\n w_input_sentence_warning.warning(\n \"Your sentence seems to be too short, pls. insert another one\")\n to_predict = False\n\n # show warning if the sentence contains english chars\n elif str_contains_en_chars(sentence):\n w_input_sentence_warning.warning(\n \"Your sentence contains non Hebrew characters, which the predictor doesn't support\")\n\n return to_predict",
"def test_sentence_input(self, sentence):\n if len(sentence.strip()) == 0:\n return False\n # Decode unicode, mainly to normalize fancy quotation marks\n decoded = unidecode(sentence)\n # Sentence shouldn't contain problematic characters\n if self.well_formed and self.reject_pat.search(decoded):\n return False\n return True",
"def IsValid(self):\n return len(self.Text) > 0",
"def is_valid(self, text):\n return any(p.lower() in text.lower() for p in self.get_phrases())",
"def isQuestion(self):\n i = 0\n while i < len(self.sentence):\n if \"?\" in self.sentence[i].getWord():\n return True\n i += 1\n return False",
"def is_sentence(self, spacy_sentence):\n tokens = [t for t in spacy_sentence]\n \n # Minimum number of words per sentence\n MIN_TOKEN_COUNT = 6\n if len(tokens) < MIN_TOKEN_COUNT:\n return False\n \n # Most tokens should be words\n MIN_WORD_TOKENS_RATIO = 0.5\n if sum([t.is_alpha for t in tokens]) / len(tokens) < MIN_WORD_TOKENS_RATIO:\n return False\n\n text = spacy_sentence.text\n\n # A sentence has to end with a period\n if not text.strip().endswith('.'):\n return False\n \n # Most characters should be letters, not numbers and not special characters\n MIN_LETTER_CHAR_RATIO = 0.5\n if sum([c.isalpha() for c in text]) / len(text) < MIN_LETTER_CHAR_RATIO:\n return False\n \n return True",
"def negation_check(self,sentence):",
"def match(self, sentence) -> bool:\r\n pass",
"def __contains__(self, sentence):\n return sentence in self._sentences",
"def is_valid(self):\n if self.answer_type == 'F':\n return bool(self.text)\n\n if not self.answers.count():\n return False\n if not self.answers.filter(correct=True).count():\n return False\n return True",
"def is_accepting(self):\n for item_id, lookahead in self.id_to_lookahead.items():\n if lookahead.includesEndOfText():\n item = self.id_to_item[item_id]\n if item.is_accepting():\n return True\n return False",
"def isTrueConstant(self, sentence):",
"def match(self, sentence) -> bool:\r\n if (any(word[0] in sentence.lower() for word in self.word_list if word[1] == \"partial\") or any(\r\n word[0].lower() == sentence.lower() for word in self.word_list if word[1] == \"full\")) and not any(\r\n word[0] in sentence.lower() for word in self.word_list if word[1] == \"not\"):\r\n return True\r\n else:\r\n return False",
"def check_sentence_sanity(self, sentence):\n case_dist = nltk.FreqDist()\n\n for token in sentence:\n case_dist[self.get_casing(token)] += 1\n\n if case_dist.most_common(1)[0][0] != \"allLower\":\n return False\n\n return True",
"def is_invalid(self, sentence: str) -> bool:\n for idx, r in enumerate(self.rules):\n if r.is_invalid(sentence):\n # print(\"RULE %d %s FAILED on |%s|\" % (idx, r.descr, sentence))\n return True\n return False",
"def is_sentence(sentence):\n return len(sentence.split(' ')) > 1",
"def IsValid(self):\n return not TickerFull.DelimiterSplit in self.Text",
"def is_question(self, message):\n text = message.split(' ')\n\n # get first word of message\n first_word = text[0]\n # get punctuation\n last_word = text[-1][-1]\n\n if first_word in self.question_words or last_word == '?':\n return True\n\n return False",
"def tt_entails(knowledge_base, sentence):\n return False",
"def IsValid(self):\n return (TickerFull.DelimiterSplit not in self.Text) and (TickerDataType.DelimiterData in self.Text)",
"def been_played(self, word):\n words = self.played_out or ''\n words = words.split(' ')\n\n return True if ((words.count(word) > 0) or \n (words.count( singularize(word) ) > 0) or \n (words.count( pluralize(word) ) > 0)) else False",
"def check_message(check):\n words_of_message = speech_text.split()\n if set(check).issubset(set(words_of_message)):\n return True\n else:\n return False",
"def isTrueOrDoesSentence(self, sentence):\n name = sentence.__name__\n return name == GdlPool.TRUE or name == GdlPool.DOES",
"def read_sentence(self,data):\n self.addSource(data)\n if self.checkLegal(data):\n self.addTarget(data)\n return True\n else:\n return False",
"def validate(self, word):\n\n return self.valid_word(word)",
"def validate(self, word):\n\n return self.valid_word(word)",
"def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True",
"def is_valid(self):\n for lineedit in self.lineedits:\n if lineedit in self.validate_data and lineedit.isEnabled():\n validator, invalid_msg = self.validate_data[lineedit]\n text = to_text_string(lineedit.text())\n if not validator(text):\n QMessageBox.critical(self, self.get_name(),\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\n QMessageBox.Ok)\n return False\n return True"
] |
[
"0.7314644",
"0.6832887",
"0.674819",
"0.673748",
"0.67186016",
"0.67153496",
"0.6683982",
"0.6666995",
"0.6652227",
"0.64221543",
"0.64155704",
"0.6394634",
"0.6362132",
"0.63527334",
"0.62846106",
"0.62810147",
"0.62630147",
"0.6105913",
"0.6096297",
"0.608771",
"0.606539",
"0.6064891",
"0.6040261",
"0.5989942",
"0.5982937",
"0.5981242",
"0.59795755",
"0.59795755",
"0.59653354",
"0.5964095"
] |
0.6950517
|
1
|
Returns the set of names of symbols in the "externals" section of the Treesitter JSON grammar. The data for the "externals" section looks like this: {
|
def json_externals(json):
return set([ x["name"] for x in json.get("externals",[]) ])
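
A minimal usage sketch: the grammar fragment below is hypothetical (fields other than "externals" and "name" are irrelevant to this function), but it shows the shape of the input and the set that comes back.

# Hypothetical fragment of a Treesitter grammar.json; json_externals()
# only reads the "name" field of each entry under "externals".
sample_grammar = {
    "name": "wgsl",
    "externals": [
        {"type": "SYMBOL", "name": "_block_comment"},
        {"type": "SYMBOL", "name": "_error_sentinel"},
    ],
    "rules": {},
}

print(json_externals(sample_grammar))
# {'_block_comment', '_error_sentinel'}  (a set, so order may vary)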
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def external_terminologies(self):\n terms = set()\n for node_record in self.graph.run(\"MATCH (n) RETURN (n)\"):\n node = node_record[\"n\"]\n if \"links_to\" in node:\n terms.add(node[\"links_to\"])\n return terms",
"def get_external_host_tags(self):\n self.log.debug(\"Collecting external_host_tags now\")\n external_host_tags = []\n for k, v in iteritems(self.external_host_tags):\n external_host_tags.append((k, {SOURCE_TYPE: v}))\n\n self.log.debug(\"Sending external_host_tags: %s\", external_host_tags)\n return external_host_tags",
"def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)",
"def get_external_links(parsed_drug_doc):\n\n external_link_info = list(parsed_drug_doc.find(id='external-links').next_sibling.dl.children)\n external_links = {}\n for i in range(0, len(external_link_info), 2):\n source = external_link_info[i].text\n value = external_link_info[i+1].text\n # Ignoring a few sources for this MVP that don't give obvious alternate IDs.\n if source not in [\"RxList\", \"Drugs.com\", \"PDRhealth\"]:\n external_links[source] = value\n\n return external_links",
"def synonyms(self):\n\n return [synonym[\"name\"] for synonym in self._get_synonym_json()]",
"def get_symbol(self):\n return []",
"def get_external_variables(self) -> ConstsDictT:\n variables = dict(self.globals)\n for name, val in self.closure_vals.items():\n variables[name] = val.cell_contents\n return variables",
"def aliases(self):\n\n if not hasattr(self, \"_aliases\"):\n self._aliases = []\n for node in self.doc.findall(\"OtherName\"):\n self._aliases.append([\n self.id,\n Doc.get_text(node.find(\"OtherTermName\", \"\")).strip(),\n Doc.get_text(node.find(\"OtherNameType\", \"\")).strip(),\n self.LANGUAGE,\n ])\n return self._aliases",
"def _extract_used_data(transforms) -> typing.Set[str]:\n return {m.group(1) for m in re.finditer(r'data\\(\"(.+?)\"\\)', str(transforms))}",
"def get_peers_from_package_json(self):\n pj = self.load_package_json_from_dir(self.sources_path)\n prefix_len = len(self.sources_root) + 1\n\n return [p[prefix_len:] for p in pj.get_workspace_map(ignore_self=True).keys()]",
"def getAtomNames(self):\n return self._raw_data['ATOM_NAME']",
"def get_data_names(self):\n return list(self.__data.keys())",
"def get_well_aliases(self):\n return self.info_wells['well'].unique()",
"def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)",
"def external_array_references(self):\n return self._to_ears(self.fileuris)",
"def get_external_potential(self, units=\"bohr\"):\n from numpy import ndarray\n return_dict = {}\n return_dict[\"sym\"] = self.sym\n return_dict[\"r\"] = self.get_position(units)\n for k in MULTIPOLE_ANALYSIS_KEYS:\n if k in self:\n val = self[k]\n if isinstance(val, ndarray):\n return_dict[k] = list(val)\n else:\n return_dict[k] = val\n\n return return_dict",
"def get_local_peers_from_package_json(self):\n return self.load_package_json_from_dir(self.sources_path).get_workspace_dep_paths(base_path=self.module_path)",
"def get_afferents_names(self):\n\t\treturn self._afferentsNames",
"def get_symmetries(self):\n temp = self._properties.get('symmetries', [])\n return temp",
"def ExternalSystemIdentifiers(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('external_system_identifiers', default)\n return [HEP.IDObject(i) for i in tmp]",
"def getKnownTelemarketers():\n telemarketers = set()\n # Get known telemarketer numbers. They start with \"140\"\n for tm in calls:\n if tm[0][:3] == \"140\":\n telemarketers.add(tm[0])\n\n # See if telemarketer numbers receive any calls. If they\n # do remove them from the list.\n for tm in calls:\n if tm[1][:3] == \"140\":\n telemarketers.remove(tm[1])\n\n return telemarketers",
"def get_all_keywords(resource):\n keywords = []\n resource.populate()\n for res in [i for i in resource.imports.data if isinstance(i, robot.parsing.settings.Resource)]:\n keyword_file = os.path.abspath('{}/{}'.format(res.directory, res.name))\n if keyword_file not in processed:\n res_obj = ResourceFile(keyword_file)\n processed[keyword_file] = res_obj\n keywords += get_all_keywords(res_obj)\n for keyword in resource.keywords:\n print(keyword.name)\n keywords.append(tuple((keyword.source, keyword.name, keyword.args.value if keyword.args.value else [])))\n return keywords",
"def with_aliases(self):\n return self.node.withs",
"def getSymbols(self):\n return self.alpha.getSymbols()",
"def getNames(self) -> List[unicode]:\n ...",
"def names(self) -> list[str]:",
"def getListOfExternalModelDefinitions(self):\n return _libsbml.CompSBMLDocumentPlugin_getListOfExternalModelDefinitions(self)",
"def get_dependencies(self, alias):\n dependencies = {\"Ensembl2Reactome_All_Levels\": ['ReactomePathways'],\n \"ReactomePathways\": list(),\n \"reactome.homo_sapiens.interactions.tab-delimited\": list(),\n \"ReactomePathwaysRelation\": ['ReactomePathways']}\n return dependencies[alias]",
"def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names",
"def named_entities(self) -> List[str]:"
] |
[
"0.57053596",
"0.5550879",
"0.5326788",
"0.52498573",
"0.5130597",
"0.5110515",
"0.5100318",
"0.5075958",
"0.5071106",
"0.50482666",
"0.503921",
"0.50140154",
"0.4998172",
"0.4962017",
"0.49529403",
"0.49522096",
"0.49512246",
"0.49511516",
"0.49457997",
"0.49367157",
"0.49030265",
"0.48961914",
"0.4884283",
"0.48779032",
"0.48726583",
"0.48718065",
"0.48616448",
"0.48510444",
"0.48404238",
"0.48354405"
] |
0.7155185
|
0
|
Translates a JSON dictionary into a corresponding grammar node, based on the 'type' entry. Returns 'dct' itself when 'dct' has no type entry or has an unrecognized type entry. We use Treesitter's conventions for representing a grammar in JSON form.
|
def json_hook(grammar,memo,tokens_only,dct):
def memoize(memo,name,node):
if name in memo:
return memo[name]
memo[name] = node
return node
result = dct
if "type" in dct:
type_entry = dct["type"]
if isinstance(type_entry,str):
if type_entry == "TOKEN":
result = dct["content"]
elif type_entry == "STRING":
result = memoize(memo,dct["value"],grammar.MakeFixed(dct["value"]))
elif type_entry == "PATTERN":
result = memoize(memo,dct["value"],grammar.MakePattern(dct["value"]))
elif not tokens_only:
if type_entry == "BLANK":
result = grammar.empty
elif type_entry == "CHOICE":
result = grammar.MakeChoice(dct["members"])
elif type_entry == "SEQ":
result = grammar.MakeSeq(dct["members"])
elif type_entry == "REPEAT1":
result = grammar.MakeRepeat1([dct["content"]])
elif type_entry == "REPEAT":
# This node type was introduced in a later version of treesitter.
# REPEAT { X } is the same as CHOICE { REPEAT1 {X} | BLANK }
result = grammar.MakeRepeat1([dct["content"]])
result = grammar.MakeChoice([result, grammar.empty])
elif type_entry == "SYMBOL":
result = memoize(memo,dct["name"],grammar.MakeSymbolName(dct["name"]))
else:
raise RuntimeError("unknown node type: {}".format(type_entry))
return result
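
Because the hook takes three leading arguments before the decoded dict, it has to be partially applied before being handed to the JSON decoder. The loader below is a sketch under that assumption; the real call site is not shown here.

import functools
import json

def load_grammar_json(grammar, grammar_text, tokens_only=False):
    # Sketch only: memo is the shared cache for STRING/PATTERN/SYMBOL nodes.
    memo = {}
    hook = functools.partial(json_hook, grammar, memo, tokens_only)
    # json.loads invokes the hook bottom-up on every decoded JSON object,
    # so leaf nodes are converted before their enclosing nodes.
    return json.loads(grammar_text, object_hook=hook)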
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def CustomTypeDecoder(dct):\n if len(dct) == 1:\n type_name, _ = dct.items()[0]\n type_name_stripped = type_name.strip('_')\n if type_name_stripped in TYPES:\n obj = TYPES[type_name_stripped]()\n obj.__dict__ = dct[type_name]\n return obj\n return dct",
"def dict_to_rdflib(d):\n if d is None:\n return None\n\n t = d[\"type\"]\n v = d[\"value\"]\n\n if t == \"uri\":\n return URIRef(v)\n\n if t == \"bnode\":\n if v not in _bnodes:\n # v is not used as BNode value on purpose (multiple calls should\n # not have the same value)\n _bnodes[v] = BNode()\n return _bnodes[v]\n\n l = d.get(\"xml:lang\", None)\n if t == \"literal\":\n return Literal(v, lang=l)\n\n if t == \"typed-literal\":\n # will raise type error if lang and datatype set\n return Literal(v, lang=l, datatype=d[\"datatype\"])\n\n raise rdflib.exceptions.ParserError(\n \"Invalid sparql json result according to \"\n \"http://www.w3.org/TR/rdf-sparql-json-res/: {0}\".format(d))",
"def decode_dict(x: dict):\n assert isinstance(x, dict)\n if \"$type\" in x:\n return decode_typed_value(x)\n else:\n return x",
"def Load(json_text, start_symbol, ignore='_reserved'):\n g = Grammar(json_text, start_symbol, ignore=ignore)\n g.canonicalize()\n g.compute_first()\n g.compute_follow()\n return g",
"def type_object(self, json):\n json_prop = _utils.json_prop_finder(json)\n d = {k: (self.switcher(v)) for k, v in json_prop.items()}\n return d",
"def decode_typed_value(x: dict):\n for serializer in string_serializers:\n if x[\"$type\"] == serializer.name:\n return serializer.from_json(x[\"$value\"])\n\n raise TypeError(f\"Unknown $type: {x['$type']}\") # pragma: no cover",
"def decode_typed_json(json_value: str) -> tp.Any:\n def type_check(dct: tp.Dict[tp.Any, tp.Any]) -> tp.Any:\n val: tp.Any = dct.get(\"__custom_key_type__\")\n if val is not None:\n dct.pop(\"__custom_key_type__\")\n if val == \"int\":\n dct = {int(key): val for key, val in dct.items()}\n elif val == \"float\":\n dct = {float(key): val for key, val in dct.items()}\n elif val == \"decimal\":\n dct = {Decimal(key): val for key, val in dct.items()}\n return dct\n\n return json.loads(json_value, object_hook=type_check)",
"def class_hook(dct):\n if len(dct) == 1:\n class_name, value = next(iter(dct.items()))\n class_name = class_name.strip('_')\n if class_name == 'Dictionary':\n return Dictionary(*value)\n return dct",
"def _check_typed_dict(self) -> PossibleResult[T]:\n # pylint: disable=unidiomatic-typecheck\n if type(self.constructor) == _TypedDictMeta:\n # pylint: enable=unidiomatic-typecheck\n if not isinstance(self.obj, dict):\n raise DeserializeError(\n dict, self.obj, self.new_depth, self.key\n )\n return {\n name: Deserialize(\n obj=self.obj.get(name, UNDEFINED),\n constructor=_type,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n key=name,\n ).run()\n for name, _type in get_type_hints(self.constructor).items()\n } # type: ignore\n return NO_RESULT",
"def switcher(self, json_data):\n if json_data['type'] == \"object\":\n return self.type_object(json_data)\n elif json_data['type'] == \"array\":\n return self.type_array()\n elif json_data['type'] in [\"string\", \"boolean\", \"numbers\"]:\n return self.type_others()\n else:\n raise Exception(\"No basic types found in JSON schema\")",
"def deduce_type(obj: Dict[str, Any]) -> str:\n data_type: str = obj.get(\"type\", None)\n\n if data_type is not None:\n return data_type\n\n if \"items\" in obj:\n return \"array\"\n\n if \"properties\" in obj:\n return \"object\"\n\n return \"any\"",
"def loader(data: Union[str, dict], _: FileInfo) -> Optional[dict]:\n if isinstance(data, str):\n if fmt != 'json-ld':\n g = Graph()\n g.parse(data=data, format=fmt)\n data = pyld_jsonld_from_rdflib_graph(g)\n\n if not isinstance(data, dict):\n # TODO: Add a context processor to the source w/ CONTEXTS_PARAM_TYPE\n # TODO: figure out what to do base options below\n # TODO: determine whether jsonld.frame can handle something other than string input\n data_as_dict = jsonld.frame(data, contexts)\n else:\n data_as_dict = data\n typ = data_as_dict.pop('@type', None)\n # TODO: remove this when we get the Biolinkml issue fixed\n if not typ:\n typ = data_as_dict.pop('type', None)\n if typ and typ != target_class.class_name:\n # TODO: connect this up with the logging facility or warning?\n print(f\"Warning: input type mismatch. Expected: {target_class.__name__}, Actual: {typ}\")\n return json_clean(data_as_dict)",
"def parse_composite_types(var, var_type):\n var_info = {}\n\n if (var_type == \"array\"):\n # walk the tree to find the necessary info for an array declaration node\n raw_inner_type = var[\"typeName\"][\"baseType\"][\"typeDescriptions\"][\"typeIdentifier\"]\n inner_type = infer_type(raw_inner_type)\n\n if (inner_type != None):\n var_info[\"key_type\"] = inner_type\n else:\n return None\n\n elif (var_type == \"mapping\"):\n # walk the tree to find the necessary info for a mapping declaration node\n raw_key_type = var[\"typeName\"][\"keyType\"][\"typeDescriptions\"][\"typeIdentifier\"]\n raw_val_type = var[\"typeName\"][\"valueType\"][\"typeDescriptions\"][\"typeIdentifier\"]\n\n key_type = infer_type(raw_key_type)\n val_type = infer_type(raw_val_type)\n\n if (key_type != None and var_type != None):\n var_info[\"key_type\"] = key_type\n var_info[\"val_type\"] = val_type\n else:\n return None\n\n return var_info",
"def _artifact_from_dict(dct):\n if \"recipe\" in dct:\n type_ = UnbuiltArtifact\n elif \"returncode\" in dct:\n type_ = BuiltArtifact\n else:\n type_ = PublishedArtifact\n\n return type_(**dct)",
"def json_decoder_hook(dct, str_decoders=STRING_DECODERS,\n converters=MappingProxyType(dict())) -> dict:\n\n for k, v in dct.items():\n if k in converters:\n parse_func = converters[k]\n dct[k] = parse_func(v)\n\n elif isinstance(v, str):\n for decode_func in str_decoders:\n v = decode_func(v)\n\n if not isinstance(v, str):\n break\n\n dct[k] = v\n elif isinstance(v, collections.Mapping):\n dct[k] = json_decoder_hook(v, str_decoders, converters)\n\n return dct",
"def ContainerFromDicts(source, c_type, e_type):\n if not isinstance(c_type, type):\n raise TypeError(\"Container type '%s' is not a type\" % type(c_type))\n\n if source is None:\n source = c_type()\n\n if c_type is dict:\n ret = dict([(k, e_type.FromDict(v)) for k, v in source.items()])\n elif c_type in _SEQUENCE_TYPES:\n ret = c_type(map(e_type.FromDict, source))\n else:\n raise TypeError(\"Unknown container type '%s'\" % c_type)\n\n return ret",
"def parse(self, ctx, sid=None, stid=None, type=\"mgc\", cache=None):\n\n assert sid is not None\n assert stid is not None\n # answer = get_answer(ctx, self.qs_list[type])\n\n if type == \"dur\":\n cache_id = str(sid) + \"-\" + \"-1\"\n if cache_id in cache:\n if ctx in cache[cache_id]:\n return cache[cache_id][ctx]\n # raise Exception(\"cache has key \" + str(sid) + \"-\" + str(stid) + \":\" + ctx + \". But no model found. Some bugs exits\")\n else:\n cache[cache_id] = {}\n model = self.trees[(sid, -1)].parse(ctx, self.qs_list[type])\n cache[cache_id][ctx] = model\n return model\n else:\n cache_id = str(sid) + \"-\" + str(stid)\n if cache_id in cache:\n if ctx in cache:\n return cache[cache_id][ctx]\n # else:\n # raise Exception(\"cache has key \" + str(sid) + \"-\" + str(stid) + \". But no model found. Some bugs exits\")\n else:\n cache[cache_id] = {}\n model = self.trees[(sid, stid)].parse(ctx, self.qs_list[type])\n cache[cache_id][ctx] = model\n return model",
"def node_from_dict(node_dict):\n node_types = {name: cls for name, cls in globals().items()\n if inspect.isclass(cls) and issubclass(cls, SchemaNode)}\n if node_dict['node_type'] not in node_types:\n raise ValueError('Invalid node type specified.')\n node_type = node_types[node_dict['node_type']]\n return node_type.from_dict(node_dict)",
"def infer_types(g):\n\n def type_of_nt(nt, nt_def):\n if nt_def.type is not None:\n return nt_def.type\n else:\n nt_name = nt if isinstance(nt, str) else nt.name\n return TypeVar(nt_name, 2)\n\n nt_types = {\n nt: type_of_nt(nt, nt_def)\n for nt, nt_def in g.nonterminals.items()\n if not isinstance(nt, grammar.InitNt)\n }\n\n method_types = {}\n\n def element_type(e):\n if isinstance(e, str):\n if e in g.nonterminals:\n return nt_types[e]\n elif e in g.variable_terminals:\n return TokenType\n else:\n # constant terminal\n return UnitType\n elif isinstance(e, grammar.Optional):\n return Type('Option', [element_type(e.inner)])\n elif isinstance(e, grammar.Nt):\n # Cope with the awkward fact that g.nonterminals keys may be either\n # strings or Nt objects.\n return nt_types[e if e in nt_types else e.name]\n else:\n assert False, \"unexpected element type: {!r}\".format(e)\n\n def expr_type(expr):\n if isinstance(expr, int):\n return concrete_element_types[expr]\n elif expr is None:\n return Type('Option', [TypeVar()])\n elif isinstance(expr, grammar.Some):\n return Type('Option', [expr_type(expr.inner)])\n elif isinstance(expr, grammar.CallMethod):\n arg_types = [expr_type(arg) for arg in expr.args]\n if expr.method in method_types:\n mtype = method_types[expr.method]\n if len(expr.args) != len(mtype.argument_types):\n raise JsparagusTypeError(\n \"method {!r} is called with {} argument(s) and with {} argument(s)\"\n .format(expr.method, len(expr.args), len(mtype.argument_types)))\n for i, (actual_type, expected_type) in enumerate(\n zip(arg_types, mtype.argument_types)):\n try:\n unify(actual_type, expected_type)\n except JsparagusTypeError as exc:\n exc.annotate(\n \"error passing {} as argument {} to method {!r}:\"\n .format(\n grammar.expr_to_str(expr.args[i]),\n i + 1,\n expr.method))\n raise\n else:\n # Use method name as fallback type name (but low\n # precedence--this should be unified with something better).\n name = expr.method\n if ' ' in name:\n name = name.split(' ')[0]\n\n mtype = MethodType(arg_types, TypeVar(name, 1))\n method_types[expr.method] = mtype\n return mtype.return_type\n elif expr == 'accept':\n return NoReturnType\n else:\n raise TypeError(\"unrecognized reduce expr: {!r}\".format(expr))\n\n for nt, nt_def in g.nonterminals.items():\n if isinstance(nt, grammar.InitNt):\n continue\n nt_type = nt_types[nt]\n for i, p in enumerate(nt_def.rhs_list):\n concrete_element_types = [\n element_type(e)\n for e in p.body\n if grammar.is_concrete_element(e)\n ]\n try:\n unify(nt_type, expr_type(p.reducer))\n except JsparagusTypeError as exc:\n exc.annotate(\n \"in nonterminal {!r}, production {}:\"\n .format(nt, i + 1))\n raise\n\n for nt, ty in nt_types.items():\n g.nonterminals[nt].type = final_deref(ty)\n g.methods = {name: mtype.resolve() for name, mtype in method_types.items()}",
"def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)",
"def convert_pydict_to_netdict(dict):\n type = dict[dict.Keys[0]]\n # to be completed",
"def handler(type=None, resource=None, id=None, json_pointer=None,\n app=None, content=None, root=None, content_pointer=None):\n languages = list(app.config.get(\"MULTILINGUAL_SUPPORTED_LANGUAGES\", []))\n\n default_template = app.config.get(\"ELASTICSEARCH_DEFAULT_LANGUAGE_TEMPLATE\", {})\n template = app.config.get(\"ELASTICSEARCH_LANGUAGE_TEMPLATES\", {})\n\n data_dict= dict()\n for language in languages:\n if id is not None:\n language_with_context = language + '#' + id\n if language_with_context in template.keys():\n data_dict[language] = template[language_with_context]\n continue\n data_dict[language] = template.get(language, default_template)\n\n\n return {\n \"type\": \"object\",\n \"properties\": data_dict\n }",
"def visit_Dict(self, node):\n self.generic_visit(node)\n if all(isinstance(key, ast.Str) for key in node.keys):\n keywords = [ ast.keyword(arg=key.s, value=value)\n for key, value in zip(node.keys, node.values) ]\n return to_call(to_name('dict'), keywords=keywords)\n return node",
"def _JsonDictToArgs(cls, path_context, data_location, dct, memo=None):\n if(cls is InputGenerator):\n tag = dct['tag']\n data = dct['data']\n return cls._registered[tag]._JsonDictToArgs(path_context, data_location, data, memo=memo)\n else:\n _, args, kwargs = super()._JsonDictToArgs(path_context, data_location, dct, memo=memo)\n args.append(StageMeta.Load(path_context, dct['meta'], memo=memo))\n return cls, args, kwargs",
"def from_dict(dictionary):\n typ = dictionary.pop('type', 'control')\n cls = FieldLogic._TYPE_LOOKUP.get(typ, None)\n if cls is None:\n raise ValueError('unknown type code \"%s\"' % typ)\n return cls.from_dict(dictionary)",
"def packet_decoder(packet_type,string):\n dct = json.loads(string)\n if packet_type == HS_Version:\n return HS_Version(dct['version'])\n if packet_type == HS_Options:\n return HS_Options(minport=dct['minport'], maxport=dct['maxport'],\n portusage=dct['portusage'], protocol=dct['protocol'],\n timeout=dct['timeout'], payload=dct['payload'],\n key=dct['key'])\n if packet_type == Data:\n return Data(data=dct['data'], terminate=int(dct['terminate']))\n if packet_type == Management:\n return Management(dct['command'],location=dct['location'])\n if packet_type == Switching:\n return Switching(dct['status'])\n if packet_type == Error:\n return Error()",
"def from_jsonld(data, format):\n # As of this writing, 'application/nquads' is the only RDF format\n # (other than JSON-LD) supported by pyld. Convert via that.\n quads = jsonld.to_rdf(data, { 'format': 'application/nquads' })\n # Using ConjunctiveGraph instead of Graph for nquads support.\n graph = rdflib.ConjunctiveGraph()\n graph.parse(data=quads, format='nquads')\n return graph.serialize(format=format)",
"def parse_statement(self, stmt):\r\n if 'type' not in stmt:\r\n raise TypeError('Type field required')\r\n\r\n if stmt['type'] == 'property':\r\n return self.parse_property(stmt)\r\n elif stmt['type'] == 'edge':\r\n return self.parse_edge(stmt)\r\n elif stmt['type'] == 'key_index':\r\n return self.parse_key_index(stmt)\r\n elif stmt['type'] == 'defaults':\r\n return self.parse_defaults(stmt)\r\n else:\r\n raise ValueError('Invalid `type` value {}'.format(stmt['type']))",
"def convert_requirements(\n self, requirement_dct: Union[None, Dict[str, int]]\n ) -> Tuple[Dict[int, int], str]:\n if requirement_dct is None:\n return {}, \"\"\n err_lst: List[BaseException] = []\n # Try parsing as IDs\n try:\n ret = {}\n for k, v in requirement_dct.items():\n ret[int(k)] = int(v)\n return ret, \"id\"\n except (ValueError, KeyError) as err:\n err_lst.append(err)\n # Try parsing as each lang\n for lang, nameMap in self.itemdata_rv.items():\n ret = {}\n try:\n for k, v in requirement_dct.items():\n ret[nameMap[k]] = int(v)\n return ret, lang\n except (ValueError, KeyError) as err:\n err_lst.append(err)\n # TODO: create custom exception class\n raise BaseException(err_lst)",
"def __init__(self, *args, **kwargs):\r\n Grammar.__init__(self)\r\n dict.__init__(self, *args, **kwargs)"
] |
[
"0.6035078",
"0.5538951",
"0.5173711",
"0.49358642",
"0.49114808",
"0.48749194",
"0.48417294",
"0.48001817",
"0.47498053",
"0.4648304",
"0.4629172",
"0.4613406",
"0.4605138",
"0.45951557",
"0.45777294",
"0.4507989",
"0.4484301",
"0.44649675",
"0.44622406",
"0.4459265",
"0.4450288",
"0.44489193",
"0.44242927",
"0.4397093",
"0.4384117",
"0.4370503",
"0.434511",
"0.43447024",
"0.43170387",
"0.43110603"
] |
0.614961
|
0
|
Computes the Canonical Form of a GrammarDict
|
def canonicalize_grammar(grammar,empty):
rules = grammar.rules
# First ensure right-hand sides of containers are Choice nodes.
result = {}
for key, value in rules.items():
if isinstance(value,ContainerRule):
if isinstance(value,Choice):
# Choice nodes are unchanged
result[key] = value
else:
result[key] = grammar.MakeChoice([value])
else:
result[key] = value
# Now iteratively simplify rules.
# Replace a complex sub-component with a new rule.
# Repeat until settling.
keep_going = True
while keep_going:
keep_going = False
rules = dict(result)
for key, value in rules.items():
if isinstance(value,LeafRule):
result[key] = value
else:
# The value is a Choice
made_a_new_one = False
parts = []
def add_rule(key,*values):
"""
Records a new rule with the given key and value.
Args:
key: A SymbolName whose name is the key into the result
dictionary
values: A list of alternatives
Returns: The key's Symbol
"""
rhs = grammar.MakeChoice(list(values))
result[key.content] = rhs
return key
for i in range(len(value)):
item = value[i]
item_key = grammar.MakeSymbolName("{}/{}".format(key,str(i)))
if isinstance(item,LeafRule):
parts.append(item)
elif isinstance(item,Repeat1):
# value[i] -> X+
# becomes
# value[i] -> value.i
# value.i -> X value.i
# value.i -> epsilon
x = item[0]
parts.append(add_rule(item_key,
grammar.MakeSeq([x,item_key]),
empty))
made_a_new_one = True
elif isinstance(item,Choice):
# Sub-Choices expand in place.
parts.extend(item)
made_a_new_one = True
elif isinstance(item,Seq):
# Expand non-leaf elements
made_a_new_seq_part = False
seq_parts = []
for j in range(len(item)):
seq_item = item[j]
seq_item_key = grammar.MakeSymbolName(
"{}/{}.{}".format(key,str(i),str(j)))
if isinstance(seq_item,LeafRule):
seq_parts.append(seq_item)
else:
seq_parts.append(
add_rule(seq_item_key,seq_item))
made_a_new_seq_part = True
if made_a_new_seq_part:
parts.append(grammar.MakeSeq(seq_parts))
made_a_new_one = True
else:
parts.append(item)
if made_a_new_one:
rhs = grammar.MakeChoice(parts)
result[key] = rhs
keep_going = True
else:
result[key] = value
return result
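
# A rough illustration of the rewrite (rule names are hypothetical; the node
# constructors are the ones assumed above). A nested repetition is peeled out
# into helper rules keyed by the "{key}/{i}" / "{key}/{i}.{j}" naming scheme:
#
#   Before:  expr       -> Choice[ Seq[ 'a', Repeat1[ 'b' ] ] ]
#   After:   expr       -> Choice[ Seq[ 'a', expr/0.1 ] ]
#            expr/0.1   -> Choice[ expr/0.1/0 ]
#            expr/0.1/0 -> Choice[ Seq[ 'b', expr/0.1/0 ], empty ]
#
# i.e. the Repeat1 becomes a right-recursive helper rule that can also derive
# empty, matching the Repeat1 branch of the loop above.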
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def canonicalize(self):\n self.rules = canonicalize_grammar(self,self.empty)\n self.is_canonical = True",
"def getcontactcongressdict2(ccdump):\n d = {}\n for line in ccdump.strip().split('\\n'):\n if line.strip():\n (dist, email_form) = line.split()\n d[dist] = email_form\n return d",
"def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]",
"def _normalize(self, dictionnary):\r\n copy_dict = OrderedDict()\r\n for k,v in dictionnary.items():\r\n if isinstance(v, OrderedDict):\r\n copy_dict[k.replace('#','').replace('@','')] = self._normalize(v)\r\n else:\r\n copy_dict[k.replace('#','').replace('@','')] = v\r\n return copy_dict",
"def canonical(_signed: Json) -> bytes:\n return encode_canonical(_signed).encode(\"utf-8\")",
"def to_canonical_graph(g1, stats=None):\r\n graph = Graph()\r\n graph += _TripleCanonicalizer(g1).canonical_triples(stats=stats)\r\n return ReadOnlyGraphAggregate([graph])",
"def __init__(self, *args, **kwargs):\r\n Grammar.__init__(self)\r\n dict.__init__(self, *args, **kwargs)",
"def collate_mutation_dict(mutation_dict):\n out_dict = {}\n for chain_key, chain_dict in mutation_dict.iteritems():\n for residue_key, residue_mutations in chain_dict.iteritems():\n\n # Sanity test prior to merging the mutations on one residue. \n if type(residue_mutations) != str and len(residue_mutations) > 1:\n for i in range(1, len(residue_mutations)):\n if residue_mutations[i-1][1] != residue_mutations[i][0]:\n print \"Error: inconsistent mutation list in: \", residue_key, residue_mutations\n print \"This mutation would never completely occur.\"\n raise StandardError\n\n out_dict[residue_key] = residue_mutations[0][0] + residue_mutations[-1][-1]\n return out_dict",
"def complement(s):\n\n # dictionary setup for complement\n dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n\n # make a list of letters from string\n #print(s)\n slist = list(s)\n\n # for loop of the letters and call the base_complementary dictionary\n templist = []\n for i in slist:\n templist.append(dict[i])\n\n # join the letters of the list into string and return\n compstring = \"\"\n for i in templist:\n compstring += i\n #print(compstring)\n return compstring",
"def convert_grammar(grammar):\n\n # Remove all the productions of the type A -> X B C or A -> B a.\n global RULE_DICT\n unit_productions, result = [], []\n res_append = result.append\n index = 0\n\n for rule in grammar:\n new_rules = []\n if len(rule) == 2 and rule[1][0] != \"'\":\n # Rule is in form A -> X, so back it up for later and continue with the next rule.\n unit_productions.append(rule)\n add_rule(rule)\n continue\n elif len(rule) > 2:\n # Rule is in form A -> X B C [...] or A -> X a.\n terminals = [(item, i) for i, item in enumerate(rule) if item[0] == \"'\"]\n if terminals:\n for item in terminals:\n # Create a new non terminal symbol and replace the terminal symbol with it.\n # The non terminal symbol derives the replaced terminal symbol.\n rule[item[1]] = f\"{rule[0]}{str(index)}\"\n new_rules += [f\"{rule[0]}{str(index)}\", item[0]]\n index += 1\n while len(rule) > 3:\n new_rules.append([f\"{rule[0]}{str(index)}\", rule[1], rule[2]])\n rule = [rule[0]] + [f\"{rule[0]}{str(index)}\"] + rule[3:]\n index += 1\n # Adds the modified or unmodified (in case of A -> x i.e.) rules.\n add_rule(rule)\n res_append(rule)\n if new_rules:\n result.extend(new_rules)\n # Handle the unit productions (A -> X)\n while unit_productions:\n rule = unit_productions.pop()\n if rule[1] in RULE_DICT:\n for item in RULE_DICT[rule[1]]:\n new_rule = [rule[0]] + item\n if len(new_rule) > 2 or new_rule[1][0] == \"'\":\n result.insert(0, new_rule)\n else:\n unit_productions.append(new_rule)\n add_rule(new_rule)\n return result",
"def addCanonicalForm(documentName, lexWord):\r\n formRef = \":form_\" + replace_form(lexWord.word) + \"_\" + lexWord.unique_name + \" a ontolex:Form;\\n\"\r\n\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += lexWord.word + \"\\\"\" + lexWord.writingLanguage\r\n\r\n if lexWord.transliteration.word != \" \" and lexWord.transliteration.word != \"\" :\r\n writtenRepRef += \", \\\"\" + lexWord.transliteration.word + \"\\\"\" + lexWord.transliteration.writingLanguage\r\n writtenRepRef += \" .\"\r\n\r\n frequencyRef = \"\"\r\n if lexWord.canonicalFrequencyDict:\r\n frequencyRef = \"\\n\"\r\n for corpus,frequency in lexWord.canonicalFrequencyDict.items():\r\n if frequency != 0:\r\n frequencyRef +=' frac:frequency [a e2model:' + corpus +'; rdf:value \"' + str(frequency) + '\" ] ;\\n'\r\n frequencyRef = frequencyRef[:len(frequencyRef) -2]\r\n frequencyRef += \".\"\r\n formEntry = formRef + writtenRepRef\r\n if frequencyRef != \".\":\r\n formEntry = formEntry[:len(formEntry) -1]\r\n formEntry += \"; \"\r\n formEntry += frequencyRef\r\n\r\n with open(documentName, 'a') as f:\r\n f.write(formEntry)\r\n f.write(\"\\n\\n\")\r\n return",
"def canonical(gra):\n can_key_dct = canonical_keys(gra, backbone_only=False)\n return relabel(gra, can_key_dct)",
"def __init__(self):\n self.grammar = defaultdict(list) # store the grammar and vocabulary",
"def recode_value_dict(dictionary):\n somedict = {k:v.encode('cp1251') for k, v in dictionary.items()}\n return somedict",
"def make_canonical(self):\n n = self.cardinality\n minuc = [self.uc[i] for i in range(n)]\n perms = [[0]+p+[n-1] for p in permutations(1,n-1)]\n for p in perms:\n puc = range(n)\n for i in range(n):\n puc[p[i]] = sorted([p[y] for y in self.uc[i]])\n if puc < minuc: minuc = puc\n return minuc",
"def getcontactcongressdict(ccdump):\n d = {}\n for line in ccdump.strip().split('\\n'):\n (district, name, party, dc_office, dc_voice, district_voice, email_form, website) = line.split('\\t')\n dist = ''.join( (district[:2], '-', district[2:]) )\n d[dist] = email_form\n return d",
"def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}",
"def get_canonical_collection(self):\n C = [self.closure([('S1', ['.', self.grammar.S[0]])])] # augment the grammar\n finished = False\n while not finished: # while we add a new state to the collection\n finished = True\n for state in C:\n for symbol in self.grammar.N + self.grammar.E:\n next_state = self.go_to(state, symbol)\n if next_state is not None and next_state not in C:\n C += [next_state]\n finished = False\n return C",
"def add(counts):\n if counts:\n for k in grammar.keys():\n grammar[k] = grammar[k] + counts[k]",
"def make_chains(corpus):\n c_dict = {}\n\n for x in range(len(corpus)):\n if x < (len(corpus)-2): # not in edge\n bigram_tuple = tuple([corpus[x],corpus[x+1]])\n if bigram_tuple in c_dict:\n c_dict[bigram_tuple].append(corpus[x+2])\n else:\n c_dict[bigram_tuple] = [corpus[x+2]]\n else:\n bigram_tuple = tuple([corpus[-2],corpus[-1]]) # ran twice. Why?\n c_dict.setdefault(bigram_tuple) # could set a default word? Empty list?\n\n return c_dict",
"def spelling_normalization(words, dictionary):\n\n logging.debug('spelling_normalization(): Incoming words: ')\n logging.debug(words)\n\n def edits1(word):\n \"\"\"Return 1-edits away\"\"\"\n letters = 'abcdefghijklmnopqrstuvwxyz'\n\n def split_word(word):\n \"\"\"Returns all possible variations of word split up\"\"\"\n return [(word[:i], word[i:]) for i in range(len(word) + 1)]\n\n pairs = split_word(word)\n deletes = [a + b[1:] for (a, b) in pairs if b]\n transposes = [a + b[1] + b[0] + b[2:] for (a, b) in pairs if len(b) > 1]\n replaces = [a + c + b[1:] for (a, b) in pairs for c in letters if b]\n inserts = [a + c + b for (a, b) in pairs for c in letters]\n return set(deletes + transposes + replaces + inserts)\n\n def edits2(word):\n \"\"\"Return 2-edits away\"\"\"\n return {e2 for e1 in edits1(word) for e2 in edits1(e1)}\n\n def known(words):\n return {w for w in words if w in dictionary}\n\n corrected = []\n for word in words:\n if word.isnumeric():\n corrected.append(word)\n continue\n elif word in dictionary or wordnet.synsets(word):\n corrected.append(word)\n continue\n else: # Doesn't seem to be necessary, but will be guard clause; seem to have some suspicious deletions\n candidates = (known(edits1(word)) or\n known(edits2(word)) or\n [word])\n corrected.append(max(candidates, key=dictionary.get))\n logging.debug('spelling_normalization(): Outgoing words:')\n logging.debug(corrected)\n return corrected",
"def build_dictionary_gensim():\r\n\t# if load_dictionary_gensim():\r\n\t#\treturn\r\n\t\r\n\tglobal gensim_dictionary, common_corpus_list\r\n\t\r\n\tprint('\\nbuilding dictionary')\r\n\tgensim_dictionary = gensim.corpora.Dictionary()\r\n\t\r\n\tfor v in common_corpus_list:\r\n\t\tgensim_dictionary.add_documents([v[1].lower().split()])\r\n\t\t\r\n\tgensim_dictionary.save_as_text(paths.path_data_dictionary_txt)\r\n\tgensim_dictionary.save(paths.path_data_dictionary_dict)\r\n\r\n\t# print(gensim_dictionary.token2id)\r\n\tprint(gensim_dictionary)",
"def add_conjugates(self):\n \n # declare new dict\n self.new_dict = copy.deepcopy(self.dict)\n \n # iterate over items\n for i in range(len(self.dict)):\n for rel_tag, hopping in self.dict[i].items():\n x, y, z, j = rel_tag\n reverse_tag = (-x, -y, -z, i)\n reverse_hopping = np.conjugate(np.transpose(hopping))\n if reverse_tag not in self.new_dict[j]:\n self.new_dict[j][reverse_tag] = reverse_hopping\n \n # done\n self.dict = self.new_dict",
"def generate_antonym_pairs(config: SettingConfig) -> dict:\n print(f\"Generating initial antonym pairs from RoWordNet @ {datetime.now()}\")\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get the outbound relations of type antonym from\n outbound_relations = filter(lambda x: x[1] == 'near_antonym', wn.outbound_relations(synset_id))\n\n # Iterate outbound relations\n for relation in outbound_relations:\n # Get the synset corresponding to the target of the outbound relation\n target_synset = wn.synset(relation[0])\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_cross_synset_pairs(synset, target_synset)\n\n # Add the current set of pairs\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n # Return the whole dictionary\n print(f\"Successfully generated antonym paris @ {datetime.now()}\")\n return pairs",
"def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()",
"def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()",
"def get_cnf(self):\n nonterm = set(self.nonterminal)\n term = set(self.terminal)\n\n rules = list(self.rules)\n cnf = set()\n\n # STEP 1: eliminate nonsolitary terminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 1:\n rhs_list = list(rhs)\n for j in range(len(rhs_list)):\n x = rhs_list[j]\n if x in term: # found nonsolitary terminal\n new_nonterm = 'NT_{}'.format(x)\n new_nonterm_rule = GrammarRule(new_nonterm, (x,), 0.0)\n\n if new_nonterm not in nonterm:\n nonterm.add(new_nonterm)\n cnf.add(new_nonterm_rule)\n else:\n assert new_nonterm_rule in cnf\n rhs_list[j] = new_nonterm\n rhs = tuple(rhs_list)\n rules[i] = GrammarRule(lhs, rhs, log_prob)\n\n # STEP 2: eliminate rhs with more than 2 nonterminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 2:\n assert all(x in nonterm for x in rhs), rule\n current_lhs = lhs\n for j in range(len(rhs) - 2):\n new_nonterm = 'BIN_\"{}\"_{}'.format(\n '{}->{}'.format(lhs, ','.join(rhs)), str(j))\n assert new_nonterm not in nonterm, rule\n nonterm.add(new_nonterm)\n cnf.add(\n GrammarRule(current_lhs,\n (rhs[j], new_nonterm),\n log_prob if j == 0 else 0.0))\n current_lhs = new_nonterm\n cnf.add(GrammarRule(current_lhs, (rhs[-2], rhs[-1]), 0.0))\n else:\n cnf.add(rule)\n\n return Grammar(cnf)",
"def buildPDBChainCATHDict(cath_file, iscommpressed=True):\n if iscommpressed:\n gunzip(cath_file, 'cath_b.all.temp')\n cath_file = 'cath_b.all.temp'\n \n cath_dict_temp = dict()\n cath_i_dict = dict()\n with open(cath_file, 'r') as file_temp:\n for line in file_temp:\n line = line.strip()\n if line != '':\n line_list = line.split(' ')\n cath_dict_temp[line_list[0]] = line_list[1:]\n key, value = line[0:5], line[5:7]\n if key in cath_i_dict:\n cath_i_dict[key].append(value)\n else:\n cath_i_dict[key] = [value]\n pdbChain2CATH = dict()\n for key, values in cath_i_dict.items():\n pdbChain2CATH[key] = []\n for v in values:\n pdbChain2CATH[key].append(cath_dict_temp[key+v])\n if iscommpressed:\n remove(cath_file) \n return pdbChain2CATH",
"def phrase_dict(phrase):\n switcher = {\n '처음으로': '닥앤미 병원을 찾아주셔서 감사합니다. 직접문의원할시 오른쪽 아래 1:1 버튼을 눌러주시면 직접 상담 가능합니다. 1:1 상담 가능 시간은 09시 – 18시 입니다.',\n '병원 정보': '어떤 정보를 보시고 싶으신가요?',\n '병원 위치': '“닥앤미 병원 주소는 서울시 용산구 이촌동 세움상가 2층입니다.” 더 자세한 지도확인을 원하실 경우 아래 버튼을 눌러주세요',\n '병원 운영시간': '닥앤미 병원을 찾아주셔서 감사합니다. 병원 운영시간은 위의 내용과 같습니다',\n '병원 프로모션': '현재 진행되고 있는 병원 프로모션입니다. 자세히 보길 원하시면 아래의 프로모션을 선택해 주세요',\n '프로모션 A': '닥앤미에서 6월 30일까지 제공되는 프로모션 A 입니다.',\n '프로모션 B': '닥앤미에서 6월 30일까지 제공되는 프로모션 B 입니다.',\n '프로모션 C': '닥앤미에서 6월 30일까지 제공되는 프로모션 C 입니다.',\n '의료진': '안녕하세요, 닥앤미의 홍길동 전문의 입니다. 항상 최선을 다하겠습니다.',\n '병원 사진': '최고의 진료를 제공하는 닥앤미 병원입니다.',\n '병원 진료과목': '닥앤미 병원의 진료과목입니다.',\n '병원 전화하기': '닥앤미 병원 전화번호는 02 3522 XXXX 입니다. 지금 통화를 원하시면 아래 버튼을 눌러주세요'\n }\n default_text = 'Unable to find appropriate text response'\n return switcher.get(phrase, default_text)",
"def _create_conversion_trie(strict):\n t = pygtrie.CharTrie()\n\n for beta, uni in _map.BETACODE_MAP.items():\n if strict:\n t[beta] = uni\n else:\n # The order of accents is very strict and weak. Allow for many orders of\n # accents between asterisk and letter or after letter. This does not\n # introduce ambiguity since each betacode token only has one letter and\n # either starts with a asterisk or a letter.\n diacritics = beta[1:]\n\n perms = itertools.permutations(diacritics)\n for perm in perms:\n perm_str = beta[0] + ''.join(perm)\n t[perm_str.lower()] = uni\n t[perm_str.upper()] = uni\n\n return t"
] |
[
"0.6435501",
"0.5088603",
"0.49550718",
"0.4946942",
"0.49384597",
"0.49277622",
"0.48983136",
"0.48827356",
"0.4867049",
"0.48520616",
"0.48256868",
"0.48221284",
"0.4769079",
"0.47471413",
"0.47440088",
"0.4743303",
"0.47369075",
"0.4717826",
"0.47074553",
"0.47037065",
"0.47021917",
"0.4698409",
"0.4677816",
"0.466979",
"0.46685126",
"0.46685126",
"0.46543765",
"0.4649486",
"0.46251673",
"0.46216142"
] |
0.57712275
|
1
|
Records a new rule with the given key and value.
|
def add_rule(key,*values):
rhs = grammar.MakeChoice(list(values))
result[key.content] = rhs
return key
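
# Sketch of the effect (names hypothetical; 'grammar' and 'result' are free
# variables captured from the enclosing canonicalization pass shown earlier):
#   key = grammar.MakeSymbolName('expr/0')
#   add_rule(key, some_seq, grammar.empty)
#   # result['expr/0'] is now grammar.MakeChoice([some_seq, grammar.empty]),
#   # and the returned symbol can be spliced in where the old subtree was.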
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __setitem__(self, key, value):\n self.rules[key] = value",
"def addRule(self, ruleLine):\n cols = ruleLine.split(' ')\n positionNumber = int(cols[0])\n self._rules[positionNumber] = {}\n for i in range(1, len(cols)):\n self._rules[positionNumber][cols[i].upper()] = 1",
"def add_rule(self, rule):\n \n self.rules.append(rule)",
"def add_rule(rule):\n global RULE_DICT\n\n if rule[0] not in RULE_DICT:\n RULE_DICT[rule[0]] = []\n RULE_DICT[rule[0]].append(rule[1:])",
"def add_rule(self, rule):\n self.rule.append(rule)",
"def add_rule(self, rule: Rule):\n self.rules.append(rule)",
"def add_rule(self, rule) -> None:\n self.add_rules([rule])",
"def insert(self, rule, ident):\n self[ident] = rule",
"def add_rule(self, rule):\n assert isinstance(rule, Rule)\n self.rule.append(rule)",
"def add(self, key, value):",
"def add_rule(self, rule: interpreter.Rule) -> None:\n\n if rule.target not in self.rules:\n self.rules[rule.target] = rule\n else:\n self.rules[rule.target] |= rule",
"def register_rule(cls, rule_func):\n cls._rules_factories.append(rule_func)",
"def add_rule(self, rule):\n\n\t\tif self._mode == Mode.PassThrough:\n\t\t\traise ValueError(\"Can't edit rules while in passthrough mode\")\n\n\t\tif self._mode == Mode.BlackList:\n\t\t\tself._log.info('Adding new rule to the blacklist rules set: %s' % rule)\n\t\t\tself._blacklist_rules.append(rule)\n\n\t\tif self._mode == Mode.WhiteList:\n\t\t\tself._log.info('Adding new rule to the whitelist rules set: %s' % rule)\n\t\t\tself._whitelist_rules.append(rule)\n\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()",
"def add(self, key, val, comment='') :\n \n # add lines:\n self.outfile.append('\\n')\n if len(comment) > 0 : self.outfile.append('! %s\\n' % comment)\n self.outfile.append('%s : %s\\n' % (key, str(val)))\n\n # add to dictionairy:\n self.values[key] = val\n \n # ok\n return",
"def add_rule(self, rule, on=None, off=None, strength=1.):\n\n self.x[on:off, :, get_rule_index(rule, self.config)] = strength",
"def add_rule_to_dict(rule_dict, lhs, rhs):\n if rhs not in rule_dict:\n rule_dict[rhs] = list()\n rule_dict[rhs].append(lhs) \n return rule_dict",
"def rule_add(self, rulename, rule, commentline):\n\n if '->' in rule:\n zeroes = '|'.join(self.zerosymbols)\n rule = '[~$[' + zeroes + '] .o. [' + rule + ']]/[' + zeroes + ']'\n\n FST.define(rule, rulename)\n myrule = FST(rule)\n self.rules[rulename] = myrule\n self.comments[rulename] = commentline",
"def add(self, key, value):\n self.data.append((key, value))",
"def append_rule(self, rule):\n\n self._control_manager.append_rule(rule)",
"def add(self, key, val):\n key_ = KeyValue()\n key_.key = key\n key_.value = val\n self.values.append(key_)",
"def append(self, key, record):\n if not self._schema:\n self._schema = _infer_schema(record)\n self._write_schema()\n\n # append record to datatmp\n offset = self._write_record(record)\n\n # add key and offset ptr to in-memory keymap dictionary\n self._keymap[key] = offset",
"def edit_rule(self, value, new=False):\n\n if value >= 0 or new:\n if new:\n name = None\n rule = {}\n else:\n name = self.keys[value]\n rule = self.rules[value]\n text = '\"\"\"\\nIf you don\\'t need a setting, just leave it as None.\\n'\n text += 'When the rule is parsed, the default will be used.\\n'\n text += 'Each variable is evaluated separately, so you cannot substitute variables '\n text += 'in other variables.\\n\"\"\"\\n'\n text += '\\n# name (str): Rule name. Required.\\n'\n text += self.format_string('name', name)\n text += '\\n# find (str): Regular expression pattern or literal string.\\n'\n text += '# Use (?i) for case insensitive. Use (?s) for dotall.\\n'\n text += '# See https://docs.python.org/3.4/library/re.html for more info on regex flags.\\n'\n text += '# Required unless \"scope\" is defined.\\n'\n text += self.format_regex_string('find', rule.get('find'))\n text += '\\n# replace (str - default=r\\'\\\\g<0>\\'): Replace pattern.\\n'\n text += self.format_regex_string('replace', rule.get('replace'))\n text += '\\n# literal (bool - default=False): Preform a non-regex, literal search and replace.\\n'\n text += self.format_bool('literal', rule.get('literal'))\n text += '\\n# literal_ignorecase (bool - default=False): Ignore case when \"literal\" is true.\\n'\n text += self.format_bool('literal_ignorecase', rule.get('literal_ignorecase'))\n text += '\\n# scope (str): Scope to search for and to apply optional regex to.\\n'\n text += '# Required unless \"find\" is defined.\\n'\n text += self.format_string('scope', rule.get('scope'))\n text += '\\n# scope_filter ([str] - default=[]): An array of scope qualifiers for the match.\\n'\n text += '# Only used when \"scope\" is not defined.\\n'\n text += '#\\n'\n text += '# - Any instance of scope qualifies match: scope.name\\n'\n text += '# - Entire match of scope qualifies match: !scope.name\\n'\n text += '# - Any instance of scope disqualifies match: -scope.name\\n'\n text += '# - Entire match of scope disqualifies match: -!scope.name\\n'\n text += self.format_array('scope_filter', rule.get('scope_filter'))\n text += '\\n# greedy (bool - default=True): Apply action to all instances (find all).\\n'\n text += '# Used when \"find\" is defined.\\n'\n text += self.format_bool('greedy', rule.get('greedy'))\n text += '\\n# greedy_scope (bool - default=True): Find all the scopes specified by \"scope.\"\\n'\n text += self.format_bool('greedy_scope', rule.get('greedy_scope'))\n text += '\\n# format_replace (bool - default=False): Use format string style replace templates.\\n'\n text += '# Works only for Regex (with and without Backrefs) and Re (with Backrefs).\\n'\n text += '# See https://facelessuser.github.io/backrefs/usage/#format-replacements for more info.\\n'\n text += self.format_bool('format_replace', rule.get('format_replace'))\n text += '\\n# selection_inputs (bool -default=False): Use selection for inputs into find pattern.\\n'\n text += '# Global setting \"selection_only\" must be disabled for this to work.\\n'\n text += self.format_bool('selection_inputs', rule.get('selection_inputs'))\n text += '\\n# multi_pass (bool - default=False): Perform multiple sweeps on the scope region to find\\n'\n text += '# and replace all instances of the regex when regex cannot be formatted to find\\n'\n text += '# all instances. 
Since a replace can change a scope, this can be useful.\\n'\n text += self.format_bool('multi_pass', rule.get('multi_pass'))\n text += '\\n# plugin (str): Define replace plugin for more advanced replace logic.\\n'\n text += self.format_string('plugin', rule.get('plugin'))\n text += '\\n# args (dict): Arguments for \\'plugin\\'.\\n'\n text += self.format_dict('args', rule.get('args'))\n text += '\\n# ----------------------------------------------------------------------------------------\\n'\n text += '# test: Here you can setup a test command. This is not saved and is just used for this session.\\n'\n text += '# - replacements ([str]): A list of regex rules to sequence together.\\n'\n text += '# - find_only (bool): Highlight current find results and prompt for action.\\n'\n text += '# - action (str): Apply the given action (fold|unfold|mark|unmark|select).\\n'\n text += '# This overrides the default replace action.\\n'\n text += '# - options (dict): optional parameters for actions (see documentation for more info).\\n'\n text += '# - key (str): Unique name for highlighted region.\\n'\n text += '# - scope (str - default=\"invalid\"): Scope name to use as the color.\\n'\n text += '# - style (str - default=\"outline\"): Highlight style (solid|underline|outline).\\n'\n text += '# - multi_pass (bool): Repeatedly sweep with sequence to find all instances.\\n'\n text += '# - no_selection (bool): Overrides the \"selection_only\" setting and forces no selections.\\n'\n text += '# - regex_full_file_with_selections (bool): Apply regex search to full file then apply\\n'\n text += '# action to results under selections.\\n'\n text += textwrap.dedent(\n \"\"\"\\\n test = {\n \"replacements\": [%s],\n \"find_only\": True,\n \"action\": None,\n \"options\": {},\n \"multi_pass\": False,\n \"no_selection\": False,\n \"regex_full_file_with_selections\": False\n }\n \"\"\" % (self.simple_format_string(name) if name is not None else '')\n )\n\n replace_view = self.window.create_output_panel('reg_replace')\n replace_view.run_command('reg_replace_panel_insert', {'text': text})\n for ext in ST_LANGUAGES:\n highlighter = sublime.load_settings(\n 'reg_replace.sublime-settings'\n ).get('python_highlighter', 'Python/Python')\n highlighter = 'Packages/' + highlighter + ext\n try:\n sublime.load_resource(highlighter)\n replace_view.set_syntax_file(highlighter)\n break\n except Exception:\n pass\n replace_view.settings().set('gutter', True)\n replace_view.settings().set('line_numbers', True)\n replace_view.settings().set('reg_replace.edit_view', True)\n replace_view.settings().set('bracket_highlighter.bracket_string_escape_mode', 'regex')\n replace_view.settings().set('regreplace.name', name)\n replace_view.sel().clear()\n replace_view.sel().add(sublime.Region(0, 0))\n self.window.run_command(\"show_panel\", {\"panel\": \"output.reg_replace\"})\n sublime.set_timeout(lambda w=self.window, v=replace_view: w.focus_view(v), 100)",
"def rule_id(self, rule_id):\n\n self._rule_id = rule_id",
"async def add(self, category, key, value=None):\n if value is None:\n data = {}\n elif self.default_value_field is None:\n data = value\n else:\n data = {self.default_value_field: value}\n\n if self.category_field:\n data[self.category_field] = category\n\n if self.key_field:\n data[self.key_field] = key\n\n record = self.model(**data)\n self.session.add(record)\n self.session.flush()",
"def add_triggered_rule(\n self,\n scan_file,\n line_number,\n column_number,\n rule_id,\n rule_name,\n rule_description,\n extra_error_information,\n ):\n new_entry = (\n scan_file,\n line_number,\n column_number,\n rule_id,\n rule_name,\n rule_description,\n extra_error_information,\n )\n self.__reported.append(new_entry)",
"def create(self, key, val):\n if key not in self._datastore:\n self._datastore[key] = val\n return True\n else:\n raise KeyError(\n \"Tried to create a record for an existing key\"\n )",
"def set_rule(self, rule):\n self.rule.load_state_dict(rule, strict=True)",
"def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)",
"def add_rule_to_route(feed, fare_id, route_id, rules=None):\n\n infostring = \"adding fare \" + fare_id + \" to \" + route_id\n\n if not rules:\n rules = {}\n else:\n infostring += str(repr(rules))\n\n rules['fare_id'] = fare_id\n rules['route_id'] = route_id\n\n print infostring\n\n if 'fare_rules' not in feed.by_id:\n feed.by_id['fare_rules'] = {}\n\n factory = feed.FACTORIES['fare_rules']\n info = factory.from_row(rules)\n feed.by_id['fare_rules'][route_id] = info",
"def create(self, key, value):\n raise NotImplementedError"
] |
[
"0.70236945",
"0.64182764",
"0.6383722",
"0.63762146",
"0.63658285",
"0.6229125",
"0.61702883",
"0.60239136",
"0.6008493",
"0.59731805",
"0.58936876",
"0.58755374",
"0.5871394",
"0.5816767",
"0.5798326",
"0.57449096",
"0.5696872",
"0.5616359",
"0.5581987",
"0.55750114",
"0.5551673",
"0.5551529",
"0.554966",
"0.5544566",
"0.5531742",
"0.5530465",
"0.55236155",
"0.55144197",
"0.55071616",
"0.5502124"
] |
0.7085595
|
0
|
Computes the First set for each node in the grammar. Populates the `first` attribute of each node.
|
def compute_first_sets(grammar,rules):
grammar.reset_first_follow()
names_of_non_terminals = []
grammar.end_of_text.first_data = set({grammar.end_of_text})
grammar.empty.first_data = set({grammar.empty})
for key, rule in rules.items():
if rule.is_terminal() or rule.is_empty():
# If X is a terminal, then First(X) is {X}
# Lazy load it.
dummy = rule.first()
elif rule.is_symbol_name():
names_of_non_terminals.append(key)
else:
# rule is a Choice node
for rhs in rule:
# If X -> empty is a production, then add Empty
if rhs.is_empty():
rule.first_data = set({rhs})
names_of_non_terminals.append(key)
def lookup(rule):
return rules[rule.content] if isinstance(rule,SymbolName) else rule
def dynamic_first(rule,depth):
"""
Returns the currently computed approximation to the First set for a
rule.
The rule is from a Canonical grammar, so a non-terminal can be as
complex as a Choice over Sequences over symbols that may reference
other non-terminals. Gather updated First set info for at most
those first two levels, and use a previous-computed approximation for
the nonterminals at that second level.
Args:
rule: the Rule in question
depth: recursion depth
Returns:
A new approximation to the First set for the given rule.
"""
if rule.is_symbol_name():
return rules[rule.content].first()
if rule.is_empty():
return rule.first()
if rule.is_terminal():
# The terminal isn't registered in the dictionary.
return set({rule})
if isinstance(rule,Choice):
result = rule.first()
#for item in [lookup(i) for i in rule]:
for item in rule:
result = result.union(dynamic_first(item,depth+1))
return result
if isinstance(rule,Seq):
result = rule.first()
# Only recurse 2 levels deep
if depth < 2:
items = [lookup(item) for item in rule]
else:
items = rule
# Add the first sets for Yi if all the earlier items can derive
# empty. But don't add empty itself from this prefix.
for item in items:
from_first = dynamic_first(item,depth+1)
from_first = without_empty(from_first)
result = result.union(from_first)
if not item.derives_empty():
# Not known to derive empty. Stop here.
break
# If all the items derive empty, then add Empty to the first set.
if all([lookup(item).derives_empty() for item in rule]):
result = result.union({grammar.empty})
return result
raise RuntimeError("trying to dynamically compute the First set of: "
+ str(rule))
# Repeat until settling.
keep_going = True
while keep_going:
keep_going = False
for key in names_of_non_terminals:
rule = rules[key]
# Accumulate First items from right-hand sides
df = dynamic_first(rule,0)
new_items = df - rule.first()
if len(new_items) > 0:
rule.first_data = rule.first().union(new_items)
keep_going = True
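
# For intuition, a self-contained sketch of the same fixed-point computation on
# a toy grammar written as plain dictionaries. This is an independent
# illustration of the algorithm, not the Grammar/Rule API used above.
def toy_first_sets(g):
    # g maps each nonterminal to a list of alternatives; '' marks epsilon and
    # any symbol that is not a key of g is treated as a terminal.
    first = {nt: set() for nt in g}
    changed = True
    while changed:                 # repeat until the sets settle
        changed = False
        for nt, alts in g.items():
            for alt in alts:
                all_nullable = True
                for sym in alt:
                    if sym == '':
                        add = set()
                    elif sym in g:
                        add = first[sym] - {''}   # current approximation
                    else:
                        add = {sym}               # terminal
                    if not add <= first[nt]:
                        first[nt] |= add
                        changed = True
                    # stop at the first symbol that cannot derive epsilon
                    if sym != '' and not (sym in g and '' in first[sym]):
                        all_nullable = False
                        break
                if all_nullable and '' not in first[nt]:
                    first[nt].add('')
                    changed = True
    return first

# Example: toy_first_sets({'S': [['A', 'b'], ['c']], 'A': [['a'], ['']]})
# returns {'S': {'a', 'b', 'c'}, 'A': {'a', ''}}.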
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_first(self):\n compute_first_sets(self, self.rules)",
"def get_first():\n for s in TERMINAL_SET:\n # For each terminal, initialize First with itself.\n sym = SYMBOL_DICT[s]\n sym.first_set = set([s])\n\n for s in NON_TERMINAL_SET:\n sym = SYMBOL_DICT[s]\n if sym.is_nullable:\n sym.first_set = set(['null'])\n else:\n sym.first_set = set()\n\n while True:\n first_set_is_stable = True\n for p in PRODUCTION_LIST:\n sym_left = symbol_for_str(p.left)\n if p.right[0] == 'null':\n sym_left.first_set.update(set(['null']))\n continue\n previous_first_set = set(sym_left.first_set)\n\n for s in p.right:\n # For X -> Y..., First(X) = First(X) U First(Y)\n sym_right = symbol_for_str(s)\n sym_left.first_set.update(sym_right.first_set)\n # For X -> Y1 Y2 ... Yi-1 , if Y1...Yi-1 is all nullable\n # Then First(X) = First(X) U First(Y1) U First(Y2) ...\n if sym_right.is_nullable:\n continue\n else:\n break\n\n if previous_first_set != sym_left.first_set:\n first_set_is_stable = False\n\n if first_set_is_stable:\n break",
"def dynamic_first(rule,depth):\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))",
"def firsts(self):\r\n return self._children_of[None]",
"def first(self, input):\n FirstA = set([])\n\n if input.strip(\"'\") in self.T:\n return {input.strip(\"'\")}\n\n elif input == 'eps':\n return {'eps'}\n\n elif input in self.N:\n for alpha in self.P[input]:\n FirstA |= self.first(alpha)\n\n elif input.strip('[]') in self.N:\n FirstA |= {'eps'} | self.first(input.strip('[]'))\n\n else:\n for alpha in input.split(sep=' '):\n FirstA |= self.first(alpha) - {'eps'}\n if 'eps' not in FirstA:\n break\n\n return FirstA",
"def first(grammar,phrase):\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n # Map names of nonterminals to the nonterminals themselves\n phrase = [lookup(i) for i in phrase]\n\n result = set()\n for item in phrase:\n we = without_empty(item.first())\n result = result.union(we)\n if not item.derives_empty():\n break\n if derives_empty(grammar.rules,phrase):\n result.add(grammar.empty)\n return result",
"def first(self): #TODO\r\n result = []\r\n for x in self.first_lookup(self.initialsymbol):\r\n result += x.first()\r\n if len(result) == 1:\r\n return result[0]\r\n return Choice(result)",
"def breadth_first(self):\n q = Queue()\n q.enqueue(self)\n while q.size() > 0:\n node = q.dequeue()\n yield node.val\n if node.left:\n q.enqueue(node.left)\n if node.right:\n q.enqueue(node.right)",
"def _set_first_initial(self, force=False):\n\n if self.first_initial and not force:\n return\n self.first_initial = ' '.join([c[0] for c in self.first_name.split()])",
"def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):\n dfas = nonterminal_to_dfas[nonterminal]\n new_first_plans = {}\n first_plans[nonterminal] = None # dummy to detect left recursion\n # We only need to check the first dfa. All the following ones are not\n # interesting to find first terminals.\n state = dfas[0]\n for transition, next_ in state.transitions.items():\n # It's a string. We have finally found a possible first token.\n new_first_plans[transition] = [next_.next_dfa]\n\n for nonterminal2, next_ in state.nonterminal_arcs.items():\n # It's a nonterminal and we have either a left recursion issue\n # in the grammar or we have to recurse.\n try:\n first_plans2 = first_plans[nonterminal2]\n except KeyError:\n first_plans2 = _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal2)\n else:\n if first_plans2 is None:\n raise ValueError(\"left recursion for rule %r\" % nonterminal)\n\n for t, pushes in first_plans2.items():\n new_first_plans[t] = [next_] + pushes\n\n first_plans[nonterminal] = new_first_plans\n return new_first_plans",
"def first(self):\n self._ll_tree.first()",
"def breadth_first(self):\n import queue\n keeper = queue.Queue()\n keeper.enqueue(self)\n while(keeper.size() != 0):\n temp = keeper.dequeue()\n if temp.val is not None:\n yield temp.val\n if temp.left is not None:\n keeper.enqueue(temp.left)\n if temp.right is not None:\n keeper.enqueue(temp.right)",
"def breadth_first(self):\n nodes_to_vist = []\n curr = self._root\n nodes_to_vist.append(curr)\n while len(nodes_to_vist):\n curr = nodes_to_vist[0]\n if curr._lkid:\n nodes_to_vist.append(curr._lkid)\n if curr._rkid:\n nodes_to_vist.append(curr._rkid)\n yield curr._data\n nodes_to_vist.remove(curr)",
"def FIRST(L):\n global fi,eps\n R=set()\n eps_appear=False\n for x in L:\n eps_appear=False\n if not x.isTerminal():\n for o in fi[x]:\n if o==eps:\n eps_appear=True\n else:\n R.add(o)\n if eps not in fi[x]:\n break\n elif x!=eps:\n R.add(x)\n break\n else: # x==eps\n eps_appear=True\n if eps_appear:\n R.add(eps)\n if len(R)==0:\n R.add(eps)\n return R",
"def first_iteration(mat, num_range, sub):\n\t#Creating the tree for the first iteration. \n\ttree = pMatrix.create_tree(mat, num_range, sub)\n\t\n\t#Adding the tree to all_trees. \n\tall_trees.append(tree)\n\t\n\t#Calculating the total number of states in the first iteration.\n\tnum_states = tree.get_num_states()\n\t\n\t#Adding total number of states for the first iteration to all_total_states.\n\tall_total_states.append(num_states)\n\t\n\t#Adding all states to be explored in the first iteration to all_states_explored.\n\tfor st in tree.get_all_states():\n\t\tall_states_explored.append(st)\n\t\t\n\t#Adding super states from first tree to super_states.\n\tfor sp in tree.get_super_states():\n\t\tsuper_states.append(sp)\n\t\n\t#Adding results for first iteration to final list.\n\tall_results.append(pMatrix.main(mat, num_range,sub))",
"def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow",
"def get_initial_states(self):\n return product(*[phi.automaton().states.initial for phi in self])",
"def init_sets(fastapath):\n st = set()\n with open (fastapath, 'r') as f:\n\n for rec in SeqIO.parse(f, 'fasta'):\n sq = str(rec.seq)\n st.add(sq)\n\n return st",
"def first(s):\n assert is_link(s),\"first only applies ti linked lists.\"\n assert s != empty, \"empty linked list has no first element.\"\n return s[0]",
"def first(self):\n return self._reduce_for_stat_function(F.first, only_numeric=False)",
"def set_first_incident_node(self, first_incident_node):\n # overwrite the existing first incident node with the input first incident Node object\n self.first_incident_node = first_incident_node",
"def first(self):\n\n for literal in self.literals:\n return literal",
"def first(self):\r\n return self.__head",
"def first(self):\n return self.__head",
"def testFirstSet(self):\n\n self.node.desc = 'first description'\n\n self.assertEqual(\n ['first description', ],\n self.node.desc\n )",
"def test_first(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\"],\n args=[],\n kwargs={},\n expect=\"a\",\n ),\n Case(\n description=\"lists of things\",\n val=[\"a\", \"b\", 1, [], {}],\n args=[],\n kwargs={},\n expect=\"a\",\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=None,\n ),\n Case(\n description=\"unexpected argument\",\n val=[\"a\", \"b\"],\n args=[\", \"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=12,\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"first of undefined\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=None,\n ),\n ]\n\n self._test(First, test_cases)",
"def first(s):\n assert is_link(s), 'first only applies to linked lists.'\n assert s != empty, 'empty linked list has no first element.'\n return s[0]",
"def _first(self) -> Tuple[np.ndarray, np.ndarray, ModelGeneratorBase]:\n pass",
"def first(s):\n assert is_link(s), 'fist only applies to a linked list.'\n assert s != empty, 'empty linked list has no first element.'\n return s[0]",
"def first(s):\n assert is_link(s), \"first only applies to linked lists.\"\n assert s != empty, \"empty linked list has no first element.\"\n return s[0]"
] |
[
"0.7219357",
"0.6483622",
"0.6467381",
"0.60188544",
"0.58658564",
"0.5787844",
"0.56534517",
"0.5636613",
"0.5614592",
"0.5549027",
"0.5497881",
"0.5446197",
"0.544467",
"0.54377466",
"0.53846097",
"0.5349789",
"0.5243688",
"0.5242174",
"0.52311367",
"0.5214554",
"0.5208327",
"0.5159163",
"0.51569563",
"0.51491565",
"0.51445335",
"0.51348585",
"0.51155645",
"0.51133466",
"0.5093056",
"0.50824744"
] |
0.75325096
|
0
|
Returns a copy of list L without Empty
|
def list_without_empty(L):
return [i for i in L if not i.is_empty()]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove_blanks_list(src):\n return [el for el in src if el]",
"def compact_list(self):\n return [ele for ele in self if ele is not None]",
"def copy_list(self,list_):\r\n return list_[:]",
"def prune_empty(lst: List[T]) -> List[T]:\n return [elem for elem in lst if elem]",
"def copy(self):\n return List(*self.__list)",
"def empty_list(*args):\n return []",
"def _filter_empty(lst):\n return [cell for cell in lst if cell is not Sudoku.EMPTY_CELL]",
"def without_empty(s):\n return {i for i in s if not i.is_empty()}",
"def uncons(self):\n if self:\n return self.head, self.tail\n else:\n raise ValueError(\"cannot desconstruct empty list.\")",
"def copy(self):\n c = self.__class__(self) # using self's constructor on self\n c._srtlist = list(self._srtlist) # copy, srtlist not copied in init\n return c",
"def empty_if_none(x):\n if x:\n return x\n else:\n return []",
"def list_copy(l: List[Any]) -> List[Any]:\n return [item for item in l]",
"def _strip_list(list):\n return [x for x in list if x]",
"def remove_empty_string(str_list):\n return list(filter(None, str_list))",
"def copy(self):\n return State([r[:] for r in self.values], empty_loc=self.empty_loc)",
"def compact(items):\n return filter(lambda item: item is not None and len(item) > 0, items)",
"def empty_default(xs, default):\n xs = list(xs)\n if len(xs) == 0:\n return list(default)\n else:\n return xs",
"def copy(self,list):\r\n\t\tnew = []\r\n\t\ti = 0\r\n\t\twhile i<len(list):\r\n\t\t\tif (self.exist(new,list[i]) == False):\r\n\t\t\t\tnew.append(list[i])\r\n\t\t\ti=i+1\r\n\t\treturn new",
"def _naive_unique(li):\n tmp = []\n for el in li:\n if el not in tmp:\n tmp.append(el)\n return tmp",
"def empty(stuff):\n\tfor i in range(len(stuff)):\n\t\tstuff.pop()",
"def filter_empty(word_list):\n new_list = []\n for x in word_list:\n if(x):\n new_list.append(x)\n return new_list",
"def _remove_initial_objects_from_list(self, all):\n\n new_list = []\n for obj in all:\n if obj not in self.initial_set_of_objects:\n new_list.append(obj)\n\n return new_list",
"def reset_S(self):\n self.S = [self._one_S(self.D[n]) for n in range(self.L + 1)]",
"def copy(self) -> 'List':\n return self.__class__(self)",
"def remove_empty(data):\n out = []\n for item in data:\n if item == '':\n continue\n out.append(item)\n return out",
"def clone(self):\n return _libsbml.ListOf_clone(self)",
"def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)",
"def list_delete_empty_items(list_arg: list) -> list:\n empty_values = ('', [], 0, False, None)\n # TODO: not sure if this is the right way to implement this\n return [i for i in list_arg if i not in empty_values]",
"def copy_list(self):\n if self.head:\n LIST_COPY = CircularLinkedList()\n current = self.head\n while True:\n\tLIST_COPY.append(current.data)\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n return LIST_COPY\n else:\n return CircularLinkedList()",
"def listops_uniq(list_a):\r\n retlist = []\r\n for item in list_a:\r\n if item not in retlist:\r\n retlist.append(item)\r\n\r\n return retlist"
] |
[
"0.6704229",
"0.6619279",
"0.652867",
"0.6456527",
"0.64414376",
"0.63256955",
"0.6119686",
"0.61053866",
"0.60512376",
"0.5973776",
"0.59730166",
"0.5944269",
"0.5879726",
"0.584061",
"0.58254147",
"0.57863045",
"0.57741314",
"0.5757702",
"0.57458293",
"0.5730203",
"0.5728655",
"0.57214254",
"0.57133824",
"0.5683415",
"0.5674685",
"0.56558037",
"0.5651039",
"0.56311285",
"0.56163347",
"0.55901587"
] |
0.66275156
|
1
|
Computes the First set for a Phrase, in the given grammar
|
def first(grammar,phrase):
    def lookup(rule):
        return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule

    # Map names of nonterminals to the nonterminals themselves
    phrase = [lookup(i) for i in phrase]

    result = set()
    for item in phrase:
        we = without_empty(item.first())
        result = result.union(we)
        if not item.derives_empty():
            break
    if derives_empty(grammar.rules,phrase):
        result.add(grammar.empty)
    return result
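Because first() above leans on the surrounding Grammar/SymbolName classes, here is a minimal, self-contained sketch of the same FIRST-set fixpoint over a plain dict grammar (nonterminal -> list of productions, each a list of symbols). The dict layout, the empty-string epsilon marker, and the name first_sets are illustrative assumptions, not part of the original code.

def first_sets(grammar):
    # FIRST set per nonterminal; terminals are any symbols not in the dict.
    first = {nt: set() for nt in grammar}
    changed = True
    while changed:
        changed = False
        for nt, productions in grammar.items():
            for prod in productions:
                before = len(first[nt])
                all_nullable = True
                for sym in prod:
                    if sym not in grammar:           # terminal: add it and stop
                        first[nt].add(sym)
                        all_nullable = False
                        break
                    first[nt] |= first[sym] - {""}   # FIRST(sym) minus epsilon
                    if "" not in first[sym]:
                        all_nullable = False
                        break
                if all_nullable:                     # whole production can vanish
                    first[nt].add("")
                if len(first[nt]) != before:
                    changed = True
    return first

# S -> A 'b' | 'c' ;  A -> 'a' | epsilon
demo = {"S": [["A", "b"], ["c"]], "A": [["a"], []]}
assert first_sets(demo)["S"] == {"a", "b", "c"}
assert first_sets(demo)["A"] == {"a", ""}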
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_first_sets(grammar,rules):\n grammar.reset_first_follow()\n\n names_of_non_terminals = []\n grammar.end_of_text.first_data = set({grammar.end_of_text})\n grammar.empty.first_data = set({grammar.empty})\n for key, rule in rules.items():\n if rule.is_terminal() or rule.is_empty():\n # If X is a terminal, then First(X) is {X}\n # Lazy load it.\n dummy = rule.first()\n elif rule.is_symbol_name():\n names_of_non_terminals.append(key)\n else:\n # rule is a Choice node\n for rhs in rule:\n # If X -> empty is a production, then add Empty\n if rhs.is_empty():\n rule.first_data = set({rhs})\n names_of_non_terminals.append(key)\n\n def lookup(rule):\n return rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n def dynamic_first(rule,depth):\n \"\"\"\n Returns the currently computed approximation to the First set for a\n rule.\n\n The rule is from a Canonical grammar, so a non-terminal can be as\n complex as a Choice over Sequences over symbols that may reference\n other non-terminals. Gather updated First set info for at most\n those first two levels, and use a previous-computed approximation for\n the nonterminals at that second level.\n\n Args:\n rule: the Rule in question\n depth: recursion depth\n\n Returns:\n A new approximation to the First set for the given rule.\n \"\"\"\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))\n\n # Repeat until settling.\n keep_going = True\n while keep_going:\n keep_going = False\n for key in names_of_non_terminals:\n rule = rules[key]\n # Accumulate First items from right-hand sides\n df = dynamic_first(rule,0)\n new_items = df - rule.first()\n if len(new_items) > 0:\n rule.first_data = rule.first().union(new_items)\n keep_going = True",
"def dynamic_first(rule,depth):\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))",
"def first(self, input):\n FirstA = set([])\n\n if input.strip(\"'\") in self.T:\n return {input.strip(\"'\")}\n\n elif input == 'eps':\n return {'eps'}\n\n elif input in self.N:\n for alpha in self.P[input]:\n FirstA |= self.first(alpha)\n\n elif input.strip('[]') in self.N:\n FirstA |= {'eps'} | self.first(input.strip('[]'))\n\n else:\n for alpha in input.split(sep=' '):\n FirstA |= self.first(alpha) - {'eps'}\n if 'eps' not in FirstA:\n break\n\n return FirstA",
"def all_phrases(grammar, root):\n #\n # if root not in grammar:\n # return [[root]]\n #\n # phrases = []\n # for structure in grammar[root]:\n # for fragment in structure:\n # phrases = phrases + all_phrases(grammar,fragment)\n # print(phrases)\n # return phrases\n\n if root not in grammar:\n return [[root]]\n phrases = []\n for structure in grammar[root]:\n phrase_template = []\n for speech_part in structure:\n if speech_part not in grammar:\n if len(phrase_template)>0:\n new_phrase_template = []\n for phrase in phrase_template:\n if type(phrase)==str:\n phrase = [phrase]\n new_phrase_template.append(phrase+[speech_part])\n phrase_template = new_phrase_template\n else:\n phrase_template.append([speech_part])\n else:\n if len(phrase_template)>0:\n new_phrase_template = []\n for phrase in phrase_template:\n if type(phrase)==str:\n phrase = [phrase]\n for fragment in grammar[speech_part]:\n fragmented_bool = False\n for fragmented in fragment:\n if fragmented in grammar:\n fragmented_bool = True\n for subfragment in grammar[fragmented]:\n new_phrase_template.append(phrase+subfragment)\n if not fragmented_bool:\n new_phrase_template.append(phrase+fragment)\n phrase_template = new_phrase_template\n else:\n for fragment in grammar[speech_part]:\n if fragment[0] in grammar:\n for subfragment in grammar[fragment[0]]:\n phrase_template.append(subfragment)\n else:\n phrase_template.append(fragment)\n phrases = phrases + phrase_template\n return phrases",
"def compute_first(self):\n compute_first_sets(self, self.rules)",
"def generate_grammar(gram):\r\n c = 0\r\n while gram[c] != \"start_variable\": # find start variable\r\n c += 1\r\n start = gram[c+1]\r\n grammar = pcfg.PCFG(start) # create a PCFG with start and no rules\r\n while gram[c] != \"Grammar\": # find the index of the first rule\r\n c += 1\r\n c += 3\r\n\r\n while gram[c] != '###########':\r\n c = adding_rules_grammar(c, gram, grammar) # find each rule from the grammar and add it to the grammar\r\n c += 1\r\n\r\n while gram[c] != \"Lexicon\": # find the index of the first rule of the lexicon\r\n c += 1\r\n c += 3\r\n\r\n while c < len(gram):\r\n var = gram[c]\r\n c = adding_rules_lexicon(c, gram, grammar, var) # find each rule from the lexicon and add it to the grammar\r\n c += 1\r\n return grammar",
"def __init__(self):\n self.grammar = defaultdict(list) # store the grammar and vocabulary",
"def tokenize(self, s):\n hashset = set()\n if s == '':\n return hashset\n for i in xrange(len(s) - self.ngram):\n hashset.add(s[i:i + self.ngram])\n return hashset",
"def first(self): #TODO\r\n result = []\r\n for x in self.first_lookup(self.initialsymbol):\r\n result += x.first()\r\n if len(result) == 1:\r\n return result[0]\r\n return Choice(result)",
"def get_phrases(self, first=10):\n return get_occurences(self.lemmatized_phrases)[:first]",
"def test_grammar_parse():\n print u\"%s: Grammar test\" % (__file__, )\n print u\"Deriving grammar from parsed TIGER corpus sentences\"\n #tiger_corpus = TigerCorpusReader()\n tiger_corpus = _cached(None, CORPUS_PATH, TigerCorpusReader)\n grammar_parser = tiger_corpus.viterbi_parser(False)\n grammar_parser.trace()\n\n text = nltk.word_tokenize(u\"Der Hase springt über den Baum, der sehr hoch gewachsen ist.\")\n #text = nltk.word_tokenize(u\"Der kleine gelbe Hund beobachtete die Katze.\")\n text = nltk.word_tokenize(u\"Der kleine Hund blickte zu der Katze.\")\n print u\"Parsing unknown text\"\n try:\n tree = grammar_parser.parse(text)\n if tree:\n tree.draw()\n print u\"Printing parse tree for text...\"\n print unicode(tree)\n except ValueError as e:\n print u\"Input contains words not known by grammar!\"\n print u\"%s\" % e",
"def compute_follow_sets(grammar):\n\n # 1. Place $ in FOLLOW(S), where S is the start symbol and $ is the input\n # right end marker.\n grammar.rules[grammar.start_symbol].follow = set({grammar.end_of_text})\n\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n def process_seq(key,seq,keep_going):\n \"\"\"\n Add to Follow sets by processing the given Seq node.\n\n Args:\n key: Python string name for the production\n seq: a Seq rule for the production\n keep_going: A boolean\n\n Returns:\n True if a Follow set was modified.\n keep_going otherwise\n \"\"\"\n\n # Process indirections through symbols\n seq = [lookup(i) for i in seq]\n\n last_index = len(seq)-1\n for bi in range(0,len(seq)):\n b = seq[bi]\n # We only care about nonterminals in the sequence\n if b.is_terminal() or b.is_empty():\n continue\n\n # If there is a production A -> alpha B beta\n # then everything in First(beta) except Empty is\n # added to Follow(B)\n beta = seq[bi+1:len(seq)]\n first_beta = first(grammar, beta)\n new_items = without_empty(first_beta) - b.follow\n if len(new_items) > 0:\n keep_going = True\n b.follow = b.follow.union(new_items)\n\n # If A -> alpha B, or A -> alpha B beta, where First(beta)\n # contains epsilon, then add Follow(A) to Follow(B)\n if (bi==last_index) or derives_empty(grammar.rules,beta):\n new_items = grammar.rules[key].follow - b.follow\n if len(new_items) > 0:\n keep_going = True\n b.follow = b.follow.union(new_items)\n\n return keep_going\n\n # Iterate until settled\n keep_going = True\n while keep_going:\n keep_going = False\n for key, rule in grammar.rules.items():\n if rule.is_terminal() or rule.is_empty():\n continue\n\n if isinstance(rule,Seq):\n keep_going = process_seq(key,rule,keep_going)\n continue\n\n if rule.is_symbol_name():\n keep_going = process_seq(key,[rule],keep_going)\n continue\n\n # Now process Choice over sequences:\n if isinstance(rule,Choice):\n for seq in [i.as_container() for i in rule]:\n keep_going = process_seq(key,seq,keep_going)",
"def generate_input(s_terms):\n qm = QuineMcCluskey()\n res = set()\n if len(s_terms) == 0:\n return res\n for term in s_terms:\n res = res | set([i for i in qm.permutations(term)])\n return res",
"def get_first():\n for s in TERMINAL_SET:\n # For each terminal, initialize First with itself.\n sym = SYMBOL_DICT[s]\n sym.first_set = set([s])\n\n for s in NON_TERMINAL_SET:\n sym = SYMBOL_DICT[s]\n if sym.is_nullable:\n sym.first_set = set(['null'])\n else:\n sym.first_set = set()\n\n while True:\n first_set_is_stable = True\n for p in PRODUCTION_LIST:\n sym_left = symbol_for_str(p.left)\n if p.right[0] == 'null':\n sym_left.first_set.update(set(['null']))\n continue\n previous_first_set = set(sym_left.first_set)\n\n for s in p.right:\n # For X -> Y..., First(X) = First(X) U First(Y)\n sym_right = symbol_for_str(s)\n sym_left.first_set.update(sym_right.first_set)\n # For X -> Y1 Y2 ... Yi-1 , if Y1...Yi-1 is all nullable\n # Then First(X) = First(X) U First(Y1) U First(Y2) ...\n if sym_right.is_nullable:\n continue\n else:\n break\n\n if previous_first_set != sym_left.first_set:\n first_set_is_stable = False\n\n if first_set_is_stable:\n break",
"def all_prefixes(sentence):\r\n sentences = set()\r\n while sentence[0] == ' ':\r\n sentence = sentence[1:]\r\n for j in range(len(sentence)):\r\n if sentence[j] == sentence[0]:\r\n for i in range(j, len(sentence)):\r\n sentences.add(sentence[j:i + 1])\r\n return sentences",
"def query_preprocess(input_pack: DataPack):\n sentence = input_pack.get_single(Sentence)\n\n relations = defaultdict(dict)\n text_mention_mapping = {}\n\n # get all srl relations\n for link in input_pack.get(PredicateLink, sentence):\n verb = link.get_parent()\n verb_text = verb.text\n argument = link.get_child()\n argument_text = argument.text\n\n text_mention_mapping[verb_text] = verb\n text_mention_mapping[argument_text] = argument\n relations[verb_text][link.arg_type] = argument_text\n\n arg0, arg1, predicate = None, None, None\n for verb_text, entity in relations.items():\n arg0, arg1, predicate = collect_mentions(text_mention_mapping, entity, verb_text)\n if not arg0 and not arg1:\n continue\n else:\n break\n\n if not arg0 and not arg1:\n raise Exception('AllenNLP SRL cannot extract the two arguments or the '\n 'predicate in your query, please check our examples '\n 'or rephrase your question')\n\n verb_lemma, is_answer_arg0 = None, None\n\n # check pos tag and lemma for tokens\n for j, token in enumerate(input_pack.get(entry_type=Token,\n range_annotation=sentence,\n components=['forte_wrapper.nltk.nltk_processors.NLTKWordTokenizer']\n )):\n # find WH words\n if token.pos in {\"WP\", \"WP$\", \"WRB\", \"WDT\"}:\n if arg0.begin <= token.begin and arg0.end >= token.end:\n is_answer_arg0 = True\n elif arg1.begin <= token.begin and arg1.end >= token.end:\n is_answer_arg0 = False\n\n # find verb lemma\n if token.text == predicate.text:\n verb_lemma = token.lemma\n\n return sentence, arg0.text if arg0 else '', arg1.text if arg1 else '', \\\n predicate.text, verb_lemma, is_answer_arg0",
"def dp_parse(phrase, songs=None):\n # If phrase is None, there are no parses: return empty set\n if not phrase:\n return 0, [], []\n # Tokenize into words based on spaces\n words = phrase.split()\n # If only one word, no song parsing needed: get best song match\n if len(words) == 1:\n return dp_match(phrase, songs=songs)\n # If multiple words, recursively test possible song parses\n else:\n # Initialize candidate paths, i.e. parses\n candidates = []\n N = len(words)\n for i in range(N, 0, -1):\n # Partition the phrase in two segments at position i\n words_a = ' '.join(words[0:i])\n words_b = ' '.join(words[i:])\n # Match the first part with a song, and get the results of parsing\n # the second part\n part_a = dp_match(words_a, songs=songs)\n part_b = dp_parse(words_b, songs=songs)\n # Compute the resulting total score, phrases parsings, and\n # matching songs for each phrase. Do this for each possible\n # partition.\n score_path = part_a[0] * i + part_b[0]\n word_path = part_a[1] + part_b[1]\n song_path = part_a[2] + part_b[2]\n path = (score_path, word_path, song_path)\n candidates.append(path)\n # Sort in order of highest scores, return the best path\n candidates = sorted(candidates, key=lambda x: x[0], reverse=True)\n return candidates[0]",
"def expand_first(grammar,rule):\n result = []\n # Hoist the rule for 'other' nonterminal.\n phrase = rule.as_container()\n first = phrase[0]\n assert first.is_symbol_name() and (first.content != target_rule_name)\n #print(\" elaborating rule for {} \".format(first.content))\n rest = phrase[1:]\n other_rule = self.rules[first.content]\n for other_rhs in other_rule.as_container():\n result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))\n return result",
"def FIRST(L):\n global fi,eps\n R=set()\n eps_appear=False\n for x in L:\n eps_appear=False\n if not x.isTerminal():\n for o in fi[x]:\n if o==eps:\n eps_appear=True\n else:\n R.add(o)\n if eps not in fi[x]:\n break\n elif x!=eps:\n R.add(x)\n break\n else: # x==eps\n eps_appear=True\n if eps_appear:\n R.add(eps)\n if len(R)==0:\n R.add(eps)\n return R",
"def tokenize_transcript(tokenize_method,input_transcript):\n final_lst = []\n for i in (range(0,len(input_transcript))):\n #print(tokenize_method(input_transcript[i]))\n final_lst = final_lst + list(set(tokenize_method(input_transcript[i])))\n return final_lst",
"def __init__(self, rules):\n\n self.grammar = defaultdict(list)\n self.word_pos = dict()\n self.pos = set()\n\n for rule in rules:\n rule = rule.rstrip()\n if len(rule) > 0:\n rule = rule.split('->') # split start/end\n left = rule[0].strip()\n right = [(re.sub(r'[^a-zA-Z\\d\\s-]', '', r)).strip().split(' ') for r in rule[1].split('|')]\n self.grammar[left] += right\n\n # extract POS tags\n # pos iff on lhs of rhs without lhs\n # det -> that\n # that -> #\n for left, right in self.grammar.iteritems():\n for r in right:\n for r2 in r:\n if not self.grammar.has_key(r2):\n self.pos.add(left)",
"def LR1_ItemSets(self):\n\n # The root item is the one representing the entire language.\n # Since the grammar is in canonical form, it's a Choice over a\n # single sequence.\n root_item = self.MakeItem(LANGUAGE, self.rules[LANGUAGE][0],0)\n\n # An ItemSet can be found by any of the items in its core.\n # Within an ItemSet, an item maps to its lookahead set.\n\n root_item_set = ItemSet(self, {root_item: LookaheadSet({self.end_of_text})}).close(self)\n\n LR1_item_sets_result = set({root_item_set})\n\n dirty_set = LR1_item_sets_result.copy()\n while len(dirty_set) > 0:\n work_list = dirty_set.copy()\n dirty_set = set()\n # Sort the work list so we get deterministic ordering, and therefore\n # deterministic itemset core numbering.\n for item_set in sorted(work_list):\n (_,gotos) = item_set.gotos(self)\n for (X, dest_item_set) in gotos:\n if dest_item_set not in LR1_item_sets_result:\n LR1_item_sets_result.add(dest_item_set)\n dirty_set.add(dest_item_set)\n\n return sorted(LR1_item_sets_result,key=ItemSet.pretty_key)",
"def parse (self, phrase):\r\n\r\n if isinstance(phrase,str):\r\n #If the phrase is a string\r\n if self.is_simple(phrase):\r\n #EXITS the recursion\r\n if phrase[0:2] == '~~':\r\n return phrase[2:]\r\n #Eliminates negations that cancel each other\r\n return phrase\r\n elif self.bracketed(phrase):\r\n #Eliminate top-level parantheses\r\n return self.parse(phrase[1:-1])\r\n elif phrase[0] == '~':\r\n #If the phrase begins with a negating prefix...\r\n negations,phrase = self.heading_count(phrase)\r\n \r\n if self.bracketed(phrase):\r\n #If the negated phrase is bracketed\r\n if negations % 2 == 1:\r\n subphrase = self.split_into_phrases(phrase[1:-1])\r\n if subphrase[0] != '@': \r\n #De Morgan's Law \r\n return self.parse(['@']+['~'+x for x in subphrase])\r\n else:\r\n #De Morgan's Law\r\n return self.parse(['~'+x for x in subphrase[1:]])\r\n else:\r\n return self.parse(phrase[1:-1])\r\n return self.parse(self.split_into_phrases((negations%2)*'~'+phrase))\r\n \r\n else:\r\n return self.parse(self.split_into_phrases(phrase))\r\n # IF the phrase is a list\r\n if self.all_is_P(phrase,predicate_function=self.is_simple):\r\n #If every terms of the phrase list is simple...\r\n #This prepares for EXIT from recursion\r\n return [self.parse(x) for x in phrase]\r\n return self.parse([self.parse(x) for x in phrase])",
"def canonicalize_grammar(grammar,empty):\n\n rules = grammar.rules\n\n # First ensure right-hand sides of containers are Choice nodes.\n result = {}\n for key, value in rules.items():\n if isinstance(value,ContainerRule):\n if isinstance(value,Choice):\n # Choice nodes are unchanged\n result[key] = value\n else:\n result[key] = grammar.MakeChoice([value])\n else:\n result[key] = value\n\n # Now iteratively simplify rules.\n # Replace a complex sub-component with a new rule.\n # Repeat until settling.\n keep_going = True\n while keep_going:\n keep_going = False\n rules = dict(result)\n\n for key, value in rules.items():\n if isinstance(value,LeafRule):\n result[key] = value\n else:\n # The value is a Choice\n made_a_new_one = False\n parts = []\n def add_rule(key,*values):\n \"\"\"\n Records a new rule with the given key and value.\n\n Args:\n key: A SymbolName whose name is the key into the result\n dictionary\n values: A list of alternatives\n\n Returns: The key's Symbol\n \"\"\"\n rhs = grammar.MakeChoice(list(values))\n result[key.content] = rhs\n return key\n for i in range(len(value)):\n item = value[i]\n item_key = grammar.MakeSymbolName(\"{}/{}\".format(key,str(i)))\n if isinstance(item,LeafRule):\n parts.append(item)\n elif isinstance(item,Repeat1):\n # value[i] -> X+\n # becomes\n # value[i] -> value.i\n # value.i -> X value.i\n # value.i -> epsilon\n x = item[0]\n parts.append(add_rule(item_key,\n grammar.MakeSeq([x,item_key]),\n empty))\n made_a_new_one = True\n elif isinstance(item,Choice):\n # Sub-Choices expand in place.\n parts.extend(item)\n made_a_new_one = True\n elif isinstance(item,Seq):\n # Expand non-leaf elements\n made_a_new_seq_part = False\n seq_parts = []\n for j in range(len(item)):\n seq_item = item[j]\n seq_item_key = grammar.MakeSymbolName(\n \"{}/{}.{}\".format(key,str(i),str(j)))\n if isinstance(seq_item,LeafRule):\n seq_parts.append(seq_item)\n else:\n seq_parts.append(\n add_rule(seq_item_key,seq_item))\n made_a_new_seq_part = True\n if made_a_new_seq_part:\n parts.append(grammar.MakeSeq(seq_parts))\n made_a_new_one = True\n else:\n parts.append(item)\n if made_a_new_one:\n rhs = grammar.MakeChoice(parts)\n result[key] = rhs\n keep_going = True\n else:\n result[key] = value\n\n return result",
"def pph2sphYield(pph):\n a = re.split('(^['+ _HeadPUNCTS +']+)',pph)\n a = [aa for aa in a if len(aa)>0 ] ##there may have empty string\n if len(a)>1: ## so there are punct at the begin\n for aa in a[0]: yield aa\n pph=pph[len(a[0]) : ]\n a = re.split('(['+_TailPUNCTS+']+$)',pph)\n a = [aa for aa in a if len(aa)>0 ] ##there may have empty string\n if len(a)<=1: ## split and yied if middle-puncts\n mm = re.split('([' + _MidPUNCTS + ']+)', a[0]); mm=[aa for aa in mm if len(aa)>0]\n for aa in mm: yield aa\n else: ## so there are punct at the end\n yield a[0]\n for aa in a[1]: yield aa",
"def preorder(self):\n assert self.is_canonical\n # Names of visited nodes\n visited = set()\n # Names of nodes to visit\n worklist = [LANGUAGE]\n\n result = []\n while len(worklist) > 0:\n successors = []\n for rule_name in worklist:\n if rule_name in visited:\n continue\n result.append(rule_name)\n visited.add(rule_name)\n\n rule = self.rules[rule_name].as_container()\n for rhs in rule:\n phrase = rhs.as_container()\n # Note: this tolerates duplicates among siblings.\n successors.extend([x.content for x in phrase if x.is_symbol_name() and x.content not in visited])\n worklist = successors\n return result",
"def split_into_phrases (self, phrase):\r\n\r\n if not self.contains(phrase,'()'):\r\n\r\n #For a phrase without parantheses\r\n \r\n\r\n if '|' in phrase:\r\n return ['@']+[x for x in phrase.split('|')]\r\n elif '&' in phrase:\r\n return [x for x in phrase.split('&')]\r\n\r\n #If the phrase contains parantheses.\r\n \r\n phrase = list (phrase)\r\n #convert string into a list of chars\r\n level = 0\r\n found = False # if one of the operators is found in the phrase \r\n\r\n for operator in ['#','>','|','&']:\r\n level = 0 # reset level\r\n if not found:\r\n \r\n \r\n for x,char in enumerate(phrase):\r\n if char == '(':\r\n level += 1\r\n if char == ')':\r\n level -=1\r\n # level indicates level within hierarchy established by parantheses\r\n\r\n if level == 0 and x+1 < len(phrase) and phrase[x+1] == operator:\r\n phrase[x+1] = '<<'+operator+'>>'\r\n found = True\r\n break\r\n \r\n \r\n\r\n if '<<&>>' in phrase:\r\n # For AND\r\n phrases = ''.join(phrase).split('<<&>>')\r\n elif '<<|>>' in phrase:\r\n # For OR \r\n phrases = ['@']+''.join(phrase).split('<<|>>')\r\n elif '<<>>>' in phrase:\r\n # For INFERENCE \r\n premise = ''.join(phrase).split('<<>>>')[0]\r\n conclusion = ''.join(phrase).split('<<>>>')[1]\r\n phrases = ['@','~'+premise,conclusion]\r\n # A => B translated as ~A OR B\r\n elif '<<#>>' in phrase:\r\n # FOR EQUIVALENCY \r\n premise = ''.join(phrase).split('<<#>>')[0]\r\n conclusion = ''.join(phrase).split('<<#>>')[1]\r\n \r\n phrase1 = '~'+'('+premise+'&'+'~'+conclusion+')'\r\n phrase2 = '~'+'('+conclusion+'&'+'~'+premise+')'\r\n phrases = [phrase1,phrase2]\r\n # A<>B translated as (~A or B) & (~B or A) \r\n \r\n return [x for x in phrases]",
"def match_all_phrases(self, inphrases):\n# temporary - attempted matches\n attempted_matches = []\n phrase_attempts = {}\n phrase = \"\"\n step = \"A\"\n # ALL full phrases \n for phrase in inphrases:\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n #return match_choices, attempted_matches, phrase\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # Normalised version of ALL all full phrases \n phrases = [self.get_normalised_phrase(p) for p in inphrases]\n\n # 3 all prefix trigrams \n step = \"3\"\n for ngram in [p.split()[0:3] for p in phrases if len(p.split()) > 2]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 2 all prefix bigrams \n step = \"2\"\n for ngram in [p.split()[0:2] for p in phrases if len(p.split()) > 1]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 1 all valid words \n step = \"1\"\n for phr_elem in phrases:\n #print phr_elem.split()\n for phrase in [w.strip() for w in phr_elem.split() \n if self.isExcluded(w.strip()) == False and w.strip() not in phrase_attempts]:\n #print \"***\", phrase\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n return [], attempted_matches, phrase, None",
"def init_sets(fastapath):\n st = set()\n with open (fastapath, 'r') as f:\n\n for rec in SeqIO.parse(f, 'fasta'):\n sq = str(rec.seq)\n st.add(sq)\n\n return st",
"def parse(raw_query, EXPAND_SET = False):\r\n\t\r\n\t# tokenize and tag the query using nltk tools, use .lower() to standardize the input\r\n\ttokenized_query = nltk.word_tokenize(raw_query.lower())\r\n\ttagged_query = nltk.pos_tag(tokenized_query)\r\n\t\r\n\t#master_chunk = r\"Chunk: {(<VB\\w?>|<JJ>*|<RB\\w?>)<DT>?(<NN\\w?>+)}\" \r\n\t\r\n\t\r\n\t# master_chunk now captures prepositional phrase, as they are typically part of one thought.\r\n\t\r\n\tmaster_chunk = r\"Chunk: {((<JJ\\w?>*|<RB\\w?>*)<DT>?(<NN\\w?>+))(<IN>((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+)))*}\" # Regex to identify chunks that may be useful \r\n\t#\t\t\t\t\tmaster_chunk breakdown\r\n\t#\r\n\t#\tFirst half : ((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+))\r\n\t#\t<JJ\\w?>* | <RB\\w>?>* allows an arbitrary number of adjectives to precede the noun\r\n\t# \t\"\\w\" is \"any character\" and allows the capture of all JJ and RB tags, which include JJ, JJR, JJS, RB, RBR, and RBS\r\n\t#\t<DT>? allows for exactly one (1) or zero (0) determiner, often this will capture things like \"no\" and then a noun\r\n\t# \t(<NN\\w>+) captures one (1) or arbitrarily more nouns\r\n\t#\t\r\n\t#\tSecond half: (<IN>((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+)))*\r\n\t#\t<IN> captures prepostions \"of\", \"with\", and so on.\r\n\t# \tThe rest of the expression is the same as the first half \r\n\t# \tThe final * (kleene star) allows zero (0) or more prepositional phrases to be captured\r\n\t\r\n\t\r\n\tmaster_parser = nltk.RegexpParser(master_chunk) # Create the parser from the Regex\r\n\tmaster = master_parser.parse(tagged_query) # Parse the query previously tagged\r\n\t\r\n\tchunk_list = []\r\n\tkeywords = []\r\n\tfor phrase in master:\r\n\t\tif (not isinstance(phrase, tuple)): # all non-chunks are tuples, a chunk is a nltk.tree type\r\n\t\t\tchunk_list.append(phrase)\r\n\t\t\ttmp = \"\"\r\n\t\t\tfor word in phrase: # generate keyword phrases from the chunks\r\n\t\t\t\ttmp += word[0] + \" \"\r\n\t\t\t\r\n\t\t\ttmp = tmp[:-1] # Remove final space\r\n\t\t\tkeywords.append(tmp)\r\n\t\t\t\r\n\tif EXPAND_SET: # defualt is not to expand\r\n\t\t# combine the two lists, using set() to remove any repeated phrases\r\n\t\treturn list(set(generate_keywords(chunk_list) + keywords))\r\n\telse:\r\n\t\treturn keywords"
] |
[
"0.7484193",
"0.59974676",
"0.5932933",
"0.5930215",
"0.5569531",
"0.54553574",
"0.5405768",
"0.5362232",
"0.5279543",
"0.52570534",
"0.5246636",
"0.5230497",
"0.52222943",
"0.52207184",
"0.521228",
"0.5203409",
"0.5192013",
"0.51704144",
"0.5169558",
"0.5164506",
"0.51628876",
"0.51609635",
"0.5146492",
"0.51210517",
"0.51144135",
"0.51026845",
"0.5074145",
"0.5064432",
"0.50543725",
"0.5036161"
] |
0.7570357
|
0
|
Computes the Follow set for each node in the grammar. Assumes First sets have been computed. Populates the `follow` attribute of each node.
|
def compute_follow_sets(grammar):

    # 1. Place $ in FOLLOW(S), where S is the start symbol and $ is the input
    # right end marker.
    grammar.rules[grammar.start_symbol].follow = set({grammar.end_of_text})

    def lookup(rule):
        return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule

    def process_seq(key,seq,keep_going):
        """
        Add to Follow sets by processing the given Seq node.

        Args:
            key: Python string name for the production
            seq: a Seq rule for the production
            keep_going: A boolean

        Returns:
            True if a Follow set was modified.
            keep_going otherwise
        """

        # Process indirections through symbols
        seq = [lookup(i) for i in seq]

        last_index = len(seq)-1
        for bi in range(0,len(seq)):
            b = seq[bi]
            # We only care about nonterminals in the sequence
            if b.is_terminal() or b.is_empty():
                continue

            # If there is a production A -> alpha B beta
            # then everything in First(beta) except Empty is
            # added to Follow(B)
            beta = seq[bi+1:len(seq)]
            first_beta = first(grammar, beta)
            new_items = without_empty(first_beta) - b.follow
            if len(new_items) > 0:
                keep_going = True
                b.follow = b.follow.union(new_items)

            # If A -> alpha B, or A -> alpha B beta, where First(beta)
            # contains epsilon, then add Follow(A) to Follow(B)
            if (bi==last_index) or derives_empty(grammar.rules,beta):
                new_items = grammar.rules[key].follow - b.follow
                if len(new_items) > 0:
                    keep_going = True
                    b.follow = b.follow.union(new_items)

        return keep_going

    # Iterate until settled
    keep_going = True
    while keep_going:
        keep_going = False
        for key, rule in grammar.rules.items():
            if rule.is_terminal() or rule.is_empty():
                continue

            if isinstance(rule,Seq):
                keep_going = process_seq(key,rule,keep_going)
                continue

            if rule.is_symbol_name():
                keep_going = process_seq(key,[rule],keep_going)
                continue

            # Now process Choice over sequences:
            if isinstance(rule,Choice):
                for seq in [i.as_container() for i in rule]:
                    keep_going = process_seq(key,seq,keep_going)
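A small worked example of the fixpoint this routine converges to, using the usual textbook FOLLOW rules; the grammar below is illustrative, not taken from the original project.

# Grammar:  S -> A 'b' | 'c' ;   A -> 'a' | epsilon ;   start symbol S
# FOLLOW(S) = { $ }      rule 1: the end-of-text marker follows the start symbol
# FOLLOW(A) = { 'b' }    from S -> A 'b': everything in FIRST('b') = { 'b' }
# A second pass adds nothing, so keep_going stays False and the loop stops.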
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_follow(self):\n compute_follow_sets(self)",
"def followSet(self):\n FOLLOW = {}\n for A in self.N:\n FOLLOW[A] = set()\n FOLLOW[self.S] |= {'$$'}\n\n old = None\n while old != self._size_of_dict(FOLLOW):\n old = self._size_of_dict(FOLLOW)\n self._calcFollow(FOLLOW)\n\n return FOLLOW",
"def get_follow():\n for s in NON_TERMINAL_SET:\n sym = symbol_for_str(s)\n sym.follow_set = set()\n\n symbol_for_str('<s>').follow_set.update(set(['#']))\n\n while True:\n follow_set_is_stable = True\n for p in PRODUCTION_LIST:\n sym_left = symbol_for_str(p.left)\n if sym_left.is_terminal():\n continue\n for s in p.right:\n if s == 'null':\n continue\n if s.startswith('P'):\n continue\n if symbol_for_str(s).is_terminal():\n continue\n current_symbol = symbol_for_str(s)\n previous_follow_set = set(current_symbol.follow_set)\n next_is_nullable = True\n for s2 in p.right[p.right.index(s) + 1:]:\n if s2.startswith('P'):\n continue\n # For X -> sYt, Follow(Y) = Follow(Y) U First(t)\n next_symbol = symbol_for_str(s2)\n current_symbol.follow_set.update(next_symbol.first_set)\n if next_symbol.is_nullable:\n continue\n else:\n next_is_nullable = False\n break\n if next_is_nullable:\n # For X -> sYt, if t is nullable, Follow(Y) = Follow(Y) U\n # Follow(X)\n current_symbol.follow_set.update(sym_left.follow_set)\n\n if current_symbol.follow_set != previous_follow_set:\n follow_set_is_stable = False\n\n if follow_set_is_stable:\n break",
"def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow",
"def _calcFollow(self, FOLLOW):\n for A in self.N:\n for prod in self.P[A]:\n text = prod.split(sep=' ')\n for i in range(len(text) - 1):\n B = text[i].strip('[]')\n succ = text[i + 1]\n\n if B in self.N:\n FOLLOW[B] |= self.first(succ) - {'eps'}\n\n if 'eps' in self.first(succ) and B in self.N:\n FOLLOW[B] |= FOLLOW[A]\n\n if text[-1].strip('[]') in self.N:\n FOLLOW[text[-1].strip('[]')] |= FOLLOW[A]",
"def set_follow(self, follow):\n self.follow = follow",
"def generateFollowings(self):\n for f in self._genericGenerator(self.getFollowings):\n yield f",
"def get_follows(self):\n return [c.id for c in self.conf.follows]",
"def generateFollowers(self):\n for f in self._genericGenerator(self.getFollowers):\n yield f",
"def follow_following_followers(self):\n self.logger.log(\"starting follow_following_followers...\")\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n try:\n try:\n followw = perform_with_ran_delay(self.instagram.get_followers, acc, 150, 15,\n delayed=True)\n accountstofollow = followw[\"accounts\"]\n random.shuffle(accountstofollow)\n if len(accountstofollow) > 10:\n accountstofollow = accountstofollow[:10]\n for ac in accountstofollow:\n if not self.is_user_following(ac.identifier):\n self.add_following(ac.identifier)\n self.logger.log(\"following: {}\".format(ac.username))\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n finally:\n sleep(3)",
"def follow(self, followerId, star):\n self.followstar[followerId] = self.followstar.get(followerId, set()) | set([star])",
"def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])",
"def follows(self):\r\n return relationships.Follows(self)",
"def follow(self, follower, followee):\n pass",
"def follow(self, followerId, followeeId):\n if followerId not in self.follow_map:\n self.follow_map[followerId] = set()\n \n self.follow_map[followerId].add(followeeId)",
"def follows(self):\n return relationships.Follows(self)",
"def follow_followers(self):\n self.logger.log(\"starting follow_followers...\")\n follow = perform_with_ran_delay(self.instagram.get_followers, self.account.identifier, 150, 15, delayed=True)\n for acc in follow[\"accounts\"]:\n try:\n try:\n # print(\"{} follows me, do I follow him ? > {} \".format(acc.username,self.is_user_following(acc.identifier)))\n if not self.is_user_following(acc.identifier):\n if self.add_following(acc.identifier):\n self.logger.log(\"following: {}\".format(acc.username))\n else:\n self.logger.log(\"follow not working at the moment\")\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n continue\n finally:\n sleep(3)",
"def follows(self):\r\n request = http.Request('GET', '{0}/follows/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json",
"def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))",
"def _follow_relation_set(self, rel_expr,\n inverted):\n if not self.context.is_group(rel_expr.type_name):\n raise RelationNameError(rel_expr.type_name,\n 'Expression type is not a relation group.')\n g = self.context.get_group(rel_expr.type_name)\n if inverted == +1:\n with tf.name_scope('follow_group_%s' % rel_expr.type_name):\n return (self.follow(g.subject_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.object_rel)\n else:\n with tf.name_scope('follow_group_%s_inverse' % rel_expr.type_name):\n return (self.follow(g.object_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.subject_rel)",
"def set_followups(self, elem_name, item_name):\n self.elems[elem_name].followup = self.items[item_name]",
"def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)",
"def do_follows(self, target_urls, in_jump_wait=False) -> list:\n\n print('({}) follows: START'.format(self.name))\n\n self.print_last_events('follows')\n\n if not self.waited_enough_after_last_block('follows'):\n return []\n\n self.login()\n\n time_start = datetime.now() if self.time_start is None else self.time_start\n\n follows_counter = 0\n num_consecutive_exceptions = 0\n profiles_followed = []\n for my_profile in target_urls:\n\n print(my_profile)\n\n if self.enough_counts_for_today('follows'):\n break\n\n # Wait conditions\n if self.wait_for_max_counts_per_hour('follows', in_jump_wait=in_jump_wait) \\\n and in_jump_wait:\n break\n self.sleep_for_next_profile('follows')\n\n # Follow\n if self.test_on:\n follow_done, exception_cause = mock_follow_profile(self.name)\n else:\n follow_done, exception_cause = \\\n follow_profile(self.bot, my_profile, log_fail_folder=self.log_folder)\n\n # add profile to already followed list\n profiles_followed.append(my_profile)\n append_profile_as_row(self.excluded_profiles_file, my_profile)\n\n if follow_done:\n follows_counter += 1\n num_consecutive_exceptions = 0\n self.event_register.add_event(self.name, EVENT_FOLLOW, my_profile)\n else:\n num_consecutive_exceptions += 1\n self.event_register.add_event(self.name, EVENT_EXCEPTION, in_comments='follow')\n if exception_cause == ACTION_BLOCK:\n self.event_register.add_event(self.name, EVENT_BLOCK, in_comments='follow')\n self.wait_after_exception('follow')\n\n self.event_register.save()\n\n run_time = datetime.now() - time_start\n print('({}) follows: Total: {} in {}'\n .format(self.name,\n self.event_register.get_number_of_follows_since(self.name,\n datetime.now() - run_time),\n run_time))\n\n # Ending conditions\n if self.max_consecutive_exceptions_reached(num_consecutive_exceptions, 'follows'):\n raise # Simply crash so that the navigator can be inspected\n\n if self.bot_is_blocked(not follow_done, exception_cause, 'follows') \\\n or self.enough_run_time(run_time, 'follows') \\\n or self.enough_counts_in_run(follows_counter, 'follows'):\n break\n\n return profiles_followed",
"def follow(self, followerId: int, followeeId: int) -> None:\n # Time Complexity: O(1)\n if followerId != followeeId:\n if followerId not in self.followees:\n self.followees[followerId] = set()\n\n self.followees[followerId].add(followeeId)",
"def update_follow_set(model: Dict[str, Set[str]], word: str, follow_word: str) -> None:\n if word not in model:\n model[word] = {follow_word}\n\n else:\n model[word].add(follow_word)",
"def get_sharp_relations_for_sets(follows, set_1, set_2):\n for item_1 in set_1:\n for item_2 in set_2:\n if not get_sharp_relation(follows, item_1, item_2):\n return False\n return True",
"def get_followings_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (:Person {{email: '{email}'}})-[:FOLLOWS]->(follower) RETURN follower.full_name AS full_name, follower.email AS email, follower.profile_image AS profile_image\n \"\"\"\n return tx.run(query)",
"async def follow(follow):\n await follow.edit(\n f\"`FOLLOW {DEFAULTUSER} ON` \\n\\n\"\n f\"[InstaGram](https://www.instagram.com/mayur_karaniya) \\n\\n\"\n f\"[FaceBook](https://www.facebook.com/mkaraniya) \\n\\n\"\n f\"[YouTube](https://www.youtube.com/channel/UCeKQxQK7XZ3jGi3541uWATg?sub_confirmation=1) \"\n )",
"def get_posts_of_followings_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (:Person {{email: '{email}'}})\n -[:FOLLOWS]->(user:Person)\n -[:POSTED]->(post:Post)\n RETURN DISTINCT {{content:post.content, modified:post.modified, created:post.created, uuid:post.uuid, user_email:user.email}} AS posts\"\"\"\n return tx.run(query)",
"def reachable(self, node, adjacencyList, followOnAdjacencyList=None):\n visited = set()\n\n def dfs(fNode):\n if fNode not in visited:\n visited.add(fNode)\n list(map(dfs, adjacencyList[fNode]))\n if followOnAdjacencyList is not None:\n list(map(dfs, followOnAdjacencyList[fNode]))\n\n dfs(node)\n return visited"
] |
[
"0.742819",
"0.7092153",
"0.6631537",
"0.63742447",
"0.62971425",
"0.57530606",
"0.5679638",
"0.56712836",
"0.5613986",
"0.55428976",
"0.5406604",
"0.53708106",
"0.5330948",
"0.5318362",
"0.5316769",
"0.53076124",
"0.530323",
"0.52977115",
"0.5269209",
"0.525497",
"0.51698226",
"0.5073132",
"0.5072605",
"0.50033844",
"0.5002112",
"0.49941513",
"0.49723887",
"0.497167",
"0.49664518",
"0.49270236"
] |
0.7195605
|
1
|
Walk a JSON structure, yielding a new copy of the object. But for any dictionary 'd', first walk its contents, and then yield the result of calling dict_fn(d).
|
def walk(obj,dict_fn):
    if isinstance(obj,dict):
        result = dict()
        for key, value in obj.items():
            result[key] = walk(value, dict_fn)
        return dict_fn(result)
    if isinstance(obj,list):
        return [walk(i,dict_fn) for i in obj]
    return obj
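A hedged usage sketch; lower_keys and the sample data are invented for illustration and are not part of the original code.

def lower_keys(d):
    # Called on every dict only after its contents have already been walked.
    return {k.lower(): v for k, v in d.items()}

data = {"Name": "Ada", "Tags": [{"Kind": "person"}]}
print(walk(data, lower_keys))
# {'name': 'Ada', 'tags': [{'kind': 'person'}]}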
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def iterate(d, fun): # type: (Dict, Callable[[Any, Any], None]) -> None\n for key, value in d.items():\n if isinstance(value, dict):\n DictUtil.iterate(value, fun)\n else:\n fun(key, value)",
"def crawl_json(json):\n for key in json:\n if type(json[key]) is dict:\n for k in crawl_json(json[key]):\n yield k\n yield key",
"def walk_dict(dct):\n for k, v in dct.items():\n yield dct, k, v\n if isinstance(v, dict):\n for d_, k_, v_ in walk_dict(v):\n yield d_, k_, v_",
"def nested_dict_walker(fn, coll):\n return walk_values_rec(iffy(is_mapping, fn), coll)",
"def walk(fn, obj, *args, **kwargs):\n if type(obj) in [list, tuple]:\n return list(walk(fn, o, *args) for o in obj)\n if type(obj) is dict:\n return dict((walk(fn, k, *args), walk(fn, v, *args)) for k, v in\n obj.items())\n return fn(obj, *args, **kwargs)",
"def walk_map(d: dict, func: FunctionType):\n out = {}\n for k, v in d.items():\n if isinstance(v, (dict, defaultdict)):\n out[k] = walk_map(v, func)\n else:\n out[k] = func(v)\n return out",
"def flatten_json_struct(data, count_fields=[], datetime_fields=[]):\n for k,v in data.items():\n if v and type(v) != dict and type(v) != list:\n if k in datetime_fields and re_prog.match(v):\n #print('> yielding date {0}'.format(k))\n yield k, date_parser.parse(v).date()\n else:\n #print('> yielding value {0}: {1}'.format(k, v))\n yield k, v\n elif type(v) == list:\n if k in count_fields:\n #print('> yielding count of {0}'.format(k))\n yield k, len(v)\n else:\n new_data = { _generate_name(k,idx):val for idx,val in enumerate(v) }\n #print ('recursing %s' % new_data)\n for item in flatten_json_struct(new_data,\n count_fields=count_fields,\n datetime_fields=datetime_fields):\n #print('> yielding {0}: {1}'.format(item, type(item)))\n yield item[0], item[1] \n elif type(v) == dict:\n new_data = { _generate_name(k, k1): v1 for k1, v1 in v.items()}\n #print ('recursing %s' % new_data)\n for item in flatten_json_struct(new_data,\n count_fields=count_fields,\n datetime_fields=datetime_fields):\n #print('> yielding {0}: {1}'.format(item, type(item)))\n yield item[0], item[1]",
"def walk(d):\n for parent, key, leaf in _walk({}, None, d):\n yield (d, parent, key, leaf)",
"def graph_walk_dict_flat(indict, pre=None):\n pre = pre[:] if pre else []\n if isinstance(indict, dict):\n for key, value in indict.items():\n if isinstance(value, dict):\n for d in dict_generator(value, [key] + pre):\n yield d\n elif isinstance(value, list) or isinstance(value, tuple):\n for v in value:\n for d in dict_generator(v, [key] + pre):\n yield d\n else:\n yield pre + [key, value]\n else:\n yield indict",
"def nestedDictValues(d):\n for key in sorted(d.keys()):\n if isinstance(d[key], dict):\n yield from nestedDictValues(d[key])\n else:\n yield d[key]",
"def walk_json_data(patch=None, **kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.json'):\n with open(path) as f:\n text = f.read()\n if text:\n if patch:\n text = patch(text)\n try:\n yield path, name, text, json.loads(text)\n except json.decoder.JSONDecodeError:\n continue",
"def jsonTransformer() -> TransformationFunc:\n return partial(traverse, transformation_table=json_AST_transformation_table.copy())",
"def flatten_values(d):\n if isinstance(d, dict):\n for v in d.values():\n if isinstance(v, dict):\n yield from flatten_values(v)\n else:\n yield v\n else:\n yield d",
"def walk_tree(visitor, data_structure):\n if isinstance(data_structure, dict):\n for key in data_structure.keys():\n data_structure[key] = walk_tree(visitor, data_structure[key])\n elif isinstance(data_structure, list):\n for i in xrange(len(data_structure)):\n data_structure[i] = walk_tree(visitor, data_structure[i])\n else:\n data_structure = visitor(data_structure)\n return data_structure",
"def graph_walk_collection_flat(indict, pre=None):\n pre = pre[:] if pre else []\n \n # if isinstance(indict, dict):\n if type(indict) in [dict, OrderedDict]:\n for key, value in indict.items():\n # if isinstance(value, dict):\n if type(value) in [dict, OrderedDict]:\n for d in graph_walk_collection_flat(value, [key] + pre):\n yield d\n # elif isinstance(value, list) or isinstance(value, tuple):\n elif type(value) in [list, tuple]:\n for v in value:\n for d in graph_walk_collection_flat(v, [key] + pre):\n yield d\n else:\n yield pre + [key, value]\n else:\n yield indict",
"def flatten(d, path):\n\n if isinstance(d, dict):\n for k, v in d.items():\n yield from flatten(v, path + [k])\n else:\n yield (\".\".join(path), d)",
"def gen_dict_extract(key, var):\n if hasattr(var,'items'):\n for k, v in var.items():\n if k == key:\n yield v\n if isinstance(v, dict):\n for result in gen_dict_extract(key, v):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in gen_dict_extract(key, d):\n yield result",
"async def leaf_it(d):\n async for _parent, _key, leaf in _walk({}, None, d):\n yield leaf",
"def dict_path(my_dict, path=None):\n if path is None:\n path = \"\"\n for k, v in my_dict.items():\n newpath = path + (\".\" if path != \"\" else \"\") + k\n if isinstance(v, dict):\n for u in dict_path(v, newpath):\n yield u\n else:\n yield newpath, v",
"def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):\n jsonpath_dict = copy.deepcopy(config_dict)\n for dict_k, dict_v in jsonpath_dict.items():\n if isinstance(dict_v, dict):\n jsonpath_dict[dict_k] = dict_items_recursive_apply(\n dict_v, apply_method, **apply_method_parameters\n )\n elif any(isinstance(dict_v, t) for t in (list, tuple)):\n for list_idx, list_v in enumerate(dict_v):\n if isinstance(list_v, dict):\n jsonpath_dict[dict_k][list_idx] = dict_items_recursive_apply(\n list_v, apply_method, **apply_method_parameters\n )\n else:\n jsonpath_dict[dict_k][list_idx] = apply_method(\n dict_k, list_v, **apply_method_parameters\n )\n else:\n jsonpath_dict[dict_k] = apply_method(\n dict_k, dict_v, **apply_method_parameters\n )\n\n return jsonpath_dict",
"async def materialize_walk_obj(d) -> Tree:\n return await _materialize_walk_obj(d)",
"def iter_json(filedesc, **kwargs):\n\n try:\n while True:\n yield read_json(filedesc, **kwargs)\n except EOFError:\n pass",
"def iteritemsdeep(dct):\n for (key, val) in dct.items():\n if isinstance(val, dict):\n for (key_child, val_child) in iteritemsdeep(val):\n yield ((key,) + key_child, val_child)\n else:\n yield ((key,), val)",
"def mutate_dict_in_place(func, mapping):\n for key, value in mapping.items():\n if isinstance(value, dict):\n mutate_dict_in_place(func, value)\n else:\n mapping[key] = func(value)",
"def _dictitem_gen(self, index):\n # first call can be assumed to work on structure dict\n if index in self.struct['dict']: # \"dict\" is a standard dictionary, thus iterating over it is the same as iterating over the keys\n for idx in self.struct['dict'][index]: # it is always a list\n if idx == 'lifted':\n # recursive case\n for s in self.iter_withpseudo():\n if isinstance(s, Structure) and s.struct['liftedkeys']:\n for elem in s._dictitem_gen(index): # yield from in python 3.x:\n yield elem\n else:\n # base case\n elem = self.struct['list'][idx]\n previous = self.struct['list'][:idx]\n cur_leaf = sum(1 if s is None else s['n'] for s in previous)\n\n if elem is None: # leaf\n yield self.leaves[cur_leaf]\n else:\n yield Structure(struct=elem, leaves=self.leaves[cur_leaf : cur_leaf+elem['n']])",
"def recursive_compile(sf_dict):\n retval = {}\n for key, val in sf_dict.items():\n if isinstance(val, dict):\n retval[key] = recursive_compile(val)\n else:\n retval[key] = dense_evaluated_lookup(*val)\n return retval",
"async def _materialize_walk_obj(d) -> Tree:\n if isinstance(d, ViewModel):\n # Resolve the first level of awaitables\n edge_set = set(d.__visited_edges__)\n edges = await resolve_parallel_dict(d, edge_set)\n # Resolve all edges recursively\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in edges))\n for (k, _), val in zip(edges, vals):\n if k in edge_set:\n setattr(d, k, val)\n return d\n elif isinstance(d, dict):\n # Resolve the first level of awaitables\n items = await resolve_parallel_dict(d)\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in items))\n for (k, _), val in zip(items, vals):\n d[k] = val\n return d\n elif isinstance(d, primitive) or d is None:\n return d\n elif isinstance(d, PaginatedEdge):\n d.edges = await resolve_parallel_iterable(d.edges)\n return d\n elif isinstance(d, Iterable):\n resolved = await resolve_parallel_iterable(d)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif type(d) == types.AsyncGeneratorType:\n d_list = [i async for i in d] # TODO: Optimize\n resolved = await resolve_parallel_iterable(d_list)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif isawaitable(d) or callable(d):\n # TODO: Profile and optimize recursive call\n resolved = await async_resolve_field(d)\n return await _materialize_walk_obj(resolved)\n raise Exception(\"Invalid type: \" + str(type(d)))",
"def dict_or_list(key, dictionary):\n if type(dictionary) != 'str':\n for k, v in dictionary.items():\n if k == key:\n yield v\n elif isinstance(v, dict):\n for result in dict_or_list(key, v):\n yield result\n elif isinstance(v, list):\n for d in v:\n if isinstance(d, dict):\n for result in dict_or_list(key, d):\n yield result",
"def extract_dict(d, f):\n if len(f) == 1:\n return extract_value(f[0], d)\n else:\n return extract_dict(d[f[0]], f[1:])",
"def __iter__(self):\n\n result = []\n\n # d - dict, p - path (keys sequence)\n def recurs_iter(d, p=None):\n p = p or []\n\n # k - key, v - value\n for k, v in iteritems(d):\n next_p = p + [k]\n if isinstance(v, dict):\n recurs_iter(v, next_p)\n else:\n result.append(tuple(next_p))\n\n recurs_iter(self.__dict__)\n\n return iter(result)"
] |
[
"0.6695884",
"0.65684736",
"0.6518398",
"0.63584036",
"0.61934584",
"0.616072",
"0.60844105",
"0.5964013",
"0.59480333",
"0.5939359",
"0.59319097",
"0.58762056",
"0.5792758",
"0.57106143",
"0.5598589",
"0.5532778",
"0.5450643",
"0.5371491",
"0.5370872",
"0.53495574",
"0.52964616",
"0.529348",
"0.5292589",
"0.52730936",
"0.524737",
"0.5242883",
"0.5241437",
"0.52393025",
"0.523502",
"0.5186058"
] |
0.69596267
|
0
|
Recomputes self.str and self.hash
|
def rehash(self):
    self.str = "{}{}{}".format(LBRACE, " ".join(sorted([str(i) for i in self])), RBRACE)
    self.hash = self.str.__hash__()
    self.has_end_of_text = any([isinstance(i,EndOfText) for i in self])
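rehash() evidently belongs to a set-like container (the LR item-set code quoted among the negatives above refers to a LookaheadSet). A minimal stand-in for that context follows; LBRACE, RBRACE, EndOfText and the class shape are assumptions for illustration, not the original module's definitions.

LBRACE, RBRACE = "{", "}"          # assumed token spellings

class EndOfText:
    def __str__(self):
        return "EndOfText"

class LookaheadSet(set):
    """Set of terminals with a cached canonical string and hash."""
    def __init__(self, items=()):
        super().__init__(items)
        self.rehash()

    def rehash(self):
        self.str = "{}{}{}".format(LBRACE, " ".join(sorted([str(i) for i in self])), RBRACE)
        self.hash = self.str.__hash__()
        self.has_end_of_text = any([isinstance(i, EndOfText) for i in self])

ls = LookaheadSet({"ident", "("})
print(ls.str)                      # {( ident}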
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, str=None, hashfunc=None):\r\n self.str = str\r\n if hashfunc:\r\n self.hashfunc = hashfunc",
"def hash(self) -> str:\r\n ...",
"def __hash__(self):\n return hash(self.text)",
"def hash_me(cls, p_str, p_len=64):\n v_hash = str()\n v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len\n if v_len == EC.SHA512:\n v_hash = hashlib.sha512()\n elif v_len == EC.SHA256:\n v_hash = hashlib.sha256()\n elif v_len == EC.SHA224:\n v_hash = hashlib.sha224()\n elif v_len == EC.SHA1:\n v_hash = hashlib.sha1()\n\n v_hash.update(p_str.encode(\"utf-8\"))\n return v_hash.hexdigest()",
"def hash_string(self):\n return self._hash_string",
"def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return sha256(data_str.encode('utf-8')).hexdigest()",
"def hash(self) -> bytes:",
"def update_hash(self, h):\n # Generate a sequence of fragments that add up to the canonical\n # version of the expression.\n fragments = []\n self.collect_str_fragments(fragments)\n # Update the hash. Wrapping with 'node<...>' prevents the hash\n # from being extended in a way that would clash with something we can\n # generate. (Probably not an important concern but it doesn't hurt.)\n h.update(\"node<\")\n for f in fragments:\n h.update(f)\n h.update(\">\")",
"def __hash__(self):\n return hash(self.literals)",
"def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode",
"def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode",
"def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode",
"def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode",
"def __hash__(self):\n return hash(str(self))",
"def __hash__(self):\n return hash(str(self))",
"def __hash__(self):\n return hash(str(self))",
"def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]",
"def __hash__(self):\n if self._hash is None:\n self._hash = hash(self._scheme) ^ hash(self._host) ^ hash(self._port) ^ hash(self._path) ^ hash(self._query) ^ hash(self._isRegularURI)\n return self._hash",
"def __hash__(self):\n return hash(self.hash)",
"def hash(self, string):\n return self.__scaffydb.hash(string)",
"def __init__(self, hash_str, salt):\n self.hash = hash_str\n self.salt = salt",
"def __str__(self) -> str:\n return self.hash",
"def compute_hash(self) -> str:\r\n #block_dict = self.__dict__.pop('hash', None) # Remove hash field value before calculating hash\r\n block_dict = self.__dict__.copy()\r\n block_dict.pop('hash', None) # Remove hash field value before calculating hash\r\n block_string = json.dumps(block_dict, sort_keys=True).encode('utf-8')\r\n return sha256(block_string).hexdigest()",
"def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))",
"def get_hash(self) -> str:\n return self.__hash.hexdigest()",
"def hash_string(self, hash_string):\n\n self._hash_string = hash_string",
"def __hash__( self ):\n return hash( self.data )",
"def __hash__(self):\n return hash(self.joined())",
"def __hash__(self):\n return hash((self.get_first_name() + self.get_last_name() + self.get_birth_date()))",
"def __Hash(self):\n return self._Hash()"
] |
[
"0.68092525",
"0.675968",
"0.6637325",
"0.65762126",
"0.65433997",
"0.64874315",
"0.6398045",
"0.63752127",
"0.6372463",
"0.6358108",
"0.6358108",
"0.6358108",
"0.6358108",
"0.6302346",
"0.6302346",
"0.6302346",
"0.6286356",
"0.6232979",
"0.6173351",
"0.6162594",
"0.613785",
"0.6114179",
"0.6073105",
"0.6022508",
"0.6000481",
"0.59940684",
"0.5984121",
"0.59735775",
"0.59479845",
"0.59322894"
] |
0.7349005
|
0
|
Lazily creates an unclosed ItemSet out of the next_items tracked by this edge. If by_index_memo is not None, then find and return the previously saved ItemSet with the same core items, if one exists there. Returns a pair (bool, ItemSet): the bool is True if the ItemSet was newly created, and the ItemSet is the destination item set reached by following this edge
|
def NextItemSet(self,grammar,by_index_memo=None):
changed = False
if self.next_item_set_cache is None:
# Create the item set from the "next" items and associated lookaheads.
d = dict()
for item_id, next_and_lookahead in self.next.items():
d[next_and_lookahead[0]] = next_and_lookahead[1]
next_IS = ItemSet(grammar,d).close(grammar)
if (by_index_memo is None) or (next_IS.core_index not in by_index_memo):
self.next_item_set_cache = next_IS
changed = True
else:
original_IS = by_index_memo[next_IS.core_index]
self.next_item_set_cache = original_IS
return (changed, self.next_item_set_cache)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def LALR1(self, max_item_sets=None):\n\n # Part 1. Compute LALR(1) item sets\n\n # Mapping from a core index to an already-discovered item set.\n by_index = dict()\n\n root_item = self.MakeItem(LANGUAGE, self.rules[LANGUAGE][0],0)\n\n # An ItemSet can be found by any of the items in its core.\n # Within an ItemSet, an item maps to its lookahead set.\n\n root_item_set = ItemSet(self, {root_item: LookaheadSet({self.end_of_text})}).close(self)\n by_index[root_item_set.core_index] = root_item_set\n\n item_set_core_ids = set({root_item_set.core_index})\n\n dirty_set = item_set_core_ids.copy()\n keep_going = True\n #while len(dirty_set) > 0:\n while keep_going:\n keep_going = False\n #work_list = dirty_set.copy()\n #dirty_set = set()\n if max_item_sets is not None:\n if len(by_index) > max_item_sets:\n break\n # Sort the work list so we get deterministic ordering, and therefore\n # deterministic itemset core numbering.\n # Go backwards to try to explore the most recently changed items first.\n work_list = sorted(item_set_core_ids, reverse=True)\n for core_index in work_list:\n item_set = by_index[core_index]\n (changed,gotos) = item_set.gotos(self,by_index_memo=by_index)\n keep_going = keep_going | changed\n for (X, item_set_for_X) in gotos:\n if item_set_for_X.core_index not in by_index:\n item_set_core_ids.add(item_set_for_X.core_index)\n by_index[item_set_for_X.core_index] = item_set_for_X\n dirty_set.add(item_set_for_X.core_index)\n keep_going = True\n\n # Now this is a list of item_sets\n sorted_item_set_core_ids = sorted(item_set_core_ids)\n\n # Part 2. Compute the action table and conflicts.\n # Do this as a second pass because it's conceivable that an item set may\n # go from non-accepting to accepting during initial exploration\n # of the item sets.\n\n conflicts = []\n # Maps (item_set.core_index, terminal.reg_info.index) to an Action.\n action_table = dict()\n def addAction(item_set, terminal, action):\n isinstance(item_set, ItemSet) or raiseRE(\"expected ItemSet\")\n terminal.is_terminal() or raiseRE(\"expected terminal: \" + str(terminal))\n isinstance(action,Action) or raiseRE(\"expected action\")\n\n # Use indices, for speed.\n # But also keep the terminal prompting this action.\n action_key = (item_set.core_index,terminal.reg_info.index)\n if action_key not in action_table:\n action_table[action_key] = action\n else:\n prev_action = action_table[action_key]\n if prev_action != action:\n # Record the conflict, and only keep the original.\n conflicts.append(Conflict(item_set,terminal,prev_action,action))\n\n # Maps an item index to its reduction index.\n reduced_items = dict()\n # List, where element i is the Reduce object with index i\n reductions = []\n def make_reduce(item):\n if item.reg_info.index in reduced_items:\n return reductions[reduced_items[item.reg_info.index]]\n index = len(reduced_items)\n reduced_items[item.reg_info.index] = index\n result = Reduce(item,index)\n reductions.append(result)\n return result\n\n # The goto table for noterminals\n # Maps (item_set, nonterminal) to the next item set\n nonterminal_goto = dict()\n\n for item_set_core_id in sorted_item_set_core_ids:\n item_set = by_index[item_set_core_id]\n # Register Reduce and Accept actions\n for item_id, lookahead in item_set.id_to_lookahead.items():\n item = item_set.id_to_item[item_id]\n if item.is_accepting() and lookahead.includesEndOfText():\n addAction(item_set, self.end_of_text, Accept())\n if item.at_end() and (item.lhs.content != LANGUAGE):\n # Register reductions\n for terminal in lookahead:\n 
addAction(item_set, terminal, make_reduce(item))\n\n # Register Shift actions\n for xid, edge in item_set.goto.items():\n X = self.findByIndex(xid)\n item_set_for_X = edge.NextItemSet(self)[1]\n if X.is_terminal():\n # Can't be EndOfText by construction of the goto result\n isinstance(X,Token) or raiseRE(\"internal error: expected a token\")\n addAction(item_set, X, Shift(item_set_for_X))\n elif X.is_symbol_name():\n nonterminal_goto[(item_set.core_index,X)] = item_set_for_X\n\n item_sets = [by_index[i] for i in sorted_item_set_core_ids]\n\n return ParseTable(self,item_sets, action_table, nonterminal_goto, reductions, conflicts)",
"def contains_duplicate_fast_set(self, nums: List[int]) -> bool:\n visited = set()\n for i in nums:\n if i in visited:\n return True\n visited.add(i)",
"def LR1_ItemSets(self):\n\n # The root item is the one representing the entire language.\n # Since the grammar is in canonical form, it's a Choice over a\n # single sequence.\n root_item = self.MakeItem(LANGUAGE, self.rules[LANGUAGE][0],0)\n\n # An ItemSet can be found by any of the items in its core.\n # Within an ItemSet, an item maps to its lookahead set.\n\n root_item_set = ItemSet(self, {root_item: LookaheadSet({self.end_of_text})}).close(self)\n\n LR1_item_sets_result = set({root_item_set})\n\n dirty_set = LR1_item_sets_result.copy()\n while len(dirty_set) > 0:\n work_list = dirty_set.copy()\n dirty_set = set()\n # Sort the work list so we get deterministic ordering, and therefore\n # deterministic itemset core numbering.\n for item_set in sorted(work_list):\n (_,gotos) = item_set.gotos(self)\n for (X, dest_item_set) in gotos:\n if dest_item_set not in LR1_item_sets_result:\n LR1_item_sets_result.add(dest_item_set)\n dirty_set.add(dest_item_set)\n\n return sorted(LR1_item_sets_result,key=ItemSet.pretty_key)",
"def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...",
"def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None",
"def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it",
"def next(self):\n if not self.minimal:\n self.sets, self.unexplored = subsampled_expand(\n self.sets,\n self.max_num_sets_to_expand,\n self.unexplored,\n self.explored,\n self.features,\n self.partitions,\n self.seed)\n while self.sets:\n self.minimal, self.explored, self.infeasible = \\\n minimal_set_exploration(self.sets,\n self.unexplored,\n self.explored,\n self.infeasible,\n self.partitions)\n if self.minimal:\n break\n else:\n self.sets, self.unexplored = subsampled_expand(\n self.sets,\n self.max_num_sets_to_expand,\n self.unexplored,\n self.explored,\n self.features,\n self.partitions,\n self.seed)\n if not self.minimal:\n raise StopIteration()\n return self.minimal.pop()",
"def gotos_internal(self,grammar,by_index_memo=None):\n\n # Partition items according to the next symbol to be consumed, X,\n # i.e. the symbol immediately to the right of the dot.\n changed_initial = False\n if self.goto is None:\n self.goto = dict()\n # Create the initial set of edges, copying lookaheads\n for item_id, item in self.id_to_item.items():\n if item.at_end():\n continue\n X = item.next()\n if X.is_end_of_text():\n continue\n xid = X.reg_info.index\n if xid not in self.goto:\n self.goto[xid] = self.GotoEdge(X)\n edge = self.goto[xid]\n next_item = grammar.MakeItem(item.lhs, item.rule, item.position+1)\n edge.add(item,next_item,LookaheadSet(self.id_to_lookahead[item_id]))\n changed_initial = True\n\n # The first time around, construct the destination item sets for each edge.\n # On subsequent rounds, propagate lookaheads from our own ItemSet to next item sets.\n goto_list = []\n changed = changed_initial\n for edge in self.goto.values():\n (created, next_item_set) = edge.NextItemSet(grammar,by_index_memo=by_index_memo)\n if created:\n next_item_set.close(grammar)\n else:\n # Propagate lookaheads\n for src_item_id, (dest_item,stale_lookahead) in edge.next.items():\n src_lookahead = self.id_to_lookahead[src_item_id]\n dest_lookahead = next_item_set.id_to_lookahead[dest_item.reg_info.index]\n changed = changed | dest_lookahead.merge(src_lookahead)\n # Propagate to non-kernel items\n next_item_set.close(grammar)\n\n changed = changed | created\n goto_list.append((edge.x, next_item_set))\n\n return (changed,goto_list)",
"def contains_duplicate_full_slow_set(self, nums: List[int]) -> bool:\n return len(nums) != len(set(nums))",
"def mkset(item):\n if isinstance(item, set):\n return item\n elif item is None:\n return set()\n elif isIterable(item):\n return set(item)\n else:\n return set([item])",
"def belongs_to_set(self, node, set_nodes):\r\n rep = set_nodes[0]\r\n if rep.op.as_while != node.op.as_while:\r\n return False\r\n\r\n nsteps = node.inputs[0]\r\n try:\r\n nsteps = int(get_scalar_constant_value(nsteps))\r\n except tensor.NotScalarConstantError:\r\n pass\r\n\r\n rep_nsteps = rep.inputs[0]\r\n try:\r\n rep_nsteps = int(get_scalar_constant_value(rep_nsteps))\r\n except tensor.NotScalarConstantError:\r\n pass\r\n\r\n # Check to see if it is an input of a different node\r\n can_add = True\r\n for nd in set_nodes:\r\n if find_up(node, nd) or find_up(nd, node):\r\n can_add = False\r\n\r\n can_add = can_add and (node.op.truncate_gradient ==\r\n rep.op.truncate_gradient)\r\n can_add = can_add and (node.op.mode == rep.op.mode)\r\n if not node.op.as_while:\r\n return nsteps == rep_nsteps and can_add\r\n cond = node.op.outputs[-1]\r\n rep_cond = rep.op.outputs[-1]\r\n same_cond = scan_utils.equal_computations([cond], [rep_cond],\r\n node.op.inputs,\r\n rep.op.inputs)\r\n return same_cond and (nsteps == rep_nsteps) and can_add",
"def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set",
"def is_consistent(self, item):\n targets = set(ident for ident, node in self._nodes.iteritems() \\\n if node[item] == OCCUPIED)\n return self._check_consistency(item, [self.current], targets)",
"def connected(self, avoid=set()):\n if len(self) <= 1:\n return True\n cover = set()\n queue = {(self.nodes() - avoid).pop()} # Take some element not being avoided\n while queue:\n new = queue.pop()\n cover.add(new)\n for adjacent in new.parents() | new.children():\n if not (adjacent in cover or adjacent in avoid):\n queue.add(adjacent)\n if len(cover) == len(self) - len(avoid):\n return True\n else:\n return False",
"def __eq__(self, anotherset):\r\n if not isinstance(anotherset, LR0ItemSet):\r\n raise TypeError\r\n if len(self.itemlist) != len(anotherset.itemlist):\r\n return False\r\n for element in self.itemlist:\r\n if element not in anotherset.itemlist:\r\n return False\r\n return True",
"def __contains__(self, item: Any) -> bool:\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return True\n\n curr = curr.next\n\n return False",
"def mst(self):\n candidate = self.greedy()\n cycle = candidate.find_cycle()\n if not cycle:\n return candidate\n new_id, old_edges, compact = self.contract(cycle)\n merged = self.merge(compact.mst(), new_id, old_edges, cycle)\n return merged",
"def is_monotonic(items: Sequence) -> bool:\n prev_elements = set({items[0]})\n prev_item = items[0]\n\n for item in items:\n if item != prev_item:\n if item in prev_elements:\n return False\n prev_item = item\n prev_elements.add(item)\n\n return True",
"def register_item_set(self,item_set):\n assert isinstance(item_set,ItemSet)\n core = item_set.kernel_item_ids\n if core in self.item_set_core_index:\n return self.item_set_core_index[core]\n # Register it\n result = len(self.item_set_core_index)\n self.item_set_core_index[core] = result\n return result",
"def is_same_set(self, item1, item2):\n res = False\n for s in self._data:\n if item1 in s and item2 in s:\n res = True\n break\n return res",
"def has_item(self, item):\n return item in self.set",
"def has_item(self, item):\n return item in self.set",
"def getOneItemSet(self, transListSet):\n itemSet = set()\n for line in transListSet:\n for item in line:\n itemSet.add(frozenset([item]))\n return itemSet",
"def keep_item(self, content_item):\n return self._content_item_comparison_weak(\n content_item, self.touch_content_item\n )",
"def test_duplicate_equality(self):\r\n def duplicate_and_verify(source_usage_key, parent_usage_key):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n self.assertTrue(check_equality(source_usage_key, usage_key), \"Duplicated item differs from original\")\r\n\r\n def check_equality(source_usage_key, duplicate_usage_key):\r\n original_item = self.get_item_from_modulestore(source_usage_key, draft=True)\r\n duplicated_item = self.get_item_from_modulestore(duplicate_usage_key, draft=True)\r\n\r\n self.assertNotEqual(\r\n original_item.location,\r\n duplicated_item.location,\r\n \"Location of duplicate should be different from original\"\r\n )\r\n # Set the location and display name to be the same so we can make sure the rest of the duplicate is equal.\r\n duplicated_item.location = original_item.location\r\n duplicated_item.display_name = original_item.display_name\r\n\r\n # Children will also be duplicated, so for the purposes of testing equality, we will set\r\n # the children to the original after recursively checking the children.\r\n if original_item.has_children:\r\n self.assertEqual(\r\n len(original_item.children),\r\n len(duplicated_item.children),\r\n \"Duplicated item differs in number of children\"\r\n )\r\n for i in xrange(len(original_item.children)):\r\n if not check_equality(original_item.children[i], duplicated_item.children[i]):\r\n return False\r\n duplicated_item.children = original_item.children\r\n\r\n return original_item == duplicated_item\r\n\r\n duplicate_and_verify(self.problem_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.html_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)\r\n duplicate_and_verify(self.chapter_usage_key, self.usage_key)",
"def successorssets(repo, initialnode, cache=None):\n\n succmarkers = repo.obsstore.successors\n\n # Stack of nodes we search successors sets for\n toproceed = [initialnode]\n # set version of above list for fast loop detection\n # element added to \"toproceed\" must be added here\n stackedset = set(toproceed)\n if cache is None:\n cache = {}\n\n # This while loop is the flattened version of a recursive search for\n # successors sets\n #\n # def successorssets(x):\n # successors = directsuccessors(x)\n # ss = [[]]\n # for succ in directsuccessors(x):\n # # product as in itertools cartesian product\n # ss = product(ss, successorssets(succ))\n # return ss\n #\n # But we can not use plain recursive calls here:\n # - that would blow the python call stack\n # - obsolescence markers may have cycles, we need to handle them.\n #\n # The `toproceed` list act as our call stack. Every node we search\n # successors set for are stacked there.\n #\n # The `stackedset` is set version of this stack used to check if a node is\n # already stacked. This check is used to detect cycles and prevent infinite\n # loop.\n #\n # successors set of all nodes are stored in the `cache` dictionary.\n #\n # After this while loop ends we use the cache to return the successors sets\n # for the node requested by the caller.\n while toproceed:\n # Every iteration tries to compute the successors sets of the topmost\n # node of the stack: CURRENT.\n #\n # There are four possible outcomes:\n #\n # 1) We already know the successors sets of CURRENT:\n # -> mission accomplished, pop it from the stack.\n # 2) Node is not obsolete:\n # -> the node is its own successors sets. Add it to the cache.\n # 3) We do not know successors set of direct successors of CURRENT:\n # -> We add those successors to the stack.\n # 4) We know successors sets of all direct successors of CURRENT:\n # -> We can compute CURRENT successors set and add it to the\n # cache.\n #\n current = toproceed[-1]\n if current in cache:\n # case (1): We already know the successors sets\n stackedset.remove(toproceed.pop())\n elif current not in succmarkers:\n # case (2): The node is not obsolete.\n if current in repo:\n # We have a valid last successors.\n cache[current] = [(current,)]\n else:\n # Final obsolete version is unknown locally.\n # Do not count that as a valid successors\n cache[current] = []\n else:\n # cases (3) and (4)\n #\n # We proceed in two phases. Phase 1 aims to distinguish case (3)\n # from case (4):\n #\n # For each direct successors of CURRENT, we check whether its\n # successors sets are known. If they are not, we stack the\n # unknown node and proceed to the next iteration of the while\n # loop. 
(case 3)\n #\n # During this step, we may detect obsolescence cycles: a node\n # with unknown successors sets but already in the call stack.\n # In such a situation, we arbitrary set the successors sets of\n # the node to nothing (node pruned) to break the cycle.\n #\n # If no break was encountered we proceed to phase 2.\n #\n # Phase 2 computes successors sets of CURRENT (case 4); see details\n # in phase 2 itself.\n #\n # Note the two levels of iteration in each phase.\n # - The first one handles obsolescence markers using CURRENT as\n # precursor (successors markers of CURRENT).\n #\n # Having multiple entry here means divergence.\n #\n # - The second one handles successors defined in each marker.\n #\n # Having none means pruned node, multiple successors means split,\n # single successors are standard replacement.\n #\n for mark in sorted(succmarkers[current]):\n for suc in mark[1]:\n if suc not in cache:\n if suc in stackedset:\n # cycle breaking\n cache[suc] = []\n else:\n # case (3) If we have not computed successors sets\n # of one of those successors we add it to the\n # `toproceed` stack and stop all work for this\n # iteration.\n toproceed.append(suc)\n stackedset.add(suc)\n break\n else:\n continue\n break\n else:\n # case (4): we know all successors sets of all direct\n # successors\n #\n # Successors set contributed by each marker depends on the\n # successors sets of all its \"successors\" node.\n #\n # Each different marker is a divergence in the obsolescence\n # history. It contributes successors sets distinct from other\n # markers.\n #\n # Within a marker, a successor may have divergent successors\n # sets. In such a case, the marker will contribute multiple\n # divergent successors sets. If multiple successors have\n # divergent successors sets, a cartesian product is used.\n #\n # At the end we post-process successors sets to remove\n # duplicated entry and successors set that are strict subset of\n # another one.\n succssets = []\n for mark in sorted(succmarkers[current]):\n # successors sets contributed by this marker\n markss = [[]]\n for suc in mark[1]:\n # cardinal product with previous successors\n productresult = []\n for prefix in markss:\n for suffix in cache[suc]:\n newss = list(prefix)\n for part in suffix:\n # do not duplicated entry in successors set\n # first entry wins.\n if part not in newss:\n newss.append(part)\n productresult.append(newss)\n markss = productresult\n succssets.extend(markss)\n # remove duplicated and subset\n seen = []\n final = []\n candidate = sorted(((set(s), s) for s in succssets if s),\n key=lambda x: len(x[1]), reverse=True)\n for setversion, listversion in candidate:\n for seenset in seen:\n if setversion.issubset(seenset):\n break\n else:\n final.append(listversion)\n seen.append(setversion)\n final.reverse() # put small successors set first\n cache[current] = final\n return cache[initialnode]",
"def distinct(self):\n memory = set()\n\n def _distinct(iterator):\n while True:\n item = next(iterator)\n if item in memory:\n continue\n memory.add(item)\n return item\n return self.__class__(self, _distinct)",
"def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1",
"def all_same(items):\n \n return all(x == items[0] for x in items)",
"def find_set(self):\n return self._set_set(self._find_set())"
] |
[
"0.53293574",
"0.5068311",
"0.49573022",
"0.48379317",
"0.4810332",
"0.4803411",
"0.476375",
"0.4717968",
"0.4712506",
"0.46483055",
"0.46081868",
"0.45811048",
"0.45610908",
"0.45453298",
"0.45288816",
"0.45250455",
"0.4524672",
"0.45122492",
"0.45077547",
"0.4498581",
"0.44957194",
"0.44957194",
"0.44945663",
"0.44775078",
"0.44769195",
"0.44494554",
"0.44256693",
"0.44193044",
"0.44048515",
"0.4402796"
] |
0.6633974
|
0
|
Adds an item-to-lookahead mapping.
|
def internal_add(self,item,lookahead):
assert isinstance(item, Item)
assert isinstance(lookahead, LookaheadSet)
index = item.reg_info.index
assert isinstance(index,int)
assert index not in self.id_to_item
self.id_to_item[index] = item
self.id_to_lookahead[index] = lookahead
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add(self, item):\n self._dict[item] = item",
"def add_item(self, item: _T) -> None:\n if item not in self.item_to_index:\n self.item_to_index[item] = len(self.index_to_item)\n self.index_to_item.append(item)",
"def additemtoinventory(item):\n global ITEM_COUNT\n for i in range(0, 10): # first 10 items are weapons, (this code sux, need a better way of doing this)\n if ITEMTYPES[ITEM_LIST[ZERO_BASE_PLYR_POS]] == ITEMTYPES[i]: \n cur_weapon_strength = WEAPON_STRENGTHS[ITEMS[0]]\n new_weapon_strength = WEAPON_STRENGTHS[ITEMTYPES[i]]\n if new_weapon_strength > cur_weapon_strength:\n change_weapon(ITEMTYPES[i])\n ITEMS[0] = ITEMTYPES[i] # 'overwrite' the main weapon with the new one\n remove_item_from_map()\n return # exit here if item is weapon\n else:\n remove_item_from_map()\n return # remove the inferior weapon from the map and return\n ITEMS.append(ITEMTYPES[item])\n ITEM_COUNT = len(ITEMS)\n remove_item_from_map()",
"def create(self, mapItem: MapItem) -> int:\n pass",
"def add(self, item: Mapping[Hashable, Any], **kwargs: Any) -> None:\n self.contents.update(item, **kwargs)\n return",
"def add_item(self, item: int) -> None:\n self._antecedent.add(item)\n self._is_updated = False",
"def _add_item_by_item(self, item):\n self.item_list[item.call_number] = item",
"def add_addressitem(self, addressitem):\n self.addresses.append(addressitem)",
"def add_item(self, item: str) -> int:\n item = item.encode('utf-8')\n if item not in self.item2idx:\n self.idx2item.append(item)\n self.item2idx[item] = len(self.idx2item) - 1\n return self.item2idx[item]",
"def add_to_bag(self, item):\n self._bag.append(item)",
"def add(self, item):\n self.num_item += 1\n indexs = self.__get_indexs(item)\n for index in indexs:\n self.filter_bitarray[index] = True",
"def add(self, item):",
"def add_item ( self, offset ):\n list, index = self.get_info()\n index += offset \n item_trait = self.factory.trait_handler.item_trait\n value = item_trait.default_value()[1]\n self.value = list[:index] + [ value ] + list[index:]",
"def _lookahead(param, lookahead_ema, step, beta_lookahead=0.5, lookahead_every_nth_iter=4):\n condition = step % lookahead_every_nth_iter < 0.5 # == 0. but inexact to deal with roundoffs\n lookahead_ema = jnp.where(condition, beta_lookahead*lookahead_ema + (1. - beta_lookahead)*param, lookahead_ema)\n param = jnp.where(condition, lookahead_ema, param)\n return (param, lookahead_ema)",
"def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])",
"def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)",
"def add_item(items, coder, tag, start, n):\n if start is not None:\n # close opened items\n add_zero_item(items, coder, tag, start) # default tag\n items[tag][coder].append(item(b=start, l=n-start, v=1)) # found tag",
"def addMapping(self, protocol, match, result,\n chain=None, mapping_type='lfn-to-pfn'):\n entry = {}\n entry.setdefault(\"protocol\", protocol)\n entry.setdefault(\"path-match-expr\", re.compile(match))\n entry.setdefault(\"path-match\", match)\n entry.setdefault(\"result\", result)\n entry.setdefault(\"chain\", chain)\n self[mapping_type].append(entry)",
"def _add_mapping(self, mother_element: GraphElement,\n daughter_element: GraphElement) -> None:\n pass",
"def push(self, mapping):\n self.mappings.append(mapping)",
"def _add_test_items_for_transitions(\n self,\n items_map: Dict[int, TestItem],\n tz: Any,\n ) -> None:\n\n transitions = self._find_transitions(tz)\n for (left, right, only_dst) in transitions:\n left_item = self._create_test_item(\n left, 'a' if only_dst else 'A')\n self._add_test_item(items_map, left_item)\n\n right_item = self._create_test_item(\n right, 'b' if only_dst else 'B')\n self._add_test_item(items_map, right_item)",
"def add_item(self, item_id, item_title, score, filter_stopwords=False):\n with self._r.pipeline() as pipe:\n for prefix in self._prefixes(item_title, filter_stopwords=filter_stopwords):\n pipe.zadd(prefix, item_id, score)\n pipe.hset('$titles', item_id, item_title)\n pipe.execute()\n return True",
"def add_item(self, item: str) -> None:\n try:\n current_max = max(self.stoi.values())\n self.stoi[item] = current_max + 1\n except ValueError:\n self.stoi[item] = 0",
"def predict(self, item, col):\n nx = item.nxt\n assert self.grammar.is_nonterminal(nx)\n for rule in self.grammar.productions_for_name(nx):\n new_item = Item(rule, 0, col.i)\n col.add(new_item)",
"def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]",
"def add(self, item, issue):\n if self.has_key(item):\n self[item].append(issue)\n else:\n self[item] = [issue]\n return 1",
"def add_item(self, item):\n self.items.append(item)",
"def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' % (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots",
"def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value",
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)"
] |
[
"0.5171958",
"0.49563116",
"0.49124083",
"0.4861802",
"0.48607472",
"0.48448393",
"0.48432246",
"0.48409677",
"0.48383635",
"0.4831507",
"0.482311",
"0.4807569",
"0.48044515",
"0.48009375",
"0.47910455",
"0.47871062",
"0.47762462",
"0.47709268",
"0.4750976",
"0.47148854",
"0.47068116",
"0.46599847",
"0.464623",
"0.46454665",
"0.46446556",
"0.46328577",
"0.4612805",
"0.4607054",
"0.4598795",
"0.45974302"
] |
0.7629796
|
0
|
Returns a short string, based only on core_index. Assumes core_index has been computed.
|
def short_str(self):
return "#{}".format(self.core_index)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def index_as_string(self):\n return self.index().to_string() if self.index() else None",
"def core_name(self):\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size)\n return ctypes.string_at(buf).decode()",
"def shorten_cores(cores):\n cores = sorted(list(cores))\n if len(cores) == 0:\n return ''\n core_buffer = ''\n start = 0\n while start < len(cores):\n cont_seq = find_max_continous_sequence(cores, start)\n start += len(cont_seq)\n if len(cont_seq) > 1:\n core_buffer += ',%d-%d' % (cont_seq[0], cont_seq[-1])\n else:\n core_buffer += ',%d' % cont_seq[0]\n return core_buffer[1:]",
"def _amd_index(sysfs_gpu_name):\n drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]\n return drop_prefix.split('/')[0]",
"def stringify_short(self):\n return self.stringify()",
"def index_id(i):\n return f\"(i={i})\"",
"def getWeatherString(index):\n return Texts.weather_titles[index]",
"def get_index_str(idxs, discard, cap, header=None):\n if header is None:\n header = 'Indexes of samples from mcmc chain ' \\\n f'(after slicing: discard={discard}, cap={cap})'\n string = f'{header}\\n'\n\n for i in idxs:\n string += f'{i}\\n'\n return string",
"def _major_version(self):\n version_tuple = StrictVersion(self.plugin.version).version\n major = '.'.join(map(str, version_tuple[:2]))\n\n return major",
"def __str__(self):\n return \"{}_human\".format(self.index)",
"def get_short_code():\n return rh.get_short_code(request)",
"def strIdx(idx):\n if not isinstance(idx, (int, np.integer)):\n raise ValueError(\"Index must be an integer.\")\n\n return str(idx) if idx >= 0 else str(-idx) + u'\\u0305'",
"def index_to_string(index):\n if index:\n s = \"/\".join(index)\n return Quote(s)\n else:\n return \".\"",
"def index2str(index_input, num_char=4, prepend_char='0'):\n\n index_str = str(index_input)\n num_to_prepend = num_char - len(index_str)\n new_str_index = []\n\n for i in range(num_to_prepend):\n new_str_index.append(prepend_char)\n\n new_str_index.append(index_str)\n index_str = ''.join(new_str_index)\n\n return(index_str)",
"def get_index_text(self, crate, module, impl, name):\n return _('%s (Rust function)') % name",
"def get_label_string(self, index_list):\n\n blabel = [bool(x) for x in index_list]\n blabel_string = list(compress(self.label_strings, blabel))\n\n return blabel_string",
"def _get_interleving(self, index):\n try:\n index = self._char_indexes[index - 1]\n except IndexError:\n return \"\"\n s = \"\"\n while True:\n index += 1\n if index in self._char_indexes:\n break\n elif index in self._code_indexes:\n s += self._raw_string[index]\n else:\n break\n return s",
"def versionstr():\n return \"%d.%d.%d%s\" % (version[0], version[1], version[2],\n '-' + gitstr() if gitstr() else '')",
"def __getitem__(self, index):\n return str(self.cpf[index])",
"def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)",
"def get_index_text(self, crate, module, impl, name):\n return _('%s (Rust struct)') % name",
"def reduced_word(self, index_set = None, positive = True):\n return self.to_dominant_chamber(index_set=index_set,positive=positive,get_direction = True)[1]",
"def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]",
"def get_major_version(version):\n return str(check_version(version)[0])",
"def _get_abs_string_index(self, idx):\r\n idx = operator.index(idx)\r\n if not (-len(self) <= idx < len(self)):\r\n raise IndexError('index {} is out of range'.format(idx))\r\n if idx < 0:\r\n idx += len(self)\r\n return str(idx)",
"def type_core(self):\n type_core = ' '.join(['{}'.format(atom.atom_type_index) for atom in self.atom_types\n if 'shell' not in atom.label])\n return type_core",
"def output(index: int = 0) -> str:\n return outputs()[index]",
"def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()",
"def _index_to_unicode(cls, index: int) -> str:\n return \"\".join(cls._unicode_subscripts[int(_)] for _ in str(index))",
"def shortHostname(self) -> str:\n\t\treturn self.hostname[0]"
] |
[
"0.5660094",
"0.562463",
"0.5596635",
"0.53800434",
"0.5312243",
"0.53054535",
"0.5275866",
"0.5229172",
"0.5226107",
"0.5148643",
"0.51462585",
"0.51282895",
"0.51020956",
"0.5039063",
"0.5014327",
"0.50048393",
"0.49870846",
"0.4973255",
"0.49626827",
"0.4954054",
"0.49522457",
"0.4943293",
"0.49416643",
"0.49173912",
"0.4905267",
"0.49001598",
"0.4895828",
"0.48907703",
"0.48870784",
"0.48786315"
] |
0.77697337
|
0
|
Returns True if the parser action for this item set should be 'accept'.
|
def is_accepting(self):
for item_id, lookahead in self.id_to_lookahead.items():
if lookahead.includesEndOfText():
item = self.id_to_item[item_id]
if item.is_accepting():
return True
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def isAccepted(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def is_accepting(self):\n return (self.position == 1) and (self.lhs.content == LANGUAGE)",
"def is_action(self) -> bool:\n return self.is_action_str(self.content)",
"def accepted(self):\n return self._accepted",
"def match_action(self, action):\n\n return hasattr(self, self._action_handler_name(action))",
"def acceptClicks(self, button):\n if not self.acceptable:\n return False\n if button not in self.__clickItems:\n self.__clickItems[button] = self.currentItem\n return True\n return False",
"def is_accepted(self, rule, method, **options):\n\n raise CoreNotImplementedError()",
"def can_accept(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can override / update decisions, if required\n # But we still need to have had a offer made\n if self.status in ['G', 'A', 'N']:\n return True\n # Applicants can only decide on granted applications\n if self.status == 'G':\n if self.applicant == user:\n return True\n return False",
"def is_accepted_symbol(self, symbol: str) -> bool:\n return symbol in self.accepted_symbols",
"def accept(self):\n self.accepted = True\n self.acceptedItem = self.currentItem",
"def accept(self):\n self.accepted = True\n self.acceptedItem = self.currentItem",
"def is_acceptable(self):\n\n return self.signal_type == self.target_signal_type",
"def __bool__(self):\n return bool(self._actions)",
"def accepts(request, media_type):\r\n accept = parse_accept_header(request.META.get(\"HTTP_ACCEPT\", \"\"))\r\n return media_type in [t for (t, p, q) in accept]",
"def is_rating_allowed(self):\n return self._is_action_allowed('rating')",
"def accept(self):\n return self._accept",
"def acceptAction(self, actId: ActionId)->bool:\n return actId in self._act_ids",
"def client_accepts_json(self):\n\n accept = self.get_header('Accept')\n if accept is not None:\n return ('application/json' in accept) or ('*/*' in accept)\n\n return False",
"def is_acceptable(self):\n\n return not self.created and self.signal_type == self.target_signal_type",
"def set_accepted(self):\n self.logger.info(\"status: ACCEPTED\")\n self._callback('on_accepted')\n return self.update_response(self.encoder.encode_accepted())",
"def is_accept_type(file_name):\n bare_name, file_extension = os.path.splitext(file_name)\n for ext in ACCEPTED_FILES:\n if file_extension.lower() == ext:\n return True\n return False",
"def browse_target(self):\n return self.type in ('a', 's')",
"def is_Scan_allowed(self):\n handler = self.get_command_object(\"Scan\")\n return handler.check_allowed()",
"def accepted_terms(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"accepted_terms\")",
"def is_auto_approval_allowed(self):\n\n if not self.upload:\n raise ImproperlyConfigured(\n 'Need an upload to call is_auto_approval_allowed()'\n )\n\n return self._is_action_allowed('auto_approval')",
"def setAccepted(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def is_acceptable(self):",
"def handle_accept(self):\r\n pass",
"def authorize(self, action, author_id=None):\n if action not in CHANGE_TYPES:\n return False\n return True",
"def acceptDrags(self, button):\n if not self.acceptable:\n return False\n if button not in self.__dragItems:\n self.__dragItems[button] = self.currentItem\n return True\n return False"
] |
[
"0.6501114",
"0.63528556",
"0.6297905",
"0.6200649",
"0.6059053",
"0.5967852",
"0.5967003",
"0.5911329",
"0.5838699",
"0.58103883",
"0.58103883",
"0.5795249",
"0.57933563",
"0.57908297",
"0.5663897",
"0.56602097",
"0.5613129",
"0.55470943",
"0.5518393",
"0.5437106",
"0.5402274",
"0.53513825",
"0.5320534",
"0.52706873",
"0.52220017",
"0.5214486",
"0.5212397",
"0.5168381",
"0.5166468",
"0.5166344"
] |
0.6434725
|
1
|
Loads a grammar from text. The text is assumed to be a JSON object representing a Treesitter grammar.
|
def Load(json_text, start_symbol, ignore='_reserved'):
g = Grammar(json_text, start_symbol, ignore=ignore)
g.canonicalize()
g.compute_first()
g.compute_follow()
return g
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_grammar(path):\n return SCFG(iterrules(smart_ropen(path)))",
"def _load_parser(self, grammar: str, protocol: Protocol) -> None:\n self.parser = parsley.makeGrammar(grammar, {\n 'punctuation': string.punctuation,\n 'ascii_uppercase': string.ascii_uppercase,\n 'ascii_lowercase': string.ascii_lowercase,\n 'itertools': itertools,\n\n 'Art': Art,\n 'ArtField': ArtField,\n 'Field': Field,\n 'RelLoc': RelLoc,\n 'Names': Names,\n\n 'protocol': protocol,\n 'Boolean': Boolean,\n 'Size': Size,\n 'ArgumentExpression': ArgumentExpression,\n 'MethodInvocationExpression': MethodInvocationExpression,\n 'ConstantExpression': ConstantExpression,\n 'FieldAccessExpression': FieldAccessExpression,\n 'ThisExpression': ThisExpression,\n 'IfElseExpression': IfElseExpression,\n })",
"def test_grammar_parse():\n print u\"%s: Grammar test\" % (__file__, )\n print u\"Deriving grammar from parsed TIGER corpus sentences\"\n #tiger_corpus = TigerCorpusReader()\n tiger_corpus = _cached(None, CORPUS_PATH, TigerCorpusReader)\n grammar_parser = tiger_corpus.viterbi_parser(False)\n grammar_parser.trace()\n\n text = nltk.word_tokenize(u\"Der Hase springt über den Baum, der sehr hoch gewachsen ist.\")\n #text = nltk.word_tokenize(u\"Der kleine gelbe Hund beobachtete die Katze.\")\n text = nltk.word_tokenize(u\"Der kleine Hund blickte zu der Katze.\")\n print u\"Parsing unknown text\"\n try:\n tree = grammar_parser.parse(text)\n if tree:\n tree.draw()\n print u\"Printing parse tree for text...\"\n print unicode(tree)\n except ValueError as e:\n print u\"Input contains words not known by grammar!\"\n print u\"%s\" % e",
"def from_text(text):\n return parse(text)",
"def from_string(representation):\r\n gramm = Grammar()\r\n\r\n for rule in representation.strip().split('\\n'):\r\n gramm._add_rule(rule)\r\n\r\n return gramm",
"def load_grammar(\n gt: str = \"Grammar.txt\",\n gp: Optional[str] = None,\n save: bool = True,\n force: bool = False,\n logger: Optional[Logger] = None,\n) -> Grammar:\n if logger is None:\n logger = logging.getLogger(__name__)\n gp = _generate_pickle_name(gt) if gp is None else gp\n if force or not _newer(gp, gt):\n g: grammar.Grammar = pgen.generate_grammar(gt)\n if save:\n try:\n g.dump(gp)\n except OSError:\n # Ignore error, caching is not vital.\n pass\n else:\n g = grammar.Grammar()\n g.load(gp)\n return g",
"def _load_parser_file(self, filename: str, protocol: Protocol):\n with open(filename) as fp:\n grammar = fp.read()\n self._load_parser(grammar, protocol)",
"def read(cls, text):\n\n\t\treturn cls._parse(cls._tokenize(text))",
"def parse(self, text: str) -> Tree:\n return self.parser.parse(text)",
"def parse(grammar, text):\n return _coconut_tail_call(grammar.parseWithTabs().parseString, text)",
"def __init__(self, *args, **kwargs):\r\n Grammar.__init__(self)\r\n dict.__init__(self, *args, **kwargs)",
"def load_text(txt_path):\n with open(txt_path, 'r') as json_file:\n data = json_file.read()\n content = json.loads(data)\n \n return content",
"def parse(cls, text):\n # Ensure that there are no extraneous braces to confuse things\n if text.count('{') != 1 or text.count('}') != 1:\n raise ParsingError(\n 'Invalid syntax for a relation constraint declaration. '\n 'Expected a single block enclosed in curly braces.')\n\n # Get the position of each curly brace so we can slice the input text\n openingPos = text.find('{')\n closingPos = text.find('}')\n\n # Slice up to the opening brace to get the name of the constraint\n name = text[:openingPos].strip()\n if not name:\n raise ParsingError(\n 'Invalid syntax for a relation constraint declaration. '\n 'Expected the block to be explicitly named.')\n\n # Collect the individual declarations (either box or connection)\n # line-by-line\n boxDeclarationsText = []\n connectionDeclarationsText = []\n for line in cls.splitBodyLines(text[openingPos+1:closingPos]):\n if '->' in line:\n connectionDeclarationsText.append(line)\n else:\n boxDeclarationsText.append(line)\n\n # Parse the collected input text and use the resulting data structures\n # to construct a new RelationSyntax object\n return RelationSyntax(\n name,\n [BoxSyntax.parse(t) for t in boxDeclarationsText],\n [ConnectionSyntax.parse(t) for t in connectionDeclarationsText])",
"def __init__(self, grammar, trace=...):\n ...",
"def load_model():\n logging.info(\"Load language model...\")\n ngram_arpa_t = pkg_resources.resource_filename(\"hwrt\", \"misc/ngram.arpa.tar.bz2\")\n with tarfile.open(ngram_arpa_t, \"r:bz2\") as tar:\n tarfolder = tempfile.mkdtemp()\n tar.extractall(path=tarfolder)\n ngram_arpa_f = os.path.join(tarfolder, \"ngram.arpa\")\n with open(ngram_arpa_f) as f:\n content = f.read()\n ngram_model = NgramLanguageModel()\n ngram_model.load_from_arpa_str(content)\n return ngram_model",
"def _read_grammar(filename):\r\n with open(filename, 'r') as file:\r\n data = file.read()\r\n\r\n return data",
"def test_load_text():\n # Create lexer without value\n lexer = lex._lexer(None, None)._load_text(\"TEST\")\n\n # Check if the loaded text\n assert lexer._original_text == \"TEST\" and lexer._text_to_process == \"TEST\"",
"def parse(grammar: Dict[str, Tuple[List[str]]], text: str):\n\n if not text:\n return None\n\n return parse_atom(grammar, 'Wrap', text)",
"def demo_legacy_grammar():\n from nltk.grammar import parse_fcfg\n\n g = parse_fcfg(\"\"\"\n % start S\n S[sem=<hello>] -> 'hello'\n \"\"\")\n print \"Reading grammar: %s\" % g\n print \"*\" * 20\n for reading in batch_interpret(['hello'], g, semkey='sem'):\n syn, sem = reading[0]\n print\n print \"output: \", sem",
"def load_json_str(self, json_text: str):\n\n self.data = json.loads(json_text)",
"def __init__(self, grammar_file=None):\n if grammar_file is None:\n self.grammar = pygram.pattern_grammar\n self.syms = pygram.pattern_symbols\n else:\n self.grammar = driver.load_grammar(grammar_file)\n self.syms = pygram.Symbols(self.grammar)\n self.pygrammar = pygram.python_grammar\n self.pysyms = pygram.python_symbols\n self.driver = driver.Driver(self.grammar, convert=pattern_convert)",
"def load(cls, path: str) -> 'Vocab':\n with open(path, 'r', encoding='utf-8') as f:\n return cls.from_json(f.read())",
"def from_text(cls, text):\n raw = decode_b64(json.loads(text))\n raw[0] = Code(raw[0]) # make it an object of type Code\n return cls(*raw)",
"def parseGrammarFile(fname):\n\n # if anything goes wrong, skip the rest of the block\n # to know more, search for 'python exception handling'\n try:\n fp = open(fname, 'r')\n\n g = {\n 'terms': [], # list of terminals\n 'rnames': [], # list of variables\n 'start': \"\", # starting symbol\n 'rules': {}, # rnames and their lists of productions\n }\n\n g['terms'] = parseTerms(fp)\n g['rnames'] = parseVars(fp)\n g['start'] = parseStart(fp)\n g['rules'] = parseRules(fp)\n\n fp.close()\n return g\n\n # re-raise to __main__\n except:\n raise",
"def import_grammar(path):\n grammar_name = os.path.basename(path).replace(\".py\", \"\")\n grammar_file = f'restler_grammar_{grammar_name}_{os.getpid()}.py'\n\n # import req_collection from given grammar\n sys.path.append(os.path.dirname(path))\n grammar = importlib.import_module(grammar_name)\n req_collection = getattr(grammar, \"req_collection\")\n # copy grammar inside experiment's folder (for debugging purposes mainly)\n try:\n target_path = os.path.join(logger.EXPERIMENT_DIR, grammar_file)\n shutil.copyfile(path, target_path)\n except shutil.Error:\n pass\n\n return req_collection",
"def parse (self, filename, verbose=False) :\n\t\tout_grammar = Grammar()\n\t\tself.preproc.addToQueue (filename)\n\n\t\twhile not self.preproc.queueIsEmpty() :\n\n\t\t\t#tokenize grammar source\n\t\t\tfilename = self.preproc.queue[0]\n\t\t\tsource = io.gettextfilecontent (filename)\n\t\t\tlang = GenericGrammarTokenizer._tokenize (\n\t\t\t\tTokenizer (GenericGrammarTokenizer.grammartokens), \n\t\t\t\tsource,\n\t\t\t\tverbose\n\t\t\t)\n\t\t\t\n\t\t\t#preprocessor here (one pass preprocessor)\n\t\t\tlang.tokenized = self.preproc.preprocess (filename, lang.tokenized)\n\n\t\t\t#text tokens are needed for next step\n\t\t\ttxtok = transformtosource (lang.tokenized)\n\t\t\t#tokenize in abstract grammar tokens\n\t\t\tgram = GenericGrammarTokenizer._tokenize (\n\t\t\t\tTokenizer (GenericGrammarTokenizer.genericgrammarprodrules),\n\t\t\t\ttxtok,\n\t\t\t\tverbose\n\t\t\t)\n\n\t\t\t##make production rules\n\t\t\tgrammar = Grammar ()\n\t\t\tresult = grammar.makegrammar (\n\t\t\t\tgram.tokenized,\n\t\t\t\tlang.tokenized,\n\t\t\t)\n\t\t\tif (result == []) :\n\t\t\t\tif verbose : print (grammar)\n\t\t\t\tout_grammar.merge (grammar)\n\t\t\telse :\n\t\t\t\tio.Printer.showerr (result)\n\t\t\t\treturn Grammar()\n\n\t\treturn out_grammar",
"def fromstring(text, schema=None):\n if schema:\n parser = objectify.makeparser(schema=schema.schema)\n return objectify.fromstring(text, parser=parser)\n else:\n return objectify.fromstring(text)",
"def from_string(data, format):\n # Using ConjunctiveGraph instead of Graph for nquads support.\n graph = rdflib.ConjunctiveGraph()\n graph.parse(data=data, format=format)\n return graph",
"def from_string(s):\n r_rule = re.compile(\"^(\\w+): (.*)$\")\n try:\n parent_tag, rules_string = s.split(\" -> \")\n rules = []\n for i in rules_string.split(\",\"):\n optional = i.strip().startswith(\"(\")\n match = r_rule.match(i.strip().strip(\"()\"))\n assert match\n tag, rule = match.groups()\n rules.append(\n {\"optional\": optional, \"tag\": tag, \"rule\": rule})\n return Grammar(parent_tag, rules)\n except (ValueError, AssertionError):\n raise Exception(\"Can not parse.\")",
"def __init__(self, text: Union[str, Text, None] = None):\n if isinstance(text, str):\n text = TNTParser().parse(text)\n if text is not None:\n self.text = text"
] |
[
"0.6654417",
"0.60497326",
"0.59955835",
"0.5882323",
"0.58619785",
"0.5717015",
"0.55960184",
"0.559184",
"0.5571526",
"0.5547955",
"0.5544798",
"0.5487034",
"0.5477157",
"0.54342926",
"0.54124516",
"0.53949016",
"0.53861123",
"0.53754157",
"0.53649855",
"0.5353797",
"0.53331476",
"0.5329949",
"0.5277446",
"0.52618253",
"0.52562433",
"0.52193904",
"0.5215538",
"0.5200901",
"0.5196745",
"0.5195281"
] |
0.748296
|
0
|
Finds a Rule by its Python string name.
|
def find(self, rule_name):
return self.rules[rule_name]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def lookup_name(self, name):\n if name not in self.rule_dict:\n raise PegvmException(\"Failed to find rule named '{}'\".format(name))\n return self.rule_dict[name]",
"def get_rule(self, name):\n if not self._rules:\n raise NoRulesException()\n if not name in self._rules:\n raise UnknownRuleException(name)\n return self._rules[name]",
"def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule",
"def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule",
"def _get_rule(self, rule):\n for kbrule in self.rules:\n if rule == kbrule:\n return kbrule",
"def find(self, rules):\n for rule in rules:\n if self == rule:\n return rule\n return None",
"def get_rule(self, rule_name):\n assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'\n return self.rules[rule_name]",
"def get_rule(self, name):\n\n return self._control_manager.get_rule(name)",
"def get_ruleset(name: str = \"standard\") -> RuleSet:\n std_rules = _load_standard_rules()\n lookup = {std_rules.name: std_rules}\n # Return a copy in case someone modifies the register.\n return lookup[name].copy()",
"def lookup(name):",
"def lookup(name):",
"def find_byname(self, name):\n name = name.lower()\n try:\n return self.__byname[name]\n except KeyError:\n raise BadColor(name)",
"def rule_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule_name\")",
"def rule_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule_name\")",
"def find(name: str):\n return _functions[name]",
"def parseRule(s):\n return Parser._convertRule(ruleNT.parseString(s))",
"def lookup_pattern(name):\n\treturn _registered_patterns[name]",
"def rule_name(self) -> str:\n return pulumi.get(self, \"rule_name\")",
"def rule_name(self) -> str:\n return pulumi.get(self, \"rule_name\")",
"def get_rule(rule_id):\n\n rule = get_db().execute('SELECT i.*, c.name as category_name FROM ruleset i JOIN categories c ON i.category_id = c.id WHERE i.id = ?', (rule_id, )).fetchone()\n\n return rule",
"def rule_name(self) -> typing.Optional[str]:\n return self._values.get('rule_name')",
"def lookup_by_name(cls, name):\n return cls.__by_name[name]",
"def get_rule(self, subverbify, short_name):\n try:\n rules = self._cf.get(subverbify._id36)\n except tdb_cassandra.NotFoundException:\n return None\n rule = rules.get(short_name, None)\n if not rule:\n return None\n rule = json.loads(rule)\n rule[\"short_name\"] = short_name\n return rule",
"def find_by_name(self, name):\n return self.get(name)",
"def _match_rule(self, name: str, rule: str) -> bool:\n if not rule in self._regexps:\n regexps = []\n for part in os.path.normpath(rule).split(os.sep):\n if part:\n pattern = re.escape(part).replace(\"\\*\", \".*\").replace(\"\\?\", \".\")\n regexp = re.compile(f\"^{pattern}$\", re.IGNORECASE)\n else:\n regexp = None\n regexps.append(regexp)\n self._regexps[rule] = regexps\n for i, part in enumerate(os.path.normpath(name).split(os.sep)):\n try:\n regexp = self._regexps[rule][i]\n except:\n regexp = None\n if part:\n if not regexp or not regexp.match(part):\n return False\n elif regexp:\n return False\n return True",
"def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")",
"def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")",
"def query_rule_by_id(runtime, idd):\r\n return runtime.policy_parser.query_policy_by_id(idd).rule",
"def rule_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rule_name\")"
] |
[
"0.72327983",
"0.68010986",
"0.65098494",
"0.65098494",
"0.65098494",
"0.64873236",
"0.6475164",
"0.6460521",
"0.6324386",
"0.6131065",
"0.6131065",
"0.6001113",
"0.59306926",
"0.58415884",
"0.5783659",
"0.57832754",
"0.5764284",
"0.57564",
"0.57564",
"0.5746668",
"0.5716955",
"0.56540245",
"0.5555259",
"0.55393296",
"0.5538597",
"0.5526642",
"0.5526642",
"0.5526642",
"0.551347",
"0.5410326"
] |
0.7614755
|
0
|
Finds a registered object by its index. Registered objects are either Item or Rule (including Token).
|
def findByIndex(self, obj_index):
return self.registry.findByIndex(obj_index)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_index(self, obj):\n return self.model.indexlist[obj]",
"def getObject(name, index=-1):\n\n names = [x.name for x in bpy.data.objects if getNamePrefix(x.name) == name]\n names = sorted(names, key=getNameIndex)\n\n if len(names) == 0:\n print(\"Object '{}' not found!\".format(name))\n return None\n\n if index < 0:\n obj = bpy.data.objects[names[index]]\n else:\n for n in names:\n if getNameIndex(n) == index:\n obj = bpy.data.objects[n]\n\n return obj",
"def select_object_at_index(self, index):\n\t\treturn self.object_list[index]",
"def find(self,item):\n sig = str(item)\n try:\n return self.index[sig]\n except:\n return None",
"def __getitem__(self, index: int) -> object:\n return self.get_at_index(index)",
"def get(self, idx):\n if idx in self._objects:\n return self._objects[idx]\n else:\n warning(\"%s not found\" % idx)\n return None",
"def __getitem__(self, item):\n return self._object_names[item]",
"def get_indexed_item_from_list(index, given_list):\n\n returned_item = None\n\n if len(given_list)>0:\n for item in given_list:\n if isinstance(item, AutoBaseObject):\n if item.ID == index:\n returned_item = item\n break\n else:\n print(\"Issue with list: item is not AutoBaseObject\")\n print(\" index=\\n\",index)\n sys.exit()\n return returned_item",
"def get_obj_in_list(obj_name, obj_list):\n for o in obj_list:\n if o.name == obj_name:\n return o\n print (\"Unable to find object by the name of %s in list:\\n%s\" %\n (o.name, map(lambda o: o.name, obj_list)))\n exit(1)",
"def find_object_by_id(stix_objects, obj_id):\n ret_obj = None\n for obj in stix_objects:\n if obj[\"id\"] == obj_id:\n ret_obj = obj\n break\n return ret_obj",
"def find_object(field, object_list):\n for item in object_list:\n if item.name == field:\n return item\n return None",
"def find_object(field, object_list):\n for item in object_list:\n if item.name == field:\n return item\n return None",
"def find(self, index, value):\n res = self.storage.find(index, value)\n return {\n 'count': res['count'],\n 'items': [self.klass.from_json(_object)\n for _object in res['items']]\n }",
"def find_object(self, obj_type, obj_name):\n try:\n # Simply look it up by type and name.\n obj = self.model_map['object'][obj_type][obj_name][1]\n except KeyError:\n # No dice. This object doesn't exist in the model.\n obj = None\n\n return obj",
"def find_object(field, object_list):\n for item in object_list: # We will iterate through all the items in object_list\n if item.name == field: # if item.name is same as field (a parameter passed to this method)\n return item # we return that item that was found\n\n return None # Otherwise return None if item.name is not found in fields",
"def lookup(name):\n for i in range(len(catalog.obj_catalog)):\n if catalog.obj_catalog[i].name == name:\n return i\n return -1",
"def find(self, objectclass, **kwargs):\n raise NotImplementedError",
"def __getitem__(self, ref):\n for concept in self.concepts:\n if concept.match_ref(ref):\n return concept\n raise KeyError()",
"def find_object(field, list):\n for item in list:\n if item.name == field:\n return item\n return None",
"def index(queryset, obj):\n for index, item in enumerate(queryset):\n if item == obj:\n return index\n\n return -1",
"def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")",
"def __getitem__(self, index):\n if self._constructed is False:\n self._not_constructed_error(index)\n\n try:\n obj = self._data.get(index, _NotFound)\n except TypeError:\n try:\n index = self._processUnhashableIndex(index)\n except TypeError:\n # This index is really unhashable. Set a flag so that\n # we can re-raise the original exception (not this one)\n index = TypeError\n if index is TypeError:\n raise\n if index.__class__ is _IndexedComponent_slice:\n return index\n # The index could have contained constant but nonhashable\n # objects (e.g., scalar immutable Params).\n # _processUnhashableIndex will evaluate those constants, so\n # if it made any changes to the index, we need to re-check\n # the _data dict for membership.\n try:\n obj = self._data.get(index, _NotFound)\n except TypeError:\n obj = _NotFound\n\n if obj is _NotFound:\n # Not good: we have to defer this import to now\n # due to circular imports (expr imports _VarData\n # imports indexed_component, but we need expr\n # here\n from pyomo.core.expr import current as EXPR\n if index.__class__ is EXPR.GetItemExpression:\n return index\n validated_index = self._validate_index(index)\n if validated_index is not index:\n index = validated_index\n # _processUnhashableIndex could have found a slice, or\n # _validate could have found an Ellipsis and returned a\n # slicer\n if index.__class__ is _IndexedComponent_slice:\n return index\n obj = self._data.get(index, _NotFound)\n #\n # Call the _getitem_when_not_present helper to retrieve/return\n # the default value\n #\n if obj is _NotFound:\n return self._getitem_when_not_present(index)\n\n return obj",
"def __index__(self, ???):",
"def __getitem__(self, objectId: str):\n return super()._getitem(\n objectId, f=lambda x: registry.getObject(x, self.session)\n )",
"def get_object_by_name(self, object_list, object_name):\n obj = None\n for i in object_list:\n if i.get_name().lower() == object_name.lower():\n obj = i\n break\n return obj",
"def index_object(idxs=None):",
"def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')",
"def FindObject(self, tagged_address):\n raise NotImplementedError",
"def __getitem__(self, item_index: Index) -> Item:\n raise NotImplementedError(\"__getitem__\")",
"def __getitem__(self,key):\n if not hasattr(self,\"_register\"):\n self._register = dict()\n if key in self._register:\n return self._register[key]\n raise IndexError(\"No such key: %s\" % key)"
] |
[
"0.6923253",
"0.6511828",
"0.6414528",
"0.63838357",
"0.62515825",
"0.61839396",
"0.60195625",
"0.60153675",
"0.5983119",
"0.5975968",
"0.59725803",
"0.59725803",
"0.59528816",
"0.5948798",
"0.5843048",
"0.57931817",
"0.5785484",
"0.57647073",
"0.57612133",
"0.57382756",
"0.5708224",
"0.5703331",
"0.5699838",
"0.56874883",
"0.567763",
"0.55961555",
"0.5577269",
"0.5574927",
"0.55504775",
"0.5549399"
] |
0.7559963
|
0
|
Returns a new Fixed object, unique up to equivalence of its string text.
|
def MakeFixed(self,content):
return self.register(Fixed(content,reg=self))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fixed(self):\n return self.f_fixed().m_fixed()",
"def instance2fv(self, text):\n if isinstance(text, unicode):\n text = text.encode('utf8')\n\n arr = np.zeros((self.n_feats,), dtype='uint32')\n\n # Convert the text to a sequence of ascii values\n ords = map(ord, text)\n\n # Count the number of times we enter each state\n state = 0\n statecount = defaultdict(int)\n for letter in ords:\n state = self.tk_nextmove[(state << 8) + letter]\n statecount[state] += 1\n\n # Update all the productions corresponding to the state\n for state in statecount:\n for index in self.tk_output.get(state, []):\n arr[index] += statecount[state]\n\n # The returned vector is the TFxIDF vector. The IDF for the\n # linguini system is actually the inv-lang-freq, and this is\n # pre-computed from the training data. We also normalize to len 1\n # at this stage.\n retval = arr * self.ilf\n return retval",
"def fix(self, string: str) -> Gingerfied:\n\n broken = string\n\n fixes = self.create_fixes_list(string)\n\n fix = self.cleanse_string(string)\n\n return Gingerfied(fix, broken, fixes)",
"def ReadFixedString(self, length):\n return self.ReadBytes(length).rstrip(b'\\x00')",
"def Clone(self):\n return _gmat_py.CSFixed_Clone(self)",
"def __init__(self, fixed_content: str):\n self.fixed_content = fixed_content",
"def new_node(self, offset):\n # First we get the name of the node\n nameidx = self.string[offset:].find(b'\\0')\n name = self.string[offset: offset + nameidx]\n string_offset = offset + calc_length_word_align(nameidx + 1)\n node = FDTNode(name)\n return string_offset, node",
"def create_fixed_distance_constraint():\n return FixedDistanceConstraint()",
"def test_fixed_type():\n name = \"a_fixed_field\"\n namespace = \"md5\"\n aliases = [\"md5\", \"hash\"]\n default = types.Fixed(16, namespace=namespace, aliases=aliases)\n python_type = types.Fixed\n field = fields.AvroField(name, python_type, default)\n\n expected = {\n \"name\": name,\n \"type\": {\n \"type\": \"fixed\",\n \"name\": name,\n \"size\": default.size,\n \"namespace\": namespace,\n \"aliases\": aliases,\n },\n }\n\n assert expected == field.to_dict()",
"def create_from_text(cls, text):\n duplicate_words = text.lower().replace('.', ' .').split(' ')\n words = list(set(duplicate_words))\n corpus = [words.index(word) for word in duplicate_words]\n return cls(words, corpus)",
"def fix_seq(self, fixed_seq):\n assert len(fixed_seq) == self.length, \\\n \"Length of fixed sequence (%d) does not match length of %s (%d)\" \\\n % (len(fixed_seq), self.full_name, self.length)\n i = 0\n for seq in self.seqs:\n seq.fix_seq( fixed_seq[i:i+seq.length] )\n i += seq.length",
"def correct(search_key, text, strictness):\n\n text_copy = copy.deepcopy(text)\n words = text.split()\n for word in words:\n similarity = SequenceMatcher(None, word, search_key)\n if similarity.ratio() > strictness:\n text_copy = text_copy.replace(word, search_key)\n return text_copy",
"def replace_instance(s, to_replace, replacement):\n index=0\n matchedIndeces=0\n while index<len(s):\n if s[index]==to_replace[matchedIndeces]:\n matchedIndeces+=1\n if matchedIndeces>=len(to_replace):\n s=s[:index-(matchedIndeces-1)]+replacement+s[index+1:]\n index-=matchedIndeces-1\n matchedIndeces=0\n else:\n matchedIndeces=0\n index+=1\n return s",
"def test_text_roundtrip():\n for text in (\"\", \"a\", \"Hello, world!\", \"9\" * 1000):\n assert text == String.read(String.to_bytes(text))",
"def _only_fixed(o, d):\n if d[\"fixed\"]:\n return (\"value\", \"fixed\")\n else:\n return (\"fixed\",)",
"def test_similarity_fixed():\n similarity = pm.compute_similarity_for_fixed(\"Rio de Janeiro\", \"São Paulo\")\n nose.tools.eq_(similarity, 0, \"Wrong fixed similarity\")\n similarity = pm.compute_similarity_for_fixed(\"Rio de Janeiro\", \"Rio de Janeiro\")\n nose.tools.eq_(similarity, 1, \"Wrong fixed similarity\")",
"def test_eq_1():\n a = FixedPoint(1, 'Q2.8')\n assert a == 1",
"def test_eq_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1, 'Q2.8')\n assert a == b",
"def test_xml_string_roundtrip(self):\n forcefield_1 = ForceField(xml_simple_ff)\n string_1 = forcefield_1.to_string(\"XML\")\n # Ensure that we have spaces instead of tabs\n assert \" \" in string_1\n assert \"\\t\" not in string_1\n forcefield_2 = ForceField(string_1)\n string_2 = forcefield_2.to_string(\"XML\")\n assert string_1 == string_2",
"def test_fixed_text_layer(self):\n\t\tself.graphic = Text('text')\n\t\tself.viewport = Viewport(0, 0, 100, 100)\n\t\tself.offset_x = 10\n\t\tself.offset_y = 5\n\n\t\t# Test a static text layer first\n\t\tself.layer = text_layer.FixedStaticTextLayer(\n\t\t\tself.graphic, viewport=self.viewport,\n\t\t\toffset_x=self.offset_x, offset_y=self.offset_y\n\t\t)\n\n\t\tself.assert_layer_graphic_position()\n\n\t\tself.viewport.x = 10\n\t\tself.viewport.y = 10\n\t\tself.layer.update(0)\n\n\t\tself.assert_layer_graphic_position()\n\n\t\tself.viewport.x = 5\n\t\tself.viewport.y = 15\n\t\tself.layer.update(0)\n\n\t\tself.assert_layer_graphic_position()\n\n\t\t# Test a non-static text layer\n\n\t\t# We'll change the value of i and check if the text label is up to date\n\t\tself.i = 0\n\t\tself.graphic = LiveText(lambda: str(self.i))\n\t\tself.viewport = Viewport(0, 0, 100, 100)\n\n\t\tself.layer = text_layer.FixedTextLayer(\n\t\t\tself.graphic, viewport=self.viewport,\n\t\t\toffset_x=self.offset_x, offset_y=self.offset_y\n\t\t)\n\n\t\tself.assert_layer_graphic_position()\n\t\tself.assertEqual(self.layer.graphic.text, str(self.i),\n\t\t\t\"Layer failed to update text layer.\")\n\n\t\tself.viewport.x = 10\n\t\tself.viewport.y = 10\n\t\tself.i += 1\n\t\tself.layer.update(0)\n\n\t\tself.assert_layer_graphic_position()\n\t\tself.assertEqual(self.layer.graphic.text, str(self.i),\n\t\t\t\"Layer failed to update text layer.\")\n\n\t\tself.viewport.x = 5\n\t\tself.viewport.y = 15\n\t\tself.i += 1\n\t\tself.layer.update(0)\n\t\tself.assertEqual(self.layer.graphic.text, str(self.i),\n\t\t\t\"Layer failed to update text layer.\")\n\n\t\tself.assert_layer_graphic_position()",
"def from_fixed(cls, fixed_date):\n critical = cls.sunset(fixed_date)\n month = sidereal_zodiac(critical)\n year = cls.calendar_year(critical) - HinduSolarDate.SOLAR_ERA\n approx = fixed_date - 3 - mod(ifloor(sidereal_solar_longitude( critical)), 30)\n begin = next_int(approx, lambda i: (sidereal_zodiac(cls.sunset(i)) == month))\n day = fixed_date - begin + 1\n return HinduAstroSolar(year, month, day)",
"def test_fixed2base(self):\n if self._explicit:\n t1 = self.typedef\n x1 = self.import_cls.typedef_fixed2base(t1)\n t2 = copy.deepcopy(x1)\n t2['type'] = t1['type']\n x2 = self.import_cls.typedef_fixed2base(t2)\n self.assert_equal(x1, x2)\n y = self.import_cls.typedef_base2fixed(x1)\n self.assert_equal(y, self.typedef)",
"def duplicate(self):\n\n return Note(self.nbr, self.length, self.vel)",
"def from_fixed(cls, fixed_date):\n critical = cls.sunrise(fixed_date + 1)\n month = cls.zodiac(critical)\n year = cls.calendar_year(critical) - cls.SOLAR_ERA\n approx = fixed_date - 3 - mod(ifloor(cls.solar_longitude(critical)), 30)\n begin = next_int(approx, lambda i: (cls.zodiac(cls.sunrise(i + 1)) == month))\n day = fixed_date - begin + 1\n return HinduSolarDate(year, month, day)",
"def text(self, text: str) -> bytes:\n\n buffer = text.encode(\"utf-8\")\n return struct.pack(\">i\", len(buffer)) + buffer",
"def from_text(text):\n\n return _from_text(text, _by_text)",
"def sub(self, other):\n result = self.copy()\n for char, qty in other.chars.items():\n result.chars[char] -= qty\n result.surface = \"\"\n return result",
"def get_or_create(cls, text):\n h = sha256(text).hexdigest()[:32]\n planet = TextPlanet.query.get(h)\n\n if planet is None:\n app.logger.info(\"Storing new text\")\n planet = TextPlanet(\n id=h,\n text=text)\n\n return planet",
"def copy(self):\n return LetterBag(self.string())",
"def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self"
] |
[
"0.50933117",
"0.4975673",
"0.49061912",
"0.48256734",
"0.476634",
"0.47550938",
"0.4729677",
"0.46913713",
"0.46866438",
"0.45747095",
"0.4565746",
"0.45626754",
"0.45316634",
"0.45182678",
"0.4494061",
"0.4493968",
"0.44902837",
"0.4489109",
"0.44701687",
"0.4465243",
"0.44560272",
"0.4438954",
"0.4411405",
"0.44038635",
"0.43998858",
"0.43913913",
"0.43896288",
"0.4384777",
"0.4371283",
"0.4368095"
] |
0.5763124
|
0
|
Returns a new Pattern object, unique up to equivalence of its pattern text.
|
def MakePattern(self,content):
return self.register(Pattern(content,reg=self))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def relate_pattern(a, b, pattern, **kwargs):\n return lib.relate_pattern(a, b, pattern, **kwargs)",
"def get_pattern(self):\n if self.pattern is None:\n pattern_str = self.blueprint.pattern()\n pattern_file = self.remgr.lookup_pattern_file(self.blueprint, self.provider)\n self.pattern = pattern.Pattern(pattern_str, pattern_file)\n self.pattern.set_provider(self)\n return self.pattern",
"def create_from_text(cls, text):\n duplicate_words = text.lower().replace('.', ' .').split(' ')\n words = list(set(duplicate_words))\n corpus = [words.index(word) for word in duplicate_words]\n return cls(words, corpus)",
"def make_pattern(current_pattern):\n pattern = ''.join([str(b) for b in current_pattern])\n return pattern",
"def __init__(self, pattern):\r\n self.pattern = pattern",
"def regex(self, pattern):\n return RegexClauseElement(self, pattern)",
"def make_connection(self):\n if self._created_connections[self._pattern_idx] >= self.max_connections_per_pattern:\n raise ConnectionError(\"Too many connections\")\n self._created_connections[self._pattern_idx] += 1\n conn = self.connection_class(**self.patterns[self._pattern_idx])\n conn._pattern_idx = self._pattern_idx\n return conn",
"def __init__(self, pattern1, pattern2):\n self.pattern1 = pattern1\n self.pattern2 = pattern2",
"def __new__(cls, format):\n self = super(SF_Pattern, cls).__new__(cls)\n\n if isinstance(format, bytes):\n uni_str = format.decode('ISO-8859-1') # decode to unicode\n trans_str = translate(uni_str) # translate only works with unicode\n re_fmt = trans_str.encode('ISO-8859-1') # encode back to bytes\n self._spec = _gbspec\n else:\n re_fmt = translate(format)\n self._spec = _gspec\n\n self._format = format\n self._re = cre = re.compile(re_fmt)\n\n if cre.groupindex and len(cre.groupindex) != cre.groups:\n raise RuntimeError('cannot mix mapped and unmapped specifiers')\n elif not cre.groupindex:\n self._retfunc = self._return_tuple\n self._type = tuple\n else:\n self._retfunc = self._return_dict\n self._type = dict\n\n self._casts = self._get_types()\n\n return self",
"def __init__(self, pattern):\n self._pattern = re.compile(pattern)",
"def to_pattern(obj):\n if isinstance(obj, Pattern):\n return obj\n return Glob(str(obj))",
"def _create_regex(pattern, ignore_case=False, whole_words=False, literal_pattern=False):\n if literal_pattern:\n pattern = re.escape(pattern)\n if whole_words:\n b = r'\\b' if isinstance(pattern, str) else br'\\b'\n pattern = b + pattern + b\n\n regex = re.compile(pattern, re.I if ignore_case else 0)\n return regex",
"def relate_pattern(self, other, pattern): # -> bool:\n ...",
"def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern",
"def Pattern(self):\r\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.statrequest.pattern.pattern import Pattern\r\n\t\treturn Pattern(self)",
"def get_pattern(self, name):\n return self._pattern_reg[name]",
"def pattern_factory(self):\n\t\treturn self.args[1]",
"def convert_pattern_format(text):\n parsed_text = []\n # parse text via Pattern's parser\n pattern_parsed_text = Text(parse(text, relations=True, lemmata=True))\n for sentence in pattern_parsed_text:\n s = Sentence()\n s.string = remove_blanks(sentence.string)\n for word in sentence:\n # Patterns tags for each word in the sentence are stored in a new Word-object\n w = Word()\n w.string = word.string\n w.lemma = word.lemma\n w.index = word.index\n w.tag = word.type\n w.entity = \"\"\n # each word is appended to a Sentence-object\n s.words.append(w)\n # each Sentence-object is appended to an array\n parsed_text.append(s)\n return parsed_text",
"def compile(format):\n try:\n return _cache[format]\n except KeyError:\n _cache[format] = retval = SF_Pattern.__new__(SF_Pattern, format)\n return retval",
"def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))",
"def getPattern(self):\n return self.pattern",
"def __init__(self, pattern):\n self._pattern = pattern.lower()",
"def pattern_gen():\n pattern = \"\"\n\n return pattern",
"def make_pattern(self):\n probability = random.SystemRandom().random()\n if probability < 0.1:\n _pattern = [0 for x in range(32)]\n elif probability > 0.5:\n pattern_num = SECURE_RANDOM.choice(CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.80:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n elif _probability < 0.40:\n _offset = random.SystemRandom().randint(2, 16)\n _pattern = [1 if (x == _offset) or (x % pattern_num == _offset) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n pattern_num = SECURE_RANDOM.choice(INNER_CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.50:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n\n if not self.global_swing:\n _probability = random.SystemRandom().random()\n if _probability > 0.3:\n _pattern.extend([random.SystemRandom().uniform(0.01, 0.5), random.SystemRandom().randint(1, 14), 0])\n else:\n _pattern.extend([0,1,0])\n else: \n _pattern.extend([0,1,1]) \n\n return _pattern",
"def __init__(self, pattern, markdown_instance=None):\r\n self.pattern = pattern\r\n self.compiled_re = re.compile(\"^(.*?)%s(.*?)$\" % pattern, \r\n re.DOTALL | re.UNICODE)\r\n\r\n # Api for Markdown to pass safe_mode into instance\r\n self.safe_mode = False\r\n if markdown_instance:\r\n self.markdown = markdown_instance",
"def _find_pattern(self, locator):\n assert locator is not None and len(locator) > 0\n locator = locator.strip().lower()\n (pattern, sensitivity) = self._parse_locator(locator)\n\n if (sensitivity != None):\n sensitivity = float(sensitivity)\n pattern = Pattern(pattern).similar(sensitivity)\n else:\n pattern = pattern\n return pattern",
"def from_text(text):\n\n return _from_text(text, _by_text)",
"def save_pattern(self, pattern: Pattern):",
"def save_pattern(self, pattern: Pattern):",
"def rabinkarp(text, pattern):\n n = len(text)\n m = len(pattern)\n res = []\n if n < m :\n return res\n #compute hash\n q = 11 #prime number used\n d = 26 #size of our alphabet\n h = (10**(m-1))%q\n\n p_h = t_h = 0\n for i in range(m):\n p_h = (p_h*d+getHash(pattern[i]))%q\n t_h = (t_h*d+getHash(text[i]))%q\n\n for i in range(m, n):\n if p_h == t_h and pattern == text[i-m:i]:\n res.append(i-m)\n t_h = (d*(t_h - getHash(text[i-m])*h)+getHash(text[i]))%q\n if t_h == p_h and text[n-m:n] == pattern :\n res.append(n-m)\n return res"
] |
[
"0.5792537",
"0.5585",
"0.5559421",
"0.5530123",
"0.54837835",
"0.54252803",
"0.5418044",
"0.54112136",
"0.538951",
"0.5339793",
"0.5319212",
"0.531522",
"0.5246962",
"0.5242287",
"0.52363425",
"0.52203023",
"0.5204605",
"0.52017283",
"0.51949394",
"0.5193178",
"0.5146308",
"0.51352775",
"0.51321507",
"0.51210374",
"0.5085669",
"0.507252",
"0.5058751",
"0.50514156",
"0.50514156",
"0.504755"
] |
0.58428156
|
0
|
Returns a new Repeat1 object, unique up to equivalence of its member.
|
def MakeRepeat1(self,content):
return self.register(Repeat1(content,reg=self))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def to_repeat(self):\n if hasattr(self, '_m_to_repeat'):\n return self._m_to_repeat if hasattr(self, '_m_to_repeat') else None\n\n self._m_to_repeat = self.to_repeat_raw.value\n return self._m_to_repeat if hasattr(self, '_m_to_repeat') else None",
"def duplicate(self):\n\n return Note(self.nbr, self.length, self.vel)",
"def repeat(self, count):\n return self.Sequence((self,) * count)",
"def dupe(q_1: Q) -> Q:\n\n du = Q(\n [q_1.t, q_1.x, q_1.y, q_1.z],\n q_type=q_1.q_type,\n representation=q_1.representation,\n )\n return du",
"def make_unique(self) -> \"UniqueMolecule\":\n return UniqueMolecule(rd_mol=self.rd_mol)",
"def ipset_x_repeating():\n x = np.linspace(0, 10, 11)\n x[5] = x[4]\n return IPSet(x=x, y=np.linspace(-1, 1, 11), x_new=np.linspace(2, 5, 7))",
"def removeDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return self[ind[ok]]",
"def duplicate(self):\n return Sample(self.solutes, self.quantities, self.volume, self.endless)",
"def repeat_count_m1(self):\n if hasattr(self, '_m_repeat_count_m1'):\n return self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None\n\n self._m_repeat_count_m1 = self.repeat_count_m1_raw.value\n return self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None",
"def repeat(self, repeats):\n return SeriesDefault.register(pandas.Series.repeat)(self, repeats=repeats)",
"def one(self) -> 'PFElement':\n return self(1)",
"def _make_identical(self, name):\n if not name in self.all_variables:\n return name\n i = 2\n while '%s%s' % (name, i) in self.all_variables:\n i += 1\n return '%s%s' % (name, i)",
"def create_repeat(self, repeat_id: int, start_from: str, count: Union[int, str]) \\\n -> Tuple[Optional['Repeater'], str]:\n new_repeat: Repeater = Repeater(StartingFrom=start_from, Count=count)\n verified_repeat = self.verify_repeat(new_repeat)\n if not verified_repeat:\n return None, 'wrong_start'\n if repeat_id == -1:\n self.Sequence.append(verified_repeat)\n self.Sequence.append(verified_repeat)\n else:\n self.Sequence.insert(repeat_id + 1, verified_repeat)\n return verified_repeat, \"\"",
"def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x",
"def make_unique(self, unnamed_prefix: str = '') -> 'Entity':\n orig_name = self['targetname']\n if orig_name:\n self['targetname'] = '' # Remove ourselves from the .by_target[] set.\n else:\n orig_name = unnamed_prefix\n\n base_name = orig_name.rstrip('0123456789')\n\n if self.map.by_target[base_name]:\n # Check every index in order.\n i = 1\n while True:\n name = base_name + str(i)\n if not self.map.by_target[name]:\n self['targetname'] = name\n break\n i += 1\n else:\n # The base name is free!\n self['targetname'] = base_name\n\n return self",
"def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x",
"def repeat(\n self, repeats: Union[int, Tuple[int, ...]], axis: Optional[int] = None\n ) -> PhiTensor:\n\n result = self.child.repeat(repeats, axis)\n if isinstance(self.min_vals, lazyrepeatarray):\n minv = lazyrepeatarray(data=self.min_vals.data.min(), shape=result.shape)\n maxv = lazyrepeatarray(data=self.max_vals.data.max(), shape=result.shape)\n else:\n minv = self.min_vals\n maxv = self.max_vals\n\n return PhiTensor(\n child=result,\n data_subjects=self.data_subjects.repeat(repeats, axis),\n min_vals=minv,\n max_vals=maxv,\n )",
"def repeat(self):\n return self._repeat",
"def testduplicate(self):\n a = AttributeAbility(['ST',], 3)\n self.assertTrue(a.duplicate(a))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 3)))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 5)))\n self.assertFalse(a.duplicate(AttributeAbility(['DX',], 5)))",
"def member_deduplicate(item):\n\n if item.tablename == \"member_membership\":\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.member_membership\n\n data = item.data\n\n person_id = data.person_id\n organisation_id = data.organisation_id\n\n # 1 Membership record per Person<>Organisation\n query = (mtable.deleted != True) & \\\n (mtable.person_id == person_id) & \\\n (mtable.organisation_id == organisation_id)\n row = db(query).select(mtable.id,\n limitby=(0, 1)).first()\n if row:\n item.id = row.id\n item.method = item.METHOD.UPDATE",
"def copy(self):\n return self._new_rep(self._func(self.rep))",
"def _new_rep(self, rep):\n return self._new(rep, self.shape, self.domain)",
"def _element_from_rep_and_mod(self, rep, mod):\n if mod != self.T.rep.rep:\n raise UnificationFailed('Element does not appear to be in the same field.')\n return self.element_from_poly(Poly(rep, self.T.gen))",
"def repeat_resourceview(resource, member_id):\n return resourceview(resource, member_id, changed=False)",
"def testduplicate(self):\n self.assertTrue(WeaponAbility('Guided').duplicate(\n WeaponAbility('Guided')))\n self.assertFalse(WeaponAbility('Guided').duplicate(\n WeaponAbility('Changling')))\n self.assertTrue(WeaponAbility('Animated', range=1).duplicate(\n WeaponAbility('Animated', range=3)))\n self.assertTrue(WeaponAbility('Defender', size=1).duplicate(\n WeaponAbility('Defender', size=3)))\n fire = MentalAbility('Fireball')\n ice = MentalAbility('Iceball')\n self.assertTrue(WeaponAbility('Enhanced', abilities=[ice,]).duplicate(\n WeaponAbility('Enhanced', abilities=[fire,])))",
"def remove_duplicates(self):\n names: Dict[str, int] = dict()\n for step in self.Sequence:\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name != '':\n if name not in names:\n names[name] = 1\n else:\n names[name] += 1\n for step in reversed(self.Sequence):\n if isinstance(step, Repeater):\n continue\n name = step.Name\n if name and (names[name] > 1):\n names[name] -= 1\n step.Name = name + \"_%i\" % names[name]",
"def simplify(self):\n\n from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d\n\n if self.is_uniform:\n return UniformCoordinates1d(self.start, self.stop, self.step, **self.properties)\n\n return self",
"def simple_copy(self):\n\n def complementary(half_strand):\n return {\n 'A': half_strand['T'],\n 'T': half_strand['A'],\n 'C': half_strand['G'],\n 'G': half_strand['C'],\n }\n\n return Gen(\n self.epoch + 1,\n complementary(self.reverse),\n complementary(self.forward),\n )",
"def distinct(self):\n memory = set()\n\n def _distinct(iterator):\n while True:\n item = next(iterator)\n if item in memory:\n continue\n memory.add(item)\n return item\n return self.__class__(self, _distinct)",
"def copy (self):\n return self.__class__(self.name, self[:])"
] |
[
"0.62315536",
"0.5892301",
"0.57454616",
"0.5742474",
"0.5706816",
"0.5539956",
"0.55211467",
"0.5520314",
"0.55111814",
"0.54748863",
"0.54399043",
"0.5416626",
"0.5397096",
"0.52835655",
"0.5254687",
"0.52383393",
"0.5224376",
"0.52110064",
"0.51618063",
"0.5122607",
"0.5107659",
"0.50935656",
"0.50864923",
"0.50308836",
"0.50230294",
"0.501887",
"0.49911356",
"0.49814156",
"0.49716452",
"0.49564093"
] |
0.62277514
|
1
|
Returns a new Item, unique up to equivalence of its lefthand side nonterminal, righthand side production rule, and its position within that righthand side.
|
def MakeItem(self,lhs,rule,position):
# Upconvert a lhs to a SymbolName if it's a Python string.
lhs = lhs if isinstance(lhs,SymbolName) else self.MakeSymbolName(lhs)
candidate = Item(lhs,rule,position,reg=self)
# Avoid double-registering.
result = self.register(candidate)
if result is candidate:
result.precompute(self)
return result
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def duplicate(self):\n\n return Note(self.nbr, self.length, self.vel)",
"def construct(self, rule):\n reads = set(self.reads) - set(rule.keys()) | set(rule.values())\n rhs = self.rhs.xreplace(rule)\n return Temporary(self.lhs, rhs, reads=reads, readby=self.readby)",
"def __getitem__(self, item):\n index = self.reindex(item)\n return self.parent[index]",
"def items(self) -> typing.Tuple[tuple]:\n\n return (\n (LEFT_PES, self.left_pes),\n (RIGHT_PES, self.right_pes),\n (LEFT_MANUS, self.left_manus),\n (RIGHT_MANUS, self.right_manus)\n )",
"def close(self,grammar):\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n dirty_dict = self.id_to_lookahead.copy()\n while len(dirty_dict) > 0:\n # From the dragon book, 1st ed. 4.38 Sets of LR(1) items construction.\n #\n # For each item [ A -> alpha . B beta, a ] in I,\n # and each production \" B -> gamma \" in the grammar,\n # and each terminal b in FIRST(beta a),\n # add [ B -> . gamma, b ] to I if it is not already there.\n work_list = dirty_dict\n dirty_dict = dict()\n for item_id, lookahead in work_list.items():\n item = self.id_to_item[item_id]\n if item.at_end():\n continue\n B = item.next()\n if not B.is_symbol_name():\n continue\n\n # Compute lookahead. (A fresh LookaheadSet)\n new_item_lookahead = item.rest_lookahead_with_other_lookahead(lookahead)\n\n # Iterate over items [ B -> . B_prod ]\n # for each production B -> B_prod in the grammar.\n for candidate in item.items_generated_by_next():\n candidate_id = candidate.reg_info.index\n if candidate_id not in self.id_to_item:\n la = LookaheadSet(new_item_lookahead)\n self.internal_add(candidate, LookaheadSet(new_item_lookahead))\n dirty_dict[candidate_id] = la\n else:\n if self.id_to_lookahead[candidate_id].merge(new_item_lookahead):\n dirty_dict[candidate_id] = self.id_to_lookahead[candidate_id]\n return self",
"def clone_item(item):\n i = h5Item(item.text(0))\n i.path = item.path\n i.listIndex = item.dataIndex\n i.originalIndex = item.originalIndex\n i.data = item.data\n return i",
"def _add_right(self, p, e):\n node = self._validate(p)\n if node._right is not None:\n raise ValueError('Right child exists')\n self._size += 1\n node._right = self._Node(e, node) # node is its parent\n return self._make_position(node._right)",
"def _add_right(self, p, e):\n node = self._validate(p)\n if node._right is not None:\n raise ValueError('Right child exists')\n self._size += 1\n node._right = self._Node(e, node) # node is its parent\n return self._make_position(node._right)",
"def _add_right(self, p, e):\n node = self._validate(p)\n\n if node.right is not None:\n raise ValueError('Right child exists')\n\n self._size += 1\n node.right = self._Node(e, node)\n return self._make_position(node.right)",
"def _add_right(self, p, e):\n node = self._validate_position(p)\n if node.right is not None:\n raise ValueError(\"right child exists\")\n self._size += 1\n node.right = self._Node(e, parent=node)\n return self._make_position(node.right)",
"def get_other_item(item: str) -> np.ndarray:\n i = choice(range(len(items)))\n while items[i] == item:\n i = choice(range(len(items)))\n return embeddings[i]",
"def Item(self) -> object:",
"def Item(self) -> object:",
"def new_varItem(self):\n newInd = (len(pQt.getTopItems(self)) + 1)\n newItem = QtGui.QTreeWidgetItem()\n newItem.setText(0, str(newInd))\n newItem._treeParent = self\n newItem._wdgtParent = self.treeParent\n newItem.wdgEnabled = self.new_varEnabledWidget()\n newItem.wdgLabel = self.new_varTextWidget()\n newItem.wdgType = self.new_varTypeWidget()\n newItem.wdgValue = self.new_varTextWidget()\n newItem.wdgComment = self.new_varTextWidget()\n return newItem",
"def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' % (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots",
"def __getitem__(self, item: Union[Vec, Tuple[float, float, float]]) -> 'Side':\n if item == (1, 0, 0):\n return self.east\n elif item == (-1, 0, 0):\n return self.west\n elif item == (0, 1, 0):\n return self.north\n elif item == (0, -1, 0):\n return self.south\n elif item == (0, 0, 1):\n return self.top\n elif item == (0, 0, -1):\n return self.bottom\n else:\n raise KeyError(item)",
"def _add_right(self, p, data):\n node = self._validate(p)\n if node._right is not None:\n raise ValueError(\"right Child Exists\")\n node._right = self._Node(data, parent=node)\n return self._make_position(node._right)",
"def right(self, p):\n node = self._validate(p)\n return self._make_position(node._right)",
"def right(self, p):\n node = self._validate(p)\n return self._make_position(node._right)",
"def __getitem__(self, item):\n if item not in self.complex_mols:\n complex_mol = rdkit.Chem.rdmolops.CombineMols(self.protein_mols[item],self.ligand_mols[item] )\n self.complex_mols[item]=complex_mol\n\n return item, self.protein_mols[item], self.ligand_mols[item], \\\n self.complex_mols[item], self.labels[item]",
"def new(self, cur_pos: int):\n return HallwayState(self.end_pos + 1, cur_pos)",
"def right(self, p):\n\n node = self._validate(p)\n return self._make_position(node._right)",
"def convert_to_item(self, ptype, rule):\n line = {}\n line['ptype'] = {}\n line['ptype']['S'] = ptype\n\n for i, v in enumerate(rule):\n line['v{}'.format(i)] = {}\n line['v{}'.format(i)]['S'] = v\n\n line['id'] = {}\n line['id']['S'] = self.get_md5(line)\n\n return line",
"def get_line_from_item(self, item):\n line = item['ptype']['S']\n i = 0\n\n while i < len(item) - 2:\n line = '{}, {}'.format(line, item['v{}'.format(i)]['S'])\n i = i + 1\n\n return line",
"def right(self, p):\n node = self._validate(p)\n return self._make_position(node.right)",
"def with_rarity(self, rarity: int) -> Creature:\n result = self.clone()\n result.rarity\n return result",
"def __init__(self, item, left=None, right=None):\n self.item = item\n self.left = left\n self.right = right",
"def __init__(self, item, left=None, right=None):\n self.item = item\n self.left = left\n self.right = right",
"def clone(self):\n return _libsbml.AlgebraicRule_clone(self)",
"def right(self, p):\n node = self._validate_position(p)\n return self._make_position(node.right)"
] |
[
"0.522883",
"0.5150924",
"0.505851",
"0.50136834",
"0.49676684",
"0.49488047",
"0.48961335",
"0.48812065",
"0.4836267",
"0.48335648",
"0.48042563",
"0.4794623",
"0.4794623",
"0.47915378",
"0.4764995",
"0.4737992",
"0.47363135",
"0.47355115",
"0.47323593",
"0.4722751",
"0.4721462",
"0.47169274",
"0.47056055",
"0.46974412",
"0.4688914",
"0.4661881",
"0.46422964",
"0.46422964",
"0.462908",
"0.46250367"
] |
0.5554848
|
0
|
Rewrites this Grammar's rules so they are in Canonical Form.
|
def canonicalize(self):
self.rules = canonicalize_grammar(self,self.empty)
self.is_canonical = True
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def canonicalize_grammar(grammar,empty):\n\n rules = grammar.rules\n\n # First ensure right-hand sides of containers are Choice nodes.\n result = {}\n for key, value in rules.items():\n if isinstance(value,ContainerRule):\n if isinstance(value,Choice):\n # Choice nodes are unchanged\n result[key] = value\n else:\n result[key] = grammar.MakeChoice([value])\n else:\n result[key] = value\n\n # Now iteratively simplify rules.\n # Replace a complex sub-component with a new rule.\n # Repeat until settling.\n keep_going = True\n while keep_going:\n keep_going = False\n rules = dict(result)\n\n for key, value in rules.items():\n if isinstance(value,LeafRule):\n result[key] = value\n else:\n # The value is a Choice\n made_a_new_one = False\n parts = []\n def add_rule(key,*values):\n \"\"\"\n Records a new rule with the given key and value.\n\n Args:\n key: A SymbolName whose name is the key into the result\n dictionary\n values: A list of alternatives\n\n Returns: The key's Symbol\n \"\"\"\n rhs = grammar.MakeChoice(list(values))\n result[key.content] = rhs\n return key\n for i in range(len(value)):\n item = value[i]\n item_key = grammar.MakeSymbolName(\"{}/{}\".format(key,str(i)))\n if isinstance(item,LeafRule):\n parts.append(item)\n elif isinstance(item,Repeat1):\n # value[i] -> X+\n # becomes\n # value[i] -> value.i\n # value.i -> X value.i\n # value.i -> epsilon\n x = item[0]\n parts.append(add_rule(item_key,\n grammar.MakeSeq([x,item_key]),\n empty))\n made_a_new_one = True\n elif isinstance(item,Choice):\n # Sub-Choices expand in place.\n parts.extend(item)\n made_a_new_one = True\n elif isinstance(item,Seq):\n # Expand non-leaf elements\n made_a_new_seq_part = False\n seq_parts = []\n for j in range(len(item)):\n seq_item = item[j]\n seq_item_key = grammar.MakeSymbolName(\n \"{}/{}.{}\".format(key,str(i),str(j)))\n if isinstance(seq_item,LeafRule):\n seq_parts.append(seq_item)\n else:\n seq_parts.append(\n add_rule(seq_item_key,seq_item))\n made_a_new_seq_part = True\n if made_a_new_seq_part:\n parts.append(grammar.MakeSeq(seq_parts))\n made_a_new_one = True\n else:\n parts.append(item)\n if made_a_new_one:\n rhs = grammar.MakeChoice(parts)\n result[key] = rhs\n keep_going = True\n else:\n result[key] = value\n\n return result",
"def cleanUpRules(self):\n\n\t\t# initialize\n\t\tscoreDict = {}\n\t\tnewRules = {}\n\n\t\t# loop through rules\n\t\tfor i, tup in enumerate(self.generatedRules):\n\n\n\t\t\tantecedent = str(tup[0].antecedent)\n\n\t\t\t# if there is no rule in the scoredictionary yet with the same antecedent, put it in both dictionaries\n\t\t\tif (not antecedent in scoreDict):\n\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\telse:\n\n\t\t\t\t# if there is, then first compare if the degree is higher before overwriting\n\t\t\t\tif (tup[1] > scoreDict[antecedent]):\n\t\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\t\telse:\n\t\t\t\t\t# not higher? don't overwrite\n\t\t\t\t\tcontinue\n\n\t\t# save rules\n\t\tself.generatedRules = []\n\t\tfor key in newRules:\n\t\t\tself.generatedRules.append(newRules[key])\n\n\t\treturn",
"def convert_grammar(grammar):\n\n # Remove all the productions of the type A -> X B C or A -> B a.\n global RULE_DICT\n unit_productions, result = [], []\n res_append = result.append\n index = 0\n\n for rule in grammar:\n new_rules = []\n if len(rule) == 2 and rule[1][0] != \"'\":\n # Rule is in form A -> X, so back it up for later and continue with the next rule.\n unit_productions.append(rule)\n add_rule(rule)\n continue\n elif len(rule) > 2:\n # Rule is in form A -> X B C [...] or A -> X a.\n terminals = [(item, i) for i, item in enumerate(rule) if item[0] == \"'\"]\n if terminals:\n for item in terminals:\n # Create a new non terminal symbol and replace the terminal symbol with it.\n # The non terminal symbol derives the replaced terminal symbol.\n rule[item[1]] = f\"{rule[0]}{str(index)}\"\n new_rules += [f\"{rule[0]}{str(index)}\", item[0]]\n index += 1\n while len(rule) > 3:\n new_rules.append([f\"{rule[0]}{str(index)}\", rule[1], rule[2]])\n rule = [rule[0]] + [f\"{rule[0]}{str(index)}\"] + rule[3:]\n index += 1\n # Adds the modified or unmodified (in case of A -> x i.e.) rules.\n add_rule(rule)\n res_append(rule)\n if new_rules:\n result.extend(new_rules)\n # Handle the unit productions (A -> X)\n while unit_productions:\n rule = unit_productions.pop()\n if rule[1] in RULE_DICT:\n for item in RULE_DICT[rule[1]]:\n new_rule = [rule[0]] + item\n if len(new_rule) > 2 or new_rule[1][0] == \"'\":\n result.insert(0, new_rule)\n else:\n unit_productions.append(new_rule)\n add_rule(new_rule)\n return result",
"def rebuild_rule(self):\r\n if self.validated_rule[-1] == '\\n':\r\n self.validated_rule = self.validated_rule[:-1]\r\n\r\n if self.rule_to_validate is None or self.validated_rule is None:\r\n exit()\r\n elif self.rule_to_validate == self.validated_rule:\r\n return\r\n\r\n yara_valid_lines, yara_valid_meta_start, yara_valid_meta_end = self.__find_meta_start_end(self.rule_to_validate)\r\n yara_cccs_lines, yara_cccs_meta_start, yara_cccs_meta_end = self.__find_meta_start_end(self.validated_rule)\r\n\r\n if yara_valid_meta_start != 0 and yara_valid_meta_end != 0 and yara_cccs_meta_start != 0 and yara_cccs_meta_end != 0:\r\n yara_new_file = yara_valid_lines[0:yara_valid_meta_start] + yara_cccs_lines[yara_cccs_meta_start:yara_cccs_meta_end] + yara_valid_lines[yara_valid_meta_end:]\r\n yara_new_file = \"\\n\".join(yara_new_file)\r\n if self.rule_to_validate != yara_new_file:\r\n self.validated_rule = yara_new_file",
"def flushRules(self):\n self.chain.flush()",
"def canonicalize(self):\n return _libsbml.ASTNode_canonicalize(self)",
"def canonicalize(self, url):\n pass",
"def rewrite(self, axiom) -> str:\n return \"\".join(self.rules.get(c, c) for c in axiom)",
"def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)",
"def get_cnf(self):\n nonterm = set(self.nonterminal)\n term = set(self.terminal)\n\n rules = list(self.rules)\n cnf = set()\n\n # STEP 1: eliminate nonsolitary terminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 1:\n rhs_list = list(rhs)\n for j in range(len(rhs_list)):\n x = rhs_list[j]\n if x in term: # found nonsolitary terminal\n new_nonterm = 'NT_{}'.format(x)\n new_nonterm_rule = GrammarRule(new_nonterm, (x,), 0.0)\n\n if new_nonterm not in nonterm:\n nonterm.add(new_nonterm)\n cnf.add(new_nonterm_rule)\n else:\n assert new_nonterm_rule in cnf\n rhs_list[j] = new_nonterm\n rhs = tuple(rhs_list)\n rules[i] = GrammarRule(lhs, rhs, log_prob)\n\n # STEP 2: eliminate rhs with more than 2 nonterminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 2:\n assert all(x in nonterm for x in rhs), rule\n current_lhs = lhs\n for j in range(len(rhs) - 2):\n new_nonterm = 'BIN_\"{}\"_{}'.format(\n '{}->{}'.format(lhs, ','.join(rhs)), str(j))\n assert new_nonterm not in nonterm, rule\n nonterm.add(new_nonterm)\n cnf.add(\n GrammarRule(current_lhs,\n (rhs[j], new_nonterm),\n log_prob if j == 0 else 0.0))\n current_lhs = new_nonterm\n cnf.add(GrammarRule(current_lhs, (rhs[-2], rhs[-1]), 0.0))\n else:\n cnf.add(rule)\n\n return Grammar(cnf)",
"def pretty_str(self,print_option=PrintOption()):\n\n po = print_option.clone()\n po.is_canonical = self.is_canonical\n po.grammar = self\n\n token_rules = set()\n\n # Look for defined rules that look better as absorbed into their uses.\n for name, rule in self.rules.items():\n # Star-able is also optional-able, so starrable must come first.\n starred_phrase = rule.as_starred(name)\n if starred_phrase is not None:\n po.replace_with_starred[name] = starred_phrase\n continue\n optional_phrase = rule.as_optional()\n if optional_phrase is not None:\n po.replace_with_optional[name] = optional_phrase\n continue\n options = rule.as_container()\n if len(options)==1:\n phrase = options[0].as_container()\n if len(phrase)==1 and phrase[0].is_token():\n token_rules.add(name)\n\n # A rule that was generated to satisfy canonicalization is better\n # presented as absorbed in its original parent.\n for name, rule in self.rules.items():\n # We only care about rules generated during canonicalization\n if name.find('.') > 0 or name.find('/') > 0:\n options = rule.as_container()\n if len(options) != 2:\n continue\n if any([len(x.as_container())!=1 for x in options]):\n continue\n if any([(not x.as_container()[0].is_symbol_name()) for x in options]):\n continue\n # Rule looks like A -> X | Y\n po.replace_with_nested[name] = rule\n\n parts = []\n for key in sorted(self.rules):\n if key == LANGUAGE:\n # This is synthetic, for analysis\n continue\n rule_content = self.rules[key].pretty_str(po)\n if key in po.replace_with_optional:\n continue\n if key in po.replace_with_starred:\n continue\n if key in po.replace_with_nested:\n continue\n if (not po.print_terminals) and (key in token_rules):\n continue\n space = \"\" if po.multi_line_choice else \" \"\n if po.bikeshed:\n key_content = \" <dfn for='recursive descent syntax'>{}</dfn>\".format(key)\n content = \"<div class='syntax' noexport='true'>\\n{}:\\n{}\\n</div>\".format(key_content,rule_content)\n else:\n content = \"{}:{}{}\".format(key,space,rule_content)\n parts.append(content)\n content = (\"\\n\\n\" if po.more_newlines else \"\\n\").join(parts)\n return content",
"def make_grammar(self, trim=True, remove_lexical_rules=False):\n if 1. * self.number_binary_productions > (self.number_nonterminals ** 3) * 0.9:\n raise ValueError()\n if 1. * self.number_lexical_productions > (self.number_nonterminals * self.number_terminals) * 0.9:\n raise ValueError()\n\n # Create the terminals and nonterminals.\n self.terminals = list(dictionary.generateDictionary(self.number_terminals))\n self.nonterminals = [\"S\"]\n for j in xrange(self.number_nonterminals - 1):\n nt = \"NT\" + str(j)\n self.nonterminals.append(nt)\n\n # Create the productions.\n bprods = set()\n while len(bprods) < self.number_binary_productions:\n bprods.add(self.make_bprod())\n\n uprods = set()\n if self.unique_lexicon:\n if len(self.terminals) < len(self.nonterminals):\n raise ValueError()\n i = 0\n for nt in self.nonterminals:\n terminal = self.terminals[i]\n i += 1\n uprods.add((nt, (terminal,)))\n else:\n while len(uprods) < self.number_lexical_productions:\n uprods.add(self.make_uprod())\n\n # Create the grammar.\n for p in uprods:\n bprods.add(p)\n grammar = cfg.ContextFreeGrammar()\n grammar.terminals = set(self.terminals)\n grammar.nonterminals = set(self.nonterminals)\n grammar.productions = bprods\n grammar.start_set = {\"S\"}\n\n if remove_lexical_rules:\n grammar.remove_lexical_rules()\n if trim:\n grammar.trim()\n\n return grammar",
"def main(rules, antecedent_prefix, consequent_prefix, deltas_prefix):\n _main(rules, antecedent_prefix, consequent_prefix, deltas_prefix)",
"def _create_rules(rules, node_rules, node_atrrs):\n for node_attr, node_value in node_atrrs.iteritems():\n if node_attr not in node_rules:\n continue\n for rule in node_rules[node_attr]:\n # if isinstance(rule['from'], REGEX_TYPE) and node_value.startswith('mediumtext'):\n if rule['from'] == node_value:\n rules[node_attr] = rule['to']",
"def make_grammar(self):\n self.construct_terminals()\n self.construct_nonterminals()\n if self.number_preterminal_productions == 0:\n self.make_productions2()\n else:\n self.make_productions3()\n return self._return_grammar()",
"def __init__(self, rules):\n\n self.grammar = defaultdict(list)\n self.word_pos = dict()\n self.pos = set()\n\n for rule in rules:\n rule = rule.rstrip()\n if len(rule) > 0:\n rule = rule.split('->') # split start/end\n left = rule[0].strip()\n right = [(re.sub(r'[^a-zA-Z\\d\\s-]', '', r)).strip().split(' ') for r in rule[1].split('|')]\n self.grammar[left] += right\n\n # extract POS tags\n # pos iff on lhs of rhs without lhs\n # det -> that\n # that -> #\n for left, right in self.grammar.iteritems():\n for r in right:\n for r2 in r:\n if not self.grammar.has_key(r2):\n self.pos.add(left)",
"def clone(self):\n return _libsbml.AlgebraicRule_clone(self)",
"def translate_coding_to_rule(self, rule):\n node = Node(\"\", None, None, None)\n node.code_to_rule(rule, None)\n self.rule = node\n self.human_read = self.rule.visit_easy_read()\n self.polish_notation = self.rule.visit_with_polish_notation()\n self.coding = self.rule.visit_make_coding()\n self.find_needed_premises()\n self.find_conclusions()",
"def update_acc_by_rules(self) -> None:\n for rule, coeff in self.rules.items():\n acc_delta = rule(self) # can't call self.rule\n self.update_acc(acc_delta, coeff)",
"def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)",
"def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)",
"def one_time_rules(self):\n # There is also a hidden sameAs rule in RDF Semantics: if a literal appears in a triple, and another one has\n # the same value, then the triple should be duplicated with the other value.\n literals = self.literal_proxies.lit_to_bnode\n items = ((lt1, lt2) for lt1, lt2 in product(literals, literals) if lt1 != lt2)\n for lt1, lt2 in items:\n try:\n lt1_d = lt1.lit.toPython()\n lt2_d = lt2.lit.toPython()\n if lt1_d == lt2_d:\n # In OWL, this line is simply stating a sameAs for the corresponding BNodes, and then let\n # the usual rules take effect. In RDFS this is not possible, so the sameAs rule is,\n # essentially replicated...\n bn1 = self.literal_proxies.lit_to_bnode[lt1]\n bn2 = self.literal_proxies.lit_to_bnode[lt2]\n for (s, p, o) in self.graph.triples((None, None, bn1)):\n self.graph.add((s, p, bn2))\n except:\n # there may be a problem with one of the python conversions; the rule is imply ignored\n # raise e\n pass",
"def update_rules():\n update_all_rules()\n return \"OK\"",
"def get_canonical_collection(self):\n C = [self.closure([('S1', ['.', self.grammar.S[0]])])] # augment the grammar\n finished = False\n while not finished: # while we add a new state to the collection\n finished = True\n for state in C:\n for symbol in self.grammar.N + self.grammar.E:\n next_state = self.go_to(state, symbol)\n if next_state is not None and next_state not in C:\n C += [next_state]\n finished = False\n return C",
"def _rules_to_trxf_dnf_ruleset(self, rules, label):\n conjunctions = list()\n for rule in rules:\n conjunction = self._rule_to_trxf_conjunction(rule)\n conjunctions.append(conjunction)\n dnf_ruleset = DnfRuleSet(conjunctions, label)\n return dnf_ruleset",
"def canonical(self):\n args = tuple([i.canonical if isinstance(i, Relational) else i for i in self.args])\n if args != self.args:\n r = self.func(*args)\n if not isinstance(r, Relational):\n return r\n else:\n r = self\n if r.rhs.is_number:\n if r.rhs.is_Number and r.lhs.is_Number and r.lhs > r.rhs:\n r = r.reversed\n elif r.lhs.is_number:\n r = r.reversed\n elif tuple(ordered(args)) != args:\n r = r.reversed\n\n LHS_CEMS = getattr(r.lhs, 'could_extract_minus_sign', None)\n RHS_CEMS = getattr(r.rhs, 'could_extract_minus_sign', None)\n\n if isinstance(r.lhs, BooleanAtom) or isinstance(r.rhs, BooleanAtom):\n return r\n\n # Check if first value has negative sign\n if LHS_CEMS and LHS_CEMS():\n return r.reversedsign\n elif not r.rhs.is_number and RHS_CEMS and RHS_CEMS():\n # Right hand side has a minus, but not lhs.\n # How does the expression with reversed signs behave?\n # This is so that expressions of the type\n # Eq(x, -y) and Eq(-x, y)\n # have the same canonical representation\n expr1, _ = ordered([r.lhs, -r.rhs])\n if expr1 != r.lhs:\n return r.reversed.reversedsign\n\n return r",
"def normalize_rule(rule):\n return rule_comment_re.sub('', rule).replace(\n '\\n', ' ').replace('\\r', ' ').strip()",
"def to_normal_form(self) -> \"CFG\":\n if self._normal_form is not None:\n return self._normal_form\n nullables = self.get_nullable_symbols()\n unit_pairs = self.get_unit_pairs()\n generating = self.get_generating_symbols()\n reachables = self.get_reachable_symbols()\n if (len(nullables) != 0 or len(unit_pairs) != len(self._variables) or\n len(generating) !=\n len(self._variables) + len(self._terminals) or\n len(reachables) !=\n len(self._variables) + len(self._terminals)):\n if len(self._productions) == 0:\n self._normal_form = self\n return self\n new_cfg = self.remove_useless_symbols() \\\n .remove_epsilon() \\\n .remove_useless_symbols() \\\n .eliminate_unit_productions() \\\n .remove_useless_symbols()\n cfg = new_cfg.to_normal_form()\n self._normal_form = cfg\n return cfg\n # Remove terminals from body\n new_productions = self._get_productions_with_only_single_terminals()\n new_productions = self._decompose_productions(new_productions)\n cfg = CFG(start_symbol=self._start_symbol,\n productions=set(new_productions))\n self._normal_form = cfg\n return cfg",
"def left_refactor(self,target_rule_name,stop_at_set):\n name_suffix = \".post.{}\".format(target_rule_name)\n\n # Map a rule name X to a set of rules Y where X appears\n # as a first nonterminal in one of Y's options.\n appears_first_in = defaultdict(set)\n for name, rule in self.rules.items():\n for option in rule.as_container():\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(name)\n #print(\"appears first dict\\n{}\\n\\n\".format(appears_first_in))\n\n po = PrintOption()\n po.is_canonical = self.is_canonical\n po.inline_synthetic = False\n candidates = set(self.rules.keys())\n while len(candidates) > 0:\n for A in list(candidates):\n candidates.remove(A)\n if A in stop_at_set:\n continue\n rule = self.rules[A]\n (starts,others,terms,empties) = rule.partition(target_rule_name)\n if len(starts) > 0 and (len(others)+len(terms)+len(empties) == 0):\n #print(\"processing {}\".format(A))\n # Create the new rule.\n new_rule_name = \"{}{}\".format(A,name_suffix)\n # Form alpha1 ... alphaN\n new_options = []\n for option in rule:\n if len(option.as_container()) == 1:\n new_options.append(self.MakeEmpty())\n else:\n assert option.is_container() and (len(option)>1)\n new_options.append(self.MakeSeq(option[1:]))\n self.rules[new_rule_name] = self.MakeChoice(new_options)\n\n # Rewrite A itself.\n self_parts = [self.MakeSymbolName(x) for x in [target_rule_name,new_rule_name]]\n self.rules[A] = self.MakeChoice([self.MakeSeq(self_parts)])\n\n # Update bookkeeping for appears_first_in\n for option in new_options:\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(new_rule_name)\n\n # Replace the old rule everywhere it appears in the first\n # position\n for parent_name in list(appears_first_in[A]):\n if parent_name == A:\n # Already processed above\n continue\n parent = self.rules[parent_name]\n (starts,others,terms,empties) = parent.partition(A)\n new_options = []\n for option in starts:\n parts = []\n parts.append(self.MakeSymbolName(target_rule_name))\n parts.append(self.MakeSymbolName(new_rule_name))\n parts.extend(option.as_container()[1:])\n new_options.append(self.MakeSeq(parts))\n new_options.extend(others+terms+empties)\n self.rules[parent_name] = self.MakeChoice(new_options)\n appears_first_in[A].remove(parent_name)\n appears_first_in[target_rule_name].add(parent_name)\n # Set up transitive closure.\n candidates.add(parent_name)\n\n #print()\n #print()\n #print()\n\n #self.absorb_post(target_rule_name)\n self.remove_unused_rules()"
] |
[
"0.6701039",
"0.574",
"0.55820376",
"0.55024123",
"0.5463086",
"0.54190004",
"0.5369632",
"0.527828",
"0.52666974",
"0.5177244",
"0.51597923",
"0.51496726",
"0.5147174",
"0.51346004",
"0.5035184",
"0.50138915",
"0.50105506",
"0.5006201",
"0.4998683",
"0.49688843",
"0.49423853",
"0.49192858",
"0.49054015",
"0.48591965",
"0.48565",
"0.48439774",
"0.48340586",
"0.48188394",
"0.47984338",
"0.47950578"
] |
0.81821036
|
0
|
Computes the First set for each rule, saving the result on each rule node. Also computes .derives_empty
|
def compute_first(self):
compute_first_sets(self, self.rules)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_first_sets(grammar,rules):\n grammar.reset_first_follow()\n\n names_of_non_terminals = []\n grammar.end_of_text.first_data = set({grammar.end_of_text})\n grammar.empty.first_data = set({grammar.empty})\n for key, rule in rules.items():\n if rule.is_terminal() or rule.is_empty():\n # If X is a terminal, then First(X) is {X}\n # Lazy load it.\n dummy = rule.first()\n elif rule.is_symbol_name():\n names_of_non_terminals.append(key)\n else:\n # rule is a Choice node\n for rhs in rule:\n # If X -> empty is a production, then add Empty\n if rhs.is_empty():\n rule.first_data = set({rhs})\n names_of_non_terminals.append(key)\n\n def lookup(rule):\n return rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n def dynamic_first(rule,depth):\n \"\"\"\n Returns the currently computed approximation to the First set for a\n rule.\n\n The rule is from a Canonical grammar, so a non-terminal can be as\n complex as a Choice over Sequences over symbols that may reference\n other non-terminals. Gather updated First set info for at most\n those first two levels, and use a previous-computed approximation for\n the nonterminals at that second level.\n\n Args:\n rule: the Rule in question\n depth: recursion depth\n\n Returns:\n A new approximation to the First set for the given rule.\n \"\"\"\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))\n\n # Repeat until settling.\n keep_going = True\n while keep_going:\n keep_going = False\n for key in names_of_non_terminals:\n rule = rules[key]\n # Accumulate First items from right-hand sides\n df = dynamic_first(rule,0)\n new_items = df - rule.first()\n if len(new_items) > 0:\n rule.first_data = rule.first().union(new_items)\n keep_going = True",
"def dynamic_first(rule,depth):\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))",
"def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow",
"def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0",
"def get_first():\n for s in TERMINAL_SET:\n # For each terminal, initialize First with itself.\n sym = SYMBOL_DICT[s]\n sym.first_set = set([s])\n\n for s in NON_TERMINAL_SET:\n sym = SYMBOL_DICT[s]\n if sym.is_nullable:\n sym.first_set = set(['null'])\n else:\n sym.first_set = set()\n\n while True:\n first_set_is_stable = True\n for p in PRODUCTION_LIST:\n sym_left = symbol_for_str(p.left)\n if p.right[0] == 'null':\n sym_left.first_set.update(set(['null']))\n continue\n previous_first_set = set(sym_left.first_set)\n\n for s in p.right:\n # For X -> Y..., First(X) = First(X) U First(Y)\n sym_right = symbol_for_str(s)\n sym_left.first_set.update(sym_right.first_set)\n # For X -> Y1 Y2 ... Yi-1 , if Y1...Yi-1 is all nullable\n # Then First(X) = First(X) U First(Y1) U First(Y2) ...\n if sym_right.is_nullable:\n continue\n else:\n break\n\n if previous_first_set != sym_left.first_set:\n first_set_is_stable = False\n\n if first_set_is_stable:\n break",
"def _compute_soffsets(self):\n self.soffsets = [ [] for i in self.doffsets ]\n for idx,dofs in enumerate(self.doffsets):\n for o in dofs:\n self.soffsets[(idx + o) % self.p].append(-o)",
"def first(grammar,phrase):\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n # Map names of nonterminals to the nonterminals themselves\n phrase = [lookup(i) for i in phrase]\n\n result = set()\n for item in phrase:\n we = without_empty(item.first())\n result = result.union(we)\n if not item.derives_empty():\n break\n if derives_empty(grammar.rules,phrase):\n result.add(grammar.empty)\n return result",
"def MinimizeDFA(self, ):\n\n def Split(S):\n \"\"\"This function split a given set according to their\n reaction to input characters.\"\"\"\n # for each char do\n # if c splits S into s1 and s2\n # then return {s1, s2}\n \n # return S\n\n # T <- {Da, {D - Da}}\n # P <- {}\n\n T = [[ID for ID in range(self.NumStates + 1) if ID not in self.AcceptStates],\n self.AcceptStates]\n Set1 = [ID for ID in range(self.NumStates + 1) if ID not in self.AcceptStates]\n if Set1:\n T = [Set1, self.AcceptStates]\n else:\n T = [self.AcceptStates]\n P = list()\n\n # Minimize DFA using the following algorithm:\n # \n # while P != T do\n # P <- T\n # T <- {}\n # for each set p in P do\n # T <- T | Split(p)\n __counter = 0\n while len(P) != len(T):\n if __counter > 10:\n print \"ERROR: loop forever\"\n exit()\n __counter += 1\n \n P = T[:]\n T = list()\n for p in P:\n if len(p) == 1:\n # p has only one member, nothing to split\n T.append(p)\n continue\n # p should not be empty\n assert p\n\n s1 = list()\n s2 = list()\n # main splitting function\n for idx, char in enumerate(rule.ForAllChar()):\n for state in p:\n # state should be a string\n key = str(state) + '_' + char\n if key in self.TransitionMap:\n if self.TransitionMap[key] not in p:\n s2.append(state)\n else:\n s1.append(state)\n else:\n s2.append(state)\n \n if s2 and s1:\n # set splitted. exit the loop to update the main list\n break\n elif idx < len(rule.ForAllChar()) - 1:\n # clear s1 and s2, enter the next round\n del s1[:]\n del s2[:]\n\n if not s2 or not s1:\n # the set is not splitted, so just append p\n T.append(p)\n else:\n # set is splitted into s1 and s2\n T.append(s1)\n T.append(s2)\n\n # Now, create a new Transition Map\n NewTransitionMap = dict()\n for States in T:\n for char in rule.ForAllChar():\n key = str(States[0]) + '_' + char\n if key in self.TransitionMap:\n # Cannot directly copy the destination state, because they\n # already have new ids. have to use the new state id here\n for states in T:\n if self.TransitionMap[key] in states:\n # doesn't matter which id in the set is used, since\n # they all have the same behavior\n # choose first state here\n NewTransitionMap[key] = states[0]\n \n self.TransitionMap = dict(NewTransitionMap.items())\n \n # Modify the accepting State\n NewAcceptStates = set()\n for States in T:\n for state in States:\n if state in self.AcceptStates:\n NewAcceptStates.add(States[0])\n break\n self.AcceptStates = list(NewAcceptStates)\n \n # Modify the starting State\n NewStartStates = set()\n for States in T:\n for state in States:\n if state in self.StartStates:\n NewStartStates.add(States[0])\n break\n self.StartStates = list(NewStartStates)\n\n # for key, value in self.TransitionMap.items():\n # print key, '=>', value\n # print 'Accept =', self.AcceptStates\n # print 'Start =', self.StartStates",
"def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):\n dfas = nonterminal_to_dfas[nonterminal]\n new_first_plans = {}\n first_plans[nonterminal] = None # dummy to detect left recursion\n # We only need to check the first dfa. All the following ones are not\n # interesting to find first terminals.\n state = dfas[0]\n for transition, next_ in state.transitions.items():\n # It's a string. We have finally found a possible first token.\n new_first_plans[transition] = [next_.next_dfa]\n\n for nonterminal2, next_ in state.nonterminal_arcs.items():\n # It's a nonterminal and we have either a left recursion issue\n # in the grammar or we have to recurse.\n try:\n first_plans2 = first_plans[nonterminal2]\n except KeyError:\n first_plans2 = _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal2)\n else:\n if first_plans2 is None:\n raise ValueError(\"left recursion for rule %r\" % nonterminal)\n\n for t, pushes in first_plans2.items():\n new_first_plans[t] = [next_] + pushes\n\n first_plans[nonterminal] = new_first_plans\n return new_first_plans",
"def clearpredicates(self):\n self._preds = []",
"def first_iteration(mat, num_range, sub):\n\t#Creating the tree for the first iteration. \n\ttree = pMatrix.create_tree(mat, num_range, sub)\n\t\n\t#Adding the tree to all_trees. \n\tall_trees.append(tree)\n\t\n\t#Calculating the total number of states in the first iteration.\n\tnum_states = tree.get_num_states()\n\t\n\t#Adding total number of states for the first iteration to all_total_states.\n\tall_total_states.append(num_states)\n\t\n\t#Adding all states to be explored in the first iteration to all_states_explored.\n\tfor st in tree.get_all_states():\n\t\tall_states_explored.append(st)\n\t\t\n\t#Adding super states from first tree to super_states.\n\tfor sp in tree.get_super_states():\n\t\tsuper_states.append(sp)\n\t\n\t#Adding results for first iteration to final list.\n\tall_results.append(pMatrix.main(mat, num_range,sub))",
"def reset(self):\n\n self.cost = {} # record cost value at each iteration\n self.cost_change = {} # record the change of cost items\n self.prim_var = {} # record primal variable values for each iteration\n self.prim_var_change = {} # record the change of primal variable between two consective iterations\n self.dual_var = {} # record dual variable values for each iteration\n self.dual_var_change = {} # record the change of dual variable between any two consective iterations\n self.fea_conditions = {} # record the satisfication of feasiblity conditions at each iteration",
"def compute(self):\n\n self.setd = []\n self.satc = [False for cl in self.soft] # satisfied clauses\n self.solution = None\n self.bb_assumps = [] # backbone assumptions\n self.ss_assumps = [] # satisfied soft clause assumptions\n\n if self.oracle.solve():\n # hard part is satisfiable => there is a solution\n self._filter_satisfied(update_setd=True)\n self._compute()\n\n self.solution = list(map(lambda i: i + 1, filter(lambda i: not self.satc[i], range(len(self.soft)))))\n\n return self.solution",
"def test_heuristic_first_steps(self):\n graph = {n: set(self.deterministic_graph[n]) - set([n])\n for n in self.deterministic_graph}\n print(\"Graph {}:\".format(graph))\n elim_node = min_fill_in_heuristic(graph)\n steps = []\n\n while elim_node is not None:\n print(\"Removing {}:\".format(elim_node))\n steps.append(elim_node)\n nbrs = graph[elim_node]\n\n for u, v in itertools.permutations(nbrs, 2):\n if v not in graph[u]:\n graph[u].add(v)\n\n for u in graph:\n if elim_node in graph[u]:\n graph[u].remove(elim_node)\n\n del graph[elim_node]\n print(\"Graph {}:\".format(graph))\n elim_node = min_fill_in_heuristic(graph)\n\n # check only the first 2 elements for equality\n assert_equals(steps[:2], [6, 5])",
"def generate(self):\n\n\t\tfor datapoint in self.dataSet[:]:\n\t\t\trule, degree = self.makeRule(datapoint)\n\t\t\tself.generatedRules.append((rule, degree))",
"def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)",
"def reset(self):\n self.__sets = []\n self._computed = False",
"def test_heuristic_first_steps(self):\n graph = {n: set(self.deterministic_graph[n]) - set([n])\n for n in self.deterministic_graph}\n deg_heuristic = MinDegreeHeuristic(graph)\n elim_node = deg_heuristic.best_node(graph)\n print(\"Graph {}:\".format(graph))\n steps = []\n\n while elim_node is not None:\n print(\"Removing {}:\".format(elim_node))\n steps.append(elim_node)\n nbrs = graph[elim_node]\n\n for u, v in itertools.permutations(nbrs, 2):\n if v not in graph[u]:\n graph[u].add(v)\n\n for u in graph:\n if elim_node in graph[u]:\n graph[u].remove(elim_node)\n\n del graph[elim_node]\n print(\"Graph {}:\".format(graph))\n elim_node = deg_heuristic.best_node(graph)\n\n # check only the first 5 elements for equality\n assert_equals(steps[:5], [0, 1, 2, 3, 4])",
"def warmup_one_iter(self) -> None:\n self.model.eval()\n for batch_data in self.testloader:\n images, labels = (\n batch_data[0].to(self.device),\n batch_data[1].to(self.device),\n )\n\n # forward + backward + optimize\n loss, outputs = self.criterion(\n model=self.model, images=images, labels=labels\n )\n return None",
"def __init__(self, shapes, pathways, take_first=False, relax_assignments_until=inf,\n relax_crossings_until=inf):\n self.shapes = tuple(sorted(shapes))\n self.pathways = pathways\n self.take_first = take_first # whether to raise after the first solution is found\n self.relax_assignments_until = relax_assignments_until\n self.relax_crossings_until = relax_crossings_until\n self.resources = flatten(pathways)\n\n nlevels = len(self.shapes)\n assert len(self.resources) == nlevels, (self.resources, nlevels, shapes, pathways)\n\n self.stats = { 'ncalls': 0\n , 'nlevels': nlevels\n , 'nsolutions': 0\n , 'npossible_solutions': count_possible_solutions(nlevels)\n , 'nnodes': count_nodes(nlevels)\n }\n\n # Make sure we can access shape definitions by id.\n self.s2shape = shapes\n\n # Give ourselves a way to find the pathway for a given resource.\n self.r2p = {}\n for k,v in pathways.items():\n for val in v:\n self.r2p[val] = k\n\n # And let's maintain a list of segments for each pathway.\n self.segments = {k:[] for k in pathways}\n\n # Maintain indices into shapes and resources for the current node while backtracking.\n self.pairs = [] # pairs of (shape_index, resource_index)\n self.shape_pool = set(range(nlevels))\n self.resource_pool = set(range(nlevels))\n self.siblings = [] # stack of siblings generators\n\n # We'll accumulate solutions into a list.\n self.solutions = []\n\n # Logfile!\n self.logfile = open('problem.log', 'w+')\n self.loglines = 0",
"def first(self, input):\n FirstA = set([])\n\n if input.strip(\"'\") in self.T:\n return {input.strip(\"'\")}\n\n elif input == 'eps':\n return {'eps'}\n\n elif input in self.N:\n for alpha in self.P[input]:\n FirstA |= self.first(alpha)\n\n elif input.strip('[]') in self.N:\n FirstA |= {'eps'} | self.first(input.strip('[]'))\n\n else:\n for alpha in input.split(sep=' '):\n FirstA |= self.first(alpha) - {'eps'}\n if 'eps' not in FirstA:\n break\n\n return FirstA",
"def calc_hessian(self, reuse_first=False):\n \n self.setup()\n \n # Create our 3D dictionary the first time we execute.\n if not self.hessian:\n for name1 in self.param_names:\n self.hessian[name1] = {}\n for name2 in self.param_names:\n self.hessian[name1][name2] = {}\n \n self.hessian_ondiag_case = OrderedDict()\n self.hessian_offdiag_case = OrderedDict()\n\n # Pull stepsizes from driver's parameters\n base_param = OrderedDict()\n stepsize = {}\n for key, item in self._parent.get_parameters().iteritems():\n \n if item.fd_step:\n stepsize[key] = item.fd_step\n else:\n stepsize[key] = self.default_stepsize\n\n # Diagonal terms in Hessian always need base point\n # Usually, we will have saved this when we calculated\n # the gradient.\n if reuse_first:\n base_param = self.base_param\n base_data = self.base_data\n else:\n # Pull initial state from driver's parameters\n for key, item in self._parent.get_parameters().iteritems():\n base_param[key] = item.evaluate()\n \n base_data = self._run_point(base_param)\n \n # Assemble input data\n # Cases : ondiag [fp, fm]\n deltas = [1, -1]\n for param in self.param_names:\n \n pcase = []\n for j_step, delta in enumerate(deltas):\n \n case = base_param.copy()\n case[param] += delta*stepsize[param]\n pcase.append({ 'param': case })\n \n self.hessian_ondiag_case[param] = pcase\n \n # Assemble input data\n # Cases : offdiag [fpp, fpm, fmp, fmm]\n deltas = [[1, 1],\n [1, -1],\n [-1, 1],\n [-1, -1]]\n for i, param1 in enumerate(self.param_names):\n \n offdiag = {}\n for param2 in self.param_names[i+1:]:\n \n pcase = []\n for delta in deltas:\n \n case = base_param.copy()\n case[param1] += delta[0]*stepsize[param1]\n case[param2] += delta[1]*stepsize[param2]\n pcase.append({ 'param': case })\n offdiag[param2] = pcase\n \n self.hessian_offdiag_case[param1] = offdiag\n \n # Run all \"cases\".\n # TODO - Integrate OpenMDAO's concurrent processing capability once it\n # is formalized. 
This operation is inherently paralellizable.\n \n # We don't need to re-run on-diag cases if the gradients were\n # calculated with Central Difference.\n if reuse_first and self.form=='central':\n for key, case in self.hessian_ondiag_case.iteritems():\n \n gradient_case = self.gradient_case[key]\n for ipcase, pcase in enumerate(case):\n \n gradient_ipcase = gradient_case[ipcase]\n pcase['data'] = gradient_ipcase['data'] \n else:\n for case in self.hessian_ondiag_case.values():\n for pcase in case:\n data = self._run_point(pcase['param'])\n pcase['data'] = data\n\n # Off-diag cases must always be run.\n for cases in self.hessian_offdiag_case.values():\n for case in cases.values():\n for pcase in case:\n pcase['data'] = self._run_point(pcase['param'])\n\n \n # Calculate Hessians - On Diagonal\n for key, case in self.hessian_ondiag_case.iteritems():\n \n eps = stepsize[key]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key][key][name] = \\\n diff_2nd_xx(case[0]['data'][name],\n base_data[name],\n case[1]['data'][name], eps)\n \n # Calculate Hessians - Off Diagonal\n for key1, cases in self.hessian_offdiag_case.iteritems():\n \n eps1 = stepsize[key1]\n for key2, case in cases.iteritems():\n \n eps2 = stepsize[key2]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key1][key2][name] = \\\n diff_2nd_xy(case[0]['data'][name],\n case[1]['data'][name],\n case[2]['data'][name],\n case[3]['data'][name],\n eps1, eps2)\n \n # Symmetry\n # (Should ponder whether we should even store it.)\n self.hessian[key2][key1][name] = \\\n self.hessian[key1][key2][name]",
"def test_ds_1d(i, num_bins):\n np.random.seed(2191+i)\n simulated_dataset = simulate_direction(num_bins, ntaxa=47, nsamples=int(360/num_bins), Sigma_trace=1)\n X, K, sigma, mu = simulated_dataset\n y = np.zeros((X.shape[0]*X.shape[1], np.shape(X)[2])) #reformat data for model\n for i in range(len(X)):\n for j in range(len(X[0])):\n y[X.shape[1]*i+j] = X[i,j]\n no_struc = 1\n one_dim = fitModel_1d_util(y)\n for i in range(2):\n print([one_d_AIC(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n #for i in range(2):\n # print([one_d_AWE(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n #print(\"silhouette\")\n #for i in range(len(one_dim[1])):\n # mixing, sigma, delta, Q, Q_edge, edge_mean, mu, likelihoods, iterations = one_dim[1][i]\n # print(silhouette(mixing, sigma, mu, y))\n two_dim = fitModel_2d_util(y)\n for i in range(2):\n print([one_d_AIC(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n print([AIC(two_dim[1][i], y) for i in range(len(two_dim[1]))])\n #one_dim_scores = one_dim[0] #Scores start at 2 bins\n #two_dim_scores = two_dim[0]\n selection = 1 #if selection is negative just assume i'm referring to the 2d case\n return simulated_dataset, one_dim, two_dim, selection",
"def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts",
"def FIRST(L):\n global fi,eps\n R=set()\n eps_appear=False\n for x in L:\n eps_appear=False\n if not x.isTerminal():\n for o in fi[x]:\n if o==eps:\n eps_appear=True\n else:\n R.add(o)\n if eps not in fi[x]:\n break\n elif x!=eps:\n R.add(x)\n break\n else: # x==eps\n eps_appear=True\n if eps_appear:\n R.add(eps)\n if len(R)==0:\n R.add(eps)\n return R",
"def generateRules(singleCovering, decisions):\n tempCovering = tupleToDict(singleCovering)\n tempDecisions = tupleToDict(decisions)\n\n coverDF = pd.DataFrame(tempCovering)\n decisionsDF = pd.DataFrame(tempDecisions)\n\n combinedDF = pd.concat([coverDF, decisionsDF], axis=1)\n\n ruleDF = combinedDF[combinedDF.iloc[:,-1] != 'madhu']\n # ruleDF = ruleDF.drop_duplicates()\n conceptblockDF = ruleDF.copy(deep=True)\n del conceptblockDF['class']\n\n ruleDict = conceptblockDF.T.to_dict().values()\n ruleTuple = dictToTuple(ruleDict)\n\n\n ruleset = set(ruleDF.index.values)\n\n for i in range(len(ruleTuple)):\n listofsets = []\n count = 0\n\n for j in range(len(ruleTuple[i])):\n # collect the cases that are satisfying a rule from the ruleTuple\n listofsets.append(set(combinedDF[combinedDF[ruleTuple[i][j][0]] == ruleTuple[i][j][1]].index.values))\n\n for m in range(len(listofsets)):\n if (len(listofsets) > 1):\n # drop the first condition from the rule\n appendlast = listofsets.pop(0)\n\n # compute the case Numbers thar are satifying the ruleTUple\n u = set.intersection(*listofsets)\n\n if (not u.issubset(ruleset)):\n # Check whether the remaining attributes satisfy the cases\n # if not append the condition to the attribute list\n listofsets.append(appendlast)\n elif(len(ruleTuple[i]) > 1):\n # if yes remove the dropped attribute from the list\n ruleTuple[i].pop(m-count)\n count = count + 1\n\n return list(set([tuple(i) for i in ruleTuple]))",
"def expand_first(grammar,rule):\n result = []\n # Hoist the rule for 'other' nonterminal.\n phrase = rule.as_container()\n first = phrase[0]\n assert first.is_symbol_name() and (first.content != target_rule_name)\n #print(\" elaborating rule for {} \".format(first.content))\n rest = phrase[1:]\n other_rule = self.rules[first.content]\n for other_rhs in other_rule.as_container():\n result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))\n return result",
"def prep_optics(SetofObjects, epsilon):\n\n for j in SetofObjects._index:\n # Find smallest nonzero distance\n SetofObjects._core_dist[j] = np.sort(SetofObjects.data[j,:])[1]\n print(\n 'Core distances and neighborhoods prepped for ' + str(\n SetofObjects._n) + ' points.')",
"def evaluate_dep_type_sets():\n strategies = {\n 'defensive': ['agent', 'advcl', 'parataxis'],\n 'aggressive': ['agent', 'advcl', 'parataxis', 'dep', 'aux', 'ccomp', 'xcomp', 'dobj', 'pobj', 'nsubj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'preconj', 'advmod', 'neg', 'rcmod', 'tmod', 'poss', 'prepc'],\n 'compromise_1': ['agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc'],\n 'compromise_2': ['agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc', 'attr', 'csubj', 'csubjpass', 'number', 'possessive', 'punct', 'ref']\n }\n results = {'classification':{}, 'retrieval':{}}\n\n print '------ CLASSIFICATION EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/tasa/TASA900_dependencies'\n texts, labels = data.read_files(descriptions_path)\n print '> Creating representations..'\n rep = {}\n for strategy in strategies:\n rep[strategy] = []\n metric = graph.GraphMetrics.CLOSENESS\n for i, text in enumerate(texts):\n if i%10==0: print ' ',str(i)+'/'+str(len(texts))\n for strategy in strategies:\n g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])\n d = graph_representation.graph_to_dict(g, metric)\n rep[strategy].append(d)\n g = None # just to make sure. I don't trust this damn garbage collector...\n for strategy in strategies:\n rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])\n print '> Evaluating..'\n for strategy in strategies:\n score = evaluation.evaluate_classification(rep[strategy], labels)\n print ' ', strategy, score\n results['classification'][strategy] = score\n\n data.pickle_to_file(results, 'output/dependencies/types_set_eval_tmp')\n\n print '------ RETRIEVAL EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/air/problem_descriptions_dependencies'\n description_texts, labels = data.read_files(descriptions_path)\n solutions_path = '../data/air/solutions_preprocessed'\n solution_texts, labels = data.read_files(solutions_path)\n solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)\n print '> Creating representations..'\n rep = {}\n for strategy in strategies:\n rep[strategy] = []\n metric = graph.GraphMetrics.EIGENVECTOR\n for i, text in enumerate(description_texts):\n if i%1==0: print ' ',str(i)+'/'+str(len(description_texts))\n full_graph = graph_representation.construct_dependency_network(text)\n for strategy in strategies:\n g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])\n d = graph_representation.graph_to_dict(g, metric)\n rep[strategy].append(d)\n g = None # just to make sure..\n full_graph = None\n #~ if i%100==0: data.pickle_to_file(rep, 'output/dependencies/types_eval_rep_'+str(i))\n for strategy in strategies:\n rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])\n print '> Evaluating..'\n for strategy in strategies:\n score = evaluation.evaluate_retrieval(rep[strategy], solution_vectors)\n print ' ', strategy, score\n results['retrieval'][strategy] = score\n\n pp.pprint(results)\n data.pickle_to_file(results, 'output/dependencies/types_set_eval')\n\n return results",
"def assess_all_solutions_clasically(self):\n all_possible_solutions = list(itertools.product([0, 1], repeat = len(self.adj_matrix)))\n for solution in all_possible_solutions:\n print(solution, self._calculate_cost_once(solution))"
] |
[
"0.70863134",
"0.6661198",
"0.5768406",
"0.5582631",
"0.53950924",
"0.53758353",
"0.52905947",
"0.5282538",
"0.5208242",
"0.515067",
"0.5143861",
"0.51085323",
"0.5101925",
"0.5083233",
"0.5082251",
"0.50380355",
"0.5036741",
"0.5025692",
"0.50147015",
"0.49965972",
"0.49915358",
"0.49841094",
"0.49798736",
"0.4979047",
"0.497895",
"0.49691024",
"0.4933834",
"0.49244246",
"0.4874579",
"0.4855019"
] |
0.7545464
|
0
|
Computes the Follow set for each rule, saving the result on each rule node. Assumes First sets have been computed.
|
def compute_follow(self):
compute_follow_sets(self)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_follow_sets(grammar):\n\n # 1. Place $ in FOLLOW(S), where S is the start symbol and $ is the input\n # right end marker.\n grammar.rules[grammar.start_symbol].follow = set({grammar.end_of_text})\n\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n def process_seq(key,seq,keep_going):\n \"\"\"\n Add to Follow sets by processing the given Seq node.\n\n Args:\n key: Python string name for the production\n seq: a Seq rule for the production\n keep_going: A boolean\n\n Returns:\n True if a Follow set was modified.\n keep_going otherwise\n \"\"\"\n\n # Process indirections through symbols\n seq = [lookup(i) for i in seq]\n\n last_index = len(seq)-1\n for bi in range(0,len(seq)):\n b = seq[bi]\n # We only care about nonterminals in the sequence\n if b.is_terminal() or b.is_empty():\n continue\n\n # If there is a production A -> alpha B beta\n # then everything in First(beta) except Empty is\n # added to Follow(B)\n beta = seq[bi+1:len(seq)]\n first_beta = first(grammar, beta)\n new_items = without_empty(first_beta) - b.follow\n if len(new_items) > 0:\n keep_going = True\n b.follow = b.follow.union(new_items)\n\n # If A -> alpha B, or A -> alpha B beta, where First(beta)\n # contains epsilon, then add Follow(A) to Follow(B)\n if (bi==last_index) or derives_empty(grammar.rules,beta):\n new_items = grammar.rules[key].follow - b.follow\n if len(new_items) > 0:\n keep_going = True\n b.follow = b.follow.union(new_items)\n\n return keep_going\n\n # Iterate until settled\n keep_going = True\n while keep_going:\n keep_going = False\n for key, rule in grammar.rules.items():\n if rule.is_terminal() or rule.is_empty():\n continue\n\n if isinstance(rule,Seq):\n keep_going = process_seq(key,rule,keep_going)\n continue\n\n if rule.is_symbol_name():\n keep_going = process_seq(key,[rule],keep_going)\n continue\n\n # Now process Choice over sequences:\n if isinstance(rule,Choice):\n for seq in [i.as_container() for i in rule]:\n keep_going = process_seq(key,seq,keep_going)",
"def followSet(self):\n FOLLOW = {}\n for A in self.N:\n FOLLOW[A] = set()\n FOLLOW[self.S] |= {'$$'}\n\n old = None\n while old != self._size_of_dict(FOLLOW):\n old = self._size_of_dict(FOLLOW)\n self._calcFollow(FOLLOW)\n\n return FOLLOW",
"def getDirectFollowSets(self, FIRST):\n self.init_follow = {v:set() for v in self.v }\n self.containsFOLLOWOf = set()\n for v in self.v:\n if v == self.np[0][0]: # Starting Production\n self.init_follow[v] = set(['$']) # $ is in follow of 'S' applying rule 1\n for prod in self.g[v]:\n for i in range(len(prod)):\n if prod[i] in self.v and i+1 < len(prod):\n if prod[i+1] in self.t:\n self.init_follow[prod[i]] |= set([prod[i+1]])\n else:\n t = i + 1\n while t < len(prod) and prod[t] in self.nullables_map:\n if self.nullables_map[prod[t]] == True:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]\n break\n t += 1\n if t >= len(prod): # every thing on rhs of prod[i] could produce epsison, rule - 3\n self.containsFOLLOWOf |= set([(prod[i], v)])\n else: #prod[i+1] is a non nullable prod or prod[t] was a terminal\n if prod[t] in self.t:\n self.init_follow[prod[i]] |= set([prod[t]])\n else:\n self.init_follow[prod[i]] |= FIRST[prod[t]]-set(['epsilon'])\n\n elif prod[i] in self.v:\n self.containsFOLLOWOf |= set([(prod[i], v)]) # applying rule 2\n\n #self.containsFOLLOWOf = set([(a, b) for (a, b) in self.containsFOLLOWOf if a != b]) # remove the self loops\n return self.init_follow",
"def _calcFollow(self, FOLLOW):\n for A in self.N:\n for prod in self.P[A]:\n text = prod.split(sep=' ')\n for i in range(len(text) - 1):\n B = text[i].strip('[]')\n succ = text[i + 1]\n\n if B in self.N:\n FOLLOW[B] |= self.first(succ) - {'eps'}\n\n if 'eps' in self.first(succ) and B in self.N:\n FOLLOW[B] |= FOLLOW[A]\n\n if text[-1].strip('[]') in self.N:\n FOLLOW[text[-1].strip('[]')] |= FOLLOW[A]",
"def get_follow():\n for s in NON_TERMINAL_SET:\n sym = symbol_for_str(s)\n sym.follow_set = set()\n\n symbol_for_str('<s>').follow_set.update(set(['#']))\n\n while True:\n follow_set_is_stable = True\n for p in PRODUCTION_LIST:\n sym_left = symbol_for_str(p.left)\n if sym_left.is_terminal():\n continue\n for s in p.right:\n if s == 'null':\n continue\n if s.startswith('P'):\n continue\n if symbol_for_str(s).is_terminal():\n continue\n current_symbol = symbol_for_str(s)\n previous_follow_set = set(current_symbol.follow_set)\n next_is_nullable = True\n for s2 in p.right[p.right.index(s) + 1:]:\n if s2.startswith('P'):\n continue\n # For X -> sYt, Follow(Y) = Follow(Y) U First(t)\n next_symbol = symbol_for_str(s2)\n current_symbol.follow_set.update(next_symbol.first_set)\n if next_symbol.is_nullable:\n continue\n else:\n next_is_nullable = False\n break\n if next_is_nullable:\n # For X -> sYt, if t is nullable, Follow(Y) = Follow(Y) U\n # Follow(X)\n current_symbol.follow_set.update(sym_left.follow_set)\n\n if current_symbol.follow_set != previous_follow_set:\n follow_set_is_stable = False\n\n if follow_set_is_stable:\n break",
"def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0",
"def fetch(self):\n self.initJoints()\n # TODO: change to stack-loop style\n for joint in self.joints:\n rules, sitesFound = self.fetchRulesForNode(joint)\n # save to mapJointRules\n for site in sitesFound:\n if site not in self.joints:\n self.joints.append(site)\n # Save found sites to sorted list.\n self.mapJointRules[joint] = (rules, sorted(sitesFound))",
"def _follow_relation_set(self, rel_expr,\n inverted):\n if not self.context.is_group(rel_expr.type_name):\n raise RelationNameError(rel_expr.type_name,\n 'Expression type is not a relation group.')\n g = self.context.get_group(rel_expr.type_name)\n if inverted == +1:\n with tf.name_scope('follow_group_%s' % rel_expr.type_name):\n return (self.follow(g.subject_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.object_rel)\n else:\n with tf.name_scope('follow_group_%s_inverse' % rel_expr.type_name):\n return (self.follow(g.object_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.subject_rel)",
"def anchors_to_adjacency(set_path, n_proteomes, mailbox_reader):\n frame_list = []\n for idx in range(n_proteomes):\n with mailbox_reader(idx) as file_handle:\n frame_list.append(\n pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n )\n nodes = pd.concat(\n frame_list,\n ignore_index=True,\n )\n del frame_list\n graph = nx.Graph()\n for unused_tuple, subframe in nodes.groupby(\n by=[\"syn.anchor.id\", \"syn.anchor.sub_id\"]\n ):\n ids = subframe[\"member_ids\"]\n n_ids = len(ids)\n graph.add_nodes_from(ids)\n if n_ids > 1:\n edges = combinations(ids, 2)\n graph.add_edges_from(edges, weight=n_ids)\n outpath = set_path / ANCHORS_FILE\n summarypath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_summary.tsv\"\n )\n histpath = outpath.parent / (\n outpath.name[: -len(outpath.suffix)] + \"_hist.tsv\"\n )\n components = [\n c\n for c in sorted(nx.connected_components(graph), key=len, reverse=True)\n if len(c) > 1\n ]\n fh = outpath.open(\"w\")\n fh.write(\"idx\\tcluster_id\\tsize\\tmembers\\n\")\n n_items = 0\n count_list = []\n hash_list = []\n id_list = []\n for i, comp in enumerate(components):\n component = np.sort(pd.Index(list(comp)).to_numpy())\n id_list.append(i)\n size = len(comp)\n count_list.append(size)\n hash_list.append(hash_array(component))\n for node in component:\n fh.write(f\"{n_items}\\t{i}\\t{size}\\t{node}\\n\")\n n_items += 1\n fh.close()\n n_clusts = len(count_list)\n del graph, components\n cluster_counts = pd.DataFrame({\"size\": count_list})\n largest_cluster = cluster_counts[\"size\"].max()\n cluster_hist = (\n pd.DataFrame(cluster_counts.value_counts()).sort_index().reset_index()\n )\n cluster_hist = cluster_hist.set_index(\"size\")\n cluster_hist = cluster_hist.rename(columns={0: \"n\"})\n cluster_hist[\"item_pct\"] = (\n cluster_hist[\"n\"] * cluster_hist.index * 100.0 / n_items\n )\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n cluster_hist[\"cluster_pct\"] = cluster_hist[\"n\"] * 100.0 / n_clusts\n cluster_hist.to_csv(histpath, sep=\"\\t\", float_format=\"%5.2f\")\n clusters = pd.DataFrame(\n {\"anchor.id\": id_list, \"count\": count_list, \"hash\": hash_list}\n )\n clusters.to_csv(summarypath, sep=\"\\t\")\n stats_dict = {\n \"in_anchor\": n_items,\n \"syn.anchors.n\": n_clusts,\n \"syn.anchors.largest\": largest_cluster,\n }\n return stats_dict",
"def compute_first(self):\n compute_first_sets(self, self.rules)",
"def get_sharp_relations_for_sets(follows, set_1, set_2):\n for item_1 in set_1:\n for item_2 in set_2:\n if not get_sharp_relation(follows, item_1, item_2):\n return False\n return True",
"def generate(self):\n\n\t\tfor datapoint in self.dataSet[:]:\n\t\t\trule, degree = self.makeRule(datapoint)\n\t\t\tself.generatedRules.append((rule, degree))",
"def MakeSets(parse, sent_len, ignore_WALL, content_words):\n current_ignored = 0\n link_list = []\n if content_words:\n with open(\"/home/andres/MyOpenCogSources/language-learning/src/parse_evaluator/func_words.txt\", 'r') as ff:\n func_words = ff.readlines()[0].split()\n for link in parse:\n if content_words:\n if (link[1].lower() in func_words or link[3].lower() in func_words):\n current_ignored += 1\n continue\n if ignore_WALL:\n if (link[0] == '0') or (link[2] == str(sent_len) and link[3] == \".\"):\n current_ignored += 1\n continue\n link_list.append([link[0], link[2]])\n\n # using sets for each link evaluates without link direction\n links_set = set(map(frozenset, link_list))\n return links_set, current_ignored",
"def compute_first_sets(grammar,rules):\n grammar.reset_first_follow()\n\n names_of_non_terminals = []\n grammar.end_of_text.first_data = set({grammar.end_of_text})\n grammar.empty.first_data = set({grammar.empty})\n for key, rule in rules.items():\n if rule.is_terminal() or rule.is_empty():\n # If X is a terminal, then First(X) is {X}\n # Lazy load it.\n dummy = rule.first()\n elif rule.is_symbol_name():\n names_of_non_terminals.append(key)\n else:\n # rule is a Choice node\n for rhs in rule:\n # If X -> empty is a production, then add Empty\n if rhs.is_empty():\n rule.first_data = set({rhs})\n names_of_non_terminals.append(key)\n\n def lookup(rule):\n return rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n def dynamic_first(rule,depth):\n \"\"\"\n Returns the currently computed approximation to the First set for a\n rule.\n\n The rule is from a Canonical grammar, so a non-terminal can be as\n complex as a Choice over Sequences over symbols that may reference\n other non-terminals. Gather updated First set info for at most\n those first two levels, and use a previous-computed approximation for\n the nonterminals at that second level.\n\n Args:\n rule: the Rule in question\n depth: recursion depth\n\n Returns:\n A new approximation to the First set for the given rule.\n \"\"\"\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))\n\n # Repeat until settling.\n keep_going = True\n while keep_going:\n keep_going = False\n for key in names_of_non_terminals:\n rule = rules[key]\n # Accumulate First items from right-hand sides\n df = dynamic_first(rule,0)\n new_items = df - rule.first()\n if len(new_items) > 0:\n rule.first_data = rule.first().union(new_items)\n keep_going = True",
"def association_rules(mtv, itemsets, use_observed_frequency=False):\n\n # Since we iterate several itemsets,\n # we may visit the same subsets X,Y several times\n # so we track visited nodes to avoid dublicates\n association_rules_set = set()\n\n cache = {}\n\n # List of all possible rules\n # that come from all subsets of the\n # itemsets\n rules = []\n\n for itemset in itemsets:\n\n singletons = singletons_of_itemset(itemset)\n\n for k in range(len(singletons)):\n choose_X = k + 1\n for comb in combinations(singletons, choose_X):\n\n X = union_of_itemsets(comb)\n\n prob_X = 0\n if use_observed_frequency:\n prob_X = mtv.fr(X)\n else:\n prob_X = cached_query(mtv, X, cache)\n\n Ys = set(singletons) - set(comb)\n\n for i in range(len(Ys)):\n choose_Y = i + 1\n for Y_comb in combinations(Ys, choose_Y):\n\n Y = union_of_itemsets(Y_comb)\n\n if not (X, Y) in association_rules_set:\n association_rules_set.add((X, Y))\n XY = X | Y\n prob_XY = 0\n prob_Y = 0\n if use_observed_frequency:\n prob_XY = mtv.fr(XY)\n prob_Y = mtv.fr(Y)\n else:\n prob_XY = cached_query(mtv, XY, cache)\n prob_Y = cached_query(mtv, Y, cache)\n\n if prob_X > float_precision and prob_Y > float_precision:\n\n conf = prob_XY / prob_X\n lift = conf / prob_Y\n\n rule = AssociationRule()\n rule.X = X\n rule.Y = Y\n rule.confidence = conf\n rule.lift = lift\n rules.append(rule)\n\n\n # Return sorted list, regading the rules as\n # either association or disassociation rules\n # Association rules, descending prob, lift > 1 for true rules\n association_rules = filter(lambda rule: rule.lift > 1, rules)\n\n # Sort association rules descendingly, and cut away lower half\n association_rules.sort(lambda ar1, ar2: ar1.lift < ar2.lift and 1 or -1)\n split = int(math.ceil(len(association_rules)/2.))\n association_rules_high_lift = association_rules[:split]\n association_rules_low_lift = association_rules[split:]\n association_rules_high_lift.sort(lambda ar1, ar2: ar1.confidence < ar2.confidence and 1 or -1)\n association_rules_low_lift.sort(lambda ar1, ar2: ar1.confidence < ar2.confidence and 1 or -1)\n association_rules = association_rules_high_lift + association_rules_low_lift\n\n # Disassociation rules, ascending prob, lift < 1 for true rules\n disassociation_rules = filter(lambda rule: rule.lift < 1, rules)\n # Sort disassociation rules, ascindinglyly, and cut away lower half\n disassociation_rules.sort(lambda ar1, ar2: ar1.lift < ar2.lift and -1 or 1)\n d_split = int(math.ceil(len(disassociation_rules)/2.))\n disassociation_lower_lift = disassociation_rules[:d_split]\n disassociation_higher_lift = disassociation_rules[d_split:]\n disassociation_lower_lift.sort(lambda ar1, ar2: ar1.confidence < ar2.confidence and -1 or 1)\n disassociation_higher_lift.sort(lambda ar1, ar2: ar1.confidence < ar2.confidence and -1 or 1)\n disassociation_rules = disassociation_lower_lift + disassociation_higher_lift\n\n return association_rules, disassociation_rules",
"def postprocessing(net, initial_marking, final_marking, A, B, pairs, loop_one_list):\n label_transition_dict = {}\n for label in loop_one_list:\n label_transition_dict[label] = PetriNet.Transition(label, label)\n net.transitions.add(label_transition_dict[label])\n\n # F L1L\n # Key is specific loop element\n for key, value in A.items():\n if key in B:\n A_without_B = value - B[key]\n B_without_A = B[key] - value\n pair = (A_without_B, B_without_A)\n for pair_try in pairs:\n in_part = pair_try[0]\n out_part = pair_try[1]\n if pair[0].issubset(in_part) and pair[1].issubset(out_part):\n pair_try_place = PetriNet.Place(str(pair_try))\n add_arc_from_to(label_transition_dict[key], pair_try_place, net)\n add_arc_from_to(pair_try_place, label_transition_dict[key], net)\n return net, initial_marking, final_marking",
"def generateFollowings(self):\n for f in self._genericGenerator(self.getFollowings):\n yield f",
"def apply_ruleset(self, ruleset):\n updates = [self._get_lexicon_update(ruleset['lexicon'])]\n updates += ruleset['rules']\n self.apply_updates(updates)",
"def __call__(self, config):\n # loop over the rules sorted according to their dependencies and\n # apply them\n for rule in networkx.topological_sort(self.graph):\n value = rule.apply(config)\n if value is not None:\n set_from_path(config, rule.name, value)",
"def test_find_multitable_conflicting_paths(self):\n ruleset_a = [\n Rule(priority=10, table=0,\n match=Match([('VLAN_VID', 1, None)]),\n instructions=Instructions(dup=goto1)),\n Rule(priority=10, table=0,\n match=Match([('VLAN_VID', 2, None)]),\n instructions=Instructions(dup=goto2)),\n Rule(priority=0, table=0),\n Rule(priority=20, table=1,\n match=Match([('IPV4_DST', 0, 0xFFFFFFFE)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=19, table=1,\n match=Match([('IPV4_DST', 0, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=0, table=1),\n Rule(priority=30, table=2,\n match=Match([('IPV4_DST', 0, None)]),\n instructions=Instructions()),\n Rule(priority=30, table=2,\n match=Match([('IPV4_DST', 1, None)]),\n instructions=Instructions()),\n Rule(priority=0, table=2)\n ]\n\n ruleset_b = [\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 1, None), ('IPV4_DST', 0, None)])),\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 1, None), ('IPV4_DST', 1, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 2, None), ('IPV4_DST', 0, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 2, None), ('IPV4_DST', 1, None)])),\n Rule(priority=0, table=0)\n ]\n\n single_a = to_single_table(ruleset_a)\n single_b = to_single_table(ruleset_b)\n norm_a = normalise(single_a)\n norm_b = normalise(single_b)\n\n result_ab = {\n (ruleset_a[0], ruleset_a[3]): frozenset([(ruleset_b[0],)]),\n (ruleset_a[1], ruleset_a[6]): frozenset([(ruleset_b[2],)])\n }\n result_ba = {\n (ruleset_b[0],): frozenset([(ruleset_a[0], ruleset_a[3])]),\n (ruleset_b[2],): frozenset([(ruleset_a[1], ruleset_a[6])])\n }\n\n equal_ab, diff_ab = check_equal(norm_a, norm_b, diff=True)\n self.assertFalse(equal_ab)\n equal_ba, diff_ba = check_equal(norm_b, norm_a, diff=True)\n self.assertFalse(equal_ba)\n\n paths_ab = find_conflicting_paths(diff_ab, single_a, single_b)\n paths_ba = find_conflicting_paths(diff_ab, single_b, single_a)\n\n self.assertEqual(paths_ab, result_ab)\n self.assertNotEqual(paths_ab, result_ba) # Sanity\n self.assertEqual(paths_ba, result_ba)",
"def _hint_comp2assume(self, hints: List[Hint],\n steps: List[List[Tuple[int, bool, TransType]]],\n first: int) -> Tuple[FrozenSet[FNode],\n FrozenSet[FNode]]:\n assert all(isinstance(h, Hint) for h in hints)\n assert all(isinstance(s, list) for s in steps)\n assert all(len(s) == len(hints) for s in steps)\n assert all(isinstance(s, tuple) for step in steps for s in step)\n assert all(len(s) == 3 for step in steps for s in step)\n assert all(isinstance(s[0], int) for step in steps for s in step)\n assert all(isinstance(s[1], bool) for step in steps for s in step)\n assert all(isinstance(s[2], TransType) for step in steps for s in step)\n assert isinstance(first, int)\n assert first >= 0\n\n if len(hints) == 0:\n return frozenset(), frozenset()\n\n def assign_true(pred: FNode, res: Set[FNode]):\n assert isinstance(pred, FNode)\n assert isinstance(res, set)\n preds = [pred]\n while preds:\n pred = preds.pop()\n if pred.is_and():\n preds.extend(pred.args())\n elif pred.is_not():\n assign_false(pred.arg(0), res)\n elif not pred.is_true():\n assert not pred.is_false()\n res.add(self.cn(pred))\n\n def assign_false(pred: FNode, res: Set[FNode]):\n assert isinstance(pred, FNode)\n assert isinstance(res, set)\n preds = [pred]\n while preds:\n pred = preds.pop()\n if pred.is_or():\n preds.extend(pred.args())\n elif pred.is_not():\n assign_true(pred.arg(0), res)\n elif not pred.is_false():\n assert not pred.is_true()\n if pred.is_lt() or pred.is_le():\n res.add(self.cn(not_rel(self.i_env, pred)))\n else:\n res.add(self.cn(self.i_mgr.Not(pred)))\n\n res_regions_trans: Set[FNode] = set()\n res_assumes: Set[FNode] = set()\n for step_idx, step in enumerate(steps):\n c_time = step_idx + first\n x_step_idx = (step_idx + 1) % len(steps)\n for hint_idx, (hint, (loc_idx, is_ranked, trans_t)) in enumerate(\n zip(hints, step)):\n assert isinstance(hint, Hint)\n assert isinstance(loc_idx, int)\n assert isinstance(trans_t, TransType)\n loc = hint[loc_idx]\n\n assign_true(self.totime(loc.region, c_time), res_regions_trans)\n assign_true(self.totime(loc.assume, c_time), res_assumes)\n if loc.rf is not None:\n if is_ranked:\n assign_true(self.totime(loc.rf.is_ranked, c_time),\n res_regions_trans)\n else:\n assign_false(self.totime(loc.rf.is_ranked, c_time),\n res_regions_trans)\n x_loc_idx = steps[x_step_idx][hint_idx][0]\n assert isinstance(x_loc_idx, int)\n if trans_t == TransType.PROGRESS:\n trans = loc.progress(x_loc_idx)\n elif trans_t == TransType.STUTTER:\n trans = loc.stutterT\n else:\n assert trans_t == TransType.RANKED\n trans = loc.rankT\n assert trans is not None\n assert isinstance(trans, FNode)\n assert not trans.is_false()\n assert trans in self.i_mgr.formulae.values()\n assign_true(self.totime(trans, c_time), res_regions_trans)\n\n assert all(self.cn(p) == p for p in res_regions_trans)\n assert all(self.cn(p) == p for p in res_assumes)\n return frozenset(res_regions_trans), frozenset(res_assumes)",
"def generateAssociationRule(freqSet):",
"def follow_following_followers(self):\n self.logger.log(\"starting follow_following_followers...\")\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n try:\n try:\n followw = perform_with_ran_delay(self.instagram.get_followers, acc, 150, 15,\n delayed=True)\n accountstofollow = followw[\"accounts\"]\n random.shuffle(accountstofollow)\n if len(accountstofollow) > 10:\n accountstofollow = accountstofollow[:10]\n for ac in accountstofollow:\n if not self.is_user_following(ac.identifier):\n self.add_following(ac.identifier)\n self.logger.log(\"following: {}\".format(ac.username))\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n finally:\n sleep(3)",
"def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)",
"def generateRules(singleCovering, decisions):\n tempCovering = tupleToDict(singleCovering)\n tempDecisions = tupleToDict(decisions)\n\n coverDF = pd.DataFrame(tempCovering)\n decisionsDF = pd.DataFrame(tempDecisions)\n\n combinedDF = pd.concat([coverDF, decisionsDF], axis=1)\n\n ruleDF = combinedDF[combinedDF.iloc[:,-1] != 'madhu']\n # ruleDF = ruleDF.drop_duplicates()\n conceptblockDF = ruleDF.copy(deep=True)\n del conceptblockDF['class']\n\n ruleDict = conceptblockDF.T.to_dict().values()\n ruleTuple = dictToTuple(ruleDict)\n\n\n ruleset = set(ruleDF.index.values)\n\n for i in range(len(ruleTuple)):\n listofsets = []\n count = 0\n\n for j in range(len(ruleTuple[i])):\n # collect the cases that are satisfying a rule from the ruleTuple\n listofsets.append(set(combinedDF[combinedDF[ruleTuple[i][j][0]] == ruleTuple[i][j][1]].index.values))\n\n for m in range(len(listofsets)):\n if (len(listofsets) > 1):\n # drop the first condition from the rule\n appendlast = listofsets.pop(0)\n\n # compute the case Numbers thar are satifying the ruleTUple\n u = set.intersection(*listofsets)\n\n if (not u.issubset(ruleset)):\n # Check whether the remaining attributes satisfy the cases\n # if not append the condition to the attribute list\n listofsets.append(appendlast)\n elif(len(ruleTuple[i]) > 1):\n # if yes remove the dropped attribute from the list\n ruleTuple[i].pop(m-count)\n count = count + 1\n\n return list(set([tuple(i) for i in ruleTuple]))",
"def follows(self):\r\n return relationships.Follows(self)",
"def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)",
"def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])",
"def left_refactor(self,target_rule_name,stop_at_set):\n name_suffix = \".post.{}\".format(target_rule_name)\n\n # Map a rule name X to a set of rules Y where X appears\n # as a first nonterminal in one of Y's options.\n appears_first_in = defaultdict(set)\n for name, rule in self.rules.items():\n for option in rule.as_container():\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(name)\n #print(\"appears first dict\\n{}\\n\\n\".format(appears_first_in))\n\n po = PrintOption()\n po.is_canonical = self.is_canonical\n po.inline_synthetic = False\n candidates = set(self.rules.keys())\n while len(candidates) > 0:\n for A in list(candidates):\n candidates.remove(A)\n if A in stop_at_set:\n continue\n rule = self.rules[A]\n (starts,others,terms,empties) = rule.partition(target_rule_name)\n if len(starts) > 0 and (len(others)+len(terms)+len(empties) == 0):\n #print(\"processing {}\".format(A))\n # Create the new rule.\n new_rule_name = \"{}{}\".format(A,name_suffix)\n # Form alpha1 ... alphaN\n new_options = []\n for option in rule:\n if len(option.as_container()) == 1:\n new_options.append(self.MakeEmpty())\n else:\n assert option.is_container() and (len(option)>1)\n new_options.append(self.MakeSeq(option[1:]))\n self.rules[new_rule_name] = self.MakeChoice(new_options)\n\n # Rewrite A itself.\n self_parts = [self.MakeSymbolName(x) for x in [target_rule_name,new_rule_name]]\n self.rules[A] = self.MakeChoice([self.MakeSeq(self_parts)])\n\n # Update bookkeeping for appears_first_in\n for option in new_options:\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(new_rule_name)\n\n # Replace the old rule everywhere it appears in the first\n # position\n for parent_name in list(appears_first_in[A]):\n if parent_name == A:\n # Already processed above\n continue\n parent = self.rules[parent_name]\n (starts,others,terms,empties) = parent.partition(A)\n new_options = []\n for option in starts:\n parts = []\n parts.append(self.MakeSymbolName(target_rule_name))\n parts.append(self.MakeSymbolName(new_rule_name))\n parts.extend(option.as_container()[1:])\n new_options.append(self.MakeSeq(parts))\n new_options.extend(others+terms+empties)\n self.rules[parent_name] = self.MakeChoice(new_options)\n appears_first_in[A].remove(parent_name)\n appears_first_in[target_rule_name].add(parent_name)\n # Set up transitive closure.\n candidates.add(parent_name)\n\n #print()\n #print()\n #print()\n\n #self.absorb_post(target_rule_name)\n self.remove_unused_rules()",
"def update_acc_by_rules(self) -> None:\n for rule, coeff in self.rules.items():\n acc_delta = rule(self) # can't call self.rule\n self.update_acc(acc_delta, coeff)"
] |
[
"0.7028939",
"0.6961307",
"0.68911374",
"0.66436577",
"0.604697",
"0.5467792",
"0.546096",
"0.54508024",
"0.53658897",
"0.532478",
"0.5312309",
"0.52231175",
"0.51560307",
"0.51270264",
"0.51243985",
"0.50737786",
"0.5060376",
"0.50545925",
"0.50366515",
"0.5035921",
"0.4992643",
"0.49841753",
"0.49428928",
"0.49193773",
"0.49083284",
"0.48955745",
"0.48885062",
"0.48782253",
"0.48595157",
"0.48594898"
] |
0.7579764
|
0
|
Emits the internal representation of the grammar to stdout
|
def dump(self):
dump_grammar(self.rules)
print(self.registry)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __init__(self, grammar, trace=...):\n ...",
"def output_grammar():\r\n dummy = Word(alphas.lower(), exact=1)\r\n axis = Or([positive_integer('size'), dummy('identifier')])\r\n shape = squareBracketedExpr( ( delimitedList(axis) ) )('shape')\r\n default = Suppress(Literal('=')) + (number | identifier(''))('default')\r\n output = type_argument + Optional(shape) + identifier + Optional(default)\r\n\r\n return delimitedList(Group(output))",
"def status(self):\n print(\"Parser type: %s\" % self._parser_type)\n if self._parser_type == \"lark\":\n print(\" Employed grammar path: %s\" % self._grammar_path)\n print(\" Options:\")\n print(\" Parser class: %s\" % self._parser.parser_class)\n print(\" Parser: %s\" % self._parser.options.parser)\n print(\" Lexer: %s\" % self._parser.options.lexer)\n print(\" Ambiguity: %s\" % self._parser.options.ambiguity)\n print(\" Start: %s\" % self._parser.options.start)\n print(\" Tree class: %s\" % self._parser.options.tree_class)\n print(\n \" Propagate positions: %s\" % self._parser.options.propagate_positions\n )",
"def output(self, out):\n res = \"# File: \" + out + \"\\n# NFA\\n# Q_ - the set of states\\n\"\n for q in self.states:\n res += q + ' '\n res = res[0:-1]\n res += \"\\n# Sigma_ the alphabet\\n\"\n for a in self.alphabet:\n res += a + ' '\n res = res[0:-1]\n res += '\\n# q_0_ the start state\\n' + self.q_0 + \"\\n# F_ the set of accept states\\n\"\n for f in self.final:\n res += f + ' '\n res = res[0:-1]\n res += \"\\n# delta_ the transition function\\n\"\n for x in self.transition:\n splitted = list(str(x).split(','))\n res += splitted[0] + \" \" + splitted[1]\n for i in self.transition[x]:\n res += \" \" + i\n res += '\\n'\n f = open(out, 'w')\n f.write(res)\n f.close()",
"def serialize(self):\n res = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n self.grammar_elem.attr[\"xmlns\"] = \\\n \"http://relaxng.org/ns/structure/1.0\"\n self.grammar_elem.attr[\"datatypeLibrary\"] = \\\n \"http://www.w3.org/2001/XMLSchema-datatypes\"\n for ns in self.namespaces:\n self.grammar_elem.attr[\"xmlns:\" + self.namespaces[ns]] = ns\n res += self.grammar_elem.start_tag()\n for ch in self.grammar_elem.children:\n res += ch.serialize()\n if not self.no_data:\n res += \"<start>\" + self.root.serialize() + \"</start>\"\n for d in self.defs:\n res += self.defs[d].serialize()\n if self.has_anyxml:\n res += self.anyxml_def\n return res + self.grammar_elem.end_tag()",
"def print_graph() -> None:\n raise NotImplementedError",
"def print_out():\n pass",
"def show(self):\n\n print(self._walk(self, depth=1))",
"def main():\n grammar_file = sys.argv[1]\n cfg_grammar = nltk.data.load(grammar_file)\n\n sentence_file = sys.argv[2]\n sentences = open(sentence_file, \"r\")\n sentences = sentences.readlines()\n\n output_file = sys.argv[3]\n with open(output_file, \"w\") as f:\n\n parser = PCKY(cfg_grammar)\n\n for sentence in sentences:\n tree = parser.parse(sentence)\n print(tree, file=f)",
"def demo_legacy_grammar():\n from nltk.grammar import parse_fcfg\n\n g = parse_fcfg(\"\"\"\n % start S\n S[sem=<hello>] -> 'hello'\n \"\"\")\n print \"Reading grammar: %s\" % g\n print \"*\" * 20\n for reading in batch_interpret(['hello'], g, semkey='sem'):\n syn, sem = reading[0]\n print\n print \"output: \", sem",
"def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()",
"def __repr__(self):\n return u\"<ConcreteGramar: {}, {}, {}>\".format(\n self.grammar, self.parent.tag, self.child.tag)",
"def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out",
"def cli(yamlfile, **kwargs):\n print(RDFGenerator(yamlfile, **kwargs).serialize(**kwargs))",
"def output(self):\r\n self.logic ( )\r\n return self.output",
"def print_output(tree):\n print_value(tree)\n print_tree(tree)",
"def phrase_output(self, phrase):\n Phrase(self.selected_phrase, self.player_guess, False, False)\n print('{}'.format(''.join(self.consol_output)))",
"def pprint(self):\n ParseHub.pprint(self)",
"def pprint(self):\n ParseHub.pprint(self)",
"def cli(yamlfile, **args):\n print(LogicProgramGenerator(yamlfile, **args).serialize(**args))",
"def demo():\n # Create some nonterminals\n S, NP, VP, PP = nonterminals('S, NP, VP, PP')\n N, V, P, Det = nonterminals('N, V, P, Det')\n VP_slash_NP = VP/NP\n\n print 'Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP]\n print ' S.symbol() =>', `S.symbol()`\n print\n\n # Create some CFG Productions\n prods = [CFGProduction(S, [NP, VP]), CFGProduction(PP, [P, NP]),\n CFGProduction(NP, [Det, N]), CFGProduction(NP, [NP, PP]),\n CFGProduction(VP, [V, NP]), CFGProduction(VP, [VP, PP]),\n CFGProduction(Det, ['a']), CFGProduction(Det, ['the']),\n CFGProduction(N, ['dog']), CFGProduction(N, ['cat']), \n CFGProduction(V, ['chased']), CFGProduction(V, ['sat']),\n CFGProduction(P, ['on']), CFGProduction(P, ['in'])]\n\n prod = prods[2]\n print 'A CFG production:', `prod`\n print ' prod.lhs() =>', `prod.lhs()`\n print ' prod.rhs() =>', `prod.rhs()`\n print\n\n # Create and print a CFG\n cfg = CFG(S, prods)\n print 'A CFG grammar:', `cfg`\n print ' cfg.start() =>', `cfg.start()`\n print ' cfg.productions() =>',\n # Use string.replace(...) is to line-wrap the output.\n print `cfg.productions()`.replace(',', ',\\n'+' '*25)\n print\n\n # Create some probabilistic CFG Productions\n A, B, C = nonterminals('A, B, C')\n pcfg_prods = [PCFGProduction(A, [B, B], prob=0.3),\n PCFGProduction(A, [C, B, C], prob=0.7),\n PCFGProduction(B, [B, 'b'], prob=0.5),\n PCFGProduction(B, [C], prob=0.5),\n PCFGProduction(C, ['a'], prob=0.1),\n PCFGProduction(C, ['b'], prob=0.9)] \n \n pcfg_prod = pcfg_prods[2]\n print 'A PCFG production:', `pcfg_prod`\n print ' pcfg_prod.lhs() =>', `pcfg_prod.lhs()`\n print ' pcfg_prod.rhs() =>', `pcfg_prod.rhs()`\n print ' pcfg_prod.prob() =>', `pcfg_prod.prob()`\n print\n\n # Create and print a PCFG\n pcfg = PCFG(S, pcfg_prods)\n print 'A PCFG grammar:', `pcfg`\n print ' pcfg.start() =>', `pcfg.start()`\n print ' pcfg.productions() =>',\n # Use string.replace(...) is to line-wrap the output.\n print `pcfg.productions()`.replace(',', ',\\n'+' '*26)\n print",
"def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"",
"def printOutput(self):\n pass",
"def view(self):\n from devito.ir.iet.visitors import printAST\n return printAST(self)",
"def __repr__(self):\n\n if self.tokenizer is not None:\n tok = self.tokenizer.__repr__()\n else:\n tok = None\n\n if self.grammar is not None:\n gr = self.grammar.__repr__()\n else:\n gr = None\n\n return \"%s(%s, %s)\" % (self.__class__.__name__, tok, gr)",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))",
"def show(self, output_file=\"ast_viz.pdf\"):\n pos = radial_tree_layout(self.graph, self.graph.vertex(0))\n scale = self.graph.num_vertices()\n\n graph_draw(self.graph, vertex_text=self.graph.vp.type, # self.graph.vertex_index, #\n pos=pos, vertex_font_size=scale,\n output=output_file, output_size=(scale * 200, scale * 200))",
"def output_synthesis(self, prefix, outfile):\n if prefix:\n outfile.write(\"#\\n## Component %s\\n\" % prefix[:-1])\n else:\n outfile.write(\"#\\n## Top Component\\n\")\n \n # Define sequences\n for seq in list(self.base_seqs.values()):\n if not seq.dummy:\n if not seq.in_strand:\n warning(\"Sequence %s is defined but never used in a strand. It probably will not be designed.\" % seq.full_name)\n outfile.write(\"sequence %s = %s : %d\\n\" % (seq.full_name, seq.const, seq.length))\n \n # Define super-sequences\n for sup_seq in list(self.sup_seqs.values()):\n if not seq.dummy:\n const = \" \".join(seq.full_name for seq in sup_seq.seqs if not seq.dummy)\n outfile.write(\"sup-sequence %s = %s : %d\\n\" % (sup_seq.full_name, const, sup_seq.length))\n \n # Define strands\n for strand in list(self.strands.values()):\n if not strand.in_structure:\n warning(\"Strand %s is defined but never used in a structure. It may not be designed.\" % strand.full_name)\n const = \" \".join(seq.full_name for seq in strand.seqs if not seq.dummy)\n if strand.dummy:\n dummy = \"[dummy] \"\n else:\n dummy = \"\"\n outfile.write(\"strand %s%s = %s : %d\\n\" % (dummy, strand.full_name, const, strand.length))\n \n # Define structures\n for struct in list(self.structs.values()):\n strands = \" + \".join([strand.full_name for strand in struct.strands])\n outfile.write(\"structure [%dnt] %s = %s : %s\\n\" % (struct.opt, struct.full_name, strands, struct.struct))\n \n # Define kinetics\n for kin in list(self.kinetics.values()):\n inputs = \" + \".join(struct.full_name for struct in kin.inputs)\n outputs = \" + \".join(struct.full_name for struct in kin.outputs)\n outfile.write(\"kinetic [%f /M/s < k < %f /M/s] %s -> %s\\n\" % (kin.low, kin.high, inputs, outputs))",
"def dump(self):\n print(\"Total number of documents/queries processed: \"+str(self.num_documents))\n print(\"Total number of unique expressions per document involved: \"+str(self.num_expressions))\n print(\"Total number of expression instances involved: \" + str(self.global_expressions))\n print(\"Total number of keywords involved: \"+str(self.num_keywords))\n print(\"Total non-unique expressions containing error nodes: \" + str(self.expressions_with_e))\n if len(self.missing_tags) == 0:\n print(\"No unrecognized tags found in expressions\")\n else:\n print(\"Unrecognized tags found in expressions:\")\n for key,value in self.missing_tags.items():\n print(\" \",key,\": \",value)\n if len(self.problem_files) == 0:\n print(\"All files/queries parsed successfully\")\n else:\n print(\"Problem files/queries:\")\n for key,value in self.problem_files.items():\n print(\" \",key,\": \",value)",
"def __repr__(self):\r\n printer = 'text model name: ' + str(self.name) + '\\n'\r\n printer += ' number of words: ' + str(len(self.words)) +'\\n'\r\n printer += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\r\n printer += ' number of stems: ' + str(len(self.stems)) + '\\n'\r\n printer += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\r\n printer += ' number of different punctuations: ' + str(len(self.punctuation)) \r\n return printer"
] |
[
"0.6304773",
"0.6263026",
"0.6086649",
"0.59790736",
"0.5811984",
"0.5795127",
"0.57577735",
"0.57212657",
"0.5668163",
"0.5666324",
"0.56441975",
"0.5634003",
"0.5626058",
"0.5603363",
"0.5550799",
"0.5526594",
"0.55197406",
"0.55122966",
"0.55122966",
"0.5499346",
"0.54946667",
"0.54927003",
"0.5491511",
"0.548875",
"0.54788285",
"0.54787326",
"0.5476402",
"0.54649687",
"0.54451364",
"0.5443418"
] |
0.6871682
|
0
|
Registers an item set, and returns an index such that any item set with the same core will map to the same index. Indices start at 0 and go up by 1.
|
def register_item_set(self,item_set):
assert isinstance(item_set,ItemSet)
core = item_set.kernel_item_ids
if core in self.item_set_core_index:
return self.item_set_core_index[core]
# Register it
result = len(self.item_set_core_index)
self.item_set_core_index[core] = result
return result
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def index(self, pset):\n self._sets.index(pset)",
"def set_indx(self, item):\n for i, s in enumerate(self._data):\n if item in s:\n return i\n return None",
"def register_index(self) -> int:\n return self._parent_node.register_index",
"def get_combo_idx(datasets: List[str], task: str) -> int:\n # Check if data combination registry has been created yet\n combine_dir = os.path.join(get_root(), \"COMBINE\", f\"{task}1\")\n registry = os.path.join(combine_dir, \"data\", \"registry.txt\")\n if not os.path.isfile(registry):\n return -1\n\n # Check if this particular combo is in the data combo registry\n datasets = sorted(set(datasets))\n with open(registry) as f:\n registry = [sorted(set(line.rstrip().split())) for line in f]\n combo_idxs = [i+1 for i, d in enumerate(registry) if d == datasets]\n\n return -1 if len(combo_idxs) == 0 else combo_idxs[0]",
"def get_idx_set(i, sets):\n idxs = []\n for j, set_j in enumerate(sets):\n if i in set_j: idxs.append(j)\n return idxs",
"def test_allocator_single_confilicting_sets():\n indexSets = [set([1]), set([1])]\n allocator = Allocator(indexSets)\n assert len(allocator.slots) == 2\n allocation = allocator.allocate()\n assert not allocation",
"def define_readset(self, rs):\n try: return self.readsets.index(rs)\n except:\n self.readsets.append(rs)\n return len(self.readsets)-1",
"def increment_register_index(self) -> None:\n self._parent_node.increment_register_index()",
"def add_input_set(name, my_session):\n iset = InputSet(name=name)\n my_session.add(iset)\n my_session.commit()\n log.info('Added input set \"%s\"' % name, 'input.py')\n return iset.id",
"def GetIndex(num_set):\n for i in enumerate(num_set1):\n print(i)\n\n for index, num in enumerate(num_set1, start=5):\n print(f'下标是{index}, 对应的数字是{num}')",
"def get_index(self):\r\n i = 0\r\n for container in self.settings[\"containers\"]:\r\n if container[\"name\"] == self.container[\"name\"]:\r\n return i\r\n i += 1",
"def index(self, item: T) -> int:\n pass",
"def index(self, item: T) -> int:\n pass",
"def _init_symbol_tracker(self):\n # Initialize with an empty set\n atoms_indx = {symb: set([]) for symb in self.symbols}\n\n # Populate the sets\n for atom in self.atoms:\n symb = atom.symbol\n atoms_indx[symb].add(atom.index)\n return atoms_indx",
"def test_allocator_single_sets():\n indexSets = [set([3]), set([2]), set([1]), set([0])]\n allocator = Allocator(indexSets)\n assert len(allocator.slots) == 4\n allocation = allocator.allocate()\n LOGGER.info(allocator.report())\n validate_allocation(indexSets, allocation)",
"def index(self, item):\n return self._items.index(item)",
"def __getitem__(self, index):\n assert(isinstance(index,int)), \"Index should be an integer value\"\n assert(0 <= index < len(self.set)), \" Index out of bounds\"\n return self.set[index]",
"def add_item(self, item: str) -> int:\n item = item.encode('utf-8')\n if item not in self.item2idx:\n self.idx2item.append(item)\n self.item2idx[item] = len(self.idx2item) - 1\n return self.item2idx[item]",
"def index_set(self):\n return self._index",
"def index(self, item: Any) -> int:\n index_so_far = 0\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return index_so_far\n index_so_far += 1\n curr = curr.next\n raise ValueError",
"def add_sets(self, key, member):\n return self.redis.sadd(key, member)",
"def index(self, item, **kwargs):\n # type: (Any, dict) -> int\n return list.index(self, self.ref(item), **kwargs)",
"def insert(self, index, pset):\n self._sets.insert(index, pset)",
"def atomIndex(molecule):\n for i in range(len(molecule.index) - 1):\n start = molecule.index[i]\n end = molecule.index[i+1]\n if end - start > 1:\n for I in range(start, end):\n element = molecule.type_list[I] + str(I)\n molecule.setAtoms(I, element = element)\n molecule.sort()\n return molecule",
"def install(self, index_set):\n index_set.indices = map(self.stem, index_set.indices)\n index_set.required_indices = map(self.stem, index_set.required_indices)\n self.unique_target_concepts[index_set.target_concept] = True\n for index in index_set.indices:\n if not index in self.target_concepts.get(index, []):\n self.target_concepts[index] = ([index_set.target_concept] +\n self.target_concepts.get(index, []))\n if not index_set in self.index_sets.get(index, []):\n self.index_sets[index] = [index_set] + self.index_sets.get(index, [])",
"def test_allocator_triple_sets():\n indexSets = [set([3, 1, 4]), set([3, 2, 1]), set([1]), set([0, 1])]\n allocator = Allocator(indexSets)\n assert len(allocator.slots) == 5\n allocation = allocator.allocate()\n LOGGER.info(allocator.report())\n validate_allocation(indexSets, allocation)",
"def slot_for_container(self, table, index):\n\n i = 0\n for t in self.metalist:\n l = len(t)\n if t is table:\n if l == 0 or l <= index:\n return -1\n else:\n i += index\n return i\n else:\n i += l\n return -1",
"def index(self, item):\n return self.__values.index(item)",
"def global_index(self):\n raise NotImplementedError",
"def LALR1(self, max_item_sets=None):\n\n # Part 1. Compute LALR(1) item sets\n\n # Mapping from a core index to an already-discovered item set.\n by_index = dict()\n\n root_item = self.MakeItem(LANGUAGE, self.rules[LANGUAGE][0],0)\n\n # An ItemSet can be found by any of the items in its core.\n # Within an ItemSet, an item maps to its lookahead set.\n\n root_item_set = ItemSet(self, {root_item: LookaheadSet({self.end_of_text})}).close(self)\n by_index[root_item_set.core_index] = root_item_set\n\n item_set_core_ids = set({root_item_set.core_index})\n\n dirty_set = item_set_core_ids.copy()\n keep_going = True\n #while len(dirty_set) > 0:\n while keep_going:\n keep_going = False\n #work_list = dirty_set.copy()\n #dirty_set = set()\n if max_item_sets is not None:\n if len(by_index) > max_item_sets:\n break\n # Sort the work list so we get deterministic ordering, and therefore\n # deterministic itemset core numbering.\n # Go backwards to try to explore the most recently changed items first.\n work_list = sorted(item_set_core_ids, reverse=True)\n for core_index in work_list:\n item_set = by_index[core_index]\n (changed,gotos) = item_set.gotos(self,by_index_memo=by_index)\n keep_going = keep_going | changed\n for (X, item_set_for_X) in gotos:\n if item_set_for_X.core_index not in by_index:\n item_set_core_ids.add(item_set_for_X.core_index)\n by_index[item_set_for_X.core_index] = item_set_for_X\n dirty_set.add(item_set_for_X.core_index)\n keep_going = True\n\n # Now this is a list of item_sets\n sorted_item_set_core_ids = sorted(item_set_core_ids)\n\n # Part 2. Compute the action table and conflicts.\n # Do this as a second pass because it's conceivable that an item set may\n # go from non-accepting to accepting during initial exploration\n # of the item sets.\n\n conflicts = []\n # Maps (item_set.core_index, terminal.reg_info.index) to an Action.\n action_table = dict()\n def addAction(item_set, terminal, action):\n isinstance(item_set, ItemSet) or raiseRE(\"expected ItemSet\")\n terminal.is_terminal() or raiseRE(\"expected terminal: \" + str(terminal))\n isinstance(action,Action) or raiseRE(\"expected action\")\n\n # Use indices, for speed.\n # But also keep the terminal prompting this action.\n action_key = (item_set.core_index,terminal.reg_info.index)\n if action_key not in action_table:\n action_table[action_key] = action\n else:\n prev_action = action_table[action_key]\n if prev_action != action:\n # Record the conflict, and only keep the original.\n conflicts.append(Conflict(item_set,terminal,prev_action,action))\n\n # Maps an item index to its reduction index.\n reduced_items = dict()\n # List, where element i is the Reduce object with index i\n reductions = []\n def make_reduce(item):\n if item.reg_info.index in reduced_items:\n return reductions[reduced_items[item.reg_info.index]]\n index = len(reduced_items)\n reduced_items[item.reg_info.index] = index\n result = Reduce(item,index)\n reductions.append(result)\n return result\n\n # The goto table for noterminals\n # Maps (item_set, nonterminal) to the next item set\n nonterminal_goto = dict()\n\n for item_set_core_id in sorted_item_set_core_ids:\n item_set = by_index[item_set_core_id]\n # Register Reduce and Accept actions\n for item_id, lookahead in item_set.id_to_lookahead.items():\n item = item_set.id_to_item[item_id]\n if item.is_accepting() and lookahead.includesEndOfText():\n addAction(item_set, self.end_of_text, Accept())\n if item.at_end() and (item.lhs.content != LANGUAGE):\n # Register reductions\n for terminal in lookahead:\n 
addAction(item_set, terminal, make_reduce(item))\n\n # Register Shift actions\n for xid, edge in item_set.goto.items():\n X = self.findByIndex(xid)\n item_set_for_X = edge.NextItemSet(self)[1]\n if X.is_terminal():\n # Can't be EndOfText by construction of the goto result\n isinstance(X,Token) or raiseRE(\"internal error: expected a token\")\n addAction(item_set, X, Shift(item_set_for_X))\n elif X.is_symbol_name():\n nonterminal_goto[(item_set.core_index,X)] = item_set_for_X\n\n item_sets = [by_index[i] for i in sorted_item_set_core_ids]\n\n return ParseTable(self,item_sets, action_table, nonterminal_goto, reductions, conflicts)"
] |
[
"0.6214665",
"0.5874514",
"0.58666277",
"0.54375255",
"0.537685",
"0.5279745",
"0.5150319",
"0.51177126",
"0.51001793",
"0.5087408",
"0.5042109",
"0.5026496",
"0.5026496",
"0.49999517",
"0.4973817",
"0.49672085",
"0.49609482",
"0.49571735",
"0.4955578",
"0.49481794",
"0.49374232",
"0.49191487",
"0.49155015",
"0.49102068",
"0.4909297",
"0.48887727",
"0.48867005",
"0.48787314",
"0.4875012",
"0.486165"
] |
0.8432474
|
0
|
Returns the names of rules, in order, based on the preorder traversal starting from the LANGUAGE start node. Assumes the grammar is in canonical form.
|
def preorder(self):
assert self.is_canonical
# Names of visited nodes
visited = set()
# Names of nodes to visit
worklist = [LANGUAGE]
result = []
while len(worklist) > 0:
successors = []
for rule_name in worklist:
if rule_name in visited:
continue
result.append(rule_name)
visited.add(rule_name)
rule = self.rules[rule_name].as_container()
for rhs in rule:
phrase = rhs.as_container()
# Note: this tolerates duplicates among siblings.
successors.extend([x.content for x in phrase if x.is_symbol_name() and x.content not in visited])
worklist = successors
return result
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_rule_names(self):\n return self.rules.keys()",
"def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []",
"def pre_order(root):\n # type: (Nonterminal) -> Generator\n\n def traverse_rule(item, callback):\n yield item\n for el in item.to_symbols:\n yield callback(el)\n\n def traverse_nonterminal(item, callback):\n yield item\n yield callback(item.to_rule)\n\n def traverse_terminal(item, callback):\n yield item\n\n return Traversing.traverse_separated(root, traverse_rule, traverse_nonterminal, traverse_terminal)",
"def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules",
"def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules",
"def fetch_languages(self):\r\n \r\n # tokenize, clean and filter document tokens \r\n toks = [re.sub(r'[^a-zA-Z]','', tok.text.lower().strip()) for tok in self.doc]\r\n toks = [tok for tok in toks if len(tok)>1 and tok in LANGUAGES]\r\n toks = sorted(set(toks))\r\n \r\n return toks",
"def getListOfRules(self):\n return self.model.getListOfRules()",
"def get_ordered_names(self):\n nodes = self.get_ordered_nodes()\n return [node.name for node in nodes if node in self.leaves]",
"def get_all_lexer_names():\n \n # retrieves list of tuples with valid lexer names\n lexer_names = []\n for names_tuple in LEXERNAMES:\n for name_ in names_tuple:\n lexer_names.append(name_)\n return lexer_names",
"def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)",
"def read_rules(grammar_filename):\n rules = set()\n with open(grammar_filename) as f:\n for rule in f.readlines():\n rule = rule.strip()\n log_prob, lhs, rhs = rule.split('\\t')\n rhs = tuple(rhs.split(' '))\n assert rhs and rhs[0], rule\n rules.add(GrammarRule(lhs, rhs, math.log(float(log_prob))))\n return rules",
"def get_all_rules(self):\n\n rules = set()\n for a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)",
"def __init__(self, rules):\n\n self.grammar = defaultdict(list)\n self.word_pos = dict()\n self.pos = set()\n\n for rule in rules:\n rule = rule.rstrip()\n if len(rule) > 0:\n rule = rule.split('->') # split start/end\n left = rule[0].strip()\n right = [(re.sub(r'[^a-zA-Z\\d\\s-]', '', r)).strip().split(' ') for r in rule[1].split('|')]\n self.grammar[left] += right\n\n # extract POS tags\n # pos iff on lhs of rhs without lhs\n # det -> that\n # that -> #\n for left, right in self.grammar.iteritems():\n for r in right:\n for r2 in r:\n if not self.grammar.has_key(r2):\n self.pos.add(left)",
"def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules",
"def grammar_type(rules):\n for rule in rules:\n if len(rule[0]) > len(rule[1]):\n return 0\n for rule in rules:\n if len(rule[0]) != 1 or rule[0] != rule[0].upper():\n return 1\n for rule in rules:\n if rule != (\"S\", \"L\") and \\\n (len(rule[1]) > 2 or\n (len(rule[1]) == 2 and (rule[1][0] != rule[1][0].lower() or rule[1][1] != rule[1][1].upper())) or\n (len(rule[1]) == 1 and rule[1] != rule[1].lower())):\n return 2\n return 3",
"def lexname(self):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n return s.lexname().split('.')[1]",
"def sorted_languages():\n # Python 3: Use functools.cmp_to_key\n def compare(a, b):\n if a.name == u\"English\":\n return -1\n elif b.name == u\"English\":\n return 1\n else:\n return cmp(a, b)\n return sorted(Language.query.all(), cmp=compare)",
"def lexers():\n result = [(lexer[0], lexer[1][0]) for lexer in get_all_lexers()]\n result.sort()\n return result",
"def FindLHSs(self):\n return self.lhslist",
"def read_grammar(grammar_file):\n with open(grammar_file) as cfg:\n lines = cfg.readlines()\n return [x.replace(\"->\", \"\").split() for x in lines]",
"def languages():\n return \", \".join(sorted(\"{}: '{}'\".format(gTTS.LANGUAGES[k], k) for k in gTTS.LANGUAGES))",
"def compute_first_sets(grammar,rules):\n grammar.reset_first_follow()\n\n names_of_non_terminals = []\n grammar.end_of_text.first_data = set({grammar.end_of_text})\n grammar.empty.first_data = set({grammar.empty})\n for key, rule in rules.items():\n if rule.is_terminal() or rule.is_empty():\n # If X is a terminal, then First(X) is {X}\n # Lazy load it.\n dummy = rule.first()\n elif rule.is_symbol_name():\n names_of_non_terminals.append(key)\n else:\n # rule is a Choice node\n for rhs in rule:\n # If X -> empty is a production, then add Empty\n if rhs.is_empty():\n rule.first_data = set({rhs})\n names_of_non_terminals.append(key)\n\n def lookup(rule):\n return rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n def dynamic_first(rule,depth):\n \"\"\"\n Returns the currently computed approximation to the First set for a\n rule.\n\n The rule is from a Canonical grammar, so a non-terminal can be as\n complex as a Choice over Sequences over symbols that may reference\n other non-terminals. Gather updated First set info for at most\n those first two levels, and use a previous-computed approximation for\n the nonterminals at that second level.\n\n Args:\n rule: the Rule in question\n depth: recursion depth\n\n Returns:\n A new approximation to the First set for the given rule.\n \"\"\"\n\n if rule.is_symbol_name():\n return rules[rule.content].first()\n if rule.is_empty():\n return rule.first()\n if rule.is_terminal():\n # The terminal isn't registered in the dictionary.\n return set({rule})\n if isinstance(rule,Choice):\n result = rule.first()\n #for item in [lookup(i) for i in rule]:\n for item in rule:\n result = result.union(dynamic_first(item,depth+1))\n return result\n if isinstance(rule,Seq):\n result = rule.first()\n\n # Only recurse 2 levels deep\n if depth < 2:\n items = [lookup(item) for item in rule]\n else:\n items = rule\n # Add the first sets for Yi if all the earlier items can derive\n # empty. But don't add empty itself from this prefix.\n for item in items:\n from_first = dynamic_first(item,depth+1)\n from_first = without_empty(from_first)\n result = result.union(from_first)\n if not item.derives_empty():\n # Not known to derive empty. Stop here.\n break\n # If all the items derive empty, then add Empty to the first set.\n if all([lookup(item).derives_empty() for item in rule]):\n result = result.union({grammar.empty})\n return result\n raise RuntimeError(\"trying to dynamically compute the First set of: \"\n + str(rule))\n\n # Repeat until settling.\n keep_going = True\n while keep_going:\n keep_going = False\n for key in names_of_non_terminals:\n rule = rules[key]\n # Accumulate First items from right-hand sides\n df = dynamic_first(rule,0)\n new_items = df - rule.first()\n if len(new_items) > 0:\n rule.first_data = rule.first().union(new_items)\n keep_going = True",
"def read_grammar_rules(istream):\n for line in istream:\n line = line.strip()\n if not line:\n continue\n fields = line.split('|||')\n if len(fields) != 3:\n raise ValueError('I expected 3 fields: %s', fields)\n lhs = fields[0].strip()\n\n if lhs[0] == '[':\n lhs = Nonterminal(lhs[1:-1])\n else:\n lhs = Terminal(lhs)\n rhs = fields[1].strip().split()\n new_rhs = []\n for r in rhs:\n if r[0] == '[':\n r = Nonterminal(r[1:-1])\n else:\n r = Terminal(r)\n new_rhs.append(r)\n\n prob = float(fields[2].strip())\n yield Rule(lhs, new_rhs, prob)",
"def get_steps_names(self) -> List[str]:\n return [step.Name.lower() for step in self.Sequence if isinstance(step, Step) and step.Name != \"\"]",
"def preorder_traverse(self):\n \n keys = []\n\n if not self.node:\n return keys\n \n keys.append(self.node.vp.index)\n keys.extend(self.node.left.preorder_traverse())\n keys.extend(self.node.right.preorder_traverse())\n\n return keys",
"def reordered_rules(self):\n return tuple(r for r in self.rules if r.reordered)",
"def train(self):\n self.transitions = {}\n if self.order > len(self.tokens) - 1:\n print(\"Unable to train: Hit upper bound on order, given corpus.\")\n for i in range(0, len(self.tokens) - self.order):\n ngram = tuple(self.tokens[i:i+self.order])\n if ngram in self.transitions:\n self.transitions[ngram].append(self.tokens[i+self.order])\n elif ngram not in self.transitions:\n self.transitions[ngram] = [self.tokens[i+self.order]]",
"def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]",
"def __expandLanguage(self, language):\n\n # Priority Chain:\n # de_DE => de => C (default language) => code\n\n all = [language]\n if \"_\" in language:\n all.append(language[:language.index(\"_\")])\n all.append(\"C\")\n\n return all",
"def GetRulePriorities(self, G, results):\n rules = results.Defrules\n proceededRules = []\n curRulesSet = []\n curSalience = 10000\n\n # Fill the list of input templates, would be unshanged through all processing\n inputTemplates = set()\n for rule in rules:\n for tmp in rule.InputTemplates:\n inputTemplates.add(tmp.TemplateName)\n\n # We have to proceed all rules\n while len(proceededRules) < len(rules):\n outputTemplates = set()\n\n # Tke outputing template names from not proceeded rules\n for rule in rules:\n if not rule in proceededRules:\n for tmp in rule.OutputTemplates:\n outputTemplates.add(tmp.TemplateName)\n\n # Valid input templates at current iteration are the ones which are not produced with existing rules\n validInputs = inputTemplates.difference(outputTemplates)\n\n # Get applicable rules from not proceeded\n curRuleSet = [rule for rule in rules if not rule in proceededRules and len(set([templ.TemplateName for templ in rule.InputTemplates]).difference(validInputs)) == 0]\n\n # Set the proper priority for selected rules\n for rule in curRuleSet:\n rule.Priority = curSalience\n\n # Update proceeded list\n proceededRules += curRuleSet\n\n # Decrease salience\n curSalience -= 1"
] |
[
"0.60005575",
"0.58476996",
"0.55949664",
"0.55783236",
"0.5526621",
"0.5481297",
"0.5413346",
"0.5225151",
"0.5198539",
"0.5161763",
"0.5130225",
"0.51194996",
"0.5093414",
"0.5093071",
"0.5053559",
"0.5044478",
"0.5043568",
"0.5019573",
"0.5008506",
"0.5006204",
"0.49716237",
"0.49714062",
"0.4963662",
"0.49575666",
"0.494576",
"0.49403125",
"0.49012792",
"0.48934376",
"0.4889459",
"0.48726004"
] |
0.7482429
|
0
|
Refactor the grammar, shifting uses of 'target_rule_name' in the first position out to the invoking context. That is, when 'target_rule_name' names nonterminal X, and 'A' is not in 'stop_at_set',
|
def left_refactor(self,target_rule_name,stop_at_set):
name_suffix = ".post.{}".format(target_rule_name)
# Map a rule name X to a set of rules Y where X appears
# as a first nonterminal in one of Y's options.
appears_first_in = defaultdict(set)
for name, rule in self.rules.items():
for option in rule.as_container():
first = option.as_container()[0]
if first.is_symbol_name():
appears_first_in[first.content].add(name)
#print("appears first dict\n{}\n\n".format(appears_first_in))
po = PrintOption()
po.is_canonical = self.is_canonical
po.inline_synthetic = False
candidates = set(self.rules.keys())
while len(candidates) > 0:
for A in list(candidates):
candidates.remove(A)
if A in stop_at_set:
continue
rule = self.rules[A]
(starts,others,terms,empties) = rule.partition(target_rule_name)
if len(starts) > 0 and (len(others)+len(terms)+len(empties) == 0):
#print("processing {}".format(A))
# Create the new rule.
new_rule_name = "{}{}".format(A,name_suffix)
# Form alpha1 ... alphaN
new_options = []
for option in rule:
if len(option.as_container()) == 1:
new_options.append(self.MakeEmpty())
else:
assert option.is_container() and (len(option)>1)
new_options.append(self.MakeSeq(option[1:]))
self.rules[new_rule_name] = self.MakeChoice(new_options)
# Rewrite A itself.
self_parts = [self.MakeSymbolName(x) for x in [target_rule_name,new_rule_name]]
self.rules[A] = self.MakeChoice([self.MakeSeq(self_parts)])
# Update bookkeeping for appears_first_in
for option in new_options:
first = option.as_container()[0]
if first.is_symbol_name():
appears_first_in[first.content].add(new_rule_name)
# Replace the old rule everywhere it appears in the first
# position
for parent_name in list(appears_first_in[A]):
if parent_name == A:
# Already processed above
continue
parent = self.rules[parent_name]
(starts,others,terms,empties) = parent.partition(A)
new_options = []
for option in starts:
parts = []
parts.append(self.MakeSymbolName(target_rule_name))
parts.append(self.MakeSymbolName(new_rule_name))
parts.extend(option.as_container()[1:])
new_options.append(self.MakeSeq(parts))
new_options.extend(others+terms+empties)
self.rules[parent_name] = self.MakeChoice(new_options)
appears_first_in[A].remove(parent_name)
appears_first_in[target_rule_name].add(parent_name)
# Set up transitive closure.
candidates.add(parent_name)
#print()
#print()
#print()
#self.absorb_post(target_rule_name)
self.remove_unused_rules()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def hoist_until(self,target_rule_name,stop_at_set):\n assert self.is_canonical\n\n\n def expand_first(grammar,rule):\n \"\"\"\n When rule is\n Seq(A rest)\n and A -> A1 | ... | An\n Return [ A1 rest | ... | An rest ]\n\n If Ai is epsilon, then its corresponding term is just 'rest'\n \"\"\"\n result = []\n # Hoist the rule for 'other' nonterminal.\n phrase = rule.as_container()\n first = phrase[0]\n assert first.is_symbol_name() and (first.content != target_rule_name)\n #print(\" elaborating rule for {} \".format(first.content))\n rest = phrase[1:]\n other_rule = self.rules[first.content]\n for other_rhs in other_rule.as_container():\n result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))\n return result\n\n\n # Process in reverse order to reduce duplication.\n order_of_attack = list(reversed(self.preorder()))\n keep_going = True\n ancestors = set()\n while keep_going:\n keep_going = False\n #print(\"hoisting worklist: {}\".format(\" \".join(order_of_attack)))\n\n for candidate_rule_name in order_of_attack:\n rule = self.rules[candidate_rule_name]\n #print(\"consider {}\".format(candidate_rule_name))\n (with_target_rule_name,other_rules,term,empty) = rule.partition(target_rule_name)\n #print(\" {} {} {} {}\".format(len(with_target_rule_name),len(other_rules),len(term), len(empty)))\n if len(with_target_rule_name) > 0 and len(other_rules) > 0:\n #print(\" need to hoist\")\n # Need to hoist\n replacement = with_target_rule_name\n for other in other_rules:\n replacement.extend(expand_first(self,other))\n replacement.extend(term)\n replacement.extend(empty)\n self.rules[candidate_rule_name] = self.MakeChoice(replacement)\n #print(\"setting {} to {}\".format(candidate_rule_name,str(self.rules[candidate_rule_name])))\n keep_going = True\n if candidate_rule_name not in stop_at_set:\n ancestors.add(candidate_rule_name)\n\n for candidate_rule_name in order_of_attack:\n for ancestor in ancestors:\n rule = self.rules[candidate_rule_name]\n (with_ancestor,other_rules,term,empty) = rule.partition(ancestor)\n #print(\" {} {} {} {}\".format(len(with_ancestor),len(other_rules),len(term), len(empty)))\n if len(with_ancestor) > 0:\n #print(\" expanding ancestor {}\".format(ancestor))\n replacement = []\n for a_rule in with_ancestor:\n replacement.extend(expand_first(self,a_rule))\n replacement.extend(other_rules)\n replacement.extend(term)\n replacement.extend(empty)\n self.rules[candidate_rule_name] = self.MakeChoice(replacement)\n #print(\"setting {} to {}\".format(candidate_rule_name,str(self.rules[candidate_rule_name])))\n keep_going = True",
"def prepare_parsing(grammar_name):\n grammar = input_format[grammar_name]\n antlr_lexer_class, antlr_parser_class = build_antlr_grammars()\n replacements, action_positions = analyze_grammars(antlr_lexer_class, antlr_parser_class, grammar['files'], grammar['replacements'])\n logger.debug('Replacements are calculated...')\n\n current_workdir = join(grammar_workdir, grammar_name) if grammar_name else grammar_workdir\n makedirs(current_workdir, exist_ok=True)\n if current_workdir not in sys.path:\n sys.path.append(current_workdir)\n\n # Inject actions into the target grammars to help localizing part of the test case that are optional.\n for i, g in enumerate(grammar['files']):\n grammar['files'][i] = join(current_workdir, basename(g))\n inject_optional_actions(g, action_positions[g], grammar['files'][i])\n\n target_lexer_class, target_parser_class, target_listener_class = build_grammars(tuple(grammar['files']), current_workdir, antlr, lang)\n logger.debug('Target grammars are processed...')\n\n if lang == 'java':\n compile_java_sources(target_lexer_class, target_parser_class, target_listener_class, current_workdir)\n input_format[grammar_name].update({'lexer': target_lexer_class, 'parser': target_parser_class, 'listener': target_listener_class, 'replacements': replacements})\n return\n\n class ExtendedTargetParser(target_parser_class):\n \"\"\"\n ExtendedTargetParser is a subclass of the original parser implementation.\n It can trigger state changes that are needed to identify parts of the input\n that are not needed to keep it syntactically correct.\n \"\"\"\n def enter_optional(self):\n self.trigger_listener('enter_optional')\n\n def exit_optional(self):\n self.trigger_listener('exit_optional')\n\n def enterRecursionRule(self, localctx: ParserRuleContext, state: int, ruleIndex: int, precedence: int):\n target_parser_class.enterRecursionRule(self, localctx, state, ruleIndex, precedence)\n self.trigger_listener('recursion_enter')\n\n def pushNewRecursionContext(self, localctx: ParserRuleContext, state: int, ruleIndex: int):\n target_parser_class.pushNewRecursionContext(self, localctx, state, ruleIndex)\n self.trigger_listener('recursion_push')\n\n def unrollRecursionContexts(self, parentCtx: ParserRuleContext):\n target_parser_class.unrollRecursionContexts(self, parentCtx)\n self.trigger_listener('recursion_unroll')\n\n def trigger_listener(self, event):\n for listener in self.getParseListeners():\n if hasattr(listener, event):\n getattr(listener, event)()\n\n def syntax_error_warning(self):\n if self._syntaxErrors:\n logger.warning('%s finished with %d syntax errors. 
This may decrease reduce quality.',\n target_parser_class.__name__, self._syntaxErrors)\n\n class ExtendedTargetListener(target_listener_class):\n \"\"\"\n ExtendedTargetListener is a subclass of the original listener implementation.\n It can trigger state changes that are needed to identify parts of the input\n that are not needed to keep it syntactically correct.\n \"\"\"\n def __init__(self, parser):\n self.parser = parser\n self.current_node = None\n self.island_nodes = []\n self.root = None\n\n def recursion_enter(self):\n assert isinstance(self.current_node, HDDRule)\n node = HDDRule(self.current_node.name)\n self.current_node.add_child(node)\n self.current_node.recursive_rule = True\n self.current_node = node\n\n def recursion_push(self):\n assert len(self.current_node.parent.children) > 0\n\n first_child = self.current_node.parent.children[0]\n self.current_node.parent.remove_child(first_child)\n self.current_node.add_child(first_child)\n\n def recursion_unroll(self):\n assert self.current_node.recursive_rule\n assert len(self.current_node.children) == 1 and self.current_node.name == self.current_node.children[0].name\n children_to_lift = self.current_node.children[0].children\n parent = self.current_node.parent\n if children_to_lift:\n self.current_node.children = []\n self.current_node.add_children(children_to_lift)\n self.current_node.start = self.current_node.children[0].start\n self.current_node.end = self.current_node.children[-1].end\n else:\n parent.remove_child(self.current_node)\n self.current_node = parent\n\n def enterEveryRule(self, ctx:ParserRuleContext):\n name = self.parser.ruleNames[ctx.getRuleIndex()]\n node = HDDRule(name)\n if not self.root:\n self.root = node\n else:\n assert self.current_node\n self.current_node.add_child(node)\n self.current_node = node\n\n def exitEveryRule(self, ctx:ParserRuleContext):\n # If the input contains syntax error, then the last optional block was may not closed.\n while isinstance(self.current_node, HDDQuantifier):\n self.exit_optional()\n\n assert self.current_node.name == self.parser.ruleNames[ctx.getRuleIndex()],\\\n '%s (%s) != %s' % (self.current_node.name, repr(self.current_node), self.parser.ruleNames[ctx.getRuleIndex()])\n\n start, _ = self.tokenBoundaries(ctx.start)\n _, end = self.tokenBoundaries(ctx.stop if ctx.stop else ctx.start)\n self.current_node.start = start\n self.current_node.end = end\n\n if self.current_node.parent:\n self.current_node = self.current_node.parent\n\n def tokenBoundaries(self, token):\n line_breaks = token.text.count('\\n')\n return Position(token.line, token.column), \\\n Position(token.line + line_breaks,\n token.column + len(token.text) if not line_breaks else\n len(token.text) - token.text.rfind('\\n'))\n\n def visitTerminal(self, ctx:TerminalNode):\n name, text = (self.parser.symbolicNames[ctx.symbol.type], ctx.symbol.text) if ctx.symbol.type != Token.EOF else ('EOF', '')\n start, end = self.tokenBoundaries(ctx.symbol)\n\n node = HDDToken(name, text, start=start, end=end)\n self.current_node.add_child(node)\n if name in grammar['islands']:\n self.island_nodes.append(node)\n\n def visitErrorNode(self, ctx:ErrorNode):\n if hasattr(ctx, 'symbol'):\n start, end = self.tokenBoundaries(ctx.symbol)\n self.current_node.add_child(HDDErrorToken(ctx.symbol.text, start=start, end=end))\n\n def enter_optional(self):\n quant_node = HDDQuantifier()\n self.current_node.add_child(quant_node)\n self.current_node = quant_node\n\n def exit_optional(self):\n assert self.current_node.parent, 'Quantifier node 
has no parent.'\n assert self.current_node.children, 'Quantifier node has no children.'\n\n self.current_node.start = self.current_node.children[0].start\n self.current_node.end = self.current_node.children[-1].end\n self.current_node = self.current_node.parent\n\n def print_tree(self):\n if self.root and logger.isEnabledFor(logging.DEBUG):\n logger.debug(self.root.tree_str(current=self.current_node))\n\n input_format[grammar_name].update({'lexer': target_lexer_class, 'parser': ExtendedTargetParser, 'listener': ExtendedTargetListener, 'replacements': replacements})",
"def translate(self, target) -> Symbol:\n pass",
"def expand_first(grammar,rule):\n result = []\n # Hoist the rule for 'other' nonterminal.\n phrase = rule.as_container()\n first = phrase[0]\n assert first.is_symbol_name() and (first.content != target_rule_name)\n #print(\" elaborating rule for {} \".format(first.content))\n rest = phrase[1:]\n other_rule = self.rules[first.content]\n for other_rhs in other_rule.as_container():\n result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))\n return result",
"def match_rule(name, lhs, rhs, wm):\n print(\" ------------ Matching Rule '\", name, \"' --------------\")\n print(\" lhs = \", lhs)\n print(\" rhs = \", rhs)\n print(\" wm = \", wm)\n print()\n def mr_helper(queue, new_wm):\n # Each state in queue is\n # (anteceds-left, subs)\n # print(\" ----- matching rule helper ------\")\n # print(\" queue = \", queue)\n # print(\" new_wm = \", new_wm)\n # print()\n if queue == []: # if the queue is empty, return new_wm\n return new_wm\n else: # else examine the first item in the queue (call it state1)\n state1 = queue[0]\n if state1[0] == []: # If state1 has no antecedents, state1 is a goal state (the rule is matched);\n # call \"execute\" on rhs using the substitution in state1\n derived = execute(state1[1], rhs, new_wm)\n # But don't stop here (this is exhaustive):\n # return mr_helper applied to the rest of the queue, appending\n # whatever new WM assertions \"execute\" returned.\n new_wm = update_wm(new_wm, derived)\n return mr_helper(queue[1:], new_wm)\n elif state1[0] != []: # Else if state1 has antecedents, apply \"match_antecedent\" to them along with wm and the substitutions in state1.\n matched = match_antecedent(state1[0], wm, state1[1])\n if matched == []: # If \"match_antecedent\" returns no new states, return mr_helper on rest of the queue without changing states.\n return mr_helper(queue[1:], new_wm)\n else:\n # Else return mr_helper on the updated queue,\n # i.e., the old one with the new states found\n # by \"match_antecedent\" replacing state1\n queue = matched + queue[1:]\n return mr_helper(queue, new_wm)\n return mr_helper(match_antecedent(lhs, wm ,[]), [])",
"def _normalizeTarget(self):\n\n if not self.target:\n return\n\n # really we should have a urllib.unquote() first, but in practice this\n # format may be rare enough to ignore\n\n # [[__init__]] -> [[init]]\n self.target = self._SPACE_RE.sub(' ', self.target).strip()\n if self.capitalizeTarget:\n self.target = self.target[:1].upper() + self.target[1:]",
"def apply_rule(seq):\n for idx,prop in enumerate(seq.ant):\n\n if prop.conn == \"not\":\n # create a copy of seq (we don't want to mutate it)\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.con = [ not_a.p1 ] + new_seq.con\n # return a list of 3 values with seq2 being None\n # (since there is not split in this rule)\n return [new_seq , None, \"not left\"]\n\n elif prop.conn == \"or\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\"\n assert b_or_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_or_c.p1)\n new_seq2.ant.append(b_or_c.p2)\n # return the obtained sequents and the rule name\n # here we have two sequents since \"or left\"\n # has two sequents at the top\n return [new_seq1 , new_seq2, \"or left\"]\n\n elif prop.conn == \"and\":\n #create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_and_c = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert b_and_c.conn == \"and\"\n # apply the rule\n new_seq.ant.append(b_and_c.p1)\n new_seq.ant.append(b_and_c.p2)\n # return a list of 3 values with seq2 being None\n return [new_seq, None, 'and left']\n\n \n elif prop.conn == \"imp\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_imp_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_imp_c.conn == \"imp\"\n assert b_imp_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_imp_c.p2)\n new_seq2.con.append(b_imp_c.p1)\n # return the obtained sequents and the rule name\n return [new_seq1 , new_seq2, \"implies left\"]\n\n for idx,prop in enumerate(seq.con):\n if prop.conn == \"not\":\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.ant = [ not_a.p1 ] + new_seq.ant\n # return a list of 3 values with seq2 being None\n return [new_seq , None, \"not right\"]\n elif prop.conn == \"or\":\n # create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\" \n # apply the rule\n new_seq.con.append(b_or_c.p1)\n new_seq.con.append(b_or_c.p2)\n # return the obtained sequent and the rule name\n return [new_seq , None, \"or right\"]\n\n elif prop.conn == 'and':\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n b_and_c = new_seq1.con.pop(idx)\n assert b_and_c.conn == \"and\"\n assert b_and_c == new_seq2.con.pop(idx)\n new_seq1.con.append(b_and_c.p1)\n new_seq2.con.append(b_and_c.p2)\n return [new_seq1 , new_seq2, \"and right\"]\n\n elif prop.conn == 'imp':\n new_seq = Sequent(seq.ant[:], seq.con[:])\n b_imp_c = new_seq.con.pop(idx)\n assert b_imp_c.conn == \"imp\"\n new_seq.ant.append(b_imp_c.p1)\n new_seq.con.append(b_imp_c.p2)\n return [new_seq , None, \"implies right\"]",
"def lexEmitter(target, source, env) -> tuple:\n\n sourceBase, sourceExt = os.path.splitext(to_String(source[0]))\n if sourceExt == \".lm\": # If using Objective-C\n target = [sourceBase + \".m\"] # the extension is \".m\".\n\n # With --header-file and ----tables-file, the file to write is defined\n # by the option argument. Extract this and include in the list of targets.\n # NOTE: a filename passed to the command this way is not modified by SCons,\n # and so will be interpreted relative to the project top directory at\n # execution time, while the name added to the target list will be\n # interpreted relative to the SConscript directory - a possible mismatch.\n #\n # These are GNU flex-only options.\n # TODO: recognize --outfile also?\n file_gen_options = [\"--header-file=\", \"--tables-file=\"]\n lexflags = env.subst_list(\"$LEXFLAGS\", target=target, source=source)\n for option in lexflags[0]:\n for fileGenOption in file_gen_options:\n l = len(fileGenOption)\n if option[:l] == fileGenOption:\n # A file generating option is present, so add the\n # file name to the target list.\n file_name = option[l:].strip()\n target.append(file_name)\n\n lexheaderfile = env.subst(\"$LEX_HEADER_FILE\", target=target, source=source)\n if lexheaderfile:\n target.append(lexheaderfile)\n # rewrite user-supplied file string with a node, we need later\n env.Replace(LEX_HEADER_FILE=env.File(lexheaderfile))\n\n lextablesfile = env.subst(\"$LEX_TABLES_FILE\", target=target, source=source)\n if lextablesfile:\n target.append(lextablesfile)\n # rewrite user-supplied file string with a node, we need later\n env.Replace(LEX_TABLES_FILE=env.File(lextablesfile))\n\n return target, source",
"def applyModifiers(markup):\n if( not markup.getScopeUpdated() ):\n markupNew = updateScopes(markup)\n else:\n markupNew = markup.copy()\n targets = markupNew.getConTextModeNodes(\"target\")\n modifiers = markupNew.getConTextModeNodes(\"modifier\")\n for target in targets:\n for modifier in modifiers:\n if( TO.applyRule(modifier,target) ):\n if( markupNew.getVerbose() ):\n print u\"applying relationship between\",modifier,target\n markupNew.add_edge(modifier, target)\n return markupNew",
"def rewrite_goal(self, id, th_name, *, backward=False):\n self.apply_tactic(id, tactic.rewrite(), args=th_name)",
"def target_naming(ty,target):\n de = ty.description(target)\n de = de[0].upper() + de[1:] + \".\"\n return de",
"def convert_grammar(grammar):\n\n # Remove all the productions of the type A -> X B C or A -> B a.\n global RULE_DICT\n unit_productions, result = [], []\n res_append = result.append\n index = 0\n\n for rule in grammar:\n new_rules = []\n if len(rule) == 2 and rule[1][0] != \"'\":\n # Rule is in form A -> X, so back it up for later and continue with the next rule.\n unit_productions.append(rule)\n add_rule(rule)\n continue\n elif len(rule) > 2:\n # Rule is in form A -> X B C [...] or A -> X a.\n terminals = [(item, i) for i, item in enumerate(rule) if item[0] == \"'\"]\n if terminals:\n for item in terminals:\n # Create a new non terminal symbol and replace the terminal symbol with it.\n # The non terminal symbol derives the replaced terminal symbol.\n rule[item[1]] = f\"{rule[0]}{str(index)}\"\n new_rules += [f\"{rule[0]}{str(index)}\", item[0]]\n index += 1\n while len(rule) > 3:\n new_rules.append([f\"{rule[0]}{str(index)}\", rule[1], rule[2]])\n rule = [rule[0]] + [f\"{rule[0]}{str(index)}\"] + rule[3:]\n index += 1\n # Adds the modified or unmodified (in case of A -> x i.e.) rules.\n add_rule(rule)\n res_append(rule)\n if new_rules:\n result.extend(new_rules)\n # Handle the unit productions (A -> X)\n while unit_productions:\n rule = unit_productions.pop()\n if rule[1] in RULE_DICT:\n for item in RULE_DICT[rule[1]]:\n new_rule = [rule[0]] + item\n if len(new_rule) > 2 or new_rule[1][0] == \"'\":\n result.insert(0, new_rule)\n else:\n unit_productions.append(new_rule)\n add_rule(new_rule)\n return result",
"def _validate_rule_target_name(name: str) -> None:\n if not name:\n raise common_exceptions.RuleTargetValidationError(\n \"A `name` field must be supplied.\"\n )",
"def use(target, name):",
"def main(rules, antecedent_prefix, consequent_prefix, deltas_prefix):\n _main(rules, antecedent_prefix, consequent_prefix, deltas_prefix)",
"def derive_new_rule(\n example: ExamplePair, rule: Rule, max_changes=1, *args, **kwargs\n) -> List[Rule]:\n # Determine how pattern in rule.lhs matched token.\n token, lemma = example.token, example.lemma\n m = rule.match(token)\n if not m:\n raise ValueError(\"Provided token did not match left-hand side of rule.\")\n # Start and end indices of subsequences that matched wildcards (*).\n token_placeholder_spans = _captured_groups_as_spans(m)\n # Reverse engineered: token_mask that could have generated rule.\n token_mask = _mask_from_spans(token_placeholder_spans, len(token))\n # Character indexes that are masked by token_mask (have value '1')\n masked_idxs = [i for i in range(len(token)) if token_mask[i] == 1]\n # All possible combinations of masking idxs that will be unmasked.\n derived_rule_candidates = []\n # Token mask hypotheses based on 'flipping' one or n < max_changes idxs in\n # `masked_idxs`.\n for unmasking_hyp in it.chain.from_iterable(\n it.combinations(masked_idxs, n + 1) for n in range(max_changes)\n ):\n new_token_mask = token_mask.copy()\n for idx in unmasking_hyp:\n new_token_mask[idx] = 0\n new_lemma_mask = _infer_corresponding_lemma_mask(token, new_token_mask, lemma)\n if not new_lemma_mask:\n continue\n try:\n new_rule = Rule(\n *_lhs_rhs_patterns(\n _apply_mask(token, new_token_mask), _apply_mask(lemma, new_lemma_mask)\n )\n )\n except UncollapseableWildcardsException:\n continue\n derived_rule_candidates.append((new_rule, new_token_mask))\n # If valid rules found by first strategy, select the rule whose lhs differs\n # the least from the lhs of the original rule.\n return [r for r, _ in derived_rule_candidates]\n\n # # select least different\n # sm = SequenceMatcher(None)\n # sm.set_seq2(rule.lhs)\n # most_sim_dr_i, max_sim = 0, 0\n # for dr_i, (dr_rule, dr_token_mask) in enumerate(derived_rule_candidates):\n # sm.set_seq1(dr_rule.lhs)\n # similarity = sm.ratio()\n # if similarity > max_sim:\n # max_sim = similarity\n # most_sim_dr_i = dr_i\n # best_rule, underlying_token_mask = derived_rule_candidates[most_sim_dr_i]\n # # Fallback to prime rule lhs merged with token_mask according to lhs of current rule.\n # else:\n # new_token_mask = underlying_token_mask = bytearray(\n # map(op.and_, token_mask, example.token_mask)\n # )\n # new_lemma_mask = _infer_corresponding_lemma_mask(token, new_token_mask, lemma)\n # if not new_lemma_mask:\n # return None\n # try:\n # best_rule = Rule(\n # *_lhs_rhs_patterns(\n # _apply_mask(token, new_token_mask), _apply_mask(lemma, new_lemma_mask)\n # )\n # )\n # except UncollapseableWildcardsException:\n # return None\n # if not any(underlying_token_mask):\n # raise TokenMaskExhaustedException(best_rule, example)\n # else:\n # return best_rule",
"def rule(name):\n\tdef df(s, loc, tok):\n\t\trules = []\n\t\tundec_tok = []\n\t\tfor t in tok:\n\t\t\tif isinstance(t, tuple):\n\t\t\t\trules += [t[0]]\n\t\t\t\tundec_tok += [t[1]]\n\t\t\telse:\n\t\t\t\tundec_tok += [t]\n\t\treturn ((name, rules), undec_tok)\n\treturn df",
"def applyModifiers(self):\n if not self.getScopeUpdated():\n self.updateScopes()\n targets = self.getConTextModeNodes(\"target\")\n modifiers = self.getConTextModeNodes(\"modifier\")\n for target in targets:\n for modifier in modifiers:\n if modifier.applyRule(target):\n if self.getVerbose():\n print(\"applying relationship between\", modifier, target)\n\n self.add_edge(modifier, target)",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def get_targets(self):\n\t\n\t\tself.target = []\n\t\ttarget_ins = self.settings['target']\n\t\tfor key in target_ins.keys():\n\t\t\tif key == 'raw':\n\t\t\t\tself.target.append(target_ins[key])\n\t\t\telif key == 'textfile':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,_].+\\s*:\\s*[A-Z].+$',t):\n\t\t\t\t\t\tself.target.append(tuple([i.strip() for i in t.split(':')]))\n\t\t\telif key == 'textfile_rna':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,0-9,_].+\\s*:\\s*[A-Z,a-z].+$',t):\n\t\t\t\t\t\tself.target.append(list([i.strip() for i in t.split(':')]))\n\t\t\t\t\t\trnaseq = self.target[-1][1]\n\t\t\t\t\t\t#---extra substitutions for later\n\t\t\t\t\t\tif 'regex_subs' in self.settings.keys():\n\t\t\t\t\t\t\tfor regex in self.settings['regex_subs']:\n\t\t\t\t\t\t\t\trnaseq = re.sub(regex[0],regex[1],rnaseq)\n\t\t\t\t\t\trnaseq = rnaseq.upper()\n\t\t\t\t\t\trnaseq = re.sub('T','U',rnaseq)\n\t\t\t\t\t\taminoseq = ''.join([dna_mapping[i] for i in [rnaseq[i:i+3] \n\t\t\t\t\t\t\tfor i in range(0,len(rnaseq),3)]])\n\t\t\t\t\t\tself.target[-1][1] = re.sub('T','U',aminoseq)\n\t\t\t\t\t\tself.target[-1] = tuple(self.target[-1])\n\t\t\telse: raise Exception('except: unclear target type')",
"def close(self,grammar):\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n dirty_dict = self.id_to_lookahead.copy()\n while len(dirty_dict) > 0:\n # From the dragon book, 1st ed. 4.38 Sets of LR(1) items construction.\n #\n # For each item [ A -> alpha . B beta, a ] in I,\n # and each production \" B -> gamma \" in the grammar,\n # and each terminal b in FIRST(beta a),\n # add [ B -> . gamma, b ] to I if it is not already there.\n work_list = dirty_dict\n dirty_dict = dict()\n for item_id, lookahead in work_list.items():\n item = self.id_to_item[item_id]\n if item.at_end():\n continue\n B = item.next()\n if not B.is_symbol_name():\n continue\n\n # Compute lookahead. (A fresh LookaheadSet)\n new_item_lookahead = item.rest_lookahead_with_other_lookahead(lookahead)\n\n # Iterate over items [ B -> . B_prod ]\n # for each production B -> B_prod in the grammar.\n for candidate in item.items_generated_by_next():\n candidate_id = candidate.reg_info.index\n if candidate_id not in self.id_to_item:\n la = LookaheadSet(new_item_lookahead)\n self.internal_add(candidate, LookaheadSet(new_item_lookahead))\n dirty_dict[candidate_id] = la\n else:\n if self.id_to_lookahead[candidate_id].merge(new_item_lookahead):\n dirty_dict[candidate_id] = self.id_to_lookahead[candidate_id]\n return self",
"def _step1a(self, word):\n # this NLTK-only rule extends the original algorithm, so\n # that 'flies'->'fli' but 'dies'->'die' etc\n if self.mode == self.NLTK_EXTENSIONS:\n if word.endswith(\"ies\") and len(word) == 4:\n return self._replace_suffix(word, \"ies\", \"ie\")\n\n return self._apply_rule_list(\n word,\n [\n (\"sses\", \"ss\", None), # SSES -> SS\n (\"ies\", \"i\", None), # IES -> I\n (\"ss\", \"ss\", None), # SS -> SS\n (\"s\", \"\", None), # S ->\n ],\n )",
"def make_rule(name, seq_id, action, protocol, src_ip, src_mask, dst_ip,\n dst_mask, sport_operator, sport_low, sport_high,\n dport_operator, dport_low, dport_high, count, log, dscp):\n xml_tring = template.IP_ACL_RULE.format()\n the_config = etree.fromstring(xml_tring)\n remove_unused_tags(the_config, name, action, protocol, src_ip, dst_ip,\n sport_operator, (sport_low, sport_high), dport_operator,\n (dport_low, dport_high), count, log, dscp)\n\n for elt in the_config.iterdescendants():\n if elt.tag == ('seq-id'):\n add_text_to_ele(elt, seq_id)\n elif elt.tag == ('action'):\n add_text_to_ele(elt, action)\n elif elt.tag == ('protocol-type'):\n add_text_to_ele(elt, protocol)\n elif elt.tag == ('src-host-any-sip'):\n add_text_to_ele(elt, src_ip)\n elif elt.tag == ('src-mask'):\n add_text_to_ele(elt, src_mask)\n elif elt.tag == ('dst-host-any-dip'):\n add_text_to_ele(elt, dst_ip)\n elif elt.tag == ('dst-mask'):\n add_text_to_ele(elt, dst_mask)\n elif elt.tag == ('sport'):\n add_text_to_ele(elt, sport_operator)\n elif \"sport-number-eq-neq\" in elt.tag:\n add_text_to_ele(elt, sport_low)\n elif \"sport-number-range-lower\" in elt.tag:\n add_text_to_ele(elt, sport_low)\n elif \"sport-number-range-higher\" in elt.tag:\n add_text_to_ele(elt, sport_high)\n elif elt.tag == ('dport'):\n add_text_to_ele(elt, dport_operator)\n elif \"dport-number-eq-neq\" in elt.tag:\n add_text_to_ele(elt, dport_low)\n elif \"dport-number-range-lower\" in elt.tag:\n add_text_to_ele(elt, dport_low)\n elif \"dport-number-range-higher\" in elt.tag:\n add_text_to_ele(elt, dport_high)\n elif \"dscp\" in elt.tag:\n add_text_to_ele(elt, dscp)\n\n xml_request = etree.tostring(the_config, pretty_print=True)\n return xml_request",
"def main():\n\n # ARGS\n #\n parser = ArgumentParser()\n parser.add_argument('-c', '--configuration',\n help=\"Configuration of the program\",\n required=True)\n\n action_grp = parser.add_mutually_exclusive_group(required=True)\n action_grp.add_argument('-l','--learn',\n help=\"Suggest new anime rules that are found in source directory\",\n action=\"store_true\")\n action_grp.add_argument('-e', '--execute',\n help=\"Move anime according to stored rules\",\n action=\"store_true\")\n action_grp.add_argument('-s', '--show',\n help=\"Show all stored rule\",\n action=\"store_true\")\n action_grp.add_argument('-d', '--delete',\n help=\"Try to delete rule by pattern\",\n action=\"store\")\n action_grp.add_argument('--cleanup',\n help=\"Try to remove old rules that aren't matched since a while (according to conf.)\",\n action=\"store\")\n\n args = parser.parse_args()\n\n # CONFIG FILE\n #\n cfg = Config(path=args.configuration)\n init(config=cfg)\n\n # LOCALE\n #\n if sys.stdout.encoding is None:\n print(\"Encoding for output seems missing... \", file=sys.stderr)\n \"You should set env variable PYTHONIOENCODING=UTF-8. \"\n \"Example: running 'export PYTHONIOENCODING=UTF-8' before calling this program\"\n exit(1)\n\n # DIRECTORY\n #\n if not os.path.exists(cfg.src_dir):\n raise Exception(\"The source directory '%s' doesn't exist, check your config.\" % cfg.src_dir)\n if not os.path.isdir(cfg.src_dir):\n raise Exception(\"The source directory '%s' isn't a directory, check your config.\" % cfg.src_dir)\n\n if not os.path.exists(cfg.tgt_dir):\n raise Exception(\"The target directory '%s' doesn't exist, check your config.\" % cfg.tgt_dir)\n if not os.path.isdir(cfg.tgt_dir):\n raise Exception(\"The target directory '%s' isn't a directory, check your config.\" % cfg.tgt_dir)\n\n # PID LOCK\n #\n pid = str(os.getpid())\n\n if os.path.isfile(cfg.lock_file):\n if cfg.verbose:\n print(\"Lock file found (%s), stopping program...\" % cfg.lock_file)\n sys.exit()\n else:\n if cfg.verbose:\n print(\"Starting operations...\")\n print(\"Creating lock file (%s)\" % cfg.lock_file)\n with open(cfg.lock_file, 'w') as f:\n f.write(pid)\n\n # EXIT HANDLER\n #\n remote = None\n\n def handler(signum=None, frame=None):\n print(\"Exiting...\")\n print(remote)\n\n if remote.process is not None:\n try:\n remote.process.terminate()\n except:\n print(\"Operation stopped\")\n\n os.unlink(cfg.lock_file)\n exit(0)\n\n # signal.SIGHUP, signal.SIGQUIT\n for sig in [signal.SIGTERM, signal.SIGINT]:\n signal.signal(sig, handler)\n\n try:\n if args.learn:\n # learning new rules\n learn = Learn(config=cfg)\n\n animes = learn.find_distinct_names()\n print(\"Searching new animes... 
%s candidates !\" % len(animes))\n\n for anime in animes:\n if learn.exist(anime):\n print(\"Ignored (exist): %s\" % anime)\n else:\n learn.suggest_add_name(anime)\n\n elif args.execute:\n # Applying rules\n execute = Execute(config=cfg)\n\n animes = execute.find_all()\n\n for anime in animes:\n execute.apply(anime)\n\n elif args.show:\n # Show all stored rules\n show = Show(config=cfg)\n show.show_all()\n\n elif args.delete:\n # Removing rule by pattern\n remove = Remove(config=cfg)\n\n print(\"Trying to remove rule (pattern='%s')\" % args.delete)\n success = remove.remove(pattern=args.delete)\n\n if success:\n print(\"Rule removed...\")\n else:\n print(\"Rule not found !\")\n\n elif args.cleanup:\n # Cleaning up old rules\n remove = Remove(config=cfg)\n\n print(\"Cleaning rules older than %s days...\" % cfg.rule_cleanup_days)\n success = remove.cleanup(cfg.rule_cleanup_days)\n\n else:\n # (No actions)\n print(\"You haven't asked any action... Printing Help.\")\n parser.print_help()\n\n except:\n print(\"Fatal error\")\n traceback.print_exc()\n\n if os.path.isfile(cfg.lock_file):\n if cfg.verbose:\n print(\"Removing lock file (%s)\" % cfg.lock_file)\n\n os.unlink(cfg.lock_file)\n\n exit(0)",
"def refactor_post(self,post_name):\n for name in list(self.rules):\n related_post = \"{}.post.{}\".format(name,post_name)\n if related_post in self.rules:\n parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]\n self.rules[name] = self.MakeChoice([self.MakeSeq(parts)])",
"def __init__(self, walker, target, target_index=0):\n super(MoveWalkerToTarget, self).__init__(walker)\n self._target = target\n self._target_id = target_index",
"def dedup_rhs(self,inline_stop=set(),verbose=False):\n\n # Map an object index to the nonterminal that first defines it.\n index_to_name = dict()\n # Map a rule name to the rule name it should be replaced by.\n replacement = dict()\n\n def process_replacement(grammar,name,replacement_dict):\n # Update this rule with any scheduled replacements.\n rule = self.rules[name]\n changed_rule = False\n new_options = []\n for option in rule.as_container():\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.append(self.MakeSymbolName(replacement[x.content]))\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[name] = self.MakeChoice(new_options)\n\n for A in reversed(self.preorder()):\n if A not in inline_stop:\n A_rule = self.rules[A]\n A_index = A_rule.reg_info.index\n if verbose:\n print(\" {} {} \".format(A,A_index))\n if A_index in index_to_name:\n if verbose:\n print(\"Replace {} with {}\".format(A,index_to_name[A_index]))\n replacement[A] = index_to_name[A_index]\n else:\n index_to_name[A_index] = A\n process_replacement(self,A,replacement)\n\n\n for A in self.preorder():\n process_replacement(self,A,replacement)\n\n self.remove_unused_rules()",
"def toolset_from_grammar():\n ### <toolset>\n def replace_by_space(node):\n node.value = ' '\n \n \n return locals().copy()",
"def transformed_retrace(\n q_tm1: Array,\n q_t: Array,\n a_tm1: Array,\n a_t: Array,\n r_t: Array,\n discount_t: Array,\n pi_t: Array,\n mu_t: Array,\n lambda_: float,\n eps: float = 1e-8,\n stop_target_gradients: bool = True,\n tx_pair: TxPair = IDENTITY_PAIR,\n) -> Array:\n chex.assert_rank([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],\n [2, 2, 1, 1, 1, 1, 2, 1])\n chex.assert_type([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],\n [float, float, int, int, float, float, float, float])\n\n pi_a_t = base.batched_index(pi_t, a_t)\n c_t = jnp.minimum(1.0, pi_a_t / (mu_t + eps)) * lambda_\n target_tm1 = transformed_general_off_policy_returns_from_action_values(\n tx_pair, q_t, a_t, r_t, discount_t, c_t, pi_t, stop_target_gradients)\n q_a_tm1 = base.batched_index(q_tm1, a_tm1)\n return target_tm1 - q_a_tm1",
"def process_swrl_rule(self, rule_name, data):\n self.ensure_is_new_name(rule_name)\n\n type_object = self.get_named_object(data, \"isA\")\n\n # TODO find out what Imp actually means and whether it is needed in the yaml-source at all\n assert type_object is Imp\n\n rule_src = data[\"rule_src\"]\n\n # create the instance\n new_rule = type_object()\n new_rule.set_as_rule(rule_src)\n self.rules.append(new_rule)\n\n self.name_mapping[rule_name] = new_rule"
] |
[
"0.63939667",
"0.5259394",
"0.5166907",
"0.5160086",
"0.51084006",
"0.5063297",
"0.5055394",
"0.50120264",
"0.49730772",
"0.4949256",
"0.49390268",
"0.48663703",
"0.4849125",
"0.48239273",
"0.47951698",
"0.47782853",
"0.4711918",
"0.46807438",
"0.46658933",
"0.46512583",
"0.46377835",
"0.46169358",
"0.46047923",
"0.4603427",
"0.46002355",
"0.45951346",
"0.45896277",
"0.45870814",
"0.45794427",
"0.45627254"
] |
0.71751463
|
0
|
If two nonterminals have the same right hand side, combine them. Don't combine any rules named in inline_stop.
|
def dedup_rhs(self,inline_stop=set(),verbose=False):
# Map an object index to the nonterminal that first defines it.
index_to_name = dict()
# Map a rule name to the rule name it should be replaced by.
replacement = dict()
def process_replacement(grammar,name,replacement_dict):
# Update this rule with any scheduled replacements.
rule = self.rules[name]
changed_rule = False
new_options = []
for option in rule.as_container():
changed_parts = False
parts = []
for x in option.as_container():
if x.is_symbol_name() and x.content in replacement:
parts.append(self.MakeSymbolName(replacement[x.content]))
changed_parts = True
changed_rule = True
else:
parts.append(x)
new_options.append(self.MakeSeq(parts) if changed_parts else option)
if changed_rule:
self.rules[name] = self.MakeChoice(new_options)
for A in reversed(self.preorder()):
if A not in inline_stop:
A_rule = self.rules[A]
A_index = A_rule.reg_info.index
if verbose:
print(" {} {} ".format(A,A_index))
if A_index in index_to_name:
if verbose:
print("Replace {} with {}".format(A,index_to_name[A_index]))
replacement[A] = index_to_name[A_index]
else:
index_to_name[A_index] = A
process_replacement(self,A,replacement)
for A in self.preorder():
process_replacement(self,A,replacement)
self.remove_unused_rules()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_cnf(self):\n nonterm = set(self.nonterminal)\n term = set(self.terminal)\n\n rules = list(self.rules)\n cnf = set()\n\n # STEP 1: eliminate nonsolitary terminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 1:\n rhs_list = list(rhs)\n for j in range(len(rhs_list)):\n x = rhs_list[j]\n if x in term: # found nonsolitary terminal\n new_nonterm = 'NT_{}'.format(x)\n new_nonterm_rule = GrammarRule(new_nonterm, (x,), 0.0)\n\n if new_nonterm not in nonterm:\n nonterm.add(new_nonterm)\n cnf.add(new_nonterm_rule)\n else:\n assert new_nonterm_rule in cnf\n rhs_list[j] = new_nonterm\n rhs = tuple(rhs_list)\n rules[i] = GrammarRule(lhs, rhs, log_prob)\n\n # STEP 2: eliminate rhs with more than 2 nonterminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 2:\n assert all(x in nonterm for x in rhs), rule\n current_lhs = lhs\n for j in range(len(rhs) - 2):\n new_nonterm = 'BIN_\"{}\"_{}'.format(\n '{}->{}'.format(lhs, ','.join(rhs)), str(j))\n assert new_nonterm not in nonterm, rule\n nonterm.add(new_nonterm)\n cnf.add(\n GrammarRule(current_lhs,\n (rhs[j], new_nonterm),\n log_prob if j == 0 else 0.0))\n current_lhs = new_nonterm\n cnf.add(GrammarRule(current_lhs, (rhs[-2], rhs[-1]), 0.0))\n else:\n cnf.add(rule)\n\n return Grammar(cnf)",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def one_time_rules(self):\n # There is also a hidden sameAs rule in RDF Semantics: if a literal appears in a triple, and another one has\n # the same value, then the triple should be duplicated with the other value.\n literals = self.literal_proxies.lit_to_bnode\n items = ((lt1, lt2) for lt1, lt2 in product(literals, literals) if lt1 != lt2)\n for lt1, lt2 in items:\n try:\n lt1_d = lt1.lit.toPython()\n lt2_d = lt2.lit.toPython()\n if lt1_d == lt2_d:\n # In OWL, this line is simply stating a sameAs for the corresponding BNodes, and then let\n # the usual rules take effect. In RDFS this is not possible, so the sameAs rule is,\n # essentially replicated...\n bn1 = self.literal_proxies.lit_to_bnode[lt1]\n bn2 = self.literal_proxies.lit_to_bnode[lt2]\n for (s, p, o) in self.graph.triples((None, None, bn1)):\n self.graph.add((s, p, bn2))\n except:\n # there may be a problem with one of the python conversions; the rule is imply ignored\n # raise e\n pass",
"def __or__(self, right_rule):\n self.__subrules.append(right_rule)\n return self",
"def product_2(m1, m2):\r\n return make_mono_admissible_2(list(m1) + list(m2))",
"def test_chained_right(self):\n n1, n2, n3 = Node('a'), Node('b'), Node('c')\n result = n1 | n2 * 'foo' | n3\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [Edge(n2, n3, 'foo')])\n self.assertEqual(n3.ein, [Edge(n2, n3, 'foo')])",
"def postfix_filter(self):\n s_options_for_ending = set()\n for s in self.s_options_total:\n a_line = a_lines[s]\n b_line = b_lines[s]\n if a_line.endswith(b_line) or b_line.endswith(a_line):\n s_options_for_ending.add(s)\n\n return s_options_for_ending",
"def __ror__(self, other):\n return whitespaces.CURRENT.normalize(other) | self",
"def combine_expression(self, connector, sub_expressions):\n lhs, rhs = sub_expressions\n if connector == '%%':\n return 'MOD(%s)' % ','.join(sub_expressions)\n elif connector == '&':\n return 'BAND(%s)' % ','.join(sub_expressions)\n elif connector == '|':\n return 'BOR(%s)' % ','.join(sub_expressions)\n elif connector == '^':\n return 'POWER(%s)' % ','.join(sub_expressions)\n elif connector == '<<':\n return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}\n elif connector == '>>':\n return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}\n return super().combine_expression(connector, sub_expressions)",
"def test_combine_multiple_or(self):\n inv_search = 'author:\"ellis, j*\" and (title:report or keyword:\"cross section\")'\n spi_search = 'find a j ellis and (t report or k \"cross section\")'\n self._compare_searches(inv_search, spi_search)",
"def _process_last(self, first, second):\n if not self.can_combine(first, second):\n # no combining\n self.combined.append(first)\n self.combined.append(second)\n else:\n # combine and terminate\n self.move_cursors_to_end(second)\n self.combine_and_select_block(first)",
"def __radd__(self, other):\n if other is Ellipsis:\n return SkipTo(self)(\"_skipped\") + self\n\n return whitespaces.CURRENT.normalize(other) + self",
"def combine(*rules):\n\n def rule(symbol):\n for r in rules:\n result = r(symbol)\n if result is not None:\n return result\n return rule",
"def RewriteOR(self, left, right):\n return None",
"def __rxor__(self, other):\n return whitespaces.CURRENT.normalize(other) ^ self",
"def merge_scaffolds(scaff1, scaff2):\n if scaff1[-2] == other_end(scaff2[1]):\n new_scaff = scaff1[:-1] + scaff2[1:]\n elif scaff1[-3] == scaff2[1] and scaff1[-2] == scaff2[2]:\n new_scaff = scaff1[:-1] + scaff2[3:]\n else:\n new_scaff = scaff1[:-1] + [other_end(scaff1[-2]), other_end(scaff2[1])] + scaff2[1:]\n return new_scaff",
"def join_union(self, other):\n\n assert type(self) is type(other), 'Expected NestedRE instance'\n\n A = self.make_flat()\n B = other.make_flat()\n\n if A == B and A !='ϵ':\n return self.merge_union(A, [self.closure, other.closure])\n elif A == 'ϵ' and B == 'ϵ':\n return NestedRE('ϵ')\n elif A == 'ϵ':\n return NestedRE(B, '?')\n elif B == 'ϵ':\n return NestedRE(A, '?')\n else:\n return NestedRE( '(' + A + '|' + B + ')' )",
"def __or__(self, second_rule):\n return OrRule(self, second_rule)",
"def difference_grammar(one, another):\n\tdiff_grammar = WCFG()\n\tfor rule in one:\n\t\tfor r in another:\n\t\t\tif r.rhs == rule.rhs and r.lhs == rule.lhs:\n\t\t\t\tapprox_prob = r.prob\n\t\t\t\tbreak\n\t\tdiff_grammar.add(Rule(rule.lhs, rule.rhs, abs(rule.prob-approx_prob)))\n\treturn diff_grammar",
"def proc_sw_both_not_empty(forward_pairs, uni_word, morphs, backward_pairs):\n sandwich_pairs = []\n morphs_str = ' + '.join([str(morph) for morph in morphs])\n word_morphs_str = '%s %s' % (uni_word.encode('UTF-8'), morphs_str)\n if word_morphs_str in _CORRECT_ALIGNS:\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs))\n elif word_morphs_str in ['해야 하/VV + 아야/EC + 하/VX', '해야 하/XSV + 아야/EC + 하/VX', '야 아야/EC + 하/VX',\n '야 어야/EC + 하/VX', '봐야 보/VX + 아야/EC + 하/VX', '해야 하/VX + 아야/EC + 하/VX'] \\\n and backward_pairs and str(backward_pairs[0].morphs[0]) == '겠/EP':\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs[:-1]))\n backward_pairs[0].morphs.insert(0, morphs[-1])\n del morphs[:]\n elif word_morphs_str in ['졌 어/EC + 지/VX + 었/EP', '져 어/EC + 지/VX + 어/EC', '봐야 아/EC + 보/VX + 아야/EC',\n '왔 아/EC + 오/VX + 았/EP', '왔 어/EC + 오/VX + 았/EP', '봤 아/EC + 보/VX + 았/EP',\n '봐 아/EC + 보/VX + 아/EF', '왔 아/EC + 오/VV + 았/EP', '냈 아/EC + 내/VX + 었/EP',\n '내 아/EC + 내/VX + 어/EC'] \\\n and forward_pairs and forward_pairs[-1].morphs[-1].tag == 'VV':\n forward_pairs[-1].morphs.append(morphs[0])\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs[1:]))\n del morphs[:]\n elif word_morphs_str == '봐야 아/EC + 보/VX + 아야/EC + 하/VX' \\\n and forward_pairs and forward_pairs[-1].morphs[-1].tag == 'VV' \\\n and backward_pairs and str(backward_pairs[0].morphs[0]) == '겠/EP':\n forward_pairs[-1].morphs.append(morphs[0])\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs[1:-1]))\n backward_pairs[0].morphs.insert(0, morphs[-1])\n del morphs[:]\n elif uni_word in [u'해졌', u'해봤', u'해했', u'해봐', u'해줬', u'해줘', u'해와', u'해져'] and len(morphs) == 4:\n sandwich_pairs.append(WordStrMorphsPair(uni_word[0], morphs[:2]))\n sandwich_pairs.append(WordStrMorphsPair(uni_word[1], morphs[2:]))\n del morphs[:]\n elif word_morphs_str == '잖 이/VCP + 지/EC + 않/VX' \\\n and forward_pairs and forward_pairs[-1].morphs[-1].tag.startswith('N'):\n forward_pairs[-1].morphs.append(morphs[0])\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs[1:]))\n del morphs[:]\n elif word_morphs_str in ['해서 하/VV + 아서/EC + 이/VCP', '해서 하/XSV + 아서/EC + 이/VCP',\n '해서 하/VX + 아서/EC + 이/VCP', '서] 아서/EC+이/VCP'] \\\n and backward_pairs and backward_pairs[0].morphs[0].tag.startswith('E'):\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs[:-1]))\n backward_pairs[0].morphs.insert(0, morphs[-1])\n del morphs[:]\n elif word_morphs_str in ['서 아서/EC + 이/VCP'] \\\n and backward_pairs and backward_pairs[0].morphs[0].tag.startswith('E'):\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs[:-1]))\n backward_pairs[0].morphs.insert(0, morphs[-1])\n del morphs[:]\n else:\n sim = calc_similarity(uni_word, unicode(''.join([morph.lex for morph in morphs]), 'UTF-8'))\n if sim >= _SIMILARITY_MIN:\n # string similarity is greater than threshold\n sandwich_pairs.append(WordStrMorphsPair(uni_word, morphs))\n else:\n raise AlignError()\n return sandwich_pairs",
"def __or__(self, other):\n if isinstance(other, (Terminal, NonTerminal, Epsilon)):\n return ProductionAlternatives(Production(self), Production(other))\n elif isinstance(other, Production):\n return ProductionAlternatives(Production(self), other)\n else:\n raise TypeError(\"Unexpected type\")",
"def doCombineCurEnd(self, endofword, nrc='', nextvowel=''): # nrc = next root consonant\n if not self.end:\n return\n self.final = PhonStateCAT.getFinal(self.end)\n nasalPhon = ''\n postVowelPhon = ''\n preVowelPhon = ''\n # geminates\n geminates = False\n if self.end.startswith('w'):\n preVowelPhon = 'w'\n self.vowel = self.end[1:2]\n else:\n self.vowel = self.end[:1]\n vowelPhon = self.vowel\n if nrc == self.final and self.final != '':\n geminates = True\n if self.gemminatesStrategy == 'len' or self.gemminatesStrategy == 'lentone':\n postVowelPhon = 'ː'\n ## Suffix\n finalPhon = ''\n if self.final == 'ng':\n nasalPhon = self.nasalchar # ?\n if geminates:\n pass\n elif self.final in PhonStateCAT.simpleFinalMapping:\n finalPhon = PhonStateCAT.simpleFinalMapping[self.final]\n elif self.final == '':\n if self.latent != '' and self.prefixStrategy != 'never' and (self.prefixSyllable == 'afterEmptyCoda' or self.prefixSyllable == 'afterEmptyCoda+'):\n finalPhon = PhonStateCAT.simpleLatentMapping[self.latent]\n finalPhon = ''\n else:\n print(\"unrecognized final: \"+self.final)\n self.phon += preVowelPhon+vowelPhon+nasalPhon+postVowelPhon+finalPhon\n if not endofword:\n self.phon += self.syllablesepchar",
"def __add__(self, right_rule):\n self.__subrules.append(right_rule)\n return self",
"def optimize_right(connect):\n if intersect(connect[0], connect[1], connect[2]) == False:\n return connect\n else:\n connect = connect_right(connect[1], connect[2])\n if intersect(connect[0], connect[1], connect[2]) == True:\n return optimize_right(connect)\n else:\n return connect",
"def concatenate(self, other: \"CFG\") -> \"CFG\":\n start_temp = Variable(\"#STARTCONC#\")\n temp_0 = Terminal(\"#0CONC#\")\n temp_1 = Terminal(\"#1CONC#\")\n production0 = Production(start_temp, [temp_0, temp_1])\n cfg_temp = CFG({start_temp},\n {temp_0, temp_1},\n start_temp,\n {production0})\n return cfg_temp.substitute({temp_0: self,\n temp_1: other})",
"def eliminate_immediate_recursion(self):\n assert self.is_canonical\n # Eliminate immediate left recursion\n # Replace rules\n # A -> A alpha1 | A alpha2 | beta1 | beta2\n # with\n # A -> beta1 A' | beta2 A'\n # A' -> alpha1 A' | alpha2 A' | epsilon\n #\n # When A can produce epsilon directly:\n # A -> A alpha1 | A alpha2 | beta1 | beta2 | epsilon\n # with\n # A -> beta1 A' | beta2 A' | A'\n # A' -> alpha1 A' | alpha2 A' | epsilon\n preorder_names = self.preorder()\n for rule_name in preorder_names:\n rule = self.rules[rule_name]\n changed = False\n has_immediate_left_recursion = False\n for rhs in rule.as_container():\n first = rhs.as_container()[0]\n if first.is_symbol_name() and first.content is rule_name:\n has_immediate_left_recursion = True\n break\n if has_immediate_left_recursion:\n self_parts = [] # Becomes new right-hand-side for A\n rest_name = \"{}.rest\".format(rule_name)\n assert rest_name not in self.rules\n rest_parts = [] # Becomes new right-hand-side for A'\n for rhs in rule.as_container():\n phrase = rhs.as_container()\n first = phrase[0]\n rest = phrase[1:]\n if first.is_symbol_name() and first.content is rule_name:\n rest_parts.append(self.MakeSeq(rest + [self.MakeSymbolName(rest_name)]))\n else:\n # TODO: use list_without_empty to shorten this\n if len(phrase) > 0 and phrase[0].is_empty():\n # beta is epsilon\n assert len(phrase) == 1\n self_parts.append( self.MakeSymbolName(rest_name) )\n else:\n self_parts.append( self.MakeSeq([x for x in phrase] + [self.MakeSymbolName(rest_name)]) )\n rest_parts.append(self.MakeEmpty())\n self.rules[rule_name] = self.MakeChoice(self_parts)\n self.rules[rest_name] = self.MakeChoice(rest_parts)",
"def __mul__(self, other, nested=False):\n\n other = Formula(other)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = sorted(termname.split('*'))\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames)\n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = sorted(name.split('*'))\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return np.array(out)\n\n cself = copy.copy(self.terms[i])\n cother = copy.copy(other.terms[j])\n sumterms = cself + cother\n sumterms.terms = [cself, cother] # enforce the order we want\n\n _term = Quantitative(names, func=sumterms,\n termname=termname,\n transform=product_func)\n\n if _namespace_equal(self.namespace, other.namespace):\n _term.namespace = self.namespace\n\n terms.append(_term)\n\n return Formula(terms)",
"def radical_c2(self, atoms):\n\n c1, c2 = atoms.keys()\n c1_ndx, c2_ndx = atoms.values()\n\n chain1, chain2 = self.determine_chains([c1, c2])\n\n # to get indexing right\n c1_ndx -= self.monomer.indices[chain1]['C1']\n c2_ndx -= self.monomer.indices[chain2]['C2']\n\n # types after reaction\n types = {'chain1': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'ha'}, # chain1 contains c1\n 'chain2': {'C1': 'c3', 'C2': 'c3', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'hc',\n 'H4': 'ha', 'H5': 'ha'}} # chain2 contains c2 radical\n\n # update types\n reacted_types = {'chain1': {c1_ndx + self.monomer.indices[chain1][a]: types['chain1'][a]\n for a in types['chain1'].keys()},\n 'chain2': {c2_ndx + self.monomer.indices[chain2][a]: types['chain2'][a]\n for a in types['chain2'].keys()}}\n\n # new bonds\n bonds = [[c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2'], 'carbon']]\n\n # no dummy bonds to add\n\n # define indices of left-over radicals\n radicals = [c1_ndx + self.monomer.indices[chain1]['C2']]\n\n chain1_impropers = ['C1'] # [1]\n chain2_impropers = ['C2'] # [2]\n rm_improper = []\n for c in chain1_impropers:\n rm_improper.append([c1_ndx + self.monomer.indices[chain1][x] for x in self.monomer.impropers[chain1][c]])\n for c in chain2_impropers:\n rm_improper.append([c2_ndx + self.monomer.indices[chain2][x] for x in self.monomer.impropers[chain2][c]])\n\n # define terminated atoms\n terminated = [c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2']]\n\n return reacted_types, bonds, radicals, rm_improper, terminated",
"def adp(lhs,rhs):\n test=lambda s: s[0]=='`'\n assert test(lhs)==True,'error: lhs should be non-terminal'\n lhs=so.getSymbol(lhs[1:],terminal=False,autocreate=True)\n rhs=[so.getSymbol(s[1:],False,True) if test(s) else so.getSymbol(s,True,True) for s in rhs]\n return addProduction(lhs,rhs)",
"def test_combine_multiple(self):\n inv_search = 'author:\"gattringer, c*\" keyword:chiral keyword:symmetry -title:chiral'\n spi_search = \"find a c gattringer and k chiral symmetry and not title chiral\"\n self._compare_searches(inv_search, spi_search)"
] |
[
"0.5043339",
"0.5019824",
"0.49600214",
"0.48698428",
"0.48641443",
"0.48044404",
"0.47883224",
"0.47840193",
"0.47740465",
"0.4722412",
"0.47152784",
"0.46691644",
"0.46416342",
"0.46308494",
"0.4580326",
"0.45753255",
"0.45638022",
"0.4558784",
"0.45551687",
"0.4530562",
"0.4517733",
"0.45148554",
"0.44874737",
"0.4450741",
"0.44503692",
"0.4434321",
"0.44284838",
"0.4424166",
"0.4422105",
"0.44169497"
] |
0.6068014
|
0
|
Inline a rule when it only has one option, and at least one of the symbols is a symbol name. Don't inline any symbol named by excepting_set.
|
def inline_single_choice_with_nonterminal(self,excepting_set=set()):
# Map a rule name to the phrase it should be replaced with.
replacement = dict()
# Needed for computing follow sets
excepting_set = set(excepting_set) | {self.start_symbol}
# Process descendants first
for A in reversed(self.preorder()):
A_rule = self.rules[A].as_container()
if (len(A_rule) == 1) and (A not in excepting_set):
# There is only one option in the choice
rhs = A_rule[0].as_container()
# Skip inlining token definitions.
if any([x.is_symbol_name() for x in rhs]):
replacement[A] = rhs
# Update this rule with any scheduled replacements.
changed_rule = False
new_options = []
for option in A_rule:
changed_parts = False
parts = []
for x in option.as_container():
if x.is_symbol_name() and x.content in replacement:
parts.extend(replacement[x.content])
changed_parts = True
changed_rule = True
else:
parts.append(x)
new_options.append(self.MakeSeq(parts) if changed_parts else option)
if changed_rule:
self.rules[A] = self.MakeChoice(new_options)
self.remove_unused_rules()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _want_sym(sym):\n if sym is None or len(sym) < 2:\n return False\n if sym['name'] in extract_ignore_names:\n return False\n bad_types = ['t', 'b', 'r', 'd', 'w']\n return (sym['type'] not in bad_types\n and sym['name'] not in ['__bss_start', '_end', '_edata'])",
"def ifind_symbols(self, name=\"any\", **kw):\n for sym in self.itersymbols():\n if (name==\"any\" or name==sym.sym.name) and \\\n sym.sym.k==kw:\n yield sym.sym",
"def mark_used(self, symbol):\n assert isinstance(symbol, str)\n if (symbol in self._relation_symbols\n or symbol in self._self_symbols\n or symbol in self._child_symbols):\n raise ValueError('Symbol {} already used'.format(symbol))\n self._self_symbols.add(symbol)",
"def inline_specific(self,specific_set):\n\n # Map a rule name to the phrase it should be replaced with.\n replacement = dict()\n\n # Process descendants first\n for A in reversed(self.preorder()):\n A_rule = self.rules[A].as_container()\n if A in specific_set:\n assert(len(A_rule)==1)\n replacement[A] = A_rule.as_container()[0].as_container()\n\n # Update this rule with any scheduled replacements.\n changed_rule = False\n new_options = []\n for option in A_rule:\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.extend(replacement[x.content])\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[A] = self.MakeChoice(new_options)\n\n self.remove_unused_rules()",
"def allowed_other_sans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_other_sans\")",
"def allowed_other_sans(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_other_sans\")",
"def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})",
"def test_single_extra_token(self):\n self.helper_test_evaluate_raises(\n 'A and not B',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=0)",
"def test_validate_self_input_symbol_subset(self):\n with nose.assert_raises(exceptions.MissingSymbolError):\n self.dtm1.input_symbols.add('2')\n self.dtm1.validate_self()",
"def notpexpr(*disallowed_heads):\n return some(lambda x: not (\n isinstance(x, HyExpression) and\n x and\n isinstance(x[0], HySymbol) and\n x[0] in disallowed_heads))",
"def _check_fixed_others(self, symbol_id):\n\n # Get the next symbol\n self.symbol = self.scanner.get_symbol()\n if self.symbol.type == self.scanner.KEYWORD and \\\n self.symbol.id == symbol_id:\n self.symbol = self.scanner.get_symbol()\n self._check_semicolon_else_skip(self.symbol)\n elif self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n else:\n # Error in symbol\n self._display_syntax_error(symbol_id)\n # Skip to semicolon at end of line\n self._semicolon_skipper()",
"def dedup_rhs(self,inline_stop=set(),verbose=False):\n\n # Map an object index to the nonterminal that first defines it.\n index_to_name = dict()\n # Map a rule name to the rule name it should be replaced by.\n replacement = dict()\n\n def process_replacement(grammar,name,replacement_dict):\n # Update this rule with any scheduled replacements.\n rule = self.rules[name]\n changed_rule = False\n new_options = []\n for option in rule.as_container():\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.append(self.MakeSymbolName(replacement[x.content]))\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[name] = self.MakeChoice(new_options)\n\n for A in reversed(self.preorder()):\n if A not in inline_stop:\n A_rule = self.rules[A]\n A_index = A_rule.reg_info.index\n if verbose:\n print(\" {} {} \".format(A,A_index))\n if A_index in index_to_name:\n if verbose:\n print(\"Replace {} with {}\".format(A,index_to_name[A_index]))\n replacement[A] = index_to_name[A_index]\n else:\n index_to_name[A_index] = A\n process_replacement(self,A,replacement)\n\n\n for A in self.preorder():\n process_replacement(self,A,replacement)\n\n self.remove_unused_rules()",
"def allowed_other_sans(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"allowed_other_sans\")",
"def _nonkey_extended():\n p = ((Pattern._nonkey() + \n c.paren((next_word('or') + Pattern._nonkey()).treat(lib.snd).plus()).possibly()) +\n c.paren(Pattern._nonkey().plus()).many())\n def f(item):\n item1 = p.process(item)\n ((a,bs),cs) = item1.acc\n vals = [a.value]+ [i.value for i in bs]\n c.synonym_add(vals)\n return c.update((a,cs),item1)\n return Parse(f)",
"def left_refactor(self,target_rule_name,stop_at_set):\n name_suffix = \".post.{}\".format(target_rule_name)\n\n # Map a rule name X to a set of rules Y where X appears\n # as a first nonterminal in one of Y's options.\n appears_first_in = defaultdict(set)\n for name, rule in self.rules.items():\n for option in rule.as_container():\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(name)\n #print(\"appears first dict\\n{}\\n\\n\".format(appears_first_in))\n\n po = PrintOption()\n po.is_canonical = self.is_canonical\n po.inline_synthetic = False\n candidates = set(self.rules.keys())\n while len(candidates) > 0:\n for A in list(candidates):\n candidates.remove(A)\n if A in stop_at_set:\n continue\n rule = self.rules[A]\n (starts,others,terms,empties) = rule.partition(target_rule_name)\n if len(starts) > 0 and (len(others)+len(terms)+len(empties) == 0):\n #print(\"processing {}\".format(A))\n # Create the new rule.\n new_rule_name = \"{}{}\".format(A,name_suffix)\n # Form alpha1 ... alphaN\n new_options = []\n for option in rule:\n if len(option.as_container()) == 1:\n new_options.append(self.MakeEmpty())\n else:\n assert option.is_container() and (len(option)>1)\n new_options.append(self.MakeSeq(option[1:]))\n self.rules[new_rule_name] = self.MakeChoice(new_options)\n\n # Rewrite A itself.\n self_parts = [self.MakeSymbolName(x) for x in [target_rule_name,new_rule_name]]\n self.rules[A] = self.MakeChoice([self.MakeSeq(self_parts)])\n\n # Update bookkeeping for appears_first_in\n for option in new_options:\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(new_rule_name)\n\n # Replace the old rule everywhere it appears in the first\n # position\n for parent_name in list(appears_first_in[A]):\n if parent_name == A:\n # Already processed above\n continue\n parent = self.rules[parent_name]\n (starts,others,terms,empties) = parent.partition(A)\n new_options = []\n for option in starts:\n parts = []\n parts.append(self.MakeSymbolName(target_rule_name))\n parts.append(self.MakeSymbolName(new_rule_name))\n parts.extend(option.as_container()[1:])\n new_options.append(self.MakeSeq(parts))\n new_options.extend(others+terms+empties)\n self.rules[parent_name] = self.MakeChoice(new_options)\n appears_first_in[A].remove(parent_name)\n appears_first_in[target_rule_name].add(parent_name)\n # Set up transitive closure.\n candidates.add(parent_name)\n\n #print()\n #print()\n #print()\n\n #self.absorb_post(target_rule_name)\n self.remove_unused_rules()",
"def prefix(name):\n def rule(symbol):\n return symbol.startswith(name) or None\n return rule",
"def _check_semicolon_else_skip(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n pass\n else:\n self._display_syntax_error(\"semicolon\")\n # Skip to semicolon at end of line\n self._semicolon_skipper()",
"def as_dummy(self):\n from .symbol import Dummy, Symbol\n def can(x):\n # mask free that shadow bound\n free = x.free_symbols\n bound = set(x.bound_symbols)\n d = {i: Dummy() for i in bound & free}\n x = x.subs(d)\n # replace bound with canonical names\n x = x.xreplace(x.canonical_variables)\n # return after undoing masking\n return x.xreplace({v: k for k, v in d.items()})\n if not self.has(Symbol):\n return self\n return self.replace(\n lambda x: hasattr(x, 'bound_symbols'),\n can,\n simultaneous=False)",
"def ko_rule(self):\n pass",
"def inline_single_starrable(self):\n\n # Map a rule name to the phrase it should be replaced with.\n replacement = dict()\n\n # Process descendants first\n for A in reversed(self.preorder()):\n A_rule = self.rules[A].as_container()\n if len(A_rule) == 1:\n option = A_rule[0].as_container()\n if len(option) == 1:\n first = option[0]\n if first.is_symbol_name():\n first_name = first.content\n if self.rules[first_name].as_starred(first_name) is not None:\n replacement[A] = [first]\n\n # Update this rule with any scheduled replacements.\n changed_rule = False\n new_options = []\n for option in A_rule:\n changed_parts = False\n parts = []\n for x in option.as_container():\n if x.is_symbol_name() and x.content in replacement:\n parts.extend(replacement[x.content])\n changed_parts = True\n changed_rule = True\n else:\n parts.append(x)\n new_options.append(self.MakeSeq(parts) if changed_parts else option)\n if changed_rule:\n self.rules[A] = self.MakeChoice(new_options)\n\n self.remove_unused_rules()",
"def set_optimizeable_hydrogens(self):\n for residue in self.biomolecule.residues:\n optinstance = self.is_optimizeable(residue)\n if optinstance is None:\n continue\n for atom in residue.atoms:\n if atom.name in optinstance.map:\n atom.optimizeable = 1",
"def _unique_shorthand(self, name):\n shorthand = name[0]\n\n for i in range(1, len(name)):\n if shorthand not in self.__used_names:\n # This is unique.\n self.__used_names.add(shorthand)\n return shorthand\n\n # Add another letter.\n shorthand += name[i]\n\n raise ValueError(\"Duplicate param '%s'?\" % (name))",
"def test_override_symbol(self):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.)\n a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.)\n a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.)\n op = Operator(Eq(a, a + 3))\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))",
"def test_missing_multiple_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n D=1)",
"def test_missing_single_token(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n B=1,\n D=1)",
"def should_strip_setter_whitespace(self):\n\n def parse_jscs_option(val):\n if type(val) == bool:\n return val\n\n if isinstance(val, list) and '=' in val:\n return True\n\n return False\n\n return dict(\n before=parse_jscs_option(\n self.jscs_options.get('disallowSpaceBeforeBinaryOperators')),\n after=parse_jscs_option(\n self.jscs_options.get('disallowSpaceAfterBinaryOperators'))\n )",
"def extrn(self, name):\n if name in self.symbols:\n raise Redeclaration(name)\n self.symbols[name] = Symbol(name, 'extrn', None)",
"def __sub__(self, other):\n return self + And.SyntaxErrorGuard() + whitespaces.CURRENT.normalize(other)",
"def test_exclude_include_overlapping_ambiguous_single_env_init(capsys):\n errorline = [None]\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA(aa=0):\n errorline[0] = next_line_num()\n item(mc_exclude=[dev1], mc_include=[dev1, pp])\n\n sout, _serr = capsys.readouterr()\n assert sout == \"\"\n\n assert exp_dev1_ambiguous in str(exinfo.value)\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA(aa=0):\n errorline[0] = next_line_num()\n item(mc_exclude=[pp, dev1], mc_include=[dev1])\n\n assert exp_dev1_ambiguous in str(exinfo.value)",
"def add_extra_compiler_flags(self, op):\n if is_listing(op):\n for ii in op:\n self.add_extra_compiler_flags(ii)\n elif not op in self.__include_directories and not op in self.__definitions:\n self.__compiler_flags_extra += [op]"
] |
[
"0.4967638",
"0.4935038",
"0.4867223",
"0.47817987",
"0.47697067",
"0.47697067",
"0.4762051",
"0.4745731",
"0.4715457",
"0.4708826",
"0.4695875",
"0.46882424",
"0.4686827",
"0.46597877",
"0.46322995",
"0.46064657",
"0.45897946",
"0.45387805",
"0.45264187",
"0.45204505",
"0.44970325",
"0.4492379",
"0.4467344",
"0.44558865",
"0.44460076",
"0.44409996",
"0.44304344",
"0.43693438",
"0.4368071",
"0.43659016"
] |
0.5666457
|
0
|
If there are rules X > ... X.post.POST, then set X > POST X.post.POST
|
def refactor_post(self,post_name):
for name in list(self.rules):
related_post = "{}.post.{}".format(name,post_name)
if related_post in self.rules:
parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]
self.rules[name] = self.MakeChoice([self.MakeSeq(parts)])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def passivize(rule):\n rule[\"mother\"][\"subcat\"] = {\n \"obj\": None,\n \"preps\": {\n \"by\": [[\"*Subj\"]]}}\n\n rule[\"mother\"][\"hooks\"] = {\n \"head\": [\"*Obj\"]}\n\n rule[\"dtrs\"][0][\"subcat\"] = {\n \"obj\": [\"*Obj\"]}\n\n rule[\"dtrs\"][0][\"hooks\"] = {\n \"subj\": [\"*Subj\"]}\n\n return rule",
"def update_rules():\n update_all_rules()\n return \"OK\"",
"def test_update_rule(self):\n pass",
"def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)",
"def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)",
"def _filter_post(post):\n\n return True",
"def left_refactor(self,target_rule_name,stop_at_set):\n name_suffix = \".post.{}\".format(target_rule_name)\n\n # Map a rule name X to a set of rules Y where X appears\n # as a first nonterminal in one of Y's options.\n appears_first_in = defaultdict(set)\n for name, rule in self.rules.items():\n for option in rule.as_container():\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(name)\n #print(\"appears first dict\\n{}\\n\\n\".format(appears_first_in))\n\n po = PrintOption()\n po.is_canonical = self.is_canonical\n po.inline_synthetic = False\n candidates = set(self.rules.keys())\n while len(candidates) > 0:\n for A in list(candidates):\n candidates.remove(A)\n if A in stop_at_set:\n continue\n rule = self.rules[A]\n (starts,others,terms,empties) = rule.partition(target_rule_name)\n if len(starts) > 0 and (len(others)+len(terms)+len(empties) == 0):\n #print(\"processing {}\".format(A))\n # Create the new rule.\n new_rule_name = \"{}{}\".format(A,name_suffix)\n # Form alpha1 ... alphaN\n new_options = []\n for option in rule:\n if len(option.as_container()) == 1:\n new_options.append(self.MakeEmpty())\n else:\n assert option.is_container() and (len(option)>1)\n new_options.append(self.MakeSeq(option[1:]))\n self.rules[new_rule_name] = self.MakeChoice(new_options)\n\n # Rewrite A itself.\n self_parts = [self.MakeSymbolName(x) for x in [target_rule_name,new_rule_name]]\n self.rules[A] = self.MakeChoice([self.MakeSeq(self_parts)])\n\n # Update bookkeeping for appears_first_in\n for option in new_options:\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(new_rule_name)\n\n # Replace the old rule everywhere it appears in the first\n # position\n for parent_name in list(appears_first_in[A]):\n if parent_name == A:\n # Already processed above\n continue\n parent = self.rules[parent_name]\n (starts,others,terms,empties) = parent.partition(A)\n new_options = []\n for option in starts:\n parts = []\n parts.append(self.MakeSymbolName(target_rule_name))\n parts.append(self.MakeSymbolName(new_rule_name))\n parts.extend(option.as_container()[1:])\n new_options.append(self.MakeSeq(parts))\n new_options.extend(others+terms+empties)\n self.rules[parent_name] = self.MakeChoice(new_options)\n appears_first_in[A].remove(parent_name)\n appears_first_in[target_rule_name].add(parent_name)\n # Set up transitive closure.\n candidates.add(parent_name)\n\n #print()\n #print()\n #print()\n\n #self.absorb_post(target_rule_name)\n self.remove_unused_rules()",
"def custom_cleaning_before_rules(dc):\n pass",
"def apply_rules(self, token_parse_list):\r\n return token_parse_list",
"def with_post_criteria(self, fn):\n return self._using_post_criteria([fn])",
"def vrules(self):\n ...",
"def apply_rule(seq):\n for idx,prop in enumerate(seq.ant):\n\n if prop.conn == \"not\":\n # create a copy of seq (we don't want to mutate it)\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.con = [ not_a.p1 ] + new_seq.con\n # return a list of 3 values with seq2 being None\n # (since there is not split in this rule)\n return [new_seq , None, \"not left\"]\n\n elif prop.conn == \"or\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\"\n assert b_or_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_or_c.p1)\n new_seq2.ant.append(b_or_c.p2)\n # return the obtained sequents and the rule name\n # here we have two sequents since \"or left\"\n # has two sequents at the top\n return [new_seq1 , new_seq2, \"or left\"]\n\n elif prop.conn == \"and\":\n #create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_and_c = new_seq.ant.pop(idx)\n # make sure we popped the correct one\n assert b_and_c.conn == \"and\"\n # apply the rule\n new_seq.ant.append(b_and_c.p1)\n new_seq.ant.append(b_and_c.p2)\n # return a list of 3 values with seq2 being None\n return [new_seq, None, 'and left']\n\n \n elif prop.conn == \"imp\":\n # create two copies of seq\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_imp_c = new_seq1.ant.pop(idx)\n # make sure we popped the correct one\n assert b_imp_c.conn == \"imp\"\n assert b_imp_c == new_seq2.ant.pop(idx)\n # apply the rule\n new_seq1.ant.append(b_imp_c.p2)\n new_seq2.con.append(b_imp_c.p1)\n # return the obtained sequents and the rule name\n return [new_seq1 , new_seq2, \"implies left\"]\n\n for idx,prop in enumerate(seq.con):\n if prop.conn == \"not\":\n new_seq = Sequent(seq.ant[:],seq.con[:])\n # pop the proposition from the list\n not_a = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert not_a.conn == \"not\"\n # apply the rule\n new_seq.ant = [ not_a.p1 ] + new_seq.ant\n # return a list of 3 values with seq2 being None\n return [new_seq , None, \"not right\"]\n elif prop.conn == \"or\":\n # create one copy of seq\n new_seq = Sequent(seq.ant[:], seq.con[:])\n # pop the proposition from the list\n b_or_c = new_seq.con.pop(idx)\n # make sure we popped the correct one\n assert b_or_c.conn == \"or\" \n # apply the rule\n new_seq.con.append(b_or_c.p1)\n new_seq.con.append(b_or_c.p2)\n # return the obtained sequent and the rule name\n return [new_seq , None, \"or right\"]\n\n elif prop.conn == 'and':\n new_seq1 = Sequent(seq.ant[:], seq.con[:])\n new_seq2 = Sequent(seq.ant[:], seq.con[:])\n b_and_c = new_seq1.con.pop(idx)\n assert b_and_c.conn == \"and\"\n assert b_and_c == new_seq2.con.pop(idx)\n new_seq1.con.append(b_and_c.p1)\n new_seq2.con.append(b_and_c.p2)\n return [new_seq1 , new_seq2, \"and right\"]\n\n elif prop.conn == 'imp':\n new_seq = Sequent(seq.ant[:], seq.con[:])\n b_imp_c = new_seq.con.pop(idx)\n assert b_imp_c.conn == \"imp\"\n new_seq.ant.append(b_imp_c.p1)\n new_seq.con.append(b_imp_c.p2)\n return [new_seq , None, \"implies right\"]",
"def test_post_chain(self):\n pass",
"def make_rules(self, old_rules):\n rules = defaultdict(set)\n\n def recurse_disc_rule(attr, rule):\n \"\"\"\n Recursively partition multivalued discrete attributes if\n its worth it\n \"\"\"\n\n\n ro = RuleObj(rule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n\n if not self.prune_rule(ro):\n return set([ro])\n \n c = rule.filter.conditions[0]\n var_type = rule.data.domain[c.position].var_type\n\n if (var_type == Orange.feature.Type.Discrete):\n if len(c.values) == 1:\n return [ro]\n \n refiner = BeamRefiner(attrs=[attr], fanout=10)\n ret = set()\n for _, newrule in refiner(rule):\n ret.update(recurse_disc_rule(attr, newrule))\n return ret\n else:\n if len(rule.data) < self.min_pts:\n return [ro]\n return [ro]\n\n # XXX: figure out this logic!\n\n refiner = BeamRefiner(attrs=[attr], fanout=2)\n ret = set()\n for _, newrule in refiner(rule):\n newro = RuleObj(newrule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n ret.update(recurse_disc_rule(attr, newrule))\n\n \n if old_rules is None:\n base_rule = SDRule(self.full_table, None) \n refiner = BeamRefiner(attrs=self.cols, fanout=10)\n #refiner = BeamRefiner(attrs=['recipient_nm'], fanout=30) \n\n \n for attr, rule in refiner(base_rule):\n ros = recurse_disc_rule(attr, rule)\n #self.top_k({None:ros})\n ros = filter(self.prune_rule, ros)\n rules[(attr,)].update(ros)\n\n else:\n attrs = old_rules.keys()\n for a_idx, attr1 in enumerate(attrs):\n for attr2 in attrs[a_idx+1:]:\n merged_attrs = set(attr1).union(attr2)\n max_attrs_len = max(len(attr1), len(attr2))\n if len(merged_attrs) == max_attrs_len:\n continue\n \n \n a1rules, a2rules = old_rules[attr1], old_rules[attr2]\n\n for ro in self.merge_dims(a1rules, a2rules):\n key = ro.rule.attributes\n\n #self.top_k({None:(ro,)})\n if self.prune_rule(ro):\n rules[key].add(ro)\n \n return rules",
"def update_post(prev_data, data, db_conn):\n\n schema = get_post_schema(data)\n post_kind = prev_data['kind']\n if post_kind is 'post' or post_kind is 'proposal':\n data = pick(data, ('body',))\n elif post_kind is 'vote':\n data = pick(data, ('body', 'response',))\n data, errors = update_document(schema, prev_data, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors",
"def post_process_stage1(self, pred):\n return (pred>0)*1",
"def ko_rule(self):\n pass",
"def test_multiple_rules(self):\n rule = (\n 'alert(name:\"test1\"; side:client; match:\"A\",1; replace:\"B\";)\\n'\n 'alert(name:\"test2\"; side:client; match:\"B\",1; replace:\"A\";)\\n')\n tests = {\n (\"ABCD\", \"BACD\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\", ],\n }\n\n self.run_rules(rule, tests, echo=True)\n\n rule = (\n 'alert(name:\"test1\"; side:client; match:\"A\",1;)\\n'\n 'alert(name:\"test2\"; side:client; match:\"B\",1;)\\n')\n tests = {\n (\"ABCD\", \"ABCD\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\", ],\n }\n\n self.run_rules(rule, tests, echo=True)\n\n rule = (\n 'alert(name:\"test1\"; side:client; match:\"B\",1;)\\n'\n 'alert(name:\"test2\"; side:client; match:\"A\",1;)\\n')\n tests = {\n (\"ABCD\", \"ABCD\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test2'\",\n \"INFO : filter matched: 'test1'\", ],\n }\n\n self.run_rules(rule, tests, echo=True)",
"def update_validity(sender, instance, **kwargs):\n\tinstance.up_to_date = True\n\n\tfor sol in instance.post_set.all():\n\t\tsol.up_to_date = False\n\t\tsol.save()",
"def hrules(self):\n ...",
"def cleanUpRules(self):\n\n\t\t# initialize\n\t\tscoreDict = {}\n\t\tnewRules = {}\n\n\t\t# loop through rules\n\t\tfor i, tup in enumerate(self.generatedRules):\n\n\n\t\t\tantecedent = str(tup[0].antecedent)\n\n\t\t\t# if there is no rule in the scoredictionary yet with the same antecedent, put it in both dictionaries\n\t\t\tif (not antecedent in scoreDict):\n\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\telse:\n\n\t\t\t\t# if there is, then first compare if the degree is higher before overwriting\n\t\t\t\tif (tup[1] > scoreDict[antecedent]):\n\t\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\t\telse:\n\t\t\t\t\t# not higher? don't overwrite\n\t\t\t\t\tcontinue\n\n\t\t# save rules\n\t\tself.generatedRules = []\n\t\tfor key in newRules:\n\t\t\tself.generatedRules.append(newRules[key])\n\n\t\treturn",
"def test_redundant_set_field(self):\n SF1, SF2 = (\"SET_FIELD\", (\"IPV4_DST\", 1)), (\"SET_FIELD\", (\"IPV4_DST\", 2))\n SF3, SF4 = (\"SET_FIELD\", (\"IPV4_DST\", 3)), (\"SET_FIELD\", (\"IPV4_DST\", 4))\n OUT = (\"OUTPUT\", 1)\n n1 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF2, OUT])),\n Rule(priority=0)\n ])\n n2 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF1, SF2, OUT])),\n Rule(priority=0)\n ])\n n3 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF3, SF2, OUT])),\n Rule(priority=0)\n ])\n n4 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF4, SF3, SF1, SF2, OUT])),\n Rule(priority=0)\n ])\n n5 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF2, SF2, SF2, SF2, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n2))\n self.assertTrue(check_equal(n1, n3))\n self.assertTrue(check_equal(n1, n4))\n self.assertTrue(check_equal(n1, n5))\n\n # Sanity check\n n6 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF4, SF3, SF1, SF1, OUT])),\n Rule(priority=0)\n ])\n self.assertFalse(check_equal(n1, n6))",
"def __init__(self, rules):\n self.value = rules",
"def or_rule(self, step):\n self.proof[step.seq_num] = self.proof[step.assms[0]]",
"def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)",
"def process_post(new_post, cfg):\n id_already_handled_in_db = i18n['debug']['id_already_handled_in_db']\n discovered_submit_title = i18n['posts']['discovered_submit_title']\n rules_comment = i18n['posts']['rules_comment']\n yt_already_has_transcripts = i18n['posts']['yt_already_has_transcripts']\n\n if new_post['subreddit'] in cfg.upvote_filter_subs:\n # ignore posts if they don't meet the threshold for karma and the sub\n # is in our list of upvoted filtered ones\n if new_post['ups'] < cfg.upvote_filter_subs[new_post['subreddit']]:\n return\n\n if not is_valid(new_post['name'], cfg):\n logging.debug(id_already_handled_in_db.format(new_post['name']))\n return\n\n if new_post['archived']:\n return\n\n if new_post['author'] is None:\n # we don't want to handle deleted posts, that's just silly\n return\n\n logging.info(\n f'Posting call for transcription on ID {new_post[\"name\"]} posted by '\n f'{new_post[\"author\"]}'\n )\n\n if new_post['domain'] in cfg.image_domains:\n content_type = 'image'\n content_format = cfg.image_formatting\n\n elif new_post['domain'] in cfg.audio_domains:\n content_type = 'audio'\n content_format = cfg.audio_formatting\n\n elif new_post['domain'] in cfg.video_domains:\n if 'youtu' in new_post['domain']:\n if not valid_youtube_video(new_post['url']):\n add_complete_post_id(new_post['name'], cfg)\n return\n if get_yt_transcript(new_post['url']):\n np = cfg.r.submission(id=new_post['name'])\n np.reply(_(\n yt_already_has_transcripts\n ))\n add_complete_post_id(new_post['name'], cfg)\n logging.info(\n f'Found YouTube video, {get_yt_video_id(new_post[\"url\"])},'\n f' with good transcripts.'\n )\n return\n content_type = 'video'\n content_format = cfg.video_formatting\n else:\n # This means we pulled from a subreddit bypassing the filters.\n content_type = 'Other'\n content_format = cfg.other_formatting\n\n # Truncate a post title if it exceeds 250 characters, so the added\n # formatting still fits in Reddit's 300 char limit for post titles\n post_title = new_post['title']\n max_title_length = 250\n if len(post_title) > max_title_length:\n post_title = post_title[:max_title_length - 3] + '...'\n\n # noinspection PyBroadException\n try:\n result = cfg.tor.submit(\n title=discovered_submit_title.format(\n sub=new_post['subreddit'],\n type=content_type.title(),\n title=post_title\n ),\n url=reddit_url.format(new_post['permalink'])\n )\n result.reply(\n _(\n rules_comment.format(\n post_type=content_type,\n formatting=content_format,\n header=cfg.header\n )\n )\n )\n flair_post(result, flair.unclaimed)\n\n add_complete_post_id(new_post['name'], cfg)\n cfg.redis.incr('total_posted', amount=1)\n\n if cfg.OCR and content_type == 'image':\n # hook for OCR bot; in order to avoid race conditions, we add the\n # key / value pair that the bot isn't looking for before adding\n # to the set that it's monitoring.\n cfg.redis.set(new_post['name'], result.fullname)\n cfg.redis.rpush('ocr_ids', new_post['name'])\n\n cfg.redis.incr('total_new', amount=1)\n\n # The only errors that happen here are on Reddit's side -- pretty much\n # exclusively 503s and 403s that arbitrarily resolve themselves. A missed\n # post or two is not the end of the world.\n except Exception as e:\n logging.error(\n f'{e} - unable to post content.\\nID: {new_post[\"name\"]}\\n '\n f'Title: {new_post[\"title\"]}\\n Subreddit: '\n f'{new_post[\"subreddit\"]}'\n )",
"def forwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.filter(state=ReferralState.ASSIGNED):\n if referral.answers.count() > 0:\n if referral.answers.filter(validation_request__isnull=False).count() > 0:\n referral.state = ReferralState.IN_VALIDATION\n referral.save()\n else:\n referral.state = ReferralState.PROCESSING\n referral.save()",
"def post(self, request, *args, **kwargs):\n rule = self.get_object()\n try:\n updates = json.loads(request.body.decode('utf-8'))\n except Exception as e:\n return error('unable to marshal json', str(e))\n try:\n validate_rule_json(updates)\n except RuleValidationException as e:\n return error('error validating json', str(e))\n\n # TODO this can take place in the save method on Rule, which would also\n # cover creation and deletion.\n change = RuleChange(\n rule=rule,\n change_user=updates['user'],\n change_comment=updates['comment'])\n change.populate(rule.full_values())\n if rule.enabled and not updates['enabled']:\n change.change_type = 'd'\n else:\n change.change_type = 'u'\n change.save()\n rule.populate(updates)\n rule.save()\n return success({\n 'rule': rule.summary(),\n 'change': change.summary(),\n })",
"def post_process(self, relevant_targets):\r\n pass",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)"
] |
[
"0.5904426",
"0.56316584",
"0.523491",
"0.5202099",
"0.51211447",
"0.51167923",
"0.50520116",
"0.5017483",
"0.5003283",
"0.49718618",
"0.49518344",
"0.49451202",
"0.4943816",
"0.49202237",
"0.49096993",
"0.4888172",
"0.4884482",
"0.48383063",
"0.4826782",
"0.48211488",
"0.4820198",
"0.48060107",
"0.48007584",
"0.4790065",
"0.47876275",
"0.47724354",
"0.47675338",
"0.47654453",
"0.4757967",
"0.47496268"
] |
0.64331836
|
0
|
Hoists the rules for a nonterminal into its ancestors. When target_rule_name holds the name for nonterminal X, and
|
def hoist_until(self,target_rule_name,stop_at_set):
assert self.is_canonical
def expand_first(grammar,rule):
"""
When rule is
Seq(A rest)
and A -> A1 | ... | An
Return [ A1 rest | ... | An rest ]
If Ai is epsilon, then its corresponding term is just 'rest'
"""
result = []
# Hoist the rule for 'other' nonterminal.
phrase = rule.as_container()
first = phrase[0]
assert first.is_symbol_name() and (first.content != target_rule_name)
#print(" elaborating rule for {} ".format(first.content))
rest = phrase[1:]
other_rule = self.rules[first.content]
for other_rhs in other_rule.as_container():
result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))
return result
# Process in reverse order to reduce duplication.
order_of_attack = list(reversed(self.preorder()))
keep_going = True
ancestors = set()
while keep_going:
keep_going = False
#print("hoisting worklist: {}".format(" ".join(order_of_attack)))
for candidate_rule_name in order_of_attack:
rule = self.rules[candidate_rule_name]
#print("consider {}".format(candidate_rule_name))
(with_target_rule_name,other_rules,term,empty) = rule.partition(target_rule_name)
#print(" {} {} {} {}".format(len(with_target_rule_name),len(other_rules),len(term), len(empty)))
if len(with_target_rule_name) > 0 and len(other_rules) > 0:
#print(" need to hoist")
# Need to hoist
replacement = with_target_rule_name
for other in other_rules:
replacement.extend(expand_first(self,other))
replacement.extend(term)
replacement.extend(empty)
self.rules[candidate_rule_name] = self.MakeChoice(replacement)
#print("setting {} to {}".format(candidate_rule_name,str(self.rules[candidate_rule_name])))
keep_going = True
if candidate_rule_name not in stop_at_set:
ancestors.add(candidate_rule_name)
for candidate_rule_name in order_of_attack:
for ancestor in ancestors:
rule = self.rules[candidate_rule_name]
(with_ancestor,other_rules,term,empty) = rule.partition(ancestor)
#print(" {} {} {} {}".format(len(with_ancestor),len(other_rules),len(term), len(empty)))
if len(with_ancestor) > 0:
#print(" expanding ancestor {}".format(ancestor))
replacement = []
for a_rule in with_ancestor:
replacement.extend(expand_first(self,a_rule))
replacement.extend(other_rules)
replacement.extend(term)
replacement.extend(empty)
self.rules[candidate_rule_name] = self.MakeChoice(replacement)
#print("setting {} to {}".format(candidate_rule_name,str(self.rules[candidate_rule_name])))
keep_going = True
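# A minimal standalone sketch of the expand_first step used above, assuming a
# plain dict-of-lists grammar encoding (hypothetical names, not the Grammar/Rule
# API of this class): a leading nonterminal is replaced by each of its
# alternatives, with an empty alternative contributing just the remainder.

def expand_first_demo(grammar, phrase):
    """Given phrase = [A, *rest] where grammar[A] = [alt1, ..., altN],
    return [alt1 + rest, ..., altN + rest]."""
    first, rest = phrase[0], phrase[1:]
    return [list(alt) + list(rest) for alt in grammar[first]]

toy = {'A': [['a1'], ['a2'], []]}  # [] plays the role of epsilon
assert expand_first_demo(toy, ['A', 'x']) == [['a1', 'x'], ['a2', 'x'], ['x']]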
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def left_refactor(self,target_rule_name,stop_at_set):\n name_suffix = \".post.{}\".format(target_rule_name)\n\n # Map a rule name X to a set of rules Y where X appears\n # as a first nonterminal in one of Y's options.\n appears_first_in = defaultdict(set)\n for name, rule in self.rules.items():\n for option in rule.as_container():\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(name)\n #print(\"appears first dict\\n{}\\n\\n\".format(appears_first_in))\n\n po = PrintOption()\n po.is_canonical = self.is_canonical\n po.inline_synthetic = False\n candidates = set(self.rules.keys())\n while len(candidates) > 0:\n for A in list(candidates):\n candidates.remove(A)\n if A in stop_at_set:\n continue\n rule = self.rules[A]\n (starts,others,terms,empties) = rule.partition(target_rule_name)\n if len(starts) > 0 and (len(others)+len(terms)+len(empties) == 0):\n #print(\"processing {}\".format(A))\n # Create the new rule.\n new_rule_name = \"{}{}\".format(A,name_suffix)\n # Form alpha1 ... alphaN\n new_options = []\n for option in rule:\n if len(option.as_container()) == 1:\n new_options.append(self.MakeEmpty())\n else:\n assert option.is_container() and (len(option)>1)\n new_options.append(self.MakeSeq(option[1:]))\n self.rules[new_rule_name] = self.MakeChoice(new_options)\n\n # Rewrite A itself.\n self_parts = [self.MakeSymbolName(x) for x in [target_rule_name,new_rule_name]]\n self.rules[A] = self.MakeChoice([self.MakeSeq(self_parts)])\n\n # Update bookkeeping for appears_first_in\n for option in new_options:\n first = option.as_container()[0]\n if first.is_symbol_name():\n appears_first_in[first.content].add(new_rule_name)\n\n # Replace the old rule everywhere it appears in the first\n # position\n for parent_name in list(appears_first_in[A]):\n if parent_name == A:\n # Already processed above\n continue\n parent = self.rules[parent_name]\n (starts,others,terms,empties) = parent.partition(A)\n new_options = []\n for option in starts:\n parts = []\n parts.append(self.MakeSymbolName(target_rule_name))\n parts.append(self.MakeSymbolName(new_rule_name))\n parts.extend(option.as_container()[1:])\n new_options.append(self.MakeSeq(parts))\n new_options.extend(others+terms+empties)\n self.rules[parent_name] = self.MakeChoice(new_options)\n appears_first_in[A].remove(parent_name)\n appears_first_in[target_rule_name].add(parent_name)\n # Set up transitive closure.\n candidates.add(parent_name)\n\n #print()\n #print()\n #print()\n\n #self.absorb_post(target_rule_name)\n self.remove_unused_rules()",
"def match_rule(name, lhs, rhs, wm):\n print(\" ------------ Matching Rule '\", name, \"' --------------\")\n print(\" lhs = \", lhs)\n print(\" rhs = \", rhs)\n print(\" wm = \", wm)\n print()\n def mr_helper(queue, new_wm):\n # Each state in queue is\n # (anteceds-left, subs)\n # print(\" ----- matching rule helper ------\")\n # print(\" queue = \", queue)\n # print(\" new_wm = \", new_wm)\n # print()\n if queue == []: # if the queue is empty, return new_wm\n return new_wm\n else: # else examine the first item in the queue (call it state1)\n state1 = queue[0]\n if state1[0] == []: # If state1 has no antecedents, state1 is a goal state (the rule is matched);\n # call \"execute\" on rhs using the substitution in state1\n derived = execute(state1[1], rhs, new_wm)\n # But don't stop here (this is exhaustive):\n # return mr_helper applied to the rest of the queue, appending\n # whatever new WM assertions \"execute\" returned.\n new_wm = update_wm(new_wm, derived)\n return mr_helper(queue[1:], new_wm)\n elif state1[0] != []: # Else if state1 has antecedents, apply \"match_antecedent\" to them along with wm and the substitutions in state1.\n matched = match_antecedent(state1[0], wm, state1[1])\n if matched == []: # If \"match_antecedent\" returns no new states, return mr_helper on rest of the queue without changing states.\n return mr_helper(queue[1:], new_wm)\n else:\n # Else return mr_helper on the updated queue,\n # i.e., the old one with the new states found\n # by \"match_antecedent\" replacing state1\n queue = matched + queue[1:]\n return mr_helper(queue, new_wm)\n return mr_helper(match_antecedent(lhs, wm ,[]), [])",
"def expand_first(grammar,rule):\n result = []\n # Hoist the rule for 'other' nonterminal.\n phrase = rule.as_container()\n first = phrase[0]\n assert first.is_symbol_name() and (first.content != target_rule_name)\n #print(\" elaborating rule for {} \".format(first.content))\n rest = phrase[1:]\n other_rule = self.rules[first.content]\n for other_rhs in other_rule.as_container():\n result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))\n return result",
"def findDepravedMatchingRules(self, nodeId):\n tokens = list(self.tree.node(nodeId))\n assert len(tokens) > 0\n # Build lexical pseudo rule.\n if len(tokens) == 1:\n target = self.sense.tokens[tokens[0]-1][1]\n pseudoRule = self.ruletable.buildPsuedoRule(target, [])\n return [pseudoRule]\n # Build normal pseudo rule.\n terminalPositions = [n for n in range(len(tokens)) if tokens[n] > 0]\n rules = []\n mapPositionToNewNode = {}\n\n # Create new nodes for terminal tokens.\n for pos in terminalPositions:\n tokenId = tokens[pos]\n newNodeId = max(self.tree.nodes) + 1\n mapPositionToNewNode[pos] = newNodeId\n self.tree.nodes[newNodeId] = [tokenId]\n self.tree.mapParent[newNodeId] = nodeId\n self.tree.mapChildren.setdefault(nodeId, []).append(newNodeId)\n self.sense.mapNodeToMainToken[newNodeId] = tokenId\n self.tree.nodes[nodeId][pos] = -newNodeId\n # Build sites for these rules\n sites = []\n for pos in range(len(tokens)):\n if pos in mapPositionToNewNode:\n sites.append(mapPositionToNewNode[pos])\n elif tokens[pos] < 0:\n sites.append(-tokens[pos])\n # Get pseudo rule.\n pseudoRule = self.ruletable.buildPsuedoRule(None, sites)\n self.recordDependentSitesForNode(nodeId, sites)\n return [pseudoRule]",
"def close(self,grammar):\n def lookup(rule):\n return grammar.rules[rule.content] if isinstance(rule,SymbolName) else rule\n\n dirty_dict = self.id_to_lookahead.copy()\n while len(dirty_dict) > 0:\n # From the dragon book, 1st ed. 4.38 Sets of LR(1) items construction.\n #\n # For each item [ A -> alpha . B beta, a ] in I,\n # and each production \" B -> gamma \" in the grammar,\n # and each terminal b in FIRST(beta a),\n # add [ B -> . gamma, b ] to I if it is not already there.\n work_list = dirty_dict\n dirty_dict = dict()\n for item_id, lookahead in work_list.items():\n item = self.id_to_item[item_id]\n if item.at_end():\n continue\n B = item.next()\n if not B.is_symbol_name():\n continue\n\n # Compute lookahead. (A fresh LookaheadSet)\n new_item_lookahead = item.rest_lookahead_with_other_lookahead(lookahead)\n\n # Iterate over items [ B -> . B_prod ]\n # for each production B -> B_prod in the grammar.\n for candidate in item.items_generated_by_next():\n candidate_id = candidate.reg_info.index\n if candidate_id not in self.id_to_item:\n la = LookaheadSet(new_item_lookahead)\n self.internal_add(candidate, LookaheadSet(new_item_lookahead))\n dirty_dict[candidate_id] = la\n else:\n if self.id_to_lookahead[candidate_id].merge(new_item_lookahead):\n dirty_dict[candidate_id] = self.id_to_lookahead[candidate_id]\n return self",
"def get_cnf(self):\n nonterm = set(self.nonterminal)\n term = set(self.terminal)\n\n rules = list(self.rules)\n cnf = set()\n\n # STEP 1: eliminate nonsolitary terminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 1:\n rhs_list = list(rhs)\n for j in range(len(rhs_list)):\n x = rhs_list[j]\n if x in term: # found nonsolitary terminal\n new_nonterm = 'NT_{}'.format(x)\n new_nonterm_rule = GrammarRule(new_nonterm, (x,), 0.0)\n\n if new_nonterm not in nonterm:\n nonterm.add(new_nonterm)\n cnf.add(new_nonterm_rule)\n else:\n assert new_nonterm_rule in cnf\n rhs_list[j] = new_nonterm\n rhs = tuple(rhs_list)\n rules[i] = GrammarRule(lhs, rhs, log_prob)\n\n # STEP 2: eliminate rhs with more than 2 nonterminals\n for i in range(len(rules)):\n rule = rules[i]\n lhs, rhs, log_prob = rule\n if len(rhs) > 2:\n assert all(x in nonterm for x in rhs), rule\n current_lhs = lhs\n for j in range(len(rhs) - 2):\n new_nonterm = 'BIN_\"{}\"_{}'.format(\n '{}->{}'.format(lhs, ','.join(rhs)), str(j))\n assert new_nonterm not in nonterm, rule\n nonterm.add(new_nonterm)\n cnf.add(\n GrammarRule(current_lhs,\n (rhs[j], new_nonterm),\n log_prob if j == 0 else 0.0))\n current_lhs = new_nonterm\n cnf.add(GrammarRule(current_lhs, (rhs[-2], rhs[-1]), 0.0))\n else:\n cnf.add(rule)\n\n return Grammar(cnf)",
"def preorder(self):\n assert self.is_canonical\n # Names of visited nodes\n visited = set()\n # Names of nodes to visit\n worklist = [LANGUAGE]\n\n result = []\n while len(worklist) > 0:\n successors = []\n for rule_name in worklist:\n if rule_name in visited:\n continue\n result.append(rule_name)\n visited.add(rule_name)\n\n rule = self.rules[rule_name].as_container()\n for rhs in rule:\n phrase = rhs.as_container()\n # Note: this tolerates duplicates among siblings.\n successors.extend([x.content for x in phrase if x.is_symbol_name() and x.content not in visited])\n worklist = successors\n return result",
"def _find_assignment_target_parent(self, name: str) -> \"Scope\":\n return self.parent._find_assignment_target_parent(name)",
"def identify_rule(graph, node_u, node_v):\n in_degree_u = graph.get_deductive_in_degree(node_u)\n in_degree_v = graph.get_deductive_in_degree(node_v)\n\n out_degree_u = graph.get_deductive_out_degree(node_u)\n out_degree_v = graph.get_deductive_out_degree(node_v)\n\n ancestor_target_u = graph.get_node_attribute(node_u, graph.ANCESTOR_TARGET)\n ancestor_target_v = graph.get_node_attribute(node_v, graph.ANCESTOR_TARGET)\n\n is_collapsed_u = graph.get_node_attribute(node_u, graph.COLLAPSED)\n is_collapsed_v = graph.get_node_attribute(node_v, graph.COLLAPSED)\n\n is_hypothesis_u = False\n is_hypothesis_v = False\n\n if in_degree_u == 0:\n is_hypothesis_u = True\n if in_degree_v == 0:\n is_hypothesis_v = True\n\n collapse_edge = common_out_neighbor(graph, node_v, node_u)\n\n formula_u = graph.get_node_attribute(node_u, \"formula\")\n formula_v = graph.get_node_attribute(node_v, \"formula\")\n\n # print \"node_u: [ id: \", node_u, \"formula: \", formula_u, \", ancestor: \", ancestor_target_u, \"]\"\n # print \"node_v: [ id: \", node_v, \"formula: \", formula_v, \", ancestor: \", ancestor_target_v, \"]\"\n\n features_u = get_node_features(graph, node_u)\n features_v = get_node_features(graph, node_v)\n\n if not collapse_edge:\n if not features_u and not features_v:\n return rule_1\n elif features_u == {HYP} and not features_v:\n return rule_2\n elif not features_u and features_v == {HYP}:\n return rule_3\n elif features_u == {HYP} and features_v == {HYP}:\n return rule_4\n elif features_u == {CPS} and not features_v:\n return rule_5\n elif features_u == {CPS} and features_v == {HYP}:\n return rule_6\n elif features_u == {ATGT} and features_v == {ATGT}:\n return rule_11\n elif (features_u == {ATGT} and not features_v) or \\\n (not features_u and features_v == {ATGT}):\n return rule_11a\n elif (features_u == {ATGT} and features_v == {HYP}) or \\\n (features_u == {HYP} and features_v == {ATGT}):\n return rule_11b\n elif (features_u == {ATGT, HYP} and not features_v) or \\\n (not features_u and features_v == {ATGT, HYP}):\n return rule_11c\n elif (features_u == {ATGT, HYP} and features_v == {HYP}) or \\\n (features_u == {HYP} and features_v == {ATGT, HYP}):\n return rule_11d\n elif features_u == {CPS, HYP} and features_v == {ATGT, HYP}:\n return rule_11d\n elif features_u == {ATGT} and features_v == {ATGT, HYP}:\n return rule_12\n elif features_u == {ATGT, HYP} and features_v == {ATGT}:\n return rule_13\n elif features_u == {ATGT, HYP} and features_v == {ATGT, HYP}:\n return rule_14\n elif features_u == {CPS} and features_v == {ATGT}:\n return rule_17\n elif features_u == {CPS} and features_v == {ATGT, HYP}:\n return rule_18\n elif features_u == {CPS, HYP} and features_v == {HYP}:\n return rule_11f\n else:\n raise Exception(\"0 - Regra desconhecida: ftu: {}, ftv: {}\".format(\n features_u, features_v))\n else:\n if features_u == {ATGT} and features_v == {ATGT}:\n return rule_7\n elif features_u == {ATGT} and features_v == {ATGT, HYP}:\n return rule_8\n elif features_u == {ATGT, HYP} and features_v == {ATGT}:\n return rule_9\n elif features_u == {ATGT, HYP} and features_v == {ATGT, HYP}:\n return rule_10\n elif features_u == {CPS} and features_v == {ATGT}:\n return rule_15\n elif features_u == {ATGT} and features_v == {ATGT, HYP}:\n return rule_16\n elif features_u == {HYP, CPS} and features_v == {HYP, ATGT}:\n return rule_11e\n elif features_u == {CPS} and not features_v:\n return rule_11g\n else:\n raise Exception(\"1 - Regra desconhecida: ftu: {}, ftv: {}\".format(\n features_u, features_v))",
"def path_rules(self, from_symbol, to_symbol):\n # type: (Type[Nonterminal], Type[Nonterminal]) -> List[Type[Rule]]\n if from_symbol not in self.t or to_symbol not in self.t:\n return []\n return self.f[self.t[from_symbol]][self.t[to_symbol]] or []",
"def get_rules_by_rhs(grammar, symbol):\n\trules = []\n\tfor rule in grammar:\n\t\tif symbol in rule.rhs:\n\t\t\trules.append(rule)\n\treturn rules",
"def generate_graph(rule_dict, rule_list):\n\n node_counter = 1\n non_terminals = set()\n # new_g = nx.MultiGraph()\n new_g = LightMultiGraph()\n\n new_g.add_node(0, label=0)\n non_terminals.add(0)\n\n rule_ordering = [] # list of rule ids in the order they were fired\n\n while len(non_terminals) > 0: # continue until no more non-terminal nodes\n # choose a non terminal node at random\n node_sample = random.sample(non_terminals, 1)[0]\n lhs = new_g.nodes[node_sample]['label']\n\n rhs_candidates = list(filter(lambda rule: rule.is_active, rule_dict[lhs]))\n # consider only active rules\n\n if len(rhs_candidates) == 1:\n rhs = rhs_candidates[0]\n else:\n weights = np.array([rule.frequency for rule in rhs_candidates])\n weights = weights / np.sum(weights) # normalize into probabilities\n idx = int(np.random.choice(range(len(rhs_candidates)), size=1, p=weights)) # pick based on probability\n rhs = rhs_candidates[idx]\n\n # print(f'firing rule {rule_list.index(rhs)}')\n # rule_ordering.append(rule_list.index(rhs))\n # print('Selected node {} with label {}'.format(node_sample, lhs))\n\n broken_edges = find_boundary_edges(new_g, [node_sample])\n\n # print('broken edges: ', broken_edges)\n\n assert len(broken_edges) == lhs\n\n new_g.remove_node(node_sample)\n non_terminals.remove(node_sample)\n\n nodes = {}\n\n for n, d in rhs.graph.nodes(data=True): # all the nodes are internal\n new_node = node_counter\n nodes[n] = new_node\n new_g.add_node(new_node, attr_dict=d)\n if 'label' in d: # if it's a new non-terminal add it to the set of non-terminals\n non_terminals.add(new_node)\n node_counter += 1\n\n\n # randomly assign broken edges to boundary edges\n random.shuffle(broken_edges)\n\n # randomly joining the new boundary edges from the RHS to the rest of the graph - uniformly at random\n for n, d in rhs.graph.nodes(data=True):\n num_boundary_edges = d['b_deg']\n if num_boundary_edges == 0: # there are no boundary edges incident to that node\n continue\n\n assert len(broken_edges) >= num_boundary_edges\n\n edge_candidates = broken_edges[: num_boundary_edges] # picking the first num_broken edges\n broken_edges = broken_edges[num_boundary_edges: ] # removing them from future consideration\n\n for u, v in edge_candidates: # each edge is either (node_sample, v) or (u, node_sample)\n if u == node_sample:\n u = nodes[n]\n else:\n v = nodes[n]\n # print('adding broken edge ({}, {})'.format(u, v))\n new_g.add_edge(u, v)\n\n\n # adding the rhs to the new graph\n for u, v in rhs.graph.edges():\n # print('adding RHS internal edge ({}, {})'.format(nodes[u], nodes[v]))\n edge_multiplicity = rhs.graph[u][v]['weight'] #\n for _ in range(edge_multiplicity):\n new_g.add_edge(nodes[u], nodes[v])\n return new_g, rule_ordering",
"def prepare_parsing(grammar_name):\n grammar = input_format[grammar_name]\n antlr_lexer_class, antlr_parser_class = build_antlr_grammars()\n replacements, action_positions = analyze_grammars(antlr_lexer_class, antlr_parser_class, grammar['files'], grammar['replacements'])\n logger.debug('Replacements are calculated...')\n\n current_workdir = join(grammar_workdir, grammar_name) if grammar_name else grammar_workdir\n makedirs(current_workdir, exist_ok=True)\n if current_workdir not in sys.path:\n sys.path.append(current_workdir)\n\n # Inject actions into the target grammars to help localizing part of the test case that are optional.\n for i, g in enumerate(grammar['files']):\n grammar['files'][i] = join(current_workdir, basename(g))\n inject_optional_actions(g, action_positions[g], grammar['files'][i])\n\n target_lexer_class, target_parser_class, target_listener_class = build_grammars(tuple(grammar['files']), current_workdir, antlr, lang)\n logger.debug('Target grammars are processed...')\n\n if lang == 'java':\n compile_java_sources(target_lexer_class, target_parser_class, target_listener_class, current_workdir)\n input_format[grammar_name].update({'lexer': target_lexer_class, 'parser': target_parser_class, 'listener': target_listener_class, 'replacements': replacements})\n return\n\n class ExtendedTargetParser(target_parser_class):\n \"\"\"\n ExtendedTargetParser is a subclass of the original parser implementation.\n It can trigger state changes that are needed to identify parts of the input\n that are not needed to keep it syntactically correct.\n \"\"\"\n def enter_optional(self):\n self.trigger_listener('enter_optional')\n\n def exit_optional(self):\n self.trigger_listener('exit_optional')\n\n def enterRecursionRule(self, localctx: ParserRuleContext, state: int, ruleIndex: int, precedence: int):\n target_parser_class.enterRecursionRule(self, localctx, state, ruleIndex, precedence)\n self.trigger_listener('recursion_enter')\n\n def pushNewRecursionContext(self, localctx: ParserRuleContext, state: int, ruleIndex: int):\n target_parser_class.pushNewRecursionContext(self, localctx, state, ruleIndex)\n self.trigger_listener('recursion_push')\n\n def unrollRecursionContexts(self, parentCtx: ParserRuleContext):\n target_parser_class.unrollRecursionContexts(self, parentCtx)\n self.trigger_listener('recursion_unroll')\n\n def trigger_listener(self, event):\n for listener in self.getParseListeners():\n if hasattr(listener, event):\n getattr(listener, event)()\n\n def syntax_error_warning(self):\n if self._syntaxErrors:\n logger.warning('%s finished with %d syntax errors. 
This may decrease reduce quality.',\n target_parser_class.__name__, self._syntaxErrors)\n\n class ExtendedTargetListener(target_listener_class):\n \"\"\"\n ExtendedTargetListener is a subclass of the original listener implementation.\n It can trigger state changes that are needed to identify parts of the input\n that are not needed to keep it syntactically correct.\n \"\"\"\n def __init__(self, parser):\n self.parser = parser\n self.current_node = None\n self.island_nodes = []\n self.root = None\n\n def recursion_enter(self):\n assert isinstance(self.current_node, HDDRule)\n node = HDDRule(self.current_node.name)\n self.current_node.add_child(node)\n self.current_node.recursive_rule = True\n self.current_node = node\n\n def recursion_push(self):\n assert len(self.current_node.parent.children) > 0\n\n first_child = self.current_node.parent.children[0]\n self.current_node.parent.remove_child(first_child)\n self.current_node.add_child(first_child)\n\n def recursion_unroll(self):\n assert self.current_node.recursive_rule\n assert len(self.current_node.children) == 1 and self.current_node.name == self.current_node.children[0].name\n children_to_lift = self.current_node.children[0].children\n parent = self.current_node.parent\n if children_to_lift:\n self.current_node.children = []\n self.current_node.add_children(children_to_lift)\n self.current_node.start = self.current_node.children[0].start\n self.current_node.end = self.current_node.children[-1].end\n else:\n parent.remove_child(self.current_node)\n self.current_node = parent\n\n def enterEveryRule(self, ctx:ParserRuleContext):\n name = self.parser.ruleNames[ctx.getRuleIndex()]\n node = HDDRule(name)\n if not self.root:\n self.root = node\n else:\n assert self.current_node\n self.current_node.add_child(node)\n self.current_node = node\n\n def exitEveryRule(self, ctx:ParserRuleContext):\n # If the input contains syntax error, then the last optional block was may not closed.\n while isinstance(self.current_node, HDDQuantifier):\n self.exit_optional()\n\n assert self.current_node.name == self.parser.ruleNames[ctx.getRuleIndex()],\\\n '%s (%s) != %s' % (self.current_node.name, repr(self.current_node), self.parser.ruleNames[ctx.getRuleIndex()])\n\n start, _ = self.tokenBoundaries(ctx.start)\n _, end = self.tokenBoundaries(ctx.stop if ctx.stop else ctx.start)\n self.current_node.start = start\n self.current_node.end = end\n\n if self.current_node.parent:\n self.current_node = self.current_node.parent\n\n def tokenBoundaries(self, token):\n line_breaks = token.text.count('\\n')\n return Position(token.line, token.column), \\\n Position(token.line + line_breaks,\n token.column + len(token.text) if not line_breaks else\n len(token.text) - token.text.rfind('\\n'))\n\n def visitTerminal(self, ctx:TerminalNode):\n name, text = (self.parser.symbolicNames[ctx.symbol.type], ctx.symbol.text) if ctx.symbol.type != Token.EOF else ('EOF', '')\n start, end = self.tokenBoundaries(ctx.symbol)\n\n node = HDDToken(name, text, start=start, end=end)\n self.current_node.add_child(node)\n if name in grammar['islands']:\n self.island_nodes.append(node)\n\n def visitErrorNode(self, ctx:ErrorNode):\n if hasattr(ctx, 'symbol'):\n start, end = self.tokenBoundaries(ctx.symbol)\n self.current_node.add_child(HDDErrorToken(ctx.symbol.text, start=start, end=end))\n\n def enter_optional(self):\n quant_node = HDDQuantifier()\n self.current_node.add_child(quant_node)\n self.current_node = quant_node\n\n def exit_optional(self):\n assert self.current_node.parent, 'Quantifier node 
has no parent.'\n assert self.current_node.children, 'Quantifier node has no children.'\n\n self.current_node.start = self.current_node.children[0].start\n self.current_node.end = self.current_node.children[-1].end\n self.current_node = self.current_node.parent\n\n def print_tree(self):\n if self.root and logger.isEnabledFor(logging.DEBUG):\n logger.debug(self.root.tree_str(current=self.current_node))\n\n input_format[grammar_name].update({'lexer': target_lexer_class, 'parser': ExtendedTargetParser, 'listener': ExtendedTargetListener, 'replacements': replacements})",
"def main(rules, antecedent_prefix, consequent_prefix, deltas_prefix):\n _main(rules, antecedent_prefix, consequent_prefix, deltas_prefix)",
"def applyModifiers(self):\n if not self.getScopeUpdated():\n self.updateScopes()\n targets = self.getConTextModeNodes(\"target\")\n modifiers = self.getConTextModeNodes(\"modifier\")\n for target in targets:\n for modifier in modifiers:\n if modifier.applyRule(target):\n if self.getVerbose():\n print(\"applying relationship between\", modifier, target)\n\n self.add_edge(modifier, target)",
"def _preorder_depth_first_walk(self, target_filepath):\n\n target = None\n current_metadata = self.metadata['current']\n role_names = ['targets']\n\n # Ensure the client has the most up-to-date version of 'targets.txt'.\n # Raise 'tuf.NoWorkingMirrorError' if the changed metadata cannot be successfully\n # downloaded and 'tuf.RepositoryError' if the referenced metadata is\n # missing. Target methods such as this one are called after the top-level\n # metadata have been refreshed (i.e., updater.refresh()).\n self._update_metadata_if_changed('targets')\n\n # Preorder depth-first traversal of the tree of target delegations.\n while len(role_names) > 0 and target is None:\n\n # Pop the role name from the top of the stack.\n role_name = role_names.pop(-1)\n\n # The metadata for 'role_name' must be downloaded/updated before\n # its targets, delegations, and child roles can be inspected.\n # self.metadata['current'][role_name] is currently missing.\n # _refresh_targets_metadata() does not refresh 'targets.txt', it\n # expects _update_metadata_if_changed() to have already refreshed it,\n # which this function has checked above.\n self._refresh_targets_metadata(role_name, include_delegations=False)\n\n role_metadata = current_metadata[role_name]\n targets = role_metadata['targets']\n delegations = role_metadata.get('delegations', {})\n child_roles = delegations.get('roles', [])\n target = self._get_target_from_targets_role(role_name, targets,\n target_filepath)\n\n if target is None:\n\n # Push children in reverse order of appearance onto the stack.\n # NOTE: This may be a slow operation if there are many delegated roles.\n for child_role in reversed(child_roles):\n child_role_name = self._visit_child_role(child_role, target_filepath)\n if child_role_name is None:\n logger.debug('Skipping child role '+repr(child_role_name))\n else:\n logger.debug('Adding child role '+repr(child_role_name))\n role_names.append(child_role_name)\n\n else:\n logger.debug('Found target in current role '+repr(role_name))\n\n return target",
"def generate_and_add_key_from_rule(self, root):\n if root.children:\n childrens = self.get_children(root)\n all_child_label = \" \".join(childrens)\n #print s\n\n if root.label in self.unique_LHS_count:\n self.unique_LHS_count[root.label] += 1\n else:\n self.unique_LHS_count[root.label] = 1\n\n key = root.label+'-->'+all_child_label\n\n if key in self.unique_rules_and_their_count_dict:\n self.unique_rules_and_their_count_dict[key] += 1\n else:\n self.unique_rules_and_their_count_dict[key] = 1\n\n for each_child in root.children:\n self.generate_and_add_key_from_rule(each_child)\n else:\n return",
"def GuessTargets(self, target_name):\n return difflib.get_close_matches(target_name, self.GetTargets(), 10, 0.4)",
"def backchain_to_goal_tree(rules, hypothesis):\n goal_tree = []\n for rule in rules:\n var = match(rule.consequent(),hypothesis)\n if var: \n sub_hypothesis = populate(rule.antecedent(), var)\n if isinstance(rule.antecedent(), OR):\n sub_tree = [backchain_to_goal_tree(rules, antecedent) for antecedent in sub_hypothesis]\n goal_tree.append(OR(sub_tree))\n\n elif isinstance(rule.antecedent(), AND):\n sub_tree = [backchain_to_goal_tree(rules, antecedent) for antecedent in sub_hypothesis]\n goal_tree.append(AND(sub_tree))\n \n else:\n goal_tree.append(backchain_to_goal_tree(rules, sub_hypothesis))\n \n return simplify(OR(hypotesis, goal_tree)",
"def postprocess_cell_fn(nodes):\n targets_to_rules = {}\n for node in nodes:\n # Discard targets that are not substrings of the gold target.\n if node.target_string in target:\n # Keep one arbitrary derivation per target.\n # TODO(petershaw): Revisit this.\n targets_to_rules[node.target_string] = node\n new_nodes = list(targets_to_rules.values())\n return new_nodes",
"def _parse_rule(self, tokens):\n if self._currently_parsed_declaration is None:\n self.tokenizer.syntax_error(\"Got a rule outside of \"+\n \"a unit declaration.\")\n\n self._check_indentation(tokens[0])\n\n sub_rules = self.tokens_to_sub_rules(tokens[1:])\n\n relevant_dict = None\n if self._currently_parsed_declaration[0] == pu.UnitType.alias:\n relevant_dict = self.alias_definitions\n elif self._currently_parsed_declaration[0] == pu.UnitType.slot:\n relevant_dict = self.slot_definitions\n else: # intent\n relevant_dict = self.intent_definitions\n\n name = self._currently_parsed_declaration[1]\n variation_name = self._currently_parsed_declaration[2].variation_name\n relevant_dict[name].add_rule(sub_rules, variation_name)",
"def get_parents(target, concept_map):\n parents = []\n target_index = concept_map[CONCEPTS_STR].index(target)\n for row in range(len(concept_map[ADJ_MAT_STR])): \n # get value in adjMat for each row at target concept's col\n val = concept_map[ADJ_MAT_STR][row][target_index] \n if val > 0 and target_index != row: # don't care concepts are their own parents\n # print('parent found at {}, {}'.format(row, target_index)) # TODO remove\n parents.append(concept_map[CONCEPTS_STR][row])\n return parents",
"def __call__(self, config):\n # loop over the rules sorted according to their dependencies and\n # apply them\n for rule in networkx.topological_sort(self.graph):\n value = rule.apply(config)\n if value is not None:\n set_from_path(config, rule.name, value)",
"def _rules_to_trxf_dnf_ruleset(self, rules, label):\n conjunctions = list()\n for rule in rules:\n conjunction = self._rule_to_trxf_conjunction(rule)\n conjunctions.append(conjunction)\n dnf_ruleset = DnfRuleSet(conjunctions, label)\n return dnf_ruleset",
"def hrules(self):\n ...",
"def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)",
"def cleanUpRules(self):\n\n\t\t# initialize\n\t\tscoreDict = {}\n\t\tnewRules = {}\n\n\t\t# loop through rules\n\t\tfor i, tup in enumerate(self.generatedRules):\n\n\n\t\t\tantecedent = str(tup[0].antecedent)\n\n\t\t\t# if there is no rule in the scoredictionary yet with the same antecedent, put it in both dictionaries\n\t\t\tif (not antecedent in scoreDict):\n\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\telse:\n\n\t\t\t\t# if there is, then first compare if the degree is higher before overwriting\n\t\t\t\tif (tup[1] > scoreDict[antecedent]):\n\t\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\t\telse:\n\t\t\t\t\t# not higher? don't overwrite\n\t\t\t\t\tcontinue\n\n\t\t# save rules\n\t\tself.generatedRules = []\n\t\tfor key in newRules:\n\t\t\tself.generatedRules.append(newRules[key])\n\n\t\treturn",
"def explain(self):\n assert (self.target_label is not None), 'Not fitted or not fitted for a specific pos value. Use export_rules ' \\\n 'in the latter case. '\n\n if len(self._rule_map.items()) == 0:\n return DnfRuleSet([], self.target_label)\n for label, rules in self._rule_map.items():\n if label == self.target_label:\n return self._rules_to_trxf_dnf_ruleset(rules, label)\n raise Exception('No rules found for label: ' + str(self.target_label))",
"def LL1(self):\n\n conflicts = []\n table = dict()\n def add(lhs,terminal,action):\n action_key = (lhs,terminal)\n if action_key in table:\n # Record the conflict, and only keep the original.\n prev = table[action_key]\n conflicts.append((lhs,terminal,prev,action))\n else:\n table[action_key] = action\n\n for lhs, rule in self.rules.items():\n if rule.is_container():\n # Top-level rules are Choice nodes.\n if not isinstance(rule,Choice):\n raise RuntimeError(\"expected Choice node for \"+\n +\"'{}' rule, got: {}\".format(lhs,rule))\n # For each rule A -> alpha,\n for rhs in rule:\n for x in first(self,rhs.as_container()):\n if x.is_empty():\n # Add A -> alpha to M[A,b] for each terminal\n # b in Follow(A)\n for f in rule.follow:\n add(lhs,f,LLReduce(lhs,rhs))\n else:\n # For each terminal x in First(alpha), add\n # A -> alpha to M[A,x]\n add(lhs,x,LLReduce(lhs,rhs))\n return (table,conflicts)",
"def _generate_relative_location_rule_action(target_object, target_object_id,\n neighbor_object,\n context_direction_str):\n input_content_list = []\n action_result_list = []\n action_rule = common.ActionRules.NEIGHBOR_CONTEXT_RULE\n\n if _valid_clickable_object(target_object):\n (verb_str, action_type) = _get_verb_str_action_type('click',\n target_object.obj_type)\n input_content_list = [config.LABEL_DEFAULT_VALUE_STRING]\n elif _valid_typable_object_with_name(target_object):\n (verb_str, action_type) = _get_verb_str_action_type('input',\n target_object.obj_type)\n input_content_list = [\n _generate_string_seq()\n for _ in range(config.INPUT_ACTION_UPSAMPLE_RATIO)\n ]\n obj_desc_str = _get_obj_desc_str(\n action_rule,\n neighbor_object,\n context_direction_str=context_direction_str,\n target_ui_object=target_object)\n\n for input_content_str in input_content_list:\n action = common.Action(\n verb_str=verb_str,\n obj_desc_str=obj_desc_str,\n input_content_str=input_content_str,\n action_type=action_type,\n action_rule=action_rule,\n target_obj_idx=target_object_id)\n action_result_list.append(action)\n for action_element in action_result_list:\n _fill_action_info(action_element)\n return action_result_list"
] |
[
"0.53636134",
"0.48799896",
"0.4737397",
"0.47098532",
"0.46533763",
"0.46415",
"0.46326968",
"0.45365122",
"0.44336197",
"0.44258782",
"0.43993366",
"0.4395593",
"0.4362665",
"0.43400556",
"0.43341917",
"0.4332908",
"0.43276796",
"0.4327197",
"0.4322775",
"0.43044192",
"0.42946708",
"0.42914796",
"0.4283468",
"0.42758742",
"0.42739615",
"0.42618683",
"0.42481264",
"0.42398027",
"0.42320332",
"0.4219582"
] |
0.6397924
|
0
|
Constructs an LL(1) parser table and associated conflicts (if any).
|
def LL1(self):
conflicts = []
table = dict()
def add(lhs,terminal,action):
action_key = (lhs,terminal)
if action_key in table:
# Record the conflict, and only keep the original.
prev = table[action_key]
conflicts.append((lhs,terminal,prev,action))
else:
table[action_key] = action
for lhs, rule in self.rules.items():
if rule.is_container():
# Top-level rules are Choice nodes.
if not isinstance(rule,Choice):
raise RuntimeError("expected Choice node for "+
+"'{}' rule, got: {}".format(lhs,rule))
# For each rule A -> alpha,
for rhs in rule:
for x in first(self,rhs.as_container()):
if x.is_empty():
# Add A -> alpha to M[A,b] for each terminal
# b in Follow(A)
for f in rule.follow:
add(lhs,f,LLReduce(lhs,rhs))
else:
# For each terminal x in First(alpha), add
# A -> alpha to M[A,x]
add(lhs,x,LLReduce(lhs,rhs))
return (table,conflicts)
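# A minimal standalone sketch of the same table-building idea, assuming a toy
# grammar encoded as plain dicts with precomputed FIRST sets (hypothetical
# names, not this class's API); the epsilon/FOLLOW branch is omitted. A second
# production landing on an occupied (nonterminal, terminal) cell is recorded as
# a conflict instead of overwriting the earlier entry, mirroring add() above.

toy_rules = {'S': [['a', 'S'], ['b'], ['a']]}   # third option forces a conflict on (S, a)
toy_first = {('a', 'S'): {'a'}, ('b',): {'b'}, ('a',): {'a'}}

toy_table, toy_conflicts = {}, []
for lhs, alternatives in toy_rules.items():
    for rhs in alternatives:
        for terminal in toy_first[tuple(rhs)]:
            key = (lhs, terminal)
            if key in toy_table:
                toy_conflicts.append((lhs, terminal, toy_table[key], rhs))
            else:
                toy_table[key] = rhs

assert toy_table == {('S', 'a'): ['a', 'S'], ('S', 'b'): ['b']}
assert toy_conflicts == [('S', 'a', ['a', 'S'], ['a'])]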
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)",
"def __init__(self, rules):\n\n self.grammar = defaultdict(list)\n self.word_pos = dict()\n self.pos = set()\n\n for rule in rules:\n rule = rule.rstrip()\n if len(rule) > 0:\n rule = rule.split('->') # split start/end\n left = rule[0].strip()\n right = [(re.sub(r'[^a-zA-Z\\d\\s-]', '', r)).strip().split(' ') for r in rule[1].split('|')]\n self.grammar[left] += right\n\n # extract POS tags\n # pos iff on lhs of rhs without lhs\n # det -> that\n # that -> #\n for left, right in self.grammar.iteritems():\n for r in right:\n for r2 in r:\n if not self.grammar.has_key(r2):\n self.pos.add(left)",
"def _slr_build_parser_table(productionset):\r\n result = ParserTable()\r\n statesset = build_states_sets(productionset)\r\n for itemindex, itemset in enumerate(statesset):\r\n LOG.debug(\"_slr_build_parser_table: Evaluating itemset:\" + str(itemset))\r\n for symbol in productionset.getSymbols() + [EndSymbol()]:\r\n numberoptions = 0\r\n for lritem in itemset.itemlist:\r\n #if cursor is before a terminal, and there is a transition to another itemset with the following terminal, append shift rule\r\n if isinstance(symbol, TerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol):\r\n destinationstate = statesset.index(itemset.get_transition(symbol))\r\n result.append(itemindex, symbol, \"Shift\", destinationstate)\r\n numberoptions += 1\r\n if isinstance(symbol, NonTerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol):\r\n destinationstate = statesset.index(itemset.get_transition(symbol))\r\n result.append_goto(itemindex, symbol, destinationstate)\r\n #if cursor is at the end of the rule, then append reduce rule and go transition\r\n if lritem.previous_symbol() == symbol and lritem.is_last_position() and symbol != Extended_S:\r\n for x in productionset.next_lookup(symbol):\r\n from pydsl.Grammar.Definition import String\r\n if isinstance(x, list):\r\n result.append(itemindex, TerminalSymbol(String(x[0])), \"Reduce\", None, lritem.rule)\r\n else:\r\n result.append(itemindex, TerminalSymbol(String(x)), \"Reduce\", None, lritem.rule)\r\n numberoptions += 1\r\n #if cursor is at the end of main rule, and current symbol is end, then append accept rule\r\n if isinstance(symbol, EndSymbol) and lritem.previous_symbol() == productionset.initialsymbol and lritem.next_symbol() == EndSymbol():\r\n result.append(itemindex, symbol, \"Accept\", None)\r\n numberoptions += 1\r\n if not numberoptions:\r\n LOG.info(\"No rule found to generate a new parsertable entry \")\r\n LOG.debug(\"symbol: \" + str(symbol))\r\n LOG.debug(\"itemset: \" + str(itemset))\r\n elif numberoptions > 1: #FIXME can it count duplicated entries?\r\n raise Exception(\"LR Conflict %s\" % symbol)\r\n return result",
"def BNF():\n\n sect_begin = Literal(\"{\").suppress()\n sect_end = Literal(\"}\").suppress()\n array_begin = Literal(\"[\").suppress()\n array_end = Literal(\"]\").suppress()\n tag_begin = Literal(\"<\").suppress()\n tag_end = Literal(\">\").suppress()\n eql = Literal(\"=\").suppress()\n dmark = Literal('$').suppress()\n end_data = Literal('$end').suppress()\n prtable = alphanums + r'!$%&*+-./<>?@^_|~'\n int_t = Regex('[-]?\\d+')\n float_t = Regex('-?\\d+\\.\\d*([eE]?[+-]?\\d+)?')\n bool_t = Regex('([Yy]es|[Nn]o|[Tt]rue|[Ff]alse|[Oo]n|[Oo]ff)')\n\n # Helper definitions\n kstr = quotedString.setParseAction(\n removeQuotes) ^ float_t ^ int_t ^ bool_t ^ Word(prtable)\n name = Word(alphas + \"_\", alphanums + \"_\")\n vec = array_begin + delimitedList(\n float_t ^ int_t ^ bool_t ^ Word(prtable) ^ Literal(\"\\n\").suppress() ^\n quotedString.setParseAction(removeQuotes)) + array_end\n sect = name + sect_begin\n tag_sect = name + Group(tag_begin + name + tag_end) + sect_begin\n\n # Grammar\n keyword = name + eql + kstr\n vector = name + eql + vec\n data = Combine(dmark + name) + SkipTo(end_data) + end_data\n #section = Forward()\n sect_def = (sect | tag_sect)\n #input = section | data | vector | keyword\n input = sect_def | data | vector | keyword | sect_end\n #section << sect_def + ZeroOrMore(input) + sect_end\n\n # Parsing actions\n int_t.setParseAction(token_actions.to_int)\n float_t.setParseAction(token_actions.to_float)\n bool_t.setParseAction(token_actions.to_bool)\n keyword.setParseAction(token_actions.to_scalar)\n vector.setParseAction(token_actions.to_array)\n data.setParseAction(token_actions.to_data)\n sect.setParseAction(token_actions.to_section)\n tag_sect.setParseAction(token_actions.to_section)\n sect_end.setParseAction(token_actions.end_of_section)\n\n bnf = ZeroOrMore(input) + StringEnd().setFailAction(\n token_actions.parse_error)\n bnf.ignore(pythonStyleComment)\n\n return Dict(bnf)",
"def __init__(self, Ls, germs, nMinorRows, nMinorCols, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.nMinorRows = nMinorRows\n self.nMinorCols = nMinorCols\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = []\n self.allstrs_set = set()\n self.unindexed = []\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsSerialStructure, self).__init__()",
"def __init__(self):\n # just the list of class/construct types\n self.lut = {}\n self.lut[\"struct\"] = structure\n self.lut[\"typedef\"] = typedef\n self.lut[\"define\"] = define\n self.lut[\"enum\"] = enum\n self.lut[\"enumEntry\"] = enumEntry\n self.lut[\"ifdef\"] = ifdef\n self.lut[\"ifndef\"] = ifndef\n self.lut[\"hashIf\"] = hashIf\n self.lut[\"hashElse\"] = hashElse\n self.lut[\"hashElif\"] = hashElif\n self.lut[\"endif\"] = endif\n self.lut[\"banner\"] = banner\n self.lut[\"general\"] = general\n self.lut[\"listDefine\"] = listDefine\n self.lut[\"listEntry\"] = listEntry\n self.lut[\"listNumEls\"] = listNumEls\n self.lut[\"union\"] = union\n\n # and the dictionary of all symbols we declare\n self.symbols = {}",
"def __init__(self, rules, lib_name=None):\n if len(rules) < 1:\n raise PegvmException(\"Cannot create a grammar with no rules!\")\n if \"EOI\" in rules:\n raise PegvmException(\"Invalid rule name: 'EOI'\")\n\n self.lib_name = lib_name\n self.lib = imp.load_source('lib', self.lib_name+'.py') if self.lib_name != None else None\n self.top_rule = rules[0]\n self.rules = rules\n self.rule_dict = {}\n for rule in rules:\n rule.set_grammar(self)\n self.rule_dict[rule.name] = rule\n self.rule_dict[\"EOI\"] = EOI([])",
"def __init__(self, Ls, germs, prepStrs, effectStrs, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.prepStrs = prepStrs[:]\n self.effectStrs = effectStrs[:]\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = []\n self.allstrs_set = set()\n self.unindexed = [] # unindexed strings\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsStructure, self).__init__()",
"def __init__(self):\n self.table = {}\n self.ls = []",
"def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table",
"def __init__(self, start, productions):\n assert _chktype(1, start, Nonterminal)\n assert _chktype(2, productions, (CFGProduction,), [CFGProduction])\n self._start = start\n self._productions = tuple(productions)\n # Index of lhs nonterminals to rules\n self._index = {}\n # Reverse index of rhs tokens to rules\n self._rindex = {}\n # List of productions that have some terminals in the rhs\n self._lexicon_grammar = []\n # List of productions that have no terminals in the rhs\n self._nt_grammar = []\n for production,n in zip(self._productions,range(len(self._productions))):\n self._index.setdefault(production.lhs(),[])\n self._index[production.lhs()].append(n)\n nonterminals = 1\n for token in production.rhs():\n nonterminals = nonterminals and isinstance(token,Nonterminal)\n if self._rindex.has_key(token): self._rindex[token].append(n)\n else: self._rindex[token] = [n]\n if nonterminals: self._nt_grammar.append(n)\n else: self._lexicon_grammar.append(n)",
"def parse_ltag_from_dict(self, tree_dict):\n\n spine = re.sub('[()]', '', tree_dict['spine'])\n node_labels = spine.split()\n nodes = [SpinalLTAG(label, children=[], tree_type=tree_dict['type']) for label in node_labels]\n nodes.append(tree_dict['terminal'])\n\n for current, next in pairwise(nodes):\n current.append(next)\n\n root = nodes[0]\n root.predicate = tree_dict['predicate']\n root.roleset_id = tree_dict['roleset_id']\n root.num_args = tree_dict['num_args']\n root.tree_id = tree_dict['tree_id']\n root.parent_id = tree_dict['parent_id']\n root.parent_attach_id = tuple(tree_dict['parent_attach_id']) if tree_dict['parent_attach_id'] is not None else None\n\n # Create rules and assign them to nodes in tree\n for rule_dict in tree_dict['rules']:\n rule = Rule.from_dict(rule_dict)\n root = self.add_rule_to_tree(root, rule)\n\n return root",
"def __init__(self, warnings):\n self.tables = list()\n self.text = list()\n self.context = ''\n self.ancestor = ''\n self.content = ''\n self.warnings = warnings",
"def __init__(self, str):\n super().__init__(str)\n\n # TODO error mng\n self.internal_code = self.parse_def(str)\n self.table_name = self.extract_tables(str)\n\n print(self.internal2RustStruct())\n print()\n print(self.internal2RustDieselSchema([\"TODO\", \"TODO2\"]))\n print()",
"def __init__(self,l=None,c=True):\r\n\t\t\r\n\t\t# default None to zero\r\n\t\tif l is None:\r\n\t\t\tl = 0\r\n\t\t\t\r\n\t\tif l == []:\r\n\t\t\tl = 0\r\n\t\t\r\n\t\t# attempt to translate from string\r\n\t\ttry:\r\n\t\t\tl = Li._translate(l)\r\n\t\t\t\r\n\t\t# otherwise try to make a Term in a list\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tl = [Te(l)]\r\n\t\t\t\t\t\r\n\t\t\t# or assume already list of Terms\r\n\t\t\texcept:\r\n\t\t\t\tl = [Te(i) for i in l]\r\n\t\t\t\r\n\t\t# condense?\r\n\t\tif c:\r\n\t\t\tl = Li._condense(l)\r\n\t\t\t\r\n\t\t# deposit terms, skip zeroes\r\n\t\tfor i in l:\r\n\t\t\tif 0 not in i:\r\n\t\t\t\tself.append(i)",
"def init_line_list():\n # Get str lengths from defs\n len_line = defs.str_len()['ion']\n len_src = defs.str_len()['Source']\n # Load sources to check\n sources = arcl_io.load_source_table()\n src_files = sources['File'].data\n if len(src_files[0]) > len_src:\n raise ValueError(\"Source filename now exceeds table. Should fix source name\")\n dummy_src = str('#')*len_src\n # Arc Line name\n dummy_line = str('#')*len_line\n #\n\n # Dict for Table\n idict = OrderedDict()\n idict['ion'] = dummy_line\n idict['wave'] = 0.\n idict['NIST'] = 0\n idict['Instr'] = 0 # Flag for instrument\n idict['amplitude'] = 0\n idict['Source'] = dummy_src\n\n # Table\n tkeys = idict.keys()\n lst = [[idict[tkey]] for tkey in tkeys]\n init_tbl = Table(lst, names=tkeys)\n\n # Return\n return init_tbl",
"def make_dfsm_from_table(table: AnyStr) -> DFSM:\n start = None\n accepting = []\n transitions = []\n states = []\n\n def read_state_symbol(sym):\n nonlocal start\n nonlocal accepting\n nonlocal symbols\n state = ''\n for letter in sym:\n if letter == '(':\n pass\n elif letter == '[':\n pass\n elif letter == ')':\n start = state\n elif letter == ']':\n accepting.append(state)\n else:\n state += letter\n states.append(state)\n return state\n\n lines = table.strip('\\n\\t ').split('\\n')\n symbols = lines.pop(0).replace(' ', '').replace('\\t', '')\n for line in lines:\n parts = [c for c in line.split(' ') if c not in ['', '\\t']]\n state = read_state_symbol(parts.pop(0))\n for i in range(len(parts)):\n transitions.append((state, symbols[i], parts[i]))\n\n dfsm = DFSM(states, symbols, start, accepting)\n for t in transitions:\n dfsm.add_transition(*t)\n return dfsm",
"def parse(source):\n\n def blockify(source):\n\n \"\"\"This is the first step, where the source is broken into paragraphs,\n based on blank lines in the source. The output is a list of strings.\n Each string is a paragraph. Newlines (with any trailing whitespace)\n inside paragraphs are converted to single spaces.\"\"\"\n\n paragraphs = [\"\"]\n for line in source.strip().split(\"\\n\"):\n line = line.strip()\n if line: paragraphs[-1] += line + \" \"\n elif paragraphs[-1]: paragraphs.append(\"\")\n\n return paragraphs\n\n def subparse(block):\n\n \"\"\"This function parses a single paragraph of source, as returned by\n the `blockify` function. This finds the individual verses within the\n given paragraph. It returns an AST for the paragraph, as previously\n described.\n\n TODO: Validate the input based on the AST.\n \"\"\"\n\n verses = []\n context = None\n for char in block:\n\n if char == \"[\":\n if verses: verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n verses.append({\"surah\": \"\", \"verse\": \"\", \"quran\": \"\"})\n context = \"surah\"\n elif char == \":\" and context == \"surah\":\n verses[-1][\"surah\"] = int(verses[-1][\"surah\"])\n context = \"verse\"\n elif char == \"]\":\n verses[-1][\"verse\"] = int(verses[-1][\"verse\"])\n context = \"quran\"\n else: verses[-1][context] += char\n\n verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n return verses\n\n return [ subparse(block) for block in blockify(source) ]",
"def __init__(self, t, ln):\n self.t = t\n self.ln = ln\n self.index = []\n size = len(t)\n for i in range(len(t) - ln + 1):\n self.index.append((t[i:i + ln], i)) # add <substr, offset> pair\n self.index.sort() # sort pairs",
"def createLsystemFromFile( filename ):\n\tfp = open(filename, \"r\")\n\tlines = fp.readlines()\n\tfp.close()\n\tlsys = init()\n\tfor line in lines:\n\t\twords = line.split()\n\t\tif words[0] == 'base':\n\t\t\tsetBase(lsys, words[1])\n\t\telif words[0] == 'rule':\n\t\t\taddRule(lsys, words[1:])\n\treturn lsys",
"def parser_bnf():\n at = Literal(\"@\").suppress()\n caret = Literal(\"^\")\n colon = Literal(\":\").suppress()\n left_bracket = Literal(\"[\").suppress()\n period = Literal(\".\").suppress()\n right_bracket = Literal(\"]\").suppress()\n\n # zero_index ::= [0-9]+\n zero_index = Word(nums).setParseAction(lambda s, l, t: int(t[0]))\n\n # filename ::= [A-Za-z0-9][-A-Za-z0-9._ ]+\n filename_first = Word(alphanums, exact=1)\n filename_rest = Word(alphanums + \"-_/. \")\n filename = Combine(filename_first + Optional(filename_rest))\n\n # millisecs ::= \".\" [0-9]+\n millisecs = (Word(nums).setParseAction(\n lambda s, l, t: int(t[0][:3].ljust(3, \"0\")))\n .setResultsName(\"ms\"))\n\n # hours, minutes, seconds ::= zero_index\n hours = zero_index.setResultsName(\"hh\")\n minutes = zero_index.setResultsName(\"mm\")\n seconds = zero_index.setResultsName(\"ss\")\n\n hours_minutes = hours + colon + minutes + colon | minutes + colon\n secs_millisecs = (seconds + Optional(period + millisecs) |\n period + millisecs)\n\n # timestamp ::= [[hours \":\"] minutes \":\"] seconds [\".\" millisecs]\n timestamp = Optional(hours_minutes) + secs_millisecs\n\n # duration_file ::= \"@\", filename\n # We need a separate item for a lonely duration file timestamp so\n # that we can attach a parse action just to the lonely case. Using\n # duration_file alone means the parse action is attached to all\n # instances of duration_file.\n duration_file = at + filename.setResultsName(\"filename\")\n lonely_duration_file = at + filename.setResultsName(\"filename\")\n\n # timespecs ::= timestamp [duration_file | {timestamp}]\n # If duration_file timestamp is lonely, prepend a zero timestamp.\n timespecs = Or(\n [lonely_duration_file.setParseAction(\n lambda s, l, t: [timestamp.parseString(\"00:00:00.000\"), t]),\n Group(timestamp) + duration_file,\n OneOrMore(Group(timestamp.setParseAction(default_timestamp_fields)))])\n \n # last_frame ::= \"-1\" | \"last\"\n last_frame = oneOf([\"-1\", \"last\"]).setParseAction(replaceWith(-1))\n\n # frame_number ::= \":\" (zero_index | last_frame)\n frame_number = colon - (zero_index | last_frame).setResultsName(\"num\")\n\n # stream_number ::= \":\" zero_index\n stream_number = colon - zero_index.setResultsName(\"num\")\n\n # input_file ::= \":\" [filename]\n input_file = colon - Optional(filename).setResultsName(\"filename\")\n\n # previous_segment ::= \":\" \"^\"\n previous_segment = colon - caret.setResultsName(\"filename\")\n\n # frame_input_file ::= input_file | previous_segment\n frame_input_file = Or([input_file, previous_segment])\n\n # av_trailer ::= input_file [stream_number]\n av_trailer = input_file + Optional(stream_number)\n\n # frame_type ::= \"frame\" | \"f\"\n frame_type = oneOf([\"f\", \"frame\"]).setParseAction(replaceWith(\"frame\"))\n\n # frame_input ::= frame_type [frame_input_file [frame_number]]\n frame_input = (frame_type.setResultsName(\"type\") +\n Optional(frame_input_file + Optional(frame_number)))\n\n # video_type ::= \"video\" | \"v\"\n video_type = oneOf([\"v\", \"video\"]).setParseAction(replaceWith(\"video\"))\n\n # audio_type ::= \"audio\" | \"a\"\n audio_type = oneOf([\"a\", \"audio\"]).setParseAction(replaceWith(\"audio\"))\n\n # av_input ::= (audio_type | video_type) [av_trailer]\n av_input = ((audio_type | video_type).setResultsName(\"type\") +\n Optional(av_trailer))\n\n # inputspec ::= \"[\" (av_input | frame_input) \"]\"\n inputspec = (left_bracket + \n delimitedList(av_input | frame_input, delim=\":\")\n .setParseAction(default_input_fields) -\n 
right_bracket)\n\n # segmentspec ::= inputspec [timespecs]\n segmentspec = Group(inputspec + \n Group(Optional(timespecs)).setResultsName(\"times\"))\n\n # config ::= {segmentspec}\n config = ZeroOrMore(segmentspec)\n config.ignore(pythonStyleComment)\n \n return config",
"def __init__(self):\n\n self.prim_parser = parser.Parser()",
"def parse_ls(self,ins):\n global Creg\n if ins.instr == 'lb':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'lbu':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lh':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lhu':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'lw':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'dlw':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'dmfc1':\n if len(ins.args) == 2:\n self.need = [ins.args[1]] \n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'l.s':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'l.d':\n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n \n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'sb': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)\n \n elif ins.instr == 'sbu': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: 
\", ins.instr) \n \n elif ins.instr == 'sh': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1])\n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'shu': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'sw': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = [ins.args[0]] + self.need\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'dsw': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = self.double_reg(ins.args[0]) + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'dsz': \n if len(ins.args) == 1:\n ins.args[0] = str(ins.args[0]) \n g = re.match(Creg, ins.args[0])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[0]] \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 's.s': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]]\n self.need = [ins.args[0]] + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 's.d': \n if len(ins.args) == 2:\n ins.args[1] = str(ins.args[1]) \n g = re.match(Creg, ins.args[1])\n if g:\n self.c = g.group(1)\n self.need = [Register(g.group(2))]\n else:\n self.need = [ins.args[1]] \n self.need = self.double_reg(ins.args[0]) + self.need \n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'move':\n if len(ins.args) == 2:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mov.d':\n if len(ins.args) == 2:\n self.need = self.double_reg(ins.args[1])\n self.gen = self.double_reg(ins.args[0])\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'mov.s':\n if len(ins.args) == 2:\n self.need = [ins.args[1]]\n self.gen = [ins.args[0]]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr) \n \n elif ins.instr == 'li':\n if len(ins.args) == 2:\n self.gen = [ins.args[0]]\n self.ival = ins.args[1]\n else:\n raise Exception(\"Invalid number of args for ins: \", ins.instr)",
"def parse_L(self, line: str):\n node1, node2, value, v0, i0, name = self.parse_C_or_L(line, \"h\")\n return IComponent.L(node1, node2, value, v0, i0, name)",
"def make_schema(w):\n\n def _part(w, level, suffix):\n \"\"\" Return a tuple (colheading + options, start_token, end_token, partlist, info)\n where the partlist is again a list of the component schemas - or a terminal\n matching a single token - or None if empty \"\"\"\n if w is None:\n # Epsilon node: return empty list\n return None\n if w.is_token:\n return ([ level ] + suffix, w.start, w.end, None, (w.terminal, w.token.text))\n # Interior nodes are not returned\n # and do not increment the indentation level\n if not w.is_interior:\n level += 1\n # Accumulate the resulting parts\n plist = [ ]\n ambig = w.is_ambiguous\n add_suffix = [ ]\n\n for ix, pc in enumerate(w.enum_children()):\n prod, f = pc\n if ambig:\n # Uniquely identify the available parse options with a coordinate\n add_suffix = [ ix ]\n\n def add_part(p):\n \"\"\" Add a subtuple p to the part list plist \"\"\"\n if p:\n if p[0] is None:\n # p describes an interior node\n plist.extend(p[3])\n elif p[2] > p[1]:\n # Only include subtrees that actually contain terminals\n plist.append(p)\n\n if isinstance(f, tuple):\n add_part(_part(f[0], level, suffix + add_suffix))\n add_part(_part(f[1], level, suffix + add_suffix))\n else:\n add_part(_part(f, level, suffix + add_suffix))\n\n if w.is_interior:\n # Interior node: relay plist up the tree\n return (None, 0, 0, plist, None)\n # Completed nonterminal\n assert w.is_completed\n assert w.nonterminal is not None\n return ([level - 1] + suffix, w.start, w.end, plist, w.nonterminal)\n\n # Start of make_schema\n\n if w is None:\n return None\n return _part(w, 0, [ ])",
"def grammar(description, whitespace=r'\\s*'):\n G={' ':whitespace}\n description = description.replace('\\t',' ') # handle tabs in description\n for line in split(description,\"\\n\"):\n lhs, rhs = split(line,\"=>\")\n alternatives = split(rhs, ' | ')\n G[lhs]=tuple(map(split, alternatives))\n return G",
"def makeTableNamesList(n, ):",
"def _parse(self):\n with open(_join(self.man_dir, self.man_fn)) as fp:\n lines = fp.readlines()\n \n desc_indxs = []\n for i, L in enumerate(lines):\n if \"#landuse\" in L or \" # landuse\" in L:\n desc_indxs.append(i-1)\n desc_indxs.append(i-2)\n desc_indxs.append(i-3)\n \n lines = [L[:L.find('#')].strip() for L in lines]\n lines = [L for i, L in enumerate(lines) if len(L) > 0 or i in desc_indxs]\n\n del desc_indxs\n \n self.datver = lines.pop(0)\n self.nofe = int(lines.pop(0))\n self.sim_years = int(lines.pop(0))\n \n # Read Plant Growth Section\n self.plants = PlantLoops(lines, self)\n\n # Read Operation Section\n self.ops = OpLoops(lines, self)\n \n # Read Initial Condition Section\n self.inis = IniLoops(lines, self)\n \n # Read Surface Effects Section\n self.surfs = SurfLoops(lines, self)\n \n # Read Contour Section\n self.contours = ContourLoops(lines, self)\n \n # Read Drainage Section\n self.drains = DrainLoops(lines, self)\n \n # Read Yearly Section\n self.years = YearLoops(lines, self)\n \n # Read Management Section \n self.man = ManagementLoop(lines, self)",
"def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)",
"def make_minilang_parser():\n gramm = Grammar.from_string(GRAMMAR)\n return parser_from_grammar(gramm, 'program')"
] |
[
"0.55573875",
"0.54191315",
"0.5143745",
"0.51170874",
"0.50475824",
"0.5011598",
"0.50006133",
"0.49978584",
"0.4950667",
"0.49418342",
"0.49181917",
"0.48893735",
"0.48863432",
"0.48850852",
"0.48627818",
"0.48621088",
"0.4854347",
"0.4852148",
"0.48512858",
"0.48301548",
"0.48219302",
"0.4819891",
"0.48172316",
"0.4810401",
"0.48061118",
"0.48033127",
"0.47563553",
"0.4750107",
"0.473706",
"0.47349125"
] |
0.63953745
|
0
|
Constructs the LR(1) sets of items.
|
def LR1_ItemSets(self):
# The root item is the one representing the entire language.
# Since the grammar is in canonical form, it's a Choice over a
# single sequence.
    root_item = self.MakeItem(LANGUAGE, self.rules[LANGUAGE][0], 0)
# An ItemSet can be found by any of the items in its core.
# Within an ItemSet, an item maps to its lookahead set.
root_item_set = ItemSet(self, {root_item: LookaheadSet({self.end_of_text})}).close(self)
LR1_item_sets_result = set({root_item_set})
dirty_set = LR1_item_sets_result.copy()
while len(dirty_set) > 0:
work_list = dirty_set.copy()
dirty_set = set()
# Sort the work list so we get deterministic ordering, and therefore
# deterministic itemset core numbering.
for item_set in sorted(work_list):
            (_, gotos) = item_set.gotos(self)
for (X, dest_item_set) in gotos:
if dest_item_set not in LR1_item_sets_result:
LR1_item_sets_result.add(dest_item_set)
dirty_set.add(dest_item_set)
    return sorted(LR1_item_sets_result, key=ItemSet.pretty_key)
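
For readers who want the control flow of LR1_ItemSets in isolation, here is a minimal sketch of the same worklist fixed-point, run on a toy successor function instead of ItemSet.gotos. The explore and succ names are hypothetical and only mirror the shape of the method above: keep a dirty set of newly discovered states, expand it in sorted order for deterministic numbering, and stop once an iteration adds nothing new.

def explore(start, succ):
    # `discovered` plays the role of LR1_item_sets_result, `dirty` of dirty_set.
    discovered = {start}
    dirty = {start}
    while dirty:
        work_list = sorted(dirty)  # deterministic ordering, as in the method above
        dirty = set()
        for state in work_list:
            for nxt in succ(state):  # stand-in for item_set.gotos(self)
                if nxt not in discovered:
                    discovered.add(nxt)
                    dirty.add(nxt)
    return sorted(discovered)

# Toy transition relation: from n you can reach 2n and 2n+1, capped at 20.
print(explore(1, lambda n: [m for m in (2 * n, 2 * n + 1) if m <= 20]))

The real method differs only in that the states are ItemSet objects (identified by their cores) and the successors come from item_set.gotos(self).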
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet",
"def LALR1(self, max_item_sets=None):\n\n # Part 1. Compute LALR(1) item sets\n\n # Mapping from a core index to an already-discovered item set.\n by_index = dict()\n\n root_item = self.MakeItem(LANGUAGE, self.rules[LANGUAGE][0],0)\n\n # An ItemSet can be found by any of the items in its core.\n # Within an ItemSet, an item maps to its lookahead set.\n\n root_item_set = ItemSet(self, {root_item: LookaheadSet({self.end_of_text})}).close(self)\n by_index[root_item_set.core_index] = root_item_set\n\n item_set_core_ids = set({root_item_set.core_index})\n\n dirty_set = item_set_core_ids.copy()\n keep_going = True\n #while len(dirty_set) > 0:\n while keep_going:\n keep_going = False\n #work_list = dirty_set.copy()\n #dirty_set = set()\n if max_item_sets is not None:\n if len(by_index) > max_item_sets:\n break\n # Sort the work list so we get deterministic ordering, and therefore\n # deterministic itemset core numbering.\n # Go backwards to try to explore the most recently changed items first.\n work_list = sorted(item_set_core_ids, reverse=True)\n for core_index in work_list:\n item_set = by_index[core_index]\n (changed,gotos) = item_set.gotos(self,by_index_memo=by_index)\n keep_going = keep_going | changed\n for (X, item_set_for_X) in gotos:\n if item_set_for_X.core_index not in by_index:\n item_set_core_ids.add(item_set_for_X.core_index)\n by_index[item_set_for_X.core_index] = item_set_for_X\n dirty_set.add(item_set_for_X.core_index)\n keep_going = True\n\n # Now this is a list of item_sets\n sorted_item_set_core_ids = sorted(item_set_core_ids)\n\n # Part 2. Compute the action table and conflicts.\n # Do this as a second pass because it's conceivable that an item set may\n # go from non-accepting to accepting during initial exploration\n # of the item sets.\n\n conflicts = []\n # Maps (item_set.core_index, terminal.reg_info.index) to an Action.\n action_table = dict()\n def addAction(item_set, terminal, action):\n isinstance(item_set, ItemSet) or raiseRE(\"expected ItemSet\")\n terminal.is_terminal() or raiseRE(\"expected terminal: \" + str(terminal))\n isinstance(action,Action) or raiseRE(\"expected action\")\n\n # Use indices, for speed.\n # But also keep the terminal prompting this action.\n action_key = (item_set.core_index,terminal.reg_info.index)\n if action_key not in action_table:\n action_table[action_key] = action\n else:\n prev_action = action_table[action_key]\n if prev_action != action:\n # Record the conflict, and only keep the original.\n conflicts.append(Conflict(item_set,terminal,prev_action,action))\n\n # Maps an item index to its reduction index.\n reduced_items = dict()\n # List, where element i is the Reduce object with index i\n reductions = []\n def make_reduce(item):\n if item.reg_info.index in reduced_items:\n return reductions[reduced_items[item.reg_info.index]]\n index = len(reduced_items)\n reduced_items[item.reg_info.index] = index\n result = Reduce(item,index)\n reductions.append(result)\n return result\n\n # The goto table for noterminals\n # Maps (item_set, nonterminal) to the next item set\n nonterminal_goto = dict()\n\n for item_set_core_id in sorted_item_set_core_ids:\n item_set = by_index[item_set_core_id]\n # Register Reduce and Accept actions\n for item_id, lookahead in item_set.id_to_lookahead.items():\n item = item_set.id_to_item[item_id]\n if item.is_accepting() and lookahead.includesEndOfText():\n addAction(item_set, self.end_of_text, Accept())\n if item.at_end() and (item.lhs.content != LANGUAGE):\n # Register reductions\n for terminal in lookahead:\n 
addAction(item_set, terminal, make_reduce(item))\n\n # Register Shift actions\n for xid, edge in item_set.goto.items():\n X = self.findByIndex(xid)\n item_set_for_X = edge.NextItemSet(self)[1]\n if X.is_terminal():\n # Can't be EndOfText by construction of the goto result\n isinstance(X,Token) or raiseRE(\"internal error: expected a token\")\n addAction(item_set, X, Shift(item_set_for_X))\n elif X.is_symbol_name():\n nonterminal_goto[(item_set.core_index,X)] = item_set_for_X\n\n item_sets = [by_index[i] for i in sorted_item_set_core_ids]\n\n return ParseTable(self,item_sets, action_table, nonterminal_goto, reductions, conflicts)",
"def __init__(self, *items, **kw):\n self.dTrees = []\n self.ntrees = 50 # TODO\n self.beta = 0.5",
"def __init__(self, items):\n if len(items) == 0:\n self._first = None\n self._rest = None\n else:\n self._first = items[0]\n self._rest = LinkedListRec(items[1:])",
"def _build_item_closure(itemset, productionset):\r\n #For every item inside current itemset, if we have the following rule:\r\n # xxx <cursor><nonterminalSymbol> xxx append every rule from self._productionruleset that begins with that NonTerminalSymbol\r\n if not isinstance(itemset, LR0ItemSet):\r\n raise TypeError\r\n import copy\r\n resultset = copy.copy(itemset)\r\n changed = True\r\n while changed:\r\n changed = False\r\n for currentitem in resultset.itemlist:\r\n nextsymbol = currentitem.next_symbol()\r\n if nextsymbol is None:\r\n break\r\n for rule in productionset.productions:\r\n newitem = LR0Item(rule)\r\n if rule.leftside[0] == nextsymbol and newitem not in resultset.itemlist:\r\n resultset.append_item(newitem)\r\n changed = True\r\n return resultset",
"def __init__(self, items):\r\n if len(items) == 0: # No items, and an empty list!\r\n self._first = None\r\n else:\r\n self._first = _Node(items[0])\r\n curr = self._first\r\n for item in items[1:]:\r\n curr.next = _Node(item)\r\n curr = curr.next",
"def _slr_build_parser_table(productionset):\r\n result = ParserTable()\r\n statesset = build_states_sets(productionset)\r\n for itemindex, itemset in enumerate(statesset):\r\n LOG.debug(\"_slr_build_parser_table: Evaluating itemset:\" + str(itemset))\r\n for symbol in productionset.getSymbols() + [EndSymbol()]:\r\n numberoptions = 0\r\n for lritem in itemset.itemlist:\r\n #if cursor is before a terminal, and there is a transition to another itemset with the following terminal, append shift rule\r\n if isinstance(symbol, TerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol):\r\n destinationstate = statesset.index(itemset.get_transition(symbol))\r\n result.append(itemindex, symbol, \"Shift\", destinationstate)\r\n numberoptions += 1\r\n if isinstance(symbol, NonTerminalSymbol) and lritem.next_symbol() == symbol and itemset.has_transition(symbol):\r\n destinationstate = statesset.index(itemset.get_transition(symbol))\r\n result.append_goto(itemindex, symbol, destinationstate)\r\n #if cursor is at the end of the rule, then append reduce rule and go transition\r\n if lritem.previous_symbol() == symbol and lritem.is_last_position() and symbol != Extended_S:\r\n for x in productionset.next_lookup(symbol):\r\n from pydsl.Grammar.Definition import String\r\n if isinstance(x, list):\r\n result.append(itemindex, TerminalSymbol(String(x[0])), \"Reduce\", None, lritem.rule)\r\n else:\r\n result.append(itemindex, TerminalSymbol(String(x)), \"Reduce\", None, lritem.rule)\r\n numberoptions += 1\r\n #if cursor is at the end of main rule, and current symbol is end, then append accept rule\r\n if isinstance(symbol, EndSymbol) and lritem.previous_symbol() == productionset.initialsymbol and lritem.next_symbol() == EndSymbol():\r\n result.append(itemindex, symbol, \"Accept\", None)\r\n numberoptions += 1\r\n if not numberoptions:\r\n LOG.info(\"No rule found to generate a new parsertable entry \")\r\n LOG.debug(\"symbol: \" + str(symbol))\r\n LOG.debug(\"itemset: \" + str(itemset))\r\n elif numberoptions > 1: #FIXME can it count duplicated entries?\r\n raise Exception(\"LR Conflict %s\" % symbol)\r\n return result",
"def __init__(self, type, n):\n self.type = type\n if type == \"sep\":\n self.items = [LinkedChainTable()]*n # regel waar implementatie van seperate chaining kan verandert worden\n else:\n self.items = [None]*n\n self.n = n",
"def __init__(self):\r\n self._items = [[] for _ in range(20)]",
"def NewItems(self) -> _n_1_t_7:",
"def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)",
"def __init__(self, Ls, germs, prepStrs, effectStrs, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.prepStrs = prepStrs[:]\n self.effectStrs = effectStrs[:]\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = []\n self.allstrs_set = set()\n self.unindexed = [] # unindexed strings\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsStructure, self).__init__()",
"def __init__(self):\n \n self.items = [] \n self.ind = defaultdict(set) # item -> index into the items array",
"def items(ruleSet, terminals, nonTerminals):\n symbols = nonTerminals + terminals\n #start with closure of [ [S' -> S, $] ]\n C = [closure([startItem], ruleSet, terminals)]\n added = 1\n while added:\n added = 0\n for I in C:\n for X in symbols:\n g = goto(I, X, ruleSet, terminals)\n if g and not fullIn(C, g):# not in C:\n C.append(g)\n added = 1\n return C",
"def create_iterables(self):\n iterables = [[0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1]]\n self.states = []\n for t in itertools.product(*iterables):\n self.states.append(t)",
"def build_model(self):\n insts1, attrs1, rels1 = self.arg1.get_triples()\n insts2, attrs2, rels2 = self.arg2.get_triples()\n for items, shld_norm in [(insts1, True), (insts2, True), (attrs1, True),\n (attrs2, True), (rels1, False), (rels2, False)]:\n for i in range(len(items)):\n # GUROBI cant handle Unicode so step down to ASCII\n items[i] = [items[i][0].encode('ascii', 'ignore').lower(),\n items[i][1].encode('ascii', 'ignore'),\n items[i][2].encode('ascii', 'ignore')]\n # normalize concept names -- instances and attributes\n if shld_norm:\n items[i][2] = SmatchILP.normalize(items[i][2])\n\n # Attributes are same as relations\n rels1.extend(attrs1)\n rels2.extend(attrs2)\n\n log.debug(\"AMR 1 Instances:\\n %s\" % insts1)\n log.debug(\"AMR 1 Relations:\\n %s\" % rels1)\n log.debug(\"AMR 2 Instances:\\n %s\" % insts2)\n log.debug(\"AMR 2 Relations:\\n %s\" % rels2)\n\n for index, items in [(self.arg1vars, insts1), (self.arg2vars, insts2)]:\n for name, var, concept in items:\n assert name == 'instance' # relation name is instance ==> variable definition\n assert var not in index # variable name is unique\n index[var] = concept\n\n var_choices = set() # possible variable matches\n for v1 in self.arg1vars.keys():\n for v2 in self.arg2vars.keys():\n var_choices.add((v1, v2))\n\n # instances are relations too\n rels1.extend(insts1)\n rels2.extend(insts2)\n\n self.arg1size = len(rels1)\n self.arg2size = len(rels2)\n\n trpl_choices = set()\n trpl_var_consts = {}\n for name1, var11, var12 in rels1:\n id1 = \"%s:%s:%s\" % (name1, var11, var12)\n for name2, var21, var22 in rels2:\n possible = 0\n id2 = \"%s:%s:%s\" % (name2, var21, var22)\n # triple name matches && first argument to triples can be matched\n if name1 == name2 and (var11, var21) in var_choices:\n # second argument to triple can also be matched OR\n possible += 1\n if (var12, var22) in var_choices or (\n # they are the same concepts\n # var12 not in self.arg1vars and var22 not in self.arg2vars and\n var12 == var22):\n possible += 1\n trpl_choices.add((id1, id2))\n # constrains between variables and triples\n trpl_var_consts[id1, id2] = [(var11, var21)]\n # if second argument is also variable\n\n if (var12, var22) in var_choices:\n trpl_var_consts[id1, id2].append((var12, var22))\n log.debug('\\t %s <--> %s ? 
%s ' % (id1, id2, possible))\n\n # Add variables to ILP model\n model = GRBModel('Smatch ILP')\n if log.getLogger().getEffectiveLevel() >= log.INFO:\n model.Params.OutputFlag = 0 # disable output\n log.info(\"Number of possible variable matches %s\" % len(var_choices))\n log.info(\"Number of possible triple matches %s\" % len(trpl_choices))\n\n self.vars = model.addVars(var_choices, vtype=GRB.BINARY, name=\"v\")\n self.trpls = model.addVars(trpl_choices, vtype=GRB.BINARY, name=\"t\")\n\n # constraints\n for v1 in self.arg1vars:\n model.addConstr(self.vars.sum(v1, '*') <= 1, name='to max 1 var')\n for v2 in self.arg2vars:\n model.addConstr(self.vars.sum('*', v2) <= 1, name='from max 1 var')\n\n for trpl_idx, var_idxs in trpl_var_consts.items():\n for var_idx in var_idxs:\n model.addConstr(self.trpls[trpl_idx] <= self.vars[var_idx], name=\"%s::%s\" % (trpl_idx, var_idx))\n\n # objective\n model.setObjective(self.trpls.sum(), GRB.MAXIMIZE)\n self.model = model\n\n # stats for how big the problem is\n var_trpl_consts_count = sum(len(x) for x in trpl_var_consts.values())\n num_constr = len(var_choices) + len(trpl_choices) + var_trpl_consts_count\n num_vars = len(var_choices) + len(trpl_choices)\n log.info(\"ILP SIZE: %d binary variables (%d vars + %d triple vars)\" % (num_vars, len(var_choices), len(trpl_choices)))\n log.info(\"ILP SIZE: %d constraints (%d b/w arg vars and triples)\" % (num_constr, var_trpl_consts_count))",
"def build_karels():\n build_karel1()\n build_karel2()\n build_karel3()\n build_karel4()",
"def __init__(self, items: list) -> None:\n if items == []:\n self._first = None\n self._rest = None\n else:\n self._first = items[0]\n self._rest = RecursiveList(items[1:])",
"def createSetsFromLabels(self):\n \n self.tots = [0]*self.n\n for i in range(self.n):\n self.sets.append([])\n for i in range(self.nPoints):\n self.sets[self.labels[i]].append(i)\n self.tots[self.labels[i]] += 1",
"def __init__(self, record_list=[[]]) -> None:\n self.record_list = record_list\n self.all_item = []\n # all items in the record to build the n*n matrix\n for record in record_list:\n if record[0] not in self.all_item:\n self.all_item.append(record[0])\n if record[1] not in self.all_item:\n self.all_item.append(record[1])\n self.item_num = len(self.all_item)\n # number od unique items\n self.item_mat = np.zeros(shape=(self.item_num, self.item_num))\n # The matrix, considered as the Massey matrix in MasseyRanking, fo example.\n self.ranking = []\n # A copy of list to return.",
"def __init__(self, items: Optional[Sequence] = None, first: Optional[_Node] = None) -> None:\n self._first = first\n self._length = 0\n\n if items:\n for item in items:\n self.append(item)",
"def __init__(self, item):\n\n #: Instance of the item the Syllogism is constructed on. The instance\n #: is copied in order to prevent reference mismatches from happening.\n self.item = Item(\n item.identifier,\n item.domain,\n item.task_str,\n item.response_type,\n item.choices_str,\n item.sequence_number)\n\n #: Reference to the task the Syllogism is constructed on.\n self.task = self.item.task\n\n #: String representation of the task\n self.encoded_task = encode_task(self.task)\n\n #: List representation of the first premise\n self.p1 = self.task[0]\n\n #: List representation of the second premise\n self.p2 = self.task[1]\n\n #: Quantifier of the first premise\n self.quantifier_p1 = self.task[0][0]\n\n #: Quantifier of the second premise\n self.quantifier_p2 = self.task[1][0]\n\n #: Figure of the syllogism\n self.figure = int(self.encoded_task[-1])\n\n # Figure out the figure and identify the terms\n if self.figure == 1:\n self.A, self.B, self.C = self.task[0][1], self.task[0][2], self.task[1][2]\n elif self.figure == 2:\n self.A, self.B, self.C = self.task[0][2], self.task[0][1], self.task[1][1]\n elif self.figure == 3:\n self.A, self.B, self.C = self.task[0][1], self.task[0][2], self.task[1][1]\n elif self.figure == 4:\n self.A, self.B, self.C = self.task[0][2], self.task[0][1], self.task[1][2]",
"def __init__(self):\n\n # names of atoms that make up relevant segements of each chain\n self.chains = {'a': {'C': 'C1', 'C1': 'C2', 'C2': 'C3', 'C3': 'C4', 'C4': 'C5', 'H': 'H1', 'H1': 'H2',\n 'H2': 'H3', 'H3': 'H4', 'H4': 'H5'},\n 'b': {'C45': 'C1', 'C44': 'C2', 'C43': 'C3', 'C42': 'C4', 'C41': 'C5', 'H81': 'H1', 'H80': 'H2',\n 'H79': 'H3', 'H78': 'H4', 'H77': 'H5'}\n }\n\n self.nchains = len(list(self.chains.keys()))\n\n self.chain_numbers = {'a': 0, 'b': 1} # used to number chains\n\n # self.initial_types = {'C1': 'c2', 'C2': 'ce', 'C3': 'ce', 'C4': 'c2', 'H1': 'ha', 'H2': 'ha', 'H3': 'ha',\n # 'H4': 'ha', 'H5': 'ha'}\n\n # all indices numbered from 0. D1, D2, ... correspond to dummies attached to C1, C2, ... respectively\n self.indices = {'a': {'C1': 0, 'C2': 1, 'C3': 2, 'C4': 3, 'C5': 4, 'H1': 52, 'H2': 53, 'H3': 54, 'H4': 55,\n 'H5': 56, 'D1': 136, 'D2': 137, 'D3': 138, 'D4': 139},\n 'b': {'C1': 49, 'C2': 48, 'C3': 47, 'C4': 46, 'C5': 45, 'H1': 133, 'H2': 132, 'H3': 131,\n 'H4': 130, 'H5': 129, 'D1': 140, 'D2': 141, 'D3': 142, 'D4': 143}\n }\n\n self.dummy_connectivity = {'a': {'C': 'D1', 'C1': 'D2', 'C2': 'D3', 'C3': 'D4'},\n 'b': {'C45': 'D1', 'C44': 'D2', 'C43': 'D3', 'C42': 'D4'}}\n\n self.hydrogen_connectivity = {'C': ['H1', 'H2'], 'C1': ['H3'], 'C2': ['H4'], 'C3': ['H5'],\n 'C45': ['H1', 'H2'], 'C44': ['H3'], 'C43': ['H4'], 'C42': ['H5']}\n\n self.dummy_mass = 1.008 # mass of hydrogen\n\n # write these in order of priority\n # for efficiency, don't repeat things. For example self.carbons['C1']: self.carbons['C2'] is the same as\n # self.carbons['C2']: self.carbons['C1']. Otherwise, computational expense goes up and a new reaction has\n # to be defined below.\n self.carbons = {'C1': ['C', 'C45'], 'C2': ['C1', 'C44'], 'C3': ['C2', 'C43'], 'C4': ['C3', 'C42']}\n self.bonds_with = [[self.carbons['C1'], self.carbons['C2']]]\n\n # define which improper dihedrals to remove -- written in same order as .itp file!!!\n # note that the order of the atoms may be different for each chain\n # NOTE: C3 not tested\n self.impropers = {'a': {'C1': ['H2', 'C1', 'H1', 'C2'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']},\n 'b': {'C1': ['C2', 'H2', 'C1', 'H1'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']}}",
"def gen(length):\n return itertools.product(LABELS,repeat=length)",
"def __init__(self, Ls, germs, nMinorRows, nMinorCols, aliases=None,\n sequenceRules=None):\n self.Ls = Ls[:]\n self.germs = germs[:]\n self.nMinorRows = nMinorRows\n self.nMinorCols = nMinorCols\n self.aliases = aliases.copy() if (aliases is not None) else None\n self.sequenceRules = sequenceRules[:] if (sequenceRules is not None) else None\n\n self.allstrs = []\n self.allstrs_set = set()\n self.unindexed = []\n self._plaquettes = {}\n self._firsts = []\n self._baseStrToLGerm = {}\n super(LsGermsSerialStructure, self).__init__()",
"def __init__(self):\n self.items = []\n self.indexes: Dict[int, Set] = defaultdict(set)",
"def setup_RL(self):\n for n in range(self.L):\n self.update_RL(n)\n return self.R[-1]",
"def __init__(self, *args):\n this = _libsbml.new_ListOfInitialAssignments(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _libsbml.new_ListOfRules(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n self.l = []\n self.r = []"
] |
[
"0.61705273",
"0.5918468",
"0.5712086",
"0.5710754",
"0.56861305",
"0.5651593",
"0.56297493",
"0.56052625",
"0.55712545",
"0.55698526",
"0.55202484",
"0.54824775",
"0.54575783",
"0.545201",
"0.5429481",
"0.54270715",
"0.53982484",
"0.5382677",
"0.53647447",
"0.5356855",
"0.535141",
"0.53456616",
"0.53257644",
"0.5319373",
"0.5257385",
"0.5253026",
"0.5239113",
"0.5233121",
"0.52176875",
"0.52140874"
] |
0.633001
|
0
|
Will compare running instances to purchased reservations across all EC2 regions for all of the accounts supplied.
|
def reserved_compare(options):
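    # Results are keyed by region name -> availability zone -> instance type.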
running_instances = defaultdict(dict)
reserved_purchases = defaultdict(dict)
regions = boto.ec2.regions()
good_regions = [r for r in regions if r.name not in ['us-gov-west-1',
'cn-north-1']]
for region in good_regions:
if options.trace:
print " Scanning region {0}".format(region.name)
conn = region.connect()
filters = {'instance-state-name': 'running'}
zones = defaultdict(dict)
if options.trace:
print " Fetching running instances"
reservations = conn.get_all_instances(filters=filters)
for reservation in reservations:
for inst in reservation.instances:
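                # Count only instances a reservation could cover: skip spot,
                # autoscale-managed, and Windows instances.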
if options.debug:
print instance_string(inst, options, verbose=True)
if inst.state != 'running':
if options.debug:
print "Skip {0.id} state {0.state}".format(inst)
continue
if inst.spot_instance_request_id:
if options.debug:
print "Skip {0.id} has spot id {0.spot_instance_request_id}".format(inst)
continue
if 'aws:autoscaling:groupName' in inst.tags:
if options.debug:
print "Skip {0.id} is an autoscale instance".format(inst)
continue
if inst.platform == 'Windows' or inst.platform == 'windows':
if options.debug:
print "Skip {0.id} has platform {0.platform}".format(inst)
continue
if inst.instance_type not in zones[inst.placement]:
zones[inst.placement][inst.instance_type] = []
zones[inst.placement][inst.instance_type].append(inst)
if zones:
running_instances[region.name] = zones
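        # Tally active reserved-instance purchases by zone and type.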
purchased = defaultdict(dict)
if options.trace:
print " Fetching reservations"
reserved = conn.get_all_reserved_instances()
for r in reserved:
if options.debug:
print reservation_string(r, verbose=True)
if r.state != 'active':
continue
if r.instance_tenancy != 'default':
print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))
continue
if r.instance_type not in purchased[r.availability_zone]:
purchased[r.availability_zone][r.instance_type] = [r]
else:
purchased[r.availability_zone][r.instance_type].append(r)
if purchased:
reserved_purchases[region.name] = purchased
return check_reservation_use(options, running_instances,
reserved_purchases)
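
The document above is written against the legacy boto2 API with Python 2 print statements. As a point of reference only, and not the original implementation, the following is a rough boto3 sketch of the same per-region tallying step; the tally_region name is hypothetical, the final comparison (check_reservation_use) is deliberately left out, and the filters simply mirror the ones above: running, non-spot, non-autoscale, non-Windows instances versus active, default-tenancy reservations.

from collections import defaultdict

import boto3


def tally_region(region_name):
    ec2 = boto3.client("ec2", region_name=region_name)

    # zone -> instance type -> [instance ids], mirroring `zones` above
    running = defaultdict(lambda: defaultdict(list))
    paginator = ec2.get_paginator("describe_instances")
    pages = paginator.paginate(
        Filters=[{"Name": "instance-state-name", "Values": ["running"]}])
    for page in pages:
        for reservation in page["Reservations"]:
            for inst in reservation["Instances"]:
                if inst.get("SpotInstanceRequestId"):
                    continue  # spot instances are skipped, as above
                tags = {t["Key"]: t["Value"] for t in inst.get("Tags", [])}
                if "aws:autoscaling:groupName" in tags:
                    continue  # autoscale-managed instances are skipped, as above
                if inst.get("Platform", "").lower() == "windows":
                    continue  # Windows instances are skipped, as above
                zone = inst["Placement"]["AvailabilityZone"]
                running[zone][inst["InstanceType"]].append(inst["InstanceId"])

    # zone -> instance type -> reserved instance count, mirroring `purchased`
    purchased = defaultdict(lambda: defaultdict(int))
    active = ec2.describe_reserved_instances(
        Filters=[{"Name": "state", "Values": ["active"]}])["ReservedInstances"]
    for ri in active:
        if ri.get("InstanceTenancy", "default") != "default":
            continue  # the original only warns about non-default tenancy
        zone = ri.get("AvailabilityZone", "regional")  # regional RIs carry no AZ
        purchased[zone][ri["InstanceType"]] += ri["InstanceCount"]

    return running, purchased

Looping tally_region over the names returned by describe_regions() would rebuild the two region-keyed dictionaries that check_reservation_use consumes in the original.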
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations",
"def calculate_ec2_ris(session, results):\n ec2_conn = session.client('ec2')\n\n\n paginator = ec2_conn.get_paginator('describe_instances')\n page_iterator = paginator.paginate(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n\n # Loop through running EC2 instances and record their AZ, type, and\n # Instance ID or Name Tag if it exists.\n for page in page_iterator:\n for reservation in page['Reservations']:\n for instance in reservation['Instances']:\n # Ignore spot instances\n if 'SpotInstanceRequestId' not in instance:\n #az = instance['Placement']['AvailabilityZone']\n instance_type = instance['InstanceType']\n # Check for 'skip reservation' tag and name tag\n found_skip_tag = False\n instance_name = None\n cloud_bill_review_comment = None\n if 'Tags' in instance:\n for tag in instance['Tags']:\n if tag['Key'] == 'Name' and len(tag['Value']) > 0:\n instance_name = tag['Value'] \n if tag['Key'] == 'CloudBillReviewComment' and len(tag['Value']) > 0:\n cloud_bill_review_comment = tag['Value'] \n \n instancekey = instance['InstanceId'] if not instance_name else instance_name \n #print(instancekey)\n\n #if instance_type == 't2.small':\n # print('instance type: %s instance_id: %s' %(instance_type, instancekey))\n\n if cloud_bill_review_comment:\n tags[instancekey] = cloud_bill_review_comment\n #print(tags[instancekey])\n cloud_bill_review_comment = None\n\n results['ec2_running_instances'][(instance_type)] = \\\n results['ec2_running_instances'].get((instance_type), 0) + 1\n \n instance_ids[(instance_type)].append(\n instance['InstanceId'] if not instance_name\n else instance_name)\n\n #print(instance_ids[('t2.small')])\n\n\n # Loop through active EC2 RIs and record their AZ and type.\n for reserved_instance in ec2_conn.describe_reserved_instances(\n Filters=[{'Name': 'state', 'Values': ['active']}])['ReservedInstances']:\n # Detect if an EC2 RI is a regional benefit RI or not\n\n instance_type = reserved_instance['InstanceType']\n\n results['ec2_reserved_instances'][(\n instance_type)] = results['ec2_reserved_instances'].get(\n (instance_type), 0) + reserved_instance['InstanceCount']\n\n reserve_expiry[(instance_type)].append(calc_expiry_time(\n expiry=reserved_instance['End']))\n \n\n return results",
"def runner(event, context):\n\n logger.info('Running available_ips...')\n for region in regions:\n for acct in accounts:\n logger.info(\n \"\"\"\n cidr-house-rules-{0}-available_ips on account {1} in region {2}\n \"\"\".format(environment, acct['id'], region)\n )\n invoke_process(function_name, acct['id'], region)",
"def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr",
"def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False",
"def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']",
"def __get_multi_instances(self, reservations, instance_ids=None, policies=None):\n check_instance_ids = False\n if ( instance_ids and len(instance_ids) > 0 ):\n check_instance_ids = True\n instances = [] \n for reservation in reservations:\n if check_instance_ids:\n for instance in reservation.instances:\n if instance.id in instance_ids:\n instances.append(instance)\n elif policies:\n for instance in reservation.instances:\n if 'typevm' in policies and instance.instance_type == policies['typevm']:\n instances.append(instance) \n elif policies.get('level')==1:\n if self.__compare_types_instances(policies, instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n elif policies.get('level') == 0:\n if self.__is_adaptive_instance(self.__get_metrics_adapted(policies), instance.instance_type.encode(\"latin-1\")):\n instances.append(instance)\n else:\n instances=[]\n else:\n instances += reservation.instances\n return instances, len(instances)",
"def main():\n # Initialize\n args = get_args()\n profiles = get_profiles(args)\n running_filter = [{'Name': 'instance-state-name', 'Values': ['running']}]\n\n # Sort through servers for a matching name\n matching_list = []\n for profile in profiles:\n filtered_instances = get_ec2_reservations(profile, running_filter)\n for reservation in filtered_instances:\n # Filter for instances with a 'Name' tag that matches filter_string\n instances = [\n instance for instance in reservation['Instances']\n if instance.get('Tags') and [\n tag for tag in instance['Tags']\n if tag['Key'] == 'Name' and args.filter_string in tag['Value']\n ]\n ]\n # Add matching instances to matching_list\n for instance in instances:\n matching_list.append({\n 'Name': [tag['Value'] for tag in instance['Tags'] if tag['Key'] == 'Name'][0],\n 'InstanceId': instance['InstanceId'],\n 'PublicDnsName': instance['PublicDnsName'] if instance.get('PublicDnsName')\n else 'No Public DNS',\n 'PrivateIpAddress': instance['PrivateIpAddress']\n if instance.get('PrivateIpAddress')\n else 'No Private IP'\n })\n\n # If flag for full run not added, exit one there instances are found\n if matching_list and not args.no_early_exit:\n stop_and_tabulate(matching_list)\n\n # Tabulate output once done\n stop_and_tabulate(matching_list)",
"def run(self):\n ilist = []\n key_filter = filters[self.args['filter_group']]\n for item in self.client.describe_instances()['Reservations']:\n for instance in item['Instances']:\n idict = {}\n for tag in instance['Tags']:\n if not any(t['Key'] == 'Name' for t in instance['Tags']):\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n if tag['Key'] == 'Name':\n if tag['Value'] == \"\":\n tag['Value'] = 'Unnamed'\n idict['Name'] = tag['Value']\n for key in key_filter:\n try:\n if key in ['AvailabilityZone','Tenancy']:\n idict[key] = instance['Placement'][key]\n elif key == 'SecurityGroups':\n sg_list = []\n for sg in instance[key]:\n sg_list.append(sg['GroupId'])\n if self.args['output'] == 'csv':\n sg_string = \" \\n\"\n idict[key] = sg_string.join(sg_list)\n else:\n idict[key] = ','.join(sg_list)\n elif key == 'BlockDeviceMappings':\n devices = []\n for dev in instance[key]:\n devices.append(dev['DeviceName'])\n if self.args['output'] == 'csv':\n dev_string = \" \\n\"\n idict[key] = dev_string.join(devices)\n else:\n idict[key] = ','.join(devices)\n elif key == 'State':\n idict[key] = instance[key]['Name']\n else:\n if instance[key]:\n idict[key] = instance[key]\n except Exception as e:\n idict[key] = 'N/A'\n ilist.append(idict)\n self.template(self.sortList(ilist))",
"def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances",
"def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances",
"def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()",
"def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"",
"def test_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)",
"def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)",
"def wait_instances_running(ec2, instances: Iterable[Boto2Instance]) -> Iterable[Boto2Instance]:\n running_ids = set()\n other_ids = set()\n while True:\n pending_ids = set()\n for i in instances:\n if i.state == 'pending':\n pending_ids.add(i.id)\n elif i.state == 'running':\n assert i.id not in running_ids\n running_ids.add(i.id)\n yield i\n else:\n assert i.id not in other_ids\n other_ids.add(i.id)\n yield i\n logger.info('%i instance(s) pending, %i running, %i other.',\n *list(map(len, (pending_ids, running_ids, other_ids))))\n if not pending_ids:\n break\n seconds = max(a_short_time, min(len(pending_ids), 10 * a_short_time))\n logger.info('Sleeping for %is', seconds)\n time.sleep(seconds)\n for attempt in retry_ec2():\n with attempt:\n instances = ec2.get_only_instances(list(pending_ids))",
"def main(self, _):\n all_addresses = find_addresses.probe_regions()\n\n print(\"\")\n if not all_addresses:\n print(\"No namespace elastic IP addresses found.\")\n\n for region in consts.REGIONS:\n region_addresses = [address for address in all_addresses\n if address['region'] == region]\n if not region_addresses:\n continue\n\n print(f\"{region}: {len(region_addresses)} address(es) found:\")\n for address in region_addresses:\n if 'instance_name' in address:\n print(f\" {address['ip']} ({address['instance_name']})\")\n elif 'association_id' in address:\n print(f\" {address['ip']} (unknown association)\")\n else:\n print(f\" {address['ip']} (not associated)\")",
"def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False",
"def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def yield_instances_in_zones(self, zones, instance_filter=None):\n for zone in zones:\n for instance in self.yield_instances_in_zone(zone, instance_filter):\n yield instance",
"def perform_snapshot(context, region, installed_region='us-east-1'):\n LOG.info('Reviewing snapshots in region %s', region)\n\n # fetch these, in case we need to figure out what applies to an instance\n configurations = dynamo.list_configurations(context, installed_region)\n LOG.debug('Fetched all possible configuration rules from DynamoDB')\n\n # build a list of any IDs (anywhere) that we should ignore\n ignore_ids = utils.build_ignore_list(configurations)\n\n # setup some lookup tables\n cache_data = utils.build_cache_maps(context, configurations, region, installed_region)\n all_instances = cache_data['instance_id_to_data']\n instance_configs = cache_data['instance_id_to_config']\n volume_snap_recent = cache_data['volume_id_to_most_recent_snapshot_date']\n\n for instance_id in set(all_instances.keys()):\n # before we go do some work\n if timeout_check(context, 'perform_snapshot'):\n break\n\n if instance_id in ignore_ids:\n continue\n\n snapshot_settings = instance_configs[instance_id]\n\n # parse out snapshot settings\n retention, frequency = utils.parse_snapshot_settings(snapshot_settings)\n\n # grab the data about this instance id, if we don't already have it\n instance_data = all_instances[instance_id]\n\n ami_id = instance_data['ImageId']\n LOG.info('Reviewing snapshots in region %s on instance %s', region, instance_id)\n\n for dev in instance_data.get('BlockDeviceMappings', []):\n # before we go make a bunch more API calls\n if timeout_check(context, 'perform_snapshot'):\n break\n\n # we probably should have been using volume keys from one of the\n # caches here, but since we're not, we're going to have to check here too\n LOG.debug('Considering device %s', dev)\n volume_id = dev['Ebs']['VolumeId']\n\n if volume_id in ignore_ids:\n continue\n\n # find snapshots\n recent = volume_snap_recent.get(volume_id)\n now = datetime.datetime.now(dateutil.tz.tzutc())\n\n # snapshot due?\n if should_perform_snapshot(frequency, now, volume_id, recent):\n LOG.debug('Performing snapshot for %s, calculating tags', volume_id)\n else:\n LOG.debug('NOT Performing snapshot for %s', volume_id)\n continue\n\n # perform actual snapshot and create tag: retention + now() as a Y-M-D\n delete_on_dt = now + retention\n delete_on = delete_on_dt.strftime('%Y-%m-%d')\n\n volume_data = utils.get_volume(volume_id, region=region)\n expected_tags = utils.calculate_relevant_tags(\n instance_data.get('Tags', None),\n volume_data.get('Tags', None))\n\n utils.snapshot_and_tag(\n instance_id,\n ami_id,\n volume_id,\n delete_on,\n region,\n additional_tags=expected_tags)",
"def _scheduler_for_regions(self, regions):\n template_json = json.loads(self._template('scheduler'))\n resources = template_json['Resources']\n for role_policy in resources['Role']['Properties']['Policies']:\n policy_name = role_policy['PolicyName']\n if policy_name not in ('FlotillaDynamo', 'FlotillaQueue'):\n continue\n\n statements = role_policy['PolicyDocument']['Statement']\n for statement in statements:\n # Replace \"this region\" reference with every managed region:\n new_resources = []\n for region in regions:\n region_resource = deepcopy(statement['Resource'])\n region_resource['Fn::Join'][1][1] = region\n new_resources.append(region_resource)\n statement['Resource'] = new_resources\n\n return json.dumps(template_json)",
"def _scheduler_for_regions(self, regions):\n template_json = json.loads(self._template('scheduler'))\n resources = template_json['Resources']\n for role_policy in resources['Role']['Properties']['Policies']:\n policy_name = role_policy['PolicyName']\n if policy_name not in ('FlotillaDynamo', 'FlotillaQueue'):\n continue\n\n statements = role_policy['PolicyDocument']['Statement']\n for statement in statements:\n # Replace \"this region\" reference with every managed region:\n new_resources = []\n for region in regions:\n region_resource = deepcopy(statement['Resource'])\n region_resource['Fn::Join'][1][1] = region\n new_resources.append(region_resource)\n statement['Resource'] = new_resources\n\n return json.dumps(template_json, indent=2)",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def test_can_query_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, get_instances_by_tag_type, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n tag1 = {'fsimcluster': 'testcluster'}\n type = 'f1.2xlarge'\n\n # create an instance with only a single tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags=tag1)\n instances.should.have.length_of(1)\n\n tag2 = { 'secondtag': 'secondvalue' }\n # create an instance with additional tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={**tag1, **tag2})\n instances.shouldnt.be.empty\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)\n\n # get_instances_by_tag_type with both tags should only return one instance\n instances = get_instances_by_tag_type({**tag1, **tag2},type)\n list(instances).should.have.length_of(1)\n\n # and that instance should be the one with both tags\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n operation_params = {\n 'InstanceIds': ids\n }\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.equal({**tag1, **tag2})\n\n # get_instances_by_tag_type with only the original tag should return both instances\n instances = get_instances_by_tag_type(tag1,type)\n list(instances).should.have.length_of(2)",
"def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()",
"def wait_instances_running(ec2, instances):\n running_ids = set()\n other_ids = set()\n while True:\n pending_ids = set()\n for i in instances:\n if i.state == 'pending':\n pending_ids.add(i.id)\n elif i.state == 'running':\n assert i.id not in running_ids\n running_ids.add(i.id)\n yield i\n else:\n assert i.id not in other_ids\n other_ids.add(i.id)\n yield i\n log.info('%i instance(s) pending, %i running, %i other.',\n *map(len, (pending_ids, running_ids, other_ids)))\n if not pending_ids:\n break\n seconds = max(a_short_time, min(len(pending_ids), 10 * a_short_time))\n log.info('Sleeping for %is', seconds)\n time.sleep(seconds)\n for attempt in retry_ec2():\n with attempt:\n instances = ec2.get_only_instances(list(pending_ids))",
"def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': 
security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances",
"def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')"
] |
[
"0.658702",
"0.6300251",
"0.6290711",
"0.6167918",
"0.61583686",
"0.6010611",
"0.6007168",
"0.6005972",
"0.5818605",
"0.5817194",
"0.5642353",
"0.5628755",
"0.5611951",
"0.5504251",
"0.5503392",
"0.5461961",
"0.54465055",
"0.54377943",
"0.5424516",
"0.54001635",
"0.5366801",
"0.5365979",
"0.53561825",
"0.5343723",
"0.5342648",
"0.5337706",
"0.5333448",
"0.53291106",
"0.5311884",
"0.5307277"
] |
0.79189426
|
0
|
Returns whether or not this MPCFramework supports the given game
|
def supports_game(cls, game: Game) -> bool:
return game in cls.SUPPORTED_GAMES
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_support(mode: int) -> bool:\n return mode in supported_modes\n pass",
"def isValid(self, game):\n return True",
"def is_supported():\n return not isinstance(_the_app, StubApp)",
"def platform_supported(self):\n return platform.system().lower() in self.platforms if self.platforms else False",
"def game_mode(self):\n return self._get(\"game_mode\")",
"def playerCanPlay(game, situation, player):\r\n return True",
"def is_game_win(self):\n return not self.deck and not self.hand",
"def get_game_on_status(self) -> bool:\n return self._game_on.get()",
"def is_supported(self) -> bool:\n if self.builders and self.app.builder.name not in self.builders:\n return False\n if self.formats and self.app.builder.format not in self.formats:\n return False\n\n return True",
"def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)",
"def is_game_won(self):\n return True",
"def game_check(self, mode, row=None, col=None, is_set=True,\r\n show_fail=True):\r\n self.add_play_move(PlayMove.GAME_CHECK, mode=mode, row=row, is_set=is_set, show_fail=show_fail)\r\n if mode == \"h\" or mode == \"v\":\r\n part = self.get_part(type=\"edge\", sub_type=mode, row=row, col=col)\r\n if part is None:\r\n raise SelectError(f\"game_check: no edge({mode}) found at row={row} col={col}\") \r\n is_on = part.is_turned_on()\r\n if is_on != is_set:\r\n result = False\r\n msg = (f\"Unexpected test result: {result}\"\r\n f\" for line({mode}) at row={row} col={col}\")\r\n SlTrace.lg(f\"game_check: {msg}\")\r\n if show_fail:\r\n raise SelectFail(msg)\r\n return False\r\n elif mode == \"sq\":\r\n part = self.get_part(type=\"region\", row=row, col=col)\r\n is_on = part.is_turned_on()\r\n if is_on != is_set:\r\n result = False\r\n msg = (f\"Unexpected test result: {result}\"\r\n f\" for square at row={row} col={col}\")\r\n SlTrace.lg(f\"game_check: {msg}\")\r\n if show_fail:\r\n raise SelectFail(msg)\r\n return False\r\n else:\r\n raise SelectFail(f\"Unrecognized game_check mode({mode}\")\r\n \r\n return True",
"def can_play(self) -> bool:\n purple_card = self.game.board.purple\n return (\n self.game.current_player != self\n and purple_card is not None\n and purple_card.space > len(self.game.board.yellow[self])\n )",
"def validGameSettings(self):\n if not isinstance(self.view, GView):\n return False\n if not isinstance(self.input, GInput):\n return False\n validStates = [STATE_INACTIVE, STATE_NEWWAVE, STATE_ACTIVE,\n STATE_PAUSED, STATE_CONTINUE, STATE_COMPLETE]\n if not self.getState() in validStates:\n return False\n if not self.getWave() is None or isinstance(self.getWave(), Wave):\n return False\n if not self.getText() is None or isinstance(self.getText(), GLabel):\n return False\n return True",
"def getClsStageSupported(cls, instcls, stage, slot):\n if cls.getordering(instcls, stage, slot) is not None:\n return ordering[instcls][stage][slot]['supported']\n return False",
"def _os_supported(self, plugin):\r\n return sys.platform in plugin.plugin_object.get_supported_os()",
"def canPlay(self):\r\n return self.__canPlay",
"def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return",
"def is_side_games_channel(channel: discord.TextChannel) -> bool:\n return get_active_feature(channel) == ActivationState.SIDE_GAMES",
"def is_graphic_driver(self):\n if self.class_id == \"0x03\":\n return True\n else:\n return False",
"def isOpen(self):\n\t\treturn not self.endgame",
"def passive_game(self):\n passive_game = False\n if len(self.moves) >= constant.MAX_MOVES_WITHOUT_CAPTURE:\n passive_game = True\n for move in range(constant.MAX_MOVES_WITHOUT_CAPTURE):\n if len(self.moves[-move][2]) != 0:\n passive_game = False\n break\n\n return passive_game",
"def is_playfield(cls):\n return True",
"def is_strategy_supported(self, mode: CalculationStrategy) -> bool:\n return mode == self.calculation_strategy",
"def game_allowed(self, uid=0):\n return True",
"def is_game_won(self):\n if self.game_is_tied():\n return False\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n if my_available_steps == 0 or opp_available_steps == 0:\n return True\n else:\n return False",
"def is_window(game_object: GameObject) -> bool:\n from sims4communitylib.enums.tags_enum import CommonGameTag\n from sims4communitylib.utils.objects.common_object_tag_utils import CommonObjectTagUtils\n return CommonObjectTagUtils.has_game_tags(game_object, (CommonGameTag.BUILD_WINDOW, ))",
"def isSupported(self, *args):\n return _libsbml.SBMLExtension_isSupported(self, *args)",
"def supported():\n return os.path.isfile(OPENCOR)",
"def still_playing_game(self):\n for player in self.players:\n if player.is_playing:\n return True\n return False"
] |
[
"0.65107864",
"0.62987363",
"0.6131972",
"0.61076665",
"0.6079678",
"0.60238105",
"0.5971767",
"0.5921434",
"0.58807635",
"0.57579356",
"0.574543",
"0.57257944",
"0.57189626",
"0.5686974",
"0.5674633",
"0.5671523",
"0.56437093",
"0.5639276",
"0.56391764",
"0.5628202",
"0.5610448",
"0.5598383",
"0.5575863",
"0.55677223",
"0.55527836",
"0.555175",
"0.5513284",
"0.54953486",
"0.54930323",
"0.5478005"
] |
0.86261773
|
0
|
Method that will be called to prepare input for an MPCFramework. This function takes `self.input_file` (given as a CSV) and converts it as necessary into a usable format for the framework.
|
async def prepare_input(self) -> Status:
with open(self.input_file) as fin:
reader = csv.DictReader(fin)
fieldnames = reader.fieldnames
if fieldnames is None:
raise ValueError("fieldnames is None from csv reader")
for col in self.game.input_columns[self.player.role]:
# Don't look for a literal column labeled "features"
if col != InputColumn.features and str(col) not in fieldnames:
raise MPCStartupError(f"{col} column required in input CSV")
return Status.OK
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def prepare_input(self):\n super().prepare_input()\n input_file = self._input_filepath.open(encoding='utf-8')\n input_formatted_file = Path(self.__input_formatted_filepath).open('w', encoding='utf8')\n for line in input_file.readlines():\n for token in line.split():\n if token.endswith('.') or token.endswith(','):\n input_formatted_file.write('{0}\\n{1}\\n'.format(token[:-1], token[-1]))\n else:\n input_formatted_file.write('{}\\n'.format(token))",
"def loadCSV(input_file):",
"def preprocess(self):\n inputMatrix = pd.read_csv(self.input, index_col = 0)\n\n original_filename = self.input.split(\"/\")[-1]\n mapped_filename = \"mapped_\" + self.desiredFormat + \"_\" + original_filename\n output = self.input\n output_filepath = \"/\".join(self.input.split(\"/\")[0:-1])\n #as the DataFormatter always transposes the data before any further processing, we can expect all genes to be in the columns\n genesInColumn = \"true\"\n #only map genes if the current format is not the desired format\n if (self.currentFormat != self.desiredFormat):\n output = output_filepath + \"/\" + mapped_filename\n benchutils.mapDataMatrix(inputMatrix, genesInColumn, self.currentFormat, self.desiredFormat, output, self.labeled)\n\n return output",
"def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)",
"def createInputFile(self):\r\n\r\n input_variables = []\r\n\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n input_variables.append(variable)\r\n\r\n if len(input_variables) == 0:\r\n QMessageBox.warning(self,\r\n \"Cannot create input file\",\r\n \"The input file cannot be created because the model has no input variables\")\r\n return\r\n\r\n filename, _ = os.path.splitext(self.filename)\r\n\r\n filename, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption=\"Save Input File\",\r\n directory=filename + '_in.csv',\r\n filter=\"Comma Separated Values (*.csv);;All Files (*.*)\")\r\n\r\n if not filename:\r\n return\r\n\r\n with open(filename, 'w') as f:\r\n\r\n # column names\r\n f.write('\"time\"')\r\n for variable in input_variables:\r\n f.write(',\"%s\"' % variable.name)\r\n f.write('\\n')\r\n\r\n # example data\r\n f.write(','.join(['0'] * (len(input_variables) + 1)) + '\\n')\r\n\r\n self.ui.inputFilenameLineEdit.setText(filename)",
"def _preprocess_file(self, input_filename):\n headers = []\n output_order = []\n self._logger.info('Preprocessing {0}...'.format(input_filename))\n output_filename = '{0}.preprocessed'.format(input_filename)\n with open(input_filename, mode='r') as ifile, open(output_filename, mode='w') as ofile:\n self._files_to_delete.append(output_filename)\n ofile.write('{0}|optional_fields\\n'.format('|'.join(self.mandatory_fields)))\n for i, line in enumerate(ifile):\n fields = line.split('|')\n if i == 0:\n col_count = len(fields)\n headers = [x.lower().replace(' ', '_').strip() for x in fields]\n for field in self.mandatory_fields:\n if field not in headers:\n raise exceptions.PreprocessorCheckException(\n 'Missing mandatory field {0} when preprocessing {1}'.format(field, input_filename),\n statsd=self._statsd,\n metrics_failures_root=self._metrics_failures_root\n )\n output_order.append(headers.index(field))\n extra_cols = [x for x in range(0, col_count) if x not in output_order]\n else:\n if col_count != len(fields):\n raise exceptions.PreprocessorCheckException(\n 'Inconsistent number of fields per row on line {0:d} when '\n 'preprocessing {1}'.format(i, input_filename),\n statsd=self._statsd,\n metrics_failures_root=self._metrics_failures_root\n )\n\n # Replace any double-quote character with empty string\n fields = [field.strip().replace('\"', '') for field in fields]\n\n # Add any extra fields to a JSON dict\n json_dict = {}\n for i in extra_cols:\n val = fields[i].replace('\\n', '')\n json_dict[headers[i]] = None if len(val) == 0 else val\n\n # Replace the \\n they only appear at the end of the lines and aren't part of the field\n # Add \\x01 to start and end of Json field, these are treated as quotes by postgres and stop the\n # copy operation from trying to parse the JSON.\n # Swap the \\\" with \\\"\" so that the preprocessed file will pass the validator , this is stripped\n # out by the batch upload function.\n output_string = '{0}{1}{2}{3}{4}\\n'.format(\n '|'.join([fields[i].replace('\\n', '')\n .replace('\"', '\"\"') for i in output_order]),\n '|',\n self.postgres_quote_char,\n json.dumps(json_dict, ensure_ascii=False).replace('\\\\\"', '\\\\\"\"'),\n self.postgres_quote_char)\n ofile.write(output_string)\n\n self._logger.info('Preprocessed: {0}'.format(output_filename))\n return output_filename",
"def preprocess(self):\n df = pd.read_csv(self.input, sep=self.dataSeparator, index_col = 0)\n #ATTENTION: this processing assumes that the data is formatted in a way that header and index are automatically recognized. remove trailing commas/separators at first line of the file for this to be achieved\n if self.transposeMatrix:\n df = df.T\n\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_transposed.csv\"\n\n df.to_csv(filename)\n return filename",
"def process(self, filename=None):\n if not filename:\n filename = self.input_csv\n\n if filename.endswith('.csv'):\n\n with open(filename) as _file:\n reader = csv.reader(_file)\n data = list(reader)\n\n self.save(self.format(self.purify(data)))\n return\n\n raise TypeError(\n \"Excepting csv filetype got %s \\\n filetype instead.\" % (filename.split('.', 1)[1])\n )",
"def create(self): # , **kwargs):\n\n # Preamble\n csvReader = CSVReader()\n csvReader.inputs.in_file = self.csv_file.default_value\n csvReader.inputs.header = self.hasHeader.default_value\n csvOut = csvReader.run()\n\n print((\"=\" * 80))\n print((csvOut.outputs.__dict__))\n print((\"=\" * 80))\n\n iters = OrderedDict()\n label = list(csvOut.outputs.__dict__.keys())[0]\n result = eval(\"csvOut.outputs.{0}\".format(label))\n iters[\"tests\"], iters[\"trains\"] = sample_crossvalidation_set(\n result, self.sample_size.default_value\n )\n # Main event\n out_fields = [\"T1\", \"T2\", \"Label\", \"trainindex\", \"testindex\"]\n inputsND = Node(\n interface=IdentityInterface(fields=out_fields),\n run_without_submitting=True,\n name=\"inputs\",\n )\n inputsND.iterables = [\n (\"trainindex\", iters[\"trains\"]),\n (\"testindex\", iters[\"tests\"]),\n ]\n if not self.hasHeader.default_value:\n inputsND.inputs.T1 = csvOut.outputs.column_0\n inputsND.inputs.Label = csvOut.outputs.column_1\n inputsND.inputs.T2 = csvOut.outputs.column_2\n else:\n inputsND.inputs.T1 = csvOut.outputs.__dict__[\"t1\"]\n inputsND.inputs.Label = csvOut.outputs.__dict__[\"label\"]\n inputsND.inputs.T2 = csvOut.outputs.__dict__[\"t2\"]\n pass # TODO\n metaflow = Workflow(name=\"metaflow\")\n metaflow.config[\"execution\"] = {\n \"plugin\": \"Linear\",\n \"stop_on_first_crash\": \"false\",\n \"stop_on_first_rerun\": \"false\",\n # This stops at first attempt to rerun, before running, and before deleting previous results.\n \"hash_method\": \"timestamp\",\n \"single_thread_matlab\": \"true\", # Multi-core 2011a multi-core for matrix multiplication.\n \"remove_unnecessary_outputs\": \"true\",\n \"use_relative_paths\": \"false\", # relative paths should be on, require hash update when changed.\n \"remove_node_directories\": \"false\", # Experimental\n \"local_hash_check\": \"false\",\n }\n\n metaflow.add_nodes([inputsND])\n \"\"\"import pdb; pdb.set_trace()\"\"\"\n fusionflow = FusionLabelWorkflow()\n self.connect(\n [\n (\n metaflow,\n fusionflow,\n [\n (\"inputs.trainindex\", \"trainT1s.index\"),\n (\"inputs.T1\", \"trainT1s.inlist\"),\n ],\n ),\n (\n metaflow,\n fusionflow,\n [\n (\"inputs.trainindex\", \"trainLabels.index\"),\n (\"inputs.Label\", \"trainLabels.inlist\"),\n ],\n ),\n (\n metaflow,\n fusionflow,\n [\n (\"inputs.testindex\", \"testT1s.index\"),\n (\"inputs.T1\", \"testT1s.inlist\"),\n ],\n ),\n ]\n )",
"def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r",
"def preprocess(self):\n\n self._build_labels_dict(['one', 'two', 'three', 'four', 'five'])\n\n with open(self.data_path + self.file_name, 'rb') as csvfile:\n\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n self.texts.append(row[1])\n self.labels.append(self.labels_index[row[0]])\n\n print('Found %s texts.' % len(self.texts))",
"def parse_args(self):\n assert os.path.isfile(self.params.csv_input_file), \\\n \"Input CSV file %s not found\" % self.params.csv_input_file",
"def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def ProcessCSV(self, input_file, verbose, output_file):\n row_dict = self.CSVReader(input_file)\n report = []\n output_field_names = row_dict.fieldnames\n output_field_names.append('status')\n\n for row in row_dict:\n if 'action' in row.keys():\n if row['action'] == 'create':\n self.CreateUser(row)\n elif row['action'] == 'update':\n self.UpdateUser(row)\n elif row['action'] == 'delete':\n self.DeleteUser(row)\n else:\n row['status'] = ('Error: action must be create, update, or delete for'\n ' username %s' % (row['user_name']))\n else:\n print 'error - action is a required header in the input CSV file'\n sys.exit()\n\n # delete password attribute so we dont output that to screen or CSV\n if 'password' in row.keys():\n del row['password']\n report.append(row)\n if verbose is True:\n output = []\n list_tup = ()\n for k, v in row.items():\n if v:\n list_tup = (k, v)\n output.append(list_tup)\n print output\n self.OutputWriter(report, output_file, output_field_names)",
"def prepare_data(\n self,\n csv_file_path='../data/train-orig.csv',\n tiff_folder_path='../data/train/', # for validation: '../data/valid'\n mixup: bool = False,\n for_training=True,\n data_ratio_mixup=2,\n alpha_mixup=0.2,\n # if True, standardization parameters will be computed, if False then apply parameters computed from training data\n ):\n self.data = read_data(\n csv_file_path=csv_file_path,\n tiff_folder_path=tiff_folder_path,\n )\n # Mixup\n if mixup:\n self.mixup_data(data_ratio_produce=data_ratio_mixup, alpha=alpha_mixup)\n\n ### augment, do whatever you want (distinguish between train and validation setting!)",
"def load(self, input):",
"def prepare_preprocessed(inputFileName, sentence=False):\n\n # Define functions for stopwords, bigrams, trigrams and lemmatization\n def remove_stopwords(texts):\n return [[word for word in doc if word not in stop_words] for doc in texts]\n\n if sentence:\n data = readInputFileSentence(inputFileName, True)\n else:\n data = readInputFile(inputFileName, True)\n\n\n data_words_nostops = remove_stopwords(data)\n\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_words_nostops)\n\n # Create Corpus\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in data]\n\n return corpus, id2word, data, None #No bigrams in this case",
"def _read_csv(self, input_file, quotechar=None):\n with codecs.open(input_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f, delimiter=\",\", quotechar=quotechar)\n examples = []\n seq_id = 0\n header = next(reader) # skip header\n for line in reader:\n example = InputExample(\n guid=seq_id, label=line[0], text_a=line[1])\n seq_id += 1\n examples.append(example)\n return examples",
"def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}",
"def _read_input_file(self):\n pass",
"def _parse_input(self):\n #temperature\n regex = re.compile(\"TEMP=(\\d+\\.\\d*|\\d+)\")\n r = regex.search(self.file_dic['input'])\n if r:\n self.temperature = r.groups()[0]\n else:\n self.temperature = 298.15\n #theory\n regex = re.compile('(\\$contrl.+\\$end|\\$basis.+ \\$end)')\n temp_theory = regex.findall(self.file_dic['input'])\n contrl = temp_theory[0][:-4][7:].strip()\n basis = temp_theory[1][:-4][6:].strip()\n self.theory = contrl + ' ' + basis",
"def preprocess_csv_file(job_id, **kwargs):\n try:\n preprocess_job = PreprocessJob.objects.get(pk=job_id)\n\n print('preprocess_job obj', preprocess_job)\n except PreprocessJob.DoesNotExist:\n err_msg = 'PreprocessJob not found for id: %s' % job_id\n result_info = dict(success=False,\n job_id=job_id,\n user_message=err_msg)\n return err_resp(err_msg)\n\n if not preprocess_job.source_file:\n err_msg = 'No source file for PreprocessJob with id: %s' % job_id\n result_info = dict(success=False,\n job_id=job_id,\n user_message=err_msg)\n return err_resp(err_msg)\n\n input_file = preprocess_job.source_file\n\n start_time = time.time()\n print('(%s) Start preprocess: %s' % (start_time, input_file))\n\n kwargs['SCHEMA_INFO_DICT'] = get_temp_schema_info()\n kwargs['job_id'] = job_id\n\n\n run_info = PreprocessRunner.load_from_file(\\\n input_file,\n **kwargs)\n\n if not run_info.success:\n print('(%s) FAILED: %s' % (input_file, run_info.err_msg))\n result_info = dict(success=False,\n job_id=job_id,\n input_file=input_file,\n user_message=run_info.err_msg)\n else:\n runner = run_info.result_obj\n elapsed_time = time.time() - start_time\n elapsed_time_str = time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))\n\n result_info = dict(success=True,\n job_id=job_id,\n input_file=input_file,\n user_message=\"File processed.\",\n elapsed_time=elapsed_time_str,\n data=runner.get_final_dict())\n\n updater = PreprocessResultUpdater(**result_info)\n\n if updater.has_error():\n return err_resp(updater.get_error_message())\n\n return ok_resp('All set')",
"def setUp(self, path, structure_file, input_file):\n database.clean()\n self.path = path\n self.structure_file = path + structure_file\n self.input_file = path + input_file\n\n string_processor.project = Project()\n\n self.input_project = Project()\n self.input_project.document_files.append(\n DocumentFile(path=self.input_file))\n self.input_project.save()\n\n with open(self.structure_file) as f:\n self.json = json.load(f)\n\n self.xml = etree.parse(self.input_file)\n self.extractor = StructureExtractor(string_processor,\n self.structure_file)",
"def __csv_schema_generator(file):\n try:\n # Parses the first line of the file to get all the headers.\n metadata = str(file.readline().decode('utf-8')).strip().split(',')\n # Will be further implemented in phase 3.\n return SchemaGenerator.__build_schema(metadata)\n except Exception as e:\n logging.error('Failed to parse csv file into schema: ' + str(e))\n raise FailedCreatingSchemaException(\"Failed to create schema from csv file.\")",
"def __init__(__self__, *,\n content_type: pulumi.Input[Union[str, 'FileImportContentType']],\n import_file: pulumi.Input['FileMetadataArgs'],\n ingestion_mode: pulumi.Input[Union[str, 'IngestionMode']],\n resource_group_name: pulumi.Input[str],\n source: pulumi.Input[str],\n workspace_name: pulumi.Input[str],\n file_import_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"content_type\", content_type)\n pulumi.set(__self__, \"import_file\", import_file)\n pulumi.set(__self__, \"ingestion_mode\", ingestion_mode)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"workspace_name\", workspace_name)\n if file_import_id is not None:\n pulumi.set(__self__, \"file_import_id\", file_import_id)",
"def __init__(self, input_file):\r\n self.input_file = input_file\r\n self.no_process = 0\r\n self.ids = []\r\n self.weights = []",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n # Get the data/raw/git-refactoring-commits-raw.csv file\n dataset = pd.read_csv(input_filepath, usecols=['Message', 'CommitSHA', 'IsRefactoring'])\n logger.info('Loaded data file ' + input_filepath + ' with ' + str(len(dataset)) + ' rows')\n\n logger.info('Applying pre-processing steps on the \"Message\" column...')\n dataset['Message'] = dataset['Message'].apply(preprocess)\n\n # Save the processed subset on data data/processed/git-refactoring-commits.csv\n logger.info('Saved processed results on ' + output_filepath + ' with ' + str(len(dataset)) + ' rows')\n dataset.to_csv(output_filepath, encoding='utf-8', index=False)",
"def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)",
"def process_inputs(self, inputs):",
"def convertor(input_filename):\n try:\n text = open(input_filename, \"r\")\n except FileNotFoundError:\n print(\"File not found\", input_filename, \":(\")\n sys.exit(0)\n dir_name = os.path.dirname(input_filename)\n file_name = os.path.splitext(os.path.basename(input_filename))[0]\n output_file_name = os.path.join(dir_name, (file_name + '_new.csv'))\n output_list = []\n for n, line in enumerate(text):\n try:\n element_metres = float(line.strip())\n except ValueError:\n if ',' in line:\n print(\"More then one column in file\", n, ':', line.strip())\n sys.exit(0)\n if line.strip() == '':\n print('The line', n, 'is empty')\n sys.exit(0)\n print(\"Impossible to cast\", line.strip(), \"in float. At line = \", n)\n sys.exit(0)\n element_feet = element_metres * M_F\n output_list.append(line.strip() + ',' + str(element_feet) + '\\n')\n if not output_list:\n print('Your file is empty')\n sys.exit(0)\n try:\n output_file = open(output_file_name, \"w\")\n except IOError as ioe:\n print(\"Problem to write into file\", output_file_name)\n print('Got an Exception', ioe)\n sys.exit(0)\n for element in output_list:\n output_file.write(element)\n output_file.close()"
] |
[
"0.6678045",
"0.63659924",
"0.61832535",
"0.6178484",
"0.60234016",
"0.59841347",
"0.59212697",
"0.5921055",
"0.58992505",
"0.5816316",
"0.5810478",
"0.5741596",
"0.5741494",
"0.5665782",
"0.5596558",
"0.5566605",
"0.5560635",
"0.55366546",
"0.55172884",
"0.55052334",
"0.54981494",
"0.54818845",
"0.5472387",
"0.5455895",
"0.5443406",
"0.5434862",
"0.5433573",
"0.542975",
"0.5421314",
"0.5418853"
] |
0.6668474
|
1
|
Abstract method that is called to actually run the MPC program and get its results. Results are returned as a map of metrics to their values. For example, if a framework writes output data to a CSV, this method
|
async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_metrics(self, results: list) -> dict:",
"def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}",
"def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)",
"def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics",
"def get_results(self):\n self.report('Checking finished evaluations.')\n outputs = {}\n while self.indices_to_retrieve:\n idx = self.indices_to_retrieve.pop(0)\n key = self.eval_key(idx)\n self.report('Retrieving output for evaluation {}'.format(idx))\n eval_proc = self.ctx[key]\n if not eval_proc.is_finished_ok:\n return self.exit_codes.ERROR_EVALUATE_PROCESS_FAILED\n outputs[idx] = get_outputs_dict(eval_proc)\n\n with self.optimizer() as opt:\n opt.update(outputs)",
"def evaluate(self, size: int) -> dict:\n if len(self.results) == 0:\n print_log(\n f'{self.__class__.__name__} got empty `self.results`. Please '\n 'ensure that the processed results are properly added into '\n '`self.results` in `process` method.',\n logger='current',\n level=logging.WARNING)\n\n if self.collect_device == 'cpu':\n results = collect_results(\n self.results,\n size,\n self.collect_device,\n tmpdir=self.collect_dir)\n else:\n results = collect_results(self.results, size, self.collect_device)\n\n if is_main_process():\n # cast all tensors in results list to cpu\n results = _to_cpu(results)\n _metrics = self.compute_metrics(results) # type: ignore\n # Add prefix to metric names\n if self.prefix:\n _metrics = {\n '/'.join((self.prefix, k)): v\n for k, v in _metrics.items()\n }\n metrics = [_metrics]\n else:\n metrics = [None] # type: ignore\n\n broadcast_object_list(metrics)\n\n # reset the results list\n self.results.clear()\n return metrics[0]",
"def getResults():",
"def load_results(self):\n self.find_benchmark_directories()\n for (benchmark, producer), result in self.results.items():\n print('Reading results for ' + benchmark + ' ' + producer)\n if not result.directory:\n print('No results found for ' + benchmark + ' ' + producer)\n else:\n print('Generating report for: ' + result.directory)\n report = Report(result.directory)\n result.reports = report.generate()",
"def gen_results(self, parser_name, input_file_path):\n report = mwcp.run(parser_name, input_file_path)\n results = report.metadata\n results[INPUT_FILE_PATH] = convert_to_unicode(input_file_path)\n return report, results",
"def return_results(self):\n out = {\n 'workflow_name': self.__class__.__name__,\n 'workflow_version': self._workflowversion,\n # 'initial_structure': self.inputs.structure.uuid,\n 'is_it_force_theorem': True,\n 'soc_energies': self.ctx.h_so,\n 'q_vectors': self.ctx.q_vectors,\n 'theta': self.ctx.mae_thetas,\n 'phi': self.ctx.mae_phis,\n 'angles': self.ctx.num_ang - 1,\n 'energy_units': 'eV',\n 'info': self.ctx.info,\n 'warnings': self.ctx.warnings,\n 'errors': self.ctx.errors,\n }\n\n out = save_output_node(Dict(dict=out))\n self.out('output_dmi_wc_para', out)",
"def results(self) -> ResultProcessor:\n if self.isAnalysisCompleted():\n return ResultProcessor('input')\n else:\n raise ValueError('Results were not available')",
"def return_results(self):\n\n caching_info = f'INFO: cache_source of BS calc node: {self.ctx.BS_run.get_cache_source}'\n self.report(caching_info)\n\n if not self.ctx.BS_run.is_finished_ok:\n self.ctx.successful = False\n error = f'ERROR BS calculation failed somehow it is in state {self.ctx.BS_run.process_state}'\n self.report(error)\n self.ctx.errors.append(error)\n return self.exit_codes.ERROR_BS_CALC_FAILED # pylint: disable=no-member\n\n # create dict to store results of workflow output\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._wf_version\n outputnode_dict['withmpi'] = self.ctx.withmpi\n outputnode_dict['resources'] = self.ctx.resources\n outputnode_dict['max_wallclock_seconds'] = self.ctx.max_wallclock_seconds\n outputnode_dict['queue_name'] = self.ctx.queue\n outputnode_dict['custom_scheduler_commands'] = self.ctx.custom_scheduler_commands\n outputnode_dict['BS_params'] = self.ctx.BS_params_dict\n if 'kpoints' not in self.inputs:\n outputnode_dict['structure_type'] = self.ctx.structure_data\n outputnode_dict['BS_wf_description'] = self.ctx.description_wf\n outputnode_dict['BS_wf_label'] = self.ctx.label_wf\n try:\n outputnode_dict['nspin'] = self.ctx.BS_run.res.nspin\n except:\n error = 'ERROR: nspin not extracted'\n self.report(error)\n self.ctx.successful = False\n self.ctx.errors.append(error)\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['list_of_errors'] = self.ctx.errors\n\n # create output node with data-provenance\n outputnode = Dict(outputnode_dict)\n outputnode.label = 'kkr_BS_wc_results'\n outputnode.description = 'Contains the info of the WC'\n\n self.report('INFO: create Banstructure results nodes')\n try:\n self.report(\n f'INFO: create Bandstructure results nodes. BS calc retrieved node={self.ctx.BS_run.outputs.retrieved}'\n )\n has_BS_run = True\n except AttributeError as e:\n self.report('ERROR: No Bandstructure calc retrieved node found')\n self.report(f'Caught AttributeError {e}')\n return self.exit_codes.ERROR_BS_CALC_FAILED # pylint: disable=no-member\n\n if has_BS_run:\n BS_retrieved = self.ctx.BS_run.outputs.retrieved\n\n ef = self.ctx.fermi_energy # in Ry unit\n kpoints = self.ctx.BS_kpoints\n\n # Here outdict dictionary has been created to set the Dict result_wf, BS_data\n # to the output(spec.output) of the wf\n outdict = {}\n if has_BS_run:\n ArraData = parse_BS_data(BS_retrieved, Float(ef), kpoints)\n outdict['BS_Data'] = ArraData['BS_Data']\n\n # link to the BS output nodes\n link_nodes = outdict.copy()\n\n outdict['results_wf'] = create_out_dict_node(outputnode, **link_nodes)\n\n # create links to output nodes\n for link_name, node in outdict.items():\n self.out(link_name, node)\n\n self.report('INFO: done with BS_workflow!\\n')",
"def compute_metrics(self, results: list) -> Dict[str, float]:\n logger: MMLogger = MMLogger.get_current_instance()\n\n # pred_coords: [N, K, D]\n pred_coords = np.concatenate(\n [result['pred_coords'] for result in results])\n if pred_coords.ndim == 4 and pred_coords.shape[1] == 1:\n pred_coords = np.squeeze(pred_coords, axis=1)\n # gt_coords: [N, K, D]\n gt_coords = np.stack([result['gt_coords'] for result in results])\n # mask: [N, K]\n mask = np.concatenate([result['mask'] for result in results])\n # action_category_indices: Dict[List[int]]\n action_category_indices = defaultdict(list)\n for idx, result in enumerate(results):\n action_category = result['action'].split('_')[0]\n action_category_indices[action_category].append(idx)\n\n error_name = self.mode.upper()\n\n logger.info(f'Evaluating {self.mode.upper()}...')\n metrics = dict()\n\n metrics[error_name] = keypoint_mpjpe(pred_coords, gt_coords, mask,\n self.ALIGNMENT[self.mode])\n\n for action_category, indices in action_category_indices.items():\n metrics[f'{error_name}_{action_category}'] = keypoint_mpjpe(\n pred_coords[indices], gt_coords[indices], mask[indices])\n\n return metrics",
"def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }",
"def run(self):\n\t\terrors = {}\n\t\tvalues = {}\n\t\tcpu = System.CPUUsage()\n\t\tmem = System.MemoryUsage()\n\t\tdisk = System.DiskUsage()\n\t\tif cpu > self.cpu:\n\t\t\terrors[\"cpu\"] = (cpu, self.cpu)\n\t\telse:\n\t\t\tvalues[\"cpu\"] = (cpu, self.cpu)\n\t\tif mem > self.mem:\n\t\t\terrors[\"cpu\"] = (mem, self.mem)\n\t\telse:\n\t\t\tvalues[\"mem\"] = (mem, self.mem)\n\t\tfor mount, usage in disk.items():\n\t\t\tif usage > self.disk:\n\t\t\t\terrors.setdefault(\"disk\", {})\n\t\t\t\terrors[\"disk\"][mount] = (usage, self.disk)\n\t\t\telse:\n\t\t\t\tvalues.setdefault(\"disk\", {})\n\t\t\t\tvalues[\"disk\"][mount] = (usage, self,disk)\n\t\tif errors:\n\t\t\treturn Failure(\"errors with %s\" % (\", \".join(errors.keys())), value=dict(values=values, errors=errors))\n\t\telse:\n\t\t\treturn Success(value=dict(values=values))",
"def return_results(self):\n\n message = 'INFO: entering return_results'\n self.report(message)\n\n # try/except to capture as mnuch as possible (everything that is there even when workflow exits unsuccessfully)\n # capture pk and uuids of last calc, params and remote\n try:\n last_calc_uuid = self.ctx.last_calc.uuid\n last_calc_pk = self.ctx.last_calc.pk\n last_params_uuid = self.ctx.last_params.uuid\n last_params_pk = self.ctx.last_params.pk\n last_remote_uuid = self.ctx.last_remote.uuid\n last_remote_pk = self.ctx.last_remote.pk\n except:\n last_calc_uuid = None\n last_calc_pk = None\n last_params_uuid = None\n last_params_pk = None\n last_remote_uuid = None\n last_remote_pk = None\n\n all_pks = []\n for calc in self.ctx.calcs:\n try:\n all_pks.append(calc.pk)\n except:\n self.ctx.warnings.append(f'cound not get pk of calc {calc}')\n\n # capture links to last parameter, calcualtion and output\n try:\n last_calc_out = self.ctx.kkr.out['output_parameters']\n last_calc_out_dict = last_calc_out.get_dict()\n last_RemoteData = self.ctx.last_remote\n last_InputParameters = self.ctx.last_params\n except:\n last_InputParameters = None\n last_RemoteData = None\n last_calc_out = None\n last_calc_out_dict = {}\n\n # capture convergence info\n try:\n last_rms = self.ctx.rms[-1]\n except:\n last_rms = None\n\n # now collect results saved in results node of workflow\n message = 'INFO: collect outputnode_dict'\n self.report(message)\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._workflowversion\n outputnode_dict['material'] = self.ctx.formula\n outputnode_dict['loop_count'] = self.ctx.loop_count\n outputnode_dict['warnings'] = self.ctx.warnings\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['last_params_nodeinfo'] = {'uuid': last_params_uuid, 'pk': last_params_pk}\n outputnode_dict['last_remote_nodeinfo'] = {'uuid': last_remote_uuid, 'pk': last_remote_pk}\n outputnode_dict['last_calc_nodeinfo'] = {'uuid': last_calc_uuid, 'pk': last_calc_pk}\n outputnode_dict['pks_all_calcs'] = all_pks\n outputnode_dict['convergence_value'] = last_rms\n outputnode_dict['convergence_values_all_steps'] = array(self.ctx.rms_all_steps)\n outputnode_dict['convergence_values_last_step'] = array(self.ctx.last_rms_all)\n outputnode_dict['convergence_reached'] = self.ctx.kkr_converged\n outputnode_dict['kkr_step_success'] = self.ctx.kkr_step_success\n outputnode_dict['used_higher_accuracy'] = self.ctx.kkr_higher_accuracy\n\n # report the status\n if self.ctx.successful:\n self.report(\n 'STATUS: Done, the convergence criteria are reached.\\n'\n 'INFO: The charge density of the KKR calculation pk= {} '\n 'converged after {} KKR runs and {} iterations to {} \\n'\n ''.format(\n last_calc_pk, self.ctx.loop_count - 1, sum(self.ctx.KKR_steps_stats.get('isteps', [])),\n self.ctx.last_rms_all[-1]\n )\n )\n else: # Termination ok, but not converged yet...\n self.report(\n 'STATUS/WARNING: Done, the maximum number of runs '\n 'was reached or something failed.\\n INFO: The '\n 'charge density of the KKR calculation pk= '\n 'after {} KKR runs and {} iterations is {} \"me/bohr^3\"\\n'\n ''.format(\n self.ctx.loop_count - 1, sum(self.ctx.KKR_steps_stats.get('isteps', [])), self.ctx.last_rms_all[-1]\n )\n )\n\n # create results node and link all calculations\n message = 'INFO: create results nodes'\n self.report(message)\n link_nodes = {}\n icalc = 0\n for calc in self.ctx.calcs:\n link_nodes[f'KkrimpCalc{icalc}'] = 
calc.outputs.remote_folder\n icalc += 1\n if not self.ctx.dos_run:\n link_nodes['final_imp_potential'] = self.ctx.last_pot\n outputnode_t = create_out_dict_node(Dict(dict=outputnode_dict), **link_nodes)\n outputnode_t.label = 'kkr_scf_wc_results'\n outputnode_t.description = 'Contains results of workflow (e.g. workflow version number, info about success of wf, lis tof warnings that occured during execution, ...)'\n\n self.out('workflow_info', outputnode_t)\n # store out_potential as SingleFileData only if this was no DOS run\n if not self.ctx.dos_run:\n self.out('host_imp_pot', self.ctx.last_pot)\n\n # print results table for overview\n # table layout:\n message = 'INFO: overview of the result:\\n\\n'\n message += '|------|---------|--------|------|--------|---------|-----------------|---------------------------------------------|\\n'\n message += '| irun | success | isteps | imix | mixfac | qbound | rms | pk and uuid |\\n'\n message += '| | | | | | | first | last | |\\n'\n message += '|------|---------|--------|------|--------|---------|--------|--------|---------------------------------------------|\\n'\n KKR_steps_stats = self.ctx.KKR_steps_stats\n for irun in range(len(KKR_steps_stats.get('success', []))):\n message += '|%6i|%9s|%8i|%6i|%.2e|%.3e|%.2e|%.2e|' % (\n irun + 1, KKR_steps_stats.get('success')[irun], KKR_steps_stats.get('isteps')[irun],\n KKR_steps_stats.get('imix')[irun], KKR_steps_stats.get('mixfac')[irun],\n KKR_steps_stats.get('qbound')[irun], KKR_steps_stats.get('first_rms')[irun],\n KKR_steps_stats.get('last_rms')[irun]\n )\n message += f\" {KKR_steps_stats.get('pk')[irun]} | {KKR_steps_stats.get('uuid')[irun]}|\\n\"\n message += '|------|---------|--------|------|--------|---------|-----------------|---------------------------------------------|\\n'\n \"\"\"\n message += \"#|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|\\n\".format(irun+1,\n KKR_steps_stats.get('success')[irun], KKR_steps_stats.get('isteps')[irun],\n KKR_steps_stats.get('imix')[irun], KKR_steps_stats.get('mixfac')[irun],\n KKR_steps_stats.get('qbound')[irun],\n KKR_steps_stats.get('first_rms')[irun], KKR_steps_stats.get('last_rms')[irun])\n \"\"\"\n self.report(message)\n\n # cleanup of unnecessary files after convergence\n # WARNING: THIS DESTROYS CACHABILITY OF THE WORKFLOW!!!\n if self.ctx.do_final_cleanup:\n if self.ctx.successful:\n self.report('INFO: clean output of calcs')\n remove_out_pot_impcalcs(self.ctx.successful, all_pks)\n self.report('INFO: clean up raw_input folders')\n clean_raw_input(self.ctx.successful, all_pks)\n\n # clean intermediate single file data which are not needed after successful run or after DOS run\n if self.ctx.successful or self.ctx.dos_run:\n self.final_cleanup()\n\n self.report('INFO: done with kkr_scf workflow!\\n')",
"def metrics(self) -> pulumi.Output['outputs.RuntimeMetricsResponse']:\n return pulumi.get(self, \"metrics\")",
"def compute_metrics(self):\n pass",
"def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes",
"def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results",
"def run(self) -> Dict[str, Union[float, str]]:\n try:\n self.is_run = True\n deque(self, maxlen=0) # feed the entire iterator into a zero-length deque\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n finally:\n self.is_run = False\n\n return info",
"def test_result_fields_with_metrics(cbcsdk_mock):\n api = cbcsdk_mock.api\n result = Result(api, initial_data=GET_RUN_RESULTS_RESP_1)\n metrics = result.metrics_\n assert metrics._info == {\"cpu\": 24.3, \"memory\": 8.0}",
"def _get_results(self, num_iters=True, keff=True, fluxes=False,\n num_fsrs=False, num_tracks=False, num_segments=False,\n hash_output=False):\n return super(SimpleLatticeTestHarness, self)._get_results(\n num_iters=num_iters, keff=keff, fluxes=fluxes,\n num_fsrs=num_fsrs, num_tracks=num_tracks,\n num_segments=num_segments, hash_output=hash_output)",
"def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) # number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))",
"def get_results():\r\n #Get python results\r\n import mnist_nn\r\n import mnist_nn_gpu\r\n mnist_nn.save_results()\r\n mnist_nn_gpu.save_results()\r\n\r\n #Get cpp results\r\n import subprocess\r\n subprocess.call(['c++//./run.sh'])",
"def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output",
"def get_results(self):\n if not self.sim_stop_flag.value:\n # we are only interested in entries of frames processed > 0\n max_index = np.sum(np.array(self.sim_results_struct.frames[0:50]) > 0)\n\n # convert the cstruct to dictionary\n return dict([(x, getattr(self.sim_results_struct, x)[0:max_index]) for (x,_) in self.sim_results_struct._fields_])\n else:\n return self.results",
"def get_specs(self, results: Dict[str, Dict], params: Dict) -> Dict[str, float] :\n raise NotImplementedError",
"def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results",
"def run(self, inputIn):\n measureList = self.inputToInternal(inputIn)\n outputDict = {}\n assert(len(self.features) == len(measureList))\n for metricInstance in self.metricsDict.values():\n metricEngine = MetricDistributor.factory.returnInstance('MetricDistributor', metricInstance)\n for cnt in range(len(self.targets)):\n nodeName = (str(self.targets[cnt]) + '_' + str(self.features[cnt])).replace(\"|\",\"_\")\n varName = metricInstance.name + '|' + nodeName\n output = metricEngine.evaluate(measureList[cnt], weights=self.weight, multiOutput=self.multiOutput)\n outputDict[varName] = np.atleast_1d(output)\n return outputDict"
] |
[
"0.67122155",
"0.6655885",
"0.6510935",
"0.6348492",
"0.63320374",
"0.61878014",
"0.6165703",
"0.61387163",
"0.612315",
"0.6116958",
"0.6081799",
"0.6070544",
"0.60652965",
"0.6057899",
"0.6014032",
"0.6010055",
"0.60026187",
"0.5997987",
"0.5995066",
"0.5994565",
"0.5993076",
"0.5984268",
"0.5980733",
"0.5971967",
"0.59549063",
"0.5930064",
"0.592115",
"0.58909035",
"0.58901817",
"0.58864754"
] |
0.72799563
|
0
|
Returns a predefined, constant maximum number of rows per partition
|
def get_max_rows_per_partition() -> int:
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def largest_part_size():\n return usb_part_size(largest_partition())",
"def max_partition(\n table, schema=\"default\", field=None, filter_map=None, metastore_conn_id=\"metastore_default\"\n):\n from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook\n\n if \".\" in table:\n schema, table = table.split(\".\")\n hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)\n return hive_hook.max_partition(schema=schema, table_name=table, field=field, filter_map=filter_map)",
"def get_max_partition(self, spec=None, skip_empty=True, reverse=False):\n if not self.table_schema.partitions:\n raise ValueError(\"Table %r not partitioned\" % self.name)\n return self.partitions.get_max_partition(\n spec, skip_empty=skip_empty, reverse=reverse\n )",
"def max_partition_contexts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_partition_contexts\")",
"def num_partitions(self): # -> int:\n ...",
"def find_partitions(df, match_func, max_size=None, block_by=None):\n\n # If block_by is provided, then we apply the algorithm to each block and\n # stitch the results back together\n if block_by is not None:\n blocks = df.groupby(block_by).apply(lambda g: find_partitions(\n df=g,\n match_func=match_func,\n max_size=max_size\n ))\n\n keys = blocks.index.unique(block_by)\n for a, b in zip(keys[:-1], keys[1:]):\n blocks.loc[b, :] += blocks.loc[a].iloc[-1] + 1\n\n return blocks.reset_index(block_by, drop=True)\n\n def get_record_index(r):\n return r[df.index.name or 'index']\n\n # Records are easier to work with than a DataFrame\n records = df.to_records()\n\n # This is where we store each partition\n partitions = []\n\n def find_partition(at=0, partition=None, indexes=None):\n\n r1 = records[at]\n\n if partition is None:\n partition = {get_record_index(r1)}\n indexes = [at]\n\n # Stop if enough duplicates have been found\n if max_size is not None and len(partition) == max_size:\n return partition, indexes\n\n for i, r2 in enumerate(records):\n\n if get_record_index(r2) in partition or i == at:\n continue\n\n if match_func(r1, r2):\n partition.add(get_record_index(r2))\n indexes.append(i)\n find_partition(at=i, partition=partition, indexes=indexes)\n\n return partition, indexes\n\n while len(records) > 0:\n partition, indexes = find_partition()\n partitions.append(partition)\n records = np.delete(records, indexes)\n\n return pd.Series({\n idx: partition_id\n for partition_id, idxs in enumerate(partitions)\n for idx in idxs\n })",
"def GetMaxBatchSize(self, run_params):\n if run_params.dynamic_engine:\n return None\n return min(self.max_batch_sizes)",
"def get_partition_count_for_writing(is_sampled):\n if is_sampled:\n return 1\n return 25",
"def largest_partition():\n try:\n usb_partitions = sort_partitions()\n last = len(usb_partitions) - 1\n largest = usb_partitions[last]\n except IndexError:\n print(\"Not enough USB devices available\")\n exit(1)\n else:\n return str(largest[0])",
"def get_max_readings( self ):\n return 2500",
"def min_max_variable_partitioner(max_partitions=1, axis=0,\n min_slice_size=256 << 10,\n bytes_per_string_element=16):\n def _partitioner(shape, dtype):\n \"\"\"Partitioner that partitions list for a variable of given shape and type.\n\n Ex: Consider partitioning a variable of type float32 with\n shape=[1024, 1024].\n If `max_partitions` >= 16, this function would return\n [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].\n If `max_partitions` < 16, this function would return\n [`max_partitions`, 1].\n\n Args:\n shape: Shape of the variable.\n dtype: Type of the variable.\n\n Returns:\n List of partitions for each axis (currently only one axis can be\n partitioned).\n\n Raises:\n ValueError: If axis to partition along does not exist for the variable.\n \"\"\"\n if axis >= len(shape):\n raise ValueError(\n f\"Cannot partition variable along axis {axis} when shape is \"\n f\"only {shape}\")\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n # We can not partition the variable beyond what its shape or\n # `max_partitions` allows.\n partitions_list[axis] = max(1, min(shape.dims[axis].value,\n max_partitions,\n int(math.ceil(partitions))))\n return partitions_list\n return _partitioner",
"def maxofrows(matrix,span=2):\n maximum= 0\n for i in range(0,len(matrix)):\n ans= maxinrow(matrix[i],span)\n maximum = ans if ans > maximum else maximum\n return maximum",
"def row_group_limits():\r\n from pymatgen import Element, periodic_table\r\n \r\n # Get all available elements in periodic table.\r\n rs = [e.row for e in periodic_table.Element]\r\n gs = [e.group for e in periodic_table.Element]\r\n \r\n return (max(rs), max(gs))",
"def testMax(self):\n top = 10\n table = self.auth.table(self.dataset, self.table, top=top)\n record_count = len(table)\n self.assertLessEqual(record_count, top)",
"def find_max_row_idx(self) -> int:\n return np.argmax([r.free_spots for r in self.rows])",
"def get_best_split(rows):\n best_gain = 0\n best_question = None\n current_impurity = get_gini(rows)\n n_features = len(rows[0])\n\n for col in range(n_features):\n\n for row in rows:\n question = Question(col, row[col])\n true_rows, false_rows = partition(rows, question)\n\n if len(true_rows) == 0 or len(false_rows) == 0:\n break\n\n question_gain = get_info_gain(true_rows, false_rows, current_impurity)\n\n if question_gain >= best_gain:\n best_gain = question_gain\n best_question = question\n\n print(best_gain)\n print(best_question)\n return best_gain, best_question",
"def get_max_row(self):\n return (None if self.is_raw() else self.structure.max_row)",
"def filter_rows_by_max_abs_val(df, max_=MAX_NUM_ROWS):\n df_temp = df.abs()\n top_rows = df_temp.max(axis=1).nlargest(max_)\n return df.ix[top_rows.index]",
"def get_partition_rate(self):\n\t\treturn float(self.data_split)",
"def maxinrow(row,span=2):\n maximum= 0\n offset= span - 1\n for i in range(0,len(row)-offset,1):\n print row[i:i+span]\n ans= product(row[i:i+span])\n maximum = ans if ans > maximum else maximum\n return maximum",
"def select_dim_by_card(cur, N):\n max_dim, max_card = -1, -1\n for n in range(N):\n ## cardinality of B_n\n card = get_parameter(cur, par=(\"card_B%d\" % n))\n if card > max_card:\n max_dim, max_card = n, card\n return max_dim",
"def CalculateMaxImageSize(self, partition_size):\n raise NotImplementedError",
"def num_partitions(self): # -> Unknown:\n ...",
"def take_max(self):\n return self.delete_first()",
"def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def storage_inter_max_constraint_rule(backend_model, node, tech, datestep):\n cluster = backend_model.lookup_datestep_cluster[datestep].value\n return (\n backend_model.storage_inter_cluster[node, tech, datestep]\n + backend_model.storage_intra_cluster_max[cluster, node, tech]\n <= backend_model.storage_cap[node, tech]\n )",
"def write_max_splits(io_stream):\n io_stream.write('value max_splits\\n1\\n')",
"def partition(attrs, df, partitions):\n if attrs in partitions:\n return partitions[attrs]\n shape = df.drop_duplicates(attrs).shape[0]\n partitions[attrs] = shape\n return shape",
"def constraints_max_offer_per_cust(n_row, n_col):\n constraints = np.identity(n_row * n_col)\n return constraints",
"def storage_upper_bound(index):\n i = index[0]\n return storage_para[i].pmax"
] |
[
"0.62912965",
"0.62024087",
"0.60331285",
"0.59018713",
"0.5811828",
"0.5743732",
"0.56790406",
"0.5608854",
"0.55901206",
"0.55634403",
"0.5531216",
"0.5530995",
"0.5518127",
"0.5492531",
"0.5490018",
"0.5482808",
"0.5481561",
"0.5477486",
"0.54766625",
"0.5462024",
"0.5441242",
"0.541653",
"0.541445",
"0.5410591",
"0.54077613",
"0.54029465",
"0.5399263",
"0.5396852",
"0.53959286",
"0.53911215"
] |
0.8459171
|
0
|
Convert a list to a singly linked list
|
def lstToLinkedList(lst):
if not lst: return
LinkedList = Node(lst[0])
LinkedList.next = lstToLinkedList(lst[1:])
return LinkedList
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_to_listnode(input_list):\n # initialize node, not to be included in the final result\n node = ListNode()\n # keep node at the beginning of the linked list\n temp = node\n\n if len(input_list) == 1:\n # if single node, return as it is\n node.next = ListNode(val=input_list[0], next=None)\n else:\n for i in input_list:\n current_node = ListNode(val=i)\n # next node is current node\n temp.next = current_node\n # move node to next node to build the linked list\n temp = temp.next\n # exclude the root node\n return node.next",
"def lstToLinkedList(lst):\n if not lst: return\n LinkedList = Node(lst[0])\n LinkedList.next = lstToLinkedList(lst[1:])\n return LinkedList",
"def create_linked_list(input_list):\n head=None\n for value in input_list:\n if head is None:\n head=Node(value)\n else:\n current_node=head\n while current_node.next:\n current_node=current_node.next\n current_node.next=Node(value)\n# printlist(head)\n# print('------')\n return head",
"def lstToLinkedList2(lst):\n \"\"\"Not So Smart Way\"\"\"\n if not lst: return\n tail = Node(lst[-1])\n for i0 in range(len(lst) - 2, -1, -1):\n fakehead = Node(lst[i0])\n fakehead.next = tail\n tail = fakehead\n return tail",
"def l1():\n head = l1 = ListNode(3)\n l1.next = ListNode(4)\n l1.next.next = ListNode(5)\n return head",
"def create_linked_list(input_list):\n\t\ttry:\n\t\t\thead = Node(input_list.pop(0)) #remove the first list item and return as its head\n\n\t\t\twhile (len(input_list)>0):\n\t\t\t\tcurrent_node = head\n\t\t\t\twhile current_node.next:\n\t\t\t\t\tcurrent_node = current_node.next\n\t\t\t\tcurrent_node.next = Node(input_list.pop(0))\n\n\t\texcept IndexError:\n\t\t\t\thead = None\n\t\treturn head",
"def python_2_linked(python_list):\n head = None\n for i in range(len(python_list) - 1, -1, -1):\n head = Node(python_list[i], head)\n return head",
"def from_list(L):\n n = None\n for i in xrange(len(L)-1, -1, -1):\n n = Node(x=L[i], nxt=n)\n return n",
"def from_list(L):\n n = None\n for i in xrange(len(L)-1, -1, -1):\n n = Node(x=L[i], nxt=n)\n return n",
"def create_list(nums):\n return_node = ListNode(int(nums[0]))\n prev_node = return_node\n for i in range(1, len(nums)):\n curr_node = ListNode(int(nums[i]))\n prev_node.next = curr_node\n prev_node = curr_node\n return return_node",
"def linkedLstToList(Llst):\n if not Llst: return []\n return [Llst.value] + linkedLstToList(Llst.next)",
"def fromList(cls, lst):\n head = None\n\n while lst:\n s = lst.pop()\n node = Node(s)\n node.next = head\n head = node\n return head",
"def create_linked_list(input_list):\n if len(input_list) == 0:\n return None\n head = create_node(input_list[0])\n currNode = head\n previousNode = None\n for i in range(1, len(input_list)):\n if currNode.value is not None:\n previousNode = currNode\n currNode = create_node(input_list[i])\n previousNode.next = currNode \n return head",
"def constructListNode(input_list):\n root = ListNode(0)\n curr = root\n for i in range(len(input_list)):\n curr.next = ListNode(input_list[i])\n curr = curr.next\n return root.next",
"def l2():\n head = l2 = ListNode(2)\n l2.next = ListNode(4)\n l2.next.next = ListNode(5)\n return head",
"def copy_list(node):\n curr = node\n map = OrderedDict()\n while curr is not None:\n if not map.get(curr, None):\n map[curr] = Node(curr.val)\n if curr.next and not map.get(curr.next, None):\n map[curr.next] = Node(curr.next.val)\n map[curr].next = map[curr.next]\n if curr.random and not map.get(curr.random, None):\n map[curr.random] = Node(curr.next.random)\n map[curr].random = map[curr.random]\n curr = curr.next\n display(node, next(iter(map)))",
"def __to_list__(self):\r\n out = []\r\n node = self.head\r\n while node:\r\n out.append(node.value)\r\n node = node.next\r\n return out",
"def linkedLstToList(Llst):\n if not Llst: return []\n return [Llst.value] + linkedLstToList(Llst.next)",
"def __init__(self, head: ListNode):\n self.head = head\n self.list = []\n while head:\n self.list.append(head.val)\n head = head.next",
"def binary_tree_to_list(head_node):\n pass",
"def __init__(self, head: ListNode):\n self.nodes = []\n\n while(head):\n self.nodes.append(head)\n head = head.next",
"def list_to_link(lst):\n \"*** YOUR CODE HERE ***\"\n #if lst == []:\n #return\n #elif len(lst) == 1:\n #return Link(lst[0])\n if lst == []:\n return Link.empty # This is great. Notice that you're calling list_to_link([])\n # within the call list_to_link([last_elem])\n else:\n return Link(lst[0], list_to_link(lst[1:])) # remember: you don't have to specify the end index\n # I want from index 1 until the end of the list",
"def create_node_list(values: list[int]) -> ListNode:\n head = ListNode(values[0])\n\n last_node = head\n for value in values[1:]:\n node = ListNode(value)\n last_node.next = node\n last_node = node\n\n return head",
"def simple_ll():\n ll = LinkedList()\n ll.push(20)\n ll.push(4)\n ll.push(15)\n ll.push(85)\n return ll",
"def __init__(self, head: ListNode):\n self.l = []\n while head:\n self.l.append(head.val)\n head = head.next",
"def intToList(num):\n root_node = ListNode(num % 10)\n curr_node = root_node\n num //= 10\n while num:\n next_node = ListNode(num % 10)\n curr_node.next = next_node\n curr_node = next_node\n num //= 10\n \n return root_node",
"def reorderList(self, head: ListNode) -> None:\n nodes = []\n curNode = head\n while curNode:\n nodes.append(curNode)\n curNode = curNode.next\n\n if len(nodes) <= 2:\n return head\n\n startIdx, endIdx = 0, len(nodes)-1\n while startIdx < endIdx:\n nodes[startIdx].next = nodes[endIdx]\n nodes[endIdx].next = nodes[startIdx+1]\n startIdx += 1\n endIdx -= 1\n\n if startIdx == endIdx:\n nodes[startIdx].next = None\n else:\n nodes[endIdx+1].next = None\n\n return head",
"def _make_list_islot_otuples_from_nodelist(self):\n raise NotImplementedError",
"def to_list(n):\n L = []\n while n is not None:\n L.append(n.x)\n n = n.next\n return L",
"def to_list(n):\n L = []\n while n is not None:\n L.append(n.x)\n n = n.next\n return L"
] |
[
"0.7689867",
"0.74998283",
"0.7171626",
"0.71129197",
"0.6927131",
"0.6882673",
"0.6817767",
"0.67541057",
"0.67541057",
"0.6740895",
"0.67166805",
"0.67076075",
"0.6686264",
"0.6672835",
"0.6657356",
"0.66057336",
"0.65983284",
"0.6571332",
"0.6513007",
"0.649571",
"0.64805424",
"0.6421419",
"0.64106506",
"0.6395665",
"0.6365861",
"0.63537735",
"0.62738174",
"0.6268471",
"0.6259445",
"0.6259445"
] |
0.7675289
|
1
|
Save DICOM images and images with masks overlaid on the corresponding images. All files will be saved under main_dir by creating a new folder for the contour type and then a folder for each patient separately.
|
def save_images(all_patients, contour_type='i_contour',
main_dir='final_data/images/'):
# create folder for contour_type
dirname = main_dir + f'{contour_type}/'
os.makedirs(dirname, exist_ok=True)
for patient in all_patients:
# create patient folders for saving
dirname = main_dir + f'{contour_type}/{patient.dicom_id}/'
os.makedirs(dirname, exist_ok=True)
# create numpy arrays for the patient
patient.create_numpy_arrays()
# loop over slices in numpy array dict
for slice_no in patient.all_numpy_dict:
slice_dict = patient.all_numpy_dict[slice_no]
# only show image for given contour type
if slice_dict[f'{contour_type}_array'] is not None:
img_array = slice_dict['dicom_array']
msk_array = slice_dict[f'{contour_type}_array']
show_img_msk_fromarray(img_array,
msk_array,
cmap='Wistia',
sz=10, alpha=0.7,
                                       save_path=dirname + f'slice_{slice_no}.png')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_patients(self):\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n for patient in os.listdir(self.root_dir):\n if \".csv\" in patient or \".md\" in patient:\n continue\n patient_pth = os.path.join(self.root_dir, patient)\n out_patient_pth = os.path.join(self.out_dir, patient)\n num_imgs = len(os.listdir(patient_pth)) // 2 # Half the length to exclude mask counts\n img_stack, msk_stack = self._stack_images_masks_flair(patient_pth, patient, num_imgs)\n if not os.path.exists(out_patient_pth):\n os.mkdir(out_patient_pth)\n self._make_slices(img_stack, msk_stack, patient, out_patient_pth)",
"def write_all_patients():\n\n data_dir = sys.argv[1]\n output_dir = sys.argv[2]\n\n imgs, i_msks, o_msks = load_all_patients(data_dir=data_dir)\n\n for idx, array in enumerate(imgs):\n np.save(output_dir+'/img_'+str(idx), array)\n for idx, array in enumerate(i_msks):\n np.save(output_dir+'/i_msk_'+str(idx), array)\n for idx, array in enumerate(o_msks):\n np.save(output_dir + '/o_msk_' + str(idx), array)\n\n return None",
"def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)",
"def save_imgs(self):\n print(\"Saving the images with required categories ...\")\n os.makedirs(self.imgs_dir, exist_ok=True)\n # Save the images into a local folder\n for im in tqdm(self.images):\n img_data = requests.get(im['coco_url']).content\n with open(os.path.join(self.imgs_dir, im['file_name']), 'wb') as handler:\n handler.write(img_data)",
"def save_for_dlc(self, imfolder, ext=\".png\", full_data=True, compress_level=9):\n # We don't allow for multiple experiments here\n cnt = 0\n self.camnames = self.camnames[0]\n warnings.warn(\n \"Note: generate_labels does not \\\n support multiple experiments at once. Converting camnames from dict to list\"\n )\n list_IDs_temp = self.list_IDs\n dsize = self.labels[list_IDs_temp[0]][\"data\"][self.camnames[0]].shape\n allcoords = np.zeros(\n (len(list_IDs_temp) * len(self.camnames), dsize[1], 3), dtype=\"int\"\n )\n fnames = []\n\n # Load in a sample so that size can be found when full_data=False\n camname = self.camnames[0]\n # TODO(refactor): Hard to read\n X = self.load_vid_frame(\n self.labels[list_IDs_temp[0]][\"frames\"][camname],\n camname,\n self.preload,\n self.extension,\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n\n for i, ID in enumerate(list_IDs_temp):\n for camname in self.camnames:\n if full_data:\n X = self.load_vid_frame(\n self.labels[ID][\"frames\"][camname],\n camname,\n self.preload,\n self.extension,\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n\n # Labels will now be the pixel positions of each joint.\n # Here, we convert them to probability maps with a numpy\n # meshgrid operation\n this_y = self.labels[ID][\"data\"][camname].copy()\n this_y[0, :] = this_y[0, :] - self.crop_width[0]\n this_y[1, :] = this_y[1, :] - self.crop_height[0]\n\n if self.downsample > 1:\n X = processing.downsample_batch(\n X[np.newaxis, :, :, :], fac=self.downsample, method=\"dsm\"\n )\n this_y = np.round(this_y / 2).astype(\"int\")\n if full_data:\n imageio.imwrite(\n imfolder + \"sample{}_\".format(ID) + camname + ext,\n X[0].astype(\"uint8\"),\n compress_level=compress_level,\n )\n else:\n if full_data:\n imageio.imwrite(\n imfolder + \"sample{}_\".format(ID) + camname + ext,\n X.astype(\"uint8\"),\n compress_level=compress_level,\n )\n\n allcoords[cnt, :, 0] = np.arange(dsize[1])\n allcoords[cnt, :, 1:] = this_y.T\n\n # TODO(os.path): This is unix-specific\n # These paths should be using AWS/UNIX only\n relpath = imfolder.split(os.sep)[-2]\n relpath = (\n \"..\"\n + os.sep\n + relpath\n + os.sep\n + \"sample{}_\".format(ID)\n + camname\n + ext\n )\n fnames.append(relpath)\n\n cnt = cnt + 1\n\n sio.savemat(\n imfolder + \"allcoords.mat\",\n {\n \"allcoords\": allcoords,\n \"imsize\": [X.shape[-1], X.shape[0], X.shape[1]],\n \"filenames\": fnames,\n },\n )",
"def save_external_right(data_input_dir, image_save_path):\n\n create_dir(image_save_path + '/train/ECAR')\n create_dir(image_save_path + '/test/ECAR')\n\n test_patience=[\"P429\"] # [P887,P891,P429,P438,P125,P530]\n\n for casei in os.listdir(data_input_dir): # 处理每个样本\n pi = casei.split(\"_\")[1]\n\n if pi in test_patience:\n continue\n\n print(data_input_dir + '/' + casei)\n dcm_img = read_dicom(data_input_dir + \"/\" + casei)\n # print(\"Dcm shape: \", dcm_img.shape)\n\n for arti in [\"ECAR\"]: # 处理外部左边\n cas_dir = data_input_dir + \"/\" + casei + \"/CASCADE-\" + arti\n qvs_path = cas_dir + \"/E\" + pi + \"S101_L.QVS\"\n qvsroot = ET.parse(qvs_path).getroot()\n\n if pi in [\"P556\", \"P576\", \"P887\"]:\n avail_slices = list_contour_slices_min(qvsroot) # 正样本+标签:slices\n else:\n avail_slices = list_contour_slices(qvsroot)\n\n print(\"case\", pi, \"art\", arti, \"avail_slices\", avail_slices)\n\n if len(avail_slices): # 只有当该位置的样本存在时候,才保存对应的正负样本,internal必定存在,external有可能不存在\n lower_index = min(avail_slices) # 正样本下界\n upper_index = max(avail_slices) # 正样本上界\n\n pos_slice = list(range(lower_index, upper_index + 1)) # 所有正样本区间\n pos_not_avail = list(set(pos_slice).difference(avail_slices)) # 正样本+没有标签:slices\n\n neg_slice = list(range(200, lower_index))\n neg_slice1 = list(range(upper_index + 1, 400))\n neg_slice.extend(neg_slice1) # 负样本区间\n\n for index in avail_slices: # 存储:正样本+有标签:样本\n img_save = dcm_img[280:440, 110:610, index]\n # print(img_save.shape)\n\n np.save(image_save_path + '/train/' + arti + '/' + pi + '_' + arti + '_' + str(\n index) + '_posLabel_.npy',\n img_save)\n\n fake_mask_index = [] # 寻找最近的伪标签\n for index in pos_not_avail: # 找出没有标签的正样本,对用的fake label的index\n close_index = 1000\n best_index = -1\n for idx in avail_slices:\n if abs(index - idx) < close_index:\n close_index = abs(index - idx)\n best_index = idx\n fake_mask_index.append(best_index)\n\n for index in range(len(pos_not_avail)): # 存储正样本+没有标签\n img_save = dcm_img[280:440, 110:610, pos_not_avail[index]]\n np.save(image_save_path + '/train/' + arti + '/' + pi + '_' + arti + '_' + str(\n pos_not_avail[index]) + '_' + str(fake_mask_index[index]) + '_posUnLabel_.npy',\n img_save)\n\n for index in neg_slice: # 存储负样本\n img_save = dcm_img[280:440, 110:610, index]\n np.save(image_save_path + '/train/' + arti + '/' + pi + '_' + arti + '_' + str(\n index) + '_negLabel_.npy',\n img_save)\n\n for casei in os.listdir(data_input_dir): # 处理每个样本\n pi = casei.split(\"_\")[1]\n\n if pi not in test_patience:\n continue\n\n print(data_input_dir + '/' + casei)\n dcm_img = read_dicom(data_input_dir + \"/\" + casei)\n # print(\"Dcm shape: \", dcm_img.shape)\n\n for arti in [\"ECAR\"]: # 处理外部左边\n cas_dir = data_input_dir + \"/\" + casei + \"/CASCADE-\" + arti\n qvs_path = cas_dir + \"/E\" + pi + \"S101_L.QVS\"\n qvsroot = ET.parse(qvs_path).getroot()\n\n if pi in [\"P556\", \"P576\", \"P887\"]:\n avail_slices = list_contour_slices_min(qvsroot) # 正样本+标签:slices\n else:\n avail_slices = list_contour_slices(qvsroot)\n\n print(\"case\", pi, \"art\", arti, \"avail_slices\", avail_slices)\n\n if len(avail_slices): # 只有当该位置的样本存在时候,才保存对应的正负样本,internal必定存在,external有可能不存在\n lower_index = min(avail_slices) # 正样本下界\n upper_index = max(avail_slices) # 正样本上界\n\n pos_slice = list(range(lower_index, upper_index + 1)) # 所有正样本区间\n pos_not_avail = list(set(pos_slice).difference(avail_slices)) # 正样本+没有标签:slices\n\n neg_slice = list(range(200, lower_index))\n neg_slice1 = list(range(upper_index + 1, 400))\n neg_slice.extend(neg_slice1) # 负样本区间\n\n for index in 
avail_slices: # 存储:正样本+有标签:样本\n img_save = dcm_img[280:440, 110:610, index]\n # print(img_save.shape)\n\n np.save(image_save_path + '/test/' + arti + '/' + pi + '_' + arti + '_' + str(\n index) + '_posLabel_.npy',\n img_save)\n\n fake_mask_index = [] # 寻找最近的伪标签\n for index in pos_not_avail: # 找出没有标签的正样本,对用的fake label的index\n close_index = 1000\n best_index = -1\n for idx in avail_slices:\n if abs(index - idx) < close_index:\n close_index = abs(index - idx)\n best_index = idx\n fake_mask_index.append(best_index)\n\n for index in range(len(pos_not_avail)): # 存储正样本+没有标签\n img_save = dcm_img[280:440, 110:610, pos_not_avail[index]]\n np.save(image_save_path + '/test/' + arti + '/' + pi + '_' + arti + '_' + str(\n pos_not_avail[index]) + '_' + str(fake_mask_index[index]) + '_posUnLabel_.npy',\n img_save)\n\n for index in neg_slice: # 存储负样本\n img_save = dcm_img[280:440, 110:610, index]\n np.save(image_save_path + '/test/' + arti + '/' + pi + '_' + arti + '_' + str(\n index) + '_negLabel_.npy',\n img_save)",
"def save_step_4(imgs, output_path=\"./output/step4\"):\n # ... your code here ...\n cv2.imwrite(output_path+\"/output.jpg\", imgs)",
"def convert_dataset(src_dir, dest_dir):\n subdirs = get_subdirs(src_dir)\n detector = dlib.simple_object_detector(MODEL_PATH)\n for img_dir in tqdm(subdirs):\n\tprint(img_dir)\n jpegs = get_img_paths_in_dir(img_dir)\n target_dir = dest_dir + img_dir.split('/')[-1]\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n for src_path in jpegs:\n target_path = target_dir + '/' + src_path.split('/')[-1]\n img = io.imread(src_path)\n dets = detector(img)\n bounding_boxes = get_bounding_boxes(dets)\n if bounding_boxes:\n square_box = find_square_box(bounding_boxes[0])\n if is_valid(square_box, img):\n box = bounding_boxes[0]\n square_box = find_square_box(box)\n cropped_img = crop_frame(img, square_box)\n PIL_img = PIL.Image.fromarray(cropped_img)\n resized_img = PIL_img.resize((54,54), PIL.Image.BILINEAR)\n\t\t resized_img.save(target_path)\n print(target_path)\n # grey_img = resized_img.convert('L')\n # grey_img.save(target_path)",
"def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)",
"def save_test_images(images):\n for description, img in images.items():\n save_to_image(img, description)\n save_to_netcdf(img, description)",
"def write_analysis(path, dataset_dict, datasettype, mask_part, start_time, supervised=True):\n for mask_el in mask_part:\n if mask_el == 'podocytes':\n filename = datasettype + '_podos.txt'\n filestr = 'podos images'\n elif mask_el == 'glomerulus':\n filename = datasettype + '_gloms.txt'\n filestr = 'gloms images'\n else:\n filename = datasettype + 'unknown.txt'\n filestr = 'unknown type'\n\n write_txt = open(str(os.path.join(path, filename)), \"w\")\n\n if supervised:\n dc_mean = np.sum(np.array(dataset_dict['dice_coeffs_%s' % mask_el])) / len(dataset_dict['dice_coeffs_%s'\n % mask_el])\n dc_min = np.min(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))\n dc_max = np.max(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))\n object_dc_mean = np.sum(np.array(dataset_dict['object_dc_%s' % mask_el])) / len(dataset_dict['object_dc_%s'\n % mask_el])\n object_dc_min = np.min(np.array(dataset_dict['object_dc_%s' % mask_el]))\n object_dc_max = np.max(np.array(dataset_dict['object_dc_%s' % mask_el]))\n pearson = calculate_pearson(dataset_dict['count_masks_%s' % mask_el], dataset_dict['count_preds_%s'\n % mask_el])\n\n write_txt.write(str(\"Mean dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_mean) + '\\n')\n write_txt.write(str(\"Min dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_min) + '\\n')\n write_txt.write(str(\"Max dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_max) + '\\n')\n write_txt.write(str(\"Pearson correlation coefficient on objects of \" + filestr +\n \" compared to groundtruth: \") + str(pearson) + '\\n')\n write_txt.write(str(\"Mean dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_mean) + '\\n')\n write_txt.write(str(\"Min dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_min) + '\\n')\n write_txt.write(str(\"Max dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_max) + '\\n')\n write_txt.write('\\n')\n\n duration = time.time() - start_time\n duration_std = int(duration / 3600)\n duration_min = int((duration % 3600) / 60)\n duration_sec = int(duration % 60)\n\n write_txt.write(str(\"Test time: \") + str(duration_std) + \"h \" + str(duration_min)\n + \"min \" + str(duration_sec) + 'sec \\n')\n write_txt.close()\n return",
"def save_images(unique_class_names, root_folder_to_save_images, img_names, y, original_images, perturbed_images):\n original_images = original_images / np.max(original_images)\n perturbed_images = perturbed_images / np.max(perturbed_images)\n\n if not os.path.isdir(root_folder_to_save_images):\n os.makedirs(root_folder_to_save_images, exist_ok=True)\n for class_names in unique_class_names:\n perturbed_images_save_path = os.path.join(root_folder_to_save_images, class_names, 'perturbed')\n original_images_save_path = os.path.join(root_folder_to_save_images, class_names, 'original')\n if not os.path.isdir(perturbed_images_save_path):\n os.makedirs(perturbed_images_save_path, exist_ok=True)\n if not os.path.isdir(original_images_save_path):\n os.makedirs(original_images_save_path, exist_ok=True)\n\n for name_of_image, label, original_image, adversarial_image in zip(img_names, y, original_images, perturbed_images):\n absolute_path_perturbed_image = os.path.join(root_folder_to_save_images, label, 'perturbed', name_of_image)\n absolute_path_orig_image = os.path.join(root_folder_to_save_images, label, 'original', name_of_image)\n perturbed_image = adversarial_image.copy()\n mp_img.imsave(absolute_path_orig_image, original_image)\n mp_img.imsave(absolute_path_perturbed_image, perturbed_image)",
"def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for 
labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)",
"def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')",
"def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)",
"def save_step_1(imgs, output_path='./output/step1'):\n # ... your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)",
"def save_images(d, image_dir='images/'):\n for (lkey, rkey) in zip(d.left, d.right):\n cv2.imwrite(image_dir+lkey+\"_left.png\", d.left[lkey])\n cv2.imwrite(image_dir+rkey+\"_right.png\", d.right[rkey])",
"def save_as_png(path):\r\n for _, _, filename in walk(path):\r\n for f in filename:\r\n medical_image = pydicom.dcmread(path + f)\r\n shape = medical_image.pixel_array.shape\r\n # Convert to float to avoid overflow or underflow losses\r\n brain_image = medical_image.pixel_array.astype(float)\r\n # Rescaling grey scale between 0-255\r\n scaled_image = (np.maximum(brain_image, 0) / brain_image.max()) * 255.0\r\n # Convert to uint\r\n scaled_image = np.uint8(scaled_image)\r\n # Write the PNG file\r\n with open(f'{path}png/{f.strip(\".dcm\")}.png', 'wb') as png_file:\r\n w = png.Writer(shape[1], shape[0], greyscale=True)\r\n w.write(png_file, scaled_image)",
"def main(vis_dirs, outdir):\n assert len(vis_dirs) == 4\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n for i, filename in enumerate(tqdm(os.listdir(vis_dirs[-1]))):\n # if i % 100 == 0:\n # print(i)\n\n files = [os.path.join(vis_dir, filename) for vis_dir in vis_dirs]\n outimg = os.path.join(outdir, filename)\n merge_four_images(files, outimg)\n\n print (\"Finished! Result dir is %s\" % outdir)",
"def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn 
= []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts",
"def save_crops(self, workspace):\n objects_name = self.objects_name.value\n objects = workspace.object_set.get_objects(objects_name)\n bit_depth = self.bit_depth.value\n if self.input_type == IF_IMAGE:\n image_name = self.image_name.value\n image = workspace.image_set.get_image(image_name)\n pixels = image.pixel_data\n elif self.input_type == IF_OBJECTS:\n obj_name = self.input_object_name.value\n inp_obj = workspace.object_set.get_objects(obj_name)\n pixels = inp_obj.get_segmented()\n else:\n raise (\"invalid choice of input\")\n\n filename = self.get_filename(workspace)\n object_extension = self.object_extension.value\n if filename is None: # failed overwrite check\n return\n\n slices = ndi.find_objects(objects.segmented)\n slices, labels = zip(\n *[(s, label) for label, s in enumerate(slices) if s is not None]\n )\n\n ext_slices = [\n self._extend_slice_touple(\n sl, object_extension, [pixels.shape[0], pixels.shape[1]]\n )\n for sl in slices\n ]\n out_folder = os.path.dirname(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n # the stack for imctools needs to be cxy, while it is xyc in cp\n if len(pixels.shape) == 2:\n stack = pixels.reshape([1] + list(pixels.shape))\n else:\n stack = np.rollaxis(pixels, 2, 0)\n\n # fix the dtype\n if bit_depth == BIT_DEPTH_8:\n stack = skimage.util.img_as_ubyte(stack)\n elif bit_depth == BIT_DEPTH_16:\n stack = skimage.util.img_as_uint(stack)\n elif bit_depth == BIT_DEPTH_FLOAT:\n stack = skimage.util.img_as_float(stack).astype(np.float32)\n\n self._save_object_stack(out_folder, basename, stack, ext_slices, labels)\n self.save_filename_measurements(workspace)\n if self.show_window:\n workspace.display_data.wrote_image = True",
"def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)",
"def make_processed_directories(zone, region, zoom_level = 19, image_size = 256):\n os.system(f'mkdir ../../data/processed/images-{image_size}-{region}-{zone}-{zoom_level}')\n os.system(f'mkdir ../../data/processed/masks-{image_size}-{region}-{zone}-{zoom_level}')\n img_path = f'../../data/processed/images-{image_size}-{region}-{zone}-{zoom_level}'\n mask_path = f'../../data/processed/masks-{image_size}-{region}-{zone}-{zoom_level}'\n return img_path, mask_path",
"def main(root_dir):\n # load annotations\n print('Loading instances and annotations...')\n captions_file = json.load(open('{}/annotations/captions_train2017.json'.format(root_dir), 'r'))\n categories_file = json.load(open('{}/annotations/instances_train2017.json'.format(root_dir), 'r'))\n print('Done.')\n\n # group categories by image\n image_categories = group_categories(categories_file)\n\n # group captions by image\n image_captions = group_captions(captions_file['annotations'])\n\n # get filename of each image\n image_file = get_filename(captions_file['images'])\n\n # assign each category an id.\n # we are not using the default ids given in the dataset because\n # the id ranges are not continuous.\n category_id, id_category = map_category_id(categories_file['categories'])\n \n # save parsed coco dataset\n save_dataset(image_categories, image_captions, image_file, category_id, id_category, root_dir)",
"def __init__(self, _dicom_dir, _contour_dir, inner_outer='inner'):\n\n self.data = []\n\n #list of indices for labeled masks\n self.labeled = []\n\n # iterate through all .dcm in the patient's dicom folder\n dicom_files = os.listdir(_dicom_dir)\n for dicom_file in dicom_files:\n if not dicom_file.endswith('.dcm'):\n pass\n else:\n idx = int(dicom_file[:-4])\n\n dcm = self.load_dicoms(_dicom_dir + \"/\" + dicom_file)\n height, width = dcm.shape\n\n ctr_i_b = None\n ctr_o_b = None\n if inner_outer == 'inner':\n ctr_i_b = self.load_contours(_contour_dir+'/i-contours/'+\n 'IM-0001-{:04d}-icontour-manual.txt'.format(idx), width, height)\n elif inner_outer == 'outer':\n ctr_o_b = self.load_contours(_contour_dir+'/o-contours/'+\n 'IM-0001-{:04d}-ocontour-manual.txt'.format(idx), width, height)\n elif inner_outer == 'both':\n ctr_i_b = self.load_contours(_contour_dir+'/i-contours/'+\n 'IM-0001-{:04d}-icontour-manual.txt'.format(idx), width, height)\n ctr_o_b = self.load_contours(_contour_dir+'/o-contours/'+\n 'IM-0001-{:04d}-ocontour-manual.txt'.format(idx), width, height)\n else:\n ctr_i_b = None\n ctr_o_b = None\n\n # if either inner or outer exists for given idx, add to self.labeled\n if ctr_i_b is not None:\n self.labeled.append(idx)\n elif ctr_o_b is not None:\n self.labeled.append(idx)\n\n self.data.append((idx, dcm, ctr_i_b, ctr_o_b))",
"def create_dataset(img_rows=128, img_cols=128):\n print('Creating original dataset from the raw data')\n # first, get the patients directory names located in the data/ directory. These names (e.g. 'patient0001') will\n # be used for indexing (also avoid hidden files & folders)\n patients = [name for name in os.listdir(os.path.join(os.curdir, 'data/')) if not name.startswith('.')]\n\n # We sort this list to get the patients id in increasing order\n patients.sort(key=lambda s: s[-3:]) # sort according to last 3 characters\n\n # create an empty numpy.ndarray which will contain the images (resized to (img_rows, img_cols))\n images = np.ndarray((2 * len(patients), img_rows, img_cols), dtype=np.uint8) # 2 images per patient\n masks = np.ndarray((2 * len(patients), img_rows, img_cols), dtype=np.uint8) # 2 masks per patient\n\n # we now go through each patient's directory :\n idx = 0\n for patient in patients:\n\n for phase in ['ED', 'ES']:\n\n # read image & mask\n img, _, _, _ = load_mhd_data('data/{pa}/{pa}_4CH_{ph}.mhd'.format(pa=patient, ph=phase))\n mask, _, _, _ = load_mhd_data('data/{pa}/{pa}_4CH_{ph}_gt.mhd'.format(pa=patient, ph=phase))\n\n # resize the img & the mask to (img_rows, img_cols) to keep the network input manageable\n img = resize(img, (img_cols, img_rows), mode='reflect', preserve_range=True)\n mask = resize(mask, (img_cols, img_rows), mode='reflect', preserve_range=True)\n\n # now, save the resized image to the images np.ndarray\n images[idx] = img\n\n # save the corresponding mask to masks np.ndarray (at the same index)\n masks[idx] = mask\n\n idx += 1\n\n print('Created 2 np.ndarrays containing images & masks.')\n\n # Create directory to store files.\n directory = os.path.join(os.getcwd(), 'output/processed_data/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # save all ndarrays to a .npy files (for faster loading later)\n np.save('output/processed_data/images.npy', images)\n np.save('output/processed_data/masks.npy', masks)\n print('Saving to .npy files done: see files\\noutput/processed_data/images.npy & \\noutput/processed_data/masks.npy.')",
"def decompose_images(data_dir, output_dir, save_individually, **kwargs):\n\n # parse parameters\n opt = TestOptions()\n opt.parse(kwargs)\n # print(kwargs)\n\n # torch setting\n pytorch_settings.set_(with_random=False, determine=True)\n\n # visualize\n V.create_a_visualizer(opt)\n\n # NIID-Net Manager\n model = create_model(opt)\n model.switch_to_eval()\n\n # List all image files in the directory (exclude subdirectory)\n image_file_list = list_files(data_dir, ['jpg', 'jpeg', 'png', 'tif', 'JPG'])\n print('Total image in the directory %s: %d' % (data_dir, len(image_file_list)))\n\n # Decompose images\n for file_name in image_file_list:\n # Read image\n img_path = os.path.join(data_dir, file_name)\n o_img = Image.open(img_path)\n o_img = o_img.convert(\"RGB\")\n\n # Resize input image\n # input_img = resize_image(o_img)\n input_img = o_img\n\n # Predict\n input_img = TF.to_tensor(input_img).unsqueeze(0)\n pred_N, pred_R, pred_L, pred_S, rendered_img = model.predict({'input_srgb': input_img}, normal=True, IID=True)\n\n # Save results\n idx = 0\n pred_imgs = {\n 'pred_N': pred_N[idx].cpu(),\n 'pred_R': pred_R[idx].cpu(),\n 'pred_L': pred_L[idx].cpu(),\n 'pred_S': pred_S[idx].cpu(),\n 'rendered_img': rendered_img[idx].cpu(),\n 'input_srgb': input_img[idx],\n }\n f = '%s_decomposed' % (file_name[:file_name.rfind('.')])\n image_util.save_intrinsic_images(output_dir, pred_imgs, label=f, individual=save_individually)\n torch.save(pred_imgs, os.path.join(output_dir, f+'.pth.tar'))\n print('Decompose %s successfully!' % file_name)",
"def reconstruct_folder(data_root_paths, pixel_size, na, emission_wavelengths, excitation_wavelengths,\n affine_data_paths, otf_data_fname, dmd_pattern_data_fpath,\n channel_inds=None, crop_image=False, img_centers=None,\n crop_sizes=None, use_scmos_cal=False, scmos_calibration_file=None, widefield_only=False,\n nangles=3, nphases=3, npatterns_ignored=0, saving=True,\n zinds_to_use=None, tinds_to_use=None, xyinds_to_use=None,\n save_tif_stack=True, **kwargs):\n\n nfolders = len(data_root_paths)\n if nfolders == 0:\n raise ValueError(\"No folder paths were provided.\")\n\n ncolors = len(emission_wavelengths)\n if ncolors == 0:\n raise ValueError(\"No wavelength channels were provided.\")\n\n if channel_inds is None:\n channel_inds = list(range(ncolors))\n\n # ensure crop_sizes is a list the same size as number of folders\n if not isinstance(crop_sizes, list):\n crop_sizes = [crop_sizes]\n\n if len(crop_sizes) == 1 and nfolders > 1:\n crop_sizes = crop_sizes * nfolders\n\n if len(img_centers) == 1 and nfolders > 1:\n img_centers = img_centers * nfolders\n\n # ############################################\n # load affine data\n # ############################################\n affine_xforms = []\n for p in affine_data_paths:\n with open(p, 'rb') as f:\n affine_xforms.append(pickle.load(f)['affine_xform'])\n\n # ############################################\n # load DMD patterns frequency and phase data\n # ############################################\n frqs_dmd = np.zeros((ncolors, nangles, 2))\n phases_dmd = np.zeros((ncolors, nangles, nphases))\n for kk in range(ncolors):\n ppath = dmd_pattern_data_fpath[kk]\n xform = affine_xforms[kk]\n\n with open(ppath, 'rb') as f:\n pattern_data = pickle.load(f)\n\n # DMD intensity frequency and phase (twice electric field frq/phase)\n frqs_dmd[kk] = 2 * pattern_data['frqs']\n phases_dmd[kk] = 2 * pattern_data['phases']\n dmd_nx = pattern_data['nx']\n dmd_ny = pattern_data['ny']\n\n # ############################################\n # load OTF data\n # ############################################\n with open(otf_data_fname, 'rb') as f:\n otf_data = pickle.load(f)\n otf_p = otf_data['fit_params']\n\n if len(otf_p) == 1:\n otf_fn = lambda f, fmax: 1 / (1 + (f / fmax * otf_p[0]) ** 2) * \\\n psf.circ_aperture_otf(f, 0, na, 2 * na / fmax)\n else:\n otf_fn = lambda f, fmax: 1 / (\n 1 + (f / fmax * otf_p[0]) ** 2 + (f / fmax * otf_p[1]) ** 4 + (f / fmax * otf_p[2]) ** 6 +\n (f / fmax * otf_p[3]) ** 8) * psf.circ_aperture_otf(f, 0, na, 2 * na / fmax)\n # ############################################\n # load camera calibration file, if we need it\n # ############################################\n if use_scmos_cal:\n with open(scmos_calibration_file, 'rb') as f:\n data = pickle.load(f)\n gain_map = data['gains']\n offsets = data['offsets']\n #varmap = data['vars']\n\n # ############################################\n # SIM images\n # ############################################\n if not crop_image:\n crop_sizes = [np.nan] * len(data_root_paths)\n img_centers = [[np.nan, np.nan]] * len(data_root_paths)\n\n for rpath, crop_size, img_center in zip(data_root_paths, crop_sizes, img_centers):\n folder_path, folder = os.path.split(rpath)\n print(\"# ################################################################################\")\n print(\"analyzing folder: %s\" % folder)\n print(\"located in: %s\" % folder_path)\n\n tstamp = tools.get_timestamp()\n # path to store processed results\n if saving:\n sim_results_path = os.path.join(rpath, '%s_sim_reconstruction' % tstamp)\n 
if not os.path.exists(sim_results_path):\n os.mkdir(sim_results_path)\n print(\"save directory: %s\" % sim_results_path)\n\n # copy useful data files here\n for kk in range(ncolors):\n # copy affine data here\n _, fname = os.path.split(affine_data_paths[kk])\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(affine_data_paths[kk], fpath)\n\n # copy otf data here\n _, fname = os.path.split(otf_data_fname)\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(otf_data_fname, fpath)\n\n # copy DMD pattern data here\n _, fname = os.path.split(dmd_pattern_data_fpath[kk])\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(dmd_pattern_data_fpath[kk], fpath)\n\n # load metadata\n metadata, dims, summary = tools.parse_mm_metadata(rpath)\n start_time = datetime.datetime.strptime(summary['StartTime'], '%Y-%d-%m;%H:%M:%S.%f')\n nz = dims['z']\n nxy = dims['position']\n nt = dims['time']\n\n # use this construction as zinds can be different for different folders\n if zinds_to_use is None:\n zinds_to_use_temp = range(nz)\n else:\n zinds_to_use_temp = zinds_to_use\n nz_used = len(zinds_to_use_temp)\n\n if tinds_to_use is None:\n tinds_to_use_temp = range(nt)\n else:\n tinds_to_use_temp = tinds_to_use\n nt_used = len(tinds_to_use_temp)\n\n if xyinds_to_use is None:\n xyinds_to_use_temp = range(nxy)\n else:\n xyinds_to_use_temp = xyinds_to_use\n nxy_used = len(xyinds_to_use_temp)\n\n if pixel_size is None:\n pixel_size = metadata['PixelSizeUm'][0]\n\n # set up image size\n # load one file to check size\n fname = os.path.join(rpath, metadata['FileName'].values[0])\n im, _ = tools.read_tiff(fname, [metadata['ImageIndexInFile'].values[0]])\n _, ny_raw, nx_raw = im.shape\n if crop_image:\n # or pick ROI\n roi = tools.get_centered_roi(img_center, [crop_size, crop_size])\n\n # check points don't exceed image size\n if roi[0] < 0:\n roi[0] = 0\n if roi[1] > ny_raw:\n roi[1] = ny_raw\n if roi[2] < 0:\n roi[2] = 0\n if roi[3] > nx_raw:\n roi[3] = nx_raw\n else:\n roi = [0, ny_raw, 0, nx_raw]\n\n ny = roi[1] - roi[0]\n nx = roi[3] - roi[2]\n\n # arrays to save results\n imgs_sr = []\n imgs_os = []\n imgs_wf = []\n imgs_deconvolved = []\n counter = 1\n for kk in range(ncolors):\n sim_options = {'pixel_size': pixel_size, 'wavelength': emission_wavelengths[kk], 'na': na}\n\n # estimate otf\n fmax = 1 / (0.5 * emission_wavelengths[kk] / na)\n fx = tools.get_fft_frqs(nx, sim_options['pixel_size'])\n fy = tools.get_fft_frqs(ny, sim_options['pixel_size'])\n ff = np.sqrt(fx[None, :] ** 2 + fy[:, None] ** 2)\n otf = otf_fn(ff, fmax)\n otf[ff >= fmax] = 0\n\n # guess frequencies/phases\n frqs_guess = np.zeros((nangles, 2))\n phases_guess = np.zeros((nangles, nphases))\n for ii in range(nangles):\n for jj in range(nphases):\n # estimate frequencies based on affine_xform\n frqs_guess[ii, 0], frqs_guess[ii, 1], phases_guess[ii, jj] = \\\n affine.xform_sinusoid_params_roi(frqs_dmd[kk, ii, 0], frqs_dmd[kk, ii, 1],\n phases_dmd[kk, ii, jj], [dmd_ny, dmd_nx], roi, xform)\n\n # convert from 1/mirrors to 1/um\n frqs_guess = frqs_guess / pixel_size\n\n # analyze pictures\n for ii in tinds_to_use_temp:\n for bb in xyinds_to_use_temp:\n for aa in zinds_to_use_temp:\n tstart = time.process_time()\n\n identifier = \"%.0fnm_nt=%d_nxy=%d_nz=%d\" % (excitation_wavelengths[kk] * 1e3, ii, bb, aa)\n file_identifier = \"nc=%d_nt=%d_nxy=%d_nz=%d\" % (kk, ii, bb, aa)\n\n # where we will store results for this particular set\n if not widefield_only:\n sim_diagnostics_path = os.path.join(sim_results_path, 
identifier)\n if not os.path.exists(sim_diagnostics_path):\n os.mkdir(sim_diagnostics_path)\n\n # find images and load them\n raw_imgs = tools.read_dataset(metadata, z_indices=aa, xy_indices=bb, time_indices=ii,\n user_indices={\"UserChannelIndex\": channel_inds[kk],\n \"UserSimIndex\": list(range(npatterns_ignored, npatterns_ignored + nangles * nphases))})\n\n # error if we have wrong number of images\n if np.shape(raw_imgs)[0] != (nangles * nphases):\n raise ValueError(\"Found %d images, but expected %d images at channel=%d,\"\n \" zindex=%d, tindex=%d, xyindex=%d\" % (\n np.shape(raw_imgs)[0], nangles * nphases,\n channel_inds[kk], aa, ii, bb))\n\n # optionally convert from ADC to photons\n # todo: not very useful to do this way...\n if use_scmos_cal:\n imgs_sim = camera_noise.adc2photons(raw_imgs, gain_map, offsets)\n else:\n imgs_sim = raw_imgs\n\n # reshape to [nangles, nphases, ny, nx]\n imgs_sim = imgs_sim.reshape((nangles, nphases, raw_imgs.shape[1], raw_imgs.shape[2]))\n imgs_sim = imgs_sim[:, :, roi[0]:roi[1], roi[2]:roi[3]]\n\n # instantiate reconstruction object\n r = SimImageSet(sim_options, imgs_sim, frqs_guess, phases_guess=phases_guess, otf=otf,\n save_dir=sim_diagnostics_path, **kwargs)\n\n # if not saving stack, maybe want to handle in class?\n if saving and not save_tif_stack:\n fname = os.path.join(sim_results_path, \"sim_os_%s.tif\" % file_identifier)\n tools.save_tiff(r.imgs_os, fname, dtype='float32', datetime=start_time)\n\n fname = os.path.join(sim_results_path, \"widefield_%s.tif\" % file_identifier)\n tools.save_tiff(r.widefield, fname, dtype='float32', datetime=start_time)\n else:\n # store widefield and os\n imgs_os.append(r.imgs_os)\n imgs_wf.append(r.widefield)\n\n if not widefield_only:\n # do reconstruction\n r.reconstruct()\n r.plot_figs()\n\n if saving and not save_tif_stack:\n fname = os.path.join(sim_results_path, \"sim_sr_%s.tif\" % file_identifier)\n tools.save_tiff(r.img_sr, fname, dtype='float32', datetime=start_time)\n\n fname = os.path.join(sim_results_path, \"deconvolved_%s.tif\" % file_identifier)\n tools.save_tiff(r.widefield_deconvolution, fname, dtype='float32', datetime=start_time)\n else:\n # store sr and deconvolved\n imgs_sr.append(r.img_sr)\n imgs_deconvolved.append(r.widefield_deconvolution)\n\n # save reconstruction summary data\n r.save_result(os.path.join(sim_diagnostics_path, \"sim_reconstruction_params.pkl\"))\n\n tend = time.process_time()\n print(\"%d/%d from %s in %0.2fs\" % (counter, ncolors * nt_used * nxy_used * nz_used, folder, tend - tstart))\n\n counter += 1\n\n # #################################\n # save data for all reconstructed files\n # #################################\n if saving and save_tif_stack:\n\n # todo: want to include metadata in tif.\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'widefield.tif'))\n imgs_wf = np.asarray(imgs_wf)\n wf_to_save = np.reshape(imgs_wf, [ncolors, nt_used, nz_used, imgs_wf[0].shape[-2], imgs_wf[0].shape[-1]])\n tools.save_tiff(wf_to_save, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'sim_os.tif'))\n imgs_os = np.asarray(imgs_os)\n sim_os = np.reshape(imgs_os, [ncolors, nt_used, nz_used, imgs_os[0].shape[-2], imgs_os[0].shape[-1]])\n tools.save_tiff(sim_os, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n if not widefield_only:\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'sim_sr.tif'))\n imgs_sr = 
np.asarray(imgs_sr)\n sim_to_save = np.reshape(imgs_sr, [ncolors, nt_used, nz_used, imgs_sr[0].shape[-2], imgs_sr[0].shape[-1]])\n tools.save_tiff(sim_to_save, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'deconvolved.tif'))\n imgs_deconvolved = np.asarray(imgs_deconvolved)\n deconvolved_to_save = np.reshape(imgs_deconvolved, [ncolors, nt_used, nz_used, imgs_deconvolved[0].shape[-2],\n imgs_deconvolved[0].shape[-1]])\n tools.save_tiff(deconvolved_to_save, fname, dtype='float32', axes_order='CTZYX', hyperstack=True,\n datetime=start_time)\n\n return imgs_sr, imgs_wf, imgs_deconvolved, imgs_os",
"def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))",
"def create_demo_dcm_data(dcm_dir):\n pet_fname = os.path.join(os.path.dirname(__file__), 'data', 'brainweb_06_osem.nii')\n mr_fname = os.path.join(os.path.dirname(__file__), 'data', 'brainweb_06_t1.nii')\n \n pet, pet_affine = flip_ras_lps(*load_nii_in_ras(pet_fname))\n mr, mr_affine = flip_ras_lps(*load_nii_in_ras(mr_fname))\n\n os.mkdir(dcm_dir)\n write_3d_static_dicom(pet, os.path.join(dcm_dir, 'PT'), pet_affine, modality = 'PT')\n write_3d_static_dicom(mr, os.path.join(dcm_dir, 'MR'), mr_affine, modality = 'MR')"
] |
[
"0.62861353",
"0.616862",
"0.6056372",
"0.59437436",
"0.59157383",
"0.58623785",
"0.5839377",
"0.5777538",
"0.5696017",
"0.5684696",
"0.56278366",
"0.5573473",
"0.55719286",
"0.5565964",
"0.55530864",
"0.5543438",
"0.5541765",
"0.5523903",
"0.5523742",
"0.5514894",
"0.55092824",
"0.55091006",
"0.5506386",
"0.55001605",
"0.5464847",
"0.54600054",
"0.54538274",
"0.54533476",
"0.54505676",
"0.5446187"
] |
0.7806346
|
0
|
Creates a new JSON based on the return parameters specified by the user. If no return parameters are specified, it still runs through to clean any URL values, converting URLs to their respective values with the clean_value() function.
|
def clean_json(json, return_params):
    cleaned_json = []
    for element in json:
        new_element = {}
        for key in element:
            # Keep the key if no return parameters were requested,
            # or if it is one of the requested return parameters.
            if (return_params is None) or (key in return_params):
                value = element[key]
                # clean_value() converts URL values to their underlying data.
                new_element[key] = clean_value(value, key)
        cleaned_json.append(new_element)
    return cleaned_json
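A minimal usage sketch of clean_json, assuming a trivial stand-in for the clean_value() helper (the real helper is expected to dereference URL values); the record field names below are made up for illustration:

# Hypothetical stand-in for clean_value(); here it simply returns the value unchanged.
def clean_value(value, key):
    return value

records = [
    {"name": "Ada", "profile": "https://example.org/people/1", "height": "172"},
    {"name": "Bea", "profile": "https://example.org/people/2", "height": "167"},
]

# Keep only the requested keys; passing None keeps every key.
print(clean_json(records, return_params=["name", "profile"]))
# -> [{'name': 'Ada', 'profile': 'https://example.org/people/1'},
#     {'name': 'Bea', 'profile': 'https://example.org/people/2'}]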
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create() -> TJsonResponse:\n if request.headers['Content-Type'] == 'application/json':\n url = request.json.get('url')\n else:\n url = request.form.get('url')\n if not url:\n return jsonify(error='bad request'), 400\n result = scrape.scrape_meta_for_url(url)\n inserted_id, tags = result.get()\n url_hash = encode(inserted_id)\n response_body: Dict[str, Any] = jsonify(hash=url_hash, short_url=f'https://fanlens.io/@{url_hash}', tags=tags)\n return response_body",
"def to_return(**data):\n missing = []\n for arg in [\"changes\", \"comment\", \"warnings\", \"result\"]:\n if arg not in data:\n if arg == \"warnings\":\n data.setdefault(arg, [])\n elif arg == \"comment\":\n data.setdefault(arg, \"Success\")\n elif arg == \"changes\":\n data.setdefault(arg, {})\n else:\n missing.append(arg)\n\n if missing:\n raise sugar.lib.exceptions.SugarRuntimeException(\n \"Missing arguments for the result object: {}\".format(\", \".join(missing)))\n\n data = ActionResult(**data)\n data.warn = \"warnings\"\n\n return data",
"def make_dict(result):\n response = dict()\n response.update(\n {\n 'url': result.get('Url', None),\n 'title': result.get('Title', None),\n 'description': result.get('Description', None),\n 'card_type': 1,\n 'icon_url': None,\n 'provider_icon_url': None,\n 'action_type': 1,\n })\n return response",
"def json_friendly(self):",
"def clean_response(r, *args, **kwargs):\n\n def convert_pkg_info(pkg_url_list):\n \"\"\"\n Converts a list of package info dicts\n into a dict, where the key is the type\n of package.. eg: sdist\n :param pkg_url_list:\n :return: dict\n \"\"\"\n package_urls = {}\n for pkg_url in pkg_url_list:\n package_urls.update(\n {\n pkg_url.get(\"packagetype\"): {\n \"md5\": pkg_url.get(\"digests\").get(\"md5\"),\n \"sha256\": pkg_url.get(\"digests\").get(\"sha256\"),\n \"filename\": pkg_url.get(\"filename\"),\n \"size\": pkg_url.get(\"size\"),\n \"upload_time\": pkg_url.get(\"upload_time\"),\n \"url\": pkg_url.get(\"url\"),\n }\n }\n )\n return package_urls\n\n # only run hooks for 200\n if r.status_code == 200:\n dirty_response = r.json()\n cleaned_response = {}\n\n info = dirty_response.get(\"info\")\n if info:\n cleaned_response = {\n \"name\": info.get(\"name\"),\n \"latest_version\": info.get(\"version\"),\n \"summary\": info.get(\"summary\"),\n \"homepage\": info.get(\"home_page\"),\n \"package_url\": info.get(\"project_url\") or info.get(\"package_url\"),\n \"author\": info.get(\"author\"),\n \"project_urls\": info.get(\"project_urls\"),\n \"requires_python\": info.get(\"requires_python\"),\n \"license\": info.get(\"license\"),\n \"author_email\": info.get(\"author_email\"),\n \"latest_release_url\": info.get(\"release_url\"),\n \"dependencies\": info.get(\"requires_dist\"),\n }\n\n # release list\n releases = dirty_response.get(\"releases\")\n if releases:\n release_list = list(releases.keys())\n release_list.reverse()\n\n # more detailed info of every release's package\n releases_info = {}\n for key, val in releases.items():\n if val:\n releases_info[key] = convert_pkg_info(val)\n\n cleaned_response.update(\n {\n \"releases\": release_list,\n \"releases_pkg_info\": releases_info,\n }\n )\n\n # latest release's package information\n latest_pkg_urls = dirty_response.get(\"urls\")\n if latest_pkg_urls:\n cleaned_response.update(\n {\n \"latest_pkg_urls\": convert_pkg_info(latest_pkg_urls),\n }\n )\n\n r.cleaned_json = cleaned_response\n return r",
"def get(path, dict_params):\n curr = PATHS.get(path)\n url = curr['url']\n path_params = {}\n query_params = {}\n fields = []\n hydrations = []\n first_field = True\n print(dict_params)\n # search through the dictionary of params, categorize what kind of parameter, make sure they exist and then replace them in the url\n for k, v in dict_params.items():\n if curr['path_params'].get(k):\n path_params.update({k: str(v)})\n print(path_params)\n elif k in curr['query_params']:\n query_params.update({k: v})\n # DEBUG RIGHT HERe\n else:\n break\n print(query_params)\n for s, t in path_params.items():\n url = url.replace(\"{{{}}}\".format(s), t)\n print(\"Working through here\")\n while url.find('{') != -1:\n # so if u put in the wrong shit it repeats endlessly\n print(url)\n start_index = url.find('{')\n end_index = url.find('}')\n param = url[start_index:end_index + 1]\n print(url)\n param_without_brackets = url[start_index + 1:end_index]\n if param_without_brackets in curr['required_params']:\n url = url.replace(param, curr.get('path_params').get(param_without_brackets)['default'])\n elif param_without_brackets not in curr['required_params']:\n url = url.replace(\"/\" + param, '')\n\n\n if len(query_params) > 0:\n for k, v in query_params.items():\n if k == 'fields' or k == 'hydrate':\n sep = '?' if url.find('?') == -1 else '&'\n url += sep + k + \"=\"\n total = len(v)\n counter = 1\n for i in v:\n sep = '%2c' if counter < total else ''\n counter += 1\n qurl = i + sep\n url += qurl\n else:\n sep = '?' if url.find('?') == -1 else '&'\n v = str(v)\n url += sep + k + \"=\" + v\n # For if fields in the query\n # Make sure required parameters are present\n print(url)\n satisfied = False\n missing_params = []\n for x in curr.get('required_params', []):\n if len(x) == 0:\n break\n else:\n for i in x:\n if path_params.get(i) or query_params.get(i) or i in fields or i in hydrations:\n satisfied=True\n if satisfied != True:\n missing_params.extend(x)\n\n\n if len(missing_params) != 0:\n return 'missing params {}'.format(missing_params)\n\n r = requests.get(url)\n if r.status_code not in [200,201]:\n return r.status_code\n else:\n return r.json()",
"def postprocessRequest(self, retval, route):\n JSONed = False\n GZIPPED = False\n\n if retval is None:\n self.logger.warn(\"retval is None!\")\n return retval\n\n # Is this request under the a path we're enforcing JSON output for?\n if (route is not None and hasattr(route, 'rule') and route.rule.startswith(self.baseRulePath)) or response.status_code >= 400:\n # It is. Try to serialize the returned data as JSON\n self.logger.debug(\"response should be JSON\")\n\n # First, is the data even something we can serialize as JSON?\n # if the retval is not a dict, we don't know what to do with it, so just be transparent\n if type(retval) not in (dict, list):\n self.logger.error(\"\\033[41;1m You are trying to send the client data that doesn't look like it should be JSON (%s). Fix this! \\033[0m\" % type(retval))\n # TODO: consider raising an exception so as to generate a server error (500), forcing the app developer\n # to confront why/how they are sending back something that doesn't make much sense serializing as JSON\n else:\n # Was the \"pretty\" query parameter set?\n if request.query.get(\"pretty\") == 'true':\n # It was. Indent & sort keys\n self.logger.debug(\"found pretty query param, value is true, prettying JSON\")\n retval = json.dumps(retval, indent=4, sort_keys=True)\n else:\n # It was not. By default, we'll use the most compact representation\n retval = json.dumps(retval, separators=(',', ':'))\n response.content_type = \"application/json\"\n self.logger.debug(\"%d bytes of JSON created\" % len(retval))\n JSONed = True\n else:\n self.logger.debug(\"response should NOT be JSON\")\n\n # Gzipping the response\n # Can the client even handle gzipped response bodies?\n httpRespObj = None\n if isinstance(retval, bottle.HTTPResponse):\n # we'll keep the HTTPResponse so we can update it after gzipping.\n self.logger.debug(\"Found HTTPResponse instance\")\n httpRespObj = retval\n if type(retval.body) in (str, unicode):\n retval = retval.body\n elif hasattr(retval.body, \"read\"):\n retval = retval.body.read()\n else:\n self.logger.error(\"HTTPResponse.body attr is not a str and does not have a read() method!\")\n raise ValueError(\"HTTPResponse.body is not sane: attr is not a str, and is not a file-like object\")\n\n elif isinstance(retval, bottle.HTTPError):\n self.logger.debug(\"Found HTTPError instance\")\n httpRespObj = retval\n if type(retval.body) in (str, unicode):\n retval = retval.body\n elif hasattr(retval.body, \"read\"):\n retval = retval.body.read()\n else:\n self.logger.error(\"HTTPError.body attr is not a str and does not have a read() method!\")\n raise ValueError(\"HTTPError.body is not sane: attr is not a str, and is not a file-like object\")\n\n if 'gzip' in request.headers.get(\"Accept-Encoding\", \"\") and len(retval) > 0:\n self.logger.debug(\"client accepts gzip, gzipping data\")\n # the client handle gzipped data, so lets gzip out data\n self.logger.debug(\"original response data was %d bytes\" % len(retval))\n sio = StringIO.StringIO()\n gzFile = gzip.GzipFile(fileobj=sio, mode='wb', compresslevel=6)\n gzFile.write(retval)\n gzFile.close()\n sio.seek(0)\n retval = sio.read()\n sio.close()\n self.logger.debug(\"new gzipped response data is %d bytes\" % len(retval))\n GZIPPED = True\n\n # Were we given an HTTPResponse isntance? 
If so, we need to update it a bit\n if httpRespObj:\n self.logger.debug(\"Updating HTTPResponse instance with gzipped content, headers\")\n httpRespObj.body = retval\n httpRespObj['Content-Length'] = str(len(retval))\n httpRespObj['Content-Encoding'] = 'gzip'\n else:\n # update the content-length (it is already set) and add the content-encoding header\n response.set_header('Content-Length', str(len(retval)))\n response.set_header('Content-Encoding', 'gzip')\n else:\n self.logger.debug(\"client either doesn't accept gzip or there's no data to return; len(retval)=%d\" % len(retval))\n\n self.logger.info(\"RESPONSE %s gzipped:%s json:%s size:%dB\" % (response.status_code, GZIPPED, JSONed, len(retval)))\n if httpRespObj:\n return httpRespObj\n return retval",
"def handle_log_output(original_parameters_string: Optional[Any]) -> Dict[str, Any]:\n if original_parameters_string is None:\n return {}\n\n if isinstance(original_parameters_string, bytes):\n mystr = original_parameters_string.decode(\"utf-8\")\n elif isinstance(original_parameters_string, str):\n mystr = original_parameters_string\n else:\n mystr = str(original_parameters_string)\n\n if mystr.strip() == \"\":\n return {}\n\n urlencoded = False\n try:\n parameters = orjson.loads(mystr)\n except orjson.JSONDecodeError:\n try:\n parameters = urllib.parse.parse_qs(mystr)\n urlencoded = True\n except Exception: # pragma: no cover\n return original_parameters_string\n\n return obfuscate_dict(parameters, urlencoded=urlencoded)",
"def return_data(self, **kwargs) -> dict:\r\n return None",
"def gen_output(json_dct, *args):\n keys_to_add = ('job_title', 'location', 'date', 'company', 'num_stars')\n for arg, key in zip(args, keys_to_add): \n if arg: \n json_dct[key] = arg\n\n return json_dct",
"def api_analyze():\n output = {}\n\n #retrieve the json from the ajax call\n json_file = ''\n if request.method == 'POST':\n json_file = request.json\n print (\"post request\")\n\n #if json_file successfully posted..\n if json_file != '':\n # check all required arguments are present:\n if not all(arg in json_file for arg in [\"portfolio\",\"riskfactor\", \"shockmag\"]):\n print(\"Missing arguments in post request\")\n return json.dumps({\"status\":\"Error\", \"messages\":\"Missing arguments\"}), 422\n portfolio = json_file[\"portfolio\"]\n riskfactor = json_file[\"riskfactor\"]\n shockmag = json_file[\"shockmag\"]\n print(\"retreived data: \" + str(portfolio) + \" | \" + str(riskfactor) + \" | \" + str(shockmag))\n\n #run Predictive Market Scenario service\n PMS_status = predictivemarketscenario.generate_scenario(riskfactor, shockmag)\n #if error in the call, return with error\n if PMS_status != 200:\n print(\"Unable to create csv from Predictive Market Scenario service\")\n return json.dumps({'error': str(PMS_status) + \" Unable to create csv from Predictive Market Scenario service\"})\n print (\"CREATED CSV\")\n\n #get holdings data\n holdings_data = investmentportfolio.get_portfolio_holdings(portfolio)\n\n #go through each holding in the portfolio\n asset_output = []\n for holding in holdings_data[\"holdings\"][-1][\"holdings\"]:\n\n #call the simulatedinstrumentanalytics module\n data = simulatedinstrumentanalytics.compute_simulated_analytics(instrument_id=holding[\"instrumentId\"])\n\n #if returned as json would mean error, assign N/A, else assing the values from the list of objects\n if isinstance(data, dict):\n value1 = \"N/A\"\n value2 = \"N/A\"\n else:\n value1 = data[0][\"values\"][0][\"THEO/Price\"]\n value2 = data[1][\"values\"][0][\"THEO/Price\"]\n\n #create obj with the output values\n asset = holding[\"asset\"]\n instrumentId = holding[\"instrumentId\"]\n quantity = holding[\"quantity\"]\n companyName = holding[\"companyName\"]\n obj = {\n 'Asset': asset,\n 'InstrumentId': instrumentId,\n 'Quantity': quantity,\n 'CompanyName': companyName,\n 'BaseVal': value1,\n 'NewVal': value2\n }\n asset_output.append(obj)\n\n #get the market_conditions as list\n if os.path.exists(\"output_PMS.csv\"):\n market_conditions = get_market_conditions(\"output_PMS.csv\")\n\n #create the output json\n output = {\"holdingsInfo\": asset_output, \"marketConditions\": market_conditions}\n\n return json.dumps(output)",
"def _from_json(return_obj):\n\n return json.loads(return_obj.decode())",
"def mocked_json(return_data=None):\n if return_data is None:\n return_data = {}\n\n def json(*args, **kwargs): # pylint:disable=unused-argument, missing-docstring\n return return_data\n return json",
"def use_GET_in(fn, request):\n response = fn(request.GET)\n if isinstance(response, dict):\n return HttpResponse(json.dumps(response),\n content_type='application/json')\n else:\n return response",
"def handle_json_arguments(context: click.Context, param: click.Parameter, items: Sequence[str]) -> Optional[dict]:\n if not items or context.resilient_parsing:\n return\n return parse_dict_items(items)",
"def _post_process_result(result: Any) -> Any:\n return result",
"def get():\n #Return the corresponding value\n return json_back()",
"def get(self, request):\n# self.context[\"form\"] = AddReturnParamsForm()\n# return render(request, \"dbkeeper/add_return_params.html\", self.context)\n try:\n returnParams = Setting.getReturnParams()\n d = {}\n for station in range(0, 6):\n fields = [\"st{}{}\".format(station, valname) for valname in (\"id\", \"v0\", \"v1\", \"v2\", \"v3\", \"v4\", \"v5\")]\n values = returnParams[station]\n \n for f,v in zip(fields, values):\n d[f] = v \n\n self.context[\"form\"] = AddReturnParamsForm(initial=d)\n except:\n self.context[\"form\"] = AddReturnParamsForm()\n return render(request, \"dbkeeper/add_return_params.html\", self.context)",
"def params_helper(self,**kwargs):\n\n dic = {'output' : 'json, xml, kml',\n 'maxresults' : 'limit on max number of results returned ; Default is limited to 100',\n 'countrycode' : 'GB, US etc ISO Country Code ==> Only 2 caracters !',\n 'latitude' : 'latitude reference for distance calculation',\n 'distance' : 'return results based on specified distance from specified latitude/longitude',\n 'distanceunit' : 'Miles or km',\n 'operatorid' : 'exact match on a given EVSE operator id (comma separated list)',\n 'connectiontypeid' : ' exact match on a given connection type id (comma separated list)',\n 'countryid' : 'exact match on a given country id (comma separated list)',\n 'levelid' : 'exact match on a given charging level (1-3) id (comma separated list)',\n 'minpowerkw' : 'minimum output power in kW (this information is not known for many locations)',\n 'usagetypeid' : 'exact match on a given usage type id (comma separated list) ',\n 'statustypeid' : ' exact match on a given status type id (comma separated list)',\n 'dataproviderid ' : 'exact match on a given data provider id id (comma separated list). Use opendata=true for only OCM provided (\"Open\") data.',\n 'modifiedsince' : 'POIs modified since the given date (UTC) e.g. 2016-09-15T09:30',\n 'opendata' : ' true or false. Set to true to include only Open Data licensed content, false to return only non-open licensed data. By default all available data is returned.',\n 'includecomments' : ' true or false. Set to true to also include user comments and media items (photos) per charging location. Default = false.',\n 'verbose ' : ' true or false. Set to false to get a smaller result set with null items removed. Default = true.',\n 'compact ' : 'true or false. Set to true to remove reference data objects from output (just returns IDs for common reference data such as DataProvider etc). Default = false.',\n 'camelcase' : 'true or false. Set to true to get a property names in camelCase format. Default = false',\n 'callback' : 'specify the name of the JSONP callback (if required), JSON response type only.'\n }\n\n if len(kwargs)==0 :\n\n for key in dic.keys() :\n print(key)\n\n else :\n \n for k in kwargs: \n print(dic.get(k))",
"def get_json_string(self, **kwargs):\n ...",
"def standardize_api_response(function):\n\n available_result_keys = [\n 'success', 'error', 'created', 'updated', 'deleted', 'no-data']\n\n status_code_and_descriptions = {\n 'success': (200, 'Successful Operation'),\n 'error': (400, 'Bad Request'),\n 'created': (201, 'Successfully created'),\n 'updated': (200, 'Successfully updated'),\n 'deleted': (200, 'Successfully deleted'),\n 'no-data': (204, '')\n }\n\n @functools.wraps(function)\n def make_response(*args, **kwargs):\n\n result = function(*args, **kwargs)\n\n if not set(available_result_keys) & set(result):\n raise ValueError('Invalid result key.')\n\n status_code, description = status_code_and_descriptions[\n next(iter(result.keys()))\n ]\n\n status_code = ('status_code', status_code)\n description = (\n ('description', description) if status_code[1] != 400 else\n ('error', description)\n )\n data = (\n ('data', next(iter(result.values()))) if status_code[1] != 204 else\n ('data', '')\n )\n\n return json.dumps(collections.OrderedDict([\n status_code, description, data])), status_code[-1]\n\n return make_response",
"def retrieve_short_url():\n if request.method == 'GET':\n if 'custom' in request.args:\n token_string = request.args['custom']\n conn = psycopg2.connect(host=host, user=user, password=passwrd, database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n data = jsonify({\n 'error': 'Custom string given not available as shortened url.'\n })\n return make_response(data, 200)\n else:\n info, counter, browser, platform = list_data(token_string)\n data = jsonify({\n 'clicks': counter[0],\n 'custom': info[1],\n 'long_url': info[0],\n 'click_browser': {\n 'chrome': browser[0],\n 'firefox': browser[1],\n 'safari': browser[2],\n 'other_browser': browser[3]\n },\n 'click_platform': {\n 'android': platform[0],\n 'ios': platform[1],\n 'windows': platform[2],\n 'linux': platform[3],\n 'mac': platform[4],\n 'other_platform': platform[5]\n },\n 'tag': info[2]\n })\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'Follow the API format ',\n })\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used , Use GET .'})\n return make_response(data, 405)",
"def get_json(self):\n try:\n nformat, pattern = self.format\n except TypeError:\n nformat, pattern = self.format, \"\"\n\n if self._formula != '':\n value = self._formula\n value_key = 'formulaValue'\n elif is_number(self._value):\n value = self._value\n value_key = 'numberValue'\n elif type(self._value) is bool:\n value = self._value\n value_key = 'boolValue'\n elif type(self._value) is str or type(self._value) is unicode:\n value = self._value\n value_key = 'stringValue'\n else: # @TODO errorValue key not handled\n value = self._value\n value_key = 'errorValue'\n\n ret_json = dict()\n ret_json[\"userEnteredFormat\"] = dict()\n\n if self.format[0] is not None:\n ret_json[\"userEnteredFormat\"][\"numberFormat\"] = {\"type\": getattr(nformat, 'value', nformat),\n \"pattern\": pattern}\n if self._color[0] is not None:\n ret_json[\"userEnteredFormat\"][\"backgroundColor\"] = {\"red\": self._color[0], \"green\": self._color[1],\n \"blue\": self._color[2], \"alpha\": self._color[3]}\n if self.text_format is not None:\n ret_json[\"userEnteredFormat\"][\"textFormat\"] = self.text_format.copy()\n fg = ret_json[\"userEnteredFormat\"][\"textFormat\"].get('foregroundColor', None)\n ret_json[\"userEnteredFormat\"][\"textFormat\"]['foregroundColor'] = format_color(fg, to='dict')\n\n if self.borders is not None:\n ret_json[\"userEnteredFormat\"][\"borders\"] = self.borders\n if self._horizontal_alignment is not None:\n ret_json[\"userEnteredFormat\"][\"horizontalAlignment\"] = self._horizontal_alignment.value\n if self._vertical_alignment is not None:\n ret_json[\"userEnteredFormat\"][\"verticalAlignment\"] = self._vertical_alignment.value\n if self._wrap_strategy is not None:\n ret_json[\"userEnteredFormat\"][\"wrapStrategy\"] = self._wrap_strategy\n if self.text_rotation is not None:\n ret_json[\"userEnteredFormat\"][\"textRotation\"] = self.text_rotation\n\n if self._note is not None:\n ret_json[\"note\"] = self._note\n ret_json[\"userEnteredValue\"] = {value_key: value}\n\n return ret_json",
"def _assign_returned_values(\n function,\n outputs: Dict[str, Any],\n returned_values: Dict[str, Any],\n output_dict: Dict[str, List[tfx_types.Artifact]],\n json_typehints: Dict[str, Type], # pylint: disable=g-bare-generic\n) -> Dict[str, List[tfx_types.Artifact]]:\n result = copy.deepcopy(output_dict)\n if not isinstance(outputs, dict):\n raise ValueError(\n ('Expected component executor function %s to return a dict of '\n 'outputs (got %r instead).') % (function, outputs))\n\n # Assign returned ValueArtifact values.\n for name, is_optional in returned_values.items():\n if name not in outputs:\n raise ValueError(\n 'Did not receive expected output %r as return value from '\n 'component executor function %s.' % (name, function))\n if not is_optional and outputs[name] is None:\n raise ValueError('Non-nullable output %r received None return value from '\n 'component executor function %s.' % (name, function))\n try:\n result[name][0].value = outputs[name]\n except TypeError as e:\n raise TypeError(\n ('Return value %r for output %r is incompatible with output type '\n '%r.') %\n (outputs[name], name, result[name][0].__class__)) from e\n # Handle JsonValue runtime type check.\n if name in json_typehints:\n ret = function_parser.check_strict_json_compat(outputs[name],\n json_typehints[name])\n if not ret:\n raise TypeError(\n ('Return value %r for output %r is incompatible with output type '\n '%r.') % (outputs[name], name, json_typehints[name]))\n return result",
"def retrieve(self, data_only_filter=\"all\", return_type=\"python\"):\n if return_type == \"python\":\n if data_only_filter == \"all\":\n return dict(dict_data=self.data_dict, list_data=self.data_list)\n elif data_only_filter == \"list\":\n return self.data_list\n elif data_only_filter == \"dict\":\n return self.data_dict\n else:\n print(\">>>> Data filter only: {'all', 'list', 'dict'}, your: %s\" % data_only_filter)\n exit(1)\n elif return_type == \"model\":\n if data_only_filter == \"all\":\n return dict(dict_data=DictModel(name=\"obj_dict\", raw_data=self.data_dict),\n list_data=DictModel(name=\"obj_list\", raw_data=self.data_list))\n elif data_only_filter == \"list\":\n return DictModel(name=\"obj_dict\", raw_data=self.data_dict)\n elif data_only_filter == \"dict\":\n return DictModel(name=\"obj_list\", raw_data=self.data_list)\n else:\n print(\">>>> Data filter only: {'all', 'list', 'dict'}, your: %s\" % data_only_filter)\n exit(1)\n else:\n print(\">>>> Return type only: {'python', 'model'}, your: %s\" % return_type)\n exit(1)",
"def process_request(input_: dict) -> dict:\n\n output = {\n \"Time\": datetime.datetime.now(),\n \"Source\": \"ecommerce.users\",\n \"Resources\": [input_[\"userName\"]],\n \"DetailType\": \"UserCreated\",\n \"Detail\": json.dumps({\n \"userId\": input_[\"userName\"],\n \"email\": input_[\"request\"][\"userAttributes\"][\"email\"]\n }),\n \"EventBusName\": EVENT_BUS_NAME\n }\n\n return output",
"def std_response(db_table,db_cols,field_to_cols=None,return_json=True,return_format=None):\n\n # Object that converts filter strings into safe SQL statements\n sql_compiler = SQLCompiler()\n\n # GET request parameters\n filter_str = request.args.get(\"filter\")\n fields_str = request.args.get(\"fields\")\n sort_str = request.args.get(\"sort\")\n format_str = request.args.get(\"format\")\n\n if fields_str is not None:\n # User's requested fields\n fields = map(lambda x: x.strip(),fields_str.split(\",\"))\n\n # Translate to database columns\n if field_to_cols is not None:\n fields = map(lambda x: field_to_cols.get(x,x),fields)\n\n # To avoid injection, only accept fields that we know about\n fields = filter(lambda x: x in db_cols,fields)\n else:\n fields = db_cols\n\n if sort_str is not None:\n # User's requested fields\n sort_fields = map(lambda x: x.strip(),sort_str.split(\",\"))\n\n # Translate to database columns\n if field_to_cols is not None:\n sort_fields = map(lambda x: field_to_cols.get(x,x),sort_fields)\n\n # To avoid injection, only accept fields that we know about\n sort_fields = filter(lambda x: x in db_cols,sort_fields)\n else:\n sort_fields = None\n\n # if filter_str is not None:\n # sql, params = fparser.parse_into_sql(filter_str,db_table,db_cols,fields,sort_fields)\n # else:\n # sql = \"SELECT * FROM {}\".format(db_table)\n # params = []\n\n sql, params = sql_compiler.to_sql(filter_str,db_table,db_cols,fields,sort_fields,field_to_cols)\n\n # text() is sqlalchemy helper object when specifying SQL as plain text string\n # allows for bind parameters to be used\n cur = g.db.execute(text(sql),params)\n\n outer = {\n \"data\": None,\n \"lastPage\": None\n }\n\n # We may need to translate db columns --> field names.\n if field_to_cols is not None:\n cols_to_field = {v: k for k, v in field_to_cols.iteritems()}\n else:\n cols_to_field = {v: v for v in db_cols}\n\n # Figure out return format.\n if return_format == \"table\" or (return_format is None and (format_str is None or format_str == \"\")):\n data = OrderedDict()\n\n for i, row in enumerate(cur):\n for col in fields:\n # Some of the database column names don't match field names.\n field = cols_to_field.get(col,col)\n\n val = row[col]\n if isinstance(val,dict):\n for k, v in val.iteritems():\n if k not in data:\n data[k] = [None] * i\n\n data[k].append(v)\n else:\n data.setdefault(field,[]).append(row[col])\n\n outer[\"data\"] = data\n\n elif return_format == \"objects\" or format_str == \"objects\":\n data = []\n for row in cur:\n rowdict = dict(row)\n finaldict = dict()\n\n # User may have requested only certain fields\n for col in fields:\n # Translate from database column to field name\n field = cols_to_field.get(col,col)\n finaldict[field] = rowdict[col]\n\n data.append(finaldict)\n\n outer[\"data\"] = data\n\n if return_json:\n return jsonify(outer)\n else:\n return data",
"def auto_jsonp(f):\n def new(*arg, **kw):\n callback = request.GET.get('callback')\n result_data = f(*arg, **kw)\n if callback and isinstance(result_data, dict):\n # We only do JSONP for dicts\n response.headers['Content-type'] = 'text/javascript'\n return makeJSONP(callback, result_data)\n # otherwise, we just return as usual\n return result_data\n\n return new",
"def _clean_and_encode_params(params: Mapping):\n # Keep only the parameters that were given a value\n params = {k: v for k, v in params.items() if v is not None}\n\n # All query parameters are later urlencoded - for projection, comma-separated\n # list is supported only on literal comma; convert comma-separated list\n # to a list of values which will be encoded to multiple query parameters\n try:\n params[\"projection\"] = [x.strip() for x in params[\"projection\"].split(\",\")]\n except KeyError:\n pass\n return params",
"def create():\n link_user = request.cookies.get('linkuser')\n user_browser = request.user_agent.browser\n time_stamp = datetime.now()\n action = \"create\" ## create or autocreate? add a request param.\n lat = request.form['lat']\n longitude = request.form['long'] \n\n if request.method == 'POST':\n url = request.form['url']\n short = request.form['short']\n\n ## add http:// if not in url\n if url.find('http://') == -1: \n app.logger.debug(\"adding http://\")\n url = 'http://' + url\n\t\n ## log user action\n logline = [str(time_stamp), link_user, user_browser, action, url, short, lat, longitude ]\n app.logger.debug(logline)\n write_log(logline)\t\n\n ## check if url in db\n for shortDB, urlDB in db.items():\n if url == urlDB or url[:7] +'www.'+ url[7:] == urlDB:\n short = shortDB\n app.logger.debug(url+\" already stored at \"+ short)\n return jsonify(url=url,short=short,link=\"http://people.ischool.berkeley.edu/~arenold/server/\"+short)\n\t\t\n\t\t## store new short and url\n app.logger.debug(\"request to store new \"+url+\" at \"+short)\n clicks[str(short)] = 0 \n db[str(short)] = str(url)\n\t\n return jsonify(url=url,short=short,link=\"http://people.ischool.berkeley.edu/~arenold/server/\"+short)"
] |
[
"0.59374917",
"0.5405094",
"0.5354095",
"0.5266332",
"0.51881903",
"0.5186116",
"0.51577467",
"0.51448053",
"0.5123238",
"0.5116251",
"0.5111089",
"0.5100726",
"0.5025311",
"0.4997758",
"0.4993474",
"0.49896982",
"0.49742863",
"0.49505863",
"0.49287662",
"0.49123642",
"0.49086103",
"0.48872724",
"0.4886944",
"0.4862675",
"0.4858931",
"0.48515096",
"0.48316172",
"0.48303145",
"0.48196757",
"0.48182714"
] |
0.6074537
|
0
|
Filters out any elements of the JSON whose parameters are not within the specified range.
|
def filter_json(json, param, param_range):
    filtered_json = []
    for element in json:
        # Skip elements that lack the parameter or have an empty value.
        if element.get(param):
            try:
                value = int(element[param])
                # Keep the element only if its value falls inside the range.
                if param_range[0] <= value <= param_range[1]:
                    filtered_json.append(element)
            except (TypeError, ValueError):
                # Values that cannot be parsed as integers are filtered out.
                pass
    return filtered_json
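A short usage sketch of filter_json; the sample rows and the "height" field are invented for illustration:

rows = [
    {"height": "172", "name": "Luke"},
    {"height": "unknown", "name": "Owen"},
    {"height": "96", "name": "R2"},
]

# Keep only rows whose "height" parses to an int inside [100, 200].
print(filter_json(rows, "height", (100, 200)))
# -> [{'height': '172', 'name': 'Luke'}]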
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate(self, data):\n l = len(data[\"start_times\"])\n for i in range(l):\n if data[\"start_times\"][i]>=data['end_times'][i]:\n raise serializers.ValidationError(\"Start times should come before end times\") \n return data",
"def test_filter_params_invalid_limit(self):\n filter_params = {\n \"resolution\": \"monthly\",\n \"time_scope_value\": \"-1\",\n \"time_scope_units\": \"month\",\n \"limit\": \"invalid\",\n }\n serializer = OCIFilterSerializer(data=filter_params)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]",
"def remove_out_of_bounds(self, data, low_bound, high_bound):\n data = data.dropna()\n data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)] \n return data",
"def test_parse_exclude_params_no_time(self):\n exclude_params = {\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"instance_type\": FAKE.word(),\n }\n serializer = OCIExcludeSerializer(data=exclude_params)\n self.assertTrue(serializer.is_valid())",
"def _boundary_filter(self, south, west, north, east):\n return Q(latitude__gt=south, longitude__gt=west, \n latitude__lt=north, longitude__lt=east)",
"def test_filter_params_invalid_limit_time_scope_resolution(self):\n filter_params = {\"resolution\": \"monthly\", \"time_scope_value\": \"-10\", \"time_scope_units\": \"day\"}\n serializer = OCIFilterSerializer(data=filter_params)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)",
"def filter_timespans(self, minTime=2.0):\n igList = [ig.Rsc['PAIR'] for ig in self.Set if abs(float(ig.Rsc['TIME_SPAN_YEAR'])) < minTime]\n self.Set.omit(IG=igList)",
"def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items",
"def filter_range(self, name, field, start, end):\n self.__names[name] = {\n 'filter': {\n 'range': {\n field: {\n 'gt': start,\n 'lt': end\n }\n }\n }\n }\n self.__last_name = name\n return self",
"def removeBounded(self, bounds):\n if bounds==None or len(bounds)!=4:\n return\n x1,y1,x2,y2 = bounds\n if x1>x2 :\n temp=x1;x1=x2;x2=temp\n if y1>y2:\n temp=y1;y1=y2;y2=temp\n lst=[]\n for i in range(0,self.length()):\n x=self.x[i]; y=self.y[i]\n if (x>x1 and x<x2) and (y>y1 and y<y2): \n lst.append(i)\n self.removeMultiple(lst)\n return",
"def filterNotInRange(frame, min, max, colorMode):\n\n tempFrame = cv2.cvtColor(frame, colorMode)\n\n mask = cv2.inRange(tempFrame, min, max)\n\n filtered_frame = cv2.bitwise_and(frame, frame, mask=mask)\n\n return filtered_frame",
"def filter_datetime_range(self, queryobject, start_datetime, end_datetime):\n raise NotImplementedError()",
"def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items",
"def _removeOutOfRangeTransformer(self, working_stats, params):\n\n choices = [int(choice) for choice, subsets in working_stats.iteritems()\n if [value for value in subsets if value > 0]]\n\n min_choice = min(choices)\n max_choice = max(choices)\n\n for choice in working_stats.keys():\n if int(choice) < min_choice or int(choice) > max_choice:\n del working_stats[choice]\n\n return working_stats",
"def validate(self, data):\n if data['min_precipitation'] > data['max_precipitation']:\n raise serializers.ValidationError(\n 'Minimum precipitation cannot be higher than the maximum precipitation.')\n return data",
"def remove_by_date():\n start_date = request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n removed = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n removed.append(animal)\n\n for animal in removed:\n rd.delete(animal[\"uuid\"])\n\n return jsonify(removed)",
"def test_range__no_end_date(self):\n data = self._data()\n data.pop('end_date')\n response = self._get(get_kwargs=data)\n self._check_response(response, 104)",
"def sec_range(cls, queryname, start, end, positive=True):\n if positive:\n return {queryname: {\"$lt\": end, \"$gte\": start}}\n else:\n return {queryname: {\"$not\": {\"$lt\": end, \"$gte\":start}}}",
"def filter_point(x, y, xlower, xupper, ylower, yupper):\n ignore = False\n if (x < xlower or x > xupper or y < ylower or y > yupper):\n ignore = True\n return ignore",
"def not_between(self, column: str, low: [str, int], high: [str, int]):\n self._wheres += (BetweenExpression(column, low, high, equality=\"NOT BETWEEN\"),)\n return self",
"def filter_MAD_range(self, criteria):\n # Get the median absolute deviation\n med_abs_dev = abs(self.passed[criteria] - self.passed[criteria].median()).mean()\n dev_ref = med_abs_dev * self.tolerance[criteria]\n lower = self.passed[criteria].median() - dev_ref\n upper = self.passed[criteria].median() + dev_ref\n allowed_range = (str(int(x)) for x in [lower, upper])\n allowed_range = \"-\".join(allowed_range)\n self.allowed[criteria] = allowed_range\n self.failed[criteria] = self.passed[\n abs(self.passed[criteria] - self.passed[criteria].median()) > dev_ref\n ].index\n self.passed = self.passed[\n abs(self.passed[criteria] - self.passed[criteria].median()) <= dev_ref\n ]",
"def test_parse_filter_params_no_time(self):\n filter_params = {\n \"region\": FAKE.word(),\n \"payer_tenant_id\": FAKE.uuid4(),\n \"instance_type\": FAKE.word(),\n }\n serializer = OCIFilterSerializer(data=filter_params)\n self.assertTrue(serializer.is_valid())",
"def __dynamic_range_process(info):\n if 'range' in info:\n for i in range(len(info['range'])):\n if info['range'][i][1] == -1:\n info['range'][i][1] = None\n return info",
"def _withinRangeCheckerWrapper(self, args):\n\n constraints = args['constraints']\n\n def _withinRangeChecker(entity, params):\n \"\"\"Checks if certain properties are within given constrains. \n \"\"\"\n\n for constraint in constraints:\n type = constraint.get('type')\n field = constraint.get('field')\n\n if not type or not field:\n raise ProtocolError()\n\n min_value = constraint.get('min_value', 0)\n max_value = constraint.get('max_value', 1)\n\n if type == 'size':\n value = entity.__getattribute__(field)\n if len(value) < min_value or len(value) > max_value:\n return False\n else:\n raise ProtocolError()\n \n return True\n \n return _withinRangeChecker",
"def test_filter_by_bad_date(admin_client, public_resource_with_metadata):\n query_filter = {\"date\": [\"2019-11-01\", \"bad-date\"]}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(json.dumps(query_filter)), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n assert djangoresponse.status_code == 400\n assert \"date parsing error\" in response['message']",
"def validate_subset_of_schema(self, schema):\n super(NumericAttributeSchema, self).validate_subset_of_schema(schema)\n\n if self.range and (\n not schema.range\n or self.range[0] < schema.range[0]\n or self.range[1] > schema.range[1]\n ):\n raise AttributeSchemaError(\n \"Range %s is not a subset of %s\" % (self.range, schema.range)\n )",
"def trim_range(self, low_bound, hi_bound, full_bound=True):\n low_bound_int = int(low_bound[:self.place+1])\n hi_bound_int = int(hi_bound[:self.place+1])\n\n # Remove keys outside of range\n # modifying dict during loop caused lots of problems - del after loop\n keys_to_del = []\n for key in self.Poss_Tree:\n if key < int(low_bound[:self.place]):\n keys_to_del.append(key)\n continue\n elif key > int(hi_bound[:self.place]):\n keys_to_del.append(key)\n continue\n for key in keys_to_del:\n del self.Poss_Tree[key]\n\n # Remove values outside of range\n vals_to_del = defaultdict(list)\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n password = int(construct_pass(key, choice))\n if password > hi_bound_int or password < low_bound_int:\n vals_to_del[key].append(choice)\n for key in vals_to_del:\n for val in vals_to_del[key]:\n self.Poss_Tree[key].remove(val)",
"def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res",
"def cut_params(params, exclude):\n for ex_var, ex_list in exclude.items():\n for ex in ex_list:\n if ex in params[ex_var]:\n print(f'Excluding {ex_var}={ex:.3f} from grid')\n ex_idx = np.searchsorted(params[ex_var], ex)\n params[ex_var] = np.delete(params[ex_var], [ex_idx])"
] |
[
"0.58409065",
"0.5508638",
"0.5436571",
"0.54290146",
"0.5371077",
"0.53509444",
"0.5345174",
"0.53403056",
"0.5330316",
"0.5301808",
"0.5300423",
"0.5294917",
"0.5251204",
"0.52305263",
"0.5230448",
"0.52102774",
"0.51994115",
"0.51790226",
"0.517034",
"0.5168191",
"0.51614696",
"0.5145644",
"0.51360893",
"0.51107365",
"0.50888836",
"0.5079304",
"0.50753266",
"0.5062649",
"0.5062589",
"0.50554895"
] |
0.8165445
|
0
|
Precompute the polynomial hash values of all the substrings starting from the first letter.
|
def _precompute_substrings(self, p: int) -> List[int]:
hash_vals: List[int] = [0]
for i in range(len(self._s)):
val = (hash_vals[i] * self.X + ord(self._s[i])) % p
hash_vals.append(val)
return hash_vals
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def precompute_hashes(text, pattern_len, p, x):\n\n if len(text) < pattern_len or pattern_len == 0:\n return []\n\n hashes = [ 0 ] * (len(text) - pattern_len + 1)\n\n slice = text[len(text) - pattern_len:]\n hashes[len(text) - pattern_len] = SearchPattern.poly_hash(\n slice, p, x)\n\n y = 1\n for i in range(1, pattern_len + 1):\n y = (y * x) % p\n for i in range(len(text) - pattern_len - 1, -1, -1):\n hashes[i] = (x * hashes[i + 1] + ord(text[i]) -\n y * ord(text[i + pattern_len])) % p\n\n return hashes",
"def poly_hash(text, p, x):\n hash = 0\n for ch in reversed(text):\n hash = (hash * x + ord(ch)) % p\n\n return hash",
"def __polynomial_hash(self, s, base = 31, max_size=168):\r\n digest = 0\r\n max_size = 168\r\n for c in s: digest = base * digest + ord(c)\r\n digest &= 2 ** max_size - 1 \r\n return hex(digest).rstrip('L')",
"def customHashFunc(str):\n return sum(ord(chr) for chr in str)%128",
"def compute_minhash(string: str) -> LeanMinHash:\n m = MinHash(num_perm=PERMUTATIONS)\n for d in string_encoder(string):\n m.update(d)\n return LeanMinHash(m)",
"def hash_function_1(key: str) -> int:\n hash = 0\n for letter in key:\n hash += ord(letter)\n return hash",
"def _minhash_from_text(self, text):\n minhash = MinHash(self._config.num_perm)\n for word in self._shingles_from_text(text):\n minhash.update(word.encode('utf8'))\n return minhash",
"def hash_function_2(key: str) -> int:\n hash, index = 0, 0\n index = 0\n for letter in key:\n hash += (index + 1) * ord(letter)\n index += 1\n return hash",
"def pre_compute_hashes(s, M1, M2, X):\n n = len(s)\n h1 = [0 for _ in range(n+1)]\n h2 = [0 for _ in range(n+1)]\n for i in range(1, n+1):\n ch = ord(s[i-1])\n h1[i] = (X*h1[i-1] + ch) % M1\n h2[i] = (X*h2[i-1] + ch) % M2\n return h1, h2",
"def custom_hash(str_in):\n \n PRIMES = [\n 2, 3, 5, 7, 11, 13,\n 17, 19, 23, 29, 31,\n 37, 41, 43, 47, 53,\n 59, 61, 67, 71, 73,\n 79, 83, 89, 97, 101\n ]\n\n LOWERCASE_Z_ASCII = ord('z')\n hash_count = 1\n\n for letter in str_in:\n try:\n hash_count *= PRIMES[ord(letter) - LOWERCASE_Z_ASCII]\n except IndexError:\n print(f'list index out of range: {letter} in {str_in}')\n \n return hash_count",
"def hashstring(astring, tablesize):\n \n sum = 0\n for pos in range(len(astring)):\n # to account for anagrams, we give weightage to positions of the letters to give different hash values\n sum = sum + ord(astring[pos]) * (pos + 1)\n \n return sum % tablesize",
"def prehash(key):\n\n return hash(key)",
"def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash",
"def computeHash(string):\n\tif isBytes(string):\n\t\tstring = string.decode(\"latin-1\")\n\thash_ = 63689\n\tfor char in string:\n\t\thash_ = hash_ * 378551 + ord(char)\n\treturn hash_ % 65536",
"def first_recurring_char(s: str) -> str:\n h = {} # using dictionary as hash\n for ch in s:\n if ch in h:\n return ch\n\n h[ch] = 0\n return None",
"def calculate_weighted_hash(cls, word):\n\n hash_value = 0\n for char in word:\n hash_value += cls.alpha_lookup[char.lower()]\n return hash_value",
"def hash(self, text):\n hashval = 0\n for i in xrange(0, len(text)):\n hashval += ord(text[i])**i\n return hashval",
"def hash_function_1(key):\n hash = 0\n for i in key:\n hash = hash + ord(i)\n return hash",
"def keyhash(string):\n return hashlib.sha1(string.encode('utf-8')).hexdigest()",
"def solve_part_one(self):\n password = \"\"\n index = 0\n while len(password) < 8:\n (s, found_index) = self.find_next_hash(index)\n password += s[5]\n index = found_index + 1\n return password",
"def hash_string(to_hash):\n\n chars = string.printable\n\n hashed = \"\"\n\n total = 1\n\n counter = 1\n\n for letter in to_hash:\n\n total *= (chars.index(letter) * counter * len(to_hash)*13)\n\n counter += 1\n\n if counter%3 == 0:\n\n total *= total\n\n total = str(total)[:30]\n\n temp_int = \"\"\n\n for i in range(len(total)):\n\n temp_int += total[i]\n\n if i % 2 != 0:\n\n hashed += chars[int(temp_int)]\n\n temp_int = \"\"\n\n return hashed",
"def hash_key(self,key: str) -> int: \n \n total = 0\n prime = 3\n \n for index, char in enumerate(key,start=1):\n \n total += ord(char)*(prime)**index\n \n return total % self.buckets",
"def typeahead_hash(self) -> str:",
"def part1(input_string):\n twos, threes = 0, 0\n for line in input_string:\n letters_seen = {}\n for char in line:\n letters_seen[char] = letters_seen.setdefault(char, 0)+1\n if 2 in letters_seen.values():\n twos += 1\n if 3 in letters_seen.values():\n threes += 1\n return threes * twos",
"def hash_function(self, x):\n if not x:\n return -1\n hashed_value = 0\n\n for char in x:\n hashed_value = 181 * hashed_value + ord(char)\n\n return hashed_value % self.capacity",
"def get_encoded_minhash(string: str) -> str:\n return encode_minhash(compute_minhash(string))",
"def hash(plainString):\n result = plainString\n for i in range(0,12):\n result = hashHelp(result)\n return result",
"def myHash(string, base=91, mod=1000000321):\n value = 0\n for pos, elem in enumerate(string[::-1]): # считаем значение полинома\n value += ord(elem) * base**pos # в последней задаче сделано с помощью массива (динамика)\n return value % mod",
"def hash(string):\n hs = 0\n for s in string:\n hs += ord(s)\n return hs",
"def hashHelp(plainString):\n # hashed = plainString\n n = len(plainString) // 4\n\n a = plainString[0:n]\n b = plainString[n : n * 2]\n c = plainString[n * 2 : n * 3]\n d = plainString[n * 3 : len(plainString)]\n\n if len(a) == 0 or len(b) == 0 or len(c) == 0 or len(d) == 0:\n raise Exception(\"Error string not long enough, use at least 4 characters\")\n\n numA = numToBin(functionF(a,d))\n numB = numToBin(strToNum(b))\n numC = numToBin(bitShift(c))\n numD = numToBin(functionG(a,b,c,d))\n\n # hashed = A + B + numC + D\n # hashed = numA + '|' + numB + '|' + numC + '|' + numD\n hashed = text_from_bits(numD + numA + numB + numC)\n hashed = numbersToLetters(hashed)\n return hashed"
] |
[
"0.6839831",
"0.63260895",
"0.6266397",
"0.62024325",
"0.61921644",
"0.6178474",
"0.6141195",
"0.61348975",
"0.61310077",
"0.6084944",
"0.5924525",
"0.59095466",
"0.587965",
"0.58699226",
"0.5862857",
"0.57915723",
"0.57605505",
"0.57369167",
"0.57208353",
"0.57144654",
"0.57088345",
"0.5699338",
"0.56980973",
"0.5683847",
"0.5683219",
"0.56205034",
"0.5611213",
"0.5606023",
"0.5584797",
"0.55807745"
] |
0.7261117
|
0
|
Compute the value of X^l mod p for l in [0, len(s)]
|
def _precompute_xl(self, p: int) -> List[int]:
res = [1]
val = 1
for _ in range(len(self._s)):
val = (val * self.X) % p
res.append(val)
return res
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def mod(p):\n return (p[0]**2 + p[1]**2 + p[2]**2)**0.5",
"def mod_inv(a,p):\r\n\r\n for i in range(1,p):\r\n if (i*a)%p==1: return i\r\n raise ValueError(str(a)+\" has no inverse mod \"+str(p))",
"def ModSqrt(a, p):\n a = a % p\n for x in range(p // 2 + 1):\n if x * x % p == a:\n return x\n return None",
"def modp(p, *args):\n return [modint(a, p) for a in args]",
"def mod_pow(x,e,p):\n x = x % p\n R = 1\n while e > 0 :\n if (e%2) == 1 :\n R = (R*x) % p\n e = e//2\n x = (x*x) % p \n return(R)",
"def normalize(p):\n return p / mod(p)",
"def mod(numbers):\n result = numbers[0]\n for i in numbers[1:]:\n result = result % i\n return result",
"def mpolysmul(p,s):\n sp = []\n for c,d in p:\n sp.append((s*c,d))\n \n return sp",
"def div_mod_p(self, a, b):\n a = a % self.p\n b = b % self.p\n return a * self.pow_mod_p(b, self.p - 2, self.p) % self.p",
"def _Schoof_mod_l(self, l):\n if l == 2:\n return self._Schoof_mod2()\n E = self.cubic\n D = self.division_polynomials\n lth_div = self.division_polynomials[l]\n field = self.basefield\n bfsize = card(field)\n x = UniVarPolynomial({1:field.one}, field)\n k = bfsize % l\n x_frob = PolyPow(x, bfsize, lth_div) #x_frob=x^q\n x_frobfrob = PolyPow(x_frob, bfsize, lth_div) #x_frobfrob=x^{q^2}\n\n # test for x^{q^2} - x\n f, P = self._sub1(k, x_frobfrob - x, lth_div)\n f0, f3 = f[0], f[3]\n\n if GCD(lth_div, P).degree() > 0:\n if arith1.legendre(k, l) == -1:\n _log.debug(\"%s $\" % str((0, l)))\n return (0, l)\n\n # arith1.legendre(k, l) == 1 <=> k is QR\n w = arith1.modsqrt(k, l)\n f, P = self._sub1(w, x_frob - x, lth_div)\n\n if GCD(lth_div, P).degree() == 0: # coprime\n _log.debug(\"%s $$$$\" % str((0, l)))\n return (0, l)\n\n # there exist non trivial common divisors\n g0 = PolyPow(E, (bfsize - 1) // 2, lth_div) #y^(q-1)\n P = self._sub2(w, g0, f[3], lth_div)\n\n if GCD(lth_div, P).degree() > 0:\n _log.debug(\"%s $$\" % str((2*w % l, l)))\n return (2*w % l, l)\n else:\n _log.debug(\"%s $$$\" % str((-2*w % l, l)))\n return (-2*w % l, l)\n\n else: # coprime (GCD(P, lth_div).degree() == 0)\n Y = x - x_frobfrob\n g0 = PolyPow(E, (bfsize - 1) // 2, lth_div) #y^(q-1)\n g1 = PolyPow(g0, bfsize + 1, lth_div) #y^(q^2-1)\n f = -self._sub2(k, g1, f3, lth_div)\n h1 = PolyMulRed([f, f], lth_div)\n if k % 2 == 0:\n g = (PolyMulRed([Y, E, f3], lth_div) - f0) * 4\n h0 = PolyMulRed([g, g], lth_div)\n aux1 = PolyMulRed([f0, h0], lth_div) + h1\n X_d = PolyMulRed([E, f3, h0], lth_div)\n else:\n g = (PolyMulRed([Y, f3], lth_div) - PolyMulRed([E, f0], lth_div)) * 4\n h0 = PolyMulRed([g, g], lth_div)\n aux1 = PolyMulRed([E, PolyMulRed([f0, h0], lth_div) + h1], lth_div)\n X_d = PolyMulRed([f3, h0], lth_div)\n X_n = PolyMulRed([X_d, x_frobfrob + x_frob + x], lth_div) - aux1\n\n # loop of t\n e_q = PolyPow(self.cubic, bfsize, lth_div)\n for t in range(1, (l - 1)//2 + 1):\n Z_d_x, Z_n_x = self._Z_x(t, D, e_q, bfsize, lth_div)\n # X_n * Z_d_x == X_d * Z_n_x (mod lth_div)?\n if not PolyMod(X_n * Z_d_x - X_d * Z_n_x, lth_div):\n break\n else: # loop of t exhausted\n _log.debug(\"%s @@@\" % str((0, l)))\n return (0, l)\n\n # found: X_n * Z_d_x == X_d * Z_n_x (mod lth_div)\n y0 = PolyMulRed([-2*x_frobfrob - x, X_d], lth_div) + aux1\n if k % 2 == 0:\n Y_d = PolyMulRed([E, D[k], g, X_d], lth_div)\n else:\n Y_d = PolyMulRed([D[k], g, X_d], lth_div)\n Y_n = -PolyMulRed([g1, Y_d], lth_div) - PolyMulRed([f, y0], lth_div)\n Z_d_y, Z_n_y = self._Z_y(t, D, g0, bfsize, lth_div)\n\n # Y_n * Z_d_y == Y_d * Z_n_y (mod lth_div)?\n if PolyMod(Y_n * Z_d_y - Y_d * Z_n_y, lth_div):\n _log.debug(\"%s @@\" % str((l-t, l)))\n return (l-t, l)\n else:\n _log.debug(\"%s @\" % str((t, l)))\n return (t, l)",
"def pseudo(x,N) :\n\treturn (x**2+1)%N",
"def bulk_modulus():\n\n return 10000.0",
"def _precompute_substrings(self, p: int) -> List[int]:\n hash_vals: List[int] = [0]\n for i in range(len(self._s)):\n val = (hash_vals[i] * self.X + ord(self._s[i])) % p\n hash_vals.append(val)\n\n return hash_vals",
"def PolyMod(f, g):\n return f % g",
"def recompose(x, list_p, N):\n res = 1\n for i in zip(x, list_p):\n plus = 1\n for j in range(i[0]):\n plus *= i[1]\n plus %= N\n res *= plus\n res %= N\n return int(res % N)",
"def modular_sqrt(a, p):\n\tif legendre_symbol(a, p) != 1:\n\t\treturn 0\n\telif a == 0:\n\t\treturn 0\n\telif p == 2:\n\t\treturn p\n\telif p % 4 == 3:\n\t\treturn pow(a, (p + 1) // 4, p)\n\ts = p - 1\n\te = 0\n\twhile s % 2 == 0:\n\t\ts //= 2\n\t\te += 1\n\tn = 2\n\twhile legendre_symbol(n, p) != -1:\n\t\tn += 1\n\tx = pow(a, (s + 1) // 2, p)\n\tb = pow(a, s, p)\n\tg = pow(n, s, p)\n\tr = e\n\twhile True:\n\t\tt = b\n\t\tm = 0\n\t\tfor m in range(r):\n\t\t\tif t == 1:\n\t\t\t\tbreak\n\t\t\tt = pow(t, 2, p)\n\t\tif m == 0:\n\t\t\treturn x\n\t\tgs = pow(g, 2 ** (r - m - 1), p)\n\t\tg = (gs * gs) % p\n\t\tx = (x * gs) % p\n\t\tb = (b * g) % p\n\t\tr = m",
"def perp_vector(p, q, r):\n v = cross(q - r, q - p)\n return v / mod(v) + q",
"def mod_prime(x, y, p):\n res = 1\n x = x % p\n while y > 0:\n if y & 1:\n res = (res * x) % p\n y = y - 1\n y = y >> 1\n x = (x * x) % p\n return res",
"def __mod__(self, rhs: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(rhs, float):\n for item in self.values:\n result.append(item % rhs)\n else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n result.append(self.values[i] % rhs.values[i])\n return Simpy(result)",
"def modulus(vect):\n return np.sqrt(vect[0]**2 + vect[1]**2 + vect[2]**2)",
"def E_LE(self,s,l):\n if s>l: return self.E_LE(l,s)\n delta = ((s/l)-(l/(s+l)))\n return delta-delta%self._tau",
"def mod(self):\n p = self.end - self.start\n return p.mod()",
"def lpflip(P):\n if len(P) == 1:\n return 0\n\n Z = logsumexp(P)\n P -= Z\n\n NP = np.exp(np.copy(P))\n\n assert math.fabs(1.0-sum(NP)) < 10.0**(-10.0)\n\n return pflip(NP)",
"def modf(x):\n return 0.0, 0.0",
"def lcms(argg: range) -> int:\n l = 1\n for arg in argg:\n l = lcm(l, arg)\n return l",
"def evansMod(x,n):\n if x%n == 0:\n return 1\n else:\n return 0",
"def modpow(a, n, p):\n res = 1\n a = a % p\n while n > 0:\n # if n is odd\n if n & 1:\n res = (res * a) % p\n n = n >> 1 # n = n / 2\n a = (a*a) % p\n\n return res",
"def Findlt(l,sp,rhs):\n m = sp.M(l)\n return (m / l**3) - rhs",
"def inverse_modulo_p(a, p):\n prime = p\n \n while a < 0:\n a += prime\n \n y1 = 1\n y2 = 0\n \n while a != 1:\n q = (p // a) % prime\n # use of integer division // speeded algorithm up by huge factor\n \n # save temporary values\n tmp_a = a\n tmp_y2 = y2\n # compute all these simultaneously\n a = (p - (q*a)) % prime\n p = tmp_a\n y2 = y1\n y1 = (tmp_y2 - (q*y1)) % prime\n \n return y1 % prime",
"def euler_criterion(a, p):\n return a ** ((p - 1) / 2) % p == 1"
] |
[
"0.7254188",
"0.6605625",
"0.6407147",
"0.6380116",
"0.63562644",
"0.60197103",
"0.59769803",
"0.59705365",
"0.59639233",
"0.59582186",
"0.59088165",
"0.5866518",
"0.5865667",
"0.585502",
"0.5849936",
"0.5811184",
"0.5805049",
"0.577011",
"0.5748945",
"0.57488036",
"0.5728245",
"0.5716619",
"0.5711522",
"0.57105196",
"0.56816417",
"0.56745636",
"0.5609809",
"0.5600331",
"0.55798364",
"0.55755323"
] |
0.7123825
|
1
|
N should be an int specifying what size N-gram window to use. Default is 2 (bigram). features should be an integer specifying how many of the len(alph)^N features to use to build each profile. Default is 676 (26^2). extended_alphabet should be a boolean value specifying whether to build profiles using only alphabetical characters (False) or including punctuation (True). Default is False.
|
def __init__(self, N=2, features=0, extended_alphabet=False):
self.alph += (string.punctuation + ' ') if extended_alphabet else ''
self.N = N
self.features = features if features != 0 else len(self.alph)**N
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _profile(self, text):\n prof = zeros(len(self.alph)**self.N)\n ngs = ngrams(text, self.N)\n for tup in ngs:\n loc = 0\n for i in range(len(tup)):\n loc += (len(self.alph)**i) * self.alph.index(tup[i])\n prof[loc] += 1\n return prof",
"def propername_featurize(input_data,N, MinFreq,model_choice =\"NGram\"):\n def to_lowercase(text):\n return text.lower()\n\n def remove_URL(text):\n return re.sub(r\"http\\S+\", \"\", text)\n def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words\n\n def tokenize(text):\n return text.split()\n def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stop_word:\n new_words.append(word)\n return new_words\n def detokenize_words(words):\n separator = ' '\n return separator.join(words)\n def preprocess_text(df):\n df['text'] = df['text'].apply(to_lowercase)\n df['text'] = df['text'].apply(remove_URL)\n df['text'] = df['text'].apply(tokenize)\n df['text'] = df['text'].apply(remove_non_ascii)\n df['text'] = df['text'].apply(detokenize_words) \n return df\n def character_ngram(text_matrix, N, MinFreq): #array of non-tokenized text\n #tokenize\n all_tokenized_text = []\n #build all token\n flatten_tokenized_text = []\n for j in text_matrix:\n cur_text = \"\".join(j.split())\n cur_feature = []\n \n for i in range(N[0]-1,N[1]): \n \n for l in range(len(cur_text) - i):\n cur_feature.append(cur_text[l:l+i+1])\n \n all_tokenized_text.append(cur_feature)\n flatten_tokenized_text.extend(cur_feature)\n charfreq = {}\n for i in flatten_tokenized_text:\n if i not in charfreq.keys():\n charfreq[i] = 1\n else:\n charfreq[i] += 1\n selected_feature = []\n for i, item in charfreq.items():\n if item >= MinFreq:\n selected_feature.append(i)\n dim = len(selected_feature)\n encoded_matrix = []\n selected_feature = np.array(selected_feature)\n for i in all_tokenized_text:\n cur_text = np.array(i)\n cur_encoded = np.zeros(dim)\n cur_idx = []\n for j in range(len(cur_text)):\n idx = np.where(selected_feature == cur_text[j]) \n if len(idx[0]) != 0: \n cur_idx.append(idx[0][0])\n #binary character presence \n cur_encoded[cur_idx] = 1\n\n encoded_matrix.append(cur_encoded)\n encoded_matrix = np.array(encoded_matrix)\n\n return encoded_matrix, selected_feature\n def task_specific_featurize(feature_value):\n feature_dic = {\"contain_numerics\":[], \"contain_special_punc\":[],\"contain_inc\":[],\"Small_token_length\":[]}\n special_pun = \"&\\?-:%\"\n company_col = [\"co.\",\"inc.\"]\n def hasNumbers(string):\n return any(char.isdigit() for char in string)\n for i in text_feature:\n if hasNumbers(i):\n feature_dic[\"contain_numerics\"].append(1)\n else:\n feature_dic[\"contain_numerics\"].append(0)\n Spec_Punc = False\n for l in special_pun:\n if i.find(l) != -1:\n feature_dic[\"contain_special_punc\"].append(1)\n Spec_Punc = True\n break\n if Spec_Punc == False:\n feature_dic[\"contain_special_punc\"].append(0)\n Contain_Com = False\n for l in company_col:\n if i.find(l) != -1:\n feature_dic[\"contain_inc\"].append(1)\n Contain_Com = True\n break\n if Contain_Com == False:\n feature_dic[\"contain_inc\"].append(0)\n token_length = len(i.split())\n if token_length <= 1:\n feature_dic[\"Small_token_length\"].append(1)\n else:\n feature_dic[\"Small_token_length\"].append(0)\n\n encoded_matrix = pd.DataFrame(feature_dic).values\n selected_feature = list(feature_dic.keys()) \n return encoded_matrix, selected_feature\n # TODO: Implement featurization of input.\n matrix_processed = preprocess_text(input_data)\n text_feature = matrix_processed[[\"text\"]].values.flatten() \n if model_choice == \"NGram\":\n \n encoded_matrix, selected_feature = 
character_ngram(text_feature, N, MinFreq)\n elif model_choice == \"TS\":\n encoded_matrix, selected_feature = task_specific_featurize(text_feature)\n elif model_choice == \"Combined\":\n\n encoded_matrix_specific, selected_feature_specific = task_specific_featurize(text_feature) \n encoded_matrix_bow, selected_feature_bow = character_ngram(text_feature, N, MinFreq)\n encoded_matrix = np.hstack((encoded_matrix_bow,encoded_matrix_specific))\n selected_feature = list(selected_feature_bow)\n selected_feature.extend(selected_feature_specific)\n \n return encoded_matrix,selected_feature",
"def plot_features(self, N=10, save=None):\n makedir(self.plotdir)\n if save is None:\n save = '{:s}/features.png'.format(self.plotdir)\n \n feats = []\n fls = glob('{:s}/*.fts'.format(self.modeldir))\n for i,fl in enumerate(fls):\n with open(fl) as fp:\n lns = fp.readlines()\n feats += [' '.join(ln.rstrip().split()[1:]) for ln in lns] \n\n f = plt.figure(figsize=(8, 16))\n ax = plt.axes([0.05, 0.05, 0.4, 0.9])\n height = 0.8\n # sort features in descending order by frequency of appearance\n labels = list(set(feats) - set(['']))\n freqs = [feats.count(label) for label in labels]\n labels = [label for _,label in sorted(zip(freqs,labels))][::-1]\n freqs = sorted(freqs)[::-1]\n fts = copy(labels)\n \n N = np.min([N, len(freqs)])\n labels = ['\\n'.join(wrap(' '.join(l.split('_')), 40)) for l in labels ][:N]\n freqs = freqs[:N]\n inds = range(len(freqs))\n ax.barh(inds, np.array(freqs)/len(fls), height=height, color='#90EE90')\n ax2 = ax.twiny()\n ax.xaxis.grid()\n ax2.set_xlim(ax.get_xlim())\n xm = np.mean(ax.get_xlim())\n for ind,label in zip(inds,labels):\n ax.text(xm, ind, label, ha='center', va='center')\n plt.yticks([])\n for axi in [ax,ax2]: axi.set_ylim([inds[0]-0.5, inds[-1]+0.5])\n ax.invert_yaxis()\n \n # righthand feature plots\n axs = []\n dy1 = 0.9*height/N\n dy2 = 0.9*(1-height)/N\n for i in range(N):\n axi = plt.axes([0.5, 0.05+i*(dy1+dy2)+dy2/2, 0.4, dy1])\n axi.set_yticks([])\n axs.append(axi)\n axs = axs[::-1]\n\n fM,ys = self._extract_features(self.ti_forecast, self.tf_forecast)\n inds0 = np.where(ys['label']<1)\n \n inds = []\n for te in self.data.tes:\n inds.append(np.where((ys['label']>0)&(abs((te-ys.index).total_seconds()/(3600*24))<5.)))\n cols = ['b','g','r','m','c']\n \n N0 = int(np.sqrt(len(inds0[0])/2.))\n\n for axi, ft in zip(axs,fts):\n ft0 = np.log10(fM[ft].iloc[inds0]).replace([np.inf, -np.inf], np.nan).dropna()\n ft0_min = np.mean(ft0)-3*np.std(ft0)\n ft0 = ft0[ft0>ft0_min]\n y,e = np.histogram(ft0, N0)\n x = 0.5*(e[:-1]+e[1:])\n axi.fill_between(x, [0,]*len(x), y, color='#add8e6', label='all windows')\n ylim = axi.get_ylim()\n axi.set_ylim(ylim)\n ym = np.mean(ylim)\n dy = (ylim[1]-ylim[0])/(len(inds)+1)\n for i, ind, col in zip(range(-2,3), inds, cols):\n ft1 = np.log10(fM[ft].iloc[ind]).replace([np.inf, -np.inf], np.nan).dropna()\n te = self.data.tes[i+2]\n lbl = te.strftime('%b')+' '+('{:d}'.format(te.year))\n axi.scatter(ft1, [ym+dy*i,]*len(ft1), np.arange(1,len(ft1)+1)*6, col, marker='x', label=lbl)\n\n axs[0].legend(prop={'size':6})\n\n plt.savefig(save, dpi=300)\n plt.close(f)",
"def generate(self, context=None, n=20):\n if context is None:\n context = ''\n final_tokens = []\n else:\n final_tokens = self.split(context)\n context = ' '.join(['<'] * (self.n - 1) + [context.replace(' . ', ' .%s ' % (' <' * (self.n - 1)))])\n tokens = self.split(context)\n\n while len(final_tokens) < n:\n\n # find the longest context that exists in the model, from size self.n - 1 to 1\n history = tokens[-self.n + 1:]\n while len(history) > 0:\n histories = self.n_grams_by_len[len(history) - 1]\n if self.join(history) in histories:\n break\n else: # try smaller history\n history = history[1:]\n\n # generate the candidate tokens with their respective weights\n candidates, weights = [], []\n len_history = len(history)\n history = self.join(history)\n for n_gram, n_gram_count in self.n_grams_by_len[len_history].items():\n n_gram_tokens = self.split(n_gram)\n n_gram_history = self.join(n_gram_tokens[:-1])\n if n_gram_history == history:\n # candidate = n_gram_tokens[-1]\n # if candidate != '<':\n candidates.append(n_gram_tokens[-1])\n weights.append(n_gram_count)\n\n # select a candidate and append to generated text\n selected_candidate = random.choices(candidates, weights=weights)[0]\n if selected_candidate == '>': # text ending character\n break\n tokens.append(selected_candidate)\n if selected_candidate != '<':\n final_tokens.append(selected_candidate)\n\n return self.join(final_tokens) # remove artificial prefix (made of '<' characters)",
"def build_ngram_vocab(self, n):\n max_ngram_per_word = 0\n ngram_dict = collections.defaultdict(int)\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n _word = '^' + word + '$'\n ngram_counts = len(_word) - n + 1\n if ngram_counts > max_ngram_per_word:\n max_ngram_per_word = ngram_counts\n for i in range(ngram_counts):\n ngram = _word[i:i + n]\n ngram_dict[ngram] += 1\n\n unk_ngram_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(ngram_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_ngram_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_ngram_list, max_ngram_per_word",
"def __init__(self, n, sents, corpus='', gamma=None, addone=True):\n self.n = n\n self.smoothingtechnique = 'Interpolated (Jelinek Mercer) Smoothing'\n self.gamma = gamma\n self.addone = addone\n self.counts = counts = defaultdict(int)\n self.gamma_flag = True\n self.corpus = corpus\n # way more efficient than use set unions\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = list(set(voc))\n\n if gamma is None:\n self.gamma_flag = False\n\n # if not gamma given\n if not self.gamma_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent for training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n\n for sent in train_sents:\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(train_sents)\n # variable only for tests\n self.tocounts = counts\n # search the gamma that gives lower perplexity\n gamma_candidates = [i*50 for i in range(1, 15)]\n # xs is a list with (gamma, perplexity)\n xs = []\n sents = train_sents\n for aux_gamma in gamma_candidates:\n self.gamma = aux_gamma\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_gamma, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.gamma = xs[0][0]\n with open('old-stuff/interpolated_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Gamma: {}\\n'.format(self.gamma))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n else:\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n sents = list(map((lambda x: x + ['</s>']), sents))\n\n for sent in sents:\n # counts now holds all k-grams for 0 < k < n + 1\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(sents)",
"def test_ngram():\n # Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n print(vocabsize)\n ### END YOUR CODE",
"def __init__(self, n, sents, corpus='', beta=None, addone=True):\n self.n = n\n self.beta = beta\n self.corpus = corpus\n self.beta_flag = True\n self.addone = addone\n self.smoothingtechnique = 'Back Off (Katz) with Discounting Smoothing'\n self.counts = counts = defaultdict(int)\n self.A_set = defaultdict(set)\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = set(voc)\n if beta is None:\n self.beta_flag = False\n\n # if no beta given, we compute it\n if not self.beta_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent por training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n for sent in train_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(train_sents)\n counts[('</s>',)] = len(train_sents)\n\n self.tocounts = counts\n # search for the beta that gives lower perplexity\n beta_candidates = [i*0.1 for i in range(1, 10)]\n # xs is a list with (beta, perplexity)\n xs = []\n self.sents = train_sents\n for aux_beta in beta_candidates:\n self.beta = aux_beta\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_beta, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.beta = xs[0][0]\n with open('old-stuff/backoff_'+str(n)+'_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Beta: {}\\n'.format(self.beta))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n else:\n sents = list(map((lambda x: x + ['</s>']), sents))\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n\n for sent in sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(sents)\n counts[('</s>',)] = len(sents)",
"def __init__(self, n=3, chars=False, log_base=math.e, stupid_backoff_alpha=0.3):\n self.n = n\n self.chars = chars\n self.log_base = log_base\n self.n_grams_by_len = []\n self.corpus_len = 0\n self.stupid_backoff_alpha = stupid_backoff_alpha",
"def compute_sparse_features(word: str,\n n_chars: int = 26,\n max_ngram_size: int = 3):\n word_lower = word.lower()\n ones = []\n n_grams = ngramify(word_lower, max_ngram_size)\n n_grams_filtered = filter(lambda x: regex.match('^[a-z]+$', x), n_grams)\n for ngram in n_grams_filtered:\n index = ngram_to_index(n_chars=n_chars, ngram=ngram)\n ones.append(index)\n return f7(ones)",
"def __makeNgrams(self, n):\n # start_time = time.time()\n ngrams = dict()\n itergrams = dict()\n\n for k in range(2,n+1):\n itergrams[k] = list(nltk.ngrams(self.words, k))\n\n for k, grams in itergrams.items():\n kgrams = defaultdict(Counter)\n for gram in grams: \n kgram = list(gram)\n key = ' '.join(kgram[:k-1])\n kgrams[key].update({kgram[-1]})\n ngrams[k] = kgrams\n # print ('finish gen ', k, 'grams at ', time.time()-start_time)\n return ngrams",
"def get_ngrams(seq, n):\n return",
"def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)",
"def test_top_n_grams():\n ngrams = NgramFrequencies()\n unigrams_dic = {\n \"COUNT\": 10,\n \"time_burton's\": 5,\n \"burton's_corpse\": 4,\n \"corpse_bride\": 1\n }\n top_n_unigrams = ngrams.top_n_grams(unigrams_dic, 2)\n assert top_n_unigrams == [\n (\"time_burton's\", 0.5),\n (\"burton's_corpse\", 0.4)\n ]",
"def build_ngrams(tokens, n=2):\n ngrams = zip(*(islice(group, idx, None) for idx, group in enumerate(tee(tokens, n))))\n return ngrams",
"def test_ngram():\n #Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n ### END YOUR CODE",
"def full_ngrams(items, n):\n ngs = {}\n for i in xrange(1, n+1):\n ngs.update(gen_ngrams(items, i))\n return ngs",
"def ner_features(tokens, index, history):\n\n # Pad the sequence with placeholders\n tokens = [('__START2__', '__START2__'), ('__START1__', '__START1__')] + list(tokens) + [('__END1__', '__END1__'),\n ('__END2__', '__END2__')]\n history = ['__START2__', '__START1__'] + list(history)\n\n # shift the index with 2, to accommodate the padding\n index += 2\n\n word, pos = tokens[index]\n prevword, prevpos = tokens[index - 1]\n prevprevword, prevprevpos = tokens[index - 2]\n nextword, nextpos = tokens[index + 1]\n nextnextword, nextnextpos = tokens[index + 2]\n previob = history[-1]\n prevpreviob = history[-2]\n\n feat_dict = {\n 'word': word,\n 'lemma': stemmer.stem(word),\n 'pos': pos,\n 'shape': shape(word),\n\n 'next-word': nextword,\n 'next-pos': nextpos,\n 'next-lemma': stemmer.stem(nextword),\n 'next-shape': shape(nextword),\n\n 'next-next-word': nextnextword,\n 'next-next-pos': nextnextpos,\n 'next-next-lemma': stemmer.stem(nextnextword),\n 'next-next-shape': shape(nextnextword),\n\n 'prev-word': prevword,\n 'prev-pos': prevpos,\n 'prev-lemma': stemmer.stem(prevword),\n 'prev-iob': previob,\n 'prev-shape': shape(prevword),\n\n 'prev-prev-word': prevprevword,\n 'prev-prev-pos': prevprevpos,\n 'prev-prev-lemma': stemmer.stem(prevprevword),\n 'prev-prev-iob': prevpreviob,\n 'prev-prev-shape': shape(prevprevword),\n }\n\n return feat_dict",
"def _create_ngrams(tokens, n):\n\n ngrams = collections.Counter()\n for ngram in (tuple(tokens[i:i + n]) for i in xrange(len(tokens) - n + 1)):\n ngrams[ngram] += 1\n return ngrams",
"def feature(self, N, pcomment, comment, positive, negative, posSub, negSub):\n features = {}\n # features top N\n for i in range(0, N - 1):\n if positive[i] in comment:\n features[\"positive(%s)\" % positive[i]] = True\n else:\n features[\"positive(%s)\" % positive[i]] = False\n if negative[i] in comment:\n features[\"negative(%s)\" % negative[i]] = True\n else:\n features[\"negative(%s)\" % negative[i]] = False\n # features subjetive lists\n for word in set(comment):\n if word in posSub and comment.count(word) > 0:\n features[\"subjetive_pos(%s)\" % word] = comment.count(word)\n if word in negSub and comment.count(word) > 0:\n features[\"subjetive_neg(%s)\" % word] = comment.count(word)\n\n #custom features\n if self.generateHeuristic(pcomment):\n features[\"no_gusto\"] = True\n\n return features",
"def _get_bag_of_pos_ngram(words, index, window_size, N):\n bos = DummyWord(pos=utils.BEGIN_OF_SENTENCE, upos=utils.BEGIN_OF_SENTENCE, dependency_relation=utils.BEGIN_OF_SENTENCE)\n eos = DummyWord(pos=utils.END_OF_SENTENCE, upos=utils.END_OF_SENTENCE, dependency_relation=utils.END_OF_SENTENCE)\n words = [bos] * (window_size + N) + words + [eos] * (window_size + N)\n index += (window_size + N)\n return [\n \"_\".join([_get_word_feature(w) for w in words[i:i+N]])\n for i in range(index-window_size, index+window_size+1)]",
"def ngrams(tokens,lang):\n stopwords = stops.stopwords[lang]\n max = len(tokens)\n ngrams = []\n left_punctuation = '!\"%&\\'()*+,-./:;<=>?[\\\\]^_`{|}~'\n\n for i in range(1,max):\n for j in xrange(0,len(tokens)-(i-1)):\n if __check_features(tokens[j:j+i],stopwords):\n ng_str = \" \".join(tokens[j:j+i])\n ng_str = (ng_str.rstrip(string.punctuation)).lstrip(left_punctuation) \n ngrams.append(ng_str)\n \n ng_str = \" \".join(tokens)\n ng_str = (ng_str.rstrip(string.punctuation)).lstrip(left_punctuation) \n ngrams.append(ng_str)\n return ngrams",
"def create_ngrams(word_list, n):\n yield zip(*[word_list[i:] for i in range(n)])",
"def __init__(self, sents, n, corpus='', D=None):\n\n self.n = n\n self.D = D\n self.corpus = corpus\n self.smoothingtechnique = 'Kneser Ney Smoothing'\n # N1+(·w_<i+1>)\n self._N_dot_tokens_dict = N_dot_tokens = defaultdict(set)\n # N1+(w^<n-1> ·)\n self._N_tokens_dot_dict = N_tokens_dot = defaultdict(set)\n # N1+(· w^<i-1>_<i-n+1> ·)\n self._N_dot_tokens_dot_dict = N_dot_tokens_dot = defaultdict(set)\n self.counts = counts = defaultdict(int)\n vocabulary = []\n\n if D is None:\n total_sents = len(sents)\n k = int(total_sents*9/10)\n training_sents = sents[:k]\n held_out_sents = sents[k:]\n training_sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], training_sents))\n for sent in training_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n - 1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n D_candidates = [i*0.12 for i in range(1, 9)]\n xs = []\n for D in D_candidates:\n self.D = D\n aux_perplexity = self.perplexity(held_out_sents)\n xs.append((D, aux_perplexity))\n xs.sort(key=lambda x: x[1])\n self.D = xs[0][0]\n with open('old-stuff/kneserney_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('D: {}\\n'.format(self.D))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n # discount value D provided\n else:\n sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], sents))\n for sent in sents:\n for j in range(n+1):\n # all k-grams for 0 <= k <= n\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n # e.g., ngram = (1,2,3,4,5,6,7,8)\n # right_token = (8,)\n # left_token = (1,)\n # right_kgram = (2,3,4,5,6,7,8)\n # left_kgram = (1,2,3,4,5,6,7)\n # middle_kgram = (2,3,4,5,6,7)\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n-1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n\n xs = [k for k, v in counts.items() if v == 1 and n == len(k)]\n ys = [k for k, v in counts.items() if v == 2 and n == len(k)]\n n1 = len(xs)\n n2 = len(ys)\n self.D = n1 / (n1 + 2 * n2)",
"def make_profiles(datafolder, profilefolder, size):\n files = os.listdir(datafolder) \n for file in files:\n languagename = file.split(\"-\")[0]\n encodering = file.split(\"-\")[1]\n bestand = open('training/' + file,'r' , encoding=encodering) #Reads with the correct encoding.\n test = langdetect.trigram_table(bestand.read(), size) #Creates a ngram table of the content of the file.\n filename = languagename + '.' + str(size) + '.txt' #Creates a new filename.\n newfile = open('trigram-models/' + filename, 'w', encoding=\"utf-8\") \n langdetect.write_trigrams(test, 'trigram-models/' + filename) #Creates a new file with the ngrams and their frequency.\n newfile.close()",
"def _generateNgrams(self,text,n=2):\n token = Utilities.CVTokeniser(text)\n # token = nltk.word_tokenize(text)\n computedNgrams=ngrams(token,n)\n return Counter(computedNgrams)",
"def construct_features(seq_df, paaclamb=6, paacw=0.5):\n seq_df = insert_aac(seq_df)\n seq_df = insert_ngrams(seq_df, n=2)\n seq_df = insert_cksaagp(seq_df, gap=3) # As the maximum motif length = 5.\n seq_df = insert_paac(seq_df, lamb=paaclamb, w=paacw)\n seq_df = insert_phycs(seq_df)\n\n return seq_df",
"def most_informative_features(self, n=100):\n\t# The set of (fname, fval) pairs used by this classifier.\n\tfeatures = set()\n\t# The max & min probability associated w/ each (fname, fval)\n\t# pair. Maps (fname,fval) -> float.\n\tmaxprob = defaultdict(lambda: 0.0)\n\tminprob = defaultdict(lambda: 1.0)\n\n\tfor (label, fname), probdist in self._feature_probdist.items():\n\t\tfor fval in probdist.samples():\n\t\t\tfeature = (fname, fval)\n\t\t\tfeatures.add( feature )\n\t\t\tp = probdist.prob(fval)\n\t\t\tprint p\n\t\t\tmaxprob[feature] = max(p, maxprob[feature])\n\t\t\tminprob[feature] = min(p, minprob[feature])\n\t\t\tif minprob[feature] == 0:\n\t\t\t\tfeatures.discard(feature)\n\t\t\t# print maxprob\n\t\t\t# print minprob\n\n\n\t# Convert features to a list, & sort it by how informative\n\t# features are.\n\tfeatures = sorted(features,\n\t key=lambda feature_: minprob[feature_]/maxprob[feature_])\n\treturn features[:n]",
"def n_grams(tokens, n=1):\n shiftToken = lambda i: (el for j,el in enumerate(tokens) if j>=i)\n shiftedTokens = (shiftToken(i) for i in range(n))\n tupleNGrams = zip(*shiftedTokens)\n return tupleNGrams",
"def generate_ngrams(tokens, ngram_size, *, include_terminator = False):\n \n if ngram_size <= 0:\n raise Exception(\"ngram size must be positive\")\n\n if not include_terminator:\n for token_index in range(len(tokens)):\n leader = get_leader(tokens, token_index, ngram_size - 1)\n token = tokens[token_index]\n \n yield leader, token\n else:\n for token_index in range(len(tokens) + 1):\n leader = get_leader(tokens, token_index, ngram_size - 1)\n token = tokens[token_index] if token_index < len(tokens) else None\n\n yield leader, token"
] |
[
"0.5295727",
"0.51407915",
"0.5013194",
"0.5007742",
"0.50075006",
"0.49982417",
"0.49953595",
"0.49740466",
"0.49623084",
"0.49546182",
"0.49153745",
"0.49083975",
"0.49014306",
"0.48962018",
"0.4837598",
"0.48255348",
"0.48082608",
"0.47809005",
"0.47733",
"0.47624588",
"0.47441715",
"0.4742377",
"0.47370747",
"0.47264552",
"0.4721011",
"0.47075108",
"0.46983674",
"0.46964538",
"0.46949458",
"0.4690628"
] |
0.7015037
|
0
|
Cleans the text into a form that works for processing by removing tabs and linefeeds, and punctuation if extended_alphabet = False, along with making the text uniformly lowercase and transforming it into ASCII-compatible characters.
|
def _clean(self, text):
if len(self.alph) == 26:
text = sub('[\n\t ' + string.punctuation + ']+?', '', text)
else:
text = sub('[\n\t]+?', '', text)
text = text.lower()
text = text.encode('ascii', 'ignore').decode()
return text
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def text_cleaning(self, text):\n # remove string formatting '\\n' or '\\t'\n tmp_text = re.sub(r'\\n+', '. ', text)\n tmp_text = re.sub(r'\\t+', '. ', text)\n # remove words with non-ascii characters\n tmp_text = \" \".join([word for word in tmp_text.split() if self.is_ascii(word)])\n # remove email address\n tmp_text = \" \".join([word for word in tmp_text.split() if not word.startswith(\"@\")])\n # remove urls\n tmp_text = re.sub(r'http\\S+', '', tmp_text, flags=re.MULTILINE)\n tmp_text = re.sub(r'www\\S+', '', tmp_text, flags=re.MULTILINE)\n # remove punctuation but . (to split sentences)\n cleaned_text = re.sub('[^A-Za-z.,]+', ' ', tmp_text)\n # lowercase\n cleaned_text = cleaned_text.lower()\n\n return cleaned_text",
"def clean_text(txt):\n\n cleaned_txt = ''\n for character in txt:\n if character not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVQXWY ': #punctuation\n character = ''\n cleaned_txt += character\n elif character == character.upper(): #uppercase\n character = character.lower()\n cleaned_txt += character\n else:\n cleaned_txt += character\n return cleaned_txt",
"def clean_text(txt):\n\n alphabet= 'abcdefghijklmnopqrstuvwxyz '\n\n ALPHABET= 'ABCDEFGHIJKLMNOPQRSTUVWXYZ '\n\n new_words=''\n \n for i in txt:\n if i in alphabet or i in ALPHABET:\n new_words+= i\n\n clean=new_words.lower().split()\n\n return clean",
"def clean_text(text):\n text = text.lower()\n text = text.replace('\\xa0', ' ')\n text = text.replace('fls.', 'folhas ')\n text = text.replace('fl.', 'folha ')\n text = text.replace('arts.', 'artigos ')\n text = text.replace('art.', 'artigo ')\n text = re_tree_dots.sub('...', text)\n text = re.sub(r'\\.\\.\\.', ' ', text)\n text = re_remove_brackets.sub(' ', text)\n text = re_changehyphen.sub('-', text)\n text = re_remove_html.sub(' ', text)\n text = re_transform_numbers.sub('0', text)\n text = re_transform_url.sub('URL', text)\n text = re_transform_emails.sub('EMAIL', text)\n text = re_quotes_1.sub(r'\\1\"', text)\n text = re_quotes_2.sub(r'\"\\1', text)\n text = re_quotes_3.sub('\"', text)\n text = re.sub('\"', ' ', text)\n text = re_dots.sub('.', text)\n text = re_punctuation.sub(r'\\1', text)\n text = re_hiphen.sub(' - ', text)\n text = re_punkts.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_b.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_c.sub(r'\\1 \\2', text)\n text = re_doublequotes_1.sub('\\\"', text)\n text = re_doublequotes_2.sub('\\'', text)\n text = re_trim.sub(' ', text)\n return text.strip()",
"def clean_text(text):\n new_text = \"\"\n text = text.lower()\n for character in text:\n if character.isalpha():\n new_text = new_text + character\n return new_text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()",
"def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()",
"def _clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text",
"def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = clean_number(text)\n text = decontracted(text)\n text = correct_spelling(text)\n text = spacing_punctuation(text)\n text = spacing_some_connect_words(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n text = text.lower()\n return text",
"def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)",
"def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt",
"def norm_text(self, text):\n\n # encode to apply utf-8 and decode to remove initial 'b'\n text = str(text.encode('utf-8').decode('utf-8'))\n text = text.lower()\n\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text",
"def preprocess_text(text):\n # replace non characers with space and lower case\n temp = re.sub(r\"[/W/D/S.,-]+\", \" \", str(text).lower())\n # merge multiple spaces to a single one\n return re.sub(r\"[ ]+\", \" \", temp)",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text(txt):\n\n for symbol in \"\"\".,'?!()/-:;\"\"\":\n txt = txt.replace(symbol, '')\n txt = txt.lower()\n txt = txt.split()\n return txt",
"def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")",
"def normalize_text(text, lower=True, punctuations=',!?:;', chars_to_remove=r'\\(\\)\\[\\]\\{\\}\\<\\>\\#*\"-',\n char_to_make_whitespace='/'):\n if lower:\n text = text.lower()\n text = text.strip() # remove trailing spaces\n text = re.sub(r'([' + chars_to_remove + '])', '', text) # remove characters\n text = re.sub('([' + punctuations + '])', r' \\1', text) # add space before punctuations\n text = re.sub('([' + char_to_make_whitespace + '])', ' ', text) # replace with space\n text = re.sub(r'\\s+', ' ', text) # remove redundant spaces\n\n # treat points especially, for the model to be able to split sentences:\n text = re.sub(r'(\\. )', r' \\1', text) # add space only before points not part of abbreviations (e.g. U.S.A.)\n text = re.sub(r'\\.\\. \\.', r' ...', text) # join the ruined ellipsis ('...')\n if text[-1] == '.':\n text = text[:-1] + ' .'\n return text",
"def clean_text(text):\n text = str(text).lower()\n text = text.strip(string.punctuation)\n text = re.sub(\"&\", '', text)\n text = re.sub(\"https\", '', text)\n text = re.sub('\\W\\s', '', text)\n text = re.sub('\\s,\\W', '', text)\n text = re.sub('[.!@#$%^&*()_,:;/-]', '', text)\n text = re.sub(\"\\d+\", '', text)\n\n return text",
"def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text",
"def clean(text):\n\n # removing paragraph numbers\n text = re.sub('[0-9]+.\\t', '', str(text))\n # removing new line characters\n text = re.sub('\\n ', ' ', str(text))\n text = re.sub('\\n', ' ', str(text))\n # removing apostrophes\n text = re.sub(\"'s\", '', str(text))\n # removing hyphens\n text = re.sub(\"-\", '', str(text))\n text = re.sub(\"— \", '', str(text))\n # removing quotation marks\n text = re.sub('\\\"', '', str(text))\n # removing salutations\n text = re.sub(\"Mr\\.\", 'Mr', str(text))\n text = re.sub(\"Mrs\\.\", 'Mrs', str(text))\n # removing any reference to outside text\n text = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", str(text))\n\n return text",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)"
] |
[
"0.7089762",
"0.7030888",
"0.7011495",
"0.6901245",
"0.68705463",
"0.68348545",
"0.68348545",
"0.68334574",
"0.67861676",
"0.67735624",
"0.6761075",
"0.673244",
"0.6727343",
"0.6723118",
"0.6716831",
"0.67066026",
"0.6696384",
"0.6644331",
"0.6644331",
"0.6644331",
"0.6644331",
"0.6644331",
"0.6644331",
"0.66373426",
"0.663578",
"0.66037226",
"0.6600147",
"0.65959746",
"0.65845346",
"0.6563454"
] |
0.7909278
|
0
|
Reduces features to ones with largest variance. If "features" is not specified in instantiation, same as original profile.
|
def _reduceFeatures(self):
# Adds up all profiles corresponding to each author,
# then compiles into a matrix of these "group" profiles.
group_profiles = {auth : zeros(len(self.alph)**self.N) for auth in set(self.train_data[1])}
for i in range(len(self.train_data[1])):
group_profiles[self.train_data[1][i]] += self.train_data[0][i]
profile_matrix = array([group_profiles[auth] for auth in group_profiles])
# Takes the variances for all features across the "group" profiles,
# then extracts the indices of the features with the highest variances.
        variances = profile_matrix.var(axis=0)
        self.feature_indices = argsort(variances)[-self.features:]
# Recompiles the training data.
self.train_data[0] = array([prof[self.feature_indices] for prof in self.train_data[0]])
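A minimal standalone sketch of the same top-k variance selection on a plain NumPy matrix, kept outside the class so it can be run directly; the profile values and k below are made up for illustration.

import numpy as np

# Rows play the role of the per-author "group" profiles built above.
profiles = np.array([[0.1, 3.0, 0.2],
                     [0.1, 1.0, 0.9],
                     [0.1, 2.0, 0.4]])
k = 2                                    # number of features to keep
variances = profiles.var(axis=0)         # variance of each feature across authors
keep = np.argsort(variances)[-k:]        # indices of the k largest variances
reduced = profiles[:, keep]              # profiles restricted to the kept features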
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _scale_features(self, features):\n assert isinstance(features, np.ndarray), \"Input is not a numpy array!\"\n\n return self.scaler.transform(features.reshape(1, -1))",
"def test_reduce_features_size(self):\n # Get some data\n data = array([[0.564, 20.661, 1], [-18.512, 41.168, -1],\n [-0.009, 20.440, 7]])\n cdata = CData(data)\n\n # ===================================\n # Perform PCA to reduce to 2 features\n # ===================================\n\n # Reduce by nearest int closest to 60%, rounding up\n frac = 0.6\n cdata.reduce_features(frac)\n self.assertTrue(cdata.data.shape == (3, 2))",
"def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]",
"def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r",
"def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.toarray() # densify -- these are tiny and we don't care",
"def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features",
"def __call__(self, features: List[List[float]]) -> List[List[float]]:\n self.count += 1\n if self.count <= 1:\n self.feature_fix = features\n self.gmin = np.amin(features, axis=0) \n self.gmax = np.amax(features, axis=0) \n self.gdiff = np.subtract(self.gmax,self.gmin)\n \n features_scale = np.full((np.array(features).shape[0],np.array(features).shape[1]), np.nan)\n for i in range(np.array(features).shape[0]):\n for j in range(np.array(features).shape[1]):\n features_scale[i,j] = (np.array(features)[i,j] - self.gmin[j])/self.gdiff[j]\n \n return features_scale",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()",
"def feature_selection_RFE(features):\n feature_test = features[list(features.keys())[0]]\n n_dims = len(feature_test[0])\n overall_accu = list()\n for dim_picked in range(n_dims, 0, -1):\n accu = fitting_scoring(features, cv=2, verbose=False, is_RFE_mode=True, n_dims_RFE=dim_picked)\n overall_accu.append(accu[0])\n\n return overall_accu",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features).tocoo()\n return sparse_to_tensor(features)",
"def devectorize(theta, features, default=0.0, tolerance=0.0):\n t = 0;\n factors = []\n for u in features:\n fnext = Factor(u.vars, theta[t:t+u.numel()]);\n t += u.numel();\n if (fnext-default).abs().sum() > tolerance: # if any entries are different from the default,\n factors.append( fnext.expIP() ); # add them as factors (exp?)\n return factors",
"def nontuple_preprocess_features(features):\n rowsum = np.array(features.sum(1))\n ep = 1e-10\n r_inv = np.power(rowsum + ep, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)",
"def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return sparse_to_tuple(features)",
"def _update(self, features: DataFrameLike) -> None:\n # add features\n self._features = (\n pd.concat([self._features, features], axis=1, sort=True)\n # fill nans resulting from concatenation where features does not\n # contain neighborless nodes (out-degree=0) on its axis\n .fillna(0)\n )\n # prune redundant features\n pruner = FeaturePruner(self._final_features, self._feature_group_thresh)\n features_to_drop = pruner.prune_features(self._features)\n self._features = self._features.drop(features_to_drop, axis=1)\n # save features that remain after pruning and that\n # have not previously been saved as final features\n retained = features.columns.difference(features_to_drop)\n feature_dict = as_frame(self._features[retained]).to_dict()\n self._final_features[self.generation_count] = feature_dict",
"def feature_normalization(train, test):\n (N,p) = np.shape(train)\n mins = np.amin(train,axis=0)\n maxs = np.amax(train,axis=0) + mins\n train = (train + mins)/maxs\n test = (test + mins)/maxs\n return train, test",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)",
"def build_by_features(self, features):\n v = [0] * self.f\n masks = [1 << i for i in range(self.f)]\n if isinstance(features, dict):\n features = features.items()\n for f in features:\n if isinstance(f, basestring):\n h = self.hashfunc(f.encode('utf-8'))\n w = 1\n else:\n assert isinstance(f, collections.Iterable)\n h = self.hashfunc(f[0].encode('utf-8'))\n w = f[1]\n for i in range(self.f):\n v[i] += w if h & masks[i] else -w\n ans = 0\n for i in range(self.f):\n if v[i] >= 0:\n ans |= masks[i]\n self.value = ans",
"def build_by_features(self, features):\n v = [0] * self.f\n masks = [1 << i for i in range(self.f)]\n if isinstance(features, dict):\n features = features.items()\n for f in features:\n if isinstance(f, basestring):\n h = self.hashfunc(f.encode('utf-8'))\n w = 1\n else:\n assert isinstance(f, collections.Iterable)\n h = self.hashfunc(f[0].encode('utf-8'))\n w = f[1]\n for i in range(self.f):\n v[i] += w if h & masks[i] else -w\n ans = 0\n for i in range(self.f):\n if v[i] >= 0:\n ans |= masks[i]\n self.value = ans",
"def precompute(self, features, mode, params):\n return None",
"def feature_normalization(train, test):\n mins_of_features = np.amin(train, axis=0)\n maxs_of_features = np.amax(train, axis=0)\n range_of_features = maxs_of_features-mins_of_features\n range_of_features[range_of_features==0] = 1\n \n train_normalized = (train - mins_of_features)/range_of_features\n test_normalized = (test - mins_of_features)/range_of_features\n \n return (train_normalized, test_normalized)",
"def apply_randomization(features, label, randomize_prob):\n rnd_tok = lambda: tf.as_string(tf.random.uniform([], 0, 99999999, tf.int32))\n\n for idx in CAT_FEATURE_INDICES:\n key = feature_name(idx)\n # Ignore lint since tf.cond should evaluate lambda immediately.\n features[key] = tf.cond(tf.random.uniform([]) < randomize_prob,\n rnd_tok,\n lambda: features[key]) # pylint: disable=cell-var-from-loop\n return features, label",
"def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))",
"def get_max_features(feature: torch.Tensor):\n # (2, batch_size, num_particles, tau, feature_dim)\n # -> (2, batch_size, tau, num_particles, feature_dim)\n features_permute = feature.permute(0, 1, 3, 2, 4)\n scalar = get_msq(feature, keep_dim=False)\n indices = torch.max(scalar, dim=-2).indices.unsqueeze(-1)\n if feature.shape[-1] == 1:\n scalar = feature.max(dim=-1).values\n elif feature.shape[-1] == 4:\n scalar = get_msq(feature, keep_dim=False)\n else:\n raise NotImplementedError(\n f\"feature dimension {feature.shape[-1]} not supported yet\"\n )\n\n # aggregated_permuted = gather_righthand(features_permute, indices)\n\n # (2, batch_size, tau, num_particles, feature_dim)\n # -> (2, batch_size, num_particles, tau, feature_dim)\n return gather_righthand(features_permute, indices).permute(0, 1, 3, 2, 4)",
"def sub_select_features(features, strategy):\n\n def extract_one_index(y_val):\n index_ones = []\n y_prev = 0\n start_stop = []\n if y_val[-1] == 1:\n y_val = y_val.tolist() + [0]\n for i, y in enumerate(y_val):\n if y_prev == 0 and y == 1:\n start_stop = [i]\n if y_prev == 1 and y == 0:\n start_stop.append(i)\n index_ones.append(start_stop)\n y_prev = y\n return index_ones\n\n def wrapper(start_stop, maxi):\n size = start_stop[1] - start_stop[0]\n bound = (size+1)//2\n return [max(0, start_stop[0]-bound), min(maxi, start_stop[1]+bound)]\n\n def deduce_index_to_keep(one_index, maxi):\n wrapped = [wrapper(start_stop, maxi) for start_stop in one_index]\n to_keep = [idx for idx in range(wrapped[0][0], wrapped[0][1])]\n for start_stop in wrapped[1:]:\n to_keep += [idx for idx in range(start_stop[0], start_stop[1]) if idx > to_keep[-1]]\n return to_keep\n\n if strategy == 0:\n new_features = features # We do nothing\n\n else:\n new_features = dict()\n for which in ['train', 'test']:\n one_id = extract_one_index(features['y_'+which])\n true_idx = deduce_index_to_keep(one_id, len(features['y_'+which]))\n try:\n new_features['x_'+which] = features['x_'+which][true_idx]\n new_features['y_'+which] = features['y_'+which][true_idx]\n except IndexError as e:\n print(which)\n print(features['x_'+which].shape)\n print(features['y_'+which].shape)\n print(one_id)\n raise e\n\n return new_features"
] |
[
"0.61816657",
"0.59276104",
"0.59100384",
"0.58602893",
"0.5847031",
"0.5812968",
"0.5746926",
"0.572695",
"0.572695",
"0.5707635",
"0.56395966",
"0.5633891",
"0.56001306",
"0.5564291",
"0.55267614",
"0.5483713",
"0.5483713",
"0.5483698",
"0.54620516",
"0.5434813",
"0.54276484",
"0.54276484",
"0.5418464",
"0.5418464",
"0.54117286",
"0.5396529",
"0.5388878",
"0.5333532",
"0.5326394",
"0.5317136"
] |
0.63739
|
0
|
Train the model based on the training_data. training_data should be a dictionary whose keys are strings corresponding to an author's name, and whose values are lists of texts written by that author.
|
def train(self, training_data, chunk_size=100):
# For some reason, for the SVM to work, the keys need to be in alphabetical order
training_data = {k : training_data[k] for k in sorted(training_data)}
# Compile all author texts into one large text to then be broken down
for auth in training_data:
training_data[auth] = '\n\n'.join(training_data[auth])
self.auths = list(training_data.keys())
self.chunk_size = chunk_size
# Creates two lists, one of the texts and one of the corresponding author labels.
labels = []
texts = []
for auth in training_data:
lines = training_data[auth].split('\n')
for p in range( chunk_size, len(lines), chunk_size ):
labels.append(auth) # authors per text in the training corpus
texts.append('\n'.join(lines[p-chunk_size : p])) # texts in the training corpus
labels = array(labels)
texts = array(texts)
# Cleans the texts
for i in range(len(texts)):
texts[i] = self._clean(texts[i])
        # Generates the profiles from these texts
profiles = zeros((len(texts), len(self.alph)**self.N))
for i in range(len(texts)):
profiles[i] = self._profile(texts[i])
# Reduces the features and fits the model
self.train_data = [profiles, labels]
self._reduceFeatures()
        self.model = SVC(kernel='linear', probability=True)
self.model.fit(self.train_data[0], self.train_data[1])
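A hedged usage sketch for the method above; the class name AuthorClassifier and its constructor arguments are assumptions, since only the train() body is shown here.

# Hypothetical usage: AuthorClassifier, N and features are assumed names;
# only train()'s signature comes from the code above.
corpus = {
    "austen":  ["It is a truth universally acknowledged ...", "..."],
    "dickens": ["It was the best of times ...", "..."],
}
clf = AuthorClassifier(N=3, features=500)   # assumed constructor
clf.train(corpus, chunk_size=100)           # 100-line chunks per training sample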
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def train(self, training_data):\n pass",
"def train(self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any) -> None:\n pass",
"def train(self, training_data, model_name):\n dataset = []\n for example in training_data:\n entity_offsets = self._convert_example(example)\n dataset.append(self._from_json_to_crf(example, entity_offsets))\n\n features = [self._sentence_to_features(s) for s in dataset]\n labels = [self._sentence_to_labels(s) for s in dataset]\n trainer = sklearn_crfsuite.CRF(\n algorithm=\"lbfgs\",\n # coefficient for L1 penalty\n c1=0.1,\n # coefficient for L2 penalty\n c2=0.1,\n # stop earlier\n max_iterations=50,\n # include transitions that are possible, but not observed\n all_possible_transitions=True,\n )\n trainer.fit(features, labels)\n logger.info(\"Creating Model for Intent %s\",model_name)\n joblib.dump(trainer, 'core/agent/model_files/%s.model' % model_name)\n return True",
"def train(self, data):\n \n logger('[.] Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])",
"def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n pass",
"def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n pass",
"def train(self, trainData):\n pass",
"def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()",
"def train(self):\n # >>> YOUR ANSWER HERE\n\n fake_docs = []\n fake_words = []\n fake_words_freq = {}\n real_docs = []\n real_words = []\n real_words_freq = {}\n\n # load fake data of the training dataset, store the docs and words\n fake_data = open(self.train_data['fake']).readlines()\n for sentence in fake_data:\n preprocess_sentence = sentence.strip()\n fake_docs.append(preprocess_sentence)\n fake_words.extend(preprocess_sentence.split())\n\n # load real data of the training dataset, store the docs, words and word frequencies.\n real_data = open(self.train_data['real']).readlines()\n for sentence in real_data:\n preprocess_sentence = sentence.strip()\n real_docs.append(preprocess_sentence)\n real_words.extend(preprocess_sentence.split())\n\n # remove stop words if necessary\n if self.REMOVE_STOPWORDS:\n fake_words = [word for word in fake_words if word not in self.stopwords]\n real_words = [word for word in real_words if word not in self.stopwords]\n\n # calculate all words' frequency\n for word in fake_words:\n self.vocabulary.add(word)\n fake_words_freq[word] = fake_words_freq.get(word, 0) + 1\n for word in real_words:\n self.vocabulary.add(word)\n real_words_freq[word] = real_words_freq.get(word, 0) + 1\n\n # pre-calculate the number of all docs, the number of docs per class and words frequency per class for\n # calculation in the training loop.\n n_doc = len(fake_docs) + len(real_docs)\n n_class = {'fake': len(fake_docs), 'real': len(real_docs)}\n big_doc_dict = {'fake': fake_words_freq, 'real': real_words_freq}\n fake_words_num = 0\n real_words_num = 0\n for w in self.vocabulary:\n fake_words_num += fake_words_freq.get(w, 0)\n real_words_num += real_words_freq.get(w, 0)\n words_frequency_per_class = {'fake': fake_words_num, 'real': real_words_num}\n\n # Training\n for c in self.classes:\n self.logprior[c] = math.log(n_class[c] / n_doc)\n for w in self.vocabulary:\n count_w_c = big_doc_dict[c].get(w, 0)\n log_likelihood = math.log((count_w_c + 1) / (len(self.vocabulary) + words_frequency_per_class[c]))\n self.loglikelihood[(w, c)] = log_likelihood\n # >>> END YOUR ANSWER",
"def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)",
"def train(self, data):\n pass",
"def train(self, dataset, model_dir):\n raise NotImplementedError",
"def train(self, trainFilenames):\n\n\t\tstartIndex = len(self.documents)\n\t\tendIndex = startIndex + len(trainFilenames)\n\t\tself.documents += trainFilenames\n\n\t\tX = [[i] for i in range(startIndex, endIndex)]\n\t\tY = [isAroused(f) for f in trainFilenames]\n\n\t\tself.knn.fit(np.array(X), np.array(Y))",
"def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract",
"def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)",
"def training(training_data, iterations):\n nlp = spacy.blank('en') # create blank Language class\n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner, last=True)\n\n # add labels\n for _, annotations in training_data:\n for ent in annotations.get('entities'):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n optimizer = nlp.begin_training()\n for itn in range(iterations):\n print(\"Starting iteration \" + str(itn))\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in training_data:\n nlp.update(\n [text], # batch of texts\n [annotations], # batch of annotations\n drop=0.2, # dropout - make it harder to memorise data\n sgd=optimizer, # callable to update weights\n losses=losses)\n print(losses)\n return nlp",
"def train(self, x_train, y_train):\n\n # convert input to format for classifier\n list_of_embeddings = list(x_train[self.embeddings_col])\n x_train = np.array([[float(i) for i in embedding.strip('[]').split()] for embedding in list_of_embeddings])\n\n # discard fold ID column from labels\n review_groups = [col for col in y_train.columns if not col=='k']\n\n for review_group in tqdm(review_groups, desc='Train Review Groups'):\n\n # pull label column\n labels = y_train[review_group]\n\n # logistic classifier\n classifier = SGDClassifier(loss=\"log\", alpha=self.alpha,\n l1_ratio = self.l1_ratio, penalty=\"elasticnet\").fit(x_train, labels)\n\n # save the model in dictionary of models\n self.models[review_group] = classifier",
"def _train(args, pretrain_args):\n start_time = time.time()\n print('Training', ', '.join(args.speakers), '...')\n\n # randomly sample validation set monte_carlo_cv_num times\n for num in range(args.monte_carlo_cv_num):\n # get seed used to sub-sample validation dataset (use 42 for 1st run)\n seed = utils.get_seed(num)\n\n # get train/valid/test data and convert to sequences\n train_data, valid_data, test_data, id_to_word = data_reader.get_data(\n args, seed=seed)\n # set configurations/hyperparameters for model\n config, test_config = utils.set_config(args, id_to_word)\n\n # initialize word embeddings\n init_embed = utils.init_embedding(id_to_word, dim=args.embed_size,\n init_scale=args.init_scale,\n embed_path=args.embed_path)\n\n with tf.Graph().as_default():\n # initializer used to initialize TensorFlow variables\n initializer = tf.random_uniform_initializer(-config['init_scale'],\n config['init_scale'])\n # create Train model\n with tf.name_scope('Train'):\n with tf.variable_scope('Model', reuse=None,\n initializer=initializer):\n m_train = model.Model(args, is_training=True, config=config,\n init_embed=init_embed, name='Train')\n m_train.build_graph()\n\n # create Valid model\n with tf.name_scope('Valid'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_valid = model.Model(args, is_training=False, config=config,\n init_embed=init_embed, name='Valid')\n m_valid.build_graph()\n\n # create Test model\n with tf.name_scope('Test'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_test = model.Model(args, is_training=False, config=test_config,\n init_embed=init_embed, name='Test')\n m_test.build_graph()\n\n # create summaries to be viewed in TensorBoard\n tb_summaries = utils.TensorBoardSummaries()\n tb_summaries.create_ops()\n\n init = tf.global_variables_initializer()\n\n # if pretrained, must create dict to initialize TF Saver\n if bool(pretrain_args):\n # get trainable variables and convert to dict for Saver\n reuse_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n reuse_vars_dict = dict(\n [(var.op.name, var) for var in reuse_vars])\n # create saver for TF session (see function for addl details)\n saver = utils.create_tf_saver(args, pretrain_args,\n reuse_vars_dict)\n else:\n saver = tf.train.Saver()\n\n # ppls dict has perplexities that are stored in results database\n ppls = {}\n ppls, _ = _update_ppls(ppls, initialize=True)\n\n with tf.Session() as sess:\n sess.run(init)\n\n if args.load_path != '':\n print('Restoring model...')\n saver.restore(sess, args.load_path)\n\n for epoch in range(config['max_epoch']):\n print('Epoch: {0} Learning rate: {1:.3f}\\n'.format(\n epoch + 1, sess.run(m_train.lr)))\n for i, speaker in enumerate(args.speakers):\n print('Training {0} ...'.format(speaker))\n\n # run epoch on training data\n train_perplexity = _run_epoch(sess, m_train, args, train_data,\n i, tb_summaries, id_to_word,\n train_op=m_train.train_op,\n verbose=True)\n print('Epoch: {0} Train Perplexity: {1:.3f}'.format(\n epoch + 1, train_perplexity))\n ppls, _ = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=train_perplexity,\n dataset='train')\n\n print('Validating...')\n # run epoch on validation data\n valid_perplexity = _run_epoch(sess, m_valid, args,\n valid_data, i, tb_summaries,\n id_to_word, verbose=True)\n print('Epoch: {0} Valid Perplexity: {1:.3f}'.format(\n epoch + 1, valid_perplexity))\n ppls, improved = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=valid_perplexity,\n 
dataset='valid')\n\n if improved:\n # save model if valid ppl is lower than current\n # best valid ppl\n if args.save_path != '':\n print('Saving model to {0}.'.format(\n args.save_path))\n saver.save(sess, args.save_path)\n\n for i, speaker in enumerate(args.speakers):\n print('Testing {0} ...'.format(speaker))\n print('Restoring best model for testing...')\n saver.restore(sess, args.save_path)\n # run model on test data\n test_perplexity = _run_epoch(sess, m_test, args, test_data, i)\n ppls['test_ppl_' + speaker] = test_perplexity\n print('Test Perplexity: {0:.3f}'.format(test_perplexity))\n\n if args.insert_db == 'True':\n # write params/config/results to sql database\n results_db.insert_results(args, config, start_time, ppls)",
"def trainModel( self, featureTrain, classTrain):",
"def train(self, training_data, cfg, **kwargs):\n pass",
"def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)",
"def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:",
"def train(pipetype, datapath):\n with open(datapath, 'r') as f:\n training_data = json.load(f)\n docs = ['{0} {1}'.format(d['title'], d['text']) for d in training_data]\n\n if pipetype == 'bow':\n vector.train(docs)\n\n if pipetype in ['stanford', 'spotlight', 'keyword']:\n concept.train(docs, pipetype=pipetype)",
"def train( self, trainingData, trainingLabels):\n\t\t\n\t\tself.features = trainingData[0].keys()\n\t\t\"*** YOUR CODE HERE ***\"\n\t\ttrain_data_size = len(trainingData)\n\t\tnum_classifiers = len(self.classifiers)\n\t\tweights = np.array([1.0/(train_data_size) for _ in range(train_data_size)])\n\t\tindex = 1\n\t\tfor k in range(num_classifiers):\n\t\t\tclassifier = self.classifiers[k]\n\t\t\tprint(\"Training Classifier \" + str(index))\n\n\t\t\tclassifier.train(trainingData,trainingLabels,weights)\n\n\t\t\terror = 0.0\n\t\t\tpred = classifier.classify(trainingData)\n\t\t\tfor i in range(train_data_size):\n\t\t\t\tif (pred[i] != trainingLabels[i]):\n\t\t\t\t\terror = error + weights[i]\n\t\t\tprint(\"Error \" + str(error))\n\t\t\tfor i in range(train_data_size):\n\t\t\t\tif (pred[i] == trainingLabels[i]):\n\t\t\t\t\t\tweights[i] = weights[i] * (error) / (1 - error)\n\t\t\t\t# else:\n\t\t\t\t# \tweights[i] = weights[i] * (1 - error) / (error) \n\n\t\t\tself.alphas[k] = np.log((1 - error)/(error))\n\t\t\tprint(\"Alpha \" + str(self.alphas[k]))\n\t\t\tweights = weights / (np.sum(weights))\n\t\t\tindex += 1\n\n\n\t\t# util.raiseNotDefined()",
"def train(self, trainingData, trainingLabels, validationData, validationLabels):\t \n\t \n\t# might be useful in your code later...\n\t# this is a list of all features in the training set.\n\tself.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\t\n\tif (self.automaticTuning):\n\t\tkgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n\telse:\n\t\tkgrid = [self.k]\n\t\t\n\tself.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels",
"def train(self,\n max_epochs = 10, # number of max possible training iterations\n min_count = 5, # min frequency of usage to enter vocab\n vec_size = 100, # size of feature vectors\n max_alpha = 0.025, # starting learning rate\n min_alpha = 0.00025, # lowest learning rate\n save_name = None):\n\n if not self.tagged_docs and not (self.paperdf and self.authordf):\n print('no data to train.')\n return\n\n self.model.epochs = max_epochs\n self.model.vocabulary.min_count = min_count\n self.model.vector_size = vec_size\n self.model.alpha = max_alpha\n self.model.min_alpha = min_alpha\n\n print('Training model.')\n print('Building Vocabulary.')\n self.model.build_vocab(self.tagged_docs)\n\n print('Training for', max_epochs, 'epochs.')\n self.epoch_logger = EpochLogger()\n self.model.train(self.tagged_docs, total_examples = self.model.corpus_count,\n epochs = self.model.epochs, callbacks = [self.epoch_logger])\n print(\"Finished in {} seconds.\".format(round(time.time() - self.epoch_logger.start_time, 3)))\n\n if save_name:\n filename = str(save_name) + '.model'\n self.model.save(filename)\n print(\"Model Saved as\", filename)\n\n # self._compute_util_data()",
"def pretrain(texts_list: List[List[str]]) -> Any:\n \n return None",
"def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def train(\n # fmt: off\n lang: (\"Model language\", \"positional\", None, str),\n output_path: (\"Output directory to store model in\", \"positional\", None, Path),\n train_path: (\"Location of JSON-formatted training data\", \"positional\", None, Path),\n dev_path: (\"Location of JSON-formatted development data\", \"positional\", None, Path),\n raw_text: (\"Path to jsonl file with unlabelled text documents.\", \"option\", \"rt\", Path) = None,\n base_model: (\"Name of model to update (optional)\", \"option\", \"b\", str) = None,\n pipeline: (\"Comma-separated names of pipeline components\", \"option\", \"p\", str) = \"tagger,parser,ner\",\n vectors: (\"Model to load vectors from\", \"option\", \"v\", str) = None,\n replace_components: (\"Replace components from base model\", \"flag\", \"R\", bool) = False,\n n_iter: (\"Number of iterations\", \"option\", \"n\", int) = 30,\n n_early_stopping: (\"Maximum number of training epochs without dev accuracy improvement\", \"option\", \"ne\", int) = None,\n n_examples: (\"Number of examples\", \"option\", \"ns\", int) = 0,\n use_gpu: (\"Use GPU\", \"option\", \"g\", int) = -1,\n version: (\"Model version\", \"option\", \"V\", str) = \"0.0.0\",\n meta_path: (\"Optional path to meta.json to use as base.\", \"option\", \"m\", Path) = None,\n init_tok2vec: (\"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\", \"option\", \"t2v\", Path) = None,\n parser_multitasks: (\"Side objectives for parser CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"pt\", str) = \"\",\n entity_multitasks: (\"Side objectives for NER CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"et\", str) = \"\",\n noise_level: (\"Amount of corruption for data augmentation\", \"option\", \"nl\", float) = 0.0,\n orth_variant_level: (\"Amount of orthography variation for data augmentation\", \"option\", \"ovl\", float) = 0.0,\n eval_beam_widths: (\"Beam widths to evaluate, e.g. 
4,8\", \"option\", \"bw\", str) = \"\",\n gold_preproc: (\"Use gold preprocessing\", \"flag\", \"G\", bool) = False,\n learn_tokens: (\"Make parser learn gold-standard tokenization\", \"flag\", \"T\", bool) = False,\n textcat_multilabel: (\"Textcat classes aren't mutually exclusive (multilabel)\", \"flag\", \"TML\", bool) = False,\n textcat_arch: (\"Textcat model architecture\", \"option\", \"ta\", str) = \"bow\",\n textcat_positive_label: (\"Textcat positive label for binary classes with two labels\", \"option\", \"tpl\", str) = None,\n tag_map_path: (\"Location of JSON-formatted tag map\", \"option\", \"tm\", Path) = None,\n verbose: (\"Display more information for debug\", \"flag\", \"VV\", bool) = False,\n debug: (\"Run data diagnostics before training\", \"flag\", \"D\", bool) = False,\n # fmt: on\n):\n util.fix_random_seed()\n util.set_env_log(verbose)\n\n # Make sure all files and paths exists if they are needed\n train_path = util.ensure_path(train_path)\n dev_path = util.ensure_path(dev_path)\n meta_path = util.ensure_path(meta_path)\n output_path = util.ensure_path(output_path)\n if raw_text is not None:\n raw_text = list(srsly.read_jsonl(raw_text))\n if not train_path or not train_path.exists():\n msg.fail(\"Training data not found\", train_path, exits=1)\n if not dev_path or not dev_path.exists():\n msg.fail(\"Development data not found\", dev_path, exits=1)\n if meta_path is not None and not meta_path.exists():\n msg.fail(\"Can't find model meta.json\", meta_path, exits=1)\n meta = srsly.read_json(meta_path) if meta_path else {}\n if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:\n msg.warn(\n \"Output directory is not empty\",\n \"This can lead to unintended side effects when saving the model. \"\n \"Please use an empty directory or a different path instead. If \"\n \"the specified output path doesn't exist, the directory will be \"\n \"created for you.\",\n )\n if not output_path.exists():\n output_path.mkdir()\n msg.good(f\"Created output directory: {output_path}\")\n\n tag_map = {}\n if tag_map_path is not None:\n tag_map = srsly.read_json(tag_map_path)\n # Take dropout and batch size as generators of values -- dropout\n # starts high and decays sharply, to force the optimizer to explore.\n # Batch size starts at 1 and grows, so that we make updates quickly\n # at the beginning of training.\n dropout_rates = util.decaying(\n util.env_opt(\"dropout_from\", 0.2),\n util.env_opt(\"dropout_to\", 0.2),\n util.env_opt(\"dropout_decay\", 0.0),\n )\n batch_sizes = util.compounding(\n util.env_opt(\"batch_from\", 100.0),\n util.env_opt(\"batch_to\", 1000.0),\n util.env_opt(\"batch_compound\", 1.001),\n )\n\n if not eval_beam_widths:\n eval_beam_widths = [1]\n else:\n eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(\",\")]\n if 1 not in eval_beam_widths:\n eval_beam_widths.append(1)\n eval_beam_widths.sort()\n has_beam_widths = eval_beam_widths != [1]\n\n default_dir = Path(__file__).parent.parent / \"ml\" / \"models\" / \"defaults\"\n\n # Set up the base model and pipeline. If a base model is specified, load\n # the model and make sure the pipeline matches the pipeline setting. 
If\n # training starts from a blank model, intitalize the language class.\n pipeline = [p.strip() for p in pipeline.split(\",\")]\n msg.text(f\"Training pipeline: {pipeline}\")\n disabled_pipes = None\n pipes_added = False\n if use_gpu >= 0:\n activated_gpu = None\n try:\n activated_gpu = set_gpu(use_gpu)\n except Exception as e:\n msg.warn(f\"Exception: {e}\")\n if activated_gpu is not None:\n msg.text(f\"Using GPU: {use_gpu}\")\n else:\n msg.warn(f\"Unable to activate GPU: {use_gpu}\")\n msg.text(\"Using CPU only\")\n use_gpu = -1\n if base_model:\n msg.text(f\"Starting with base model '{base_model}'\")\n nlp = util.load_model(base_model)\n if nlp.lang != lang:\n msg.fail(\n f\"Model language ('{nlp.lang}') doesn't match language \"\n f\"specified as `lang` argument ('{lang}') \",\n exits=1,\n )\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n nlp.select_pipes(disable=[p for p in nlp.pipe_names if p not in pipeline])\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n if pipe not in nlp.pipe_names:\n msg.text(f\"Adding component to base model '{pipe}'\")\n nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n elif replace_components:\n msg.text(f\"Replacing component from base model '{pipe}'\")\n nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n else:\n if pipe == \"textcat\":\n textcat_cfg = nlp.get_pipe(\"textcat\").cfg\n base_cfg = {\n \"exclusive_classes\": textcat_cfg[\"exclusive_classes\"],\n \"architecture\": textcat_cfg[\"architecture\"],\n \"positive_label\": textcat_cfg[\"positive_label\"],\n }\n if base_cfg != pipe_cfg:\n msg.fail(\n f\"The base textcat model configuration does\"\n f\"not match the provided training options. 
\"\n f\"Existing cfg: {base_cfg}, provided cfg: {pipe_cfg}\",\n exits=1,\n )\n msg.text(f\"Extending component from base model '{pipe}'\")\n disabled_pipes = nlp.select_pipes(\n disable=[p for p in nlp.pipe_names if p not in pipeline]\n )\n else:\n msg.text(f\"Starting with blank model '{lang}'\")\n lang_cls = util.get_lang_class(lang)\n nlp = lang_cls()\n\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"morphologizer\":\n config_loc = default_dir / \"morphologizer_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n pipe = nlp.create_pipe(pipe, config=pipe_cfg)\n nlp.add_pipe(pipe)\n\n # Update tag map with provided mapping\n nlp.vocab.morphology.tag_map.update(tag_map)\n\n # Multitask objectives\n multitask_options = [(\"parser\", parser_multitasks), (\"ner\", entity_multitasks)]\n for pipe_name, multitasks in multitask_options:\n if multitasks:\n if pipe_name not in pipeline:\n msg.fail(\n f\"Can't use multitask objective without '{pipe_name}' in \"\n f\"the pipeline\"\n )\n pipe = nlp.get_pipe(pipe_name)\n for objective in multitasks.split(\",\"):\n pipe.add_multitask_objective(objective)\n\n # Prepare training corpus\n msg.text(f\"Counting training words (limit={n_examples})\")\n corpus = GoldCorpus(train_path, dev_path, limit=n_examples)\n n_train_words = corpus.count_train()\n\n if base_model and not pipes_added:\n # Start with an existing model, use default optimizer\n optimizer = create_default_optimizer()\n else:\n # Start with a blank model, call begin_training\n cfg = {\"device\": use_gpu}\n optimizer = nlp.begin_training(lambda: corpus.train_examples, **cfg)\n nlp._optimizer = None\n\n # Load in pretrained weights (TODO: this may be broken in the config rewrite)\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(f\"Loaded pretrained tok2vec for: {components}\")\n\n # Verify textcat config\n if \"textcat\" in pipeline:\n textcat_labels = nlp.get_pipe(\"textcat\").cfg.get(\"labels\", [])\n if textcat_positive_label and textcat_positive_label not in textcat_labels:\n msg.fail(\n f\"The textcat_positive_label (tpl) '{textcat_positive_label}' \"\n f\"does not match any label in the training data.\",\n exits=1,\n )\n if textcat_positive_label and len(textcat_labels) != 2:\n msg.fail(\n \"A textcat_positive_label (tpl) '{textcat_positive_label}' was \"\n \"provided for 
training data that does not appear to be a \"\n \"binary classification problem with two labels.\",\n exits=1,\n )\n train_data = corpus.train_data(\n nlp,\n noise_level=noise_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n train_labels = set()\n if textcat_multilabel:\n multilabel_found = False\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1:\n multilabel_found = True\n if not multilabel_found and not base_model:\n msg.warn(\n \"The textcat training instances look like they have \"\n \"mutually-exclusive classes. Remove the flag \"\n \"'--textcat-multilabel' to train a classifier with \"\n \"mutually-exclusive classes.\"\n )\n if not textcat_multilabel:\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1 and not base_model:\n msg.warn(\n \"Some textcat training instances do not have exactly \"\n \"one positive label. Modifying training options to \"\n \"include the flag '--textcat-multilabel' for classes \"\n \"that are not mutually exclusive.\"\n )\n nlp.get_pipe(\"textcat\").cfg[\"exclusive_classes\"] = False\n textcat_multilabel = True\n break\n if base_model and set(textcat_labels) != train_labels:\n msg.fail(\n f\"Cannot extend textcat model using data with different \"\n f\"labels. Base model labels: {textcat_labels}, training data \"\n f\"labels: {list(train_labels)}\",\n exits=1,\n )\n if textcat_multilabel:\n msg.text(\n f\"Textcat evaluation score: ROC AUC score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n elif textcat_positive_label and len(textcat_labels) == 2:\n msg.text(\n f\"Textcat evaluation score: F1-score for the \"\n f\"label '{textcat_positive_label}'\"\n )\n elif len(textcat_labels) > 1:\n if len(textcat_labels) == 2:\n msg.warn(\n \"If the textcat component is a binary classifier with \"\n \"exclusive classes, provide '--textcat_positive_label' for \"\n \"an evaluation on the positive class.\"\n )\n msg.text(\n f\"Textcat evaluation score: F1-score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n else:\n msg.fail(\n \"Unsupported textcat configuration. 
Use `spacy debug-data` \"\n \"for more information.\"\n )\n\n # fmt: off\n row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)\n row_widths = [len(w) for w in row_head]\n row_settings = {\"widths\": row_widths, \"aligns\": tuple([\"r\" for i in row_head]), \"spacing\": 2}\n # fmt: on\n print(\"\")\n msg.row(row_head, **row_settings)\n msg.row([\"-\" * width for width in row_settings[\"widths\"]], **row_settings)\n try:\n iter_since_best = 0\n best_score = 0.0\n for i in range(n_iter):\n train_data = corpus.train_dataset(\n nlp,\n noise_level=noise_level,\n orth_variant_level=orth_variant_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n if raw_text:\n random.shuffle(raw_text)\n raw_batches = util.minibatch(\n (nlp.make_doc(rt[\"text\"]) for rt in raw_text), size=8\n )\n words_seen = 0\n with tqdm.tqdm(total=n_train_words, leave=False) as pbar:\n losses = {}\n for batch in util.minibatch_by_words(train_data, size=batch_sizes):\n if not batch:\n continue\n try:\n nlp.update(\n batch,\n sgd=optimizer,\n drop=next(dropout_rates),\n losses=losses,\n )\n except ValueError as e:\n err = \"Error during training\"\n if init_tok2vec:\n err += \" Did you provide the same parameters during 'train' as during 'pretrain'?\"\n msg.fail(err, f\"Original error message: {e}\", exits=1)\n if raw_text:\n # If raw text is available, perform 'rehearsal' updates,\n # which use unlabelled data to reduce overfitting.\n raw_batch = list(next(raw_batches))\n nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)\n docs = [ex.doc for ex in batch]\n if not int(os.environ.get(\"LOG_FRIENDLY\", 0)):\n pbar.update(sum(len(doc) for doc in docs))\n words_seen += sum(len(doc) for doc in docs)\n with nlp.use_params(optimizer.averages):\n util.set_env_log(False)\n epoch_model_path = output_path / f\"model{i}\"\n nlp.to_disk(epoch_model_path)\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for beam_width in eval_beam_widths:\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n nwords = sum(len(ex.doc) for ex in dev_dataset)\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n if use_gpu < 0:\n gpu_wps = None\n cpu_wps = nwords / (end_time - start_time)\n else:\n gpu_wps = nwords / (end_time - start_time)\n with use_ops(\"numpy\"):\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n cpu_wps = nwords / (end_time - start_time)\n acc_loc = output_path / f\"model{i}\" / \"accuracy.json\"\n srsly.write_json(acc_loc, scorer.scores)\n\n # Update model meta.json\n meta[\"lang\"] = nlp.lang\n meta[\"pipeline\"] = nlp.pipe_names\n meta[\"spacy_version\"] = f\">={about.__version__}\"\n if beam_width == 1:\n meta[\"speed\"] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta.setdefault(\"accuracy\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"accuracy\"][metric] = scorer.scores[metric]\n 
else:\n meta.setdefault(\"beam_accuracy\", {})\n meta.setdefault(\"beam_speed\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"beam_accuracy\"][metric] = scorer.scores[metric]\n meta[\"beam_speed\"][beam_width] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta[\"vectors\"] = {\n \"width\": nlp.vocab.vectors_length,\n \"vectors\": len(nlp.vocab.vectors),\n \"keys\": nlp.vocab.vectors.n_keys,\n \"name\": nlp.vocab.vectors.name,\n }\n meta.setdefault(\"name\", f\"model{i}\")\n meta.setdefault(\"version\", version)\n meta[\"labels\"] = nlp.meta[\"labels\"]\n meta_loc = output_path / f\"model{i}\" / \"meta.json\"\n srsly.write_json(meta_loc, meta)\n util.set_env_log(verbose)\n\n progress = _get_progress(\n i,\n losses,\n scorer.scores,\n output_stats,\n beam_width=beam_width if has_beam_widths else None,\n cpu_wps=cpu_wps,\n gpu_wps=gpu_wps,\n )\n if i == 0 and \"textcat\" in pipeline:\n textcats_per_cat = scorer.scores.get(\"textcats_per_cat\", {})\n for cat, cat_score in textcats_per_cat.items():\n if cat_score.get(\"roc_auc_score\", 0) < 0:\n msg.warn(\n f\"Textcat ROC AUC score is undefined due to \"\n f\"only one value in label '{cat}'.\"\n )\n msg.row(progress, **row_settings)\n # Early stopping\n if n_early_stopping is not None:\n current_score = _score_for_model(meta)\n if current_score < best_score:\n iter_since_best += 1\n else:\n iter_since_best = 0\n best_score = current_score\n if iter_since_best >= n_early_stopping:\n msg.text(\n f\"Early stopping, best iteration is: {i - iter_since_best}\"\n )\n msg.text(\n f\"Best score = {best_score}; Final iteration score = {current_score}\"\n )\n break\n except Exception as e:\n msg.warn(f\"Aborting and saving final best model. Encountered exception: {e}\")\n finally:\n best_pipes = nlp.pipe_names\n if disabled_pipes:\n disabled_pipes.restore()\n with nlp.use_params(optimizer.averages):\n final_model_path = output_path / \"model-final\"\n nlp.to_disk(final_model_path)\n meta_loc = output_path / \"model-final\" / \"meta.json\"\n final_meta = srsly.read_json(meta_loc)\n final_meta.setdefault(\"accuracy\", {})\n final_meta[\"accuracy\"].update(meta.get(\"accuracy\", {}))\n final_meta.setdefault(\"speed\", {})\n final_meta[\"speed\"].setdefault(\"cpu\", None)\n final_meta[\"speed\"].setdefault(\"gpu\", None)\n meta.setdefault(\"speed\", {})\n meta[\"speed\"].setdefault(\"cpu\", None)\n meta[\"speed\"].setdefault(\"gpu\", None)\n # combine cpu and gpu speeds with the base model speeds\n if final_meta[\"speed\"][\"cpu\"] and meta[\"speed\"][\"cpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"cpu\"], meta[\"speed\"][\"cpu\"]]\n )\n final_meta[\"speed\"][\"cpu\"] = speed\n if final_meta[\"speed\"][\"gpu\"] and meta[\"speed\"][\"gpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"gpu\"], meta[\"speed\"][\"gpu\"]]\n )\n final_meta[\"speed\"][\"gpu\"] = speed\n # if there were no speeds to update, overwrite with meta\n if (\n final_meta[\"speed\"][\"cpu\"] is None\n and final_meta[\"speed\"][\"gpu\"] is None\n ):\n final_meta[\"speed\"].update(meta[\"speed\"])\n # note: beam speeds are not combined with the base model\n if has_beam_widths:\n final_meta.setdefault(\"beam_accuracy\", {})\n final_meta[\"beam_accuracy\"].update(meta.get(\"beam_accuracy\", {}))\n final_meta.setdefault(\"beam_speed\", {})\n final_meta[\"beam_speed\"].update(meta.get(\"beam_speed\", {}))\n srsly.write_json(meta_loc, final_meta)\n msg.good(\"Saved model to output directory\", 
final_model_path)\n with msg.loading(\"Creating best model...\"):\n best_model_path = _collate_best_model(final_meta, output_path, best_pipes)\n msg.good(\"Created best model\", best_model_path)"
] |
[
"0.7491742",
"0.7081026",
"0.7024971",
"0.698054",
"0.6940347",
"0.6940347",
"0.6874835",
"0.68558615",
"0.67965704",
"0.67838323",
"0.67239964",
"0.6681644",
"0.66786915",
"0.664978",
"0.66082704",
"0.66024756",
"0.65962833",
"0.6578496",
"0.6560251",
"0.65377945",
"0.65177953",
"0.65015775",
"0.6483401",
"0.6480669",
"0.64461076",
"0.64370394",
"0.642209",
"0.6419349",
"0.6416763",
"0.6408835"
] |
0.76001686
|
0
|
Test the list devices endpoint WIFI3452
|
def test_gwservice_listdevices(self, setup_controller):
    resp = setup_controller.request("gw", "devices", "GET", None, None)
    body = resp.url + "," + str(resp.status_code) + "," + resp.text
    allure.attach(name="gw list devices", body=body)
    assert resp.status_code == 200
    devices = json.loads(resp.text)
    print(devices)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_get_devices(self):\n pass",
"def test_get_devices(self):\n pass",
"def test_verify_list_of_devices_in_my_network():",
"def test_get_devices1(self):\n pass",
"def user_sends_get_call_to_the_devices():\n web_app.list_devices()",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def test_get_pci_device_list(self):\n pass",
"def ret_device_list():\n token = get_auth_token() # Get Token\n url = \"https://sandboxdnac.cisco.com/api/v1/network-device\"\n hdr = {'x-auth-token': token, 'content-type' : 'application/json'}\n resp = requests.get(url, headers=hdr) # Make the Get Request\n device_list = resp.json()\n return device_list",
"def test_get_device(self):\n pass",
"def test_get_device(self):\n pass",
"def list_devices(arn=None, nextToken=None):\n pass",
"def test_get_devices(self):\n print(\"Test Device List\")\n self.mock_api.return_value = call_json.DeviceList.device_list_response()\n self.manager.get_devices()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.get_devices, all_kwargs, None,\n self.write_api, self.overwrite)\n assert len(self.manager.bulbs) == call_json_bulbs.BULBS_NUM\n assert len(self.manager.outlets) == call_json_outlets.OUTLETS_NUM\n assert len(self.manager.fans) == call_json_fans.FANS_NUM\n assert len(self.manager.switches) == call_json_switches.SWITCHES_NUM",
"def device_list():\n click.echo(\"\\nRetrieving the devices.\")\n\n url = base_url + \"/device\"\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of devices \" + str(response.text))\n exit()\n\n headers = [\"Host-Name\", \"Device Type\", \"Device ID\", \"System IP\", \"Site ID\", \"Version\", \"Device Model\"]\n table = list()\n\n for item in items:\n tr = [item.get('host-name'), item.get('device-type'), item.get('uuid'), item.get('system-ip'), item.get('site-id'), item.get('version'), item.get('device-model')]\n table.append(tr)\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()",
"def list_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n for device in result:\n print(device)",
"def getDeviceList(self):\r\n\r\n self._logger.debug(\"In getDeviceList()...\")\r\n\r\n # update the security token if needed \r\n if self._checkToken():\r\n\r\n response = self._callAPI(_API_GET_DEVICE_LIST, useSession=True)\r\n\r\n if response is not None:\r\n\r\n deviceInfo = response.json()\r\n \r\n if response.status_code == 200 and \"items\" in deviceInfo:\r\n\r\n deviceList = []\r\n\r\n for dev in deviceInfo[\"items\"]:\r\n\r\n # pull out common attributes\r\n deviceID = dev[\"serial_number\"]\r\n deviceType = dev[\"device_family\"]\r\n description = dev.get(\"name\", deviceType + \" \" + deviceID[-4:])\r\n\r\n # uncomment the next line to inspect the devices returned from the MyQ service\r\n self._logger.debug(\"Device Found - Device ID: %s, Device Type: %s, Description: %s\", deviceID, deviceType, description)\r\n\r\n # add device to the list with properties based on type\r\n if deviceType == API_DEVICE_TYPE_GATEWAY:\r\n\r\n # get gateway attributes\r\n online = dev[\"state\"][\"online\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add gateway device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"description\": description,\r\n \"online\": online,\r\n \"last_updated\": lastUpdated\r\n })\r\n\r\n elif deviceType == API_DEVICE_TYPE_OPENER:\r\n \r\n # get the door attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"door_state\"]\r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add garage door opener device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n elif deviceType == API_DEVICE_TYPE_LAMP:\r\n\r\n # get the lamp attributes\r\n parentID = dev[\"parent_device_id\"] \r\n state = dev[\"state\"][\"lamp_state\"] \r\n lastChanged = dev[\"state\"][\"last_update\"]\r\n lastUpdated = dev[\"state\"][\"last_status\"]\r\n\r\n # add lamp device to list\r\n deviceList.append({\r\n \"type\": deviceType,\r\n \"id\": deviceID,\r\n \"parent_id\": parentID,\r\n \"description\": description,\r\n \"state\": state,\r\n \"last_changed\": lastChanged,\r\n \"last_updated\": lastUpdated\r\n })\r\n \r\n return deviceList\r\n \r\n elif response.status_code == 401:\r\n \r\n self._logger.error(\"There was an authentication error with the MyQ account: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n \r\n self._logger.error(\"Error retrieving device list: %s\", _parseResponseMsg(response))\r\n return None\r\n\r\n else:\r\n # Error logged in _callAPI function\r\n return None\r\n\r\n else:\r\n # Check token failed - wait and see if next call successful\r\n return None",
"async def get_device_list(self):\n self.logger.debug(\"Retrieving device list information.\")\n #url = 'https://{}/api/user/device'.format(self.apiHost) #suddenly stopped worrking, so use\n '''\n #full version\n url = 'https://{}/api/user/device?lang=en&apiKey={}&getTags=1&version={}&ts={}&nonce={}&appid={}&imei={}&os={}&model={}&romVersion={}&appVersion={}'.format(self.apiHost,\n self.apikey,\n self.timestamp,\n self._version,\n self._nonce,\n self._appid,\n self._imei,\n self._os,\n self._model,\n self._romVersion,\n self._appVersion)\n '''\n url = 'https://{}/api/user/device?version={}&appid={}'.format(self.apiHost, self._version, self._appid)\n headers = {\n 'Authorization': 'Bearer %s' % self.authenticationToken,\n }\n self.logger.debug('url: %s, headers: %s' % (url, headers))\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as response:\n json_response = await response.json()\n \n self.logger.debug('received response status: %s' % response.status) \n self.logger.debug('received response: %s' % self.pprint(json_response))\n if response.status != 200:\n self.logger.error('error: %s received' % response.status)\n return\n \n if json_response.get(\"devicelist\"):\n self.logger.info('New response format found')\n json_response = json_response[\"devicelist\"]\n \n self.logger.debug('number of device(s) is: %d' % len(json_response))\n \n self._devices = json_response #list of devices and current configurations\n \n self._create_client_devices()\n \n '''\n Example Response:\n [\n {\n \"__v\": 0,\n \"_id\": \"5becffa6d2b4a3c34cb79b38\",\n \"apikey\": \"530303a6-cf2c-4246-894c-xxxxxxxxxxx\",\n \"brandName\": \"AUTOSLIDE\",\n \"createdAt\": \"2018-11-15T05:09:58.341Z\",\n \"deviceStatus\": \"\",\n \"deviceUrl\": \"\",\n \"deviceid\": \"100050xxxxx\",\n \"devicekey\": \"4123ec79-d2c3-4d32-930a-xxxxxxxxxxxxx\",\n \"extra\": {\n \"_id\": \"xxxxxxxxxxxxxxxx\",\n \"extra\": {\n \"apmac\": \"xx:xx:xx:xx:xx:xx\",\n \"brandId\": \"5a6fcf00f620073c67efc280\",\n \"description\": \"20180813001\",\n \"mac\": \"xx:xx:xx0:xx:xx:xx\",\n \"manufacturer\": \"\\u9752\\u5c9b\\u6fb3\\u601d\\u5fb7\\u667a\\u80fd\\u95e8\\u63a7\\u7cfb\\u7edf\\u6709\\u9650\\u516c\\u53f8\",\n \"model\": \"PSA-BTA-GL\",\n \"modelInfo\": \"5af3f5332c8642b001540dac\",\n \"ui\": \"\\u63a8\\u62c9\\u5ba0\\u7269\\u95e8\",\n \"uiid\": 54\n }\n },\n \"group\": \"\",\n \"groups\": [],\n \"ip\": \"xxx.xx.xx.xxx\",\n \"location\": \"\",\n \"name\": \"Patio Door\",\n \"offlineTime\": \"2018-12-31T07:23:31.018Z\",\n \"online\": true,\n \"onlineTime\": \"2018-12-31T12:19:33.216Z\",\n \"params\": {\n \"a\": \"3\",\n \"b\": \"3\",\n \"c\": \"1\",\n \"d\": \"1\",\n \"e\": \"1\",\n \"f\": \"1\",\n \"fwVersion\": \"2.0.2\",\n \"g\": \"0\",\n \"h\": \"1\",\n \"i\": \"0\",\n \"j\": \"00\",\n \"k\": \"0\",\n \"l\": \"1\",\n \"m\": \"2\",\n \"n\": \"0\",\n \"rssi\": -53,\n \"staMac\": \"xx:xx:xx:xx:xx:xx\"\n },\n \"productModel\": \"WFA-1\",\n \"settings\": {\n \"alarmNotify\": 1,\n \"opsHistory\": 1,\n \"opsNotify\": 0\n },\n \"sharedTo\": [\n {\n \"note\": \"\",\n \"permit\": 15,\n \"phoneNumber\": \"[email protected]\",\n \"shareTime\": 1542259546087\n }\n ],\n \"showBrand\": true,\n \"type\": \"10\",\n \"uiid\": 54\n }\n ]\n \n or New format:\n {\n \"devicelist\": [\n {\n \"__v\": 0,\n \"_id\": \"5c3665d012d28ae6ba4943c8\",\n \"apikey\": \"530303a6-cf2c-4246-894c-50855b00e6d8\",\n \"brandLogoUrl\": \"https://us-ota.coolkit.cc/logo/KRZ54OifuGmjoEMxT1YYM3Ybu2fj5K2C.png\",\n \"brandName\": \"Sonoff\",\n \"createdAt\": 
\"2019-01-09T21:21:20.402Z\",\n \"devConfig\": {},\n \"devGroups\": [],\n \"deviceStatus\": \"\",\n ... as before\n '''",
"def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})",
"def get_network_devices(user, passwd, base_api_url):\n network_devices = ''\n response = connect_to_idrac(user, passwd, base_api_url)\n if response and response.json():\n network_devices_info = response.json()\n try:\n network_devices = network_devices_info[u'Members']\n except KeyError:\n network_devices = ''\n get_user_response(message='could not get network devices info')\n else:\n get_user_response(message='idrac connection status code is 401')\n\n return network_devices",
"def getDevices(i):\n devices = Account['KTFLR'].devices('monpressprod')\n device = devices[i]\n return device",
"def test_get_device_by_id1(self):\n pass",
"def the_user_should_be_returned_with_the_list_of_devices_with_ip_address():\n assert web_app.validate_list_devices()",
"def test_get_device_by_id(self):\n pass",
"def test_filter_device(self):\n pass",
"def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)",
"def get_device_list(ip_address, headers):\n ome_device_list = []\n next_link_url = 'https://%s/api/DeviceService/Devices' % ip_address\n while next_link_url is not None:\n device_response = requests.get(next_link_url, headers=headers, verify=False)\n next_link_url = None\n if device_response.status_code == 200:\n dev_json_response = device_response.json()\n if dev_json_response['@odata.count'] <= 0:\n print(\"No devices found at \", ip_address)\n return\n\n if '@odata.nextLink' in dev_json_response:\n next_link_url = 'https://%s/' % ip_address + dev_json_response['@odata.nextLink']\n\n if dev_json_response['@odata.count'] > 0:\n ome_device_list = ome_device_list + [x['Id'] for x in dev_json_response['value']]\n else:\n print(\"No devices found at \", ip_address)\n\n return ome_device_list",
"def get_devices(self):\n devices = self.get(\"event/device\")",
"def test_verify_connection_to_a_device():",
"async def find_devices() -> List[DeviceInfo]:\n return await Discovery.search_devices()",
"def listDevices(self):\n count = 0\n for device in self:\n count += 1\n printLog(\"Device \" + str(count) + \": '%s %s (%s, %s, %s)'\" % (\n device.make, device.model, device.deviceId, device.androidVersion, device.operator))\n if device.idle:\n printLog(\"[Idle]\")\n else:\n printLog(\"[Busy]\")"
] |
[
"0.73646533",
"0.73646533",
"0.7279443",
"0.70936656",
"0.699219",
"0.6931419",
"0.6884572",
"0.68194383",
"0.67612463",
"0.67612463",
"0.66208494",
"0.65730584",
"0.6533507",
"0.6531842",
"0.64532113",
"0.6428311",
"0.64186794",
"0.6415154",
"0.6404095",
"0.6266183",
"0.62343705",
"0.6194935",
"0.61460835",
"0.6144422",
"0.61162436",
"0.6112488",
"0.60959417",
"0.60884815",
"0.60588396",
"0.6057388"
] |
0.7410879
|
0
|
Removes detections with lower object confidence score than 'conf_thres' and performs Non-Maximum Suppression to further filter detections.
|
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
# From (center x, center y, width, height) to (x1, y1, x2, y2)
prediction[..., :4] = change_box_order(prediction[..., :4], order="xywh2xyxy")
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
image_pred = image_pred[image_pred[:, 4] >= conf_thres]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Object confidence times class confidence
score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
# Sort by it
image_pred = image_pred[(-score).argsort()]
class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
detections = torch.cat(
(image_pred[:, :5], class_confs.float(), class_preds.float()), 1
)
# Perform non-maximum suppression
keep_boxes = []
while detections.size(0):
large_overlap = (
box_iou(detections[0, :4].unsqueeze(0), detections[:, :4], order="xyxy")
> nms_thres
)
label_match = detections[0, -1] == detections[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
weights = detections[invalid, 4:5]
# Merge overlapping bboxes by order of confidence
detections[0, :4] = (weights * detections[invalid, :4]).sum(
0
) / weights.sum()
keep_boxes += [detections[0]]
detections = detections[~invalid]
if keep_boxes:
output[image_i] = torch.stack(keep_boxes)
return output
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.5):\n min_wh = 2 # (pixels) minimum box width and height\n\n output = [None] * len(prediction)\n for image_i, pred in enumerate(prediction):\n # Multiply conf by class conf to get combined confidence\n class_conf, class_pred = pred[:, 5:].max(1)\n pred[:, 4] *= class_conf\n\n # Select only suitable predictions\n i = pred[:, 4] > conf_thres\n i &= (pred[:, 2:4] > min_wh).all(1)\n i &= torch.isfinite(pred).all(1)\n\n pred = pred[i]\n\n # If none are remaining => process next image\n if len(pred) == 0:\n continue\n\n # Select predicted classes\n class_conf = class_conf[i]\n class_pred = class_pred[i].unsqueeze(1).float()\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n pred[:, :4] = xywh2xyxy(pred[:, :4])\n\n # Detections ordered as (x1y1x2y2, obj_conf, class_conf, class_pred)\n pred = torch.cat((pred[:, :5], class_conf.unsqueeze(1), class_pred), 1)\n\n # Get detections sorted by decreasing confidence scores\n pred = pred[(-pred[:, 4]).argsort()]\n\n det_max = []\n nms_style = 'MERGE' # 'OR' (default), 'AND', 'MERGE' (experimental)\n for c in pred[:, -1].unique():\n dc = pred[pred[:, -1] == c] # select class c\n n = len(dc)\n if n == 1:\n det_max.append(dc) # No NMS required if only 1 prediction\n continue\n elif n > 100:\n # limit to first 100 boxes:\n # https://github.com/ultralytics/yolov3/issues/117\n dc = dc[:100]\n\n # Non-maximum suppression\n if nms_style == 'OR': # default\n while dc.shape[0]:\n det_max.append(dc[:1]) # save highest conf detection\n if len(dc) == 1: # Stop if we're at the last detection\n break\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n elif nms_style == 'AND': # requires overlap, single boxes erased\n while len(dc) > 1:\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n if iou.max() > 0.5:\n det_max.append(dc[:1])\n dc = dc[1:][iou < nms_thres] # remove ious > threshold\n elif nms_style == 'MERGE': # weighted mixture box\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n i = bbox_iou(dc[0], dc) > nms_thres # iou with other boxes\n weights = dc[i, 4:5]\n dc[0, :4] = (weights * dc[i, :4]).sum(0) / weights.sum()\n det_max.append(dc[:1])\n dc = dc[i == 0]\n # soft-NMS https://arxiv.org/abs/1704.04503\n elif nms_style == 'SOFT':\n sigma = 0.5 # soft-nms sigma parameter\n while len(dc):\n if len(dc) == 1:\n det_max.append(dc)\n break\n det_max.append(dc[:1])\n iou = bbox_iou(dc[0], dc[1:]) # iou with other boxes\n dc = dc[1:]\n # decay confidences\n dc[:, 4] *= torch.exp(-iou ** 2 / sigma)\n\n if len(det_max):\n det_max = torch.cat(det_max) # concatenate\n output[image_i] = det_max[(-det_max[:, 4]).argsort()] # sort\n\n return output",
"def non_max_suppression(prediction, conf_thres=0.4, iou_thres=0.6):\n\n nc = prediction[0].shape[1] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [None] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # If none remain process next image\n n = x.shape[0] # number of boxes\n if not n:\n continue\n\n # Sort by confidence\n # x = x[x[:, 4].argsort(descending=True)]\n\n # Batched NMS\n c = x[:, 5:6] * max_wh # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n break # time limit exceeded\n\n return output",
"def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):\r\n if prediction.dtype is torch.float16:\r\n prediction = prediction.float() # to FP32\r\n\r\n nc = prediction[0].shape[1] - 5 # number of classes\r\n xc = prediction[..., 4] > conf_thres # candidates\r\n\r\n # Settings\r\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\r\n max_det = 300 # maximum number of detections per image\r\n time_limit = 10.0 # seconds to quit after\r\n redundant = True # require redundant detections\r\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\r\n\r\n t = time.time()\r\n output = [None] * prediction.shape[0]\r\n for xi, x in enumerate(prediction): # image index, image inference\r\n # Apply constraints\r\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\r\n x = x[xc[xi]] # confidence\r\n\r\n # If none remain process next image\r\n if not x.shape[0]:\r\n continue\r\n\r\n # Compute conf\r\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\r\n\r\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\r\n box = xywh2xyxy(x[:, :4])\r\n\r\n # Detections matrix nx6 (xyxy, conf, cls)\r\n if multi_label:\r\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\r\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\r\n else: # best class only\r\n conf, j = x[:, 5:].max(1, keepdim=True)\r\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\r\n\r\n # Filter by class\r\n if classes:\r\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\r\n\r\n # Apply finite constraint\r\n # if not torch.isfinite(x).all():\r\n # x = x[torch.isfinite(x).all(1)]\r\n\r\n # If none remain process next image\r\n n = x.shape[0] # number of boxes\r\n if not n:\r\n continue\r\n\r\n # Sort by confidence\r\n # x = x[x[:, 4].argsort(descending=True)]\r\n\r\n # Batched NMS\r\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\r\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\r\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\r\n if i.shape[0] > max_det: # limit detections\r\n i = i[:max_det]\r\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\r\n try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\r\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\r\n weights = iou * scores[None] # box weights\r\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\r\n if redundant:\r\n i = i[iou.sum(1) > 1] # require redundancy\r\n except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139\r\n print(x, i, x.shape, i.shape)\r\n pass\r\n\r\n output[xi] = x[i]\r\n if (time.time() - t) > time_limit:\r\n break # time limit exceeded\r\n\r\n return output",
"def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, top_k_num=3000, merge=False, classes=None, agnostic=False):\n # print('conf_thres',conf_thres)\n if prediction.dtype is torch.float16:\n prediction = prediction.float() # to FP32\n\n nc = prediction[0].shape[1] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n time_limit = 10.0 # seconds to quit after\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [None] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero().t()\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n\n\n # If none remain process next image\n n = x.shape[0] # number of boxes\n if not n:\n continue\n\n # Sort by confidence\n # x = x[x[:, 4].argsort(descending=True)]\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n # # Sort by confidence\n ind_Sort_by_confidence = x[:, 4].argsort(descending=True)\n boxes = boxes[ind_Sort_by_confidence][:top_k_num] #\n scores = scores[ind_Sort_by_confidence][:top_k_num] #\n x = x[ind_Sort_by_confidence][:top_k_num] #\n # cross classes nms\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n # if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n # iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n # weights = iou * scores[None] # box weights\n # x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n # if redundant:\n # i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n break # time limit exceeded\n\n return output",
"def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n #redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n #merge = False # use merge-NMS\n\n output = [np.zeros((0, 6))] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n # Detections matrix nx6 (xyxy, conf, cls)\n # best class only\n conf = x[:, 5:].max(1, keepdims=True)\n j = np.argmax(x[:, 5:], axis=1)\n j = j.reshape(j.shape[0],1)\n #x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n x = np.concatenate((box, conf, j.astype(np.float32)),axis=1)\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort()[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n #i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n i = nms(boxes, scores, iou_thres) # NMS\n \n output[xi] = x[i]\n\n return output",
"def non_max_suppression(prediction, score_thres=0.5, nms_thres=0.4):\n output = [{'boxes':None, 'labels':None, 'scores':None} for _ in range(len(prediction))]\n for image_i, pred in enumerate(prediction):\n boxes = pred['boxes']\n labels = pred['labels'].unsqueeze(1)\n scores = pred['scores'].unsqueeze(1)\n image_pred = torch.cat((boxes, scores, labels.float()), 1)\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= score_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n #class_confs, class_preds = image_pred[:, 4:].max(1, keepdim=True)\n detections = image_pred\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i]['boxes'] = torch.stack(keep_boxes)[:,:4]\n output[image_i]['labels'] = torch.stack(keep_boxes)[:,-1]\n output[image_i]['scores'] = torch.stack(keep_boxes)[:,4:-1]\n\n return output",
"def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output",
"def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):\n nc = prediction.shape[2] - 5\n xc = prediction[..., 4] > conf_thres\n min_wh, max_wh = 2, 4096\n max_det = 300\n max_nms = 30000\n time_limit = 10.0\n redundant = True\n multi_label &= nc > 1\n merge = False\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction):\n x = x[xc[xi]]\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5]\n v[:, 4] = 1.0\n v[range(len(l)), l[:, 0].long() + 5] = 1.0\n x = torch.cat((x, v), 0)\n if not x.shape[0]:\n continue\n x[:, 5:] *= x[:, 4:5]\n box = xywh2xyxy(x[:, :4])\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else:\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n n = x.shape[0]\n if not n:\n continue\n elif n > max_nms:\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n c = x[:, 5:6] * (0 if agnostic else max_wh)\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = torchvision.ops.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det:\n i = i[:max_det]\n if merge and 1 < n < 3000.0:\n iou = box_iou(boxes[i], boxes) > iou_thres\n weights = iou * scores[None]\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)\n if redundant:\n i = i[iou.sum(1) > 1]\n output[xi] = x[i]\n if time.time() - t > time_limit:\n None\n break\n return output",
"def filter_detections(detections, arg_to_class, conf_thresh=0.5):\n num_classes = detections.shape[0]\n filtered_detections = []\n for class_arg in range(1, num_classes):\n class_detections = detections[class_arg, :]\n confidence_mask = np.squeeze(class_detections[:, -1] >= conf_thresh)\n confident_class_detections = class_detections[confidence_mask]\n if len(confident_class_detections) == 0:\n continue\n class_name = arg_to_class[class_arg]\n for confident_class_detection in confident_class_detections:\n coordinates = confident_class_detection[:4]\n score = confident_class_detection[4]\n detection = Box2D(coordinates, score, class_name)\n filtered_detections.append(detection)\n return filtered_detections",
"def non_max_suppression_kneron(prediction, conf_thres=0.1, iou_thres=0.6, top_k_num=3000, merge=False, classes=None, agnostic=False):\n if prediction.dtype is torch.float16:\n prediction = prediction.float() # to FP32\n\n nc = prediction[0].shape[1] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n time_limit = 10.0 # seconds to quit after\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [None] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero().t()\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n\n\n # If none remain process next image\n n = x.shape[0] # number of boxes\n if not n:\n continue\n\n # Sort by confidence\n # x = x[x[:, 4].argsort(descending=True)]\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n # Sort by confidence\n ind_Sort_by_confidence = x[:, 4].argsort(descending=True)\n boxes = boxes[ind_Sort_by_confidence][:top_k_num] #\n scores = scores[ind_Sort_by_confidence][:top_k_num] #\n x = x[ind_Sort_by_confidence][:top_k_num] #\n # cross classes nms\n i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n break # time limit exceeded\n\n return output",
"def non_maximum_suppression(prediction, iou_threshold=0.45, score_threshold=0.25):\n\n # num_classes = len(names)\n max_wh = 4096\n max_det = 300\n max_nms = 30000\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n\n for xi, x in enumerate(prediction):\n x = x[x[..., 4] > score_threshold]\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = x[:, :4]\n\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > score_threshold]\n\n # Filter by class\n # if classes is not None:\n # x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n # sort by confidence\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n\n # Batched NMS\n c = x[:, 5:6] * max_wh # classes\n # boxes (offset by class), scores\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = nms(boxes, scores, iou_threshold) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n\n return output",
"def non_max_suppression(inputs, n_classes, max_output_size, iou_threshold, confidence_threshold):\n batch = tf.unstack(inputs)\n boxes_dicts = []\n\n for boxes in batch:\n boxes = tf.boolean_mask(boxes, boxes[:, 4] > confidence_threshold)\n classes = tf.argmax(boxes[:, 5:], axis=-1)\n classes = tf.expand_dims(tf.cast(classes, tf.float32), axis=-1)\n boxes = tf.concat([boxes[:, :5], classes], axis=-1)\n\n boxes_dict = dict()\n for cls in range(n_classes):\n mask = tf.equal(boxes[:, 5], cls)\n mask_shape = mask.get_shape()\n if mask_shape.ndims != 0:\n class_boxes = tf.boolean_mask(boxes, mask)\n boxes_coords, boxes_conf_scores, _ = tf.split(class_boxes, [4, 1, -1], axis=-1)\n boxes_conf_scores = tf.reshape(boxes_conf_scores, [-1])\n indices = tf.image.non_max_suppression(boxes_coords,\n boxes_conf_scores,\n max_output_size,\n iou_threshold)\n class_boxes = tf.gather(class_boxes, indices)\n boxes_dict[cls] = class_boxes[:, :5]\n\n boxes_dicts.append(boxes_dict)\n return boxes_dicts",
"def _remove_experts(self):\n self.experts = [ex for ex in self.experts if np.mean(\n ex.weight) >= self.theta]",
"def nonmax_suppression(pred_labels, probabilities, x0, y0, windowsize, overlap_thr=0.1):\n\n # define list of proposals as list of indices over all predictions\n proposals = np.arange(0, len(pred_labels), dtype='int')\n\n # intialize final list of boxes\n final = []\n\n # delete all boxes labeled as \"other\"\n mask_other = [pred!='other' for pred in pred_labels]\n proposals = list(proposals[mask_other])\n\n while len(proposals)>0:\n\n # add the box with the highest confidence to the final selection\n ind_max = probabilities[proposals].argmax()\n select = proposals.pop(ind_max)\n final.append(select)\n\n # delete all boxes which overlap substantially with this last selected box\n delete_i = []\n for i, p in enumerate(proposals):\n\n # compute IoU score\n boxA = (x0[select], y0[select], x0[select]+windowsize[select], y0[select]+windowsize[select])\n boxB = (x0[p], y0[p], x0[p]+windowsize[p], y0[p]+windowsize[p])\n iou = intersection_over_union_from_boxes(boxA, boxB)\n\n if iou >= overlap_thr:\n delete_i.append(i)\n\n # update proposal list\n proposals = [proposals[i] for i in range(len(proposals)) if i not in delete_i]\n\n\n new_pred_labels = np.array(pred_labels)[final]\n new_probabilities = np.array(probabilities)[final]\n new_x0 = np.array(x0)[final]\n new_y0 = np.array(y0)[final]\n new_windowsize = np.array(windowsize)[final]\n\n return new_pred_labels, new_probabilities, new_x0, new_y0, new_windowsize",
"def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n if probs[i] == 0:\n continue\n for j in range(i+1, len(boxes)):\n if classes_num[i] == classes_num[j] and iou(box, boxes[j]) > thr:\n probs[j] = 0.0\n\n return probs",
"def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc",
"def pre_exclude_rest_instances(seg_raw_df, class_df):\n import smdt.features.features as features\n\n print \"============start pre exclusion==================================\"\n \n\n seg_mag_df = seg_raw_df.copy(deep=True)\n temp = [seg_mag_df[name]**2 for name in s_info.raw_value_names]\n temp = np.sum(temp, axis=0)\n seg_mag_df['mag'] = np.sqrt(temp)\n\n grouped = seg_mag_df.groupby(s_info.segment_col)\n\n c1 = grouped['mag'].std()\n c2 = grouped['mag'].aggregate(features.f_slope).abs()\n c3 = grouped['mag'].aggregate(features.f_pppeakamplitude, paras={\"q\":10})\n\n # Used for visualization testing\n # import matplotlib.pyplot as pyplot\n # c1.hist()\n # pyplot.figure()\n # c2.hist(bins=100)\n # pyplot.figure()\n # c3.hist()\n\n # pyplot.show()\n # sys.exit(1)\n t1 = 0.13\n t2 = 0.0004\n t3 = 0.5\n print \"===================preexclusion criterions====================\"\n print \"std: <= %f, slope: <= %f, peak-peak amplitude: < %f\" % (t1, t2, t3) \n excluded = (c1 <= t1) & (c2 <= t2) & (c3 <= t3)\n class_df[s_info.classname_col][excluded] = 'rest'\n class_df[s_info.classnum_col][excluded] = -1\n\n c_rest = len(class_df[excluded])\n c_keep = len(class_df[~excluded])\n c_total = len(class_df)\n print \"Exclusion result: excluded/keep/total: %.1f, %.1f, %.1f exclusion rate: %.2f\" % (c_rest, c_keep, c_total, c_rest/float(c_total))\n return class_df",
"def remove_hi_confidence_chromosome(G):\n to_remove = []\n for nd in G.nodes():\n if get_length_from_spades_name(nd) > PARAMS.CHROMOSOME_LEN_THRESH and \\\n G.nodes[nd]['score'] < PARAMS.CHROMOSOME_SCORE_THRESH:\n to_remove.append(nd)\n to_remove.append(rc_node(nd))\n G.remove_nodes_from(to_remove)\n logger.info(\"Removed %d long, likely chromosomal nodes\" % len(set(to_remove)))",
"def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]",
"def remove_low_info(X, max_frequency=0.99):\n selector = UniqueThreshold(max_frequency=max_frequency)\n return selector.fit_transform(X)",
"def apply_tf_nms(boxes, pred_conf):\r\n return tf.image.combined_non_max_suppression(\r\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\r\n scores=tf.reshape(\r\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\r\n max_output_size_per_class=50,\r\n max_total_size=50,\r\n iou_threshold=FLAGS.iou,\r\n score_threshold=FLAGS.score\r\n )",
"def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n box_predictions = []\n predicted_box_classes = []\n predicted_box_scores = []\n for label in range(len(self.class_names)):\n # for each class\n boxes = []\n class_tmp = []\n score_tmp = []\n for i in range(len(box_classes)):\n if box_classes[i] == label:\n boxes.append(filtered_boxes[i])\n class_tmp.append(box_classes[i])\n score_tmp.append(box_scores[i])\n\n class_tmp = np.array(class_tmp)\n while len(class_tmp) > 0 and np.amax(class_tmp) > -1:\n index = np.argmax(score_tmp)\n box_predictions.append(boxes[index])\n predicted_box_classes.append(class_tmp[index])\n predicted_box_scores.append(score_tmp[index])\n score_tmp[index] = -1\n class_tmp[index] = -1\n px1, py1, px2, py2 = boxes[index]\n p_area = (px2 - px1) * (py2 - py1)\n\n for box in range(len(boxes)):\n if class_tmp[box] != -1:\n bx1, by1, bx2, by2 = boxes[box]\n b_area = (bx2 - bx1) * (by2 - by1)\n ox1 = px1 if px1 > bx1 else bx1\n oy1 = py1 if py1 > by1 else by1\n ox2 = px2 if px2 < bx2 else bx2\n oy2 = py2 if py2 < by2 else by2\n if ox2 - ox1 <= 0 or oy2 - oy1 <= 0:\n continue\n # Calculate overlap area and IoU\n o_area = (ox2 - ox1) * (oy2 - oy1)\n u_area = p_area + b_area - o_area\n iou = o_area / u_area\n\n if iou > self.nms_t:\n class_tmp[box] = -1\n score_tmp[box] = -1\n\n box_predictions = np.array(box_predictions)\n predicted_box_classes = np.array(predicted_box_classes)\n predicted_box_scores = np.array(predicted_box_scores)\n return (box_predictions, predicted_box_classes, predicted_box_scores)",
"def Clean(pmf):\n vals = [val for val in pmf.Values() if val < thresh]\n [pmf.Remove(val) for val in vals]",
"def _build_non_max_suppressor(type):\n\n if type == model_config.SSD:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n elif type == model_config.FASTER_RCNN:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n else:\n raise ValueError('type must be ssd or faster_rcnn string')\n\n if iou_threshold < 0 or iou_threshold > 1.0:\n raise ValueError('iou_threshold not in [0, 1.0].')\n if max_detections_per_class > max_total_detections:\n raise ValueError('max_detections_per_class should be no greater than '\n 'max_total_detections.')\n\n non_max_suppressor_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=score_threshold,\n iou_thresh=iou_threshold,\n max_size_per_class=max_detections_per_class,\n max_total_size=max_total_detections)\n\n return non_max_suppressor_fn",
"def suppress_analyze(more_exclusions=None):\n return api.override_step_data(\n 'read filter exclusion spec',\n api.json.output({\n 'base': {\n 'exclusions': ['f.*'] + (more_exclusions or []),\n },\n 'chromium': {\n 'exclusions': [],\n },\n })\n )",
"def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms",
"def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)",
"def get_noise_thresholds(size_of_class=45, fakes='./data/CASIA1_fakes', originals='./data/CASIA1_originals', \n fakes_ela='./data/CASIA1_fakes_ela'):\n fakes_list = os.listdir(fakes)\n\n fakes = load_fakes(fakes_list, fakes, originals)\n\n noises = []\n for i, item in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, item.path.split('\\\\')[-1]))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n noises.append(estimate_noise(image))\n\n fakes = np.array(fakes)\n noises = np.array(noises)\n idxs = noises.argsort()\n sorted_by_noise = fakes[idxs]\n\n for i, item in enumerate(sorted(noises)):\n if (i+1) % size_of_class == 0:\n print(\"####\", i+1, item)\n else:\n print(i+1, item)",
"def non_max_suppression_all_classes(boxes, scores, labels, iou_threshold=0.5):\n excluded_indices = []\n for i in range(0,len(boxes)):\n obj1_box, _, obj1_label = boxes[i], scores[i], labels[i]\n for j in range(i+1,len(boxes)):\n obj2_box, _, obj2_label = boxes[j], scores[j], labels[j]\n if (get_iou(obj1_box, obj2_box) > iou_threshold):\n #print('excluding idx={}, class={}, score={}, bbox={}'.format(j, obj2_label, obj2_score, obj2_box))\n excluded_indices.append(j)\n \n excluded_indices = list(set(excluded_indices)) #Elimina indices repetidos\n included_indices = [idx for idx in range(len(boxes)) if idx not in excluded_indices]\n #print(included_indices)\n return included_indices",
"def purge_outlying_trials(self, trial_nums, thresh=5.0):\n for injkey in self.values.keys():\n for fit_key in self.values[injkey].keys():\n points = np.array(self.values[injkey][\n fit_key]['metric_val']['vals'])\n if len(points.shape) == 1:\n points = points[:, None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n modified_z_score = 0.6745 * diff / med_abs_deviation\n good_trials = modified_z_score < thresh\n if not np.all(good_trials):\n bad_trials = np.where(not good_trials)[0]\n logging.warning(\n 'Outlier(s) detected for %s in trial(s) %s. Will be '\n 'removed. If you think this should not happen, please '\n 'change the value of the threshold used for the '\n 'decision (currently set to %.2e).'%(\n fit_key, trial_nums[bad_trials], thresh\n )\n )\n for fitkey in self.values[injkey].keys():\n for param in self.values[injkey][fitkey].keys():\n new_vals = np.delete(\n np.array(self.values[injkey][\n fitkey][param]['vals']),\n bad_trials\n )\n self.values[injkey][\n fitkey][param]['vals'] = new_vals"
] |
[
"0.73838377",
"0.70225656",
"0.688687",
"0.67943317",
"0.67545784",
"0.67077035",
"0.6688915",
"0.6534796",
"0.62705123",
"0.6241854",
"0.6180899",
"0.6155851",
"0.5827579",
"0.57856023",
"0.5763159",
"0.55842763",
"0.555821",
"0.5550575",
"0.5529788",
"0.5489186",
"0.5482876",
"0.54732305",
"0.54695976",
"0.545383",
"0.54406476",
"0.5435401",
"0.54324335",
"0.54021096",
"0.5345317",
"0.533278"
] |
0.7614843
|
0
|
L{BaseLogFile.shouldRotate} is abstract and must be implemented by subclass.
|
def test_abstractShouldRotate(self):
log = logfile.BaseLogFile(self.name, self.dir)
self.addCleanup(log.close)
self.assertRaises(NotImplementedError, log.shouldRotate)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])",
"def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))",
"def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')\n self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')",
"def test_rotateAlreadyExists(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n # Build a new file with the same name as the file which would be created\n # if the log file is to be rotated.\n newFilePath = \"{}.{}\".format(log.path, log.suffix(log.lastDate))\n with open(newFilePath, \"w\") as fp:\n fp.write(\"123\")\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_logfile_recreates_after_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n self.conveyer.rotate_logs()\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"fourth\\\"}\"))\n self.assertEquals(self.events_out.getvalue(), \"{message: \\\"fourth\\\"}\")\n self.assertTrue(self.renamerCalled)",
"def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs()\n self.assertEquals(self.conveyer.logfile, None)\n self.assertEquals(filename, \"testfile.dat.rotated\")",
"def test_rotatePermissionDirectoryNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.directory, 0o444)\n # Restore permissions so tests can be cleaned up.\n self.addCleanup(os.chmod, log.directory, 0o755)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def control_log_file(self, enable: bool = False, rotate: bool = False) -> bool:\n enable = tools.coerce_bool(enable)\n if enable and not self.HANDLER_FILE:\n self.HANDLER_FILE = logs.add_file(**self.ARGS_HANDLER_FILE)\n self.LOG.debug(\"Logging to file enabled.\")\n return True\n self.rotate_log_files(value=rotate)\n if not enable and self.HANDLER_FILE:\n self.LOG.debug(\"Logging to file disabled.\")\n self.HANDLER_FILE.close()\n logs.del_file(obj=self.LOG_LOGGER)\n self.HANDLER_FILE = None\n return True\n return False",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")",
"def _determine_rotated_logfile(self):\n rotated_filename = self._check_rotated_filename_candidates()\n if rotated_filename and exists(rotated_filename):\n if stat(rotated_filename).st_ino == self._offset_file_inode:\n return rotated_filename\n\n # if the inode hasn't changed, then the file shrank; this is expected with copytruncate,\n # otherwise print a warning\n if stat(self.filename).st_ino == self._offset_file_inode:\n if self.copytruncate:\n return rotated_filename\n else:\n sys.stderr.write(\n \"[pygtail] [WARN] file size of %s shrank, and copytruncate support is \"\n \"disabled (expected at least %d bytes, was %d bytes).\\n\" %\n (self.filename, self._offset, stat(self.filename).st_size))\n\n return None",
"def rotate(self):\n pass",
"def test_rotated(self):\n self._calibration_test(\"rotated\")",
"def test_modePreservation(self):\n open(self.path, \"w\").close()\n os.chmod(self.path, 0o707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n log.rotate()\n self.assertEqual(mode, os.stat(self.path)[stat.ST_MODE])",
"def rotation(self, *args, **kwargs) -> Any:\n pass",
"def rotatelog(self,**kwargs):\n newname = self._newname()\n newlgf = LogFile(newname,**kwargs)\n with self.id_lock:\n self._rotatelog(newlgf,newname)",
"def rotate(self):\n val = None\n try:\n \"\"\"Get rotation tags\"\"\"\n f = open(self._name, 'rb')\n tags = exifread.process_file(f)\n f.close()\n orientation = tags[\"Image Orientation\"]\n val = orientation.values\n\n except:\n return True\n\n if 3 in val:\n rotation = 180\n\n elif 6 in val:\n rotation = 270\n\n elif 8 in val:\n rotation = 90\n\n else:\n rotation = 0\n\n self._image = pygame.transform.rotate(self._image, rotation)",
"def rotate(self, *args, **kwargs): # real signature unknown\n pass",
"def testModePreservation(self):\n f = open(self.path, \"w\").close()\n os.chmod(self.path, 0707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n log.rotate()\n self.assertEquals(mode, os.stat(self.path)[stat.ST_MODE])",
"def can_rotate(self) -> (bool, list, list):\n arms, axis = self.get_arms()\n rotation = self.get_rotation()\n if rotation[1][0] == 0:\n return False\n coord_axis = np.array([[axis.x_obj], [axis.y_obj]])\n coord_arms = [np.array([[arm.x_obj], [arm.y_obj]])\n for arm in arms]\n coord_new_arms = []\n # Collecting arm coordinates in the situation there turnstile rotates\n for i in range(len(arms)):\n coord_arm = coord_arms[i]\n coord_new_arms.append(\n np.dot(rotation, coord_arm - coord_axis) + coord_axis)\n can_rotate = True\n for i in range(len(arms)):\n coord_arm = coord_arms[i]\n coord_new_arm = coord_new_arms[i]\n # Object turnstile should push\n coord_front = coord_arm + coord_new_arm - coord_axis\n coord_character = np.array(\n [[self.moving_character.x_obj], [self.moving_character.y_obj]])\n obj_front = self.grid.obj_list[\n coord_front[0][0], coord_front[1][0]]\n if not (isinstance(obj_front, ob.Void) or (coord_front == coord_character).all()):\n can_rotate = False\n # Object being at the destination of the arm\n obj_target = self.grid.obj_list[\n coord_new_arm[0][0], coord_new_arm[1][0]]\n if not isinstance(obj_target, (ob.Void, ob.TurnstileBloc)):\n can_rotate = False\n return can_rotate, coord_arms, coord_new_arms",
"def rotate90(self):",
"def on_rotate(self, callback):\n self._rotate_callback = callback",
"def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return",
"def log_rotate():\n st = os.stat(log_file)\n if st.st_size >= max_log_size:\n logfiles = glob.glob(\"{0}/{1}.[0-9].gz\".format(clone_dir,os.path.basename(log_file)))\n for i in xrange(len(logfiles),0,-1):\n oldlog = logfiles[i-1]\n newlog = \"{0}.{1}.gz\".format(oldlog[:-5],i)\n os.rename(oldlog,newlog)\n f_in = open(log_file, \"r+b\")\n f_out = gzip.open(log_file + \".0.gz\", \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.seek(0)\n f_in.truncate(0)\n f_in.close()\n pass",
"def test_rotate(self):\n rotable = TestRotable()\n command = RotateCommand(rotable)\n collinear_to_new_direction = rotable.get_direction() + rotable.get_angular_velocity()\n\n command()\n\n ratio = norm(rotable.get_direction()) / norm(collinear_to_new_direction)\n self.assertTrue(allclose(collinear_to_new_direction * ratio, rotable.get_direction()))\n self.assertTrue(isclose(norm(rotable.get_direction()), 1))",
"def test_logReader(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\\n\")\n log.write(\"def\\n\")\n log.rotate()\n log.write(\"ghi\\n\")\n log.flush()\n\n # check reading logs\n self.assertEqual(log.listLogs(), [1])\n with contextlib.closing(log.getCurrentLog()) as reader:\n reader._file.seek(0)\n self.assertEqual(reader.readLines(), [\"ghi\\n\"])\n self.assertEqual(reader.readLines(), [])\n with contextlib.closing(log.getLog(1)) as reader:\n self.assertEqual(reader.readLines(), [\"abc\\n\", \"def\\n\"])\n self.assertEqual(reader.readLines(), [])\n\n # check getting illegal log readers\n self.assertRaises(ValueError, log.getLog, 2)\n self.assertRaises(TypeError, log.getLog, \"1\")\n\n # check that log numbers are higher for older logs\n log.rotate()\n self.assertEqual(log.listLogs(), [1, 2])\n with contextlib.closing(log.getLog(1)) as reader:\n reader._file.seek(0)\n self.assertEqual(reader.readLines(), [\"ghi\\n\"])\n self.assertEqual(reader.readLines(), [])\n with contextlib.closing(log.getLog(2)) as reader:\n self.assertEqual(reader.readLines(), [\"abc\\n\", \"def\\n\"])\n self.assertEqual(reader.readLines(), [])",
"def check_random_rotation(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [degrees, resample, expand, center, fill_value], _ = parse_user_args(method, *args, **kwargs)\n check_degrees(degrees)\n\n if resample is not None:\n type_check(resample, (Inter,), \"resample\")\n if expand is not None:\n type_check(expand, (bool,), \"expand\")\n if center is not None:\n check_2tuple(center, \"center\")\n if fill_value is not None:\n check_fill_value(fill_value)\n\n return method(self, *args, **kwargs)\n\n return new_method",
"def startRotatingRight(self,event):\n self.isRotatingRight=True",
"def _check_rotated_filename_candidates(self):\n # savelog(8)\n candidate = \"%s.0\" % self.filename\n if (exists(candidate) and exists(\"%s.1.gz\" % self.filename) and\n (stat(candidate).st_mtime > stat(\"%s.1.gz\" % self.filename).st_mtime)):\n return candidate\n\n # logrotate(8)\n # with delaycompress\n candidate = \"%s.1\" % self.filename\n if exists(candidate):\n return candidate\n\n # without delaycompress\n candidate = \"%s.1.gz\" % self.filename\n if exists(candidate):\n return candidate\n\n rotated_filename_patterns = (\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # for TimedRotatingFileHandler\n \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\",\n )\n for rotated_filename_pattern in rotated_filename_patterns:\n candidates = glob.glob(self.filename + rotated_filename_pattern)\n if candidates:\n candidates.sort()\n return candidates[-1] # return most recent\n\n # no match\n return None"
] |
[
"0.71487135",
"0.6492304",
"0.64736336",
"0.64527625",
"0.63196",
"0.6227223",
"0.6160279",
"0.6128817",
"0.60674125",
"0.60335624",
"0.6029823",
"0.58550125",
"0.57736146",
"0.5724196",
"0.56967515",
"0.5521593",
"0.5495449",
"0.54723734",
"0.54601",
"0.5436036",
"0.5279109",
"0.5177493",
"0.5160039",
"0.5118937",
"0.51165825",
"0.51015687",
"0.5099576",
"0.5094622",
"0.5083703",
"0.50272626"
] |
0.8085365
|
0
|
Various tests for log readers. First of all, log readers can get logs by number and read what was written to those log files. Getting nonexistent log files raises C{ValueError}. Using anything other than an integer index raises C{TypeError}. As logs get older, their log numbers increase.
|
def test_logReader(self):
log = logfile.LogFile(self.name, self.dir)
self.addCleanup(log.close)
log.write("abc\n")
log.write("def\n")
log.rotate()
log.write("ghi\n")
log.flush()
# check reading logs
self.assertEqual(log.listLogs(), [1])
with contextlib.closing(log.getCurrentLog()) as reader:
reader._file.seek(0)
self.assertEqual(reader.readLines(), ["ghi\n"])
self.assertEqual(reader.readLines(), [])
with contextlib.closing(log.getLog(1)) as reader:
self.assertEqual(reader.readLines(), ["abc\n", "def\n"])
self.assertEqual(reader.readLines(), [])
# check getting illegal log readers
self.assertRaises(ValueError, log.getLog, 2)
self.assertRaises(TypeError, log.getLog, "1")
# check that log numbers are higher for older logs
log.rotate()
self.assertEqual(log.listLogs(), [1, 2])
with contextlib.closing(log.getLog(1)) as reader:
reader._file.seek(0)
self.assertEqual(reader.readLines(), ["ghi\n"])
self.assertEqual(reader.readLines(), [])
with contextlib.closing(log.getLog(2)) as reader:
self.assertEqual(reader.readLines(), ["abc\n", "def\n"])
self.assertEqual(reader.readLines(), [])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_read_logs(self):\n records = log_reader(self.input_data_path)\n\n results = list(records)\n\n self.assertEqual(4, len(results))\n\n self.assertEqual(\n deque([u'record 1\\n', u'\\tline 1\\n',\n u'\\tline 2\\n', u'\\tline 3\\n']),\n results[0])\n\n self.assertEqual(\n deque([u'record 4\\n', u'\\tline 1\\n', u'\\tline 2\\n']),\n results[3])",
"def test_listLogsIgnoresZeroSuffixedFiles(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n for i in range(0, 3):\n with open(\"{}.{}\".format(log.path, i), \"w\") as fp:\n fp.write(\"123\")\n\n self.assertEqual([1, 2], log.listLogs())",
"def test_read_logs(self):\n class LogReaderLocal(DarshanLogReader):\n def logfiles(self):\n for file_path in ['dir1/file1', 'dir1/file2', 'dir1/file3', 'dir2/file4']:\n yield file_path\n\n def read_log(self, filename, suggested_label):\n return [FakeJob(filename, suggested_label)]\n\n lr = LogReaderLocal('test-path')\n\n logs = lr.read_logs()\n self.assertIsInstance(logs, types.GeneratorType)\n\n logs = list(logs)\n self.assertEqual(len(logs), 2)\n\n self.assertEqual(logs[0].names, ['dir1/file1', 'dir1/file2', 'dir1/file3'])\n self.assertEqual(logs[1].names, ['dir2/file4'])",
"def get_filename_from_index(self, log_index, depth = 5):\n\n try:\n \n if os.path.isfile('./master.log'):\n #global_lock.acquire()\n master_files_handle = open('./master.log', 'r')\n all_lines = master_files_handle.readlines()\n #global_lock.release()\n else:\n return_value = (-1, 'No logs to report from yet')\n return return_value\n # if 0, then that's the \"most recent\"\n \n\n # default value\n return_value = (-1, 'No Valid Page')\n \n if str(log_index) == '0':\n return_value = (1, all_lines[-1].strip('\\n'))\n else:\n return_value = (1, all_lines[-(int(log_index) % len(all_lines)) - 1].strip(' \\n'))\n master_files_handle.close() \n \n except Exception, e:\n print 'Error in get_filename_from_index'\n print e\n return (-1, e)\n else:\n # means it's blank..\n # the depth variable is so that we don't infinitely hang\n if not return_value[1] and depth > 0:\n new_index_int = (int(log_index)-1)\n print 'Redirecting to '+str(new_index_int)+' with depth '+str(depth)\n return_value = self.get_filename_from_index(str(new_index_int), depth - 1)\n return return_value",
"def test_getLog(self):\n data = [\"1\\n\", \"2\\n\", \"3\\n\"]\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n for d in data:\n log.write(d)\n log.flush()\n\n # This returns the current log file.\n r = log.getLog(0.0)\n self.addCleanup(r.close)\n\n self.assertEqual(data, r.readLines())\n\n # We can't get this log, it doesn't exist yet.\n self.assertRaises(ValueError, log.getLog, 86400)\n\n log._clock = 86401 # New day\n r.close()\n log.rotate()\n r = log.getLog(0) # We get the previous log\n self.addCleanup(r.close)\n self.assertEqual(data, r.readLines())",
"def test_maxNumberOfLog(self):\n log = logfile.LogFile(self.name, self.dir, rotateLength=10, maxRotatedFiles=3)\n self.addCleanup(log.close)\n log.write(\"1\" * 11)\n log.write(\"2\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n\n log.write(\"3\" * 11)\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n\n log.write(\"4\" * 11)\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n with open(\"{}.3\".format(self.path)) as fp:\n self.assertEqual(fp.read(), \"1\" * 11)\n\n log.write(\"5\" * 11)\n with open(\"{}.3\".format(self.path)) as fp:\n self.assertEqual(fp.read(), \"2\" * 11)\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))",
"def test_maxNumberOfLog(self):\n log = logfile.LogFile(self.name, self.dir, rotateLength=10,\n maxRotatedFiles=3)\n log.write(\"1\" * 11)\n log.write(\"2\" * 11)\n self.failUnless(os.path.exists(\"%s.1\" % self.path))\n\n log.write(\"3\" * 11)\n self.failUnless(os.path.exists(\"%s.2\" % self.path))\n\n log.write(\"4\" * 11)\n self.failUnless(os.path.exists(\"%s.3\" % self.path))\n self.assertEquals(file(\"%s.3\" % self.path).read(), \"1\" * 11)\n\n log.write(\"5\" * 11)\n self.assertEquals(file(\"%s.3\" % self.path).read(), \"2\" * 11)\n self.failUnless(not os.path.exists(\"%s.4\" % self.path))",
"def test_listLogsWithBadlyNamedFiles(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n with open(\"{}.1\".format(log.path), \"w\") as fp:\n fp.write(\"123\")\n with open(\"{}.bad-file\".format(log.path), \"w\") as fp:\n fp.write(\"123\")\n\n self.assertEqual([1], log.listLogs())",
"def test_LogReaderReadsZeroLine(self):\n # We don't need any content, just a file path that can be opened.\n with open(self.path, \"w\"):\n pass\n\n reader = logfile.LogReader(self.path)\n self.addCleanup(reader.close)\n self.assertEqual([], reader.readLines(0))",
"def test_aud_from_log_ignores_index():\n assert True",
"def open_logs():\n\treturn log, action_log, error_log",
"def test_004_log(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __dir_game_log = os.path.join(__dir_game_log, __test_logname)\n #test list\n __log_test = __test.log(__test_filename, __test_data, True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"\\nLine (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\")\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, __test_data, False)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Still Line (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\")\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, __test_data, True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Line (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\") two times\")\n print(__log_game.read())\n os.remove(__dir_game_log)\n self.assertFalse(os.path.isfile(__dir_game_log))\n #test string\n __log_test = __test.log(__test_filename, \"__test_data\", True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"\\nOne Line:\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, \"__test_data\", False)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Still one Line:\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, \"__test_data\", True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Two Lines\")\n print(__log_game.read())",
"def test_file_reader(self) -> None:\n result = [['123', 'Jin He', 'Computer Science'],\n ['234', 'Nanda Koka', 'Software Engineering'],\n ['345', 'Benji Cai', 'Software Engineering']]\n # file have header\n self.assertTrue(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|', True)) == result)\n # file without header\n self.assertFalse(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|')) == result)\n # More than 3 datafield\n with self.assertRaises(ValueError):\n list(file_reader(\n 'C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 4, '|', True))\n # file not found\n with self.assertRaises(FileNotFoundError):\n list(file_reader('abc.txt', 3, '|', True))",
"def parseLogs():\n parsed_logs = (sc\n .textFile(logFile)\n .map(parseApacheLogLine)\n .cache())\n\n access_logs = (parsed_logs\n .filter(lambda s: s[1] == 1)\n .map(lambda s: s[0])\n .cache())\n\n failed_logs = (parsed_logs\n .filter(lambda s: s[1] == 0)\n .map(lambda s: s[0]))\n failed_logs_count = failed_logs.count()\n if failed_logs_count > 0:\n print 'Number of invalid logline: {}'.format(failed_logs.count())\n for line in failed_logs.take(20):\n print 'Invalid logline: {}'.format(line)\n\n print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format((parsed_logs.count(), access_logs.count(), failed_logs.count()))\n return parsed_logs, access_logs, failed_logs",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def test_file_logger_all_values_parsed(self, mkdir):\n expected_file_logger = self.__create_file_logger(\n filename=\"/folder/log_file1\",\n format=\"('%(asctime)s [%(threadName)18s][%(levelname)8s] %(message)s')\",\n max_bytes=1024,\n backup_count=1,\n min_level=logging.INFO,\n max_level=logging.ERROR,\n )\n\n # parse config and get first logger\n parsed_config = self._get_parsed_config(\"file_loggers_config.yml\")\n parsed_file_logger = parsed_config.loggers[0]\n\n # make sure file was opened\n mkdir.assert_called_with(\"/folder\")\n\n result, msg = self.__compare_file_loggers(\n expected_file_logger, parsed_file_logger\n )\n self.assertTrue(\n result,\n msg=f\"Full config is not as expected, following comparison failed: {msg}\",\n )",
"def test_write_on_loaded(self):\n # Run a first time 100 epochs\n logger = Logger(file=\"test_logs.csv\", nb_epochs=200)\n logger.__print_function__ = mock.Mock()\n for i in range(0, 100):\n i += 1\n logger.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)})\n del logger\n\n # Run a second time 100 epochs\n logger_second = Logger(shell=False, file=\"test_logs.csv\", nb_epochs=200)\n logger_second.__print_function__ = mock.Mock()\n self.assertEqual(logger_second.__print_function__.called, False, \"Print should not be called\")\n self.assertEqual(\n logger_second.logs,\n [(i, {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)}) for i in range(1, 101)],\n \"Each first 100 lines should be well written\"\n )\n for i in range(100, 200):\n i += 1\n logger_second.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)})\n del logger_second\n\n # Load and read\n logger_reader = Logger(shell=False, file=\"test_logs.csv\", nb_epochs=200)\n self.assertEqual(\n logger_reader.logs,\n [(i, {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)}) for i in range(1, 201)],\n \"Every line + the old one should be well written\"\n )",
"def _read_log(self, **kwargs):\n\n log_file = find_log_file()\n\n if not log_file:\n raise RequestProcessingError(\n \"Error attempting to retrieve logs - unable to determine log filename. \"\n \"Please verify that the plugin is writing to a log file.\"\n )\n\n try:\n return read_log_file(log_file=log_file, **kwargs)\n except IOError as e:\n raise RequestProcessingError(\n \"Error attempting to retrieve logs - unable to read log file at {0}. \"\n \"Root cause I/O error {1}: {2}\".format(log_file, e.errno, e.strerror)\n )",
"def test_write(self):\n logger = Logger(file=\"test_logs.csv\", nb_epochs=100)\n logger.__print_function__ = mock.Mock()\n for i in range(100):\n i += 1\n logger.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)})\n\n self.assertEqual(logger.__print_function__.called, True, \"Calling to print should have been done\")\n\n self.assertEqual(\n logger.__print_function__.call_args_list,\n [\n call\n for i in range(1, 101)\n for call in [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 0+i),\n mock.call('+\\tkno acc:', 1+i),\n mock.call('+\\tunk acc:', 2+i),\n mock.call(\"::: Dev Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 1+i),\n mock.call('+\\tkno acc:', 2+i),\n mock.call('+\\tunk acc:', 3+i),\n ]\n ]\n )\n del logger\n logger_reader = Logger(file=\"test_logs.csv\", nb_epochs=100)\n self.assertEqual(\n logger_reader.logs,\n [(i, {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)}) for i in range(1, 101)],\n \"Every line should be well written\"\n )",
"def read_linelog():",
"def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename",
"def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)",
"def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()",
"def test_read_namespaced_build_log_log(self):\n pass",
"def main():\n from time import perf_counter\n\n hint = None\n if len(sys.argv) > 1:\n hint = sys.argv[1]\n logpath = _guess_log_path(hint)\n\n if logpath:\n print(\"Logpath:\", logpath)\n print(\"Getting values:\")\n\n with open(logpath, 'r') as ofl:\n headers = parse_log_headers(ofl.read(297))\n pre = perf_counter()\n values = get_values(ofl)\n post = perf_counter()\n\n print(\"Values:\")\n for i, v in enumerate(values):\n print(\"\\t\", headers[i][0], \": \", v)\n print(\"Read in {:0.9f} sec\".format(post - pre))\n\n else:\n print(\"Nope\")",
"def load_logs(workdir='.', logtype='log', logbase='log*log', geobase='geo*log'):\n workdir = os.path.abspath(workdir)\n if logtype == 'log':\n globbase = os.path.join(workdir, logbase)\n loader = bats.BatsLog\n elif logtype == 'geo':\n globbase = os.path.join(workdir, geobase)\n loader = bats.GeoIndexFile\n else:\n raise ValueError('load_logs: logtype must be either \"log\" or \"geo\", not {}'.format(logtype))\n fns = sorted(glob.glob(globbase))\n if not fns:\n raise IOError('No log files found with selected search term ({})'.format(globbase))\n all_logs = [loader(fn) for fn in fns]\n log = all_logs[0]\n if len(all_logs)>1:\n for nlg in all_logs[1:]:\n log = merge_logfiles(log, nlg)\n return log",
"def test_featurecounts_step_part_get_log_file(gene_expression_quantification_workflow):\n expected = (\n \"work/{mapper}.featurecounts.{library_name}/log/{mapper}.featurecounts.{library_name}.log\"\n )\n actual = gene_expression_quantification_workflow.get_log_file(\"featurecounts\", \"run\").get(\"log\")\n assert actual == expected",
"def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start",
"def test_var_not_set_same_logging_file(set_tempdir):\n tasks = run_n_simple_tasks(5)\n for task in tasks:\n log_path_matcher = LogPathCorrectnessMatcher(default_log_path(task[\"jobid\"]))\n log_path = UsedLogPath(task)\n assert log_path == log_path_matcher",
"def test_read_parser_output(self):\n with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'parsed-darshan-log'), 'r') as f:\n data = f.read()\n\n lr = DarshanLogReader('test-path')\n\n ingested = lr._read_log_internal(data, 'a-file', 'a-label')\n\n # We return an array of jobs, which in this case will contain only one\n self.assertIsInstance(ingested, list)\n self.assertEqual(len(ingested), 1)\n ingested = ingested[0]\n self.assertIsInstance(ingested, DarshanIngestedJob)\n\n self.assertEqual(ingested.filename, \"a-file\")\n self.assertEqual(ingested.jobid, 0)\n self.assertEqual(ingested.label, \"a-label\")\n self.assertEqual(ingested.log_version, \"2.06\")\n self.assertEqual(ingested.time_start, 1469134177)\n self.assertEqual(ingested.uid, 1801)\n\n # We should have parsed the file details correctly\n self.assertEqual(len(ingested.file_details), 3)\n\n self.assertIn(\"dummy-path\", ingested.file_details)\n f = ingested.file_details[\"dummy-path\"]\n self.assertEqual(f.name, \"dummy-path\")\n self.assertEqual(f.open_count, 1)\n self.assertEqual(f.bytes_read, 0)\n self.assertEqual(f.bytes_written, 0)\n self.assertEqual(f.read_count, 0)\n self.assertEqual(f.write_count, 0)\n\n self.assertIn(\"dummy-path-1\", ingested.file_details)\n f = ingested.file_details[\"dummy-path-1\"]\n self.assertEqual(f.name, \"dummy-path-1\")\n self.assertEqual(f.open_count, 1)\n self.assertEqual(f.bytes_read, 0)\n self.assertEqual(f.bytes_written, 35)\n self.assertEqual(f.read_count, 0)\n self.assertEqual(f.write_count, 2)\n\n self.assertIn(\"dummy-path-2\", ingested.file_details)\n f = ingested.file_details[\"dummy-path-2\"]\n self.assertEqual(f.name, \"dummy-path-2\")\n self.assertEqual(f.open_count, 1)\n self.assertEqual(f.bytes_read, 0)\n self.assertEqual(f.bytes_written, 0)\n self.assertEqual(f.read_count, 0)\n self.assertEqual(f.write_count, 0)"
] |
[
"0.68103456",
"0.6534209",
"0.6531893",
"0.6460277",
"0.6354693",
"0.63483745",
"0.6344147",
"0.6059483",
"0.5766431",
"0.57321525",
"0.5725983",
"0.5687353",
"0.56618154",
"0.5638312",
"0.5596897",
"0.5578588",
"0.55629724",
"0.5526029",
"0.5516472",
"0.55151045",
"0.5510207",
"0.5499644",
"0.54924846",
"0.5457984",
"0.5432457",
"0.5387915",
"0.5329944",
"0.53270966",
"0.53156716",
"0.531217"
] |
0.68883264
|
0
|
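As a companion to the test_logReader entry above, here is a minimal standalone sketch of the same rotation-and-reader behaviour; the temporary directory and the "app.log" name are illustrative choices, not part of the original fixture:

import tempfile
from twisted.python import logfile

logdir = tempfile.mkdtemp()                   # stand-in for the fixture's self.dir
log = logfile.LogFile("app.log", logdir)
log.write("abc\n")
log.rotate()                                  # app.log is renamed to app.log.1
log.write("def\n")
log.flush()

print(log.listLogs())                         # [1] -- the rotated file; older logs get higher numbers
reader = log.getLog(1)                        # a LogReader over the rotated file
print(reader.readLines())                     # ["abc\n"]
reader.close()

# nonexistent or non-integer identifiers fail loudly, as the test asserts:
#   log.getLog(2)    -> ValueError
#   log.getLog("1")  -> TypeError
log.close()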
L{LogReader.readLines} supports reading no line.
|
def test_LogReaderReadsZeroLine(self):
# We don't need any content, just a file path that can be opened.
with open(self.path, "w"):
pass
reader = logfile.LogReader(self.path)
self.addCleanup(reader.close)
self.assertEqual([], reader.readLines(0))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def myreadlines(self):\n lines = []\n while True:\n line = self.readline()\n if not line:\n return lines\n else:\n lines.append(line)",
"def _consume_blanklines(self):\n while True:\n line = self.reader.readline()\n if len(line) == 0:\n return None\n\n if line.rstrip() == '':\n self.offset = self.fh.tell() - self.reader.rem_length()\n continue\n\n return line",
"def read_linelog():",
"def readLine(self, default=None):\n raise NotImplementedError()",
"def readlines(self):\n lines = []\n while True:\n line = self.readline()\n if line is None:\n return lines\n lines.append(line)",
"def skiplines(self, lines: int):\n for i in range(0, lines):\n self.fileobject.readline()",
"def readline(self) -> Optional[bytes]:\n ...",
"def emptyline(self):",
"def readlines(channel=LOG_CHANNEL_STDOUT):",
"def readline(self) -> Optional[str]:",
"def _checkForBlankLines(self, datalines):\n empties = None\n count = 0\n rtlines = []\n for line in datalines:\n if line.strip() == \"\":\n empties = 1\n else:\n if empties == 1: # If data line found after empty line then raise\n raise Exception(\"Empty line found in data section at line: \" + str(count))\n else:\n rtlines.append(line)\n count = count + 1\n return rtlines",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def emptyline(self):\n pass",
"def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()",
"def readlines(self) -> list[bytes] | None:"
] |
[
"0.67332673",
"0.66607267",
"0.6356252",
"0.6341487",
"0.63176274",
"0.6276629",
"0.62745696",
"0.62190604",
"0.6175334",
"0.6134149",
"0.6123871",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.60674244",
"0.6067186",
"0.6015684"
] |
0.7608662
|
0
|
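For the readLines(0) behaviour above, a tiny sketch on a throwaway file (the path is illustrative) shows the same result outside the test harness:

import os
import tempfile
from twisted.python import logfile

path = os.path.join(tempfile.mkdtemp(), "empty.log")   # illustrative path
with open(path, "w"):
    pass                                                # contents don't matter here

reader = logfile.LogReader(path)
print(reader.readLines(0))                              # [] -- zero lines requested, zero returned
reader.close()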
Test the fromFullPath method.
|
def test_fromFullPath(self):
log1 = logfile.LogFile(self.name, self.dir, 10, defaultMode=0o777)
self.addCleanup(log1.close)
log2 = logfile.LogFile.fromFullPath(self.path, 10, defaultMode=0o777)
self.addCleanup(log2.close)
self.assertEqual(log1.name, log2.name)
self.assertEqual(os.path.abspath(log1.path), log2.path)
self.assertEqual(log1.rotateLength, log2.rotateLength)
self.assertEqual(log1.defaultMode, log2.defaultMode)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_fromFullPath(self):\n log1 = logfile.LogFile(self.name, self.dir, 10, defaultMode=0777)\n log2 = logfile.LogFile.fromFullPath(self.path, 10, defaultMode=0777)\n self.assertEquals(log1.name, log2.name)\n self.assertEquals(os.path.abspath(log1.path), log2.path)\n self.assertEquals(log1.rotateLength, log2.rotateLength)\n self.assertEquals(log1.defaultMode, log2.defaultMode)",
"def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)",
"def test_get_absolute_path():\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"../foo\"), \"/bar/foo\")\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"/foo\"), \"/foo\")",
"def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def testPathNoMock(self):\n self.assertEqual(\n os.path.join(self.directory, self.filename),\n self.mr.path\n )",
"def test_path(self):\n self.assertEqual(self.ftp_case.path, '/rfc/rfc1808.txt')\n self.assertEqual(self.ldap_case.path, '/c=GB')\n self.assertEqual(self.news_case.path, \n 'comp.infosystems.www.servers.unix')\n self.assertEqual(self.telnet_case.path, '/')\n self.assertEqual(self.urn_case.path, \n 'oasis:names:specification:docbook:dtd:xml:4.1.2')",
"def test_sanitized_filename(self):\n value = \"/absolute/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"\n\n value = \"../relative/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"",
"def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())",
"def test_verify_path_2(self):\n result = basic.verify_path(str(self.test_filepath1) + \"abcxyz\", \"file\")\n self.assertFalse(result)",
"def _adjust_path(self, file):\n path_component = '/osm_pla/test/'\n real_path = os.path.realpath(file)\n if path_component not in real_path:\n return os.path.dirname(real_path) + path_component + os.path.basename(real_path)\n else:\n return real_path",
"def test_verify_path_1(self):\n result = basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)",
"def test_local_filepath_helper():\n expected_local_filepath = TEST_LOCAL_CONFIG_PATH.replace('.cfg', '_local.cfg')\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH) == TEST_LOCAL_CONFIG_PATH\n\n assert wf_utils.get_local_config_filepath(TEST_LOCAL_CONFIG_PATH, True) == expected_local_filepath",
"def test_valid_pathname(self):\n self.assertTrue(Util.is_pathname_valid('./myrandomvalidfilename.dat'))\n self.assertTrue(Util.is_pathname_valid('myrandomvalidfilename.dat'))",
"def test_filesystem_can_translate_path_to_file_in_datadir(self):\n datadir_path = PhotoPath(self.datadir)\n url = Url.from_string('https://example.com/foo/bar')\n photo = Screenshot(url, datadir_path, self.refresh_rate)\n self.index.es.index = MagicMock()\n photo.path.filesize = MagicMock(return_value=10000)\n self.index.save_photo(photo)\n\n self.index.photos_file_exists = MagicMock(return_value=123000)\n self.index.photos_get_photo = MagicMock(return_value=photo)\n\n path = self.filesystem._translate_path(\n '/example.com/2019-01-13H20:00/foo/bar.png'\n )\n self.assertEqual(datadir_path.full_path(), path)",
"def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)",
"def test_realpath(self):\n print real_upath(\"ref with space\")\n self.assertTrue(real_upath(\"ref with space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_upath(\"ref\\ with\\ space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_ppath(\"ref with space\").endswith(\"ref with space\"))\n self.assertTrue(real_ppath(\"ref\\ with\\ space\").endswith(\"ref with space\"))",
"def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)",
"def test_relativise_src_under():\n src = pathlib.Path(\"/tmp/foo/bar/baz/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../../dst.txt\")",
"def test_local_path(self, nexus_base):\n assert isinstance(nexus_base.local_path, str)",
"def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"//foo/bar//baz/\"), \"/foo/bar/baz/\")\n self.assertEqual(normalize_path(\"//f%20oo/bar\"), \"/f oo/bar\")",
"def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))",
"def testFileInRead(self, mockPath):\n mockPath.return_value = 'bananaphone.ccc'\n\n self.assertEqual(\n None,\n self.node.file_in\n )\n\n self.node.file_in = 'mybestfile.ccc'\n\n mockPath.assert_called_once_with('mybestfile.ccc')\n\n self.assertEqual(\n 'bananaphone.ccc',\n self.node.file_in\n )",
"def _get_fullpath(self, address):\n address = os.path.abspath(address)\n if len(address) < 4 or address[-4:] != \".dta\":\n address = address + \".dta\"\n return address",
"def abspath(path: str) -> str:\n pass",
"def test_repo_relpath(self):\n from os import path\n repodir = \"~/codes/ci/tests\"\n relpath = \"../pyci/config.py\"\n result = path.expanduser(\"~/codes/ci/pyci/config.py\")\n self.assertEqual(result, get_repo_relpath(repodir, relpath))",
"def getRelativePath(fullPath, rootPath, liberalChars=True):\n\tif not fullPath.startswith(rootPath):\n\t\traise ValueError(\n\t\t\t\"Full path %s does not start with resource root %s\"%(fullPath, rootPath))\n\tres = fullPath[len(rootPath):].lstrip(\"/\")\n\tif not liberalChars and not _SAFE_FILENAME.match(res):\n\t\traise ValueError(\"File path '%s' contains characters known to\"\n\t\t\t\" the DaCHS authors to be hazardous in URLs. Please defuse the name\"\n\t\t\t\" before using it for published names (or see howDoI).\"%res)\n\treturn res",
"def test_get_pyrin_root_path():\n\n root_path = os.path.abspath('.')\n assert application_services.get_pyrin_root_path() == root_path",
"def test_make_temp_path(self):\n temp_path = self.make_temp_path('some-id', 'a', 'b.test')\n self.assertTrue(temp_path.endswith('b.test'))",
"def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))"
] |
[
"0.7311831",
"0.6667163",
"0.66629404",
"0.6523232",
"0.6366584",
"0.6325803",
"0.6300358",
"0.62475204",
"0.61943454",
"0.618361",
"0.6173475",
"0.6126438",
"0.6062561",
"0.6036115",
"0.6036055",
"0.60141873",
"0.6011796",
"0.60003114",
"0.599448",
"0.5986072",
"0.5981678",
"0.5981282",
"0.5967142",
"0.5961697",
"0.5942401",
"0.59370697",
"0.5929274",
"0.59269136",
"0.5892445",
"0.5883602"
] |
0.70760405
|
1
|
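A small sketch of the equivalence the test_fromFullPath entry above asserts; the directory comes from tempfile and the "app.log" name is an assumption made for illustration:

import os
import tempfile
from twisted.python import logfile

logdir = tempfile.mkdtemp()                        # illustrative directory
full_path = os.path.join(logdir, "app.log")

log1 = logfile.LogFile("app.log", logdir, 10, defaultMode=0o777)
log2 = logfile.LogFile.fromFullPath(full_path, 10, defaultMode=0o777)

assert log1.name == log2.name                      # "app.log"
assert os.path.abspath(log1.path) == log2.path     # same on-disk file
assert log1.rotateLength == log2.rotateLength      # 10 in both cases
assert log1.defaultMode == log2.defaultMode        # 0o777 in both cases

log1.close()
log2.close()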
Test specifying the permissions used on the log file.
|
def test_specifiedPermissions(self):
log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)
self.addCleanup(log1.close)
mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])
if runtime.platform.isWindows():
# The only thing we can get here is global read-only
self.assertEqual(mode, 0o444)
else:
self.assertEqual(mode, 0o066)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0066)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEquals(mode, 0444)\n else:\n self.assertEquals(mode, 0066)",
"def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def test_configure_logger_file_permissions_unix(self):\n path, segments = manufacture.fs.makePathInTemp()\n content = (\n '[log]\\n'\n 'log_file: %s\\n'\n ) % (path)\n config = self._getConfiguration(content=content)\n if self._drop_user != '-':\n account = self._drop_user\n else:\n account = manufacture.username\n\n logger = manufacture.makeLogger()\n\n try:\n logger.configure(configuration=config, account=account)\n logger.removeAllHandlers()\n\n self.assertTrue(\n manufacture.fs.exists(segments),\n 'Log file was not created at ' + path.encode('utf-8'),\n )\n\n # FIXME:928:\n # Rather than testing for 2 variables, we should only check\n # for matching \"account\" and not \"Administrators\".\n self.assertIn(\n [unicode(account), 'Administrators'],\n manufacture.fs.getOwner(segments))\n finally:\n manufacture.fs.deleteFile(segments, ignore_errors=True)",
"def _have_permissions(self, location):\n if not os.path.isfile(location):\n return True\n \n stats = os.stat(location)\n # check specifically for write permission\n return bool(stats.st_mode & stat.S_IWUSR)",
"def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")",
"def permissions():\n pass",
"def test_permissions(self):\n exist = os.access('models/amenity.py', os.F_OK)\n self.assertTrue(exist)\n read = os.access('models/amenity.py', os.R_OK)\n self.assertTrue(read)\n write = os.access('models/amenity.py', os.W_OK)\n self.assertTrue(write)\n exe = os.access('models/amenity.py', os.X_OK)\n self.assertTrue(exe)",
"def get_permissions(self, filepath):\n return oct(os.stat(filepath).st_mode & 0777)",
"def test_modePreservation(self):\n open(self.path, \"w\").close()\n os.chmod(self.path, 0o707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n log.rotate()\n self.assertEqual(mode, os.stat(self.path)[stat.ST_MODE])",
"def testChAttrs(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertTrue(results[0].startswith(b'-rw-r--r--'))\n self.assertEqual(results[1], b'')\n self.assertTrue(results[2].startswith(b'----------'), results[2])\n self.assertEqual(results[3], b'')\n\n d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',\n 'ls -l testfile1', 'chmod 644 testfile1')\n return d.addCallback(_check)\n # XXX test chgrp/own",
"def testModePreservation(self):\n f = open(self.path, \"w\").close()\n os.chmod(self.path, 0707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n log.rotate()\n self.assertEquals(mode, os.stat(self.path)[stat.ST_MODE])",
"def test_get_permissions(self):\n pass",
"def test_provider_system_hook_file_chmod(change_dir, fix_file_perms):\n tackle(context_file='chmod.yaml', no_input=True)\n assert oct(os.stat('tackle.yaml').st_mode)[-3:] == \"600\"",
"def get_permissions(filepath):\n return oct(stat.S_IMODE(os.lstat(filepath).st_mode))",
"def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")",
"def permissions_check(\n basedir='.',\n verbose_level=0,\n):\n # File permissions on Cygwin/Windows filesystems don't work the\n # same way as Linux. Don't try to change them.\n # TODO(dittrich): Is there a Better way to handle perms on Windows?\n fs_type = get_fs_type(basedir)\n if fs_type in ['NTFS', 'FAT', 'FAT32']:\n msg = (\n f\"[-] {basedir} has file system type '{fs_type}': \"\n \"skipping permissions check\"\n )\n logger.info(msg)\n return\n any_other_perms = stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH\n for root, dirs, files in os.walk(basedir, topdown=True):\n for name in files:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n f\"[!] file '{path}' is mode {oct(perms)}\",\n file=sys.stderr\n )\n except OSError:\n pass\n for name in dirs:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n (\n f\"[!] directory '{path}' is mode \"\n f\"{oct(perms)}\"\n ),\n file=sys.stderr\n )\n except OSError:\n pass",
"def test_rotatePermissionDirectoryNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.directory, 0o444)\n # Restore permissions so tests can be cleaned up.\n self.addCleanup(os.chmod, log.directory, 0o755)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def __test_config_file__(parser, path):\n if not os.path.exists(path):\n return False\n else:\n statinfo = os.stat(path)\n if not os.path.isfile(path) or os.path.islink(path):\n parser.error(\"invalid configuration file {} (it must be a regular file)\".format(path))\n elif stat.filemode(statinfo.st_mode) != '-rw-------':\n parser.error(\"invalid configuration file {} (it must have only read and write permissions for user)\".format(path))\n elif statinfo.st_uid != os.getuid():\n parser.error(\"invalid configuration file {} (the owner must be the user)\".format(path))\n return True",
"def set_permissions(self, permissions):\n\n\t\tif Platform.PLATFORM_POSIX == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('([-r][-w][-xsStT]){3,3}', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRUSR\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWUSR\n\t\t\t\tif permissions[2] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXUSR\n\t\t\t\tif permissions[3] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRGRP\n\t\t\t\tif permissions[4] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWGRP\n\t\t\t\tif permissions[5] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXGRP\n\t\t\t\tif permissions[6] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IROTH\n\t\t\t\tif permissions[7] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWOTH\n\t\t\t\tif permissions[8] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXOTH\n\t\t\t\t\t\n\t\t\telif permissions.__class__ == str and re.match('(0)?[0-7]{3,3}', permissions):\n\t\t\t\tif len(permissions) == 3:\n\t\t\t\t\tpermissions = '0' + permissions\n\t\t\t\tself.__permissions = octstr_to_int(permissions)\n\t\t\t\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\t\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError()\n\n\t\telif Platform.PLATFORM_WINDOWS == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('[-r][-w]', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IREAD\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWRITE\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError() \n\t\telse:\n\t\t\traise PlatformNotSupportedError()",
"def _verify_logging(self):\n log_file = self.device.log_file_name\n self.assertTrue(os.path.exists(log_file),\n f\"{self.device.name}'s log file {log_file} does not exist\")\n self.assertTrue(os.path.getsize(log_file),\n f\"{self.device.name}'s log file {log_file} is empty\")",
"def test_file(path, mode, exception=RuntimeError, isdir=False):\n what = (\"directory\" if isdir else \"file\")\n if not os.access(path, os.F_OK):\n raise exception(\"Cannot access %s '%s'.\" % (what, path))\n if isdir and not os.path.isdir(path):\n raise exception(\n \"Expected '%s' to be a directory, but it's not.\" % path)\n if (mode & os.R_OK) and not os.access(path, os.R_OK):\n raise exception(\"Cannot read %s '%s'.\" % (what, path))\n if (mode & os.W_OK) and not os.access(path, os.W_OK):\n raise exception(\"Cannot write to %s '%s'.\" % (what, path))\n if (mode & os.X_OK) and not os.access(path, os.X_OK):\n if isdir:\n raise exception(\"Cannot traverse directory '%s':\"\n \" lacks 'x' permission.\" % path)\n else:\n raise exception(\"File '%s' lacks execute ('x') permission.\" % path)\n return True",
"def assertMode(self, path, expected):\n stat_result = os.stat(path)\n format_mode = lambda m: \"0o%03o\" % m\n self.assertEqual(\n format_mode(stat_result.st_mode & 0o777),\n format_mode(expected),\n )",
"def check_permission(perm_mode, flags=stat.S_IWOTH):\n return bool(perm_mode & flags)",
"def check_file_validity(self, file_):\n if not os.access(file_, os.F_OK):\n raise TailError(\"File '%s' does not exist\" % (file_))\n if not os.access(file_, os.R_OK):\n raise TailError(\"File '%s' not readable\" % (file_))\n if os.path.isdir(file_):\n raise TailError(\"File '%s' is a directory\" % (file_))",
"def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success",
"def has_permission(self, file):\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@link android.Manifest.permission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@RequiresPermission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@link Manifest.permission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@RequiresPermission.Read(@RequiresPermission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@RequiresPermission.Write(@RequiresPermission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\".permission\"):\n return True\n\n return False"
] |
[
"0.8162897",
"0.72478354",
"0.66200453",
"0.66200453",
"0.6612495",
"0.6511051",
"0.641548",
"0.6336619",
"0.6332582",
"0.6299115",
"0.62703973",
"0.62536114",
"0.61752075",
"0.61353326",
"0.60526747",
"0.60353744",
"0.6025186",
"0.59882975",
"0.59793806",
"0.5972703",
"0.5971934",
"0.59244066",
"0.5840676",
"0.5825322",
"0.5811463",
"0.5766489",
"0.57649904",
"0.5734967",
"0.5730691",
"0.5730028"
] |
0.8171138
|
0
|
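A sketch of the defaultMode behaviour checked above, for POSIX systems (as the test notes, Windows can only report global read-only); the temporary directory and file name are illustrative:

import os
import stat
import tempfile
from twisted.python import logfile

logdir = tempfile.mkdtemp()
log = logfile.LogFile("perm.log", logdir, defaultMode=0o066)

mode = stat.S_IMODE(os.stat(log.path)[stat.ST_MODE])
print(oct(mode))                      # 0o66 on POSIX: the file is created with the requested mode
log.close()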
L{logfile.LogFile.reopen} allows to rename the currently used file and make L{logfile.LogFile} create a new file.
|
def test_reopen(self):
with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log1:
log1.write("hello1")
savePath = os.path.join(self.dir, "save.log")
os.rename(self.path, savePath)
log1.reopen()
log1.write("hello2")
with open(self.path) as f:
self.assertEqual(f.read(), "hello2")
with open(savePath) as f:
self.assertEqual(f.read(), "hello1")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_reopen(self):\n log1 = logfile.LogFile(self.name, self.dir)\n log1.write(\"hello1\")\n savePath = os.path.join(self.dir, \"save.log\")\n os.rename(self.path, savePath)\n log1.reopen()\n log1.write(\"hello2\")\n log1.close()\n\n f = open(self.path, \"r\")\n self.assertEquals(f.read(), \"hello2\")\n f.close()\n f = open(savePath, \"r\")\n self.assertEquals(f.read(), \"hello1\")\n f.close()",
"def _reopen(self, mode='r', **kwargs):\n\n self.h5file.close()\n self.h5file = tb.open_file(self.h5fname, mode, **kwargs)\n return True",
"def rotatelog(self,**kwargs):\n newname = self._newname()\n newlgf = LogFile(newname,**kwargs)\n with self.id_lock:\n self._rotatelog(newlgf,newname)",
"def reopen(self):\n self.event_writer.reopen()\n self._closed = False",
"def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()",
"def open_logfile(name):\r\n\r\n _format = \"%(asctime)s.%(msecs)03d %(name)-10s: %(levelname)-8s: %(message)s\"\r\n _datefmt = \"%H:%M:%S\"\r\n\r\n if config[\"log_dir\"] != None:\r\n filename = os.path.join(config[\"log_dir\"], name) + \".log\"\r\n else:\r\n filename = config[\"log_file\"]\r\n\r\n logger = logging.getLogger()\r\n\r\n # Remove any existing handlers\r\n for handler in logger.handlers:\r\n logger.removeHandler(handler)\r\n handler.close()\r\n\r\n # Add a new handler\r\n handler = logging.FileHandler(filename, mode='a')\r\n handler.setFormatter(logging.Formatter(_format, _datefmt))\r\n logger.addHandler(handler)",
"def test_reopen_changed_inode(tmp_path):\n\n path1 = tmp_path / \"file\"\n path2 = tmp_path / \"changed_file\"\n\n with open(path1, \"w\") as f:\n for i in range(1000):\n print(f\"{i}\", file=f)\n\n with open(path2, \"w\") as f:\n for i in range(2000):\n print(f\"{i}\", file=f)\n\n file_info = LogFileInfo(\n filename=path1,\n size_when_last_opened=0,\n file_position=0,\n file_handle=None,\n is_err_file=False,\n job_id=None,\n worker_pid=None,\n )\n\n file_info.reopen_if_necessary()\n for _ in range(1000):\n file_info.file_handle.readline()\n\n orig_file_pos = file_info.file_handle.tell()\n file_info.file_position = orig_file_pos\n\n # NOTE: On windows, an open file can't be deleted.\n file_info.file_handle.close()\n os.remove(path1)\n os.rename(path2, path1)\n\n file_info.reopen_if_necessary()\n\n assert file_info.file_position == orig_file_pos\n assert file_info.file_handle.tell() == orig_file_pos",
"def reopen(self):\n self.close()\n self._fileobj = os.fdopen(\n os.open(str(self.path), os.O_CREAT | os.O_RDWR, 384), \"r+b\", 0\n )",
"def reopen(self):\n self.close()\n self._fileobj = os.fdopen(os.open(str(self.path), os.O_CREAT | os.O_RDWR, 384), \"r+b\", 0)",
"def closeLogFile(self):\n if not self.fileObject is None:\n try:\n self.fileObject.close()\n except IOError:\n pass # Ignore IOError on closing\n self.fileObject = None",
"def reopen():",
"def setlogfile(file_name):\n global logfile\n logfile = file_name",
"def close_log():\n\n global log_file\n if log_file is not None:\n try:\n log_file.flush()\n finally:\n log_file.close()",
"def cycle_logfile(logfile):\n logfile_old = logfile + '.old'\n if os.path.exists(logfile):\n # Cycle the old logfiles to *.old\n if os.path.exists(logfile_old):\n # E.g. Windows don't support rename-replace\n os.remove(logfile_old)\n os.rename(logfile, logfile_old)",
"def setPath(logPath):\n GlobalLogger.logger.close()\n GlobalLogger.logger = FileLogger(logPath)",
"def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 'w')",
"def log2file(self, file_path):\n self.removeHandler(self.fh)\n self.fh = self.init_handler(logging.FileHandler(file_path))\n return",
"def _rotatelog(self,newlgf,newname):\n modlogger.debug( \"rl:%s\"%newname)\n if self.logf: \n start_new_thread(self._waitlog,(self.logf,self.logname))\n self.logsync.acquire()\n\n if newname: self.in_use_logs += [ newname ] \n try:\n self.logf, self.logname = newlgf , newname\n except Exception:\n if newname:\n self.in_use_logs.remove(newname)\n raise",
"def setLogFile(filename):\n\tglobal logfile\n\tlogfile = filename",
"def _close_file_logger(self):\n if self._file_log_handler is not None:\n self._file_log_handler.flush()\n self._file_log_handler.close()\n self.logger.removeHandler(self._file_log_handler)\n self._file_log_handler = None\n self.logger.propagate = True",
"def SetLoggingFile(log_file):\n global logger\n new_logger = logging.getLogger('dragon_filehandler')\n new_logger.setLevel(logger.level)\n file_handler = logging.FileHandler(log_file, mode=\"w\", encoding=\"UTF-8\")\n new_logger.addHandler(file_handler)\n logger = new_logger",
"def logToFile(self, pathname):\n self.closeLogFile()\n if os.path.exists( pathname ) and not os.path.isfile( pathname ):\n raise Exception( 'Logging to \"{0}\" requested, but that already exists and is not a file' )\n self.fileObject = open( pathname, 'a', 0 )",
"def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")",
"def close_logs_file(file: TextIOWrapper):\n print_prefix(prefix=0)\n print(\"Finishing logs...\")\n file.write(\"\\nEnd of log entry\\n\")\n file.close()\n print_prefix(prefix=0)\n print(\"Done writing logs\")",
"def rename_log_file(log_file):\n assert log_file.endswith('.build')\n new_log_file = log_file[:-6] + '.log'\n # os.rename(log_file, new_log_file)\n # I'm copying instead of renaming, for testing:\n shutil.copy(log_file, new_log_file)\n return new_log_file",
"def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()",
"def set_log_file(filename):\n pass",
"def os_open_logfile( self, ):\r\n# from subprocess import Popen, PIPE # since infrequently used ??\r\n# try:\r\n# proc = Popen( [ self.parameters.ex_editor, self.parameters.pylogging_fn ] )\r\n#\r\n# except Exception as excpt:\r\n# self.logger.info( \"os_open_logfile exception trying to use >\" + str( self.parameters.ex_editor ) + \"< to open file >\" + str( self.parameters.pylogging_fn ) +\r\n# \"< Exception \" + str( excpt ) )\r\n# #self.logger.info( \"send_receive() timeout -- send_data = >\" + send_data +\"<\", )\r\n AppGlobal.os_open_txt_file( self.parameters.pylogging_fn )",
"def setLogFile(self, logfile):\n self.log(\"Log set to: \" + logfile)\n self.logfile = logfile",
"def _open_changed ( self ):\n file_name = open_file( extensions = FileInfo(), id = demo_id )\n if file_name != '':\n self.file_name = file_name"
] |
[
"0.7946377",
"0.64694947",
"0.63982296",
"0.59405273",
"0.5934263",
"0.5886852",
"0.5834705",
"0.57587516",
"0.57212925",
"0.5604787",
"0.5531132",
"0.5527666",
"0.55086905",
"0.5500291",
"0.54822856",
"0.547342",
"0.545335",
"0.53930455",
"0.5317504",
"0.52583057",
"0.52558446",
"0.52164084",
"0.520928",
"0.5187845",
"0.5138315",
"0.5131256",
"0.5123645",
"0.5106557",
"0.5099965",
"0.5078224"
] |
0.78429323
|
1
|
Specifying an invalid directory to L{LogFile} raises C{IOError}.
|
def test_nonExistentDir(self):
e = self.assertRaises(
IOError, logfile.LogFile, self.name, "this_dir_does_not_exist"
)
self.assertEqual(e.errno, errno.ENOENT)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_slf_badfolder():\n oldlogfile = get_logfile()\n with pytest.warns(UserWarning, match=\"Cannot write to logfile\"):\n start_logfile(\"nonexistent-folder/log.txt\")\n set_logfile(oldlogfile)",
"def test_bad_log_dir():\n with pytest.warns(LoggerWarning):\n log_file = '/abc/log.log'\n logger = init_logger(__name__, log_file=log_file)\n assert len(logger.handlers) == 1\n assert logger.handlers[0].name == 'stream'\n assert LOGGERS.loggers[__name__]['log_file'] is None\n\n LOGGERS.clear()",
"def test_slf_badfilename():\n oldlogfile = get_logfile()\n with pytest.warns(UserWarning, match=\"Cannot write to logfile\"):\n start_logfile(\"?/:\")\n set_logfile(oldlogfile)",
"def test_invalid_dir(self):\n self.assertRaises(OSError, awstats_reader.AwstatsReader, '/tmp/XYZ', 'example.com')",
"def test_cantChangeFileMode(self):\n if runtime.platform.isWindows():\n name, directory = \"NUL\", \"\"\n expectedPath = \"NUL\"\n else:\n name, directory = \"null\", \"/dev\"\n expectedPath = \"/dev/null\"\n\n log = logfile.LogFile(name, directory, defaultMode=0o555)\n self.addCleanup(log.close)\n\n self.assertEqual(log.path, expectedPath)\n self.assertEqual(log.defaultMode, 0o555)",
"def __init_log_folder():\n try:\n os.makedirs(Logger.__log_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e",
"def test_if_specified_dir_is_a_file_then_error_is_thrown(fs):\n\n output_dir = 'user_specified_directory'\n existing_file = output_dir\n\n fs.create_file(existing_file)\n assert os.path.exists(existing_file)\n assert os.path.isfile(existing_file)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(existing_file)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'\n\n assert os.path.exists(existing_file)\n assert os.path.isfile(existing_file)",
"def test_log_filenames_invalid_file(self):\n with self.assertRaises(Exception):\n self.app.log_filenames([__file__])\n self.assertEqual(self.get_track_count(), 0)",
"def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'",
"def set_log_file(filename):\n pass",
"def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)",
"def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def set_error(self, error):\n if self.log_file_exist(self.file_path_name):\n logging.error(error)\n else:\n print \"The log \"+ self.name_log + \"does not exist in the directory\"",
"def test_valid_dir_raises():\n with pytest.raises(ValueError):\n assert cli._valid_dir(__file__)",
"def open_log(fn):\n\n global log_file\n if fn is not None:\n d = os.path.dirname(fn)\n if d != \"\":\n makedirs(d)\n log_file = open(fn, \"a+\")",
"def set_log_dir(dir):\r\n LogOptions._LOG_DIR = dir",
"def test_file_append_missing_file(self):\n with (self.assertRaises(IOError)):\n FileWriter(self.bogus_path).append(self.ascii_string)",
"def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)",
"def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write",
"def setErrorFile(fname='dis.err'):\n dislin.errfil(fname)",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def test_process_args_should_reject_non_existent_input_directory(self, arg_dict):\n self.use_source_path(arg_dict, 'sample/directory_does_not_exist/')\n self.use_resolution_val(arg_dict, 600)\n\n with pytest.raises(FileNotFoundError):\n change_resolution.process_args(arg_dict)",
"def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)",
"def test_non_existing_directory_raises_when_metavar_is_dir_for_db_export_cleaned(self):\n with contextlib.redirect_stderr(io.StringIO()) as stderr:\n with pytest.raises(SystemExit):\n parser = cli_parser.get_parser()\n parser.parse_args([\"db\", \"export-archived\", \"--output-path\", \"/non/existing/directory\"])\n error_msg = stderr.getvalue()\n\n assert error_msg == (\n \"\\nairflow db export-archived command error: The directory \"\n \"'/non/existing/directory' does not exist!, see help above.\\n\"\n )",
"def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR",
"def test_custom_log_path_points_at_dir(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n\n with pytest.raises(IsADirectoryError):\n run_n_simple_tasks(1)",
"def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)",
"def test_supply_file(self):\n f = open(self.junk_file, 'w')\n f.close()\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, self.junk_file)",
"def test_configure_logger_file_permissions_unix(self):\n path, segments = manufacture.fs.makePathInTemp()\n content = (\n '[log]\\n'\n 'log_file: %s\\n'\n ) % (path)\n config = self._getConfiguration(content=content)\n if self._drop_user != '-':\n account = self._drop_user\n else:\n account = manufacture.username\n\n logger = manufacture.makeLogger()\n\n try:\n logger.configure(configuration=config, account=account)\n logger.removeAllHandlers()\n\n self.assertTrue(\n manufacture.fs.exists(segments),\n 'Log file was not created at ' + path.encode('utf-8'),\n )\n\n # FIXME:928:\n # Rather than testing for 2 variables, we should only check\n # for matching \"account\" and not \"Administrators\".\n self.assertIn(\n [unicode(account), 'Administrators'],\n manufacture.fs.getOwner(segments))\n finally:\n manufacture.fs.deleteFile(segments, ignore_errors=True)"
] |
[
"0.68436676",
"0.67017156",
"0.6490114",
"0.63831156",
"0.63824093",
"0.6239327",
"0.6176036",
"0.6102296",
"0.60836774",
"0.5973037",
"0.59534276",
"0.59534276",
"0.5948382",
"0.5926003",
"0.5904776",
"0.58937776",
"0.589284",
"0.58544636",
"0.58523554",
"0.58483446",
"0.5845495",
"0.5802716",
"0.5790232",
"0.57607704",
"0.572946",
"0.5729146",
"0.5726916",
"0.5722949",
"0.5706474",
"0.57000834"
] |
0.71894586
|
0
|
Opening a L{LogFile} which can be read and written to, but whose mode can't be changed, doesn't trigger an error.
|
def test_cantChangeFileMode(self):
if runtime.platform.isWindows():
name, directory = "NUL", ""
expectedPath = "NUL"
else:
name, directory = "null", "/dev"
expectedPath = "/dev/null"
log = logfile.LogFile(name, directory, defaultMode=0o555)
self.addCleanup(log.close)
self.assertEqual(log.path, expectedPath)
self.assertEqual(log.defaultMode, 0o555)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def open_log(fn):\n\n global log_file\n if fn is not None:\n d = os.path.dirname(fn)\n if d != \"\":\n makedirs(d)\n log_file = open(fn, \"a+\")",
"def test_slf_readonly(read_only_file):\n oldlogfile = get_logfile()\n with pytest.warns(UserWarning, match=\"Cannot write to logfile\"):\n start_logfile(read_only_file)\n set_logfile(oldlogfile)",
"def os_open_logfile( self, ):\r\n# from subprocess import Popen, PIPE # since infrequently used ??\r\n# try:\r\n# proc = Popen( [ self.parameters.ex_editor, self.parameters.pylogging_fn ] )\r\n#\r\n# except Exception as excpt:\r\n# self.logger.info( \"os_open_logfile exception trying to use >\" + str( self.parameters.ex_editor ) + \"< to open file >\" + str( self.parameters.pylogging_fn ) +\r\n# \"< Exception \" + str( excpt ) )\r\n# #self.logger.info( \"send_receive() timeout -- send_data = >\" + send_data +\"<\", )\r\n AppGlobal.os_open_txt_file( self.parameters.pylogging_fn )",
"def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)\n self.addCleanup(log1.close)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEqual(mode, 0o444)\n else:\n self.assertEqual(mode, 0o066)",
"def test_reopen(self):\n with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log1:\n log1.write(\"hello1\")\n savePath = os.path.join(self.dir, \"save.log\")\n os.rename(self.path, savePath)\n log1.reopen()\n log1.write(\"hello2\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"hello2\")\n with open(savePath) as f:\n self.assertEqual(f.read(), \"hello1\")",
"def test_reopen(self):\n log1 = logfile.LogFile(self.name, self.dir)\n log1.write(\"hello1\")\n savePath = os.path.join(self.dir, \"save.log\")\n os.rename(self.path, savePath)\n log1.reopen()\n log1.write(\"hello2\")\n log1.close()\n\n f = open(self.path, \"r\")\n self.assertEquals(f.read(), \"hello2\")\n f.close()\n f = open(savePath, \"r\")\n self.assertEquals(f.read(), \"hello1\")\n f.close()",
"def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 'w')",
"def test_modePreservation(self):\n open(self.path, \"w\").close()\n os.chmod(self.path, 0o707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n log.rotate()\n self.assertEqual(mode, os.stat(self.path)[stat.ST_MODE])",
"def test_slf_badfilename():\n oldlogfile = get_logfile()\n with pytest.warns(UserWarning, match=\"Cannot write to logfile\"):\n start_logfile(\"?/:\")\n set_logfile(oldlogfile)",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")",
"def test_init_logger_with_logfile(monkeypatch):\n log_path = f\"{gettempdir()}/{uuid()}.log\"\n assert not Path(log_path).exists()\n monkeypatch.setenv(\"LOG_OUTPUT\", log_path)\n logger = helpers.init_logger(uuid())\n msg = \"Write to disk.\"\n logger.warning(msg)\n assert Path(log_path).exists()\n with open(log_path, \"r\") as log:\n assert msg in log.read()",
"def logToFile(self, pathname):\n self.closeLogFile()\n if os.path.exists( pathname ) and not os.path.isfile( pathname ):\n raise Exception( 'Logging to \"{0}\" requested, but that already exists and is not a file' )\n self.fileObject = open( pathname, 'a', 0 )",
"def os_open_comm_log( self, ):\r\n AppGlobal.os_open_txt_file( self.parameters.comm_logging_fn )",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()",
"def testModePreservation(self):\n f = open(self.path, \"w\").close()\n os.chmod(self.path, 0707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n log.rotate()\n self.assertEquals(mode, os.stat(self.path)[stat.ST_MODE])",
"def open_logfile(name):\r\n\r\n _format = \"%(asctime)s.%(msecs)03d %(name)-10s: %(levelname)-8s: %(message)s\"\r\n _datefmt = \"%H:%M:%S\"\r\n\r\n if config[\"log_dir\"] != None:\r\n filename = os.path.join(config[\"log_dir\"], name) + \".log\"\r\n else:\r\n filename = config[\"log_file\"]\r\n\r\n logger = logging.getLogger()\r\n\r\n # Remove any existing handlers\r\n for handler in logger.handlers:\r\n logger.removeHandler(handler)\r\n handler.close()\r\n\r\n # Add a new handler\r\n handler = logging.FileHandler(filename, mode='a')\r\n handler.setFormatter(logging.Formatter(_format, _datefmt))\r\n logger.addHandler(handler)",
"def safe_open(filename, mode, return_none=False, zap=False):\n if 'w' in mode and os.path.exists(filename) and not zap:\n rounder_logger.error(\"ABORT: Output file exists '{}'. Please delete \"\n \"or rename the file and restart the program\".format(filename))\n if return_none:\n return None\n sys.exit(1)\n return open(filename, mode)",
"def __init__(self, logfile=None):\n if logfile is None:\n self.__fd = None\n else:\n self.__fd = open(logfile, \"a\")",
"def safe_open(fname, mode, buffering=-1):\n # file descriptors\n try:\n return open(fname, mode, buffering=buffering)\n except PermissionError as ex:\n raise xt.XonshError(f\"xonsh: {fname}: permission denied\") from ex\n except FileNotFoundError as ex:\n raise xt.XonshError(f\"xonsh: {fname}: no such file or directory\") from ex\n except Exception as ex:\n raise xt.XonshError(f\"xonsh: {fname}: unable to open file\") from ex",
"def setup_log(fileno):\n \n global logger\n logger = logging.Logger('log')\n handler = logging.StreamHandler(os.fdopen(fileno, 'a', 0))\n handler.setFormatter(logging.Formatter(\"[%(asctime)s] - [%(levelname)s] - %(message)s\", datefmt=None))\n logger.addHandler(handler)",
"def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0066)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEquals(mode, 0444)\n else:\n self.assertEquals(mode, 0066)",
"def open (self, path, mode):\r\n pass",
"def set_log_file(filename):\n pass",
"def test_configure_logger_file_permissions_unix(self):\n path, segments = manufacture.fs.makePathInTemp()\n content = (\n '[log]\\n'\n 'log_file: %s\\n'\n ) % (path)\n config = self._getConfiguration(content=content)\n if self._drop_user != '-':\n account = self._drop_user\n else:\n account = manufacture.username\n\n logger = manufacture.makeLogger()\n\n try:\n logger.configure(configuration=config, account=account)\n logger.removeAllHandlers()\n\n self.assertTrue(\n manufacture.fs.exists(segments),\n 'Log file was not created at ' + path.encode('utf-8'),\n )\n\n # FIXME:928:\n # Rather than testing for 2 variables, we should only check\n # for matching \"account\" and not \"Administrators\".\n self.assertIn(\n [unicode(account), 'Administrators'],\n manufacture.fs.getOwner(segments))\n finally:\n manufacture.fs.deleteFile(segments, ignore_errors=True)",
"def test_writing(self):\n with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log:\n log.write(\"123\")\n log.write(\"456\")\n log.flush()\n log.write(\"7890\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"1234567890\")",
"def test_slf_badfolder():\n oldlogfile = get_logfile()\n with pytest.warns(UserWarning, match=\"Cannot write to logfile\"):\n start_logfile(\"nonexistent-folder/log.txt\")\n set_logfile(oldlogfile)",
"def init_log(path):\n file = open(path, 'w+')\n file.close()",
"def setLogFile(filename):\n\tglobal logfile\n\tlogfile = filename",
"def safe_open(filename, *args, **kwargs):\r\n safe_mkdir(os.path.dirname(filename))\r\n return open(filename, *args, **kwargs)",
"def openLogfileConnection(self,):\n \n #\n # Imports\n #\n import sys\n import time\n import os\n \n #\n # for logmessages\n # \n tmpLogMessages = []\n \n #\n # check if logfile present open connection or create\n #\n SEAseqPipeLine.logfile = self.analysisPath + '/logfile.txt'\n if os.path.isfile(SEAseqPipeLine.logfile):\n if self.command == 'initiateAnalysis':\n print 'ERROR: the logfile already exists please use another path to initiate the analysis.\\n'\n sys.exit(1)\n else:\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'a',1)\n SEAseqPipeLine.logfile.write('----------------\\nConnection to logfile '+SEAseqPipeLine.logfile.name+' opened.\\n')\n return 0\n else:\n tmpLogMessage = 'Creating the logfile \"'+SEAseqPipeLine.logfile+'\".\\n'\n tmpLogMessages.append(tmpLogMessage)\n print tmpLogMessage\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'w',1)\n \n return tmpLogMessages"
] |
[
"0.6616025",
"0.6545175",
"0.6315127",
"0.6270276",
"0.62506443",
"0.6169582",
"0.6090433",
"0.6053088",
"0.60519564",
"0.6036345",
"0.6032789",
"0.60268515",
"0.5983956",
"0.5974772",
"0.59665847",
"0.5963023",
"0.59392637",
"0.58879405",
"0.5856009",
"0.5837253",
"0.57855564",
"0.5756605",
"0.57536674",
"0.5741746",
"0.5740187",
"0.57328737",
"0.57171834",
"0.5702884",
"0.5702119",
"0.5691065"
] |
0.7473909
|
0
|
L{LogFile.listLogs} doesn't choke if it encounters a file with an unexpected name.
|
def test_listLogsWithBadlyNamedFiles(self):
log = logfile.LogFile(self.name, self.dir)
self.addCleanup(log.close)
with open("{}.1".format(log.path), "w") as fp:
fp.write("123")
with open("{}.bad-file".format(log.path), "w") as fp:
fp.write("123")
self.assertEqual([1], log.listLogs())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename",
"def test_listLogsIgnoresZeroSuffixedFiles(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n for i in range(0, 3):\n with open(\"{}.{}\".format(log.path, i), \"w\") as fp:\n fp.write(\"123\")\n\n self.assertEqual([1, 2], log.listLogs())",
"def list_logs():\n log_dir = os.path.join(\".\",\"logs\")\n if not os.path.isdir(log_dir):\n print(\"ERROR: API (log): cannot find log dir\")\n return jsonify([])\n \n return jsonify(os.listdir(log_dir))",
"def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)",
"def get_access_logs(file_dir=log_dir):\n \n file_list = []\n for myfile in glob.glob1(file_dir, 'access_log*'):\n file_list.append('%s/%s' % (file_dir, myfile))\n# print file_list\n return file_list",
"def find_logs():\n\n file_list_targets = [r'/Program Files/IDEMIA/MFace Flex IA/first/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/first/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex MS/logs/*.log*',\n r'/Program Files (x86)/IDEMIA/DocAuth/logs/*.log*',\n r'/Temp/*.log*',\n r'/Temp/*.csv*',\n r'/STIP/*.log*',\n r'/ECAT/BioFDRS/*.xml*',\n r'/ECAT/FDRS/*.xml*',\n r'/Program Files/IDEMIA/Cameras/First/*.log*',\n r'/Program Files/IDEMIA/Cameras/Second/*.log*']\n\n file_lists_of_lists = [glob.glob(i, recursive=False) for i in file_list_targets]\n\n # Flatten out the list of lists into one list\n file_list = []\n for i in file_lists_of_lists:\n file_list.extend(i)\n\n return file_list",
"def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]",
"def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))",
"def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)",
"def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname",
"def getLogs():",
"def getLogs():",
"def SelectLogFileToPull(ssh, file_name=None):\n log_files = GetAllLogFilePaths(ssh)\n if file_name:\n file_path = os.path.join(constants.REMOTE_LOG_FOLDER, file_name)\n if file_path in log_files:\n return [file_path]\n raise errors.CheckPathError(\"Can't find this log file(%s) from remote \"\n \"instance.\" % file_path)\n\n if len(log_files) == 1:\n return log_files\n\n if len(log_files) > 1:\n print(\"Multiple log files detected, choose any one to proceed:\")\n return utils.GetAnswerFromList(log_files, enable_choose_all=True)\n\n raise errors.CheckPathError(\"Can't find any log file in folder(%s) from \"\n \"remote instance.\" % constants.REMOTE_LOG_FOLDER)",
"def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"connect-log.log\")\n return full_list",
"def showAllLogs():\n\t#Add sections to log screen\n\tallLogs=findFiles(getWorkingDirectory(),\".log\")\n\tcounter=-1\n\tfor l in allLogs:\n\t\tcounter+=1\n\t\tbase=getRootName(l)\n\t\tif base in logDict:\n\t\t\tbase=logDict[base]\n\t\t#Add to selection bar\n\t\tlogSelectionBar.addTab(base,command=lambda n=l: displayLog(n))\n\t\t#Store\n\t\tloadedLogs[counter]=l",
"def find_log_files(all_logs, log_file):\n log_files = []\n for folder in all_logs.itervalues():\n for log in folder:\n if log_file in log:\n log_files.append(log)\n\n return log_files",
"def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)",
"def GetLogs(self):\n raise NotImplementedError()",
"def test_log_filenames_multiple_no_date(self):\n now = datetime.datetime.now()\n (tracks, statuses) = self.app.log_filenames([self.track_path('silence.mp3')]*5)\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertLess(track_obj['timestamp'], now)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)",
"def get_new_logs(log_paths,log_conf):\n if log_conf is None or log_conf.get_host() is None:\n return log_paths\n conf_logs = log_conf.get_host().get_logs()\n new_logs = [log_path for log_path in log_paths if log_path not in conf_logs]\n print 'New logs detected on %s: %s'(log_conf.get_host().get_name(), new_logs)\n logger.info('New logs detected on %s: %s',log_conf.get_host().get_name(), new_logs)\n return new_logs",
"def test_log_filenames_invalid_file(self):\n with self.assertRaises(Exception):\n self.app.log_filenames([__file__])\n self.assertEqual(self.get_track_count(), 0)",
"def test_log_filenames_no_filenames(self):\n (tracks, statuses) = self.app.log_filenames([])\n self.assertEqual(len(tracks), 0)\n self.assertEqual(len(statuses), 1)\n self.assertIn('No filenames', statuses[0])",
"def get_logs(self, name):\n logs = self.get_status()\n\n for pod in self.list_pods(namespace=self.project):\n if name in pod.name: # get just logs from pods related to app\n pod_logs = pod.get_logs()\n if pod_logs:\n logs += pod_logs\n\n return logs",
"def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]",
"def logs_directory(self):",
"def process_log_files(source_name, log_file_list):\n\n result_list = []\n out_fname = create_out_fname(source_name, suffix='_sum', ext=\".csv\")\n\n for log_file in log_file_list:\n result_list += process_log(log_file)\n\n if len(result_list) == 0:\n warning(\"Found no lammps log data to process from: {}\".format(source_name))\n else:\n write_csv(result_list, out_fname, LOG_FIELDNAMES, extrasaction=\"ignore\")",
"def open_logs():\n\treturn log, action_log, error_log",
"def logs(filename):\n\n if not re.search(\".log\",filename):\n print(\"ERROR: API (log): file requested was not a log file: {}\".format(filename))\n return jsonify([])\n\n log_dir = os.path.join(\".\",\"logs\")\n if not os.path.isdir(log_dir):\n print(\"ERROR: API (log): cannot find log dir\")\n return jsonify([])\n\n file_path = os.path.join(log_dir,filename)\n if not os.path.exists(file_path):\n print(\"ERROR: API (log): file requested could not be found: {}\".format(filename))\n return jsonify([])\n \n return send_from_directory(log_dir, filename, as_attachment=True)"
] |
[
"0.73426706",
"0.70452076",
"0.6829374",
"0.6824721",
"0.67591035",
"0.6753871",
"0.6636772",
"0.66222703",
"0.6560827",
"0.6523128",
"0.64634424",
"0.64634424",
"0.62073946",
"0.61896455",
"0.61524665",
"0.60308105",
"0.5954449",
"0.5924665",
"0.58753115",
"0.5863695",
"0.58618724",
"0.5826126",
"0.5824995",
"0.5823457",
"0.5819843",
"0.5806822",
"0.58018184",
"0.5796342",
"0.5770393",
"0.5753771"
] |
0.7593141
|
0
|
L{LogFile.listLogs} ignores log files whose rotated suffix is 0.
|
def test_listLogsIgnoresZeroSuffixedFiles(self):
log = logfile.LogFile(self.name, self.dir)
self.addCleanup(log.close)
for i in range(0, 3):
with open("{}.{}".format(log.path, i), "w") as fp:
fp.write("123")
self.assertEqual([1, 2], log.listLogs())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"connect-log.log\")\n return full_list",
"def list_log_files():\n for filename in os.listdir(\"/home/malyhass/log-parser\"):\n if filename.startswith(\"access.log\"):\n yield filename",
"def list_logs():\n log_dir = os.path.join(\".\",\"logs\")\n if not os.path.isdir(log_dir):\n print(\"ERROR: API (log): cannot find log dir\")\n return jsonify([])\n \n return jsonify(os.listdir(log_dir))",
"def list_logs():\n resource_route = \"/static/log/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n logs_path = os.path.join(path_to_current_file, 'static', 'log')\n directory_list = os.listdir(logs_path)\n log_files = [f for f in directory_list if os.path.isfile(os.path.join(logs_path, f))]\n log_files.sort()\n if '.gitignore' in log_files:\n log_files.remove('.gitignore')\n full_log_paths = [file_request_path + f for f in log_files]\n response_code = 200\n return make_response(jsonify({'files': full_log_paths}), response_code)",
"def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))",
"def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)",
"def test_listLogsWithBadlyNamedFiles(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n with open(\"{}.1\".format(log.path), \"w\") as fp:\n fp.write(\"123\")\n with open(\"{}.bad-file\".format(log.path), \"w\") as fp:\n fp.write(\"123\")\n\n self.assertEqual([1], log.listLogs())",
"def get_access_logs(file_dir=log_dir):\n \n file_list = []\n for myfile in glob.glob1(file_dir, 'access_log*'):\n file_list.append('%s/%s' % (file_dir, myfile))\n# print file_list\n return file_list",
"def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname",
"def find_logs():\n\n file_list_targets = [r'/Program Files/IDEMIA/MFace Flex IA/first/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/first/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IA/second/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/*.log*',\n r'/Program Files/IDEMIA/MFace Flex IPS/log/archive/*.log*',\n r'/Program Files/IDEMIA/MFace Flex MS/logs/*.log*',\n r'/Program Files (x86)/IDEMIA/DocAuth/logs/*.log*',\n r'/Temp/*.log*',\n r'/Temp/*.csv*',\n r'/STIP/*.log*',\n r'/ECAT/BioFDRS/*.xml*',\n r'/ECAT/FDRS/*.xml*',\n r'/Program Files/IDEMIA/Cameras/First/*.log*',\n r'/Program Files/IDEMIA/Cameras/Second/*.log*']\n\n file_lists_of_lists = [glob.glob(i, recursive=False) for i in file_list_targets]\n\n # Flatten out the list of lists into one list\n file_list = []\n for i in file_lists_of_lists:\n file_list.extend(i)\n\n return file_list",
"def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]",
"def getLogs():",
"def getLogs():",
"def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]",
"def after_rotate_logs(msg, config, checklist):\n return []",
"def get_log_files_to_delete(self):\n dir_name, base_name = os.path.split(self.baseFilename)\n file_names = os.listdir(dir_name)\n result = []\n n, e = os.path.splitext(base_name)\n prefix = n + \".\"\n plen = len(prefix)\n for file_name in file_names:\n if self.namer is None:\n if not file_name.startswith(base_name):\n continue\n else:\n if (\n not file_name.startswith(base_name)\n and file_name.endswith(e)\n and len(file_name) > (plen + 1)\n and not file_name[plen + 1].isdigit()\n ):\n continue\n if file_name[:plen] == prefix:\n suffix = file_name[plen:]\n parts = suffix.split(\".\")\n for part in parts:\n if self.extMatch.match(part):\n result.append(os.path.join(dir_name, file_name))\n break\n if len(result) < self.backupCount:\n result = []\n else:\n result.sort()\n result = result[: len(result) - self.backupCount]\n return result",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]",
"def find_legacy_log_files(xcresult_path):\n\n result = []\n\n for root, dirs, files in os.walk(xcresult_path, topdown=True):\n for file in files:\n if file.endswith('.txt'):\n file = os.path.join(root, file)\n result.append(file)\n\n # Sort the files by creation time.\n result.sort(key=lambda f: os.stat(f).st_ctime)\n return result",
"def find_legacy_log_files(xcresult_path):\n\n result = []\n\n for root, dirs, files in os.walk(xcresult_path, topdown=True):\n for file in files:\n if file.endswith('.txt'):\n file = os.path.join(root, file)\n result.append(file)\n\n # Sort the files by creation time.\n result.sort(key=lambda f: os.stat(f).st_ctime)\n return result",
"def archive_logs():\n logging.info('Archive start...')\n\n for log_dir in filter(dir_filter, os.listdir('logs')):\n path = 'logs/{}'.format(log_dir)\n archive_files = filter(lambda x: '.log.' in x, os.listdir(path))\n zip_file_name = '{}/{}.zip'.format(\n path,\n str(datetime.now())\n .replace(' ', '_').replace('.', '_').replace(':', '_'))\n zip_file = zipfile.ZipFile(\n zip_file_name, mode='w', compression=zipfile.ZIP_DEFLATED)\n for f in archive_files:\n log_file = '{}/{}'.format(path, f)\n zip_file.write(log_file)\n os.remove(log_file)\n\n logging.info('Archive end.')",
"def clean_logs(directory):\n to_log = str()\n files = [{\"file_name\": f\"{directory}/{file}\", \"creation_date\": getctime(f\"{directory}/{file}\")} for file in\n listdir(f\"{directory}\") if isfile(join(f\"{directory}\", file))][:-9]\n\n if not files:\n\n print(\"No log removed.\")\n to_log += \"No log removed.\"\n\n else:\n\n for file in files:\n remove(file[\"file_name\"])\n print(f\"{file['file_name']} removed!\")\n to_log += f\"{file['file_name']} removed!\\n\"\n\n return to_log",
"def FilterLogfiles(files):\n log_files = list(files)\n for file_path in files:\n file_name = os.path.basename(file_path)\n if file_name == _KERNEL or file_name.endswith(_IMG_FILE_EXTENSION):\n log_files.remove(file_path)\n return log_files",
"def rotate_logs(basename, max_version=None):\n\n # nothing to do if the basename doesn't already exist\n if not os.path.isfile(basename):\n return\n\n files = glob.glob(f\"{basename}.*\")\n n = len(basename)\n\n versions = [int(f[n + 1:]) for f in files if f[n + 1:].isdigit()]\n sorted_versions = sorted(versions, reverse=True)\n\n for v in sorted_versions:\n if max_version is not None and v >= max_version:\n os.remove(f\"{basename}.{v}\")\n else:\n os.rename(f\"{basename}.{v}\", f\"{basename}.{v+1}\")\n\n # move original if space\n if max_version is not None and max_version == 0:\n os.remove(basename)\n else:\n os.rename(basename, f\"{basename}.1\")",
"def _remove_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n os.remove(fn)",
"def getFileList(self):\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.sensor + sep + 'padhist'\n pattern = '*' + self.sensor + '_hstv*.mat'\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n uTime = stringTimeToUnix(name[0:13] + '_00_00.000')\n if ( self.uStart <= uTime <= self.uStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n fileList.sort()\n self.fileList = fileList",
"def showAllLogs():\n\t#Add sections to log screen\n\tallLogs=findFiles(getWorkingDirectory(),\".log\")\n\tcounter=-1\n\tfor l in allLogs:\n\t\tcounter+=1\n\t\tbase=getRootName(l)\n\t\tif base in logDict:\n\t\t\tbase=logDict[base]\n\t\t#Add to selection bar\n\t\tlogSelectionBar.addTab(base,command=lambda n=l: displayLog(n))\n\t\t#Store\n\t\tloadedLogs[counter]=l",
"def _get_logrotated_log(self):\n file_lst = glob.glob(self.rotation_pattern)\n file_lst.remove(self.log_filename)\n\n if len(file_lst) == 0:\n return None\n\n stat_lst = [(os.stat(x).st_mtime, x) for x in file_lst]\n sorted_stat_lst = sorted(stat_lst, key=lambda x: x[1])\n sorted_stat_lst.reverse()\n\n r_tuple = reduce(lambda a,b: a if (a[0] > b[0]) else b, sorted_stat_lst)\n return r_tuple[1]",
"def getArchLogs(self):\n\n # Implement checkFiles() for archs?\n\n # Pull log file\n if self.nbDetails['proc']['archLog'] is not None:\n result = self.c.get(self.nbDetails['proc']['archLog'])\n print(f\"Pulled archive creation log {result.remote} to {result.local}\")\n else:\n print(f\"Archives not yet written.\")",
"def after_rotate_hindcast_logs(msg, config, checklist):\n return []"
] |
[
"0.687992",
"0.68560153",
"0.6781296",
"0.6669578",
"0.6669526",
"0.6665514",
"0.6498067",
"0.64108115",
"0.63948584",
"0.61453205",
"0.613642",
"0.60900474",
"0.60900474",
"0.6055192",
"0.6039035",
"0.60025483",
"0.5982148",
"0.59603906",
"0.5949518",
"0.5949518",
"0.5900916",
"0.5829341",
"0.5807793",
"0.5754842",
"0.5751356",
"0.5747563",
"0.57423806",
"0.5731078",
"0.5688279",
"0.5652628"
] |
0.75706
|
0
|
Daily log files rotate daily.
|
def test_rotation(self):
log = RiggedDailyLogFile(self.name, self.dir)
self.addCleanup(log.close)
days = [(self.path + "." + log.suffix(day * 86400)) for day in range(3)]
# test automatic rotation
log._clock = 0.0 # 1970/01/01 00:00.00
log.write("123")
log._clock = 43200 # 1970/01/01 12:00.00
log.write("4567890")
log._clock = 86400 # 1970/01/02 00:00.00
log.write("1" * 11)
self.assertTrue(os.path.exists(days[0]))
self.assertFalse(os.path.exists(days[1]))
log._clock = 172800 # 1970/01/03 00:00.00
log.write("")
self.assertTrue(os.path.exists(days[0]))
self.assertTrue(os.path.exists(days[1]))
self.assertFalse(os.path.exists(days[2]))
log._clock = 259199 # 1970/01/03 23:59.59
log.write("3")
self.assertFalse(os.path.exists(days[2]))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def backup_rotate_daily(self, backup_dir: str, rotate: int):\n if rotate < 1:\n raise Exception(\"Rotate should be more than 0\")\n backup_domain_dir = self.get_backup_domain_dir(backup_dir)\n # for every file in directory group backups\n for disk in self.get_disks():\n grouped_files = []\n backup_files = glob.glob(\n os.path.join(backup_domain_dir, \"%s_%s-*.%s\" % (self.name, disk.device, disk.format)))\n backup_files.sort(key=os.path.getmtime, reverse=True)\n backing_file = None\n for backup_file in backup_files:\n if backing_file is None:\n grouped_files.append([])\n grouped_files[-1].append(backup_file)\n backing_file = DiskImageHelper.get_backing_file(backup_file)\n logging.debug(\"Grouped backup files %s\" % grouped_files)\n grouped_files_to_remove = grouped_files[rotate:]\n logging.debug(\"Groups to remove %s\" % grouped_files_to_remove)\n for group in grouped_files_to_remove:\n for file in group:\n logging.info(\"Removing old backup disk file: '%s'\" % file)\n os.remove(file)",
"def rotate(days):\n create_backup()\n remove_older_backups(days)",
"def test_rotateAlreadyExists(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n # Build a new file with the same name as the file which would be created\n # if the log file is to be rotated.\n newFilePath = \"{}.{}\".format(log.path, log.suffix(log.lastDate))\n with open(newFilePath, \"w\") as fp:\n fp.write(\"123\")\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def log_rotate():\n st = os.stat(log_file)\n if st.st_size >= max_log_size:\n logfiles = glob.glob(\"{0}/{1}.[0-9].gz\".format(clone_dir,os.path.basename(log_file)))\n for i in xrange(len(logfiles),0,-1):\n oldlog = logfiles[i-1]\n newlog = \"{0}.{1}.gz\".format(oldlog[:-5],i)\n os.rename(oldlog,newlog)\n f_in = open(log_file, \"r+b\")\n f_out = gzip.open(log_file + \".0.gz\", \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.seek(0)\n f_in.truncate(0)\n f_in.close()\n pass",
"def test_rotatePermissionDirectoryNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.directory, 0o444)\n # Restore permissions so tests can be cleaned up.\n self.addCleanup(os.chmod, log.directory, 0o755)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def rotatelog(self,**kwargs):\n newname = self._newname()\n newlgf = LogFile(newname,**kwargs)\n with self.id_lock:\n self._rotatelog(newlgf,newname)",
"def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])",
"def test_logfile_recreates_after_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n self.conveyer.rotate_logs()\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"fourth\\\"}\"))\n self.assertEquals(self.events_out.getvalue(), \"{message: \\\"fourth\\\"}\")\n self.assertTrue(self.renamerCalled)",
"def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs()\n self.assertEquals(self.conveyer.logfile, None)\n self.assertEquals(filename, \"testfile.dat.rotated\")",
"def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))",
"def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')\n self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')",
"def checkDateForFileName(self):\n #self.currentLocalTime was already changed in log Temperatures\n if self.currentLocalTime.tm_mday != self.currentDay:\n #the day has changed we should start a new log file!\n self.logFile = self._logFile_default()\n self._create_log_file()",
"def rotate_logs(basename, max_version=None):\n\n # nothing to do if the basename doesn't already exist\n if not os.path.isfile(basename):\n return\n\n files = glob.glob(f\"{basename}.*\")\n n = len(basename)\n\n versions = [int(f[n + 1:]) for f in files if f[n + 1:].isdigit()]\n sorted_versions = sorted(versions, reverse=True)\n\n for v in sorted_versions:\n if max_version is not None and v >= max_version:\n os.remove(f\"{basename}.{v}\")\n else:\n os.rename(f\"{basename}.{v}\", f\"{basename}.{v+1}\")\n\n # move original if space\n if max_version is not None and max_version == 0:\n os.remove(basename)\n else:\n os.rename(basename, f\"{basename}.1\")",
"def output_daily_files(dataframe, path, filename):\n\n days = dataframe.groupby('date_time_day')\n dataframe.groupby('date_time_day').size().reset_index(name='data points per day')\n\n for day in days.groups:\n print(day.date())\n output_path = path + filename + \"_\" + str(day.date()) + '.csv'\n print(\"Creating intermediate flagged data file: \", output_path)\n days.get_group(day).to_csv(output_path, index=False)",
"def create_timed_rotating_log(path_log, when=\"midnight\", last=3, interval=1):\n logger = logging.getLogger(\"Test rotating file\")\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')\n if path_log:\n hdlr = TimedRotatingFileHandler(\n path_log, when=when, interval=interval, backupCount=last\n )\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.debug(\"[LOG] Criou arquivo de log\")\n else:\n hdlr2 = logging.StreamHandler()\n hdlr2.setFormatter(formatter)\n logger.addHandler(hdlr2)\n return logger",
"def doRollover(self):\n if self.stream:\n self.stream.close()\n # get the time that this sequence started at and make it a TimeTuple\n t = self.rolloverAt - self.interval\n if self.utc:\n timeTuple = time.gmtime(t)\n else:\n timeTuple = time.localtime(t)\n dfn = self.baseFilename + \".\" + time.strftime(self.suffix, timeTuple)\n if self.backupCount > 0:\n cnt=1\n dfn2=\"%s.%03d\"%(dfn,cnt)\n while os.path.exists(dfn2):\n dfn2=\"%s.%03d\"%(dfn,cnt)\n cnt+=1 \n os.rename(self.baseFilename, dfn2)\n for s in self.getFilesToDelete():\n os.remove(s)\n else:\n if os.path.exists(dfn):\n os.remove(dfn)\n os.rename(self.baseFilename, dfn)\n #print \"%s -> %s\" % (self.baseFilename, dfn)\n self.mode = 'w'\n self.stream = self._open()\n currentTime = int(time.time())\n newRolloverAt = self.computeRollover(currentTime)\n while newRolloverAt <= currentTime:\n newRolloverAt = newRolloverAt + self.interval\n #If DST changes and midnight or weekly rollover, adjust for this.\n if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:\n dstNow = time.localtime(currentTime)[-1]\n dstAtRollover = time.localtime(newRolloverAt)[-1]\n if dstNow != dstAtRollover:\n if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour\n newRolloverAt = newRolloverAt - 3600\n else: # DST bows out before next rollover, so we need to add an hour\n newRolloverAt = newRolloverAt + 3600\n self.rolloverAt = newRolloverAt",
"def cycle_logfile(logfile):\n logfile_old = logfile + '.old'\n if os.path.exists(logfile):\n # Cycle the old logfiles to *.old\n if os.path.exists(logfile_old):\n # E.g. Windows don't support rename-replace\n os.remove(logfile_old)\n os.rename(logfile, logfile_old)",
"def _get_logrotated_log(self):\n file_lst = glob.glob(self.rotation_pattern)\n file_lst.remove(self.log_filename)\n\n if len(file_lst) == 0:\n return None\n\n stat_lst = [(os.stat(x).st_mtime, x) for x in file_lst]\n sorted_stat_lst = sorted(stat_lst, key=lambda x: x[1])\n sorted_stat_lst.reverse()\n\n r_tuple = reduce(lambda a,b: a if (a[0] > b[0]) else b, sorted_stat_lst)\n return r_tuple[1]",
"def get_timed_rotating_logger(**kwargs):\n # create logger\n if not os.path.exists(kwargs.get('log_dir_path')):\n try:\n os.makedirs(kwargs.get('log_dir_path'))\n except Exception:\n time.sleep(1)\n os.makedirs(kwargs.get('log_dir_path'))\n pass\n logger = logging.getLogger(kwargs.get('logger_name'))\n if kwargs.get('log_level').lower() == 'info':\n log_level = 20\n elif kwargs.get('log_level').lower() == 'warning':\n log_level = 30\n elif kwargs.get('log_level').lower() == 'error':\n log_level = 40\n elif kwargs.get('log_level').lower() == 'critical':\n log_level = 50\n else:\n log_level = 10\n logger.setLevel(log_level)\n ch = logging.handlers.TimedRotatingFileHandler(\n os.path.join(kwargs.get('log_dir_path'), kwargs.get('log_file_name')),\n when='midnight',\n interval=1,\n backupCount=kwargs.get('backup_count'),\n encoding=None,\n delay=False,\n utc=False\n )\n ch.setLevel(log_level)\n # Create formatter\n formatter = logging.Formatter(\n fmt='%(asctime)s.%(msecs)03d - %(levelname)s[%(lineno)d] - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n # Add formatter to ch\n ch.setFormatter(formatter)\n # Add ch to logger\n logger.addHandler(ch)\n st = logging.StreamHandler(sys.stdout)\n st.setFormatter(formatter)\n st.setLevel(logging.DEBUG)\n logger.addHandler(st)\n return logger",
"def gerar_log():\n arquivo = Path(f'{path_server}/etc/serverx/logs/{datetime.date.today().strftime(\"%d-%m-%Y\")}.txt')\n if os.path.exists(arquivo):\n with open(f'{path_server}/etc/serverx/logs/{datetime.date.today().strftime(\"%d-%m-%Y\")}.txt', 'a') as acessos:\n acessos.write(f'{datetime.datetime.now()} - {request.headers[\"Host\"]} {request.headers[\"User-Agent\"]}\\n')\n else:\n with open(f'{path_server}/etc/serverx/logs/{datetime.date.today().strftime(\"%d-%m-%Y\")}.txt', 'w') as acessos:\n acessos.write(f'{datetime.datetime.now()} - {request.headers[\"Host\"]} {request.headers[\"User-Agent\"]}\\n')",
"def create_file_handler(log_name):\n file_handler = handlers.TimedRotatingFileHandler(\n log_name, when=\"midnight\", interval=1\n )\n file_handler.suffix = \"%Y%m%d\"\n file_handler.setFormatter(formatter)\n return file_handler",
"def _rotatelog(self,newlgf,newname):\n modlogger.debug( \"rl:%s\"%newname)\n if self.logf: \n start_new_thread(self._waitlog,(self.logf,self.logname))\n self.logsync.acquire()\n\n if newname: self.in_use_logs += [ newname ] \n try:\n self.logf, self.logname = newlgf , newname\n except Exception:\n if newname:\n self.in_use_logs.remove(newname)\n raise",
"def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=0, utc=0, maxBytes=0):\n # super(self). #It's old style class, so super doesn't work.\n logging.handlers.TimedRotatingFileHandler.__init__(self, filename, when, interval, backupCount, encoding, delay, utc)\n self.maxBytes=maxBytes",
"def __init__(self, log_dir, prefix, when_interval=None):\n\n log_dir = os.path.abspath(log_dir)\n\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n\n if not isinstance(prefix, str):\n raise ValueError(\"Prefix string required for output files\")\n\n if when_interval is None:\n when, interval = 'H', 1\n else:\n when, interval = when_interval\n if not (isinstance(when, str) and isinstance(interval, int)):\n raise ValueError(\"Rollover interval should be like ('h', 1)\")\n\n log_path = os.path.join(log_dir, prefix)\n\n _LOGGER.info(\"Saving data to path {}\".format(log_path))\n _LOGGER.info(\"Rollover interval {}\"\n .format(\"{}{}\".format(when, interval)))\n\n # Create the logger.\n self._logger = TimedRotatingFileHandler(filename=log_path,\n encoding='utf-8',\n when=when,\n interval=interval)",
"def rotate_backups(self, bucketname, prefix):\n\n bucket = self.conn.get_bucket(bucketname)\n # Collect the backups in the given directory.\n sorted_backups = self.collect_backups(bucketname, prefix)\n if not sorted_backups:\n logger.info(\"No backups found in %s.\", bucketname)\n return\n most_recent_backup = sorted_backups[-1]\n # Group the backups by the rotation frequencies.\n backups_by_frequency = self.group_backups(sorted_backups)\n # Apply the user defined rotation scheme.\n self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)\n # Find which backups to preserve and why.\n backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)\n # Apply the calculated rotation scheme.\n deleted_files = []\n for backup in sorted_backups:\n if backup in backups_to_preserve:\n matching_periods = backups_to_preserve[backup]\n logger.info(\"Preserving %s (matches %s retention %s) ..\",\n backup.pathname, concatenate(map(repr, matching_periods)),\n \"period\" if len(matching_periods) == 1 else \"periods\"\n )\n else:\n logger.info(\"Deleting %s %s ..\", backup.type, backup.pathname)\n if not self.dry_run:\n logger.debug(\"Marking %s for deletion.\", backup.pathname)\n deleted_files.append(backup.pathname)\n if deleted_files:\n bucket.delete_keys(deleted_files)\n \n if len(backups_to_preserve) == len(sorted_backups):\n logger.info(\"Nothing to do! (all backups preserved)\")",
"def __init__(\n self,\n filename,\n when=\"h\",\n interval=1,\n backupCount=1,\n encoding=None,\n delay=False,\n utc=False,\n atTime=None,\n ):\n BaseRotatingHandler.__init__(\n self,\n filename,\n \"a\",\n encoding=encoding,\n delay=delay,\n )\n self.when = when.upper()\n self.backupCount = backupCount\n self.utc = utc\n self.atTime = atTime\n self.mylogfile = \"%s.%08d\" % (\"/tmp/trfmphanldler\", randint(0, 99999999))\n self.interval = interval\n\n if self.when == \"S\":\n self.interval = 1\n self.suffix = \"%Y-%m-%d_%H-%M-%S\"\n self.extMatch = r\"^\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}-\\d{2}(\\.\\w+)?$\"\n elif self.when == \"M\":\n self.interval = 60\n self.suffix = \"%Y-%m-%d_%H-%M\"\n self.extMatch = r\"^\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}(\\.\\w+)?$\"\n elif self.when == \"H\":\n self.interval = 60 * 60\n self.suffix = \"%Y-%m-%d_%H\"\n self.extMatch = r\"^\\d{4}-\\d{2}-\\d{2}_\\d{2}(\\.\\w+)?$\"\n elif self.when == \"D\" or self.when == \"MIDNIGHT\":\n self.interval = 60 * 60 * 24\n self.suffix = \"%Y-%m-%d\"\n self.extMatch = r\"^\\d{4}-\\d{2}-\\d{2}(\\.\\w+)?$\"\n elif self.when.startswith(\"W\"):\n self.interval = 60 * 60 * 24 * 7\n if len(self.when) != 2:\n raise ValueError(\n \"You must specify a day for weekly rollover from 0 \"\n \"to 6 (0 is Monday): %s\" % self.when\n )\n if self.when[1] < \"0\" or self.when[1] > \"6\":\n raise ValueError(\n \"Invalid day specified for weekly rollover: %s\" % self.when\n )\n self.dayOfWeek = int(self.when[1])\n self.suffix = \"%Y-%m-%d\"\n self.extMatch = r\"^\\d{4}-\\d{2}-\\d{2}(\\.\\w+)?$\"\n else:\n raise ValueError(\"Invalid rollover interval specified: %s\" % self.when)\n\n self.extMatch = re.compile(self.extMatch, re.ASCII)\n self.interval = self.interval * interval\n self.stream_lock = None\n self.lock_file = self._getLockFile()\n self.next_rollover_time = self.get_next_rollover_time()\n if not self.next_rollover_time:\n self.next_rollover_time = self.compute_next_rollover_time()\n self.save_next_rollover_time()",
"def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)",
"def generate_day_cycle(self, names):\n day_log = []\n time_delta = timedelta(days=1)\n\n for i in range(0, len(self.HOUR_SHEET)):\n if self.is_time_for_bruteforce(i):\n day_log.extend(self.generate_brute_force_log(i, names))\n\n day_log.extend(self.generate_hour_cycle(i, names))\n\n day_log.sort()\n\n self.date += time_delta\n\n return day_log",
"def __init__(self, abs_path_logfile):\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n handler = logging.handlers.TimedRotatingFileHandler(abs_path_logfile, when='D', interval=1)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)"
] |
[
"0.6491307",
"0.64848346",
"0.61145",
"0.60250175",
"0.595329",
"0.584923",
"0.5831635",
"0.57777596",
"0.5700385",
"0.5630765",
"0.5577364",
"0.5499653",
"0.54012597",
"0.52955353",
"0.52432585",
"0.52192646",
"0.52171814",
"0.51803863",
"0.5149426",
"0.5129501",
"0.5054763",
"0.5024322",
"0.5022395",
"0.5015525",
"0.50084406",
"0.49848193",
"0.49782187",
"0.493995",
"0.49382675",
"0.49381307"
] |
0.7184687
|
0
|
Test retrieving log files with L{DailyLogFile.getLog}.
|
def test_getLog(self):
data = ["1\n", "2\n", "3\n"]
log = RiggedDailyLogFile(self.name, self.dir)
self.addCleanup(log.close)
for d in data:
log.write(d)
log.flush()
# This returns the current log file.
r = log.getLog(0.0)
self.addCleanup(r.close)
self.assertEqual(data, r.readLines())
# We can't get this log, it doesn't exist yet.
self.assertRaises(ValueError, log.getLog, 86400)
log._clock = 86401 # New day
r.close()
log.rotate()
r = log.getLog(0) # We get the previous log
self.addCleanup(r.close)
self.assertEqual(data, r.readLines())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getLogs():",
"def getLogs():",
"def test_02_log_something(self):\n logger = get_logger(self)\n logger.info('Info in test_02')\n logger.debug('Debug in test_02')\n logger.warn('Warn in test_02')\n logfiles = glob.glob(os.path.join(self.LOG_FOLDER,\n '{}*.log'.format(self.scenario)))\n assert logfiles\n print(logfiles)\n for logfile in logfiles:\n with open(logfile) as f:\n for line in f:\n print(line.strip())",
"def test_logs(self):\n # Purge all logs\n log_dir = self.test_config['LOG_DIR']\n pattern = re.compile('^nginx-access-ui.log-(?P<day_of_log>\\d{8})(\\.gz)?$')\n logs = [f for f in os.listdir(log_dir) if re.search(pattern, f)]\n map(os.remove, logs)\n\n # Try to make report without logs\n self.generate_report()\n self.assertTrue(self.check_in_log(\"Not found logs in directory {}\".format(self.test_config['LOG_DIR'])))",
"def test_listLogsIgnoresZeroSuffixedFiles(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n for i in range(0, 3):\n with open(\"{}.{}\".format(log.path, i), \"w\") as fp:\n fp.write(\"123\")\n\n self.assertEqual([1, 2], log.listLogs())",
"def test_log(self):\r\n # expected result when no result_path is provided\r\n self.default_app(\r\n seq_path=self.tmp_seq_filepath,\r\n result_path=None,\r\n log_path=self.tmp_log_filepath,\r\n )\r\n\r\n # open the actual log file and the expected file, and pass into lists\r\n with open(self.tmp_log_filepath) as f:\r\n obs = [l.strip() for l in list(f)]\r\n exp = rdp_test1_log_file_contents.split('\\n')\r\n # sort the lists as the entries are written from a dict,\r\n # so order may vary\r\n obs.sort()\r\n exp.sort()\r\n self.assertEqual(obs, exp)",
"def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False",
"def test_log_filenames_multiple_date_in_past(self):\n time_lower = datetime.datetime.now() - datetime.timedelta(seconds=7210)\n time_upper = time_lower + datetime.timedelta(seconds=20)\n (tracks, statuses) = self.app.log_filenames(\n [self.track_path('silence.mp3')]*5,\n timestamp='2 hours ago'\n )\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertGreaterEqual(track_obj['timestamp'], time_lower)\n self.assertLess(track_obj['timestamp'], time_upper)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])",
"def test_get_source_log(self):\n pass",
"def test_read_logs(self):\n records = log_reader(self.input_data_path)\n\n results = list(records)\n\n self.assertEqual(4, len(results))\n\n self.assertEqual(\n deque([u'record 1\\n', u'\\tline 1\\n',\n u'\\tline 2\\n', u'\\tline 3\\n']),\n results[0])\n\n self.assertEqual(\n deque([u'record 4\\n', u'\\tline 1\\n', u'\\tline 2\\n']),\n results[3])",
"def test_04_logs(self):\n\n file_name = 'train-test.log'\n request_json = {'file':'train-test.log'}\n r = requests.get('http://localhost:{}/logs/{}'.format(port,file_name))\n\n with open(file_name, 'wb') as f:\n f.write(r.content)\n \n self.assertTrue(os.path.exists(file_name))\n\n if os.path.exists(file_name):\n os.remove(file_name)",
"def test_log_filenames_multiple_no_date(self):\n now = datetime.datetime.now()\n (tracks, statuses) = self.app.log_filenames([self.track_path('silence.mp3')]*5)\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertLess(track_obj['timestamp'], now)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])",
"def test_read_logs(self):\n class LogReaderLocal(DarshanLogReader):\n def logfiles(self):\n for file_path in ['dir1/file1', 'dir1/file2', 'dir1/file3', 'dir2/file4']:\n yield file_path\n\n def read_log(self, filename, suggested_label):\n return [FakeJob(filename, suggested_label)]\n\n lr = LogReaderLocal('test-path')\n\n logs = lr.read_logs()\n self.assertIsInstance(logs, types.GeneratorType)\n\n logs = list(logs)\n self.assertEqual(len(logs), 2)\n\n self.assertEqual(logs[0].names, ['dir1/file1', 'dir1/file2', 'dir1/file3'])\n self.assertEqual(logs[1].names, ['dir2/file4'])",
"def test_listLogsWithBadlyNamedFiles(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n with open(\"{}.1\".format(log.path), \"w\") as fp:\n fp.write(\"123\")\n with open(\"{}.bad-file\".format(log.path), \"w\") as fp:\n fp.write(\"123\")\n\n self.assertEqual([1], log.listLogs())",
"def test_download_build_log_file(self, mock_serve, mock_test, mock_os):\n from mod_test.controllers import (TestNotFoundException,\n download_build_log_file)\n\n response = download_build_log_file('1')\n\n self.assertEqual(response, mock_serve())\n mock_test.query.filter.assert_called_once()\n mock_os.path.isfile.assert_called_once()",
"def logfile():\n\n class Logfile(object):\n def __init__(self, filename, *args, **kwargs):\n super(Logfile, self).__init__(*args, **kwargs)\n self.filename = filename\n self.logs = \"\"\n\n def read(self):\n with open(self.filename) as file:\n for line in file:\n self.logs += line\n return self.logs\n\n yield Logfile(filename=\"gen3tests.logs\")\n\n # cleanup after each use\n if os.path.exists(\"gen3tests.logs\"):\n os.remove(\"gen3tests.logs\")",
"def test_different_custom_logging_file(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n task_creator = lambda: generate_root_task(task_class=TestTask, x=\"Test\")\n\n log_path_1 = use_specific_log_file(task_creator, temp_dir, \"first\")\n log_path_2 = use_specific_log_file(task_creator, temp_dir, \"second\")\n\n for log_path in [log_path_1, log_path_2]:\n assert log_path.exists()\n with open(log_path, \"r\") as f:\n log_content = f.read()\n assert f\"Logging: Test\" in log_content",
"def pytest_logger_fileloggers(self, item):",
"def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()",
"def test_creation_logfile(self):\n log_file = os.path.join(DATA_DIR, 'sample_log.txt')\n manager = execution.LogManager('MainThread', log_file)\n LOGGER.debug('Log me!')\n manager.close()\n self.assertEqual(count_lines(log_file), 1)\n os.remove(log_file)",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def logs_directory(self):",
"def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")",
"def test_get_log(self):\n result = log_lib.get_log(True)\n self.assertTrue(callable(result))\n result(\"dummy-message\")\n\n result = log_lib.get_log(False)\n self.assertTrue(callable(result))\n result(\"dummy-message\")",
"def test_gitlogs(self):\r\n\r\n self._setstaff_login()\r\n self._mkdir(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n self._add_edx4edx()\r\n response = self.client.get(reverse('gitlogs'))\r\n\r\n # Check that our earlier import has a log with a link to details\r\n self.assertIn('/gitlogs/MITx/edx4edx/edx4edx', response.content)\r\n\r\n response = self.client.get(\r\n reverse('gitlogs_detail', kwargs={\r\n 'course_id': 'MITx/edx4edx/edx4edx'}))\r\n\r\n self.assertIn('======> IMPORTING course',\r\n response.content)\r\n\r\n self._rm_edx4edx()",
"def test_004_log(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __dir_game_log = os.path.join(__dir_game_log, __test_logname)\n #test list\n __log_test = __test.log(__test_filename, __test_data, True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"\\nLine (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\")\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, __test_data, False)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Still Line (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\")\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, __test_data, True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Line (0 bis \" +str(consts.TEST_LIST_LENGHT-1) +\") two times\")\n print(__log_game.read())\n os.remove(__dir_game_log)\n self.assertFalse(os.path.isfile(__dir_game_log))\n #test string\n __log_test = __test.log(__test_filename, \"__test_data\", True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"\\nOne Line:\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, \"__test_data\", False)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"FILE_EXIST\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Still one Line:\")\n print(__log_game.read())\n __log_test = __test.log(__test_filename, \"__test_data\", True)\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])\n with open(__dir_game_log, 'r') as __log_game:\n print(\"Two Lines\")\n print(__log_game.read())",
"def test_logReader(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\\n\")\n log.write(\"def\\n\")\n log.rotate()\n log.write(\"ghi\\n\")\n log.flush()\n\n # check reading logs\n self.assertEqual(log.listLogs(), [1])\n with contextlib.closing(log.getCurrentLog()) as reader:\n reader._file.seek(0)\n self.assertEqual(reader.readLines(), [\"ghi\\n\"])\n self.assertEqual(reader.readLines(), [])\n with contextlib.closing(log.getLog(1)) as reader:\n self.assertEqual(reader.readLines(), [\"abc\\n\", \"def\\n\"])\n self.assertEqual(reader.readLines(), [])\n\n # check getting illegal log readers\n self.assertRaises(ValueError, log.getLog, 2)\n self.assertRaises(TypeError, log.getLog, \"1\")\n\n # check that log numbers are higher for older logs\n log.rotate()\n self.assertEqual(log.listLogs(), [1, 2])\n with contextlib.closing(log.getLog(1)) as reader:\n reader._file.seek(0)\n self.assertEqual(reader.readLines(), [\"ghi\\n\"])\n self.assertEqual(reader.readLines(), [])\n with contextlib.closing(log.getLog(2)) as reader:\n self.assertEqual(reader.readLines(), [\"abc\\n\", \"def\\n\"])\n self.assertEqual(reader.readLines(), [])",
"def test_featurecounts_step_part_get_log_file(gene_expression_quantification_workflow):\n expected = (\n \"work/{mapper}.featurecounts.{library_name}/log/{mapper}.featurecounts.{library_name}.log\"\n )\n actual = gene_expression_quantification_workflow.get_log_file(\"featurecounts\", \"run\").get(\"log\")\n assert actual == expected",
"def pytest_logger_logsdir(self, config):",
"def test_get_daily_change_log(self):\n msg = \"Response status is not 200\"\n response = self.api.get_daily_change_log(self.year, self.month, self.day)\n self.assertEqual(response.status_code, 200, msg)"
] |
[
"0.70789343",
"0.70789343",
"0.7076324",
"0.7074265",
"0.67947835",
"0.6757375",
"0.6727712",
"0.67131376",
"0.67010534",
"0.6648962",
"0.66467845",
"0.65692544",
"0.65282345",
"0.65043813",
"0.64974445",
"0.64873743",
"0.64582217",
"0.6442727",
"0.6421746",
"0.6409436",
"0.6405541",
"0.63895494",
"0.63837427",
"0.6371592",
"0.633788",
"0.6318481",
"0.6315836",
"0.62941766",
"0.62898755",
"0.628517"
] |
0.8062962
|
0
|
L{DailyLogFile.rotate} doesn't do anything if the new log file already exists on the disk.
|
def test_rotateAlreadyExists(self):
log = RiggedDailyLogFile(self.name, self.dir)
self.addCleanup(log.close)
# Build a new file with the same name as the file which would be created
# if the log file is to be rotated.
newFilePath = "{}.{}".format(log.path, log.suffix(log.lastDate))
with open(newFilePath, "w") as fp:
fp.write("123")
previousFile = log._file
log.rotate()
self.assertEqual(previousFile, log._file)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))",
"def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def log_rotate():\n st = os.stat(log_file)\n if st.st_size >= max_log_size:\n logfiles = glob.glob(\"{0}/{1}.[0-9].gz\".format(clone_dir,os.path.basename(log_file)))\n for i in xrange(len(logfiles),0,-1):\n oldlog = logfiles[i-1]\n newlog = \"{0}.{1}.gz\".format(oldlog[:-5],i)\n os.rename(oldlog,newlog)\n f_in = open(log_file, \"r+b\")\n f_out = gzip.open(log_file + \".0.gz\", \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.seek(0)\n f_in.truncate(0)\n f_in.close()\n pass",
"def rotatelog(self,**kwargs):\n newname = self._newname()\n newlgf = LogFile(newname,**kwargs)\n with self.id_lock:\n self._rotatelog(newlgf,newname)",
"def test_rotatePermissionDirectoryNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.directory, 0o444)\n # Restore permissions so tests can be cleaned up.\n self.addCleanup(os.chmod, log.directory, 0o755)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])",
"def test_logfile_recreates_after_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n self.conveyer.rotate_logs()\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"fourth\\\"}\"))\n self.assertEquals(self.events_out.getvalue(), \"{message: \\\"fourth\\\"}\")\n self.assertTrue(self.renamerCalled)",
"def _rotatelog(self,newlgf,newname):\n modlogger.debug( \"rl:%s\"%newname)\n if self.logf: \n start_new_thread(self._waitlog,(self.logf,self.logname))\n self.logsync.acquire()\n\n if newname: self.in_use_logs += [ newname ] \n try:\n self.logf, self.logname = newlgf , newname\n except Exception:\n if newname:\n self.in_use_logs.remove(newname)\n raise",
"def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs()\n self.assertEquals(self.conveyer.logfile, None)\n self.assertEquals(filename, \"testfile.dat.rotated\")",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")",
"def checkDateForFileName(self):\n #self.currentLocalTime was already changed in log Temperatures\n if self.currentLocalTime.tm_mday != self.currentDay:\n #the day has changed we should start a new log file!\n self.logFile = self._logFile_default()\n self._create_log_file()",
"def _determine_rotated_logfile(self):\n rotated_filename = self._check_rotated_filename_candidates()\n if rotated_filename and exists(rotated_filename):\n if stat(rotated_filename).st_ino == self._offset_file_inode:\n return rotated_filename\n\n # if the inode hasn't changed, then the file shrank; this is expected with copytruncate,\n # otherwise print a warning\n if stat(self.filename).st_ino == self._offset_file_inode:\n if self.copytruncate:\n return rotated_filename\n else:\n sys.stderr.write(\n \"[pygtail] [WARN] file size of %s shrank, and copytruncate support is \"\n \"disabled (expected at least %d bytes, was %d bytes).\\n\" %\n (self.filename, self._offset, stat(self.filename).st_size))\n\n return None",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()",
"def control_log_file(self, enable: bool = False, rotate: bool = False) -> bool:\n enable = tools.coerce_bool(enable)\n if enable and not self.HANDLER_FILE:\n self.HANDLER_FILE = logs.add_file(**self.ARGS_HANDLER_FILE)\n self.LOG.debug(\"Logging to file enabled.\")\n return True\n self.rotate_log_files(value=rotate)\n if not enable and self.HANDLER_FILE:\n self.LOG.debug(\"Logging to file disabled.\")\n self.HANDLER_FILE.close()\n logs.del_file(obj=self.LOG_LOGGER)\n self.HANDLER_FILE = None\n return True\n return False",
"def doRollover(self):\n if self.stream:\n self.stream.close()\n # get the time that this sequence started at and make it a TimeTuple\n t = self.rolloverAt - self.interval\n if self.utc:\n timeTuple = time.gmtime(t)\n else:\n timeTuple = time.localtime(t)\n dfn = self.baseFilename + \".\" + time.strftime(self.suffix, timeTuple)\n if self.backupCount > 0:\n cnt=1\n dfn2=\"%s.%03d\"%(dfn,cnt)\n while os.path.exists(dfn2):\n dfn2=\"%s.%03d\"%(dfn,cnt)\n cnt+=1 \n os.rename(self.baseFilename, dfn2)\n for s in self.getFilesToDelete():\n os.remove(s)\n else:\n if os.path.exists(dfn):\n os.remove(dfn)\n os.rename(self.baseFilename, dfn)\n #print \"%s -> %s\" % (self.baseFilename, dfn)\n self.mode = 'w'\n self.stream = self._open()\n currentTime = int(time.time())\n newRolloverAt = self.computeRollover(currentTime)\n while newRolloverAt <= currentTime:\n newRolloverAt = newRolloverAt + self.interval\n #If DST changes and midnight or weekly rollover, adjust for this.\n if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:\n dstNow = time.localtime(currentTime)[-1]\n dstAtRollover = time.localtime(newRolloverAt)[-1]\n if dstNow != dstAtRollover:\n if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour\n newRolloverAt = newRolloverAt - 3600\n else: # DST bows out before next rollover, so we need to add an hour\n newRolloverAt = newRolloverAt + 3600\n self.rolloverAt = newRolloverAt",
"def doRollover(self):\n # Rotate the file first.\n handlers.RotatingFileHandler.doRollover(self)\n\n # Add group write to the current permissions.\n try:\n currMode = os.stat(self.baseFilename).st_mode\n os.chmod(self.baseFilename, currMode | stat.S_IWGRP)\n except OSError:\n pass",
"def test_abstractShouldRotate(self):\n log = logfile.BaseLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n self.assertRaises(NotImplementedError, log.shouldRotate)",
"def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()",
"def _get_logrotated_log(self):\n file_lst = glob.glob(self.rotation_pattern)\n file_lst.remove(self.log_filename)\n\n if len(file_lst) == 0:\n return None\n\n stat_lst = [(os.stat(x).st_mtime, x) for x in file_lst]\n sorted_stat_lst = sorted(stat_lst, key=lambda x: x[1])\n sorted_stat_lst.reverse()\n\n r_tuple = reduce(lambda a,b: a if (a[0] > b[0]) else b, sorted_stat_lst)\n return r_tuple[1]",
"def _check_rotated_filename_candidates(self):\n # savelog(8)\n candidate = \"%s.0\" % self.filename\n if (exists(candidate) and exists(\"%s.1.gz\" % self.filename) and\n (stat(candidate).st_mtime > stat(\"%s.1.gz\" % self.filename).st_mtime)):\n return candidate\n\n # logrotate(8)\n # with delaycompress\n candidate = \"%s.1\" % self.filename\n if exists(candidate):\n return candidate\n\n # without delaycompress\n candidate = \"%s.1.gz\" % self.filename\n if exists(candidate):\n return candidate\n\n rotated_filename_patterns = (\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # for TimedRotatingFileHandler\n \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\",\n )\n for rotated_filename_pattern in rotated_filename_patterns:\n candidates = glob.glob(self.filename + rotated_filename_pattern)\n if candidates:\n candidates.sort()\n return candidates[-1] # return most recent\n\n # no match\n return None",
"def cycle_logfile(logfile):\n logfile_old = logfile + '.old'\n if os.path.exists(logfile):\n # Cycle the old logfiles to *.old\n if os.path.exists(logfile_old):\n # E.g. Windows don't support rename-replace\n os.remove(logfile_old)\n os.rename(logfile, logfile_old)",
"def rotate(days):\n create_backup()\n remove_older_backups(days)",
"def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')\n self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')",
"def backup_rotate_daily(self, backup_dir: str, rotate: int):\n if rotate < 1:\n raise Exception(\"Rotate should be more than 0\")\n backup_domain_dir = self.get_backup_domain_dir(backup_dir)\n # for every file in directory group backups\n for disk in self.get_disks():\n grouped_files = []\n backup_files = glob.glob(\n os.path.join(backup_domain_dir, \"%s_%s-*.%s\" % (self.name, disk.device, disk.format)))\n backup_files.sort(key=os.path.getmtime, reverse=True)\n backing_file = None\n for backup_file in backup_files:\n if backing_file is None:\n grouped_files.append([])\n grouped_files[-1].append(backup_file)\n backing_file = DiskImageHelper.get_backing_file(backup_file)\n logging.debug(\"Grouped backup files %s\" % grouped_files)\n grouped_files_to_remove = grouped_files[rotate:]\n logging.debug(\"Groups to remove %s\" % grouped_files_to_remove)\n for group in grouped_files_to_remove:\n for file in group:\n logging.info(\"Removing old backup disk file: '%s'\" % file)\n os.remove(file)",
"def test_getLog(self):\n data = [\"1\\n\", \"2\\n\", \"3\\n\"]\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n for d in data:\n log.write(d)\n log.flush()\n\n # This returns the current log file.\n r = log.getLog(0.0)\n self.addCleanup(r.close)\n\n self.assertEqual(data, r.readLines())\n\n # We can't get this log, it doesn't exist yet.\n self.assertRaises(ValueError, log.getLog, 86400)\n\n log._clock = 86401 # New day\n r.close()\n log.rotate()\n r = log.getLog(0) # We get the previous log\n self.addCleanup(r.close)\n self.assertEqual(data, r.readLines())",
"def _touch_file(self, fname):\n if os.path.exists(fname):\n os.utime(fname, None)\n else:\n open(fname, 'a').close()",
"def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()",
"def rotate_logs(basename, max_version=None):\n\n # nothing to do if the basename doesn't already exist\n if not os.path.isfile(basename):\n return\n\n files = glob.glob(f\"{basename}.*\")\n n = len(basename)\n\n versions = [int(f[n + 1:]) for f in files if f[n + 1:].isdigit()]\n sorted_versions = sorted(versions, reverse=True)\n\n for v in sorted_versions:\n if max_version is not None and v >= max_version:\n os.remove(f\"{basename}.{v}\")\n else:\n os.rename(f\"{basename}.{v}\", f\"{basename}.{v+1}\")\n\n # move original if space\n if max_version is not None and max_version == 0:\n os.remove(basename)\n else:\n os.rename(basename, f\"{basename}.1\")",
"def archive_logs(self):\n source = GAConfig[\"log_file_location\"]\n destination = source + \"Archive/\"\n\n if not os.path.exists(source):\n os.makedirs(source)\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n if len(os.listdir(source)) > 1:\n specific_folder = destination + str(\n len(os.listdir(destination))) + '/'\n os.makedirs(specific_folder)\n for f in os.listdir(source):\n if((\".log\" in f) or (\".zip\" in f)):\n shutil.move(source + f, specific_folder)",
"def create_timed_rotating_log(path_log, when=\"midnight\", last=3, interval=1):\n logger = logging.getLogger(\"Test rotating file\")\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')\n if path_log:\n hdlr = TimedRotatingFileHandler(\n path_log, when=when, interval=interval, backupCount=last\n )\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.debug(\"[LOG] Criou arquivo de log\")\n else:\n hdlr2 = logging.StreamHandler()\n hdlr2.setFormatter(formatter)\n logger.addHandler(hdlr2)\n return logger"
] |
[
"0.73030114",
"0.698689",
"0.6847525",
"0.6840788",
"0.6798561",
"0.6691533",
"0.6677794",
"0.6297389",
"0.6220103",
"0.59473693",
"0.5910658",
"0.5899709",
"0.58956224",
"0.58610725",
"0.5853624",
"0.57774526",
"0.5770104",
"0.5769655",
"0.5716921",
"0.5665606",
"0.5656464",
"0.56378293",
"0.563335",
"0.5632421",
"0.56059647",
"0.5600856",
"0.5522513",
"0.5456437",
"0.5449946",
"0.54472286"
] |
0.8134708
|
0
|
L{DailyLogFile.rotate} doesn't do anything if the directory containing the log files can't be written to.
|
def test_rotatePermissionDirectoryNotOk(self):
log = logfile.DailyLogFile(self.name, self.dir)
self.addCleanup(log.close)
os.chmod(log.directory, 0o444)
# Restore permissions so tests can be cleaned up.
self.addCleanup(os.chmod, log.directory, 0o755)
previousFile = log._file
log.rotate()
self.assertEqual(previousFile, log._file)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))",
"def test_rotateAlreadyExists(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n # Build a new file with the same name as the file which would be created\n # if the log file is to be rotated.\n newFilePath = \"{}.{}\".format(log.path, log.suffix(log.lastDate))\n with open(newFilePath, \"w\") as fp:\n fp.write(\"123\")\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])",
"def log_rotate():\n st = os.stat(log_file)\n if st.st_size >= max_log_size:\n logfiles = glob.glob(\"{0}/{1}.[0-9].gz\".format(clone_dir,os.path.basename(log_file)))\n for i in xrange(len(logfiles),0,-1):\n oldlog = logfiles[i-1]\n newlog = \"{0}.{1}.gz\".format(oldlog[:-5],i)\n os.rename(oldlog,newlog)\n f_in = open(log_file, \"r+b\")\n f_out = gzip.open(log_file + \".0.gz\", \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.seek(0)\n f_in.truncate(0)\n f_in.close()\n pass",
"def test_logfile_recreates_after_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n self.conveyer.rotate_logs()\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"fourth\\\"}\"))\n self.assertEquals(self.events_out.getvalue(), \"{message: \\\"fourth\\\"}\")\n self.assertTrue(self.renamerCalled)",
"def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs()\n self.assertEquals(self.conveyer.logfile, None)\n self.assertEquals(filename, \"testfile.dat.rotated\")",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()",
"def rotatelog(self,**kwargs):\n newname = self._newname()\n newlgf = LogFile(newname,**kwargs)\n with self.id_lock:\n self._rotatelog(newlgf,newname)",
"def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')\n self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')",
"def test_abstractShouldRotate(self):\n log = logfile.BaseLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n self.assertRaises(NotImplementedError, log.shouldRotate)",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def archive_logs(self):\n source = GAConfig[\"log_file_location\"]\n destination = source + \"Archive/\"\n\n if not os.path.exists(source):\n os.makedirs(source)\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n if len(os.listdir(source)) > 1:\n specific_folder = destination + str(\n len(os.listdir(destination))) + '/'\n os.makedirs(specific_folder)\n for f in os.listdir(source):\n if((\".log\" in f) or (\".zip\" in f)):\n shutil.move(source + f, specific_folder)",
"def _get_logrotated_log(self):\n file_lst = glob.glob(self.rotation_pattern)\n file_lst.remove(self.log_filename)\n\n if len(file_lst) == 0:\n return None\n\n stat_lst = [(os.stat(x).st_mtime, x) for x in file_lst]\n sorted_stat_lst = sorted(stat_lst, key=lambda x: x[1])\n sorted_stat_lst.reverse()\n\n r_tuple = reduce(lambda a,b: a if (a[0] > b[0]) else b, sorted_stat_lst)\n return r_tuple[1]",
"def backup_rotate_daily(self, backup_dir: str, rotate: int):\n if rotate < 1:\n raise Exception(\"Rotate should be more than 0\")\n backup_domain_dir = self.get_backup_domain_dir(backup_dir)\n # for every file in directory group backups\n for disk in self.get_disks():\n grouped_files = []\n backup_files = glob.glob(\n os.path.join(backup_domain_dir, \"%s_%s-*.%s\" % (self.name, disk.device, disk.format)))\n backup_files.sort(key=os.path.getmtime, reverse=True)\n backing_file = None\n for backup_file in backup_files:\n if backing_file is None:\n grouped_files.append([])\n grouped_files[-1].append(backup_file)\n backing_file = DiskImageHelper.get_backing_file(backup_file)\n logging.debug(\"Grouped backup files %s\" % grouped_files)\n grouped_files_to_remove = grouped_files[rotate:]\n logging.debug(\"Groups to remove %s\" % grouped_files_to_remove)\n for group in grouped_files_to_remove:\n for file in group:\n logging.info(\"Removing old backup disk file: '%s'\" % file)\n os.remove(file)",
"def control_log_file(self, enable: bool = False, rotate: bool = False) -> bool:\n enable = tools.coerce_bool(enable)\n if enable and not self.HANDLER_FILE:\n self.HANDLER_FILE = logs.add_file(**self.ARGS_HANDLER_FILE)\n self.LOG.debug(\"Logging to file enabled.\")\n return True\n self.rotate_log_files(value=rotate)\n if not enable and self.HANDLER_FILE:\n self.LOG.debug(\"Logging to file disabled.\")\n self.HANDLER_FILE.close()\n logs.del_file(obj=self.LOG_LOGGER)\n self.HANDLER_FILE = None\n return True\n return False",
"def _determine_rotated_logfile(self):\n rotated_filename = self._check_rotated_filename_candidates()\n if rotated_filename and exists(rotated_filename):\n if stat(rotated_filename).st_ino == self._offset_file_inode:\n return rotated_filename\n\n # if the inode hasn't changed, then the file shrank; this is expected with copytruncate,\n # otherwise print a warning\n if stat(self.filename).st_ino == self._offset_file_inode:\n if self.copytruncate:\n return rotated_filename\n else:\n sys.stderr.write(\n \"[pygtail] [WARN] file size of %s shrank, and copytruncate support is \"\n \"disabled (expected at least %d bytes, was %d bytes).\\n\" %\n (self.filename, self._offset, stat(self.filename).st_size))\n\n return None",
"def doRollover(self):\n # Rotate the file first.\n handlers.RotatingFileHandler.doRollover(self)\n\n # Add group write to the current permissions.\n try:\n currMode = os.stat(self.baseFilename).st_mode\n os.chmod(self.baseFilename, currMode | stat.S_IWGRP)\n except OSError:\n pass",
"def get_timed_rotating_logger(**kwargs):\n # create logger\n if not os.path.exists(kwargs.get('log_dir_path')):\n try:\n os.makedirs(kwargs.get('log_dir_path'))\n except Exception:\n time.sleep(1)\n os.makedirs(kwargs.get('log_dir_path'))\n pass\n logger = logging.getLogger(kwargs.get('logger_name'))\n if kwargs.get('log_level').lower() == 'info':\n log_level = 20\n elif kwargs.get('log_level').lower() == 'warning':\n log_level = 30\n elif kwargs.get('log_level').lower() == 'error':\n log_level = 40\n elif kwargs.get('log_level').lower() == 'critical':\n log_level = 50\n else:\n log_level = 10\n logger.setLevel(log_level)\n ch = logging.handlers.TimedRotatingFileHandler(\n os.path.join(kwargs.get('log_dir_path'), kwargs.get('log_file_name')),\n when='midnight',\n interval=1,\n backupCount=kwargs.get('backup_count'),\n encoding=None,\n delay=False,\n utc=False\n )\n ch.setLevel(log_level)\n # Create formatter\n formatter = logging.Formatter(\n fmt='%(asctime)s.%(msecs)03d - %(levelname)s[%(lineno)d] - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n # Add formatter to ch\n ch.setFormatter(formatter)\n # Add ch to logger\n logger.addHandler(ch)\n st = logging.StreamHandler(sys.stdout)\n st.setFormatter(formatter)\n st.setLevel(logging.DEBUG)\n logger.addHandler(st)\n return logger",
"def __init_log_folder():\n try:\n os.makedirs(Logger.__log_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e",
"def _archive_logs(self, logdir, files):\n cwd = os.getcwd()\n archive_wd = os.path.dirname(logdir)\n archive_file = os.path.basename(logdir) + \".tgz\"\n\n # move files into logdir for archive\n for f in files:\n self.logger.info(\"moving '%s' to archive folder\" % f)\n shutil.move(f, logdir)\n\n # move to logdir parent folder\n self.logger.info(\"archiving profile logs into '%s'\" % archive_file)\n os.chdir(archive_wd)\n archive = tarfile.open(archive_file, \"w:gz\")\n archive.add(os.path.basename(logdir))\n archive.close()\n\n # go back to current working dir and remove logdir\n os.chdir(cwd)\n shutil.rmtree(logdir)",
"def _log_name(self, dir_name, job_name):\n base_dir = self.base_dir\n # Every counter is a file opened in append mode and closed\n # immediately to avoid race conditions in parallel computing: \n # file appends are atomic\n open(os.path.join(base_dir, 'log.current'), \n 'a').write('%s/%s\\n' % (dir_name, job_name))\n t = time.localtime()\n year_dir = os.path.join(base_dir, 'log.%i' % t.tm_year)\n try:\n os.mkdir(year_dir)\n except OSError:\n \"Dir exists\"\n month_dir = os.path.join(year_dir, '%02i' % t.tm_mon)\n try:\n os.mkdir(month_dir)\n except OSError:\n \"Dir exists\"\n open(os.path.join(month_dir, '%02i.log' % t.tm_mday), \n 'a').write('%s/%s\\n' % (dir_name, job_name))",
"def write_log_events(self, log_events):\n # Create log file name.\n # Replace / with - so LogGroup names can be written to current directory.\n file_name = self.log_group.name.replace('/', '-') + \"-\" + self.name + '-0.log'\n\n # Append LogEvents to log file.\n with open(file_name, 'a') as log_file:\n for event in log_events:\n log_file.write(event.message + '\\n')\n print('Wrote ' + str(len(log_events)) + ' LogEvents to ' + file_name)\n\n # Rotate log file if it's bigger than limit\n log_file_size = os.path.getsize(file_name)\n\n if log_file_size > self.log_file_limit:\n rotated_file_name = file_name.split('.')[0] + '-' + str(int(time.time())) + \".log\"\n print('Rotating ' + file_name + ' to ' + rotated_file_name)\n os.rename(file_name, rotated_file_name)",
"def _rotatelog(self,newlgf,newname):\n modlogger.debug( \"rl:%s\"%newname)\n if self.logf: \n start_new_thread(self._waitlog,(self.logf,self.logname))\n self.logsync.acquire()\n\n if newname: self.in_use_logs += [ newname ] \n try:\n self.logf, self.logname = newlgf , newname\n except Exception:\n if newname:\n self.in_use_logs.remove(newname)\n raise",
"def archive_logs():\n logging.info('Archive start...')\n\n for log_dir in filter(dir_filter, os.listdir('logs')):\n path = 'logs/{}'.format(log_dir)\n archive_files = filter(lambda x: '.log.' in x, os.listdir(path))\n zip_file_name = '{}/{}.zip'.format(\n path,\n str(datetime.now())\n .replace(' ', '_').replace('.', '_').replace(':', '_'))\n zip_file = zipfile.ZipFile(\n zip_file_name, mode='w', compression=zipfile.ZIP_DEFLATED)\n for f in archive_files:\n log_file = '{}/{}'.format(path, f)\n zip_file.write(log_file)\n os.remove(log_file)\n\n logging.info('Archive end.')",
"def _zip_daemon_logs(self, output_fname='/tmp/daemon_logs.zip'):\n log_files = list(self._get_daemon_logs_files())\n logger.info('Trying to zip all daemon log files, got log_files: {}'.format(log_files))\n if os.path.exists(output_fname):\n os.unlink(output_fname)\n compress_multiple_files(output_fname, log_files)\n return output_fname",
"def _check_rotated_filename_candidates(self):\n # savelog(8)\n candidate = \"%s.0\" % self.filename\n if (exists(candidate) and exists(\"%s.1.gz\" % self.filename) and\n (stat(candidate).st_mtime > stat(\"%s.1.gz\" % self.filename).st_mtime)):\n return candidate\n\n # logrotate(8)\n # with delaycompress\n candidate = \"%s.1\" % self.filename\n if exists(candidate):\n return candidate\n\n # without delaycompress\n candidate = \"%s.1.gz\" % self.filename\n if exists(candidate):\n return candidate\n\n rotated_filename_patterns = (\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # for TimedRotatingFileHandler\n \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\",\n )\n for rotated_filename_pattern in rotated_filename_patterns:\n candidates = glob.glob(self.filename + rotated_filename_pattern)\n if candidates:\n candidates.sort()\n return candidates[-1] # return most recent\n\n # no match\n return None",
"def __init__(self, log_dir, prefix, when_interval=None):\n\n log_dir = os.path.abspath(log_dir)\n\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n\n if not isinstance(prefix, str):\n raise ValueError(\"Prefix string required for output files\")\n\n if when_interval is None:\n when, interval = 'H', 1\n else:\n when, interval = when_interval\n if not (isinstance(when, str) and isinstance(interval, int)):\n raise ValueError(\"Rollover interval should be like ('h', 1)\")\n\n log_path = os.path.join(log_dir, prefix)\n\n _LOGGER.info(\"Saving data to path {}\".format(log_path))\n _LOGGER.info(\"Rollover interval {}\"\n .format(\"{}{}\".format(when, interval)))\n\n # Create the logger.\n self._logger = TimedRotatingFileHandler(filename=log_path,\n encoding='utf-8',\n when=when,\n interval=interval)",
"def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)"
] |
[
"0.7095745",
"0.70506084",
"0.69622046",
"0.6680857",
"0.6465015",
"0.6385839",
"0.62457275",
"0.6217206",
"0.6153067",
"0.60400724",
"0.6036944",
"0.60092556",
"0.5932151",
"0.5929419",
"0.5753766",
"0.573158",
"0.57181424",
"0.5692316",
"0.5616413",
"0.555534",
"0.55492324",
"0.5486235",
"0.545549",
"0.5435589",
"0.54344857",
"0.54284215",
"0.5394152",
"0.53819734",
"0.53819466",
"0.5368504"
] |
0.7417176
|
0
|
L{DailyLogFile.rotate} doesn't do anything if the log file can't be written to.
|
def test_rotatePermissionFileNotOk(self):
log = logfile.DailyLogFile(self.name, self.dir)
self.addCleanup(log.close)
os.chmod(log.path, 0o444)
previousFile = log._file
log.rotate()
self.assertEqual(previousFile, log._file)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_rotateAlreadyExists(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n # Build a new file with the same name as the file which would be created\n # if the log file is to be rotated.\n newFilePath = \"{}.{}\".format(log.path, log.suffix(log.lastDate))\n with open(newFilePath, \"w\") as fp:\n fp.write(\"123\")\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))",
"def test_rotatePermissionDirectoryNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.directory, 0o444)\n # Restore permissions so tests can be cleaned up.\n self.addCleanup(os.chmod, log.directory, 0o755)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)",
"def test_rotation(self):\n # this logfile should rotate every 10 bytes\n with contextlib.closing(\n logfile.LogFile(self.name, self.dir, rotateLength=10)\n ) as log:\n\n # test automatic rotation\n log.write(\"123\")\n log.write(\"4567890\")\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.2\".format(self.path)))\n log.write(\"\")\n self.assertTrue(os.path.exists(\"{}.1\".format(self.path)))\n self.assertTrue(os.path.exists(\"{}.2\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n log.write(\"3\")\n self.assertFalse(os.path.exists(\"{}.3\".format(self.path)))\n\n # test manual rotation\n log.rotate()\n self.assertTrue(os.path.exists(\"{}.3\".format(self.path)))\n self.assertFalse(os.path.exists(\"{}.4\".format(self.path)))\n\n self.assertEqual(log.listLogs(), [1, 2, 3])",
"def log_rotate():\n st = os.stat(log_file)\n if st.st_size >= max_log_size:\n logfiles = glob.glob(\"{0}/{1}.[0-9].gz\".format(clone_dir,os.path.basename(log_file)))\n for i in xrange(len(logfiles),0,-1):\n oldlog = logfiles[i-1]\n newlog = \"{0}.{1}.gz\".format(oldlog[:-5],i)\n os.rename(oldlog,newlog)\n f_in = open(log_file, \"r+b\")\n f_out = gzip.open(log_file + \".0.gz\", \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.seek(0)\n f_in.truncate(0)\n f_in.close()\n pass",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")",
"def test_logfile_recreates_after_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n self.conveyer.rotate_logs()\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"fourth\\\"}\"))\n self.assertEquals(self.events_out.getvalue(), \"{message: \\\"fourth\\\"}\")\n self.assertTrue(self.renamerCalled)",
"def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()",
"def test_log_rotation(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"second\\\"}\"))\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"third\\\"}\"))\n filename = self.conveyer.rotate_logs()\n self.assertEquals(self.conveyer.logfile, None)\n self.assertEquals(filename, \"testfile.dat.rotated\")",
"def rotatelog(self,**kwargs):\n newname = self._newname()\n newlgf = LogFile(newname,**kwargs)\n with self.id_lock:\n self._rotatelog(newlgf,newname)",
"def test_abstractShouldRotate(self):\n log = logfile.BaseLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n self.assertRaises(NotImplementedError, log.shouldRotate)",
"def control_log_file(self, enable: bool = False, rotate: bool = False) -> bool:\n enable = tools.coerce_bool(enable)\n if enable and not self.HANDLER_FILE:\n self.HANDLER_FILE = logs.add_file(**self.ARGS_HANDLER_FILE)\n self.LOG.debug(\"Logging to file enabled.\")\n return True\n self.rotate_log_files(value=rotate)\n if not enable and self.HANDLER_FILE:\n self.LOG.debug(\"Logging to file disabled.\")\n self.HANDLER_FILE.close()\n logs.del_file(obj=self.LOG_LOGGER)\n self.HANDLER_FILE = None\n return True\n return False",
"def test_need_to_rotate_log(self):\n self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time')\n self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time')\n self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size')\n self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')",
"def _determine_rotated_logfile(self):\n rotated_filename = self._check_rotated_filename_candidates()\n if rotated_filename and exists(rotated_filename):\n if stat(rotated_filename).st_ino == self._offset_file_inode:\n return rotated_filename\n\n # if the inode hasn't changed, then the file shrank; this is expected with copytruncate,\n # otherwise print a warning\n if stat(self.filename).st_ino == self._offset_file_inode:\n if self.copytruncate:\n return rotated_filename\n else:\n sys.stderr.write(\n \"[pygtail] [WARN] file size of %s shrank, and copytruncate support is \"\n \"disabled (expected at least %d bytes, was %d bytes).\\n\" %\n (self.filename, self._offset, stat(self.filename).st_size))\n\n return None",
"def doRollover(self):\n # Rotate the file first.\n handlers.RotatingFileHandler.doRollover(self)\n\n # Add group write to the current permissions.\n try:\n currMode = os.stat(self.baseFilename).st_mode\n os.chmod(self.baseFilename, currMode | stat.S_IWGRP)\n except OSError:\n pass",
"def _rotatelog(self,newlgf,newname):\n modlogger.debug( \"rl:%s\"%newname)\n if self.logf: \n start_new_thread(self._waitlog,(self.logf,self.logname))\n self.logsync.acquire()\n\n if newname: self.in_use_logs += [ newname ] \n try:\n self.logf, self.logname = newlgf , newname\n except Exception:\n if newname:\n self.in_use_logs.remove(newname)\n raise",
"def _get_logrotated_log(self):\n file_lst = glob.glob(self.rotation_pattern)\n file_lst.remove(self.log_filename)\n\n if len(file_lst) == 0:\n return None\n\n stat_lst = [(os.stat(x).st_mtime, x) for x in file_lst]\n sorted_stat_lst = sorted(stat_lst, key=lambda x: x[1])\n sorted_stat_lst.reverse()\n\n r_tuple = reduce(lambda a,b: a if (a[0] > b[0]) else b, sorted_stat_lst)\n return r_tuple[1]",
"def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')",
"def create_timed_rotating_log(path_log, when=\"midnight\", last=3, interval=1):\n logger = logging.getLogger(\"Test rotating file\")\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')\n if path_log:\n hdlr = TimedRotatingFileHandler(\n path_log, when=when, interval=interval, backupCount=last\n )\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.debug(\"[LOG] Criou arquivo de log\")\n else:\n hdlr2 = logging.StreamHandler()\n hdlr2.setFormatter(formatter)\n logger.addHandler(hdlr2)\n return logger",
"def doRollover(self):\n if self.stream:\n self.stream.close()\n # get the time that this sequence started at and make it a TimeTuple\n t = self.rolloverAt - self.interval\n if self.utc:\n timeTuple = time.gmtime(t)\n else:\n timeTuple = time.localtime(t)\n dfn = self.baseFilename + \".\" + time.strftime(self.suffix, timeTuple)\n if self.backupCount > 0:\n cnt=1\n dfn2=\"%s.%03d\"%(dfn,cnt)\n while os.path.exists(dfn2):\n dfn2=\"%s.%03d\"%(dfn,cnt)\n cnt+=1 \n os.rename(self.baseFilename, dfn2)\n for s in self.getFilesToDelete():\n os.remove(s)\n else:\n if os.path.exists(dfn):\n os.remove(dfn)\n os.rename(self.baseFilename, dfn)\n #print \"%s -> %s\" % (self.baseFilename, dfn)\n self.mode = 'w'\n self.stream = self._open()\n currentTime = int(time.time())\n newRolloverAt = self.computeRollover(currentTime)\n while newRolloverAt <= currentTime:\n newRolloverAt = newRolloverAt + self.interval\n #If DST changes and midnight or weekly rollover, adjust for this.\n if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:\n dstNow = time.localtime(currentTime)[-1]\n dstAtRollover = time.localtime(newRolloverAt)[-1]\n if dstNow != dstAtRollover:\n if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour\n newRolloverAt = newRolloverAt - 3600\n else: # DST bows out before next rollover, so we need to add an hour\n newRolloverAt = newRolloverAt + 3600\n self.rolloverAt = newRolloverAt",
"def test_getLog(self):\n data = [\"1\\n\", \"2\\n\", \"3\\n\"]\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n for d in data:\n log.write(d)\n log.flush()\n\n # This returns the current log file.\n r = log.getLog(0.0)\n self.addCleanup(r.close)\n\n self.assertEqual(data, r.readLines())\n\n # We can't get this log, it doesn't exist yet.\n self.assertRaises(ValueError, log.getLog, 86400)\n\n log._clock = 86401 # New day\n r.close()\n log.rotate()\n r = log.getLog(0) # We get the previous log\n self.addCleanup(r.close)\n self.assertEqual(data, r.readLines())",
"def get_timed_rotating_logger(**kwargs):\n # create logger\n if not os.path.exists(kwargs.get('log_dir_path')):\n try:\n os.makedirs(kwargs.get('log_dir_path'))\n except Exception:\n time.sleep(1)\n os.makedirs(kwargs.get('log_dir_path'))\n pass\n logger = logging.getLogger(kwargs.get('logger_name'))\n if kwargs.get('log_level').lower() == 'info':\n log_level = 20\n elif kwargs.get('log_level').lower() == 'warning':\n log_level = 30\n elif kwargs.get('log_level').lower() == 'error':\n log_level = 40\n elif kwargs.get('log_level').lower() == 'critical':\n log_level = 50\n else:\n log_level = 10\n logger.setLevel(log_level)\n ch = logging.handlers.TimedRotatingFileHandler(\n os.path.join(kwargs.get('log_dir_path'), kwargs.get('log_file_name')),\n when='midnight',\n interval=1,\n backupCount=kwargs.get('backup_count'),\n encoding=None,\n delay=False,\n utc=False\n )\n ch.setLevel(log_level)\n # Create formatter\n formatter = logging.Formatter(\n fmt='%(asctime)s.%(msecs)03d - %(levelname)s[%(lineno)d] - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n # Add formatter to ch\n ch.setFormatter(formatter)\n # Add ch to logger\n logger.addHandler(ch)\n st = logging.StreamHandler(sys.stdout)\n st.setFormatter(formatter)\n st.setLevel(logging.DEBUG)\n logger.addHandler(st)\n return logger",
"def write_log_events(self, log_events):\n # Create log file name.\n # Replace / with - so LogGroup names can be written to current directory.\n file_name = self.log_group.name.replace('/', '-') + \"-\" + self.name + '-0.log'\n\n # Append LogEvents to log file.\n with open(file_name, 'a') as log_file:\n for event in log_events:\n log_file.write(event.message + '\\n')\n print('Wrote ' + str(len(log_events)) + ' LogEvents to ' + file_name)\n\n # Rotate log file if it's bigger than limit\n log_file_size = os.path.getsize(file_name)\n\n if log_file_size > self.log_file_limit:\n rotated_file_name = file_name.split('.')[0] + '-' + str(int(time.time())) + \".log\"\n print('Rotating ' + file_name + ' to ' + rotated_file_name)\n os.rename(file_name, rotated_file_name)",
"def test_process_log_with_os_error_at_move(self):\n with tempfile.TemporaryDirectory() as sandbox:\n with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout:\n with self.assertLogs() as logger:\n srcfile = Path(sandbox, 'pokus.log')\n srcfile.touch()\n destpath = Path(sandbox, 'backup')\n destpath.touch()\n compressors = process_log(\n datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30),\n {\n 'target': '{{path}}/backup/{{name}}.{{ext}}',\n 'interval': 'hourly',\n 'compress': 'gzip -9',\n },\n 'hourly',\n str(srcfile),\n 10\n )\n self.assertEqual(compressors, [])\n self.assertTrue(srcfile.exists())\n self.assertEqual(fake_stdout.getvalue(), 'Checking \"{src}\"... rotating... '.format(src=srcfile))\n self.assertIn(\"FileExistsError: [Errno 17] File exists: '{}'\".format(destpath), logger.output[0])",
"def __init__(self, log_dir, prefix, when_interval=None):\n\n log_dir = os.path.abspath(log_dir)\n\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n\n if not isinstance(prefix, str):\n raise ValueError(\"Prefix string required for output files\")\n\n if when_interval is None:\n when, interval = 'H', 1\n else:\n when, interval = when_interval\n if not (isinstance(when, str) and isinstance(interval, int)):\n raise ValueError(\"Rollover interval should be like ('h', 1)\")\n\n log_path = os.path.join(log_dir, prefix)\n\n _LOGGER.info(\"Saving data to path {}\".format(log_path))\n _LOGGER.info(\"Rollover interval {}\"\n .format(\"{}{}\".format(when, interval)))\n\n # Create the logger.\n self._logger = TimedRotatingFileHandler(filename=log_path,\n encoding='utf-8',\n when=when,\n interval=interval)",
"def test_modePreservation(self):\n open(self.path, \"w\").close()\n os.chmod(self.path, 0o707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n log.rotate()\n self.assertEqual(mode, os.stat(self.path)[stat.ST_MODE])",
"def check_logging(self):\n if datetime.datetime.utcnow().strftime('%Y%m%d') != self.logger_utc_date:\n # reset\n self.shut_down_logger()\n self.logger, self.logger_utc_date = self.set_up_logging(_name='archive', _mode='a')",
"def _check_rotated_filename_candidates(self):\n # savelog(8)\n candidate = \"%s.0\" % self.filename\n if (exists(candidate) and exists(\"%s.1.gz\" % self.filename) and\n (stat(candidate).st_mtime > stat(\"%s.1.gz\" % self.filename).st_mtime)):\n return candidate\n\n # logrotate(8)\n # with delaycompress\n candidate = \"%s.1\" % self.filename\n if exists(candidate):\n return candidate\n\n # without delaycompress\n candidate = \"%s.1.gz\" % self.filename\n if exists(candidate):\n return candidate\n\n rotated_filename_patterns = (\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + with `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]\",\n # logrotate dateext rotation scheme - `dateformat -%Y%m%d-%s` + without `delaycompress`\n \"-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9].gz\",\n # for TimedRotatingFileHandler\n \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\",\n )\n for rotated_filename_pattern in rotated_filename_patterns:\n candidates = glob.glob(self.filename + rotated_filename_pattern)\n if candidates:\n candidates.sort()\n return candidates[-1] # return most recent\n\n # no match\n return None",
"def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger",
"def setup_log_file_handler(config, logfile, fmt):\n log_file_path = os.path.join(config.logs_dir, logfile)\n try:\n handler = logging.handlers.RotatingFileHandler(\n log_file_path, maxBytes=2 ** 20, backupCount=1000)\n except IOError as error:\n raise errors.Error(_PERM_ERR_FMT.format(error))\n # rotate on each invocation, rollover only possible when maxBytes\n # is nonzero and backupCount is nonzero, so we set maxBytes as big\n # as possible not to overrun in single CLI invocation (1MB).\n handler.doRollover() # TODO: creates empty letsencrypt.log.1 file\n handler.setLevel(logging.DEBUG)\n handler_formatter = logging.Formatter(fmt=fmt)\n handler_formatter.converter = time.gmtime # don't use localtime\n handler.setFormatter(handler_formatter)\n return handler, log_file_path"
] |
[
"0.74119174",
"0.72119117",
"0.7091033",
"0.69300157",
"0.6746559",
"0.6664942",
"0.6637426",
"0.66031617",
"0.65224576",
"0.64638555",
"0.6437075",
"0.62858355",
"0.6198719",
"0.6155689",
"0.59311914",
"0.5851718",
"0.58270603",
"0.57980514",
"0.55997765",
"0.55813855",
"0.55788285",
"0.5556812",
"0.5521218",
"0.54249257",
"0.5419772",
"0.5400938",
"0.53814393",
"0.5372402",
"0.5368422",
"0.5363556"
] |
0.72692174
|
1
|
Test that L{DailyLogFile.toDate} converts its timestamp argument to a time tuple (year, month, day).
|
def test_toDate(self):
    log = logfile.DailyLogFile(self.name, self.dir)
    self.addCleanup(log.close)

    timestamp = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, 0))
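    # toDate() should reduce the timestamp back to its (year, month, day) prefix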
    self.assertEqual((2000, 1, 1), log.toDate(timestamp))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_toDateUsesArgumentsToMakeADate(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n date = (2014, 10, 22)\n seconds = time.mktime(date + (0,) * 6)\n\n logDate = log.toDate(seconds)\n self.assertEqual(date, logDate)",
"def test_05_timestamp_to_dt(self):\n\n ts = int(datetime.datetime.utcnow().strftime(\"%s\"))\n ts_object = utils.timestamp_to_dt(ts)\n self.assertIsInstance(ts_object, datetime.datetime)",
"def test_convert_datetime():",
"def timestamp_to_toDate(timestamp):\n\n frm = \"%a %b %d %H:%M:%S %z %Y\"\n to = \"%Y%m%d%H%M\"\n toDate_dt = datetime.strptime(timestamp, frm)\n toDate_str = toDate_dt.strftime(to)\n return toDate_dt, toDate_str",
"def fromtimestamp(cls, timestamp):\n return date()",
"def test_logging_timestamps(self):\n import datetime\n\n fmt = \"%Y-%m-%d-%H-%M-%S\"\n time = self.chatbot.timestamp(fmt)\n\n self.assertEqual(time, datetime.datetime.now().strftime(fmt))",
"def test_datestring_to_timestamp(self):\n result = datestring_to_timestamp(\"01-JAN-1990\")\n self.assertEqual(result, 631148400.0)\n result = datestring_to_timestamp(\"01-DEC-2000\")\n self.assertEqual(result, 975625200.0)",
"def datetime(\n self, datetimetuple: tuple[int, int, int, int, int, int, int, int], /\n ) -> None:",
"def test_date_format() -> None:\n timestamp: datetime = datetime(2010, 7, 4, 12, 5, 58)\n assert f\"{timestamp:%d %B %Y %H:%M:%S}\" == \"04 July 2010 12:05:58\"",
"def test_prepare_datetime(time):\n assert SSLLabsClient().prepare_datetime(time) == \"2018-03-17\"",
"def test_timestamp_compat(value):\n\tts = Timestamp.convert(value, DEFAULT_POD)\n\tif isinstance(value, (float, int)):\n\t\tassert ts.value == value\n\telse:\n\t\tassert ts.datetime == value\n\n\tassert ts.hour == 4\n\tassert ts.year == 2020\n\n\tassert ts.strftime(\"%Y-%m-%d %H:%M %z\") == \"2020-01-02 04:06 +0000\"",
"def timestamp2dt(timestamp):\n \n dt = datetime.utcfromtimestamp(timestamp)\n \n date = dt.strftime(\"%Y-%m-%d\")\n # TODO: Check this is correct\n time = dt.hour * 3600 + dt.minute * 60 + dt.second + dt.microsecond * 1e-6\n return (date, time)",
"def test_parse_timestamp(\n test_input: int,\n expected: datetime.datetime,\n):\n assert tvmaze.parsers.parse_timestamp(test_input) == expected",
"def date_from(timestamp): \n return datetime.fromtimestamp(timestamp)",
"def test_good_year_datetime_output(self):\n obj = awstats_reader.awstats_datetime('20110430184200')\n self.assertEqual(obj.strftime('%Y%m%d%H%M%S'), '20110430184200')",
"def test_toDateDefaultToday(self):\n\n def mock_localtime(*args):\n self.assertEqual((), args)\n return list(range(0, 9))\n\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n self.patch(time, \"localtime\", mock_localtime)\n logDate = log.toDate()\n self.assertEqual([0, 1, 2], logDate)",
"def test_14_digit_datetime(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n dt = datetime.datetime(2009, 11, 30, 16, 52, 30)\n self.assertEqual(obj, dt)",
"def test_as_date(self):\n self.assertEqual(\n time_display.as_date(\n datetime(2020, 7, 31, 23, 59, 30, 357921)),\n '2020-07-31')",
"def fromtimestamp(cls, *args, **kwargs): # real signature unknown\r\n pass",
"def convertToEST(timestamp):\n newDateTime = datetime.datetime.fromtimestamp(timestamp/1000)\n return newDateTime.date(), newDateTime.time()",
"def to_timestamp(value):\n if not isinstance(value, datetime.date):\n return None\n\n return time.mktime(value.timetuple())",
"def _make_timestamp(self):\r\n\t\tlogger.debug(\"Get a timestamp\")\r\n\t\treturn time.mktime(datetime.today().timetuple())",
"def test_process_datetime_to_timestamp(time_zone, hass: HomeAssistant) -> None:\n hass.config.set_time_zone(time_zone)\n utc_now = dt_util.utcnow()\n assert process_datetime_to_timestamp(utc_now) == utc_now.timestamp()\n now = dt_util.now()\n assert process_datetime_to_timestamp(now) == now.timestamp()",
"def test_get_date(self):\n d = modis.get_date(os.path.splitext(self.fname)[0])\n self.assertTrue(isinstance(d, datetime.datetime))\n self.assertEqual(d, datetime.datetime(2015, 9, 23))",
"def test_8_digit_date(self):\n obj = awstats_reader.awstats_datetime('20091130')\n dt = datetime.date(2009, 11, 30)\n self.assertEqual(obj, dt)",
"def process_time_input(timestamp):\n if type(timestamp) == datetime.datetime:\n output_time = timestamp\n elif type(timestamp) == str:\n try:\n output_time = datetime.datetime.strptime(timestamp,\n \"%Y-%m-%d\")\n except ValueError:\n try:\n output_time = datetime.datetime.strptime(timestamp,\n \"%Y%j\")\n except ValueError:\n raise ValueError(\"The passed timestamp wasn't either \" +\n 'a \"%Y-%m-%d\" string, a \"%Y%j\" string')\n else:\n raise ValueError(\"You can only use a string or a datetime object\")\n return output_time",
"def test_convert_time_format():\n assert api_crawler.convert_time_format(\"YYYY-MM-DD\") == \"%Y-%m-%d\"\n assert api_crawler.convert_time_format(\"DD-MM-YYYY\") == \"%Y-%m-%d\"",
"def test_timestamp():\n natural = timestamp(\"December 15, 2015\")\n assert natural == {\n \"unix\": 1450137600,\n \"natural\": \"December 15, 2015\"\n }\n unix = timestamp(\"1450137600\")\n assert unix == {\n \"unix\": 1450137600,\n \"natural\": \"December 15, 2015\"\n }",
"def convert_timestamp(val, split=\"-\"):\n res = \"\"\n if isinstance(val, pd.Timestamp):\n dt2 = val.date()\n elif isinstance(val, str):\n dt2 = \"\"\n if val.find(split) == 4:\n dt2 = dt.datetime.strptime(val, split.join([\"%Y\", \"%m\", \"%d\"]))\n elif val.find(\"/\") == 4:\n dt2 = dt.datetime.strptime(val, \"/\".join([\"%Y\", \"%m\", \"%d\"]))\n else:\n raise ValueError(\"String must be in %Y-%m-%d or %Y/%m/%d format\")\n else:\n raise ValueError(\"Required Type TimeStamp \" + str(type(val)))\n\n mnth = str(dt2.month) if dt2.month > 9 else \"0\" + str(dt2.month)\n day = str(dt2.day) if dt2.day > 9 else \"0\" + str(dt2.day)\n res = split.join([str(dt2.year), mnth, day])\n\n return res",
"def date_stamp():\n return datetime.fromtimestamp(time()).strftime('%Y.%m.%d')"
] |
[
"0.73253757",
"0.6513783",
"0.6041685",
"0.5993253",
"0.59896183",
"0.58429",
"0.58173954",
"0.5814273",
"0.580627",
"0.57834035",
"0.5779967",
"0.5716261",
"0.56601375",
"0.5622248",
"0.56181383",
"0.5579738",
"0.5574767",
"0.5573614",
"0.55702555",
"0.55503386",
"0.55450135",
"0.5538568",
"0.553676",
"0.5460516",
"0.54593235",
"0.5447339",
"0.54454803",
"0.54364413",
"0.542807",
"0.5408375"
] |
0.7634906
|
0
|
Test that L{DailyLogFile.toDate} returns today's date by default. By mocking L{time.localtime}, we ensure that L{DailyLogFile.toDate} returns the first 3 values of L{time.localtime}, which make up the current date. Note that we don't compare the real result of L{DailyLogFile.toDate} to the real current date, as there's a slight possibility that the date changes between the two function calls.
|
def test_toDateDefaultToday(self):

    def mock_localtime(*args):
        self.assertEqual((), args)
        return list(range(0, 9))

    log = logfile.DailyLogFile(self.name, self.dir)
    self.addCleanup(log.close)

    self.patch(time, "localtime", mock_localtime)
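    # With no argument, toDate() should fall back to the patched time.localtime()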
    logDate = log.toDate()
    self.assertEqual([0, 1, 2], logDate)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_toDate(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n timestamp = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, 0))\n self.assertEqual((2000, 1, 1), log.toDate(timestamp))",
"def test_toDateUsesArgumentsToMakeADate(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n date = (2014, 10, 22)\n seconds = time.mktime(date + (0,) * 6)\n\n logDate = log.toDate(seconds)\n self.assertEqual(date, logDate)",
"def test_today(self):\n self.assertEquals(\n self.builder._today(), date.today().strftime('%Y-%m-%d'))",
"def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time",
"def mock_today(monkeypatch):\n\n class MockDate:\n @classmethod\n def today(cls):\n return date(2019, 1, 15)\n\n monkeypatch.setattr(taar_locale, \"date\", MockDate)",
"def get_today_date():\n return date.today()",
"def _today() -> datetime.date:\n return datetime.today().date()",
"def get_today() -> datetime.date:\n return datetime.date.today()",
"def test_date_accept_today(self):\n spi_search = \"find date today\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)",
"def get_today():\n return datetime.today()",
"def TODAY():\n return datetime.date.today()",
"def test_getLog(self):\n data = [\"1\\n\", \"2\\n\", \"3\\n\"]\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n for d in data:\n log.write(d)\n log.flush()\n\n # This returns the current log file.\n r = log.getLog(0.0)\n self.addCleanup(r.close)\n\n self.assertEqual(data, r.readLines())\n\n # We can't get this log, it doesn't exist yet.\n self.assertRaises(ValueError, log.getLog, 86400)\n\n log._clock = 86401 # New day\n r.close()\n log.rotate()\n r = log.getLog(0) # We get the previous log\n self.addCleanup(r.close)\n self.assertEqual(data, r.readLines())",
"def _get_date():\n return datetime.datetime.now()",
"def today(cls):\n timestamp = time.localtime()\n return Date(timestamp[0], timestamp[1], timestamp[3], timestamp[6], timestamp[7])",
"def get_today(**kwargs: int) -> Date:\n return Date.today().replace(**kwargs)",
"def date_now(*, convert_to_current_timezone: bool = False, utc: bool = False):\n now = datetime.datetime.now()\n\n if convert_to_current_timezone:\n now = date_make_timezone_aware(now)\n\n if utc:\n now = date_to_utc(now)\n\n return now",
"def today():\n return date.today()",
"def today():\n return datetime.today()",
"def today(cls, **kwargs: Any) -> Date:\n return cls.from_date(dt.date.today())",
"def test_define_a_second_constructor(self):\n a = Date(2012, 12, 21)\n self.assertEqual(a.year, 2012)\n self.assertEqual(a.month, 12)\n self.assertEqual(a.day, 21)\n\n t = time.localtime()\n b = Date.today()\n self.assertEqual(b.year, t.tm_year)\n self.assertEqual(b.month, t.tm_mon)\n self.assertEqual(b.day, t.tm_mday)",
"def todayDate(self):\n return time.strftime(\"%m/%d/%Y\", time.localtime())",
"def checkDateForFileName(self):\n #self.currentLocalTime was already changed in log Temperatures\n if self.currentLocalTime.tm_mday != self.currentDay:\n #the day has changed we should start a new log file!\n self.logFile = self._logFile_default()\n self._create_log_file()",
"def get_date():\n return datetime.datetime.now()",
"def date():\r\n try:\r\n curr_date = datetime.datetime.now().strftime(\"%b %d %Y\")\r\n except Exception as e:\r\n print(e)\r\n curr_date = False\r\n return curr_date",
"async def test_process_read_localtime(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx, \"TestDateTime\", group_address=\"1/2/3\", broadcast_type=\"TIME\"\n )\n\n telegram_read = Telegram(\n destination_address=GroupAddress(\"1/2/3\"), payload=GroupValueRead()\n )\n with patch(\"time.localtime\") as mock_time:\n mock_time.return_value = time.struct_time([2017, 1, 7, 9, 13, 14, 6, 0, 0])\n await self.datetime.process(telegram_read)\n\n telegram = xknx.telegrams.get_nowait()\n assert telegram == Telegram(\n destination_address=GroupAddress(\"1/2/3\"),\n payload=GroupValueResponse(DPTArray((0xE9, 0xD, 0xE))),\n )",
"def test_newFile(self):\n # A point about three months in the past.\n then = self.now - (60 * 60 * 24 * 31 * 3)\n stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))\n\n self.assertEqual(\n self._lsInTimezone('America/New_York', stat),\n '!--------- 0 0 0 0 Aug 28 17:33 foo')\n self.assertEqual(\n self._lsInTimezone('Pacific/Auckland', stat),\n '!--------- 0 0 0 0 Aug 29 09:33 foo')",
"def test_complain_if_cannot_infer_timezone(self, mock_get_prices):\n class BuyBelow10(Moonshot):\n \"\"\"\n A basic test strategy that buys below 10.\n \"\"\"\n\n def prices_to_signals(self, prices):\n signals = prices.loc[\"Close\"] < 10\n return signals.astype(int)\n\n def _mock_get_prices():\n\n dt_idx = pd.DatetimeIndex([\"2018-05-01\",\"2018-05-02\",\"2018-05-03\", \"2018-05-04\"])\n fields = [\"Close\",\"Volume\"]\n idx = pd.MultiIndex.from_product([fields, dt_idx], names=[\"Field\", \"Date\"])\n\n prices = pd.DataFrame(\n {\n \"FI12345\": [\n #Close\n 9,\n 11,\n 10.50,\n 9.99,\n # Volume\n 5000,\n 16000,\n 8800,\n 9900\n ],\n \"FI23456\": [\n # Close\n 9.89,\n 11,\n 8.50,\n 10.50,\n # Volume\n 15000,\n 14000,\n 28800,\n 17000\n\n ],\n },\n index=idx\n )\n\n return prices\n\n def mock_download_master_file(f, *args, **kwargs):\n\n master_fields = [\"Timezone\", \"Symbol\", \"SecType\", \"Currency\", \"PriceMagnifier\", \"Multiplier\"]\n securities = pd.DataFrame(\n {\n \"FI12345\": [\n \"America/New_York\",\n \"ABC\",\n \"STK\",\n \"USD\",\n None,\n None\n ],\n \"FI23456\": [\n \"America/Mexico_City\",\n \"DEF\",\n \"STK\",\n \"MXN\",\n None,\n None,\n ]\n },\n index=master_fields\n )\n securities.columns.name = \"Sid\"\n securities.T.to_csv(f, index=True, header=True)\n f.seek(0)\n\n mock_get_prices.return_value = _mock_get_prices()\n\n with patch(\"moonshot.strategies.base.download_master_file\", new=mock_download_master_file):\n with self.assertRaises(MoonshotParameterError) as cm:\n BuyBelow10().backtest(nlv={\"USD\":100000, \"JPY\":10000000})\n\n self.assertIn(\n \"cannot infer timezone because multiple timezones are present \"\n \"in data, please specify TIMEZONE explicitly (timezones: America/New_York, America/Mexico_City)\", repr(cm.exception))",
"def test_clear_last_started_date(self):\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_started_datetime()\n self.assertIsNotNone(saver.get_last_started_datetime())\n saver.clear_last_started_datetime()\n self.assertIsNone(saver.get_last_started_datetime())",
"def test_hotshot_check_date(self):\n date_first = check_date('2015-11-03 13:21:02.071381', '03.11.2015', '20.11.2015')\n date_second = check_date('2015-11-03 13:21:02.071381', '01.11.2015', '02.11.2015')\n\n self.assertTrue(date_first)\n self.assertFalse(date_second)",
"def convert_to_date_mock(request, mocker):\n module_under_test = request.node.fspath.purebasename.replace(\"test_\", \"\")\n\n def _convert_to_date_utc(*args, **kwargs):\n from dateutil import tz\n\n from awsbatch.utils import convert_to_date\n\n # executes convert_to_date but overrides arguments so that timezone is enforced to utc\n kwargs[\"timezone\"] = tz.tzutc()\n return convert_to_date(*args, **kwargs)\n\n return mocker.patch(\"awsbatch.\" + module_under_test + \".convert_to_date\", wraps=_convert_to_date_utc)"
] |
[
"0.724369",
"0.68642217",
"0.6288886",
"0.6186907",
"0.61391515",
"0.5835489",
"0.5828766",
"0.57547426",
"0.57421017",
"0.56687856",
"0.5586837",
"0.558184",
"0.5570703",
"0.55430096",
"0.5538359",
"0.5517559",
"0.54918987",
"0.54758483",
"0.54238665",
"0.5422457",
"0.54087967",
"0.5378262",
"0.537126",
"0.53617084",
"0.53399616",
"0.53061193",
"0.5271071",
"0.52496934",
"0.5247719",
"0.5235824"
] |
0.863226
|
0
|
Test that L{DailyLogFile.toDate} uses its arguments to create a new date.
|
def test_toDateUsesArgumentsToMakeADate(self):
    log = logfile.DailyLogFile(self.name, self.dir)
    self.addCleanup(log.close)

    date = (2014, 10, 22)
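    # Pad the (year, month, day) triple with six zeros to form the 9-field tuple time.mktime expects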
    seconds = time.mktime(date + (0,) * 6)

    logDate = log.toDate(seconds)
    self.assertEqual(date, logDate)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_toDate(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n timestamp = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, 0))\n self.assertEqual((2000, 1, 1), log.toDate(timestamp))",
"def test_toDateDefaultToday(self):\n\n def mock_localtime(*args):\n self.assertEqual((), args)\n return list(range(0, 9))\n\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n self.patch(time, \"localtime\", mock_localtime)\n logDate = log.toDate()\n self.assertEqual([0, 1, 2], logDate)",
"def MakeDate(*args):\n raise NotImplementedError(\"MakeDate has not been written yet. Passsed: %s\" % (args, ))",
"def test_date_only(self):\n args = {\n '--directory': './',\n '<date>': '2016-11-15',\n '<header>': None,\n '<seq>': None,\n '<week>': None\n }\n expected = {\n '--directory': Path('./'),\n '<date>': datetime(2016, 11, 15),\n '<header>': None,\n '<seq>': None,\n '<week>': None\n }\n self.assertEqual(\n star_barcode.process_arguments(args),\n expected\n )",
"def test_convert_datetime():",
"def _get_normal_date(self, args):\n\n func1, func2, func3 = args\n self.assertIsNotNone(func1(20130201, \"20190120\"))\n self.assertIsNotNone(func2(\"2013/02/01\", \"2019-01-20\"))\n self.assertIsNotNone(func3(r\"2013-/\\-02~@-\\/-@~01\",\n pd.to_datetime('2019-01-20')))",
"def test_define_a_second_constructor(self):\n a = Date(2012, 12, 21)\n self.assertEqual(a.year, 2012)\n self.assertEqual(a.month, 12)\n self.assertEqual(a.day, 21)\n\n t = time.localtime()\n b = Date.today()\n self.assertEqual(b.year, t.tm_year)\n self.assertEqual(b.month, t.tm_mon)\n self.assertEqual(b.day, t.tm_mday)",
"def test_date_field():",
"def test_hotshot_check_date(self):\n date_first = check_date('2015-11-03 13:21:02.071381', '03.11.2015', '20.11.2015')\n date_second = check_date('2015-11-03 13:21:02.071381', '01.11.2015', '02.11.2015')\n\n self.assertTrue(date_first)\n self.assertFalse(date_second)",
"def test_today(self):\n self.assertEquals(\n self.builder._today(), date.today().strftime('%Y-%m-%d'))",
"def test_check_date_tour(self):\n date_start = timezone.now()\n date_end = timezone.now() - timedelta(days=5)\n new_tour = Tournament(date_start=date_start, date_end=date_end)\n\n self.assertEqual(new_tour.check_date(), False)",
"def test_make_final_path_both_kwargs(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n expected_message = \"date and directory arguments are mutually exclusive\"\n with self.assertRaisesMessage(ValueError, expected_message):\n archive.make_final_path(date=timezone.now(), directory='test')",
"def test_make_final_path_date(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n now = timezone.now().date()\n final_path = archive.make_final_path(date=now)\n \n valid_path = os.path.join(\n archive.data_dir_path,\n now.strftime('%Y'),\n now.strftime('%m'),\n now.strftime('%d')\n )\n\n self.assertEqual(final_path, valid_path)",
"def test_get_date(self):\n d = modis.get_date(os.path.splitext(self.fname)[0])\n self.assertTrue(isinstance(d, datetime.datetime))\n self.assertEqual(d, datetime.datetime(2015, 9, 23))",
"def test_sample_date(self):\r\n self.assertEqual(self.test_sample.date, '2018-08-02 22:32:23')",
"def setDate(self, p_int, p_int_1, p_int_2): # real signature unknown; restored from __doc__\r\n return False",
"def test_format_date(self):\n assert BaseTSVWriter.format_date(FIXED_DATE) == '2016/05/15'",
"def test_sample_one_date(self):\r\n self.assertEqual(self.test_sample.date, datetime.datetime(2016, 2, 12, 7, 34, 26))",
"def test_movements_date_to(api_client):\n\n MovementFactory(date=datetime.date(2017, 2, 10))\n MovementFactory(date=datetime.date(2017, 2, 11))\n\n response = api_client.get(reverse(\"api:movements-list\"), {\"date_to\": \"2017-02-10\"})\n\n assert response.status_code == 200\n assert len(response.data) == 1\n assert response.data[0][\"date\"] == \"2017-02-10\"",
"def test_make_final_path_datetime(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n now = timezone.now()\n final_path = archive.make_final_path(date=now)\n \n valid_path = os.path.join(\n archive.data_dir_path,\n now.strftime('%Y'),\n now.strftime('%m'),\n now.strftime('%d')\n )\n\n self.assertEqual(final_path, valid_path)",
"def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))",
"def test_8_digit_date(self):\n obj = awstats_reader.awstats_datetime('20091130')\n dt = datetime.date(2009, 11, 30)\n self.assertEqual(obj, dt)",
"def test_one_off_training_date(self):\n self.assertIsInstance(self.one_off_training.date, datetime.date)\n self.assertEqual(\n self.one_off_training.date,\n datetime.date(2020, 6, 14)\n )",
"def test_parse_args(self):\n args = '%s %s' % (self.date1, self.date1)\n args = modis.parse_args(args.split(' '))\n self.assertEqual(args.start_date, self.date1)\n self.assertEqual(args.end_date, self.date1)",
"def test_dates(self):\n result = export.processExport(houseId=1,\n startDate = datetime.datetime(2013, 01, 06) #5 Days\n )\n\n self.assertEqual(result.shape, (1440, 2))\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 06))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))\n\n\n #Stop at 00:00 on the 5th\n result = export.processExport(houseId=1,\n endDate = datetime.datetime(2013, 01, 05, 23, 55) #5 Days\n )\n\n self.assertEqual(result.shape, (1440, 2))\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 05, 23, 55))",
"def test_date_can_be_changed(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n event.date = datetime(2020, 10, 10, 12, 10, tzinfo=utc)\n self.assertEqual(event.date, datetime(2020, 10, 10, 12, 10, tzinfo=utc))",
"def checkDateForFileName(self):\n #self.currentLocalTime was already changed in log Temperatures\n if self.currentLocalTime.tm_mday != self.currentDay:\n #the day has changed we should start a new log file!\n self.logFile = self._logFile_default()\n self._create_log_file()",
"def test_handler_dates(self):\n\n @intent_handler\n def decorated_test(date_str: str = None, date_date: datetime.date = None):\n return date_str, date_date\n\n r = create_request(\n \"TEST_CONTEXT\",\n date_str=[\n \"2001-12-31\",\n \"2001-12-31\",\n ],\n date_date=[\n \"2001-12-31\",\n \"1001-12-31\",\n ],\n )\n result = decorated_test(r)\n self.assertEqual(result, (\"2001-12-31\", datetime.date(2001, 12, 31)))",
"def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time",
"def test_one_date(self):\n result = beautiful_days_at_the_movies(10, 10, 6)\n self.assertEquals(result, 0)"
] |
[
"0.8066649",
"0.7104837",
"0.6503378",
"0.63718504",
"0.60607177",
"0.59687394",
"0.5933312",
"0.58151937",
"0.57977784",
"0.5757311",
"0.5743511",
"0.5741214",
"0.57226443",
"0.57186323",
"0.5686977",
"0.5681874",
"0.5680657",
"0.56493884",
"0.56150955",
"0.5610736",
"0.55876213",
"0.5565116",
"0.55444926",
"0.55306417",
"0.54995036",
"0.54931796",
"0.54771054",
"0.5462422",
"0.546023",
"0.54580486"
] |
0.88875306
|
0
|
Test the extension_level parameter of Extended Isolation Forest. With extension_level=0 the estimator reproduces Isolation Forest's behaviour. This test exercises Isolation Forest's known 'ghost clusters' artefact, which Extended Isolation Forest mitigates. The overall variance of the EIF anomaly scores should be lower than the variance of the IF anomaly scores, and the anomaly score for points inside the 'ghost clusters' should be lower for IF.
|
def extended_isolation_forest_extension_level_smoke():
    seed = 0xBEEF
    double_blob = make_blobs(centers=[[10, 0], [0, 10]], cluster_std=[1, 1], random_state=seed,
                             n_samples=500, n_features=2)[0]
    train = h2o.H2OFrame(double_blob)
    anomalies = h2o.H2OFrame([[0, 0], [10, 10]])  # Points in the ghost clusters
    eif_model = H2OExtendedIsolationForestEstimator(ntrees=100, seed=seed, sample_size=255, extension_level=1)
    eif_model.train(training_frame=train)
    eif_overall_anomaly_score = eif_model.predict(train)
    eif_overall_anomaly = eif_overall_anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
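    # extension_level=0 reduces the Extended Isolation Forest to the behaviour of a classic Isolation Forest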
    if_model = H2OExtendedIsolationForestEstimator(ntrees=100, seed=0xBEEF, sample_size=255, extension_level=0)
    if_model.train(training_frame=train)
    if_overall_anomaly_score = if_model.predict(train)
    if_overall_anomaly = if_overall_anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
    eif_anomaly_score = eif_model.predict(anomalies)['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
    if_anomaly_score = if_model.predict(anomalies)['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
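    # Points inside the 'ghost clusters' should look less anomalous to the simulated IF than to EIF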
    assert if_anomaly_score[0] < eif_anomaly_score[0], \
        "The anomaly score of the simulated Isolation Forest should be significantly lower than the score of the " \
        "Extended Isolation Forest because this point is in a 'Ghost cluster'. " + str(if_anomaly_score[0]) + " < " \
        + str(eif_anomaly_score[0])
    assert if_anomaly_score[1] < eif_anomaly_score[1], \
        "The anomaly score of the simulated Isolation Forest should be significantly lower than the score of the " \
        "Extended Isolation Forest because this point is in a 'Ghost cluster'. " + str(if_anomaly_score[1]) + " < " \
        + str(eif_anomaly_score[1])
    assert 0.0015 < eif_overall_anomaly.var() < 0.0020 < if_overall_anomaly.var() < 0.0023, \
        "Unexpected output: the variance in the anomaly scores of Extended Isolation Forest is suspiciously " \
        "different from Isolation Forest (EIF with extension_level=0). In general, the overall variance of the EIF " \
        "anomaly scores should be lower than the variance of the IF scores. This could be a bug in extension_level " \
        "handling because " + str(eif_overall_anomaly.var()) + " should be lower than " + str(if_overall_anomaly.var())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_extended_isolation_forest_categorical():\n\n print(\"Extended Isolation Forest Categorical Test\")\n train = h2o.import_file(pyunit_utils.locate(\"smalldata/parser/hexdev_497/airlines_small_csv\"))\n train[\"Origin\"] = train[\"Origin\"].asfactor()\n train[\"Dest\"] = train[\"Dest\"].asfactor()\n eif_model = H2OExtendedIsolationForestEstimator(ntrees=100,\n seed=0xBEEF,\n sample_size=256,\n extension_level=20,\n categorical_encoding=\"one_hot_explicit\")\n eif_model.train(x=[\"Origin\", \"Dest\"], training_frame=train)\n anomaly_score = eif_model.predict(train)\n anomaly = anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)[\"anomaly_score\"]\n\n assert 0.36 < anomaly.mean() < 0.39, \\\n \"Not expected output: Mean anomaly score is suspiciously different.\" + str(anomaly.mean())\n\n print(anomaly_score)\n print(eif_model)",
"def test(df, partition, col='averageRating'):\n stats = community_stats(df, partition)\n stats = add_threshold(stats)\n df = df.merge(partition, on=['title']).merge(stats[['community', 'g']], on=['community'])\n g0, g1 = split_to_groups(df)\n\n summerize(col, df)\n levene_homogenity = run_levene(col, g0, g1)\n diff = g1[col] - g0[col]\n run_shapiro(diff)\n run_ttest(col, g0, g1, levene_homogenity.pvalue <= 0.05)",
"def test_general_subset_level():\n pass",
"def test_evi(self):\n scene = Landsat8Scene(self.filenames)\n geoimg = scene.evi()\n self.assertEquals(geoimg.nbands(), 1)\n self.assertTrue('evi' in geoimg.bandnames())",
"def test_general_subset_invalid_level():\n pass",
"def test_trans_extflag1(self):\n x = np.array([1, 2])\n lmax = 0\n ext_flag = 1\n expected = np.log(np.pi/(2*x)) - x\n result = bessel_sk.trans_bessels(x, lmax, ext_flag)\n assert_almost_equal(result[:, 0], expected.T)",
"def test_xhat_extension(self):\n from mpisppy.extensions.xhatlooper import XhatLooper\n PHoptions = self._copy_of_base_options()\n PHoptions[\"PHIterLimit\"] = 1\n PHoptions[\"xhat_looper_options\"] = {\"xhat_solver_options\":\\\n PHoptions[\"iterk_solver_options\"],\n \"scen_limit\": 3}\n\n ph = mpisppy.opt.ph.PH(PHoptions, self.all3_scenario_names,\n scenario_creator, scenario_denouement,\n cb_data=3, PH_extensions=XhatLooper)\n conv, basic_obj, tbound = ph.ph_main()\n # in this particular case, the extobject is an xhatter\n xhatobj1 = round_pos_sig(ph.extobject._xhat_looper_obj_final, 1)\n self.assertEqual(xhatobj1, 200000)",
"def test_episodic_overfit(self,\n learner_class,\n learner_config,\n threshold=1.,\n attempts=1):\n gin_config = '\\n'.join((self.BASE_GIN_CONFIG, learner_config))\n gin.parse_config(gin_config)\n\n episode_config = config.EpisodeDescriptionConfig(\n num_ways=self.NUM_EXAMPLES, num_support=1, num_query=1)\n\n trainer_instance = trainer.Trainer(\n train_learner_class=learner_class,\n eval_learner_class=learner_class,\n is_training=True,\n train_dataset_list=['fake'],\n eval_dataset_list=['fake'],\n records_root_dir=self.temp_dir,\n checkpoint_dir=os.path.join(self.temp_dir, 'checkpoints'),\n train_episode_config=episode_config,\n eval_episode_config=episode_config,\n data_config=config.DataConfig(),\n )\n # Train 1 update at a time for the last `attempts - 1` steps.\n trainer_instance.num_updates -= (attempts - 1)\n trainer_instance.train()\n valid_accs = [trainer_instance.valid_acc]\n for _ in range(attempts - 1):\n trainer_instance.num_updates += 1\n trainer_instance.train()\n valid_accs.append(trainer_instance.valid_acc)\n self.assertGreaterEqual(max(valid_accs), threshold)",
"def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)",
"def test_enforce_exogenous_no_exo_data(load_pos_and_neg_data):\n data = load_pos_and_neg_data\n\n exp1 = TSForecastingExperiment()\n exp1.setup(data=data, enforce_exogenous=True)\n num_models1 = len(exp1.models())\n\n exp2 = TSForecastingExperiment()\n exp2.setup(data=data, enforce_exogenous=False)\n num_models2 = len(exp2.models())\n\n # Irrespective of the enforce_exogenous flag, all models are enabled when\n # the data does not contain exogenous variables.\n assert num_models1 == num_models2",
"def two_pop_var_test(datae,dataf,alpha):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Calculate Critical Regions\n F = var_e/var_f\n critical_region_left = scs.f.ppf(alpha-(alpha/2),df_e,df_f) \n critical_region_right = scs.f.ppf(1-alpha/2,df_e,df_f) \n \n # Decision \n if F < critical_region_left and F > critical_region_right:\n decision = 'Reject H0'\n return critical_region_left,critical_region_right,F,decision\n else:\n decision = 'Fail to Reject H0'\n return critical_region_left,critical_region_right,F,decision",
"def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)",
"def extinction(pyrat):\n # Load parameters for that molecular species (in future pyrat.cond.names for condensate species) \n species = 'H2O'\n pyrat.haze.cloudBenneke.readVP(species) # do this outside once in beginning of code and call it??\n # Fill in Cloud Base Pressure and Temperature\n pyrat.haze.cloudBenneke.cloudbase(pyrat.atm.temp,pyrat.atm.press)\n # Calculate the extinction coefficient (in cm2 molecule-1)\n pyrat.haze.cloudBenneke.extinction(pyrat.spec.wn, pyrat.atm.press)",
"def _add_multilevel_rois_for_test(blobs, name):\n lvl_min = cfg.FPN.ROI_MIN_LEVEL\n lvl_max = cfg.FPN.ROI_MAX_LEVEL\n lvls = fpn.map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)\n fpn.add_multilevel_roi_blobs(\n blobs, name, blobs[name], lvls, lvl_min, lvl_max\n )",
"def test_badge_should_have_extensions(self):\n\n badge = self.get_sample_badge()\n self.assertTrue(hasattr(badge, 'extensions'))",
"def assert_extension_info(ext):\n tu.assert_str(ext.display_name, \"HadGEM2-A\")\n tu.assert_str(ext.description, \"The HadGEM2-A model\", True)\n tu.assert_str(ext.full_display_name, \"CMIP5 Model : MOHC - HadGEM2-A\")\n tu.assert_str(ext.type_display_name, \"Model\")\n tu.assert_int(ext.summary_fields, 2)\n tu.assert_str(ext.summary_fields[0], \"HadGEM2-A\")\n tu.assert_str(ext.summary_fields[1], \"Hadley Global Environment Model 2 - Atmosphere\")",
"def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)",
"def test_using_ego_graph(self):\n assert_equal(nx.local_efficiency(self.G3), 7 / 12)",
"def test_lfc_ml2():\n levels = np.array([1024.95703125, 1016.61474609, 1005.33056641, 991.08544922, 973.4163208,\n 951.3381958, 924.82836914, 898.25482178, 873.46124268, 848.69830322,\n 823.92553711, 788.49304199, 743.44580078, 700.50970459, 659.62017822,\n 620.70861816, 583.69421387, 548.49719238, 515.03826904, 483.24401855,\n 453.0418396, 424.36477661, 397.1505127, 371.33441162, 346.85922241,\n 323.66995239, 301.70935059, 280.92651367, 261.27053833, 242.69168091,\n 225.14237976, 208.57781982, 192.95333862, 178.22599792, 164.39630127,\n 151.54336548, 139.68635559, 128.74923706, 118.6588974, 109.35111237,\n 100.76405334, 92.84288025, 85.53556824, 78.79430389, 72.57549286,\n 66.83885193, 61.54678726, 56.66480637, 52.16108322]) * units.mbar\n temperatures = np.array([6.00750732, 5.14892578, 4.177948, 3.00268555, 1.55535889,\n -0.25527954, -1.93988037, -3.57766724, -4.40600586, -4.19238281,\n -3.71185303, -4.47943115, -6.81280518, -8.08685303, -8.41287231,\n -10.79302979, -14.13262939, -16.85784912, -19.51675415,\n -22.28689575, -24.99938965, -27.79664612, -30.90414429,\n -34.49435425, -38.438797, -42.27981567, -45.99230957,\n -49.75340271, -53.58230591, -57.30686951, -60.76026917,\n -63.92070007, -66.72470093, -68.97846985, -70.4264679,\n -71.16407776, -71.53797913, -71.64375305, -71.52735901,\n -71.53523254, -71.61097717, -71.92687988, -72.68682861,\n -74.129776, -76.02471924, -76.88977051, -76.26008606,\n -75.90351868, -76.15809631]) * units.celsius\n dewpoints = np.array([4.50012302, 3.42483997, 2.78102994, 2.24474645, 1.593485, -0.9440815,\n -3.8044982, -3.55629468, -9.7376976, -10.2950449, -9.67498302,\n -10.30486488, -8.70559597, -8.71669006, -12.66509628, -18.6697197,\n -23.00351334, -29.46240425, -36.82178497, -41.68824768, -44.50320816,\n -48.54426575, -52.50753403, -51.09564209, -48.92690659, -49.97380829,\n -51.57516098, -52.62096405, -54.24332809, -57.09109879, -60.5596199,\n -63.93486404, -67.07530212, -70.01263428, -72.9258728, -76.12271881,\n -79.49847412, -82.2350769, -83.91127014, -84.95665741, -85.61238861,\n -86.16391754, -86.7653656, -87.34436035, -87.87495422, -88.34281921,\n -88.74453735, -89.04680634, -89.26436615]) * units.celsius\n __, t_mixed, td_mixed = mixed_parcel(levels, temperatures, dewpoints)\n mixed_parcel_prof = parcel_profile(levels, t_mixed, td_mixed)\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints, mixed_parcel_prof, td_mixed)\n assert_almost_equal(lfc_pressure, 962.34 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 0.767 * units.degC, 2)",
"def GlobalThresholding(image, kernel_sigma, N_levels, N_classes, step = 1): \n \n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n pixel_count, pixel_count_normalized = CountPixels(image, N_levels);\n mean_g = image.mean(); # global mean\n\n if N_classes == 2: \n interclass_var = np.zeros((N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 1, step): \n\n threshold = ii;\n \n mask_1 = range_array <= threshold;\n mask_2 = range_array > threshold;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = 1 - p_1; # probability of class 2\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2;\n interclass_var[ii] = np.nan_to_num(temp);\n \n threshold = np.argmax(interclass_var);\n mask_1 = image <= threshold;\n mask_2 = image > threshold;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n return mask;\n elif N_classes == 3:\n interclass_var = np.zeros((N_levels, N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 2, step): \n for jj in range(ii + 1, N_levels - 1, step):\n\n threshold1 = ii;\n threshold2 = jj;\n \n mask_1 = range_array <= threshold1;\n mask_2 = (range_array > threshold1) * (range_array <= threshold2);\n mask_3 = range_array > threshold2;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = pixel_count_normalized[mask_2].sum(); # probability of class 2\n p_3 = 1 - (p_1 + p_2); # probability of class 3\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n mean_3 = 1 / p_3 * np.sum(range_array[mask_3] * pixel_count_normalized[mask_3]); # mean of class 3\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2 + p_3 * (mean_3 - mean_g) ** 2;\n interclass_var[ii, jj] = np.nan_to_num(temp);\n \n threshold = np.unravel_index(np.argmax(interclass_var, axis=None), interclass_var.shape);\n threshold1 = threshold[0];\n threshold2 = threshold[1];\n \n mask_1 = image <= threshold1;\n mask_2 = (image > threshold1) * (image <= threshold2);\n mask_3 = image > threshold2;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n mask[mask_3] = 2;\n return mask;\n elif N_classes == 4:\n interclass_var = np.zeros((N_levels, N_levels, N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 3, step): \n for jj in range(ii + 1, N_levels - 2, step):\n for kk in range(jj + 1, N_levels - 1, step): \n \n threshold1 = ii;\n threshold2 = jj;\n threshold3 = kk;\n \n mask_1 = range_array <= threshold1;\n mask_2 = (range_array > threshold1) * (range_array <= threshold2);\n mask_3 = (range_array > threshold2) * (range_array <= threshold3); \n mask_4 = range_array > threshold3;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = pixel_count_normalized[mask_2].sum(); # probability of class 2\n p_3 = pixel_count_normalized[mask_3].sum(); # probability of class 3\n p_4 = 1 - (p_1 + p_2 + p_3); # probability of class 4\n \n mean_1 = 1 / p_1 * 
np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n mean_3 = 1 / p_3 * np.sum(range_array[mask_3] * pixel_count_normalized[mask_3]); # mean of class 3\n mean_4 = 1 / p_4 * np.sum(range_array[mask_4] * pixel_count_normalized[mask_4]); # mean of class 4\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2 + \\\n p_3 * (mean_3 - mean_g) ** 2 + p_4 * (mean_4 - mean_g) ** 2;\n interclass_var[ii, jj, kk] = np.nan_to_num(temp);\n \n threshold = np.unravel_index(np.argmax(interclass_var, axis=None), interclass_var.shape);\n threshold1 = threshold[0];\n threshold2 = threshold[1];\n threshold3 = threshold[2];\n \n mask_1 = image <= threshold1;\n mask_2 = (image > threshold1) * (image <= threshold2);\n mask_3 = (image > threshold2) * (image <= threshold3);\n mask_4 = image > threshold3;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n mask[mask_3] = 2;\n mask[mask_4] = 3;\n return mask;\n else:\n print('max supported N_class == 4. Abort..\\n')\n return None;",
"def test_trans_extflag0(self):\n x = np.array([1, 2])\n lmax = 0\n ext_flag = 0\n expected = -np.log(x) + np.log(np.sinh(x))\n result = bessel_sk.trans_bessels(x, lmax, ext_flag)\n assert_almost_equal(result[:, 0], expected.T)",
"def test_score_without_fitted_estimator(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n assert_not_fitted(oz)\n\n with pytest.raises(NotFitted):\n oz.score(self.binary.X.test, self.binary.y.test)\n assert_not_fitted(oz)",
"def test_evaluate_no_exog_against_with_exog():\n y, X = load_longley()\n forecaster = DirectReductionForecaster(LinearRegression())\n cv = SlidingWindowSplitter()\n scoring = MeanAbsolutePercentageError(symmetric=True)\n\n out_exog = evaluate(forecaster, cv, y, X=X, scoring=scoring)\n out_no_exog = evaluate(forecaster, cv, y, X=None, scoring=scoring)\n\n scoring_name = f\"test_{scoring.name}\"\n assert np.all(out_exog[scoring_name] != out_no_exog[scoring_name])",
"def test_no_backg_subt():\n \n test_object = fa.read_in_envision(data_csv=HsHis6_PEX5C_vs_HsPEX5C, platemap_csv=Hs_His6_PEX5C_vs_HsPEX5C_platemap, data_type='plate', size=384)\n test_object.calculate_r_i(correct=True, plot_i=False, thr=80)",
"def main_extension(fname, scoring_matrix, gap, gap_ext, conserved_seq=\"\", conserved_strength=0, bound=-1,\n ignore_start_gaps=False, ignore_end_gaps=False, auto_bound=False):\n s, t = read_fasta(fname)\n max_score, s_aligned, t_aligned = GAFF_extended(s, t, scoring_matrix, gap, gap_ext, conserved_seq,\n conserved_strength, bound, ignore_start_gaps,\n ignore_end_gaps, auto_bound)\n # show important information\n print_all_info(s_aligned, t_aligned, max_score, scoring_matrix)\n return max_score, s_aligned, t_aligned",
"def test_eigrp(sw):\n\tcmd = sw.show('show ip eigrp')\n\tresp = xmltodict.parse(cmd[1])['ins_api']['outputs']['output']\n\n\ttry:\n\t\tif resp[\"code\"] == \"400\":\n\t\t\t#most likely feature eigrp is not in the configuration.\n\t\t\treturn False\n\t\telif resp[\"code\"] == \"501\" and resp[\"clierror\"] == \"Note: process currently not running\\n\":\n\t\t\t#feature eigrp is enabled but not configured.\n\t\t\treturn False\n\t\telif resp[\"code\"] == \"200\":\n\t\t\t#eigrp appears to be configured\n\t\t\tcontexts = resp[\"body\"][\"TABLE_asn\"][\"ROW_asn\"]\n\t\t\tif len(contexts) > 0:\n\t\t\t\treturn True\n\texcept Exception as oops:\n\t\tprint type(oops)\n\t\tprint oops.args\n\t\tprint oops\n\treturn False",
"def test_fit_score(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n assert_not_fitted(oz, X_test=self.binary.X.test)\n assert oz.fit(self.binary.X.train, self.binary.y.train) is oz\n assert 0.0 <= oz.score(self.binary.X.test, self.binary.y.test) <= 1.0\n assert_fitted(oz, X_test=self.binary.X.test)",
"def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)",
"def MultiExtension_getDefaultLevel():\n return _libsbml.MultiExtension_getDefaultLevel()",
"def test_information_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertAlmostEqual(self.decision_tree.get_root_node().node_split.criterion_value,\n 2. * -0.3 * math.log2(0.3) - 0.4 * math.log2(0.4))"
] |
[
"0.5861506",
"0.5104245",
"0.5055874",
"0.50445503",
"0.50417703",
"0.49671063",
"0.49630588",
"0.49009794",
"0.487647",
"0.48635438",
"0.48563376",
"0.4849839",
"0.48132083",
"0.47682765",
"0.47103968",
"0.46879795",
"0.46877152",
"0.46719715",
"0.46637863",
"0.46590954",
"0.4643691",
"0.46362972",
"0.46142933",
"0.46111077",
"0.46061718",
"0.4606006",
"0.4588429",
"0.45818657",
"0.45544523",
"0.45523873"
] |
0.7157335
|
0
|
Buy the required quantity Y of X
|
def Buy(self, X, Y):
    # Total cost of Y shares of X, including the transaction tax.
    cost = int(Y) * self.price[X][0] * (1 + self.taxe)
    if self.money - cost < 0:
        raise TradeError("Not Enough Money")
    self.share[X] += int(Y)
    self.money -= cost
    print(f"BUY:{int(Y)}:{X}", flush=True)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _buy(self, units=1):\n self.quantity -= units",
"async def buy(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n price = self.iex.price(symbol)\r\n cost = quantity * price\r\n if company.balance < cost:\r\n await ctx.send(f\"{company.name}\\nBalance: {company.balance} USD\\nPurchase cost: {cost} USD\")\r\n raise StonksError()\r\n\r\n value = price * quantity\r\n self.iex.buy(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``-{value} {company.name} ⯮ {quantity} {symbol} @ {price}``\")",
"def buy(self, stock, amount):\n self.orders[stock] += amount",
"def _pay(self, asked_value):\n\t\tpayment_value = min(self.stock, asked_value)\n\t\tself.stock -= payment_value\n\t\treturn payment_value",
"def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)",
"def trade_action(self, BUY_QTY):\n BUY_QTY = 4500\n self.trade(BUY_QTY)\n #self.show()",
"async def sell(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n inventory = self.iex.get_held_stock_quantity(db, company.id, symbol)\r\n if inventory < quantity:\r\n await ctx.send(f\"``{company.name}\\n{inventory} {symbol}``\")\r\n raise StonksError()\r\n\r\n price = self.iex.price(symbol)\r\n value = price * quantity\r\n self.iex.sell(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``+{value} {company.name} ⯬ {quantity} {symbol} @ {price}``\")",
"def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)",
"def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity",
"def market_buy(self, order_id, quantity):\n Library.functions.market_buy(self._book, order_id, quantity)",
"def purchase(self, item_type):",
"def produce(self, quantitiy):\n self._newly = self._newly + quantitiy",
"def buy_commodity(umid, id1, val1, cur=tradeables.find_one({'baseCurrency': True})['name']):\n # todo: rework for DJango and new Classes\n currency = tradeables.find_one({'name': cur})['_id']\n valBase = tradeables.find_one({'_id': id1})['priceBase'] * val1 * -1 # todo: remove\n valCur = valBase * tradeables.find_one({'name': cur})['base2this']\n\n if val1 >= 0:\n action = 'Bought'\n else:\n action = 'Sold'\n\n trade(umid, id1, val1, currency, valCur, action)\n print()\n print(' {} {:.2f} {} for {}'.format(action, val1, id1.split('_')[1], cur))\n print(' {:<3} Unit price: {:.2f} Total: {:.2f}'.format('USD', valBase/-val1, valBase))\n print(' {:<3} Unit price: {:.2f} Total: {:.2f} <-- final'.format(cur, valCur/-val1, valCur))",
"def buy(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.buy(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || BUY %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))",
"def test_product_buy(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 5)\n self.assertEqual(result_buy, 175)",
"def buy(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.buy(symbol, quantity, in_force, extended)",
"def buy_and_pay(self):\n return self.price",
"def argument_quantity(self, quantity):\n self.quantity = self.quantity + int(quantity)\n self.save()",
"def buy_btc(self, qty, pricevaries=False):\n url = self.base_url + 'buys'\n request_data = {\n \"qty\": qty,\n \"agree_btc_amount_varies\": pricevaries\n }\n body = json.dumps(request_data)\n self.session.headers.update(self.sign(url, body=body))\n resp = self.session.post(url=url, data=body)\n return resp.json()",
"def quant(date, bid, ask, voodoo):\n\n future = 200\n voodoo[:] = ask-bid\n for i in xrange(0, future):\n voodoo += (ask-bid + ask-bid + ask-bid + ask-bid\n +ask-bid + ask-bid + ask-bid + ask-bid\n ) / 8\n voodoo[:] = voodoo / future",
"def userBuyShipObj(self, user : bbUser.bbUser, requestedShip : bbShip.bbShip):\n if self.userCanAffordItemObj(user, requestedShip):\n self.shipsStock.removeItem(requestedShip)\n user.credits -= requestedShip.getValue()\n user.inactiveShips.addItem(requestedShip)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy ship \" + requestedShip.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedShip.getValue()))",
"def await_buy(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.await_buy(symbol, quantity, in_force, extended)",
"def _increment_quantity(self, units):\n self.quantity += units",
"async def buybait(self, ctx:commands.Context, quantity:int, *bait_type:str):\r\n\r\n if not await self.IsSpecialized(ctx.guild, ctx.channel.id, SHOP_CHANNEL):\r\n await ctx.send('Cannot buy bait here\\nUse `add shop` to turn this channel into a shop')\r\n return\r\n\r\n bait_type = ' '.join(bait_type)\r\n if not bait_type in fishing_bait:\r\n await ctx.send(f'{bait_type} is not a valid form of bait')\r\n\r\n bulk_mod = await self.GetModifier(ctx.guild, 'bulk_purchase_mod')\r\n bulk_requirement = await self.GetSetting(ctx.guild, 'bulk_minimum')\r\n total = int(bait_prices[bait_type] * quantity * await self.GetModifier(ctx.guild, \"bait_price\") * (1 if quantity < bulk_requirement else 1 - bulk_mod))\r\n\r\n if not bank.can_spend(ctx.message.author, total):\r\n await ctx.send(f'You don\\'t have enough {await bank.get_currency_name(ctx.guild)}')\r\n return\r\n\r\n msg = await ctx.send(f'Are you sure you want to buy {bait_type} x{quantity} ({total} {await bank.get_currency_name(ctx.guild)})'\r\n + (f'\\n*-{100 * bulk_mod}% for buying in bulk*' if quantity >= bulk_requirement else ''))\r\n\r\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\r\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\r\n try:\r\n await ctx.bot.wait_for(\"reaction_add\", check=pred, timeout=15)\r\n except asyncio.TimeoutError:\r\n await msg.clear_reactions()\r\n return\r\n\r\n if pred.result is True:\r\n member_bait = await self.config.member(ctx.message.author).bait()\r\n member_bait[bait_type] += quantity\r\n await self.config.member(ctx.message.author).bait.set(member_bait)\r\n await msg.edit(content=f'{quantity} {bait_type} bought for {total} {await bank.get_currency_name(ctx.guild)}')\r\n await bank.withdraw_credits(ctx.message.author, total)\r\n else:\r\n await msg.edit(content='Sale cancelled')\r\n\r\n await msg.clear_reactions()",
"def buy(self,\n currency_pair,\n rate,\n amount):\n pass",
"def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)",
"async def buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if number and item:\n out = item_helpers.buy(ctx.author.id, item, number=number)\n await ctx.send(out)",
"def Sell(self, X, Y):\n if (self.share[X] - int(Y)) * (1 + self.taxe) < 0:\n raise TradeError(\"Not Enough Share\")\n self.share[X] -= int(Y)\n self.money += int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"SELL:{str(int(Y))}:{str(X)}\", flush = True)",
"def sum_availability(val, quant) -> float:\n return val + qty_available(quant)",
"def buyGem(self, amount):\n returnVal = False\n if self.spendCoin(amount=25):\n self.gems += 1\n returnVal = True\n \n return returnVal"
] |
[
"0.7238275",
"0.67132986",
"0.6397412",
"0.62846696",
"0.6236672",
"0.61686414",
"0.61398065",
"0.6100066",
"0.60873055",
"0.60862577",
"0.60640794",
"0.6057393",
"0.60468096",
"0.6044836",
"0.59907424",
"0.59554666",
"0.5952139",
"0.59387517",
"0.59303457",
"0.58676326",
"0.58666664",
"0.58607686",
"0.58517706",
"0.5825159",
"0.5784281",
"0.5769756",
"0.57675326",
"0.5747178",
"0.572441",
"0.5713915"
] |
0.719711
|
1
|
Get the total value of all money and all shares converted into money
|
def GetSpeculated(self):
return self.money + sum([self.share[i] * self.price[i][0] * (1 + self.taxe) for i in self.price])
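A hedged sketch of the same valuation as a standalone function. The dict-based portfolio layout (prices stored as [current_price, ...]) mirrors the method above; the function name and the example figures are assumptions.

def speculated_value(money, share, price, taxe):
    """Cash plus every holding valued at the current price,
    scaled by (1 + taxe) as in the method above."""
    return money + sum(share[s] * price[s][0] * (1 + taxe) for s in price)

# Example: 949.5 in cash and 5 ACME shares at 10.0 with a 1% fee
# -> 949.5 + 5 * 10.0 * 1.01 = 1000.0
print(speculated_value(949.5, {"ACME": 5}, {"ACME": [10.0]}, 0.01))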
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getCurrencies():",
"def getAmount2(*args):",
"def getAmount1(*args):",
"def get_money(self):\n return self.money",
"def getMoney(self):\n if 1 in self.money:\n return self.money[1]\n else:\n return 0",
"def get_cash(self):\r\n return self.cash",
"def getUserCurrency():",
"def getValue(currency=None):",
"def money(self):\r\n return self._money",
"def returnCurrencies(self):\n pass",
"def __call__(self):\n currency_data = getUtility(ICurrencyData)\n currency_data_list = currency_data.currency_data_list()\n results = {}\n for i in currency_data_list:\n results.update({i['code']:i['decimal']})\n return results",
"def _amount_all(self):\n res = {}\n ut_obj = self.env['l10n.ut']\n for iwdl_brw in self.browse(self.ids):\n # Using a clousure to make this call shorter\n f_xc = ut_obj.sxc(\n iwdl_brw.invoice_id.company_id.currency_id.id,\n iwdl_brw.invoice_id.currency_id.id,\n iwdl_brw.islr_wh_doc_id.date_uid)\n\n res[iwdl_brw.id] = {\n 'amount': (iwdl_brw.base_amount * (iwdl_brw.retencion_islr / 100.0)) or 0.0,\n 'currency_amount': 0.0,\n 'currency_base_amount': 0.0,\n }\n for xml_brw in iwdl_brw.xml_ids:\n res[iwdl_brw.id]['amount'] = xml_brw.wh\n res[iwdl_brw.id]['currency_amount'] = f_xc(\n res[iwdl_brw.id]['amount'])\n res[iwdl_brw.id]['currency_base_amount'] = f_xc(\n iwdl_brw.base_amount)",
"def money(self):\n return self._money",
"def getBaseCurrency():",
"def get_company_and_price(shares: list[Share]) -> list[Share]:\n\n for share in shares:\n\n share_info = lookup(share.symbol)\n if share_info is not None:\n share.company_name = share_info[\"name\"]\n share.price = share_info[\"price\"]\n share.total = share.price * share.qty\n else:\n share.company_name = CMP_NOT_FOUND\n\n return shares",
"def total_value(self):\n total = 0.0\n for account in self.accounts():\n total += account.available_cash()\n for asset in account.assets():\n total += asset.adjusted_value()\n return total",
"def getActiveCurrencies():",
"def test_convert_amounts(self):\n pass",
"def get_money(self) -> float: \n money = get_owned()\n try:\n assert type(self.owned_init) == float\n except AssertionError: #The first time one tries to make a bet this is evoked\n self.owned_init = money\n finally:\n return money",
"def _amount_all(self, cr, uid, ids,field_name, arg, context={}):\n res={}\n for record in self.browse(cr, uid, ids, context=context):\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res[record.id] = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n return res",
"def main():\n print(cash_money(44.333333))",
"def money_from_args(args, fromobj):\n allcoins = (\"coins\", \"coin\", \"silver\", \"money\", \"pieces\", \"all\")\n currency = fromobj.item_data.currency\n currency = float(currency)\n currency = round(currency, 2)\n if args in allcoins:\n val = currency\n else:\n arglist = args.split()\n val = float(arglist[0])\n val = round(val, 2)\n vals = (val, currency)\n return vals",
"def cash(self, qtt_100s, qtt_50s, qtt_20s):\n return (qtt_100s * 100) + (qtt_50s * 50) + (qtt_20s * 20)",
"def getActiveCurrency():",
"def getFactor(currency):",
"def calc_total_money(stock):\n tot_amount = stock[\"five\"] * 5\n tot_amount += stock[\"one\"]\n tot_amount += stock[\"quarter\"] / 4\n tot_amount += stock[\"dime\"] / 10\n tot_amount += stock[\"nickel\"] / 20\n \n return (int(tot_amount), int(str(tot_amount)[str(tot_amount).find('.')+1::]))",
"def get_currency_values_if_valid(self):\n home_value_exists = False\n foreign_value_exists = False\n if self.root.ids.home_currency_input.text == '':\n self.root.ids.home_currency_input.hint_text = 'Must enter an amount before calibrating'\n else:\n home_value_exists = True\n if self.root.ids.foreign_currency_input.text == '':\n self.root.ids.foreign_currency_input.hint_text = 'Must enter an amount before converting'\n else:\n foreign_value_exists = True\n if foreign_value_exists:\n try:\n foreign_amount = float(self.root.ids.foreign_currency_input.text)\n valid_foreign_amount = True\n except ValueError:\n self.root.ids.foreign_currency_input.text = ''\n self.root.ids.foreign_currency_input.hint_text = 'Invalid amount (not a number)'\n foreign_amount = 0\n valid_foreign_amount = False\n else:\n valid_foreign_amount = False\n foreign_amount = 0\n if home_value_exists:\n try:\n home_amount = float(self.root.ids.home_currency_input.text)\n valid_home_amount = True\n except ValueError:\n self.root.ids.home_currency_input.text = ''\n self.root.ids.home_currency_input.hint_text = 'Invalid amount (not a number)'\n home_amount = 0\n valid_home_amount = False\n else:\n valid_home_amount = False\n home_amount = 0\n\n return home_value_exists is foreign_value_exists is valid_foreign_amount is valid_home_amount is True, \\\n home_amount, foreign_amount",
"def compute(self):\n\t\tmontant = self.spn_montant.value() #recuperation de la valeur de la spn\n\t\tdevise_from = self.cbb_devisesFrom.currentText() #recuperation de la valeur de la cbb\n\t\tdevise_to = self.cbb_devisesTo.currentText()\n\t\t\n\t\t# on effectue la conversion grace a currency_converter\n\t\t# on fait une gestion d'erreur pour eviter les conversions non trouvees\n\t\ttry :\n\t\t\t\"\"\"on essaie\"\"\"\n\t\t\tresultat = self.c.convert(montant, devise_from, devise_to)\n\t\t\n\t\texcept currency_converter.currency_converter.RateNotFoundError :\n\t\t\t\"\"\"si erreur\"\"\"\n\t\t\tprint(\"le taux de conversion n'a pas ete trouve\")\n\t\t\n\t\telse :\n\t\t\t\"\"\"si pas d'erreur\"\"\"\n\t\t\tself.spn_montantConverti.setValue(resultat) #affichage dans la cbb",
"def somme_encaissee(self) -> Numeric:\n return query_sum(\n self.offres().filter(paye=True),\n \"prix\",\n output_field=models.DecimalField(),\n )",
"def getamount(self):\n return self.__amount"
] |
[
"0.6711975",
"0.64237803",
"0.6346039",
"0.6321251",
"0.62965816",
"0.6288939",
"0.62287134",
"0.6211428",
"0.6123923",
"0.6059905",
"0.6005782",
"0.59984255",
"0.59953415",
"0.59829986",
"0.5922546",
"0.5908083",
"0.5864107",
"0.5860223",
"0.5854684",
"0.5848573",
"0.58208156",
"0.57879454",
"0.5775685",
"0.5773297",
"0.57696265",
"0.57050127",
"0.56808895",
"0.5668354",
"0.5665521",
"0.56569976"
] |
0.67540455
|
0
|
Draw Bollinger Bands for the little price series
|
def DrawBands(self, count):
    # Moving average and standard deviation of the stored price samples.
    mobile_average = float(sum([float(self.little[i])
                                for i in range(len(self.little))])) / float(self.period)
    standard_deviation = sqrt(sum([pow(self.little[i] - mobile_average, 2)
                                   for i in range(len(self.little))]) / self.period)
    upper_band = mobile_average + (standard_deviation * self.sd_coef)
    lower_band = mobile_average - (standard_deviation * self.sd_coef)
    # Keep the band histories at most `period` samples long (newest first).
    self.upper.insert(0, upper_band)
    self.lower.insert(0, lower_band)
    if len(self.upper) >= self.period:
        self.upper.pop()
    if len(self.lower) >= self.period:
        self.lower.pop()
    if count >= self.period:
        # Price series in yellow.
        for i in range(len(self.little) - 1):
            self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,
                                    self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,
                                    (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,
                                    self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,
                                    fill="#FFFF00", width=2)
        # Upper band in orange, lower band in red.
        for i in range(len(self.upper) - 1):
            self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,
                                    self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,
                                    (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,
                                    self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,
                                    fill="#FF6600", width=3)
            self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,
                                    self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,
                                    (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,
                                    self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,
                                    fill="#FF0000", width=3)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_bollinger_bonds(rm, rstd):\r\n upper_band = rm + rstd * 2\r\n lower_band = rm - rstd * 2\r\n return upper_band, lower_band",
"def bblo(wave,bstar,airlimit,fig):\n import matplotlib.pyplot as plt\n import logging\n from scipy.interpolate import splrep,splev\n import tmath.wombat.womconfig as womconfig\n from tmath.wombat.womwaverange import womwaverange\n from tmath.wombat.womget_element import womget_element\n from tmath.wombat.inputter_single import inputter_single\n from tmath.wombat.onclick import onclick\n from tmath.pydux.waveparse import waveparse\n from tmath.wombat.yesno import yesno\n# global nsplinepoints, tmpsplptsx, tmpsplptsy, pflag\n print('Select regions to blotch')\n done=False\n while (not done):\n plt.cla()\n plt.plot(wave,bstar, drawstyle='steps-mid')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.pause(0.01)\n wavesub,fluxsub,mode=womwaverange(wave,bstar,'none')\n wavebind=womget_element(wave,wavesub[0])\n waverind=womget_element(wave,wavesub[-1])\n plt.cla()\n plt.plot(wave[wavebind:waverind+1],bstar[wavebind:waverind+1], \\\n drawstyle='steps-mid')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.pause(0.01)\n print('Do you want to enter blotch wavelengths by hand (w),')\n print('mark points (m), fit a spline (s), or quit (q)?')\n choice=inputter_single('(w/m/s/q): ','wmsq')\n if (choice == 'w') or (choice == 'm'):\n blotchgood=False\n while (not blotchgood):\n wavechoicedone=False\n while (not wavechoicedone):\n if (choice == 'w'):\n waveselb,waveselr=waveparse()\n else:\n print('Mark the two endpoints of the blotch region')\n endpoints=plt.ginput(2, timeout=-1)\n waveselb=endpoints[0][0]\n waveselr=endpoints[1][0]\n if (waveselb > waveselr):\n waveselb,waveselr=waveselr,waveselb\n waveselbind=womget_element(wave,waveselb)\n waveselrind=womget_element(wave,waveselr)\n print(waveselb, waveselr,waveselbind,waveselrind)\n if (waveselbind == 0) or (waveselrind == (len(wave)-1)):\n print('Wavelengths incorrect--too close to endpoints')\n else:\n wavechoicedone=True\n contblue=bstar[waveselbind-1]\n contred=bstar[waveselrind+1]\n delta=(contred-contblue)/(waveselrind-waveselbind+1)\n bstarcor=bstar.copy()\n for i in range(waveselbind,waveselrind+1):\n bstarcor[i]=contblue+ (i-waveselbind+1)*delta\n plt.plot(wave[wavebind:waverind+1],bstarcor[wavebind:waverind+1], \\\n drawstyle='steps-mid')\n plt.pause(0.01)\n print('Is this acceptable')\n answer=yesno('y')\n if (answer == 'y'):\n bstar=bstarcor.copy()\n blotchgood=True\n logging.info('File {} blotched from {} to {}'.format('bstar', wave[waveselbind], wave[waveselrind]))\n elif (choice == 's'):\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n womconfig.nsplinepoints=0\n womconfig.tmpsplptsx=[]\n womconfig.tmpsplptsy=[]\n\n spldone=False\n while (not spldone):\n plt.cla()\n plt.plot(wave[wavebind:waverind+1],bstar[wavebind:waverind+1], \\\n drawstyle='steps-mid')\n if (len(womconfig.tmpsplptsx) > 0):\n plt.plot(womconfig.tmpsplptsx,womconfig.tmpsplptsy,'ro')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n print('\\nClick on continuum points for spline fit.')\n print('Spline will replace values between first and last point')\n print('Left button = add point')\n print('Middle button = delete point')\n print('Right button = done\\n')\n womconfig.pflag=''\n while (womconfig.pflag != 'done'):\n plt.pause(0.01)\n fig.canvas.mpl_disconnect(cid)\n\n splptsy=[z for _,z in sorted(zip(womconfig.tmpsplptsx,womconfig.tmpsplptsy))]\n splptsx=sorted(womconfig.tmpsplptsx)\n 
spline=splrep(splptsx,splptsy,k=3)\n splblueindex=womget_element(wave,splptsx[0])\n splredindex=womget_element(wave,splptsx[-1])\n splwave=wave[splblueindex:splredindex+1].copy()\n splineresult=splev(splwave,spline)\n bstarcor=bstar.copy()\n bstarcor[splblueindex:splredindex+1]=splineresult.copy()\n plt.plot(splwave,splineresult,drawstyle='steps-mid')\n print('Is this acceptable')\n answer=yesno('y')\n if (answer == 'y'):\n bstar=bstarcor.copy()\n spldone=True\n logging.info('File {} blotched with spline from {} to {}'.format('bstar', wave[splblueindex], wave[splredindex]))\n else:\n done=True \n print('Do another region?')\n another=yesno('n')\n if (another == 'n'):\n done=True\n \n return bstar",
"def _drawBolts(self,view):\n if len(self._bolts)>0:\n for n in self._bolts:\n n.draw(view)",
"def determineBlinds(self):\n\t\tif self.roundNo <= 1:\n\t\t\tself.smallBlind = 5\n\t\t\tself.bigBlind = 10\n\t\telif self.roundNo == 2:\n\t\t\tself.smallBlind = 10\n\t\t\tself.bigBlind = 20\n\t\telif self.roundNo == 3:\n\t\t\tself.smallBlind = 20\n\t\t\tself.bigBlind = 40\n\t\telif self.roundNo == 4:\n\t\t\tself.smallBlind = 40\n\t\t\tself.bigBlind = 80\n\t\telif self.roundNo == 5:\n\t\t\tself.smallBlind = 80\n\t\t\tself.bigBlind = 160\n\t\telif self.roundNo >= 6:\n\t\t\tself.smallBlind = 160\n\t\t\tself.bigBlind = 320",
"def add_bollinger_bands(self, rstd):\n self.data['upper_band'] = self.data['rolling_mean'] + 2 * rstd\n self.data['lower_band'] = self.data['rolling_mean'] - 2 * rstd",
"def Roosbroeck_with_screening_B(vals, nxc, doping, temp, Blow):\n\n bmin = vals['rmax'] + (vals['rmin'] - vals['rmax']) / (\n 1. + (temp / vals['r1'])**vals['r2'])\n b1 = (vals['smax'] + (vals['smin'] - vals['smax']) / (\n 1. + (temp / vals['s1'])**vals['s2'])) * 2\n b3 = (vals['wmax'] + (vals['wmin'] - vals['wmax']\n ) / (\n 1. + (temp / vals['w1'])**vals['w2'])) * 2\n\n # print bmin\n\n B = Blow * (bmin + (vals['bmax'] - bmin) / (\n 1. + ((2. * nxc + doping) / b1\n )**vals['b2']\n + ((2. * nxc + doping) / b3)**vals['b4']))\n\n return B",
"def get_bollinger_bands(rm, rstd):\n \n upper_band=rm+2*rstd\n lower_band=rm-2*rstd\n return upper_band, lower_band",
"async def update_bbands(self, pair: str):\n\n if not config['enable_bbands']:\n return\n\n bband_window = config['ma_windows'][config['bband_ma']]\n source = self.adjusted_close_values[pair]\n source_ma = self.close_value_mas[pair][bband_window]\n num = self.last_update_nums[pair]\n end_index = len(source)\n end_ma_index = len(source_ma)\n ma_index = end_ma_index - num\n\n bband_high = []\n bband_low = []\n\n for index in range(end_index - num, end_index):\n bband_stdev = np.std(np.array(source[index - bband_window:index])) * config['bband_mult']\n bband_high.append(source_ma[ma_index] + bband_stdev)\n bband_low.append(source_ma[ma_index] - bband_stdev)\n ma_index += 1\n\n self.bollinger_bands[pair]['H'].extend(bband_high)\n self.bollinger_bands[pair]['L'].extend(bband_low)\n\n self.log.debug('{} Updated Bollinger bands.', pair, verbosity=1)",
"def high_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri + 50 \r\n if bri > 255:\r\n bri = 255 \r\n self.b.set_light(light,'bri',bri)",
"def lower_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri - 50\r\n if bri < 0:\r\n bri = 1\r\n self.b.set_light(light,'bri',bri)",
"def cal_bl(self, offset, size):\n blbegin = offset / conf.blsize\n blend = (offset + size - 1) / conf.blsize + 1\n blnum = range(blbegin, blend)\n\n blfrom = [offset % conf.blsize, ]\n blfrom.extend([0 for i in range(len(blnum) - 1)])\n\n blto = [conf.blsize for i in range(len(blnum) - 1)]\n least = (offset + size) % conf.blsize\n\n if least == 0:\n least = conf.blsize\n blto.append(least)\n\n return zip(blnum, blfrom, blto)",
"def get_bollinger_bands(rm, rstd, degrees):\n\tupper_band = rm + rstd * degrees\n\tlower_band = rm - rstd * degrees\n\treturn upper_band, lower_band",
"def bollinger_bands(self, normalize=False):\n sma = self.sma(normalize=normalize)\n rstd = self.rolling_std(normalize=normalize)\n upper_band = sma + rstd\n lower_band = sma - rstd\n return upper_band, lower_band",
"def get_bollinger_bands(rm, rstd, deviation=2):\n upper_band = rm + rstd * deviation\n lower_band = rm - rstd * deviation\n return upper_band, lower_band",
"def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()",
"def bollinger(client, symbol, range=\"6m\", col=\"close\", period=2):\n df = client.chartDF(symbol, range)\n bb = t.BBANDS(df[col].values.astype(float), period)\n return pd.DataFrame(\n {col: df[col].values, \"upper\": bb[0], \"middle\": bb[1], \"lower\": bb[2]}\n )",
"def power_bells(state):\n if not pinlessMode:\n if state:\n for pin in bellPins:\n GPIO.output(pin, GPIO.HIGH)\n elif not state:\n for pin in bellPins:\n GPIO.output(pin, GPIO.LOW)\n else:\n logging.debug(\"Bell state: \" + str(state))",
"async def refresh_bbands(self, pair: str):\n\n if not config['enable_bbands']:\n return\n\n bband_window = config['ma_windows'][config['bband_ma']]\n source = self.adjusted_close_values[pair][-(config['chart_age'] + bband_window):]\n bband_high = []\n bband_low = []\n ma_index = 0\n\n for index in range(bband_window, len(source)):\n bband_stdev = np.std(np.array(source[index - bband_window:index])) * config['bband_mult']\n bband_high.append(self.close_value_mas[pair][bband_window][ma_index] + bband_stdev)\n bband_low.append(self.close_value_mas[pair][bband_window][ma_index] - bband_stdev)\n ma_index += 1\n\n self.bollinger_bands[pair]['H'] = bband_high\n self.bollinger_bands[pair]['L'] = bband_low\n\n self.log.debug('{} Refreshed Bollinger bands.', pair, verbosity=1)",
"def _butter_bandpass(lowcut: float, highcut: float, fs: float, order: int = 5) -> tuple:\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype=\"band\")\n return b, a",
"def drawBolts(self,view):\r\n for bolt in self.getBolts():\r\n bolt.draw(view)",
"def theaterChaseRainbow(strip, state, maxBrightness, wait_ms=50, bling=True):\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i + q, wheel((i + j) % 255))\n if (STATE != state):\n break\n if (STATE != state):\n break\n if bling:\n global CYCLECOUNTER\n CYCLECOUNTER = CYCLECOUNTER + 1\n if CYCLECOUNTER > BLINGDELAY:\n CYCLECOUNTER = 1\n if CYCLECOUNTER == BLINGDELAY:\n strip.setPixelColor(random.randint(0, strip.numPixels()), Color(255, 255, 255))\n brightness = int((LED_BRIGHTNESS * maxBrightness) / 255)\n strip.setBrightness(brightness)\n strip.show()\n time.sleep(wait_ms / 1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i + q, 0)\n if (STATE != state):\n off(strip)\n break",
"def MakeVenetianBlinds(self):\r\n\r\n amount = 128\r\n size = self.GetClientSize()\r\n region = wx.Region(0, 0, size.x, 1)\r\n\r\n for y in xrange(size.y):\r\n\r\n # Reverse the order of the bottom 4 bits\r\n j = (y & 8 and [1] or [0])[0] | (y & 4 and [2] or [0])[0] | \\\r\n (y & 2 and [4] or [0])[0] | (y & 1 and [8] or [0])[0]\r\n \r\n if 16*j+8 < amount:\r\n region.Union(0, y, size.x, 1)\r\n \r\n self.SetShape(region)",
"def breathingRainbow(strip, state, maxBrightness, wait_ms=50):\n global BRIGHTNESS\n direction = 1\n step = 1\n minBreath = 8\n maxBreath = maxBrightness\n\t\n if BRIGHTNESS < minBreath:\n BRIGHTNESS = minBreath\n for j in range(256):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((i + j) & 255))\n if (STATE != state):\n break\n BRIGHTNESS = BRIGHTNESS + (direction * step)\n if BRIGHTNESS >= maxBreath or BRIGHTNESS < minBreath:\n direction = direction * -1\n strip.setBrightness(BRIGHTNESS)\n if (STATE != state):\n off(strip)\n break\n strip.show()\n time.sleep(wait_ms / 1000.0)",
"def fastbollingerSignal(price, uband, lband):\n n = len(price)\n signal = np.zeros(n)\n for i in prange(n):\n if price[i] <= lband[i]: # crossing down\n signal[i] = 1\n elif price[i] >= uband[i]: # crossing up\n signal[i] = -1\n else:\n signal[i] = 0\n return signal",
"def kx_plus_b(bottom_x, bottom_y):\r\n x_plus_delta = []\r\n y_plus_delta = []\r\n pixel_step = 1\r\n poligon_dots = 0\r\n for i in range(len(bottom_x) - 1):\r\n next_x = int(bottom_x[i])\r\n next_y = bottom_y[i]\r\n x_plus_delta.append(next_x)\r\n y_plus_delta.append(round(next_y,1))\r\n poligon_dots = poligon_dots + 1\r\n try:\r\n k = (bottom_y[i] - bottom_y[i+1])/(bottom_x[i] - bottom_x[i+1])\r\n b = bottom_y[i] - k * bottom_x[i]\r\n dots_between_edges = int((((bottom_x[i+1] - bottom_x[i])**2 + (bottom_y[i+1] - bottom_y[i])**2)**0.5) / pixel_step)\r\n X_step = (bottom_x[i+1] - bottom_x[i]) / dots_between_edges\r\n for j in range(dots_between_edges):\r\n next_x = next_x + X_step\r\n next_y = k * next_x + b\r\n x_plus_delta.append(int(next_x))\r\n y_plus_delta.append(round(next_y,1))\r\n poligon_dots = poligon_dots + dots_between_edges\r\n except:\r\n print('Расстояние между точками 0 пикселей! Пропуск точки')\r\n\r\n x_plus_delta.append(int(bottom_x[-1]))\r\n y_plus_delta.append(round(bottom_y[-1],1))\r\n poligon_dots = poligon_dots + 1\r\n return poligon_dots, x_plus_delta, y_plus_delta",
"def get_BBands(sma, stdev):\n\tupper_band = sma + 2*stdev\n\tlower_band = sma - 2*stdev\n\treturn upper_band, lower_band",
"def bollinger_lband_indicator(close, n=20, ndev=2, fillna=False):\n df = pd.DataFrame([close]).transpose()\n mavg = close.rolling(n).mean()\n mstd = close.rolling(n).std()\n lband = mavg - ndev * mstd\n df['lband'] = 0.0\n df.loc[close < lband, 'lband'] = 1.0\n lband = df['lband']\n if fillna:\n lband = lband.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(lband, name='bbilband')",
"def addBL(self):\n self.parent.copyCurrentWinState(self.pltw)\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()",
"def display(self, value):\n\t\tbattled = self.led\n\t\t# Sensor Reading\n\t\t# arduinoSerialData.write('2')\n\n\t\t# Grabs Sensor Data\n\t\t# batt = float(arduinoSerialData.readline())\n\t\t# Added 99 to prevent Static Mode Sensor Reading Collision\n\t\tbatt = value + 99.55\n\t\tprint(batt)\n\n\t\t# 100 to 87.5 Battery\n\t\tif batt > 104.13:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(0, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 1)\n\t\t# 75 Battery\n\t\telif batt > 103.94 and batt <= 104.13:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(2, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 1)\n\t\t# 62.5 Battery\n\t\telif batt > 103.75 and batt <= 103.94:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(3, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 1)\n\t\t# 50 Battery\n\t\telif batt > 103.56 and batt <= 103.75:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(4, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 3)\n\t\t# 37.5 Battery\n\t\telif batt > 103.40 and batt <= 103.56:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(5, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 3)\n\t\t# 25 Battery\n\t\telif batt > 103.19 and batt <= 103.40:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(6, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 2)\n\t\t# 12.5 Battery\n\t\telif batt > 103.1 and batt <= 103.19:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(7, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 2)\n\t\t# 0 Battery\n\t\telif batt < 103.1:\n\t\t\tbattled.clear()\n\t\t\tfor i in range(0, 8):\n\t\t\t\tfor j in range(0, 8):\n\t\t\t\t\tbattled.set(i, j, 2)\n\t\tbattled.write()\n\t\ttime.sleep(1.5)",
"def calcBW(t,g,bw):\n if type(g) is not np.array:\n g = np.array(g)\n if bw>0.0:\n bw*=-1.0\n maxg = np.max(g)\n g = g-maxg\n #plt.figure('testbw')\n \n imax = np.argmax(g)\n mb = np.where(g>3.0*bw)\n # ...left\n gg = g[mb[0][0]:imax]\n tt = t[mb[0][0]:imax]\n #plt.plot(tt,gg)\n ha1 = np.interp(bw,gg,tt)\n # ...right\n gg = np.flipud(g[imax:mb[0][-1]])\n tt = np.flipud(t[imax:mb[0][-1]])\n ha2 = np.interp(bw,gg,tt)\n #plt.plot(tt,gg)\n # ...full\n FWHM = abs(ha1) + abs(ha2)\n #print 'bw: ',ha1,ha2\n return FWHM"
] |
[
"0.63031137",
"0.6106892",
"0.6081654",
"0.6056311",
"0.60343945",
"0.6010062",
"0.6008821",
"0.6005752",
"0.5995668",
"0.5961967",
"0.59498906",
"0.5860355",
"0.58507574",
"0.5788195",
"0.57752544",
"0.57669455",
"0.5724179",
"0.57093775",
"0.56955534",
"0.5668017",
"0.5646141",
"0.5626211",
"0.55974036",
"0.5545993",
"0.5544362",
"0.55442077",
"0.55423605",
"0.5535866",
"0.5503017",
"0.54835016"
] |
0.70920604
|
0
|